CPP Unit Tests - Data Collection
8 items • Updated

Unnamed: 0 (int64, 0-409) | Code (string, lengths 131-27.3k) | Unit Test (string, lengths 89-30.5k) |
---|---|---|
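
A minimal loading sketch (an illustration, not part of the original collection): assuming the code/unit-test pairs below are exported as a CSV named cpp_unit_tests.csv with the three columns shown in the header, they can be read back with pandas. The file name and the export step are assumptions.

    # Hypothetical loading sketch: the CSV name and export format are assumptions,
    # not part of the original page; the column names match the header above.
    import pandas as pd

    df = pd.read_csv("cpp_unit_tests.csv")   # columns: "Unnamed: 0", "Code", "Unit Test"
    print(len(df))                           # number of code / unit-test pairs
    pair = df.iloc[0]
    print(pair["Code"][:200])                # start of the implementation source
    print(pair["Unit Test"][:200])           # start of the matching unit test
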
0 | #ifndef TENSORFLOW_COMPILER_MLIR_TF2XLA_API_V1_COMPILE_MLIR_UTIL_H_
#define TENSORFLOW_COMPILER_MLIR_TF2XLA_API_V1_COMPILE_MLIR_UTIL_H_
#include <memory>
#include "absl/base/attributes.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassManager.h"
#include "tensorflow/compiler/tf2xla/layout_util.h"
#include "tensorflow/compiler/tf2xla/xla_argument.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/client/xla_computation.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/framework/graph_debug_info.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
namespace tensorflow {
ABSL_DEPRECATED("Use v2/legalize_tf.h::LegalizeMlirToHlo instead.")
Status ConvertMLIRToXlaComputation(
mlir::ModuleOp module_op, llvm::StringRef device_type,
xla::XlaComputation* xla_computation, bool use_tuple_args,
bool enable_op_fallback, bool return_tuple,
const XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns =
{},
llvm::MutableArrayRef<std::unique_ptr<mlir::Pass>>
custom_legalization_passes = {},
llvm::StringRef module_name = llvm::StringRef());
ABSL_DEPRECATED("Use v2/legalize_tf.h::LegalizeMlirToHlo instead.")
void CreateConvertMlirToXlaHloPipeline(
mlir::OpPassManager& pm, llvm::StringRef device_type,
bool enable_op_fallback,
llvm::MutableArrayRef<std::unique_ptr<mlir::Pass>>
custom_legalization_passes,
bool lower_to_xla_hlo = true, bool allow_partial_conversion = false);
struct TensorOrResourceShape {
TensorShape shape;
bool is_resource = false;
};
ABSL_DEPRECATED("Not meant to be used directly and should be a util.")
Status RefineShapes(llvm::ArrayRef<TensorOrResourceShape> arg_shapes,
mlir::ModuleOp module);
ABSL_DEPRECATED("Use v2/legalize_tf.h::LegalizeMlirToHlo instead.")
Status BuildHloFromTf(mlir::ModuleOp module_op, xla::XlaBuilder& builder,
llvm::ArrayRef<xla::XlaOp> xla_params,
std::vector<xla::XlaOp>& returns,
llvm::ArrayRef<TensorOrResourceShape> arg_shapes,
llvm::StringRef device_type,
llvm::MutableArrayRef<std::unique_ptr<mlir::Pass>>
custom_legalization_passes);
ABSL_DEPRECATED("Not meant to be used directly and should be a util.")
Status PopulateResultIOInfo(
mlir::ModuleOp module_op, llvm::ArrayRef<TensorOrResourceShape> arg_shapes,
bool use_tuple_args, bool use_resource_updates_for_aliases,
const XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns,
XlaCompilationResult* compilation_result);
ABSL_DEPRECATED("Use v2/legalize_tf.h::LegalizeMlirToHlo instead.")
absl::StatusOr<std::string> CompileMlirToXlaHlo(
mlir::ModuleOp module_op, llvm::ArrayRef<TensorOrResourceShape> arg_shapes,
llvm::StringRef device_type, bool use_tuple_args, bool enable_op_fallback,
bool use_return_tuple, bool use_resource_updates_for_aliases,
XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns,
XlaCompilationResult* compilation_result,
llvm::MutableArrayRef<std::unique_ptr<mlir::Pass>>
custom_legalization_passes,
llvm::StringRef module_name = llvm::StringRef(),
bool lower_to_xla_hlo = true);
ABSL_DEPRECATED("Use v2/legalize_tf.h::LegalizeMlirToHlo instead.")
absl::StatusOr<std::string> CompileSerializedMlirToXlaHlo(
llvm::StringRef mlir_module_string, llvm::ArrayRef<TensorShape> arg_shapes,
llvm::StringRef device_type, bool use_tuple_args, bool enable_op_fallback,
const XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns,
XlaCompilationResult* compilation_result,
llvm::MutableArrayRef<std::unique_ptr<mlir::Pass>>
custom_legalization_passes = {},
llvm::StringRef module_name = llvm::StringRef(),
bool lower_to_xla_hlo = true);
ABSL_DEPRECATED("Use v2/legalize_tf.h::LegalizeMlirToHlo instead.")
Status CompileGraphToXlaHlo(
mlir::ModuleOp module_op, llvm::ArrayRef<XlaArgument> args,
llvm::StringRef device_type, bool use_tuple_args, bool enable_op_fallback,
bool use_return_tuple,
const XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns,
XlaCompilationResult* compilation_result,
llvm::MutableArrayRef<std::unique_ptr<mlir::Pass>>
custom_legalization_passes);
ABSL_DEPRECATED(
"Use v1/compile_tf_graph.h::CompileTensorflowGraphToHlo instead.")
Status BuildHloFromGraph(
const Graph& graph, xla::XlaBuilder& builder,
mlir::MLIRContext& mlir_context, llvm::ArrayRef<xla::XlaOp> xla_params,
std::vector<xla::XlaOp>& returns, bool unconditionally_use_output_shapes,
llvm::ArrayRef<XlaArgument> args, llvm::ArrayRef<std::string> control_rets,
llvm::StringRef device_type, const FunctionLibraryDefinition& flib_def,
const GraphDebugInfo& debug_info,
llvm::MutableArrayRef<std::unique_ptr<mlir::Pass>>
custom_legalization_passes = {});
static inline Status CompileToHloGraphAnalysisFailedError() {
return errors::Internal("disabled after graph analysis");
}
void RegisterConvertMlirToXlaHloPipelineWithDefaults();
}
#endif
#include "tensorflow/compiler/mlir/tf2xla/api/v1/compile_mlir_util.h"
#include <memory>
#include <string>
#include "tensorflow/compiler/mlir/tf2xla/mlir_bridge_rollout_policy.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Shape/IR/Shape.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Dialect.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Transforms/Passes.h"
#include "stablehlo/dialect/Register.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/passes.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_executor.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_types.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/shape_inference.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/import_model.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/mlir_roundtrip_flags.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/bridge_logger.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/convert_tensor.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/convert_type.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/data_dumper_logger_config.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dynamic_shape_utils.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/error_util.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/serialize_mlir_module_utils.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/translate_utils.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/xla_sharding_util.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/mlir_pass_instrumentation.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/passes/lowering_passes.h"
#include "tensorflow/compiler/mlir/tf2xla/transforms/passes.h"
#include "tensorflow/compiler/tf2xla/layout_util.h"
#include "tensorflow/compiler/tf2xla/shape_util.h"
#include "tensorflow/compiler/tf2xla/type_util.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/client/xla_computation.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
#include "xla/mlir_hlo/mhlo/IR/register.h"
#include "xla/mlir_hlo/mhlo/transforms/passes.h"
#include "xla/shape.h"
#include "xla/translate/mhlo_to_hlo/layout_util.h"
#include "xla/translate/mhlo_to_hlo/mlir_hlo_to_hlo.h"
#include "xla/translate/mhlo_to_hlo/type_to_shape.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/platform/error_payloads.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/core_platform_payloads.pb.h"
#include "tensorflow/core/tpu/tpu_defs.h"
#include "tensorflow/core/util/debug_data_dumper.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
namespace {
constexpr absl::string_view kGroupSizeAttrName =
"tf2xla.collective_info.group_size";
constexpr absl::string_view kGroupKeyAttrName =
"tf2xla.collective_info.group_key";
absl::StatusOr<TensorShape> GetTensorShapeFromXlaArgument(
const XlaArgument& arg) {
if (absl::holds_alternative<xla::Shape>(arg.shape)) {
TensorShape arg_shape;
TF_RETURN_IF_ERROR(
XLAShapeToTensorShape(std::get<xla::Shape>(arg.shape), &arg_shape));
return arg_shape;
} else {
return std::get<TensorShape>(arg.shape);
}
}
Status MaybeRewriteLayoutWithShardedShape(
mlir::StringAttr sharding,
const XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns,
xla::Shape* shape) {
if (!sharding) return absl::OkStatus();
xla::OpSharding op_sharding;
if (tensorflow::DecodeShardingAttribute(sharding, op_sharding).failed()) {
return errors::InvalidArgument("failed to parse sharding '",
sharding.getValue().str(), "'");
}
std::optional<xla::HloSharding> hlo_sharding;
TF_ASSIGN_OR_RETURN(hlo_sharding, xla::HloSharding::FromProto(op_sharding));
TF_RETURN_IF_ERROR(RewriteLayoutWithShardedShape(
hlo_sharding, false, shape_determination_fns, shape));
return absl::OkStatus();
}
Status GetXlaInputShapes(
mlir::ModuleOp module, llvm::ArrayRef<TensorOrResourceShape> arg_shapes,
bool use_tuple_args,
const XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns,
std::vector<xla::Shape>* xla_input_shapes) {
xla_input_shapes->clear();
mlir::func::FuncOp main_func =
module.lookupSymbol<mlir::func::FuncOp>("main");
TF_RET_CHECK(main_func != nullptr) << "No main function found";
mlir::FunctionType func_type = main_func.getFunctionType();
int num_args = func_type.getNumInputs();
xla_input_shapes->reserve(num_args);
std::vector<xla::Shape> individual_arg_shapes;
individual_arg_shapes.reserve(num_args);
for (int i = 0; i < num_args; ++i) {
individual_arg_shapes.emplace_back();
xla::Shape& xla_shape = individual_arg_shapes.back();
DataType arg_dtype;
TF_RETURN_IF_ERROR(ConvertToDataType(func_type.getInput(i), &arg_dtype));
auto layout_preference = shape_determination_fns.layout_preference_fn(
arg_shapes[i].shape, arg_dtype, std::nullopt);
TF_ASSIGN_OR_RETURN(xla_shape,
shape_determination_fns.shape_representation_fn(
arg_shapes[i].shape, arg_dtype,
false, layout_preference));
auto sharding =
main_func.getArgAttrOfType<mlir::StringAttr>(i, "mhlo.sharding");
TF_RETURN_IF_ERROR(MaybeRewriteLayoutWithShardedShape(
sharding, shape_determination_fns, &xla_shape));
}
if (use_tuple_args) {
xla_input_shapes->push_back(
xla::ShapeUtil::MakeTupleShape(individual_arg_shapes));
} else {
*xla_input_shapes = individual_arg_shapes;
}
return absl::OkStatus();
}
mlir::RankedTensorType GetBufferType(mlir::Type ty) {
auto ranked_ty = mlir::dyn_cast_or_null<mlir::RankedTensorType>(ty);
if (!ranked_ty) return {};
int64_t rank = ranked_ty.getRank();
llvm::SmallVector<int64_t, 4> dims = llvm::to_vector<4>(ranked_ty.getShape());
auto encoding = mlir::dyn_cast_or_null<mlir::mhlo::TypeExtensionsAttr>(
ranked_ty.getEncoding());
if (encoding && !encoding.getBounds().empty()) {
for (int64_t dim = 0; dim < rank; ++dim) {
if (dims[dim] == mlir::ShapedType::kDynamic) {
dims[dim] = encoding.getBounds()[dim];
}
}
}
return GetTypeFromTFTensorShape(dims, ranked_ty.getElementType());
}
Status GetOutputInfo(
mlir::ModuleOp module, bool use_resource_updates_for_aliases,
XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns,
xla::Shape* xla_output_shape, std::vector<XlaOutputDescription>* outputs,
std::vector<XlaResourceUpdate>* resource_updates) {
auto shape_representation_fn_no_fast_memory =
[shape_determination_fns](
const xla::Shape& xla_shape) -> absl::StatusOr<xla::Shape> {
TensorShape shape;
TF_RETURN_IF_ERROR(XLAShapeToTensorShape(xla_shape, &shape));
TF_ASSIGN_OR_RETURN(DataType dtype, EncodePrimitiveTypeAsDataType(
xla_shape.element_type()));
auto layout_preference = shape_determination_fns.layout_preference_fn(
shape, dtype, std::nullopt);
return shape_determination_fns.shape_representation_fn(
shape, dtype, false, layout_preference);
};
mlir::func::FuncOp main_func =
module.lookupSymbol<mlir::func::FuncOp>("main");
mlir::FunctionType func_type = main_func.getFunctionType();
outputs->clear();
outputs->reserve(func_type.getNumResults());
resource_updates->clear();
resource_updates->reserve(func_type.getNumResults());
std::vector<xla::Shape> shapes;
shapes.reserve(func_type.getNumResults());
llvm::SmallDenseMap<unsigned, unsigned> output_to_input_alias;
for (unsigned i = 0; i < main_func.getNumArguments(); ++i)
if (auto aliasing_output = main_func.getArgAttrOfType<mlir::IntegerAttr>(
i, "tf.aliasing_output"))
output_to_input_alias[aliasing_output.getInt()] = i;
auto return_op = main_func.begin()->getTerminator();
for (const auto& type_and_idx : llvm::enumerate(func_type.getResults())) {
size_t idx = type_and_idx.index();
auto result_ty = mlir::cast<mlir::RankedTensorType>(type_and_idx.value());
mlir::RankedTensorType buffer_ty = result_ty;
if (!buffer_ty.hasStaticShape()) {
mlir::Value return_val = return_op->getOperand(idx);
if (auto owner = mlir::dyn_cast_or_null<mlir::tensor::CastOp>(
return_val.getDefiningOp())) {
buffer_ty = GetBufferType(owner.getOperand().getType());
if (!buffer_ty || !buffer_ty.hasStaticShape()) {
return errors::InvalidArgument(
"results needs to be static or bounded");
}
}
}
xla::Shape shape = xla::TypeToShape(buffer_ty);
if (shape.element_type() == xla::PRIMITIVE_TYPE_INVALID) {
return errors::InvalidArgument("XLA conversion failed for MLIR type.");
}
TF_ASSIGN_OR_RETURN(shape, shape_representation_fn_no_fast_memory(shape));
if (!result_ty.hasStaticShape()) {
int64_t rank = result_ty.getRank();
for (int64_t dim = 0; dim < rank; ++dim) {
if (result_ty.isDynamicDim(dim)) {
shape.set_dynamic_dimension(dim, true);
}
}
}
auto sharding = main_func.getResultAttrOfType<mlir::StringAttr>(
type_and_idx.index(), "mhlo.sharding");
TF_RETURN_IF_ERROR(MaybeRewriteLayoutWithShardedShape(
sharding, shape_determination_fns, &shape));
auto tensor_type =
mlir::dyn_cast<mlir::RankedTensorType>(type_and_idx.value());
shapes.push_back(shape);
auto it = output_to_input_alias.find(type_and_idx.index());
if (it != output_to_input_alias.end() && use_resource_updates_for_aliases) {
resource_updates->emplace_back();
XlaResourceUpdate& resource_update = resource_updates->back();
resource_update.input_index = it->getSecond();
resource_update.modified = true;
TF_RETURN_IF_ERROR(ConvertToDataType(tensor_type, &resource_update.type));
TF_RETURN_IF_ERROR(XLAShapeToTensorShape(shape, &resource_update.shape));
continue;
}
outputs->emplace_back();
XlaOutputDescription& out_desc = outputs->back();
TF_RETURN_IF_ERROR(ConvertToDataType(tensor_type, &out_desc.type));
out_desc.is_constant = false;
TF_RETURN_IF_ERROR(XLAShapeToTensorShape(shape, &out_desc.shape));
out_desc.input_index =
it != output_to_input_alias.end() ? it->getSecond() : -1;
out_desc.is_tensor_list = false;
}
*xla_output_shape = xla::ShapeUtil::MakeTupleShape(shapes);
return absl::OkStatus();
}
void GetInputMappingForMlir(int num_inputs, std::vector<int>* input_mapping) {
input_mapping->resize(num_inputs, 0);
std::iota(input_mapping->begin(), input_mapping->end(), 0);
}
static void RegisterDialects(mlir::DialectRegistry& registry) {
mlir::RegisterAllTensorFlowDialects(registry);
mlir::mhlo::registerAllMhloDialects(registry);
mlir::stablehlo::registerAllDialects(registry);
}
bool CanInlineFunctionsPostLegalization(llvm::StringRef device_type) {
return device_type == DEVICE_TPU_XLA_JIT;
}
void AddLegalizationPasses(mlir::OpPassManager& pm, bool legalize_chlo,
llvm::StringRef device_type, bool enable_op_fallback,
bool lower_to_xla_hlo) {
if (lower_to_xla_hlo) {
mlir::quant::stablehlo::AddQuantizationLoweringPasses(pm);
pm.addPass(mlir::mhlo::createLegalizeTFPass(
legalize_chlo,
device_type, enable_op_fallback));
}
pm.addNestedPass<mlir::func::FuncOp>(
mlir::mhlo::CreateInfeedsOpsXlaAdjustLayoutPass());
if (lower_to_xla_hlo) {
pm.addNestedPass<mlir::func::FuncOp>(mlir::createCanonicalizerPass());
pm.addPass(mlir::TF::CreateTFShapeInferencePass());
}
}
}
void CreateConvertMlirToXlaHloPipeline(
mlir::OpPassManager& pm, llvm::StringRef device_type,
bool enable_op_fallback,
llvm::MutableArrayRef<std::unique_ptr<mlir::Pass>>
custom_legalization_passes,
bool lower_to_xla_hlo, bool allow_partial_conversion) {
bool legalize_chlo = true;
pm.addNestedPass<mlir::func::FuncOp>(
tensorflow::tf2xla::internal::CreateInputLoweringMetricsPass());
pm.addNestedPass<mlir::func::FuncOp>(
mlir::mhlo::CreateTFXLADeviceSpecificTransformsPass(device_type));
pm.addPass(mlir::TF::CreateTFFunctionalControlFlowToRegions());
pm.addPass(mlir::createInlinerPass());
pm.addNestedPass<mlir::func::FuncOp>(
mlir::TF::CreateDropWhileShapeInvariantPass());
if (lower_to_xla_hlo) {
pm.addNestedPass<mlir::func::FuncOp>(
mlir::TF::CreateReplicateTensorListInitOpsPass());
}
pm.addNestedPass<mlir::func::FuncOp>(mlir::createCanonicalizerPass());
pm.addPass(mlir::createSCCPPass());
pm.addPass(mlir::TF::CreateGuaranteeAllFuncsOneUsePass());
pm.addPass(mlir::TF::CreateTFShapeInferencePass());
pm.addPass(mlir::createSCCPPass());
if (lower_to_xla_hlo) {
pm.addPass(mlir::TF::CreateTensorListOpsDecompositionPass());
}
pm.addPass(mlir::TF::CreateStackOpsDecompositionPass());
if (lower_to_xla_hlo) {
pm.addPass(mlir::TF::CreateTensorArrayOpsDecompositionPass());
}
pm.addNestedPass<mlir::func::FuncOp>(
mlir::TFDevice::CreateDecomposeResourceOpsPass());
pm.addPass(mlir::TF::CreatePromoteResourcesToArgsPass());
pm.addPass(mlir::createSymbolDCEPass());
pm.addNestedPass<mlir::func::FuncOp>(
mlir::mhlo::createSinkConstantsToControlFlowPass());
pm.addPass(mlir::TF::CreateTFShapeInferencePass());
if (lower_to_xla_hlo) {
pm.addPass(mlir::mhlo::createStablehloLegalizeToHloPass());
}
pm.addNestedPass<mlir::func::FuncOp>(mlir::TF::CreateLowerQuantizedPass());
pm.addNestedPass<mlir::func::FuncOp>(
mlir::quant::stablehlo::CreateConvertTFQuantTypesPass());
if (lower_to_xla_hlo) {
for (auto& target_pass : custom_legalization_passes) {
pm.addNestedPass<mlir::func::FuncOp>(std::move(target_pass));
}
pm.addPass(mlir::mhlo::CreateLegalizeTFCollectivePass());
}
AddLegalizationPasses(pm, legalize_chlo, device_type, enable_op_fallback,
lower_to_xla_hlo);
if (lower_to_xla_hlo) {
pm.addPass(mlir::mhlo::CreateLegalizeTFCommunicationPass());
if (!allow_partial_conversion) {
pm.addNestedPass<mlir::func::FuncOp>(
mlir::mhlo::CreateVerifyTFXLALegalizationPass(legalize_chlo));
}
}
if (CanInlineFunctionsPostLegalization(device_type)) {
pm.addPass(mlir::createInlinerPass());
}
pm.addNestedPass<mlir::func::FuncOp>(
mlir::mhlo::createSinkConstantsToControlFlowPass());
}
Status RefineShapes(llvm::ArrayRef<TensorOrResourceShape> arg_shapes,
mlir::ModuleOp module) {
auto producer_or = GetTfGraphProducerVersion(module);
if (!producer_or.ok()) ret | #include "tensorflow/compiler/mlir/tf2xla/api/v1/compile_mlir_util.h"
#include <initializer_list>
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/Pass/PassManager.h"
#include "tensorflow/compiler/jit/xla_compile_util.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/serialize_mlir_module_utils.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/client/xla_builder.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tensorflow/core/platform/types.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace {
using ::mlir::OpPassManager;
using ::tensorflow::monitoring::testing::CellReader;
using ::testing::HasSubstr;
static constexpr char kMlirModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> tensor<1xi32> {
%0 = "tf.Const"() {value = dense<1000> : tensor<1xi32>} : () -> tensor<1xi32>
func.return %0 : tensor<1xi32>
}
})";
TEST(LegalizeMlirTest, LegalizesModule) {
mlir::DialectRegistry mlir_registry;
RegisterAllTensorFlowDialects(mlir_registry);
std::vector<tensorflow::TensorShape> arg_shapes;
XlaCompilationResult compilation_result;
auto status = CompileSerializedMlirToXlaHlo(
kMlirModuleStr, arg_shapes, "XLA_TPU_JIT",
true, false,
{}, &compilation_result);
EXPECT_TRUE(status.ok());
EXPECT_THAT(status.value(), HasSubstr("mhlo.const"));
}
TEST(LegalizeMlirTest, FailsLegalizesModule) {
constexpr char failed_legalization[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> tensor<1xi32> {
%0 = "tf.DoesntExist"() : () -> tensor<1xi32>
func.return %0 : tensor<1xi32>
}
})";
CellReader<int64_t> count(
"/tensorflow/core/tf2xla/v1/mlir_failed_xla_legalize_tf_pass_count");
std::vector<tensorflow::TensorShape> arg_shapes;
XlaCompilationResult compilation_result;
auto status = CompileSerializedMlirToXlaHlo(
failed_legalization, arg_shapes, "XLA_TPU_JIT",
true, false,
{}, &compilation_result);
EXPECT_FALSE(status.ok());
EXPECT_EQ(count.Delta("tf.DoesntExist", "Unknown"), 1);
}
TEST(CompileMlirUtil, CreatesPipeline) {
OpPassManager pass_manager;
llvm::StringRef device_type = "XLA_CPU_JIT";
CreateConvertMlirToXlaHloPipeline(pass_manager, device_type,
false,
{});
EXPECT_FALSE(pass_manager.getPasses().empty());
}
TEST(CompileMlirUtil, HasLegalizationPass) {
OpPassManager pass_manager;
llvm::StringRef device_type = "XLA_CPU_JIT";
absl::string_view kLegalizeTfPass = "xla-legalize-tf";
CreateConvertMlirToXlaHloPipeline(pass_manager, device_type,
true,
{});
std::string pass_description;
llvm::raw_string_ostream raw_stream(pass_description);
pass_manager.printAsTextualPipeline(raw_stream);
EXPECT_THAT(pass_description, HasSubstr(kLegalizeTfPass));
}
TEST(CompileMlirUtil, DoesNotHaveLegalizationPass) {
OpPassManager pass_manager;
llvm::StringRef device_type = "XLA_CPU_JIT";
absl::string_view kLegalizeTfPass = "xla-legalize-tf";
CreateConvertMlirToXlaHloPipeline(pass_manager, device_type,
false,
{},
false);
std::string pass_description;
llvm::raw_string_ostream raw_stream(pass_description);
pass_manager.printAsTextualPipeline(raw_stream);
EXPECT_THAT(pass_description, Not(HasSubstr(kLegalizeTfPass)));
}
TEST(CompileMlirUtil, DoesNotLowerWhenTold) {
mlir::DialectRegistry mlir_registry;
RegisterAllTensorFlowDialects(mlir_registry);
std::vector<tensorflow::TensorShape> arg_shapes;
XlaCompilationResult compilation_result;
auto status = CompileSerializedMlirToXlaHlo(
kMlirModuleStr, arg_shapes, "XLA_TPU_JIT",
true, false,
{}, &compilation_result,
{},
"",
false);
EXPECT_TRUE(status.ok());
EXPECT_THAT(status.value(), HasSubstr("tf.Const"));
}
TEST(CompileMlirUtil, CanonicalizationIsExplicitDuringInlining) {
OpPassManager pass_manager;
llvm::StringRef device_type = "XLA_CPU_JIT";
absl::string_view kInlinePass =
"inline{default-pipeline=canonicalize "
"inlining-threshold=4294967295 max-iterations=4 }";
CreateConvertMlirToXlaHloPipeline(pass_manager, device_type,
true,
{});
std::string pass_description;
llvm::raw_string_ostream raw_stream(pass_description);
pass_manager.printAsTextualPipeline(raw_stream);
EXPECT_THAT(pass_description, HasSubstr(kInlinePass));
}
TEST(LegalizeMlirTest, LegalizesModuleWithDynamicShape) {
constexpr char legalization[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main(%arg0: tensor<?xi32, #mhlo.type_extensions<bounds = [1]>>) -> tensor<?xi32, #mhlo.type_extensions<bounds = [1]>> {
%0 = "tf.Identity"(%arg0) : (tensor<?xi32, #mhlo.type_extensions<bounds = [1]>>) -> tensor<?xi32, #mhlo.type_extensions<bounds = [1]>>
func.return %0 : tensor<?xi32, #mhlo.type_extensions<bounds = [1]>>
}
})";
std::vector<tensorflow::TensorShape> arg_shapes = {{1}};
XlaCompilationResult compilation_result;
auto status = CompileSerializedMlirToXlaHlo(
legalization, arg_shapes, "XLA_TPU_JIT",
true, false,
{}, &compilation_result);
EXPECT_TRUE(status.ok());
}
absl::StatusOr<std::unique_ptr<Graph>> BuildOpGraphWithOutputShapes() {
DataType data_type = DT_INT32;
std::initializer_list<int64_t> dims = {2, 3, 4, 5};
Tensor tensor(data_type, TensorShape(dims));
for (int i = 0; i < 2 * 3 * 4 * 5; ++i) {
tensor.flat<int32>()(i) = i;
}
NodeDef node;
auto builder = NodeDefBuilder("some_node", "Const")
.Attr("dtype", data_type)
.Attr("value", tensor);
AttrValue shape_attr;
TensorShapeProto* shape_proto = shape_attr.mutable_list()->add_shape();
shape_proto->add_dim()->set_size(1);
builder.Attr("_output_shapes", shape_attr);
TF_RETURN_IF_ERROR(builder.Finalize(&node));
return CreateSingleOpGraph(node, {}, {DataType::DT_INT32});
}
absl::Status BuildHloFromGraph(Graph& graph, bool use_output_shapes) {
xla::XlaBuilder builder(
::testing::UnitTest::GetInstance()->current_test_info()->name());
mlir::MLIRContext mlir_context;
llvm::SmallVector<xla::XlaOp, 4> xla_params;
std::vector<xla::XlaOp> returns(1);
return BuildHloFromGraph(graph, builder, mlir_context, xla_params, returns,
use_output_shapes, {},
{}, DEVICE_TPU,
FunctionLibraryDefinition(OpRegistry::Global()),
{},
{});
}
TEST(CompileMlirUtil, UsesCorrectOriginalShapeWithoutOutputShapes) {
TF_ASSERT_OK_AND_ASSIGN(auto graph, BuildOpGraphWithOutputShapes());
auto build_result = BuildHloFromGraph(*graph, false);
TF_ASSERT_OK(build_result);
}
TEST(CompileMlirUtil, UsesIncorrectOutputShapesWhenPresent) {
TF_ASSERT_OK_AND_ASSIGN(auto graph, BuildOpGraphWithOutputShapes());
auto build_result = BuildHloFromGraph(*graph, true);
ASSERT_FALSE(build_result.ok());
EXPECT_THAT(build_result.message(),
HasSubstr("op operand type 'tensor<2x3x4x5xi32>' and result type "
"'tensor<1xi32>' are cast incompatible"));
}
}
} |
1 | #ifndef TENSORFLOW_LITE_CORE_ACCELERATION_CONFIGURATION_C_XNNPACK_PLUGIN_H_
#define TENSORFLOW_LITE_CORE_ACCELERATION_CONFIGURATION_C_XNNPACK_PLUGIN_H_
#include "tensorflow/lite/core/acceleration/configuration/c/delegate_plugin.h"
#ifdef __cplusplus
extern "C" {
#endif
const TfLiteDelegatePlugin* TfLiteXnnpackDelegatePluginCApi();
#ifdef __cplusplus
}
#endif
#endif
#include "tensorflow/lite/core/acceleration/configuration/c/xnnpack_plugin.h"
#include <memory>
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
extern "C" {
static TfLiteDelegate* CreateDelegate(const void* settings) {
const ::tflite::TFLiteSettings* tflite_settings =
static_cast<const ::tflite::TFLiteSettings*>(settings);
auto options(TfLiteXNNPackDelegateOptionsDefault());
const auto* xnnpack_settings = tflite_settings->xnnpack_settings();
if (xnnpack_settings) {
options.num_threads = xnnpack_settings->num_threads();
if (xnnpack_settings->flags()) {
options.flags = xnnpack_settings->flags();
}
if (xnnpack_settings->experimental_weight_cache_file_path()) {
options.experimental_weight_cache_file_path =
xnnpack_settings->experimental_weight_cache_file_path()->c_str();
}
}
return TfLiteXNNPackDelegateCreate(&options);
}
static void DestroyDelegate(TfLiteDelegate* delegate) {
TfLiteXNNPackDelegateDelete(delegate);
}
static int DelegateErrno(TfLiteDelegate* from_delegate) { return 0; }
static constexpr TfLiteDelegatePlugin kPluginCApi{
CreateDelegate,
DestroyDelegate,
DelegateErrno,
};
const TfLiteDelegatePlugin* TfLiteXnnpackDelegatePluginCApi() {
return &kPluginCApi;
}
} | #include "tensorflow/lite/core/acceleration/configuration/c/xnnpack_plugin.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "pthreadpool.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite {
class XnnpackTest : public testing::Test {
public:
static constexpr int kNumThreadsForTest = 7;
void SetUp() override {
XNNPackSettingsBuilder xnnpack_settings_builder(flatbuffer_builder_);
xnnpack_settings_builder.add_num_threads(kNumThreadsForTest);
flatbuffers::Offset<XNNPackSettings> xnnpack_settings =
xnnpack_settings_builder.Finish();
TFLiteSettingsBuilder tflite_settings_builder(flatbuffer_builder_);
tflite_settings_builder.add_xnnpack_settings(xnnpack_settings);
flatbuffers::Offset<TFLiteSettings> tflite_settings =
tflite_settings_builder.Finish();
flatbuffer_builder_.Finish(tflite_settings);
settings_ = flatbuffers::GetRoot<TFLiteSettings>(
flatbuffer_builder_.GetBufferPointer());
}
~XnnpackTest() override = default;
protected:
flatbuffers::FlatBufferBuilder flatbuffer_builder_;
const TFLiteSettings *settings_;
};
constexpr int XnnpackTest::kNumThreadsForTest;
TEST_F(XnnpackTest, CanCreateAndDestroyDelegate) {
TfLiteDelegate *delegate =
TfLiteXnnpackDelegatePluginCApi()->create(settings_);
EXPECT_NE(delegate, nullptr);
TfLiteXnnpackDelegatePluginCApi()->destroy(delegate);
}
TEST_F(XnnpackTest, CanGetDelegateErrno) {
TfLiteDelegate *delegate =
TfLiteXnnpackDelegatePluginCApi()->create(settings_);
int error_number =
TfLiteXnnpackDelegatePluginCApi()->get_delegate_errno(delegate);
EXPECT_EQ(error_number, 0);
TfLiteXnnpackDelegatePluginCApi()->destroy(delegate);
}
TEST_F(XnnpackTest, SetsCorrectThreadCount) {
TfLiteDelegate *delegate =
TfLiteXnnpackDelegatePluginCApi()->create(settings_);
pthreadpool_t threadpool =
static_cast<pthreadpool_t>(TfLiteXNNPackDelegateGetThreadPool(delegate));
int thread_count = pthreadpool_get_threads_count(threadpool);
EXPECT_EQ(thread_count, kNumThreadsForTest);
TfLiteXnnpackDelegatePluginCApi()->destroy(delegate);
}
TEST_F(XnnpackTest, UsesDefaultFlagsByDefault) {
TfLiteDelegate *delegate =
TfLiteXnnpackDelegatePluginCApi()->create(settings_);
int flags = TfLiteXNNPackDelegateGetFlags(delegate);
EXPECT_EQ(flags, TfLiteXNNPackDelegateOptionsDefault().flags);
TfLiteXnnpackDelegatePluginCApi()->destroy(delegate);
}
TEST_F(XnnpackTest, UsesSpecifiedFlagsWhenNonzero) {
XNNPackSettingsBuilder xnnpack_settings_builder(flatbuffer_builder_);
xnnpack_settings_builder.add_flags(
tflite::XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_QU8);
flatbuffers::Offset<XNNPackSettings> xnnpack_settings =
xnnpack_settings_builder.Finish();
TFLiteSettingsBuilder tflite_settings_builder(flatbuffer_builder_);
tflite_settings_builder.add_xnnpack_settings(xnnpack_settings);
flatbuffers::Offset<TFLiteSettings> tflite_settings =
tflite_settings_builder.Finish();
flatbuffer_builder_.Finish(tflite_settings);
settings_ = flatbuffers::GetRoot<TFLiteSettings>(
flatbuffer_builder_.GetBufferPointer());
TfLiteDelegate *delegate =
TfLiteXnnpackDelegatePluginCApi()->create(settings_);
int flags = TfLiteXNNPackDelegateGetFlags(delegate);
EXPECT_EQ(flags, tflite::XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_QU8);
TfLiteXnnpackDelegatePluginCApi()->destroy(delegate);
}
TEST_F(XnnpackTest, UsesDefaultFlagsWhenZero) {
XNNPackSettingsBuilder xnnpack_settings_builder(flatbuffer_builder_);
xnnpack_settings_builder.add_flags(
tflite::XNNPackFlags_TFLITE_XNNPACK_DELEGATE_NO_FLAGS);
flatbuffers::Offset<XNNPackSettings> xnnpack_settings =
xnnpack_settings_builder.Finish();
TFLiteSettingsBuilder tflite_settings_builder(flatbuffer_builder_);
tflite_settings_builder.add_xnnpack_settings(xnnpack_settings);
flatbuffers::Offset<TFLiteSettings> tflite_settings =
tflite_settings_builder.Finish();
flatbuffer_builder_.Finish(tflite_settings);
settings_ = flatbuffers::GetRoot<TFLiteSettings>(
flatbuffer_builder_.GetBufferPointer());
TfLiteDelegate *delegate =
TfLiteXnnpackDelegatePluginCApi()->create(settings_);
int flags = TfLiteXNNPackDelegateGetFlags(delegate);
EXPECT_EQ(flags, TfLiteXNNPackDelegateOptionsDefault().flags);
TfLiteXnnpackDelegatePluginCApi()->destroy(delegate);
}
} |
2 | #ifndef QUICHE_QUIC_CORE_CRYPTO_CERTIFICATE_UTIL_H_
#define QUICHE_QUIC_CORE_CRYPTO_CERTIFICATE_UTIL_H_
#include <string>
#include "absl/strings/string_view.h"
#include "openssl/evp.h"
#include "quiche/quic/core/quic_time.h"
#include "quiche/quic/platform/api/quic_export.h"
namespace quic {
struct QUICHE_EXPORT CertificateTimestamp {
uint16_t year;
uint8_t month;
uint8_t day;
uint8_t hour;
uint8_t minute;
uint8_t second;
};
struct QUICHE_EXPORT CertificateOptions {
absl::string_view subject;
uint64_t serial_number;
CertificateTimestamp validity_start;
CertificateTimestamp validity_end;
};
QUICHE_EXPORT bssl::UniquePtr<EVP_PKEY> MakeKeyPairForSelfSignedCertificate();
QUICHE_EXPORT std::string CreateSelfSignedCertificate(
EVP_PKEY& key, const CertificateOptions& options);
}
#endif
#include "quiche/quic/core/crypto/certificate_util.h"
#include <string>
#include <vector>
#include "absl/strings/str_format.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "openssl/bn.h"
#include "openssl/bytestring.h"
#include "openssl/digest.h"
#include "openssl/ec_key.h"
#include "openssl/mem.h"
#include "openssl/pkcs7.h"
#include "openssl/pool.h"
#include "openssl/rsa.h"
#include "openssl/stack.h"
#include "quiche/quic/core/crypto/boring_utils.h"
#include "quiche/quic/platform/api/quic_logging.h"
namespace quic {
namespace {
bool AddEcdsa256SignatureAlgorithm(CBB* cbb) {
static const uint8_t kEcdsaWithSha256[] = {0x2a, 0x86, 0x48, 0xce,
0x3d, 0x04, 0x03, 0x02};
CBB sequence, oid;
if (!CBB_add_asn1(cbb, &sequence, CBS_ASN1_SEQUENCE) ||
!CBB_add_asn1(&sequence, &oid, CBS_ASN1_OBJECT)) {
return false;
}
if (!CBB_add_bytes(&oid, kEcdsaWithSha256, sizeof(kEcdsaWithSha256))) {
return false;
}
return CBB_flush(cbb);
}
bool AddName(CBB* cbb, absl::string_view name) {
static const uint8_t kCommonName[] = {0x55, 0x04, 0x03};
static const uint8_t kCountryName[] = {0x55, 0x04, 0x06};
static const uint8_t kOrganizationName[] = {0x55, 0x04, 0x0a};
static const uint8_t kOrganizationalUnitName[] = {0x55, 0x04, 0x0b};
std::vector<std::string> attributes =
absl::StrSplit(name, ',', absl::SkipEmpty());
if (attributes.empty()) {
QUIC_LOG(ERROR) << "Missing DN or wrong format";
return false;
}
CBB rdns;
if (!CBB_add_asn1(cbb, &rdns, CBS_ASN1_SEQUENCE)) {
return false;
}
for (const std::string& attribute : attributes) {
std::vector<std::string> parts =
absl::StrSplit(absl::StripAsciiWhitespace(attribute), '=');
if (parts.size() != 2) {
QUIC_LOG(ERROR) << "Wrong DN format at " + attribute;
return false;
}
const std::string& type_string = parts[0];
const std::string& value_string = parts[1];
absl::Span<const uint8_t> type_bytes;
if (type_string == "CN") {
type_bytes = kCommonName;
} else if (type_string == "C") {
type_bytes = kCountryName;
} else if (type_string == "O") {
type_bytes = kOrganizationName;
} else if (type_string == "OU") {
type_bytes = kOrganizationalUnitName;
} else {
QUIC_LOG(ERROR) << "Unrecognized type " + type_string;
return false;
}
CBB rdn, attr, type, value;
if (!CBB_add_asn1(&rdns, &rdn, CBS_ASN1_SET) ||
!CBB_add_asn1(&rdn, &attr, CBS_ASN1_SEQUENCE) ||
!CBB_add_asn1(&attr, &type, CBS_ASN1_OBJECT) ||
!CBB_add_bytes(&type, type_bytes.data(), type_bytes.size()) ||
!CBB_add_asn1(&attr, &value,
type_string == "C" ? CBS_ASN1_PRINTABLESTRING
: CBS_ASN1_UTF8STRING) ||
!AddStringToCbb(&value, value_string) || !CBB_flush(&rdns)) {
return false;
}
}
if (!CBB_flush(cbb)) {
return false;
}
return true;
}
bool CBBAddTime(CBB* cbb, const CertificateTimestamp& timestamp) {
CBB child;
std::string formatted_time;
const bool is_utc_time = (1950 <= timestamp.year && timestamp.year < 2050);
if (is_utc_time) {
uint16_t year = timestamp.year - 1900;
if (year >= 100) {
year -= 100;
}
formatted_time = absl::StrFormat("%02d", year);
if (!CBB_add_asn1(cbb, &child, CBS_ASN1_UTCTIME)) {
return false;
}
} else {
formatted_time = absl::StrFormat("%04d", timestamp.year);
if (!CBB_add_asn1(cbb, &child, CBS_ASN1_GENERALIZEDTIME)) {
return false;
}
}
absl::StrAppendFormat(&formatted_time, "%02d%02d%02d%02d%02dZ",
timestamp.month, timestamp.day, timestamp.hour,
timestamp.minute, timestamp.second);
static const size_t kGeneralizedTimeLength = 15;
static const size_t kUTCTimeLength = 13;
QUICHE_DCHECK_EQ(formatted_time.size(),
is_utc_time ? kUTCTimeLength : kGeneralizedTimeLength);
return AddStringToCbb(&child, formatted_time) && CBB_flush(cbb);
}
bool CBBAddExtension(CBB* extensions, absl::Span<const uint8_t> oid,
bool critical, absl::Span<const uint8_t> contents) {
CBB extension, cbb_oid, cbb_contents;
if (!CBB_add_asn1(extensions, &extension, CBS_ASN1_SEQUENCE) ||
!CBB_add_asn1(&extension, &cbb_oid, CBS_ASN1_OBJECT) ||
!CBB_add_bytes(&cbb_oid, oid.data(), oid.size()) ||
(critical && !CBB_add_asn1_bool(&extension, 1)) ||
!CBB_add_asn1(&extension, &cbb_contents, CBS_ASN1_OCTETSTRING) ||
!CBB_add_bytes(&cbb_contents, contents.data(), contents.size()) ||
!CBB_flush(extensions)) {
return false;
}
return true;
}
bool IsEcdsa256Key(const EVP_PKEY& evp_key) {
if (EVP_PKEY_id(&evp_key) != EVP_PKEY_EC) {
return false;
}
const EC_KEY* key = EVP_PKEY_get0_EC_KEY(&evp_key);
if (key == nullptr) {
return false;
}
const EC_GROUP* group = EC_KEY_get0_group(key);
if (group == nullptr) {
return false;
}
return EC_GROUP_get_curve_name(group) == NID_X9_62_prime256v1;
}
}
bssl::UniquePtr<EVP_PKEY> MakeKeyPairForSelfSignedCertificate() {
bssl::UniquePtr<EVP_PKEY_CTX> context(
EVP_PKEY_CTX_new_id(EVP_PKEY_EC, nullptr));
if (!context) {
return nullptr;
}
if (EVP_PKEY_keygen_init(context.get()) != 1) {
return nullptr;
}
if (EVP_PKEY_CTX_set_ec_paramgen_curve_nid(context.get(),
NID_X9_62_prime256v1) != 1) {
return nullptr;
}
EVP_PKEY* raw_key = nullptr;
if (EVP_PKEY_keygen(context.get(), &raw_key) != 1) {
return nullptr;
}
return bssl::UniquePtr<EVP_PKEY>(raw_key);
}
std::string CreateSelfSignedCertificate(EVP_PKEY& key,
const CertificateOptions& options) {
std::string error;
if (!IsEcdsa256Key(key)) {
QUIC_LOG(ERROR) << "CreateSelfSignedCert only accepts ECDSA P-256 keys";
return error;
}
bssl::ScopedCBB cbb;
CBB tbs_cert, version, validity;
uint8_t* tbs_cert_bytes;
size_t tbs_cert_len;
if (!CBB_init(cbb.get(), 64) ||
!CBB_add_asn1(cbb.get(), &tbs_cert, CBS_ASN1_SEQUENCE) ||
!CBB_add_asn1(&tbs_cert, &version,
CBS_ASN1_CONTEXT_SPECIFIC | CBS_ASN1_CONSTRUCTED | 0) ||
!CBB_add_asn1_uint64(&version, 2) ||
!CBB_add_asn1_uint64(&tbs_cert, options.serial_number) ||
!AddEcdsa256SignatureAlgorithm(&tbs_cert) ||
!AddName(&tbs_cert, options.subject) ||
!CBB_add_asn1(&tbs_cert, &validity, CBS_ASN1_SEQUENCE) ||
!CBBAddTime(&validity, options.validity_start) ||
!CBBAddTime(&validity, options.validity_end) ||
!AddName(&tbs_cert, options.subject) ||
!EVP_marshal_public_key(&tbs_cert, &key)) {
return error;
}
CBB outer_extensions, extensions;
if (!CBB_add_asn1(&tbs_cert, &outer_extensions,
3 | CBS_ASN1_CONTEXT_SPECIFIC | CBS_ASN1_CONSTRUCTED) ||
!CBB_add_asn1(&outer_extensions, &extensions, CBS_ASN1_SEQUENCE)) {
return error;
}
constexpr uint8_t kKeyUsageOid[] = {0x55, 0x1d, 0x0f};
constexpr uint8_t kKeyUsageContent[] = {
0x3,
0x2,
0x0,
0x80,
};
CBBAddExtension(&extensions, kKeyUsageOid, true, kKeyUsageContent);
if (!CBB_finish(cbb.get(), &tbs_cert_bytes, &tbs_cert_len)) {
return error;
}
bssl::UniquePtr<uint8_t> delete_tbs_cert_bytes(tbs_cert_bytes);
CBB cert, signature;
bssl::ScopedEVP_MD_CTX ctx;
uint8_t* sig_out;
size_t sig_len;
uint8_t* cert_bytes;
size_t cert_len;
if (!CBB_init(cbb.get(), tbs_cert_len) ||
!CBB_add_asn1(cbb.get(), &cert, CBS_ASN1_SEQUENCE) ||
!CBB_add_bytes(&cert, tbs_cert_bytes, tbs_cert_len) ||
!AddEcdsa256SignatureAlgorithm(&cert) ||
!CBB_add_asn1(&cert, &signature, CBS_ASN1_BITSTRING) ||
!CBB_add_u8(&signature, 0) ||
!EVP_DigestSignInit(ctx.get(), nullptr, EVP_sha256(), nullptr, &key) ||
!EVP_DigestSign(ctx.get(), nullptr, &sig_len, tbs_cert_bytes,
tbs_cert_len) ||
!CBB_reserve(&signature, &sig_out, sig_len) ||
!EVP_DigestSign(ctx.get(), sig_out, &sig_len, tbs_cert_bytes,
tbs_cert_len) ||
!CBB_did_write(&signature, sig_len) ||
!CBB_finish(cbb.get(), &cert_bytes, &cert_len)) {
return error;
}
bssl::UniquePtr<uint8_t> delete_cert_bytes(cert_bytes);
return std::string(reinterpret_cast<char*>(cert_bytes), cert_len);
}
} | #include "quiche/quic/core/crypto/certificate_util.h"
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "openssl/ssl.h"
#include "quiche/quic/core/crypto/certificate_view.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/platform/api/quic_test_output.h"
namespace quic {
namespace test {
namespace {
TEST(CertificateUtilTest, CreateSelfSignedCertificate) {
bssl::UniquePtr<EVP_PKEY> key = MakeKeyPairForSelfSignedCertificate();
ASSERT_NE(key, nullptr);
CertificatePrivateKey cert_key(std::move(key));
CertificateOptions options;
options.subject = "CN=subject";
options.serial_number = 0x12345678;
options.validity_start = {2020, 1, 1, 0, 0, 0};
options.validity_end = {2049, 12, 31, 0, 0, 0};
std::string der_cert =
CreateSelfSignedCertificate(*cert_key.private_key(), options);
ASSERT_FALSE(der_cert.empty());
QuicSaveTestOutput("CertificateUtilTest_CreateSelfSignedCert.crt", der_cert);
std::unique_ptr<CertificateView> cert_view =
CertificateView::ParseSingleCertificate(der_cert);
ASSERT_NE(cert_view, nullptr);
EXPECT_EQ(cert_view->public_key_type(), PublicKeyType::kP256);
std::optional<std::string> subject = cert_view->GetHumanReadableSubject();
ASSERT_TRUE(subject.has_value());
EXPECT_EQ(*subject, options.subject);
EXPECT_TRUE(
cert_key.ValidForSignatureAlgorithm(SSL_SIGN_ECDSA_SECP256R1_SHA256));
EXPECT_TRUE(cert_key.MatchesPublicKey(*cert_view));
}
}
}
} |
3 | #ifndef ABSL_BASE_INTERNAL_SYSINFO_H_
#define ABSL_BASE_INTERNAL_SYSINFO_H_
#ifndef _WIN32
#include <sys/types.h>
#endif
#include <cstdint>
#include "absl/base/config.h"
#include "absl/base/port.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
double NominalCPUFrequency();
int NumCPUs();
#ifdef _WIN32
using pid_t = uint32_t;
#endif
pid_t GetTID();
pid_t GetCachedTID();
}
ABSL_NAMESPACE_END
}
#endif
#include "absl/base/internal/sysinfo.h"
#include "absl/base/attributes.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <fcntl.h>
#include <pthread.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#endif
#ifdef __linux__
#include <sys/syscall.h>
#endif
#if defined(__APPLE__) || defined(__FreeBSD__)
#include <sys/sysctl.h>
#endif
#ifdef __FreeBSD__
#include <pthread_np.h>
#endif
#ifdef __NetBSD__
#include <lwp.h>
#endif
#if defined(__myriad2__)
#include <rtems.h>
#endif
#include <string.h>
#include <cassert>
#include <cerrno>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <ctime>
#include <limits>
#include <thread>
#include <utility>
#include <vector>
#include "absl/base/call_once.h"
#include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/spinlock.h"
#include "absl/base/internal/unscaledcycleclock.h"
#include "absl/base/thread_annotations.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
namespace {
#if defined(_WIN32)
DWORD Win32CountSetBits(ULONG_PTR bitMask) {
for (DWORD bitSetCount = 0; ; ++bitSetCount) {
if (bitMask == 0) return bitSetCount;
bitMask &= bitMask - 1;
}
}
int Win32NumCPUs() {
#pragma comment(lib, "kernel32.lib")
using Info = SYSTEM_LOGICAL_PROCESSOR_INFORMATION;
DWORD info_size = sizeof(Info);
Info* info(static_cast<Info*>(malloc(info_size)));
if (info == nullptr) return 0;
bool success = GetLogicalProcessorInformation(info, &info_size);
if (!success && GetLastError() == ERROR_INSUFFICIENT_BUFFER) {
free(info);
info = static_cast<Info*>(malloc(info_size));
if (info == nullptr) return 0;
success = GetLogicalProcessorInformation(info, &info_size);
}
DWORD logicalProcessorCount = 0;
if (success) {
Info* ptr = info;
DWORD byteOffset = 0;
while (byteOffset + sizeof(Info) <= info_size) {
switch (ptr->Relationship) {
case RelationProcessorCore:
logicalProcessorCount += Win32CountSetBits(ptr->ProcessorMask);
break;
case RelationNumaNode:
case RelationCache:
case RelationProcessorPackage:
break;
default:
break;
}
byteOffset += sizeof(Info);
ptr++;
}
}
free(info);
return static_cast<int>(logicalProcessorCount);
}
#endif
}
static int GetNumCPUs() {
#if defined(__myriad2__)
return 1;
#elif defined(_WIN32)
const int hardware_concurrency = Win32NumCPUs();
return hardware_concurrency ? hardware_concurrency : 1;
#elif defined(_AIX)
return sysconf(_SC_NPROCESSORS_ONLN);
#else
return static_cast<int>(std::thread::hardware_concurrency());
#endif
}
#if defined(_WIN32)
static double GetNominalCPUFrequency() {
#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) && \
!WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
return 1.0;
#else
#pragma comment(lib, "advapi32.lib")
HKEY key;
if (RegOpenKeyExA(HKEY_LOCAL_MACHINE,
"HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", 0,
KEY_READ, &key) == ERROR_SUCCESS) {
DWORD type = 0;
DWORD data = 0;
DWORD data_size = sizeof(data);
auto result = RegQueryValueExA(key, "~MHz", nullptr, &type,
reinterpret_cast<LPBYTE>(&data), &data_size);
RegCloseKey(key);
if (result == ERROR_SUCCESS && type == REG_DWORD &&
data_size == sizeof(data)) {
return data * 1e6;
}
}
return 1.0;
#endif
}
#elif defined(CTL_HW) && defined(HW_CPU_FREQ)
static double GetNominalCPUFrequency() {
unsigned freq;
size_t size = sizeof(freq);
int mib[2] = {CTL_HW, HW_CPU_FREQ};
if (sysctl(mib, 2, &freq, &size, nullptr, 0) == 0) {
return static_cast<double>(freq);
}
return 1.0;
}
#else
static bool ReadLongFromFile(const char *file, long *value) {
bool ret = false;
#if defined(_POSIX_C_SOURCE)
const int file_mode = (O_RDONLY | O_CLOEXEC);
#else
const int file_mode = O_RDONLY;
#endif
int fd = open(file, file_mode);
if (fd != -1) {
char line[1024];
char *err;
memset(line, '\0', sizeof(line));
ssize_t len;
do {
len = read(fd, line, sizeof(line) - 1);
} while (len < 0 && errno == EINTR);
if (len <= 0) {
ret = false;
} else {
const long temp_value = strtol(line, &err, 10);
if (line[0] != '\0' && (*err == '\n' || *err == '\0')) {
*value = temp_value;
ret = true;
}
}
close(fd);
}
return ret;
}
#if defined(ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY)
static int64_t ReadMonotonicClockNanos() {
struct timespec t;
#ifdef CLOCK_MONOTONIC_RAW
int rc = clock_gettime(CLOCK_MONOTONIC_RAW, &t);
#else
int rc = clock_gettime(CLOCK_MONOTONIC, &t);
#endif
if (rc != 0) {
ABSL_INTERNAL_LOG(
FATAL, "clock_gettime() failed: (" + std::to_string(errno) + ")");
}
return int64_t{t.tv_sec} * 1000000000 + t.tv_nsec;
}
class UnscaledCycleClockWrapperForInitializeFrequency {
public:
static int64_t Now() { return base_internal::UnscaledCycleClock::Now(); }
};
struct TimeTscPair {
int64_t time;
int64_t tsc;
};
static TimeTscPair GetTimeTscPair() {
int64_t best_latency = std::numeric_limits<int64_t>::max();
TimeTscPair best;
for (int i = 0; i < 10; ++i) {
int64_t t0 = ReadMonotonicClockNanos();
int64_t tsc = UnscaledCycleClockWrapperForInitializeFrequency::Now();
int64_t t1 = ReadMonotonicClockNanos();
int64_t latency = t1 - t0;
if (latency < best_latency) {
best_latency = latency;
best.time = t0;
best.tsc = tsc;
}
}
return best;
}
static double MeasureTscFrequencyWithSleep(int sleep_nanoseconds) {
auto t0 = GetTimeTscPair();
struct timespec ts;
ts.tv_sec = 0;
ts.tv_nsec = sleep_nanoseconds;
while (nanosleep(&ts, &ts) != 0 && errno == EINTR) {}
auto t1 = GetTimeTscPair();
double elapsed_ticks = t1.tsc - t0.tsc;
double elapsed_time = (t1.time - t0.time) * 1e-9;
return elapsed_ticks / elapsed_time;
}
static double MeasureTscFrequency() {
double last_measurement = -1.0;
int sleep_nanoseconds = 1000000;
for (int i = 0; i < 8; ++i) {
double measurement = MeasureTscFrequencyWithSleep(sleep_nanoseconds);
if (measurement * 0.99 < last_measurement &&
last_measurement < measurement * 1.01) {
return measurement;
}
last_measurement = measurement;
sleep_nanoseconds *= 2;
}
return last_measurement;
}
#endif
static double GetNominalCPUFrequency() {
long freq = 0;
if (ReadLongFromFile("/sys/devices/system/cpu/cpu0/tsc_freq_khz", &freq)) {
return freq * 1e3;
}
#if defined(ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY)
return MeasureTscFrequency();
#else
if (ReadLongFromFile("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq",
&freq)) {
return freq * 1e3;
}
return 1.0;
#endif
}
#endif
ABSL_CONST_INIT static once_flag init_num_cpus_once;
ABSL_CONST_INIT static int num_cpus = 0;
int NumCPUs() {
base_internal::LowLevelCallOnce(
&init_num_cpus_once, []() { num_cpus = GetNumCPUs(); });
return num_cpus;
}
ABSL_CONST_INIT static once_flag init_nominal_cpu_frequency_once;
ABSL_CONST_INIT static double nominal_cpu_frequency = 1.0;
double NominalCPUFrequency() {
base_internal::LowLevelCallOnce(
&init_nominal_cpu_frequency_once,
[]() { nominal_cpu_frequency = GetNominalCPUFrequency(); });
return nominal_cpu_frequency;
}
#if defined(_WIN32)
pid_t GetTID() {
return pid_t{GetCurrentThreadId()};
}
#elif defined(__linux__)
#ifndef SYS_gettid
#define SYS_gettid __NR_gettid
#endif
pid_t GetTID() {
return static_cast<pid_t>(syscall(SYS_gettid));
}
#elif defined(__akaros__)
pid_t GetTID() {
if (in_vcore_context())
return 0;
return reinterpret_cast<struct pthread_tcb *>(current_uthread)->id;
}
#elif defined(__myriad2__)
pid_t GetTID() {
uint32_t tid;
rtems_task_ident(RTEMS_SELF, 0, &tid);
return tid;
}
#elif defined(__APPLE__)
pid_t GetTID() {
uint64_t tid;
pthread_threadid_np(nullptr, &tid);
return static_cast<pid_t>(tid);
}
#elif defined(__FreeBSD__)
pid_t GetTID() { return static_cast<pid_t>(pthread_getthreadid_np()); }
#elif defined(__OpenBSD__)
pid_t GetTID() { return getthrid(); }
#elif defined(__NetBSD__)
pid_t GetTID() { return static_cast<pid_t>(_lwp_self()); }
#elif defined(__native_client__)
pid_t GetTID() {
auto* thread = pthread_self();
static_assert(sizeof(pid_t) == sizeof(thread),
"In NaCL int expected to be the same size as a pointer");
return reinterpret_cast<pid_t>(thread);
}
#else
pid_t GetTID() {
return static_cast<pid_t>(pthread_self());
}
#endif
pid_t GetCachedTID() {
#ifdef ABSL_HAVE_THREAD_LOCAL
static thread_local pid_t thread_id = GetTID();
return thread_id;
#else
return GetTID();
#endif
}
}
ABSL_NAMESPACE_END
} | #include "absl/base/internal/sysinfo.h"
#ifndef _WIN32
#include <sys/types.h>
#include <unistd.h>
#endif
#include <thread>
#include <unordered_set>
#include <vector>
#include "gtest/gtest.h"
#include "absl/synchronization/barrier.h"
#include "absl/synchronization/mutex.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
namespace {
TEST(SysinfoTest, NumCPUs) {
EXPECT_NE(NumCPUs(), 0)
<< "NumCPUs() should not have the default value of 0";
}
TEST(SysinfoTest, GetTID) {
EXPECT_EQ(GetTID(), GetTID());
#ifdef __native_client__
return;
#endif
for (int i = 0; i < 10; ++i) {
constexpr int kNumThreads = 10;
Barrier all_threads_done(kNumThreads);
std::vector<std::thread> threads;
Mutex mutex;
std::unordered_set<pid_t> tids;
for (int j = 0; j < kNumThreads; ++j) {
threads.push_back(std::thread([&]() {
pid_t id = GetTID();
{
MutexLock lock(&mutex);
ASSERT_TRUE(tids.find(id) == tids.end());
tids.insert(id);
}
all_threads_done.Block();
}));
}
for (auto& thread : threads) {
thread.join();
}
}
}
#ifdef __linux__
TEST(SysinfoTest, LinuxGetTID) {
EXPECT_EQ(GetTID(), getpid());
}
#endif
}
}
ABSL_NAMESPACE_END
} |
4 | #ifndef QUICHE_COMMON_HTTP_HTTP_HEADER_BLOCK_H_
#define QUICHE_COMMON_HTTP_HTTP_HEADER_BLOCK_H_
#include <stddef.h>
#include <functional>
#include <list>
#include <string>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/container/inlined_vector.h"
#include "absl/strings/string_view.h"
#include "quiche/common/http/http_header_storage.h"
#include "quiche/common/platform/api/quiche_export.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/quiche_linked_hash_map.h"
#include "quiche/common/quiche_text_utils.h"
namespace quiche {
namespace test {
class HttpHeaderBlockPeer;
class ValueProxyPeer;
}
#ifndef SPDY_HEADER_DEBUG
#if !defined(NDEBUG) || defined(ADDRESS_SANITIZER)
#define SPDY_HEADER_DEBUG 1
#else
#define SPDY_HEADER_DEBUG 0
#endif
#endif
class QUICHE_EXPORT HttpHeaderBlock {
private:
class QUICHE_EXPORT HeaderValue {
public:
HeaderValue(HttpHeaderStorage* storage, absl::string_view key,
absl::string_view initial_value);
HeaderValue(HeaderValue&& other);
HeaderValue& operator=(HeaderValue&& other);
void set_storage(HttpHeaderStorage* storage);
HeaderValue(const HeaderValue& other) = delete;
HeaderValue& operator=(const HeaderValue& other) = delete;
~HeaderValue();
void Append(absl::string_view fragment);
absl::string_view value() const { return as_pair().second; }
const std::pair<absl::string_view, absl::string_view>& as_pair() const;
size_t SizeEstimate() const { return size_; }
private:
absl::string_view ConsolidatedValue() const;
mutable HttpHeaderStorage* storage_;
mutable Fragments fragments_;
mutable std::pair<absl::string_view, absl::string_view> pair_;
size_t size_ = 0;
size_t separator_size_ = 0;
};
typedef quiche::QuicheLinkedHashMap<absl::string_view, HeaderValue,
quiche::StringPieceCaseHash,
quiche::StringPieceCaseEqual>
MapType;
public:
typedef std::pair<absl::string_view, absl::string_view> value_type;
enum class InsertResult {
kInserted,
kReplaced,
};
class QUICHE_EXPORT iterator {
public:
typedef std::pair<absl::string_view, absl::string_view> value_type;
typedef value_type& reference;
typedef value_type* pointer;
typedef std::forward_iterator_tag iterator_category;
typedef MapType::iterator::difference_type difference_type;
typedef const value_type& const_reference;
typedef const value_type* const_pointer;
explicit iterator(MapType::const_iterator it);
iterator(const iterator& other);
~iterator();
const_reference operator*() const {
#if SPDY_HEADER_DEBUG
QUICHE_CHECK(!dereference_forbidden_);
#endif
return it_->second.as_pair();
}
const_pointer operator->() const { return &(this->operator*()); }
bool operator==(const iterator& it) const { return it_ == it.it_; }
bool operator!=(const iterator& it) const { return !(*this == it); }
iterator& operator++() {
it_++;
return *this;
}
iterator operator++(int) {
auto ret = *this;
this->operator++();
return ret;
}
#if SPDY_HEADER_DEBUG
void forbid_dereference() { dereference_forbidden_ = true; }
#endif
private:
MapType::const_iterator it_;
#if SPDY_HEADER_DEBUG
bool dereference_forbidden_ = false;
#endif
};
typedef iterator const_iterator;
HttpHeaderBlock();
HttpHeaderBlock(const HttpHeaderBlock& other) = delete;
HttpHeaderBlock(HttpHeaderBlock&& other);
~HttpHeaderBlock();
HttpHeaderBlock& operator=(const HttpHeaderBlock& other) = delete;
HttpHeaderBlock& operator=(HttpHeaderBlock&& other);
HttpHeaderBlock Clone() const;
bool operator==(const HttpHeaderBlock& other) const;
bool operator!=(const HttpHeaderBlock& other) const;
std::string DebugString() const;
iterator begin() { return wrap_iterator(map_.begin()); }
iterator end() { return wrap_iterator(map_.end()); }
const_iterator begin() const { return wrap_const_iterator(map_.begin()); }
const_iterator end() const { return wrap_const_iterator(map_.end()); }
bool empty() const { return map_.empty(); }
size_t size() const { return map_.size(); }
iterator find(absl::string_view key) { return wrap_iterator(map_.find(key)); }
const_iterator find(absl::string_view key) const {
return wrap_const_iterator(map_.find(key));
}
bool contains(absl::string_view key) const { return find(key) != end(); }
void erase(absl::string_view key);
void clear();
InsertResult insert(const value_type& value);
void AppendValueOrAddHeader(const absl::string_view key,
const absl::string_view value);
class QUICHE_EXPORT ValueProxy {
public:
~ValueProxy();
ValueProxy(ValueProxy&& other);
ValueProxy& operator=(ValueProxy&& other);
ValueProxy(const ValueProxy& other) = delete;
ValueProxy& operator=(const ValueProxy& other) = delete;
ValueProxy& operator=(absl::string_view value);
bool operator==(absl::string_view value) const;
std::string as_string() const;
private:
friend class HttpHeaderBlock;
friend class test::ValueProxyPeer;
ValueProxy(HttpHeaderBlock* block,
HttpHeaderBlock::MapType::iterator lookup_result,
const absl::string_view key,
size_t* spdy_header_block_value_size);
HttpHeaderBlock* block_;
HttpHeaderBlock::MapType::iterator lookup_result_;
absl::string_view key_;
size_t* spdy_header_block_value_size_;
bool valid_;
};
ABSL_MUST_USE_RESULT ValueProxy operator[](const absl::string_view key);
size_t TotalBytesUsed() const { return key_size_ + value_size_; }
private:
friend class test::HttpHeaderBlockPeer;
inline iterator wrap_iterator(MapType::const_iterator inner_iterator) const {
#if SPDY_HEADER_DEBUG
iterator outer_iterator(inner_iterator);
if (inner_iterator == map_.end()) {
outer_iterator.forbid_dereference();
}
return outer_iterator;
#else
return iterator(inner_iterator);
#endif
}
inline const_iterator wrap_const_iterator(
MapType::const_iterator inner_iterator) const {
#if SPDY_HEADER_DEBUG
const_iterator outer_iterator(inner_iterator);
if (inner_iterator == map_.end()) {
outer_iterator.forbid_dereference();
}
return outer_iterator;
#else
return iterator(inner_iterator);
#endif
}
void AppendHeader(const absl::string_view key, const absl::string_view value);
absl::string_view WriteKey(const absl::string_view key);
size_t bytes_allocated() const;
MapType map_;
HttpHeaderStorage storage_;
size_t key_size_ = 0;
size_t value_size_ = 0;
};
inline bool operator==(absl::string_view lhs,
const HttpHeaderBlock::ValueProxy& rhs) {
return rhs == lhs;
}
}
#endif
#include "quiche/common/http/http_header_block.h"
#include <string.h>
#include <algorithm>
#include <ios>
#include <string>
#include <utility>
#include "absl/strings/str_cat.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace quiche {
namespace {
const size_t kInitialMapBuckets = 11;
const char kCookieKey[] = "cookie";
const char kNullSeparator = 0;
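// Repeated "cookie" values are joined with "; "; all other repeated header
// values are joined with a single NUL byte.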
absl::string_view SeparatorForKey(absl::string_view key) {
if (key == kCookieKey) {
static absl::string_view cookie_separator = "; ";
return cookie_separator;
} else {
return absl::string_view(&kNullSeparator, 1);
}
}
}
HttpHeaderBlock::HeaderValue::HeaderValue(HttpHeaderStorage* storage,
absl::string_view key,
absl::string_view initial_value)
: storage_(storage),
fragments_({initial_value}),
pair_({key, {}}),
size_(initial_value.size()),
separator_size_(SeparatorForKey(key).size()) {}
HttpHeaderBlock::HeaderValue::HeaderValue(HeaderValue&& other)
: storage_(other.storage_),
fragments_(std::move(other.fragments_)),
pair_(std::move(other.pair_)),
size_(other.size_),
separator_size_(other.separator_size_) {}
HttpHeaderBlock::HeaderValue& HttpHeaderBlock::HeaderValue::operator=(
HeaderValue&& other) {
storage_ = other.storage_;
fragments_ = std::move(other.fragments_);
pair_ = std::move(other.pair_);
size_ = other.size_;
separator_size_ = other.separator_size_;
return *this;
}
void HttpHeaderBlock::HeaderValue::set_storage(HttpHeaderStorage* storage) {
storage_ = storage;
}
HttpHeaderBlock::HeaderValue::~HeaderValue() = default;
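// Lazily joins any pending fragments into one backing string on first read;
// later reads reuse the single consolidated fragment.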
absl::string_view HttpHeaderBlock::HeaderValue::ConsolidatedValue() const {
if (fragments_.empty()) {
return absl::string_view();
}
if (fragments_.size() > 1) {
fragments_ = {
storage_->WriteFragments(fragments_, SeparatorForKey(pair_.first))};
}
return fragments_[0];
}
void HttpHeaderBlock::HeaderValue::Append(absl::string_view fragment) {
size_ += (fragment.size() + separator_size_);
fragments_.push_back(fragment);
}
const std::pair<absl::string_view, absl::string_view>&
HttpHeaderBlock::HeaderValue::as_pair() const {
pair_.second = ConsolidatedValue();
return pair_;
}
HttpHeaderBlock::iterator::iterator(MapType::const_iterator it) : it_(it) {}
HttpHeaderBlock::iterator::iterator(const iterator& other) = default;
HttpHeaderBlock::iterator::~iterator() = default;
HttpHeaderBlock::ValueProxy::ValueProxy(
HttpHeaderBlock* block, HttpHeaderBlock::MapType::iterator lookup_result,
const absl::string_view key, size_t* spdy_header_block_value_size)
: block_(block),
lookup_result_(lookup_result),
key_(key),
spdy_header_block_value_size_(spdy_header_block_value_size),
valid_(true) {}
HttpHeaderBlock::ValueProxy::ValueProxy(ValueProxy&& other)
: block_(other.block_),
lookup_result_(other.lookup_result_),
key_(other.key_),
spdy_header_block_value_size_(other.spdy_header_block_value_size_),
valid_(true) {
other.valid_ = false;
}
HttpHeaderBlock::ValueProxy& HttpHeaderBlock::ValueProxy::operator=(
HttpHeaderBlock::ValueProxy&& other) {
block_ = other.block_;
lookup_result_ = other.lookup_result_;
key_ = other.key_;
valid_ = true;
other.valid_ = false;
spdy_header_block_value_size_ = other.spdy_header_block_value_size_;
return *this;
}
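// If the proxy was created for a missing key and never assigned a value, give
// the speculatively written key bytes back to the block's storage.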
HttpHeaderBlock::ValueProxy::~ValueProxy() {
if (valid_ && lookup_result_ == block_->map_.end()) {
block_->storage_.Rewind(key_);
}
}
HttpHeaderBlock::ValueProxy& HttpHeaderBlock::ValueProxy::operator=(
absl::string_view value) {
*spdy_header_block_value_size_ += value.size();
HttpHeaderStorage* storage = &block_->storage_;
if (lookup_result_ == block_->map_.end()) {
QUICHE_DVLOG(1) << "Inserting: (" << key_ << ", " << value << ")";
lookup_result_ =
block_->map_
.emplace(std::make_pair(
key_, HeaderValue(storage, key_, storage->Write(value))))
.first;
} else {
QUICHE_DVLOG(1) << "Updating key: " << key_ << " with value: " << value;
*spdy_header_block_value_size_ -= lookup_result_->second.SizeEstimate();
lookup_result_->second = HeaderValue(storage, key_, storage->Write(value));
}
return *this;
}
bool HttpHeaderBlock::ValueProxy::operator==(absl::string_view value) const {
if (lookup_result_ == block_->map_.end()) {
return false;
} else {
return value == lookup_result_->second.value();
}
}
std::string HttpHeaderBlock::ValueProxy::as_string() const {
if (lookup_result_ == block_->map_.end()) {
return "";
} else {
return std::string(lookup_result_->second.value());
}
}
HttpHeaderBlock::HttpHeaderBlock() : map_(kInitialMapBuckets) {}
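// Move operations swap in the other block's map and take over its arena, then
// repoint every HeaderValue at this block's storage member.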
HttpHeaderBlock::HttpHeaderBlock(HttpHeaderBlock&& other)
: map_(kInitialMapBuckets) {
map_.swap(other.map_);
storage_ = std::move(other.storage_);
for (auto& p : map_) {
p.second.set_storage(&storage_);
}
key_size_ = other.key_size_;
value_size_ = other.value_size_;
}
HttpHeaderBlock::~HttpHeaderBlock() = default;
HttpHeaderBlock& HttpHeaderBlock::operator=(HttpHeaderBlock&& other) {
map_.swap(other.map_);
storage_ = std::move(other.storage_);
for (auto& p : map_) {
p.second.set_storage(&storage_);
}
key_size_ = other.key_size_;
value_size_ = other.value_size_;
return *this;
}
HttpHeaderBlock HttpHeaderBlock::Clone() const {
HttpHeaderBlock copy;
for (const auto& p : *this) {
copy.AppendHeader(p.first, p.second);
}
return copy;
}
bool HttpHeaderBlock::operator==(const HttpHeaderBlock& other) const {
return size() == other.size() && std::equal(begin(), end(), other.begin());
}
bool HttpHeaderBlock::operator!=(const HttpHeaderBlock& other) const {
return !(operator==(other));
}
std::string HttpHeaderBlock::DebugString() const {
if (empty()) {
return "{}";
}
std::string output = "\n{\n";
for (auto it = begin(); it != end(); ++it) {
absl::StrAppend(&output, " ", it->first, " ", it->second, "\n");
}
absl::StrAppend(&output, "}\n");
return output;
}
void HttpHeaderBlock::erase(absl::string_view key) {
auto iter = map_.find(key);
if (iter != map_.end()) {
QUICHE_DVLOG(1) << "Erasing header with name: " << key;
key_size_ -= key.size();
value_size_ -= iter->second.SizeEstimate();
map_.erase(iter);
}
}
void HttpHeaderBlock::clear() {
key_size_ = 0;
value_size_ = 0;
map_.clear();
storage_.Clear();
}
HttpHeaderBlock::InsertResult HttpHeaderBlock::insert(
const HttpHeaderBlock::value_type& value) {
value_size_ += value.second.size();
auto iter = map_.find(value.first);
if (iter == map_.end()) {
QUICHE_DVLOG(1) << "Inserting: (" << value.first << ", " << value.second
<< ")";
AppendHeader(value.first, value.second);
return InsertResult::kInserted;
} else {
QUICHE_DVLOG(1) << "Updating key: " << iter->first
<< " with value: " << value.second;
value_size_ -= iter->second.SizeEstimate();
iter->second =
HeaderValue(&storage_, iter->first, storage_.Write(value.second));
return InsertResult::kReplaced;
}
}
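// Writes the key into storage up front when it is not already present, so the
// returned ValueProxy can insert without copying the key again; the proxy's
// destructor rewinds that write if no value is ever assigned.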
HttpHeaderBlock::ValueProxy HttpHeaderBlock::operator[](
const absl::string_view key) {
QUICHE_DVLOG(2) << "Operator[] saw key: " << key;
absl::string_view out_key;
auto iter = map_.find(key);
if (iter == map_.end()) {
out_key = WriteKey(key);
QUICHE_DVLOG(2) << "Key written as: " << std::hex
<< static_cast<const void*>(key.data()) << ", " << std::dec
<< key.size();
} else {
out_key = iter->first;
}
return ValueProxy(this, iter, out_key, &value_size_);
}
void HttpHeaderBlock::AppendValueOrAddHeader(const absl::string_view key,
const absl::string_view value) {
value_size_ += value.size();
auto iter = map_.find(key);
if (iter == map_.end()) {
QUICHE_DVLOG(1) << "Inserting: (" << key << ", " << value << ")";
AppendHeader(key, value);
return;
}
QUICHE_DVLOG(1) << "Updating key: " << iter->first
<< "; appending value: " << value;
value_size_ += SeparatorForKey(key).size();
iter->second.Append(storage_.Write(value));
}
void HttpHeaderBlock::AppendHeader(const absl::string_view key,
const absl::string_view value) {
auto backed_key = WriteKey(key);
map_.emplace(std::make_pair(
backed_key, HeaderValue(&storage_, backed_key, storage_.Write(value))));
}
absl::string_view HttpHeaderBlock::WriteKey(const absl::string_view key) {
key_size_ += key.size();
return storage_.Write(key);
}
size_t HttpHeaderBlock::bytes_allocated() const {
return storage_.bytes_allocated();
}
} | #include "quiche/common/http/http_header_block.h"
#include <memory>
#include <string>
#include <utility>
#include "quiche/common/platform/api/quiche_test.h"
#include "quiche/spdy/test_tools/spdy_test_utils.h"
using ::testing::ElementsAre;
namespace quiche {
namespace test {
class ValueProxyPeer {
public:
static absl::string_view key(HttpHeaderBlock::ValueProxy* p) {
return p->key_;
}
};
std::pair<absl::string_view, absl::string_view> Pair(absl::string_view k,
absl::string_view v) {
return std::make_pair(k, v);
}
TEST(HttpHeaderBlockTest, EmptyBlock) {
HttpHeaderBlock block;
EXPECT_TRUE(block.empty());
EXPECT_EQ(0u, block.size());
EXPECT_EQ(block.end(), block.find("foo"));
EXPECT_FALSE(block.contains("foo"));
EXPECT_TRUE(block.end() == block.begin());
block.erase("bar");
}
TEST(HttpHeaderBlockTest, KeyMemoryReclaimedOnLookup) {
HttpHeaderBlock block;
absl::string_view copied_key1;
{
auto proxy1 = block["some key name"];
copied_key1 = ValueProxyPeer::key(&proxy1);
}
absl::string_view copied_key2;
{
auto proxy2 = block["some other key name"];
copied_key2 = ValueProxyPeer::key(&proxy2);
}
EXPECT_EQ(copied_key1.data(), copied_key2.data());
{
auto proxy1 = block["some key name"];
block["some other key name"] = "some value";
}
block["key"] = "value";
EXPECT_EQ("value", block["key"]);
EXPECT_EQ("some value", block["some other key name"]);
EXPECT_TRUE(block.find("some key name") == block.end());
}
TEST(HttpHeaderBlockTest, AddHeaders) {
HttpHeaderBlock block;
block["foo"] = std::string(300, 'x');
block["bar"] = "baz";
block["qux"] = "qux1";
block["qux"] = "qux2";
block.insert(std::make_pair("key", "value"));
EXPECT_EQ(Pair("foo", std::string(300, 'x')), *block.find("foo"));
EXPECT_EQ("baz", block["bar"]);
std::string qux("qux");
EXPECT_EQ("qux2", block[qux]);
ASSERT_NE(block.end(), block.find("key"));
ASSERT_TRUE(block.contains("key"));
EXPECT_EQ(Pair("key", "value"), *block.find("key"));
block.erase("key");
EXPECT_EQ(block.end(), block.find("key"));
}
TEST(HttpHeaderBlockTest, CopyBlocks) {
HttpHeaderBlock block1;
block1["foo"] = std::string(300, 'x');
block1["bar"] = "baz";
block1.insert(std::make_pair("qux", "qux1"));
HttpHeaderBlock block2 = block1.Clone();
HttpHeaderBlock block3(block1.Clone());
EXPECT_EQ(block1, block2);
EXPECT_EQ(block1, block3);
}
TEST(HttpHeaderBlockTest, Equality) {
HttpHeaderBlock block1;
block1["foo"] = "bar";
HttpHeaderBlock block2;
block2["foo"] = "bar";
HttpHeaderBlock block3;
block3["baz"] = "qux";
EXPECT_EQ(block1, block2);
EXPECT_NE(block1, block3);
block2["baz"] = "qux";
EXPECT_NE(block1, block2);
}
HttpHeaderBlock ReturnTestHeaderBlock() {
HttpHeaderBlock block;
block["foo"] = "bar";
block.insert(std::make_pair("foo2", "baz"));
return block;
}
TEST(HttpHeaderBlockTest, MovedFromIsValid) {
HttpHeaderBlock block1;
block1["foo"] = "bar";
HttpHeaderBlock block2(std::move(block1));
EXPECT_THAT(block2, ElementsAre(Pair("foo", "bar")));
block1["baz"] = "qux";
HttpHeaderBlock block3(std::move(block1));
block1["foo"] = "bar";
HttpHeaderBlock block4(std::move(block1));
block1.clear();
EXPECT_TRUE(block1.empty());
block1["foo"] = "bar";
EXPECT_THAT(block1, ElementsAre(Pair("foo", "bar")));
HttpHeaderBlock block5 = ReturnTestHeaderBlock();
block5.AppendValueOrAddHeader("foo", "bar2");
EXPECT_THAT(block5, ElementsAre(Pair("foo", std::string("bar\0bar2", 8)),
Pair("foo2", "baz")));
}
TEST(HttpHeaderBlockTest, AppendHeaders) {
HttpHeaderBlock block;
block["foo"] = "foo";
block.AppendValueOrAddHeader("foo", "bar");
EXPECT_EQ(Pair("foo", std::string("foo\0bar", 7)), *block.find("foo"));
block.insert(std::make_pair("foo", "baz"));
EXPECT_EQ("baz", block["foo"]);
EXPECT_EQ(Pair("foo", "baz"), *block.find("foo"));
block["cookie"] = "key1=value1";
block.AppendValueOrAddHeader("h1", "h1v1");
block.insert(std::make_pair("h2", "h2v1"));
block.AppendValueOrAddHeader("h3", "h3v2");
block.AppendValueOrAddHeader("h2", "h2v2");
block.AppendValueOrAddHeader("h1", "h1v2");
block.AppendValueOrAddHeader("cookie", "key2=value2");
block.AppendValueOrAddHeader("cookie", "key3=value3");
block.AppendValueOrAddHeader("h1", "h1v3");
block.AppendValueOrAddHeader("h2", "h2v3");
block.AppendValueOrAddHeader("h3", "h3v3");
block.AppendValueOrAddHeader("h4", "singleton");
block.AppendValueOrAddHeader("set-cookie", "yummy");
block.AppendValueOrAddHeader("set-cookie", "scrumptious");
EXPECT_EQ("key1=value1; key2=value2; key3=value3", block["cookie"]);
EXPECT_EQ("baz", block["foo"]);
EXPECT_EQ(std::string("h1v1\0h1v2\0h1v3", 14), block["h1"]);
EXPECT_EQ(std::string("h2v1\0h2v2\0h2v3", 14), block["h2"]);
EXPECT_EQ(std::string("h3v2\0h3v3", 9), block["h3"]);
EXPECT_EQ("singleton", block["h4"]);
EXPECT_EQ(std::string("yummy\0scrumptious", 17), block["set-cookie"]);
}
TEST(HttpHeaderBlockTest, CompareValueToStringPiece) {
HttpHeaderBlock block;
block["foo"] = "foo";
block.AppendValueOrAddHeader("foo", "bar");
const auto& val = block["foo"];
const char expected[] = "foo\0bar";
EXPECT_TRUE(absl::string_view(expected, 7) == val);
EXPECT_TRUE(val == absl::string_view(expected, 7));
EXPECT_FALSE(absl::string_view(expected, 3) == val);
EXPECT_FALSE(val == absl::string_view(expected, 3));
const char not_expected[] = "foo\0barextra";
EXPECT_FALSE(absl::string_view(not_expected, 12) == val);
EXPECT_FALSE(val == absl::string_view(not_expected, 12));
const auto& val2 = block["foo2"];
EXPECT_FALSE(absl::string_view(expected, 7) == val2);
EXPECT_FALSE(val2 == absl::string_view(expected, 7));
EXPECT_FALSE(absl::string_view("") == val2);
EXPECT_FALSE(val2 == absl::string_view(""));
}
TEST(HttpHeaderBlockTest, UpperCaseNames) {
HttpHeaderBlock block;
block["Foo"] = "foo";
block.AppendValueOrAddHeader("Foo", "bar");
EXPECT_NE(block.end(), block.find("foo"));
EXPECT_EQ(Pair("Foo", std::string("foo\0bar", 7)), *block.find("Foo"));
block.AppendValueOrAddHeader("foo", "baz");
EXPECT_THAT(block,
ElementsAre(Pair("Foo", std::string("foo\0bar\0baz", 11))));
}
namespace {
size_t HttpHeaderBlockSize(const HttpHeaderBlock& block) {
size_t size = 0;
for (const auto& pair : block) {
size += pair.first.size() + pair.second.size();
}
return size;
}
}
TEST(HttpHeaderBlockTest, TotalBytesUsed) {
HttpHeaderBlock block;
const size_t value_size = 300;
block["foo"] = std::string(value_size, 'x');
EXPECT_EQ(block.TotalBytesUsed(), HttpHeaderBlockSize(block));
block.insert(std::make_pair("key", std::string(value_size, 'x')));
EXPECT_EQ(block.TotalBytesUsed(), HttpHeaderBlockSize(block));
block.AppendValueOrAddHeader("abc", std::string(value_size, 'x'));
EXPECT_EQ(block.TotalBytesUsed(), HttpHeaderBlockSize(block));
block["foo"] = std::string(value_size, 'x');
EXPECT_EQ(block.TotalBytesUsed(), HttpHeaderBlockSize(block));
block.insert(std::make_pair("key", std::string(value_size, 'x')));
EXPECT_EQ(block.TotalBytesUsed(), HttpHeaderBlockSize(block));
block.AppendValueOrAddHeader("abc", std::string(value_size, 'x'));
EXPECT_EQ(block.TotalBytesUsed(), HttpHeaderBlockSize(block));
size_t block_size = block.TotalBytesUsed();
HttpHeaderBlock block_copy = std::move(block);
EXPECT_EQ(block_size, block_copy.TotalBytesUsed());
block_copy.erase("foo");
EXPECT_EQ(block_copy.TotalBytesUsed(), HttpHeaderBlockSize(block_copy));
block_copy.erase("key");
EXPECT_EQ(block_copy.TotalBytesUsed(), HttpHeaderBlockSize(block_copy));
block_copy.erase("abc");
EXPECT_EQ(block_copy.TotalBytesUsed(), HttpHeaderBlockSize(block_copy));
}
TEST(HttpHeaderBlockTest, OrderPreserved) {
HttpHeaderBlock block;
block[":method"] = "GET";
block["foo"] = "bar";
block[":path"] = "/";
EXPECT_THAT(block, ElementsAre(Pair(":method", "GET"), Pair("foo", "bar"),
Pair(":path", "/")));
}
TEST(HttpHeaderBlockTest, InsertReturnValue) {
HttpHeaderBlock block;
EXPECT_EQ(HttpHeaderBlock::InsertResult::kInserted,
block.insert({"foo", "bar"}));
EXPECT_EQ(HttpHeaderBlock::InsertResult::kReplaced,
block.insert({"foo", "baz"}));
}
}
} |
5 | #ifndef XLA_WINDOW_UTIL_H_
#define XLA_WINDOW_UTIL_H_
#include "absl/types/span.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace window_util {
Window MakeWindow(absl::Span<const int64_t> sizes);
Window MakeWindow(absl::Span<const int64_t> sizes,
absl::Span<const int64_t> strides);
PaddingConfig MakeSymmetricPadding(absl::Span<const int64_t> sizes);
std::string ToString(const WindowDimension& dim);
std::string ToString(const Window& window);
bool HasStride(const Window& window);
bool HasPadding(const Window& window);
bool HasSymmetricPadding(const Window& window);
bool HasNegativePadding(const Window& window);
bool HasSymmetricPadding(const PaddingConfig& padding_config);
bool HasBaseDilation(const Window& window);
bool HasWindowDilation(const Window& window);
bool HasDilation(const Window& window);
bool HasOverlappingWindow(const Window& window);
bool HasWindowReversal(const Window& window);
bool AllOrNoneReversed(const Window& window);
bool IsTrivialWindowDimension(const WindowDimension& window_dimension);
int64_t DilatedBound(int64_t bound, int64_t dilation);
int64_t StridedBound(int64_t bound, int64_t window_size, int64_t stride);
}
}
#endif
#include "xla/window_util.h"
#include <functional>
#include <string>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/functional/function_ref.h"
#include "absl/strings/str_cat.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace window_util {
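// Builds a Window proto with the given per-dimension sizes; stride and both
// dilation factors default to 1.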
Window MakeWindow(absl::Span<const int64_t> sizes) {
Window window;
for (int64_t size : sizes) {
auto* dimension = window.add_dimensions();
dimension->set_size(size);
dimension->set_stride(1);
dimension->set_base_dilation(1);
dimension->set_window_dilation(1);
}
return window;
}
Window MakeWindow(absl::Span<const int64_t> sizes,
absl::Span<const int64_t> strides) {
Window window;
CHECK_EQ(sizes.size(), strides.size());
for (size_t nb = 0; nb < sizes.size(); ++nb) {
auto* dimension = window.add_dimensions();
dimension->set_size(sizes[nb]);
dimension->set_stride(strides[nb]);
dimension->set_base_dilation(1);
dimension->set_window_dilation(1);
}
return window;
}
PaddingConfig MakeSymmetricPadding(absl::Span<const int64_t> sizes) {
PaddingConfig config;
for (int64_t size : sizes) {
auto* dimension = config.add_dimensions();
dimension->set_edge_padding_low(size);
dimension->set_edge_padding_high(size);
}
return config;
}
std::string ToString(const WindowDimension& dim) {
using absl::StrAppend;
using absl::StrCat;
std::string str = StrCat("(size=", dim.size());
if (dim.stride() != 1) {
StrAppend(&str, ",stride=", dim.stride());
}
if (dim.padding_low() != 0) {
StrAppend(&str, ",padding_low=", dim.padding_low());
}
if (dim.padding_high() != 0) {
StrAppend(&str, ",padding_high=", dim.padding_high());
}
if (dim.base_dilation() != 1) {
StrAppend(&str, ",base_dilation=", dim.base_dilation());
}
if (dim.window_dilation() != 1) {
StrAppend(&str, ",window_dilation=", dim.window_dilation());
}
if (dim.window_reversal()) {
StrAppend(&str, ",window_reversal");
}
StrAppend(&str, ")");
return str;
}
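// Renders the window with dimensions joined by 'x' and all-default fields
// omitted, e.g. "size=2x3 stride=1x2 pad=0_0x1_1".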
std::string ToString(const Window& window) {
using absl::StrAppend;
using absl::StrCat;
std::string str;
const auto add_field =
[&](const char* heading,
absl::FunctionRef<std::string(const WindowDimension&)> format) {
StrAppend(&str, heading, "=");
const char* prefix = "";
for (const auto& window_dimension : window.dimensions()) {
StrAppend(&str, prefix, format(window_dimension));
prefix = "x";
}
};
if (window.dimensions_size() > 0) {
add_field("size",
[](const WindowDimension& dim) { return StrCat(dim.size()); });
}
if (HasStride(window)) {
add_field(" stride",
[](const WindowDimension& dim) { return StrCat(dim.stride()); });
}
if (HasPadding(window)) {
add_field(" pad", [](const WindowDimension& dim) {
return StrCat(dim.padding_low(), "_", dim.padding_high());
});
}
if (HasBaseDilation(window)) {
add_field(" lhs_dilate", [](const WindowDimension& dim) {
return StrCat(dim.base_dilation());
});
}
if (HasWindowDilation(window)) {
add_field(" rhs_dilate", [](const WindowDimension& dim) {
return StrCat(dim.window_dilation());
});
}
if (HasWindowReversal(window)) {
add_field(" rhs_reversal", [](const WindowDimension& dim) {
return StrCat(dim.window_reversal() ? 1 : 0);
});
}
return str;
}
bool HasStride(const Window& window) {
for (const auto& dim : window.dimensions()) {
if (dim.stride() != 1) {
return true;
}
}
return false;
}
bool HasPadding(const Window& window) {
for (const auto& dim : window.dimensions()) {
if (dim.padding_low() != 0 || dim.padding_high() != 0) {
return true;
}
}
return false;
}
bool HasSymmetricPadding(const Window& window) {
return absl::c_all_of(window.dimensions(), [](const WindowDimension& dim) {
return dim.padding_low() == dim.padding_high();
});
}
bool HasSymmetricPadding(const PaddingConfig& padding_config) {
return absl::c_all_of(padding_config.dimensions(),
[](const PaddingConfig::PaddingConfigDimension& dim) {
return dim.edge_padding_low() ==
dim.edge_padding_high();
});
}
bool HasNegativePadding(const Window& window) {
return absl::c_any_of(window.dimensions(), [](const WindowDimension& dim) {
return dim.padding_low() < 0 || dim.padding_high() < 0;
});
}
bool HasBaseDilation(const Window& window) {
for (const auto& dim : window.dimensions()) {
if (dim.base_dilation() != 1) {
return true;
}
}
return false;
}
bool HasWindowDilation(const Window& window) {
for (const auto& dim : window.dimensions()) {
if (dim.window_dilation() != 1) {
return true;
}
}
return false;
}
bool HasWindowReversal(const Window& window) {
for (const auto& dim : window.dimensions()) {
if (dim.window_reversal()) {
return true;
}
}
return false;
}
bool AllOrNoneReversed(const Window& window) {
if (window.dimensions().empty()) {
return true;
}
bool reversed = window.dimensions()[0].window_reversal();
return absl::c_all_of(window.dimensions(), [&](const WindowDimension& dim) {
return dim.window_reversal() == reversed;
});
}
bool HasDilation(const Window& window) {
return HasBaseDilation(window) || HasWindowDilation(window);
}
bool IsTrivialWindowDimension(const WindowDimension& window_dimension) {
return window_dimension.size() == 1 && window_dimension.stride() == 1 &&
window_dimension.padding_low() == 0 &&
window_dimension.padding_high() == 0 &&
window_dimension.window_dilation() == 1 &&
window_dimension.base_dilation() == 1;
}
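// A window overlaps itself whenever some dimension's size exceeds its stride.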
bool HasOverlappingWindow(const Window& window) {
for (const auto& dim : window.dimensions()) {
if (dim.size() > dim.stride()) {
return true;
}
}
return false;
}
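// Size of a dimension of `bound` elements after dilation: elements are spaced
// `dilation` apart, giving (bound - 1) * dilation + 1 (0 for an empty bound).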
int64_t DilatedBound(int64_t bound, int64_t dilation) {
CHECK_GE(bound, 0);
CHECK_GE(dilation, 1);
if (bound == 0) {
return 0;
}
return (bound - 1) * dilation + 1;
}
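// Number of valid window positions when sliding a window of `window_size`
// across `bound` elements with the given stride.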
int64_t StridedBound(int64_t bound, int64_t window_size, int64_t stride) {
CHECK_GE(window_size, 0);
CHECK_GE(bound, 0);
CHECK_GE(stride, 1);
if (bound == 0 || window_size > bound) {
return 0;
}
return (bound - window_size) / stride + 1;
}
}
} | #include "xla/window_util.h"
#include "xla/test.h"
namespace xla {
namespace {
using ::testing::ElementsAre;
TEST(WindowUtilTest, HasOverlappingWindowTest) {
EXPECT_FALSE(
window_util::HasOverlappingWindow(window_util::MakeWindow({1, 1})));
EXPECT_TRUE(
window_util::HasOverlappingWindow(window_util::MakeWindow({2, 2, 2, 2})));
}
TEST(WindowUtilTest, MakeWindowStrideTest) {
Window w = window_util::MakeWindow({1, 2}, {3, 4});
EXPECT_EQ(w.dimensions()[0].size(), 1);
EXPECT_EQ(w.dimensions()[1].size(), 2);
EXPECT_EQ(w.dimensions()[0].stride(), 3);
EXPECT_EQ(w.dimensions()[1].stride(), 4);
}
}
} |
6 | #ifndef QUICHE_COMMON_HTTP_HTTP_HEADER_STORAGE_H_
#define QUICHE_COMMON_HTTP_HTTP_HEADER_STORAGE_H_
#include "absl/container/inlined_vector.h"
#include "absl/strings/string_view.h"
#include "quiche/common/platform/api/quiche_export.h"
#include "quiche/common/quiche_simple_arena.h"
namespace quiche {
using Fragments = absl::InlinedVector<absl::string_view, 1>;
class QUICHE_EXPORT HttpHeaderStorage {
public:
HttpHeaderStorage();
HttpHeaderStorage(const HttpHeaderStorage&) = delete;
HttpHeaderStorage& operator=(const HttpHeaderStorage&) = delete;
HttpHeaderStorage(HttpHeaderStorage&& other) = default;
HttpHeaderStorage& operator=(HttpHeaderStorage&& other) = default;
absl::string_view Write(absl::string_view s);
void Rewind(absl::string_view s);
void Clear() { arena_.Reset(); }
absl::string_view WriteFragments(const Fragments& fragments,
absl::string_view separator);
size_t bytes_allocated() const { return arena_.status().bytes_allocated(); }
private:
QuicheSimpleArena arena_;
};
QUICHE_EXPORT size_t Join(char* dst, const Fragments& fragments,
absl::string_view separator);
}
#endif
#include "quiche/common/http/http_header_storage.h"
#include <cstring>
#include "quiche/common/platform/api/quiche_logging.h"
namespace quiche {
namespace {
const size_t kDefaultStorageBlockSize = 2048;
}
HttpHeaderStorage::HttpHeaderStorage() : arena_(kDefaultStorageBlockSize) {}
absl::string_view HttpHeaderStorage::Write(const absl::string_view s) {
return absl::string_view(arena_.Memdup(s.data(), s.size()), s.size());
}
void HttpHeaderStorage::Rewind(const absl::string_view s) {
arena_.Free(const_cast<char*>(s.data()), s.size());
}
absl::string_view HttpHeaderStorage::WriteFragments(
const Fragments& fragments, absl::string_view separator) {
if (fragments.empty()) {
return absl::string_view();
}
size_t total_size = separator.size() * (fragments.size() - 1);
for (const absl::string_view& fragment : fragments) {
total_size += fragment.size();
}
char* dst = arena_.Alloc(total_size);
size_t written = Join(dst, fragments, separator);
QUICHE_DCHECK_EQ(written, total_size);
return absl::string_view(dst, total_size);
}
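// Concatenates the fragments into `dst`, separated by `separator`, and returns
// the number of bytes written; `dst` must be large enough for the joined
// result.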
size_t Join(char* dst, const Fragments& fragments,
absl::string_view separator) {
if (fragments.empty()) {
return 0;
}
auto* original_dst = dst;
auto it = fragments.begin();
memcpy(dst, it->data(), it->size());
dst += it->size();
for (++it; it != fragments.end(); ++it) {
memcpy(dst, separator.data(), separator.size());
dst += separator.size();
memcpy(dst, it->data(), it->size());
dst += it->size();
}
return dst - original_dst;
}
} | #include "quiche/common/http/http_header_storage.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace quiche {
namespace test {
TEST(JoinTest, JoinEmpty) {
Fragments empty;
absl::string_view separator = ", ";
char buf[10] = "";
size_t written = Join(buf, empty, separator);
EXPECT_EQ(0u, written);
}
TEST(JoinTest, JoinOne) {
Fragments v = {"one"};
absl::string_view separator = ", ";
char buf[15];
size_t written = Join(buf, v, separator);
EXPECT_EQ(3u, written);
EXPECT_EQ("one", absl::string_view(buf, written));
}
TEST(JoinTest, JoinMultiple) {
Fragments v = {"one", "two", "three"};
absl::string_view separator = ", ";
char buf[15];
size_t written = Join(buf, v, separator);
EXPECT_EQ(15u, written);
EXPECT_EQ("one, two, three", absl::string_view(buf, written));
}
}
} |
7 | #ifndef TENSORFLOW_CORE_IR_INTERFACES_H_
#define TENSORFLOW_CORE_IR_INTERFACES_H_
#include "mlir/IR/Dialect.h"
#include "mlir/IR/DialectInterface.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/Interfaces/ControlFlowInterfaces.h"
#include "mlir/Interfaces/SideEffectInterfaces.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/core/ir/dialect.h"
#include "tensorflow/core/ir/interfaces.h.inc"
namespace mlir {
namespace tfg {
class TensorFlowRegistryInterfaceBase
: public TensorFlowRegistryInterface::FallbackModel<
TensorFlowRegistryInterfaceBase>,
public DialectInterface::Base<TensorFlowRegistryInterfaceBase> {
public:
explicit TensorFlowRegistryInterfaceBase(Dialect *dialect)
: DialectInterface::Base<TensorFlowRegistryInterfaceBase>(dialect) {}
virtual bool isStateful(Operation *op) const = 0;
};
class StatefulMemoryEffectInterface
: public MemoryEffectOpInterface::FallbackModel<
StatefulMemoryEffectInterface>,
public DialectInterface::Base<StatefulMemoryEffectInterface> {
public:
explicit StatefulMemoryEffectInterface(Dialect *dialect)
: DialectInterface::Base<StatefulMemoryEffectInterface>(dialect) {}
void getEffects(
Operation *op,
SmallVectorImpl<SideEffects::EffectInstance<MemoryEffects::Effect>>
&effects) const;
};
}
namespace OpTrait {
template <typename ConcreteType>
class IntrinsicOperation
: public mlir::OpTrait::TraitBase<ConcreteType, IntrinsicOperation> {};
}
}
#endif
#include "tensorflow/core/ir/interfaces.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Region.h"
#include "mlir/IR/Value.h"
#include "mlir/Interfaces/SideEffectInterfaces.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/core/ir/ops.h"
#include "tensorflow/core/ir/types/dialect.h"
namespace mlir {
namespace tfg {
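// Graph regions pair every data argument with a control token; verify that the
// two counts match.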
LogicalResult ControlArgumentInterface::verifyRegion(Operation *op,
Region &region) {
unsigned num_ctl = 0, num_data = 0;
for (BlockArgument arg : region.getArguments()) {
bool is_ctl = mlir::isa<tf_type::ControlType>(arg.getType());
num_ctl += is_ctl;
num_data += !is_ctl;
}
if (num_ctl != num_data) {
return op->emitOpError("region #")
<< region.getRegionNumber()
<< " expected same number of data values and control tokens ("
<< num_data << " vs. " << num_ctl << ")";
}
return success();
}
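// Conservatively model a write effect when the op's statefulness is unknown,
// when the registry reports it stateful, or when it is nested inside a GraphOp.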
void StatefulMemoryEffectInterface::getEffects(
Operation *op,
SmallVectorImpl<SideEffects::EffectInstance<MemoryEffects::Effect>>
&effects) const {
auto registry = dyn_cast<TensorFlowRegistryInterface>(op);
if (!registry || registry.isStateful() || op->getParentOfType<GraphOp>()) {
effects.emplace_back(MemoryEffects::Write::get());
}
}
}
}
#include "tensorflow/core/ir/interfaces.cc.inc" | #include "tensorflow/core/ir/interfaces.h"
#include "llvm/ADT/ScopeExit.h"
#include "mlir/IR/DialectInterface.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/Verifier.h"
#include "tensorflow/core/ir/dialect.h"
#include "tensorflow/core/platform/test.h"
namespace mlir {
namespace tfg {
namespace {
TEST(TensorFlowRegistryInterface, TestDefaultImplementation) {
MLIRContext context(MLIRContext::Threading::DISABLED);
auto *dialect = context.getOrLoadDialect<TFGraphDialect>();
OperationState state(UnknownLoc::get(&context), "tfg.Foo");
state.addTypes(dialect->getControlType());
Operation *op = Operation::create(state);
auto cleanup = llvm::make_scope_exit([&] { op->destroy(); });
ASSERT_TRUE(succeeded(verify(op)));
auto iface = dyn_cast<TensorFlowRegistryInterface>(op);
EXPECT_FALSE(iface);
}
TEST(TensorFlowRegisterInterface, TestCustomImplementation) {
MLIRContext context(MLIRContext::Threading::DISABLED);
DialectRegistry registry;
registry.insert<TFGraphDialect>();
struct CustomRegistryInterface : public TensorFlowRegistryInterfaceBase {
using TensorFlowRegistryInterfaceBase::TensorFlowRegistryInterfaceBase;
bool isStateful(Operation *op) const override {
return op->getName().stripDialect() == "Foo";
}
};
registry.addExtension(+[](mlir::MLIRContext *ctx, TFGraphDialect *dialect) {
dialect->addInterfaces<CustomRegistryInterface>();
});
context.appendDialectRegistry(registry);
auto *dialect = context.getOrLoadDialect<TFGraphDialect>();
SmallVector<StringRef, 2> op_names = {"tfg.Foo", "tfg.Bar"};
SmallVector<bool, 2> expected = {true, false};
for (auto it : llvm::zip(op_names, expected)) {
OperationState state(UnknownLoc::get(&context), std::get<0>(it));
state.addTypes(dialect->getControlType());
Operation *op = Operation::create(state);
auto cleanup = llvm::make_scope_exit([&] { op->destroy(); });
auto iface = dyn_cast<TensorFlowRegistryInterface>(op);
ASSERT_TRUE(iface);
EXPECT_EQ(iface.isStateful(), std::get<1>(it));
}
}
}
}
} |
8 | #ifndef MLIR_HLO_DIALECT_MHLO_IR_REGISTER_H_
#define MLIR_HLO_DIALECT_MHLO_IR_REGISTER_H_
namespace mlir {
class DialectRegistry;
namespace mhlo {
void registerAllMhloDialects(DialectRegistry &registry);
}
}
#endif
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/kernels/builtin_op_kernels.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/tflite_with_xnnpack_optional.h"
namespace tflite {
namespace ops {
namespace custom {
TfLiteRegistration* Register_NUMERIC_VERIFY();
TfLiteRegistration* Register_AUDIO_SPECTROGRAM();
TfLiteRegistration* Register_MFCC();
TfLiteRegistration* Register_DETECTION_POSTPROCESS();
}
namespace builtin {
BuiltinOpResolver::BuiltinOpResolver() {
AddBuiltin(BuiltinOperator_ABS, Register_ABS(), 1,
5);
AddBuiltin(BuiltinOperator_HARD_SWISH, Register_HARD_SWISH());
AddBuiltin(BuiltinOperator_RELU, Register_RELU(), 1,
3);
AddBuiltin(BuiltinOperator_RELU_N1_TO_1, Register_RELU_N1_TO_1());
AddBuiltin(BuiltinOperator_RELU_0_TO_1, Register_RELU_0_TO_1());
AddBuiltin(BuiltinOperator_RELU6, Register_RELU6(), 1,
3);
AddBuiltin(BuiltinOperator_TANH, Register_TANH(), 1,
3);
AddBuiltin(BuiltinOperator_LOGISTIC, Register_LOGISTIC(),
1,
3);
AddBuiltin(BuiltinOperator_AVERAGE_POOL_2D, Register_AVERAGE_POOL_2D(),
1,
3);
AddBuiltin(BuiltinOperator_MAX_POOL_2D, Register_MAX_POOL_2D(),
1,
3);
AddBuiltin(BuiltinOperator_L2_POOL_2D, Register_L2_POOL_2D());
AddBuiltin(BuiltinOperator_CONV_2D, Register_CONV_2D(),
1,
8);
AddBuiltin(BuiltinOperator_DEPTHWISE_CONV_2D, Register_DEPTHWISE_CONV_2D(),
1,
7);
AddBuiltin(BuiltinOperator_SVDF, Register_SVDF(),
1,
4);
AddBuiltin(BuiltinOperator_RNN, Register_RNN(),
1,
3);
AddBuiltin(BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN,
Register_BIDIRECTIONAL_SEQUENCE_RNN(),
1,
3);
AddBuiltin(BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN,
Register_UNIDIRECTIONAL_SEQUENCE_RNN(),
1,
3);
AddBuiltin(BuiltinOperator_EMBEDDING_LOOKUP, Register_EMBEDDING_LOOKUP(),
1,
3);
AddBuiltin(BuiltinOperator_EMBEDDING_LOOKUP_SPARSE,
Register_EMBEDDING_LOOKUP_SPARSE());
AddBuiltin(BuiltinOperator_FULLY_CONNECTED, Register_FULLY_CONNECTED(),
1,
12);
AddBuiltin(BuiltinOperator_LSH_PROJECTION, Register_LSH_PROJECTION());
AddBuiltin(BuiltinOperator_HASHTABLE_LOOKUP, Register_HASHTABLE_LOOKUP());
AddBuiltin(BuiltinOperator_SOFTMAX, Register_SOFTMAX(),
1,
3);
AddBuiltin(BuiltinOperator_CONCATENATION, Register_CONCATENATION(),
1,
4);
AddBuiltin(BuiltinOperator_ADD, Register_ADD(),
1,
5);
AddBuiltin(BuiltinOperator_SPACE_TO_BATCH_ND, Register_SPACE_TO_BATCH_ND(),
1,
4);
AddBuiltin(BuiltinOperator_BATCH_TO_SPACE_ND, Register_BATCH_TO_SPACE_ND(),
1,
4);
AddBuiltin(BuiltinOperator_MUL, Register_MUL(), 1,
7);
AddBuiltin(BuiltinOperator_L2_NORMALIZATION, Register_L2_NORMALIZATION(),
1,
2);
AddBuiltin(BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION,
Register_LOCAL_RESPONSE_NORMALIZATION());
AddBuiltin(BuiltinOperator_LSTM, Register_LSTM(), 1,
4);
AddBuiltin(BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM,
Register_BIDIRECTIONAL_SEQUENCE_LSTM(), 1,
3);
AddBuiltin(BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM,
Register_UNIDIRECTIONAL_SEQUENCE_LSTM(), 1,
4);
AddBuiltin(BuiltinOperator_PAD, Register_PAD(), 1,
4);
AddBuiltin(BuiltinOperator_PADV2, Register_PADV2(), 1,
4);
AddBuiltin(BuiltinOperator_RESHAPE, Register_RESHAPE());
AddBuiltin(BuiltinOperator_RESIZE_BILINEAR, Register_RESIZE_BILINEAR(),
1,
4);
AddBuiltin(BuiltinOperator_RESIZE_NEAREST_NEIGHBOR,
Register_RESIZE_NEAREST_NEIGHBOR(),
1,
4);
AddBuiltin(BuiltinOperator_SKIP_GRAM, Register_SKIP_GRAM());
AddBuiltin(BuiltinOperator_SPACE_TO_DEPTH, Register_SPACE_TO_DEPTH(),
1,
2);
AddBuiltin(BuiltinOperator_DEPTH_TO_SPACE, Register_DEPTH_TO_SPACE(),
1,
2);
AddBuiltin(BuiltinOperator_GATHER, Register_GATHER(),
1,
7);
AddBuiltin(BuiltinOperator_TRANSPOSE, Register_TRANSPOSE(),
1,
6);
AddBuiltin(BuiltinOperator_MEAN, Register_MEAN(),
1,
3);
AddBuiltin(BuiltinOperator_DIV, Register_DIV(),
1,
2);
AddBuiltin(BuiltinOperator_SUB, Register_SUB(),
1,
5);
AddBuiltin(BuiltinOperator_SPLIT, Register_SPLIT(),
1,
4);
AddBuiltin(BuiltinOperator_SPLIT_V, Register_SPLIT_V(),
1,
2);
AddBuiltin(BuiltinOperator_SQUEEZE, Register_SQUEEZE(),
1,
2);
AddBuiltin(BuiltinOperator_STRIDED_SLICE, Register_STRIDED_SLICE(),
1,
8);
AddBuiltin(BuiltinOperator_EXP, Register_EXP(),
1,
2);
AddBuiltin(BuiltinOperator_TOPK_V2, Register_TOPK_V2(),
1,
3);
AddBuiltin(BuiltinOperator_LOG, Register_LOG(),
1,
2);
AddBuiltin(BuiltinOperator_LOG_SOFTMAX, Register_LOG_SOFTMAX(),
1,
2);
AddBuiltin(BuiltinOperator_CAST, Register_CAST(),
1,
6);
AddBuiltin(BuiltinOperator_DEQUANTIZE, Register_DEQUANTIZE(),
1,
6);
AddBuiltin(BuiltinOperator_PRELU, Register_PRELU());
AddBuiltin(BuiltinOperator_MAXIMUM, Register_MAXIMUM(),
1,
4);
AddBuiltin(BuiltinOperator_MINIMUM, Register_MINIMUM(),
1,
4);
AddBuiltin(BuiltinOperator_ARG_MAX, Register_ARG_MAX(),
1,
3);
AddBuiltin(BuiltinOperator_ARG_MIN, Register_ARG_MIN(),
1,
3);
AddBuiltin(BuiltinOperator_GREATER, Register_GREATER(),
1,
2);
AddBuiltin(BuiltinOperator_GREATER_EQUAL, Register_GREATER_EQUAL(),
1,
3);
AddBuiltin(BuiltinOperator_LESS, Register_LESS(),
1,
3);
AddBuiltin(BuiltinOperator_LESS_EQUAL, Register_LESS_EQUAL(),
1,
2);
AddBuiltin(BuiltinOperator_FLOOR, Register_FLOOR());
AddBuiltin(BuiltinOperator_CEIL, Register_CEIL());
AddBuiltin(BuiltinOperator_ROUND, Register_ROUND());
AddBuiltin(BuiltinOperator_NEG, Register_NEG());
AddBuiltin(BuiltinOperator_SELECT, Register_SELECT(),
1,
4);
AddBuiltin(BuiltinOperator_SELECT_V2, Register_SELECT_V2(),
1,
2);
AddBuiltin(BuiltinOperator_SLICE, Register_SLICE(),
1,
6);
AddBuiltin(BuiltinOperator_SIN, Register_SIN());
AddBuiltin(BuiltinOperator_COS, Register_COS());
AddBuiltin(BuiltinOperator_TRANSPOSE_CONV, Register_TRANSPOSE_CONV(),
1,
5);
AddBuiltin(BuiltinOperator_TILE, Register_TILE(),
1,
3);
AddBuiltin(BuiltinOperator_SUM, Register_SUM(),
1,
2);
AddBuiltin(BuiltinOperator_REDUCE_PROD, Register_REDUCE_PROD(),
1,
2);
AddBuiltin(BuiltinOperator_REDUCE_MAX, Register_REDUCE_MAX(),
1,
3);
AddBuiltin(BuiltinOperator_REDUCE_MIN, Register_REDUCE_MIN(),
1,
3);
AddBuiltin(BuiltinOperator_REDUCE_ANY, Register_REDUCE_ANY());
AddBuiltin(BuiltinOperator_REDUCE_ALL, Register_REDUCE_ALL());
AddBuiltin(BuiltinOperator_EXPAND_DIMS, Register_EXPAND_DIMS());
AddBuiltin(BuiltinOperator_SPARSE_TO_DENSE, Register_SPARSE_TO_DENSE(),
1,
3);
AddBuiltin(BuiltinOperator_EQUAL, Register_EQUAL(),
1,
4);
AddBuiltin(BuiltinOperator_NOT_EQUAL, Register_NOT_EQUAL(),
1,
3);
AddBuiltin(BuiltinOperator_SQRT, Register_SQRT());
AddBuiltin(BuiltinOperator_RSQRT, Register_RSQRT(),
1,
3);
AddBuiltin(BuiltinOperator_SHAPE, Register_SHAPE());
AddBuiltin(BuiltinOperator_RANK, Register_RANK());
AddBuiltin(BuiltinOperator_POW, Register_POW());
AddBuiltin(BuiltinOperator_FAKE_QUANT, Register_FAKE_QUANT(), 1, 2);
AddBuiltin(BuiltinOperator_PACK, Register_PACK(),
1,
4);
AddBuiltin(BuiltinOperator_ONE_HOT, Register_ONE_HOT());
AddBuiltin(BuiltinOperator_LOGICAL_OR, Register_LOGICAL_OR());
AddBuiltin(BuiltinOperator_LOGICAL_AND, Register_LOGICAL_AND());
AddBuiltin(BuiltinOperator_LOGICAL_NOT, Register_LOGICAL_NOT());
AddBuiltin(BuiltinOperator_UNPACK, Register_UNPACK(),
1,
4);
AddBuiltin(BuiltinOperator_FLOOR_DIV, Register_FLOOR_DIV(),
1,
3);
AddBuiltin(BuiltinOperator_SQUARE, Register_SQUARE());
AddBuiltin(BuiltinOperator_ZEROS_LIKE, Register_ZEROS_LIKE());
AddBuiltin(BuiltinOperator_FLOOR_MOD, Register_FLOOR_MOD(),
1,
2);
AddBuiltin(BuiltinOperator_RANGE, Register_RANGE(),
1,
2);
AddBuiltin(BuiltinOperator_LEAKY_RELU, Register_LEAKY_RELU(),
1,
2);
AddBuiltin(BuiltinOperator_SQUARED_DIFFERENCE, Register_SQUARED_DIFFERENCE(),
1,
2);
AddBuiltin(BuiltinOperator_FILL, Register_FILL(),
1,
4);
AddBuiltin(BuiltinOperator_MIRROR_PAD, Register_MIRROR_PAD(),
1,
3);
AddBuiltin(BuiltinOperator_UNIQUE, Register_UNIQUE());
AddBuiltin(BuiltinOperator_REVERSE_V2, Register_REVERSE_V2(),
1,
3);
AddBuiltin(BuiltinOperator_ADD_N, Register_ADD_N());
AddBuiltin(BuiltinOperator_GATHER_ND, Register_GATHER_ND(),
1,
5);
AddBuiltin(BuiltinOperator_WHERE, Register_WHERE(),
1,
2);
AddBuiltin(BuiltinOperator_ELU, Register_ELU());
AddBuiltin(BuiltinOperator_REVERSE_SEQUENCE, Register_REVERSE_SEQUENCE());
AddBuiltin(BuiltinOperator_MATRIX_DIAG, Register_MATRIX_DIAG());
AddBuiltin(BuiltinOperator_QUANTIZE, Register_QUANTIZE(),
1,
3);
AddBuiltin(BuiltinOperator_MATRIX_SET_DIAG, Register_MATRIX_SET_DIAG());
AddBuiltin(BuiltinOperator_IF, tflite::ops::builtin::Register_IF());
AddBuiltin(BuiltinOperator_WHILE, tflite::ops::builtin::Register_WHILE());
AddBuiltin(BuiltinOperator_NON_MAX_SUPPRESSION_V4,
Register_NON_MAX_SUPPRESSION_V4());
AddBuiltin(BuiltinOperator_NON_MAX_SUPPRESSION_V5,
Register_NON_MAX_SUPPRESSION_V5());
AddBuiltin(BuiltinOperator_SCATTER_ND, Register_SCATTER_ND());
AddBuiltin(BuiltinOperator_DENSIFY, Register_DENSIFY());
AddBuiltin(BuiltinOperator_SEGMENT_SUM, Register_SEGMENT_SUM());
AddBuiltin(BuiltinOperator_BATCH_MATMUL, Register_BATCH_MATMUL(),
1,
4);
AddBuiltin(BuiltinOperator_CUMSUM, Register_CUMSUM());
AddBuiltin(BuiltinOperator_BROADCAST_TO, Register_BROADCAST_TO(),
2,
3);
AddBuiltin(BuiltinOperator_CALL_ONCE,
tflite::ops::builtin::Register_CALL_ONCE());
AddBuiltin(BuiltinOperator_RFFT2D, Register_RFFT2D());
AddBuiltin(BuiltinOperator_CONV_3D, Register_CONV_3D());
AddBuiltin(BuiltinOperator_IMAG, Register_IMAG());
AddBuiltin(BuiltinOperator_REAL, Register_REAL());
AddBuiltin(BuiltinOperator_COMPLEX_ABS, Register_COMPLEX_ABS());
AddBuiltin(BuiltinOperator_BROADCAST_ARGS, Register_BROADCAST_ARGS());
AddBuiltin(BuiltinOperator_HASHTABLE, Register_HASHTABLE());
AddBuiltin(BuiltinOperator_HASHTABLE_FIND, Register_HASHTABLE_FIND());
AddBuiltin(BuiltinOperator_HASHTABLE_IMPORT, Register_HASHTABLE_IMPORT());
AddBuiltin(BuiltinOperator_HASHTABLE_SIZE, Register_HASHTABLE_SIZE());
AddBuiltin(BuiltinOperator_CONV_3D_TRANSPOSE, Register_CONV_3D_TRANSPOSE());
AddBuiltin(BuiltinOperator_VAR_HANDLE, Register_VAR_HANDLE());
AddBuiltin(BuiltinOperator_READ_VARIABLE, Register_READ_VARIABLE());
AddBuiltin(BuiltinOperator_ASSIGN_VARIABLE, Register_ASSIGN_VARIABLE());
AddBuiltin(BuiltinOperator_MULTINOMIAL, Register_MULTINOMIAL());
AddBuiltin(BuiltinOperator_RANDOM_STANDARD_NORMAL,
Register_RANDOM_STANDARD_NORMAL());
AddBuiltin(BuiltinOperator_BUCKETIZE, Register_BUCKETIZE());
AddBuiltin(BuiltinOperator_RANDOM_UNIFORM, Register_RANDOM_UNIFORM());
AddBuiltin(BuiltinOperator_GELU, Register_GELU(),
1,
2);
AddBuiltin(BuiltinOperator_DYNAMIC_UPDATE_SLICE,
Register_DYNAMIC_UPDATE_SLICE(),
1,
2);
AddBuiltin(BuiltinOperator_UNSORTED_SEGMENT_PROD,
Register_UNSORTED_SEGMENT_PROD());
AddBuiltin(BuiltinOperator_UNSORTED_SEGMENT_MAX,
Register_UNSORTED_SEGMENT_MAX());
AddBuiltin(BuiltinOperator_UNSORTED_SEGMENT_MIN,
Register_UNSORTED_SEGMENT_MIN());
AddBuiltin(BuiltinOperator_UNSORTED_SEGMENT_SUM,
Register_UNSORTED_SEGMENT_SUM());
AddBuiltin(BuiltinOperator_ATAN2, Register_ATAN2());
AddBuiltin(BuiltinOperator_SIGN, Register_SIGN(),
1,
2);
AddBuiltin(BuiltinOperator_BITCAST, Register_BITCAST());
AddBuiltin(BuiltinOperator_BITWISE_XOR, Register_BITWISE_XOR());
AddBuiltin(BuiltinOperator_RIGHT_SHIFT, Register_RIGHT_SHIFT());
AddBuiltin(BuiltinOperator_STABLEHLO_SCATTER, Register_STABLEHLO_SCATTER());
AddBuiltin(BuiltinOperator_DILATE, Register_DILATE());
AddBuiltin(BuiltinOperator_STABLEHLO_RNG_BIT_GENERATOR,
Register_STABLEHLO_RNG_BIT_GENERATOR());
AddBuiltin(BuiltinOperator_REDUCE_WINDOW, Register_REDUCE_WINDOW());
AddBuiltin(BuiltinOperator_STABLEHLO_REDUCE_WINDOW,
Register_STABLEHLO_REDUCE_WINDOW());
AddBuiltin(BuiltinOperator_STABLEHLO_GATHER, Register_STABLEHLO_GATHER());
AddBuiltin(BuiltinOperator_STABLEHLO_ADD, Register_STABLEHLO_ADD());
AddBuiltin(BuiltinOperator_STABLEHLO_MULTIPLY, Register_STABLEHLO_MULTIPLY());
AddBuiltin(BuiltinOperator_STABLEHLO_MAXIMUM, Register_STABLEHLO_MAXIMUM());
AddBuiltin(BuiltinOperator_STABLEHLO_MINIMUM, Register_STABLEHLO_MINIMUM());
AddBuiltin(BuiltinOperator_STABLEHLO_PAD, Register_STABLEHLO_PAD());
AddBuiltin(BuiltinOperator_STABLEHLO_COMPOSITE,
Register_STABLEHLO_COMPOSITE());
AddCustom("NumericVerify", tflite::ops::custom::Register_NUMERIC_VERIFY());
AddCustom("Mfcc", tflite::ops::custom::Register_MFCC());
AddCustom("AudioSpectrogram",
tflite::ops::custom::Register_AUDIO_SPECTROGRAM());
AddCustom("TFLite_Detection_PostProcess",
tflite::ops::custom::Register_DETECTION_POSTPROCESS());
may_directly_contain_user_defined_ops_ = false;
delegate_creators_.push_back([](TfLiteContext* context) {
return tflite::MaybeCreateXNNPACKDelegate(context,
XNNPackQS8Options::default_value);
});
}
BuiltinOpResolverWithXNNPACK::BuiltinOpResolverWithXNNPACK(
bool enable_xnnpack_unsigned_quantized) {
delegate_creators_.clear();
XNNPackQS8Options xnnpack_qs8_options = enable_xnnpack_unsigned_quantized
? XNNPackQS8Options::enabled
: XNNPackQS8Options::disabled;
delegate_creators_.push_back([xnnpack_qs8_options](TfLiteContext* context) {
return tflite::MaybeCreateXNNPACKDelegate(context, xnnpack_qs8_options);
});
}
}
}
} | #include "tensorflow/lite/core/kernels/register.h"
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
#include "tensorflow/lite/mutable_op_resolver.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite::ops::builtin {
namespace {
TEST(BuiltinOpResolverTest, SupportsAdd) {
BuiltinOpResolver builtin_op_resolver;
const TfLiteRegistration *add =
builtin_op_resolver.FindOp(::tflite::BuiltinOperator_ADD, 1);
ASSERT_NE(add, nullptr);
ASSERT_NE(add->init, nullptr);
ASSERT_NE(add->free, nullptr);
ASSERT_NE(add->prepare, nullptr);
ASSERT_NE(add->invoke, nullptr);
}
TEST(BuiltinOpResolverTest, CopySupportsAdd) {
BuiltinOpResolver builtin_op_resolver;
MutableOpResolver copy = builtin_op_resolver;
const TfLiteRegistration *add = copy.FindOp(::tflite::BuiltinOperator_ADD, 1);
ASSERT_NE(add, nullptr);
ASSERT_NE(add->init, nullptr);
ASSERT_NE(add->free, nullptr);
ASSERT_NE(add->prepare, nullptr);
ASSERT_NE(add->invoke, nullptr);
}
#if defined(TFLITE_WITHOUT_XNNPACK)
TEST(BuiltinOpResolverTest, HasXNNPACKDelegate_QS8) {
BuiltinOpResolver builtin_op_resolver;
ASSERT_EQ(builtin_op_resolver.GetDelegateCreators().size(), 1);
BuiltinOpResolver::TfLiteDelegateCreator delegate_creator =
builtin_op_resolver.GetDelegateCreators()[0];
std::unique_ptr<TfLiteDelegate, void (*)(TfLiteDelegate *)> delegate =
delegate_creator(nullptr);
const TfLiteXNNPackDelegateOptions *options =
TfLiteXNNPackDelegateGetOptions(delegate.get());
ASSERT_EQ(options->flags & TFLITE_XNNPACK_DELEGATE_FLAG_QU8,
TFLITE_XNNPACK_DELEGATE_FLAG_QU8);
ASSERT_EQ(options->flags & TFLITE_XNNPACK_DELEGATE_FLAG_QS8,
TFLITE_XNNPACK_DELEGATE_FLAG_QS8);
}
TEST(BuiltinOpResolverTest, HasXNNPACKDelegate_QS8_QU8) {
BuiltinOpResolver builtin_op_resolver;
ASSERT_EQ(builtin_op_resolver.GetDelegateCreators().size(), 1);
BuiltinOpResolver::TfLiteDelegateCreator delegate_creator =
builtin_op_resolver.GetDelegateCreators()[0];
std::unique_ptr<TfLiteDelegate, void (*)(TfLiteDelegate *)> delegate =
delegate_creator(nullptr);
const TfLiteXNNPackDelegateOptions *options =
TfLiteXNNPackDelegateGetOptions(delegate.get());
ASSERT_EQ(options->flags & TFLITE_XNNPACK_DELEGATE_FLAG_QU8,
TFLITE_XNNPACK_DELEGATE_FLAG_QU8);
ASSERT_EQ(options->flags & TFLITE_XNNPACK_DELEGATE_FLAG_QS8,
TFLITE_XNNPACK_DELEGATE_FLAG_QS8);
}
TEST(BuiltinOpResolverTest, Disable_QU8) {
BuiltinOpResolverWithXNNPACK builtin_op_resolver(false);
ASSERT_EQ(builtin_op_resolver.GetDelegateCreators().size(), 1);
BuiltinOpResolver::TfLiteDelegateCreator delegate_creator =
builtin_op_resolver.GetDelegateCreators()[0];
std::unique_ptr<TfLiteDelegate, void (*)(TfLiteDelegate *)> delegate =
delegate_creator(nullptr);
const TfLiteXNNPackDelegateOptions *options =
TfLiteXNNPackDelegateGetOptions(delegate.get());
ASSERT_EQ(options->flags & TFLITE_XNNPACK_DELEGATE_FLAG_QU8, 0);
ASSERT_EQ(options->flags & TFLITE_XNNPACK_DELEGATE_FLAG_QS8,
TFLITE_XNNPACK_DELEGATE_FLAG_QS8);
}
#endif
}
} |
9 | #ifndef ABSL_SYNCHRONIZATION_BARRIER_H_
#define ABSL_SYNCHRONIZATION_BARRIER_H_
#include "absl/base/thread_annotations.h"
#include "absl/synchronization/mutex.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
class Barrier {
public:
explicit Barrier(int num_threads)
: num_to_block_(num_threads), num_to_exit_(num_threads) {}
Barrier(const Barrier&) = delete;
Barrier& operator=(const Barrier&) = delete;
bool Block();
private:
Mutex lock_;
int num_to_block_ ABSL_GUARDED_BY(lock_);
int num_to_exit_ ABSL_GUARDED_BY(lock_);
};
ABSL_NAMESPACE_END
}
#endif
#include "absl/synchronization/barrier.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/synchronization/mutex.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
static bool IsZero(void *arg) {
return 0 == *reinterpret_cast<int *>(arg);
}
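// Decrements the number of threads still expected at the barrier and blocks
// until that count reaches zero; exactly one caller (the last to leave)
// returns true, signaling that the Barrier may safely be destroyed.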
bool Barrier::Block() {
MutexLock l(&this->lock_);
this->num_to_block_--;
if (this->num_to_block_ < 0) {
ABSL_RAW_LOG(
FATAL,
"Block() called too many times. num_to_block_=%d out of total=%d",
this->num_to_block_, this->num_to_exit_);
}
this->lock_.Await(Condition(IsZero, &this->num_to_block_));
this->num_to_exit_--;
ABSL_RAW_CHECK(this->num_to_exit_ >= 0, "barrier underflow");
return this->num_to_exit_ == 0;
}
ABSL_NAMESPACE_END
} | #include "absl/synchronization/barrier.h"
#include <thread>
#include <vector>
#include "gtest/gtest.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
TEST(Barrier, SanityTest) {
constexpr int kNumThreads = 10;
absl::Barrier* barrier = new absl::Barrier(kNumThreads);
absl::Mutex mutex;
int counter = 0;
auto thread_func = [&] {
if (barrier->Block()) {
delete barrier;
}
absl::MutexLock lock(&mutex);
++counter;
};
std::vector<std::thread> threads;
for (int i = 0; i < kNumThreads - 1; ++i) {
threads.push_back(std::thread(thread_func));
}
absl::SleepFor(absl::Seconds(1));
{
absl::MutexLock lock(&mutex);
EXPECT_EQ(counter, 0);
}
threads.push_back(std::thread(thread_func));
for (auto& thread : threads) {
thread.join();
}
absl::MutexLock lock(&mutex);
EXPECT_EQ(counter, kNumThreads);
} |
10 | #ifndef TENSORFLOW_CORE_LIB_CORE_ARENA_H_
#define TENSORFLOW_CORE_LIB_CORE_ARENA_H_
#include <assert.h>
#include <vector>
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace core {
class Arena {
public:
explicit Arena(const size_t block_size);
~Arena();
char* Alloc(const size_t size) {
return reinterpret_cast<char*>(GetMemory(size, 1));
}
char* AllocAligned(const size_t size, const size_t alignment) {
return reinterpret_cast<char*>(GetMemory(size, alignment));
}
void Reset();
#ifdef __i386__
static constexpr int kDefaultAlignment = 4;
#else
static constexpr int kDefaultAlignment = 8;
#endif
protected:
bool SatisfyAlignment(const size_t alignment);
void MakeNewBlock(const uint32 alignment);
void* GetMemoryFallback(const size_t size, const int align);
void* GetMemory(const size_t size, const int align) {
assert(remaining_ <= block_size_);
if (size > 0 && size < remaining_ && align == 1) {
void* result = freestart_;
freestart_ += size;
remaining_ -= size;
return result;
}
return GetMemoryFallback(size, align);
}
size_t remaining_;
private:
struct AllocatedBlock {
char* mem;
size_t size;
};
AllocatedBlock* AllocNewBlock(const size_t block_size,
const uint32 alignment);
const size_t block_size_;
char* freestart_;
char* freestart_when_empty_;
size_t blocks_alloced_;
AllocatedBlock first_blocks_[16];
std::vector<AllocatedBlock>* overflow_blocks_;
void FreeBlocks();
Arena(const Arena&) = delete;
void operator=(const Arena&) = delete;
};
}
}
#endif
#include "tensorflow/core/lib/core/arena.h"
#include <assert.h>
#include <algorithm>
#include <vector>
#include "tensorflow/core/lib/math/math_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/mem.h"
namespace tensorflow {
namespace core {
Arena::Arena(const size_t block_size)
: remaining_(0),
block_size_(block_size),
freestart_(nullptr),
blocks_alloced_(1),
overflow_blocks_(nullptr) {
assert(block_size > kDefaultAlignment);
first_blocks_[0].mem =
reinterpret_cast<char*>(port::AlignedMalloc(block_size_, sizeof(void*)));
first_blocks_[0].size = block_size_;
Reset();
}
Arena::~Arena() {
FreeBlocks();
assert(overflow_blocks_ == nullptr);
for (size_t i = 0; i < blocks_alloced_; ++i) {
port::AlignedFree(first_blocks_[i].mem);
}
}
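// Advances freestart_ to the next multiple of `alignment`, charging the skipped
// bytes against remaining_; returns false if the current block cannot satisfy
// the alignment.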
bool Arena::SatisfyAlignment(size_t alignment) {
const size_t overage = reinterpret_cast<size_t>(freestart_) & (alignment - 1);
if (overage > 0) {
const size_t waste = alignment - overage;
if (waste >= remaining_) {
return false;
}
freestart_ += waste;
remaining_ -= waste;
}
DCHECK_EQ(size_t{0}, reinterpret_cast<size_t>(freestart_) & (alignment - 1));
return true;
}
void Arena::Reset() {
FreeBlocks();
freestart_ = first_blocks_[0].mem;
remaining_ = first_blocks_[0].size;
CHECK(SatisfyAlignment(kDefaultAlignment));
freestart_when_empty_ = freestart_;
}
void Arena::MakeNewBlock(const uint32 alignment) {
AllocatedBlock* block = AllocNewBlock(block_size_, alignment);
freestart_ = block->mem;
remaining_ = block->size;
CHECK(SatisfyAlignment(alignment));
}
static uint32 LeastCommonMultiple(uint32 a, uint32 b) {
if (a > b) {
return (a / MathUtil::GCD<uint32>(a, b)) * b;
} else if (a < b) {
return (b / MathUtil::GCD<uint32>(b, a)) * a;
} else {
return a;
}
}
Arena::AllocatedBlock* Arena::AllocNewBlock(const size_t block_size,
const uint32 alignment) {
AllocatedBlock* block;
if (blocks_alloced_ < TF_ARRAYSIZE(first_blocks_)) {
block = &first_blocks_[blocks_alloced_++];
} else {
if (overflow_blocks_ == nullptr)
overflow_blocks_ = new std::vector<AllocatedBlock>;
overflow_blocks_->resize(overflow_blocks_->size() + 1);
block = &overflow_blocks_->back();
}
uint32 adjusted_alignment =
(alignment > 1 ? LeastCommonMultiple(alignment, kDefaultAlignment) : 1);
adjusted_alignment =
std::max(adjusted_alignment, static_cast<uint32>(sizeof(void*)));
CHECK_LE(adjusted_alignment, static_cast<uint32>(1 << 20))
<< "Alignment on boundaries greater than 1MB not supported.";
size_t adjusted_block_size = block_size;
if (adjusted_block_size > adjusted_alignment) {
const uint32 excess = adjusted_block_size % adjusted_alignment;
adjusted_block_size += (excess > 0 ? adjusted_alignment - excess : 0);
}
block->mem = reinterpret_cast<char*>(
port::AlignedMalloc(adjusted_block_size, adjusted_alignment));
block->size = adjusted_block_size;
CHECK(nullptr != block->mem) << "block_size=" << block_size
<< " adjusted_block_size=" << adjusted_block_size
<< " alignment=" << alignment
<< " adjusted_alignment=" << adjusted_alignment;
return block;
}
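// Slow path for GetMemory: requests larger than a quarter of the block size get
// a dedicated block; otherwise a fresh block is started whenever alignment or
// size cannot be satisfied from the current one.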
void* Arena::GetMemoryFallback(const size_t size, const int alignment) {
if (0 == size) {
return nullptr;
}
CHECK(alignment > 0 && 0 == (alignment & (alignment - 1)));
if (block_size_ == 0 || size > block_size_ / 4) {
return AllocNewBlock(size, alignment)->mem;
}
if (!SatisfyAlignment(alignment) || size > remaining_) {
MakeNewBlock(alignment);
}
CHECK_LE(size, remaining_);
remaining_ -= size;
void* result = freestart_;
freestart_ += size;
return result;
}
void Arena::FreeBlocks() {
for (size_t i = 1; i < blocks_alloced_; ++i) {
port::AlignedFree(first_blocks_[i].mem);
first_blocks_[i].mem = nullptr;
first_blocks_[i].size = 0;
}
blocks_alloced_ = 1;
if (overflow_blocks_ != nullptr) {
std::vector<AllocatedBlock>::iterator it;
for (it = overflow_blocks_->begin(); it != overflow_blocks_->end(); ++it) {
port::AlignedFree(it->mem);
}
delete overflow_blocks_;
overflow_blocks_ = nullptr;
}
}
}
} | #include "tensorflow/core/lib/core/arena.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace core {
namespace {
static void TestMemory(void* mem, int size) {
memset(mem, 0xaa, size);
char* tmp[100];
for (size_t i = 0; i < TF_ARRAYSIZE(tmp); i++) {
tmp[i] = new char[i * i + 1];
}
memset(mem, 0xcc, size);
for (size_t i = 0; i < TF_ARRAYSIZE(tmp); i++) {
delete[] tmp[i];
}
memset(mem, 0xee, size);
}
TEST(ArenaTest, TestBasicArena) {
Arena a(1024);
char* memory = a.Alloc(100);
ASSERT_NE(memory, nullptr);
TestMemory(memory, 100);
memory = a.Alloc(100);
ASSERT_NE(memory, nullptr);
TestMemory(memory, 100);
}
TEST(ArenaTest, TestAlignment) {
Arena a(1024);
char* byte0 = a.Alloc(1);
char* alloc_aligned8 = a.AllocAligned(17, 8);
EXPECT_EQ(alloc_aligned8 - byte0, 8);
char* alloc_aligned8_b = a.AllocAligned(8, 8);
EXPECT_EQ(alloc_aligned8_b - alloc_aligned8, 24);
char* alloc_aligned8_c = a.AllocAligned(16, 8);
EXPECT_EQ(alloc_aligned8_c - alloc_aligned8_b, 8);
char* alloc_aligned8_d = a.AllocAligned(8, 1);
EXPECT_EQ(alloc_aligned8_d - alloc_aligned8_c, 16);
}
TEST(ArenaTest, TestVariousArenaSizes) {
{
Arena a(1024);
char* memory = a.Alloc(1024);
ASSERT_NE(memory, nullptr);
TestMemory(memory, 1024);
char* memory2 = a.Alloc(1024);
ASSERT_NE(memory2, nullptr);
TestMemory(memory2, 1024);
}
{
Arena a(1024);
char* memory = a.Alloc(768);
ASSERT_NE(memory, nullptr);
TestMemory(memory, 768);
char* memory2 = a.Alloc(768);
ASSERT_NE(memory2, nullptr);
TestMemory(memory2, 768);
}
{
Arena a(1024);
char* memory = a.Alloc(10240);
ASSERT_NE(memory, nullptr);
TestMemory(memory, 10240);
char* memory2 = a.Alloc(1234);
ASSERT_NE(memory2, nullptr);
TestMemory(memory2, 1234);
}
}
}
}
} |
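Supplementary illustration for the arena entry above (not part of the original Code/Unit Test pair): a minimal sketch, assuming only the Arena API already exercised by the tests (construction with a block size, Alloc, AllocAligned), that checks the returned pointers actually carry the requested alignment. The test name is hypothetical.

#include <cstdint>

#include "tensorflow/core/lib/core/arena.h"
#include "tensorflow/core/platform/test.h"

namespace tensorflow {
namespace core {
namespace {

// Hypothetical extra check: every AllocAligned(size, alignment) result should
// be a non-null pointer whose address is a multiple of the requested
// power-of-two alignment.
TEST(ArenaTest, AllocAlignedReturnsAlignedPointers) {
  Arena a(1024);
  for (int alignment : {1, 2, 4, 8, 16, 32}) {
    char* p = a.AllocAligned(13, alignment);
    ASSERT_NE(p, nullptr);
    EXPECT_EQ(reinterpret_cast<uintptr_t>(p) % alignment, 0u);
  }
}

}  // namespace
}  // namespace core
}  // namespace tensorflow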
11 | #ifndef ABSL_SYNCHRONIZATION_INTERNAL_PER_THREAD_SEM_H_
#define ABSL_SYNCHRONIZATION_INTERNAL_PER_THREAD_SEM_H_
#include <atomic>
#include "absl/base/internal/thread_identity.h"
#include "absl/synchronization/internal/create_thread_identity.h"
#include "absl/synchronization/internal/kernel_timeout.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
class Mutex;
namespace synchronization_internal {
class PerThreadSem {
public:
PerThreadSem() = delete;
PerThreadSem(const PerThreadSem&) = delete;
PerThreadSem& operator=(const PerThreadSem&) = delete;
static void Tick(base_internal::ThreadIdentity* identity);
static void SetThreadBlockedCounter(std::atomic<int> *counter);
static std::atomic<int> *GetThreadBlockedCounter();
private:
static inline void Init(base_internal::ThreadIdentity* identity);
static inline void Post(base_internal::ThreadIdentity* identity);
static inline bool Wait(KernelTimeout t);
friend class PerThreadSemTest;
friend class absl::Mutex;
friend void OneTimeInitThreadIdentity(absl::base_internal::ThreadIdentity*);
};
}
ABSL_NAMESPACE_END
}
extern "C" {
void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemInit)(
absl::base_internal::ThreadIdentity* identity);
void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost)(
absl::base_internal::ThreadIdentity* identity);
bool ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemWait)(
absl::synchronization_internal::KernelTimeout t);
void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPoke)(
absl::base_internal::ThreadIdentity* identity);
}
void absl::synchronization_internal::PerThreadSem::Init(
absl::base_internal::ThreadIdentity* identity) {
ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemInit)(identity);
}
void absl::synchronization_internal::PerThreadSem::Post(
absl::base_internal::ThreadIdentity* identity) {
ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost)(identity);
}
bool absl::synchronization_internal::PerThreadSem::Wait(
absl::synchronization_internal::KernelTimeout t) {
return ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemWait)(t);
}
#endif
#include "absl/base/internal/low_level_alloc.h"
#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING
#include "absl/synchronization/internal/per_thread_sem.h"
#include <atomic>
#include "absl/base/attributes.h"
#include "absl/base/internal/thread_identity.h"
#include "absl/synchronization/internal/waiter.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace synchronization_internal {
void PerThreadSem::SetThreadBlockedCounter(std::atomic<int> *counter) {
base_internal::ThreadIdentity *identity;
identity = GetOrCreateCurrentThreadIdentity();
identity->blocked_count_ptr = counter;
}
std::atomic<int> *PerThreadSem::GetThreadBlockedCounter() {
base_internal::ThreadIdentity *identity;
identity = GetOrCreateCurrentThreadIdentity();
return identity->blocked_count_ptr;
}
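// Advances this thread's ticker and pokes the thread's waiter once it has been
// waiting for more than Waiter::kIdlePeriods ticks without being marked idle.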
void PerThreadSem::Tick(base_internal::ThreadIdentity *identity) {
const int ticker =
identity->ticker.fetch_add(1, std::memory_order_relaxed) + 1;
const int wait_start = identity->wait_start.load(std::memory_order_relaxed);
const bool is_idle = identity->is_idle.load(std::memory_order_relaxed);
if (wait_start && (ticker - wait_start > Waiter::kIdlePeriods) && !is_idle) {
ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPoke)(identity);
}
}
}
ABSL_NAMESPACE_END
}
extern "C" {
ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemInit)(
absl::base_internal::ThreadIdentity *identity) {
new (absl::synchronization_internal::Waiter::GetWaiter(identity))
absl::synchronization_internal::Waiter();
}
ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost)(
absl::base_internal::ThreadIdentity *identity) {
absl::synchronization_internal::Waiter::GetWaiter(identity)->Post();
}
ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPoke)(
absl::base_internal::ThreadIdentity *identity) {
absl::synchronization_internal::Waiter::GetWaiter(identity)->Poke();
}
ABSL_ATTRIBUTE_WEAK bool ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemWait)(
absl::synchronization_internal::KernelTimeout t) {
bool timeout = false;
absl::base_internal::ThreadIdentity *identity;
identity = absl::synchronization_internal::GetOrCreateCurrentThreadIdentity();
int ticker = identity->ticker.load(std::memory_order_relaxed);
identity->wait_start.store(ticker ? ticker : 1, std::memory_order_relaxed);
identity->is_idle.store(false, std::memory_order_relaxed);
if (identity->blocked_count_ptr != nullptr) {
identity->blocked_count_ptr->fetch_add(1, std::memory_order_relaxed);
}
timeout =
!absl::synchronization_internal::Waiter::GetWaiter(identity)->Wait(t);
if (identity->blocked_count_ptr != nullptr) {
identity->blocked_count_ptr->fetch_sub(1, std::memory_order_relaxed);
}
identity->is_idle.store(false, std::memory_order_relaxed);
identity->wait_start.store(0, std::memory_order_relaxed);
return !timeout;
}
}
#endif | #include "absl/synchronization/internal/per_thread_sem.h"
#include <atomic>
#include <condition_variable>
#include <functional>
#include <limits>
#include <mutex>
#include <string>
#include <thread>
#include "gtest/gtest.h"
#include "absl/base/config.h"
#include "absl/base/internal/cycleclock.h"
#include "absl/base/internal/thread_identity.h"
#include "absl/strings/str_cat.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace synchronization_internal {
class SimpleSemaphore {
public:
SimpleSemaphore() : count_(0) {}
void Wait() {
std::unique_lock<std::mutex> lock(mu_);
cv_.wait(lock, [this]() { return count_ > 0; });
--count_;
cv_.notify_one();
}
void Post() {
std::lock_guard<std::mutex> lock(mu_);
++count_;
cv_.notify_one();
}
private:
std::mutex mu_;
std::condition_variable cv_;
int count_;
};
struct ThreadData {
int num_iterations;
SimpleSemaphore identity2_written;
base_internal::ThreadIdentity *identity1;
base_internal::ThreadIdentity *identity2;
KernelTimeout timeout;
};
class PerThreadSemTest : public testing::Test {
public:
static void TimingThread(ThreadData* t) {
t->identity2 = GetOrCreateCurrentThreadIdentity();
t->identity2_written.Post();
while (t->num_iterations--) {
Wait(t->timeout);
Post(t->identity1);
}
}
void TestTiming(const char *msg, bool timeout) {
static const int kNumIterations = 100;
ThreadData t;
t.num_iterations = kNumIterations;
t.timeout = timeout ?
KernelTimeout(absl::Now() + absl::Seconds(10000))
: KernelTimeout::Never();
t.identity1 = GetOrCreateCurrentThreadIdentity();
std::thread partner_thread(std::bind(TimingThread, &t));
t.identity2_written.Wait();
int64_t min_cycles = std::numeric_limits<int64_t>::max();
int64_t total_cycles = 0;
for (int i = 0; i < kNumIterations; ++i) {
absl::SleepFor(absl::Milliseconds(20));
int64_t cycles = base_internal::CycleClock::Now();
Post(t.identity2);
Wait(t.timeout);
cycles = base_internal::CycleClock::Now() - cycles;
min_cycles = std::min(min_cycles, cycles);
total_cycles += cycles;
}
std::string out = StrCat(
msg, "min cycle count=", min_cycles, " avg cycle count=",
absl::SixDigits(static_cast<double>(total_cycles) / kNumIterations));
printf("%s\n", out.c_str());
partner_thread.join();
}
protected:
static void Post(base_internal::ThreadIdentity *id) {
PerThreadSem::Post(id);
}
static bool Wait(KernelTimeout t) {
return PerThreadSem::Wait(t);
}
static bool Wait(absl::Time t) {
return Wait(KernelTimeout(t));
}
static void Tick(base_internal::ThreadIdentity *identity) {
PerThreadSem::Tick(identity);
}
};
namespace {
TEST_F(PerThreadSemTest, WithoutTimeout) {
PerThreadSemTest::TestTiming("Without timeout: ", false);
}
TEST_F(PerThreadSemTest, WithTimeout) {
PerThreadSemTest::TestTiming("With timeout: ", true);
}
TEST_F(PerThreadSemTest, Timeouts) {
const absl::Duration delay = absl::Milliseconds(50);
const absl::Time start = absl::Now();
EXPECT_FALSE(Wait(start + delay));
const absl::Duration elapsed = absl::Now() - start;
absl::Duration slop = absl::Milliseconds(1);
#ifdef _MSC_VER
slop = absl::Milliseconds(16);
#endif
EXPECT_LE(delay - slop, elapsed)
<< "Wait returned " << delay - elapsed
<< " early (with " << slop << " slop), start time was " << start;
absl::Time negative_timeout = absl::UnixEpoch() - absl::Milliseconds(100);
EXPECT_FALSE(Wait(negative_timeout));
EXPECT_LE(negative_timeout, absl::Now() + slop);
Post(GetOrCreateCurrentThreadIdentity());
EXPECT_TRUE(Wait(negative_timeout));
}
TEST_F(PerThreadSemTest, ThreadIdentityReuse) {
for (int i = 0; i < 10000; i++) {
std::thread t([]() { GetOrCreateCurrentThreadIdentity(); });
t.join();
}
}
}
}
ABSL_NAMESPACE_END
} |
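Supplementary illustration for the per-thread-semaphore entry above (not part of the original Code/Unit Test pair): a minimal sketch, assuming only the public SetThreadBlockedCounter/GetThreadBlockedCounter accessors declared in the header above, showing that the counter registered for the calling thread is the one later reported for it. The test name is hypothetical.

#include <atomic>

#include "gtest/gtest.h"
#include "absl/base/config.h"
#include "absl/synchronization/internal/per_thread_sem.h"

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace synchronization_internal {
namespace {

// Hypothetical round-trip check: the blocked-count pointer registered for the
// current thread is returned by the getter, and since nothing has blocked on
// the semaphore yet, the count itself is untouched.
TEST(PerThreadSemBlockedCounter, RoundTrip) {
  std::atomic<int> counter{0};
  PerThreadSem::SetThreadBlockedCounter(&counter);
  EXPECT_EQ(&counter, PerThreadSem::GetThreadBlockedCounter());
  EXPECT_EQ(0, counter.load());
}

}  // namespace
}  // namespace synchronization_internal
ABSL_NAMESPACE_END
}  // namespace absl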
12 | #ifndef QUICHE_QUIC_CORE_QUIC_PACKETS_H_
#define QUICHE_QUIC_CORE_QUIC_PACKETS_H_
#include <sys/types.h>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <ostream>
#include <string>
#include <utility>
#include "absl/strings/string_view.h"
#include "quiche/quic/core/frames/quic_frame.h"
#include "quiche/quic/core/quic_ack_listener_interface.h"
#include "quiche/quic/core/quic_bandwidth.h"
#include "quiche/quic/core/quic_constants.h"
#include "quiche/quic/core/quic_error_codes.h"
#include "quiche/quic/core/quic_time.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_versions.h"
#include "quiche/quic/platform/api/quic_export.h"
#include "quiche/quic/platform/api/quic_socket_address.h"
namespace quic {
class QuicPacket;
struct QuicPacketHeader;
QUICHE_EXPORT QuicConnectionId GetServerConnectionIdAsRecipient(
const QuicPacketHeader& header, Perspective perspective);
QUICHE_EXPORT QuicConnectionId GetClientConnectionIdAsRecipient(
const QuicPacketHeader& header, Perspective perspective);
QUICHE_EXPORT QuicConnectionId GetServerConnectionIdAsSender(
const QuicPacketHeader& header, Perspective perspective);
QUICHE_EXPORT QuicConnectionIdIncluded GetServerConnectionIdIncludedAsSender(
const QuicPacketHeader& header, Perspective perspective);
QUICHE_EXPORT QuicConnectionId GetClientConnectionIdAsSender(
const QuicPacketHeader& header, Perspective perspective);
QUICHE_EXPORT QuicConnectionIdIncluded GetClientConnectionIdIncludedAsSender(
const QuicPacketHeader& header, Perspective perspective);
QUICHE_EXPORT uint8_t
GetIncludedConnectionIdLength(QuicConnectionId connection_id,
QuicConnectionIdIncluded connection_id_included);
QUICHE_EXPORT uint8_t
GetIncludedDestinationConnectionIdLength(const QuicPacketHeader& header);
QUICHE_EXPORT uint8_t
GetIncludedSourceConnectionIdLength(const QuicPacketHeader& header);
QUICHE_EXPORT size_t GetPacketHeaderSize(QuicTransportVersion version,
const QuicPacketHeader& header);
QUICHE_EXPORT size_t GetPacketHeaderSize(
QuicTransportVersion version, uint8_t destination_connection_id_length,
uint8_t source_connection_id_length, bool include_version,
bool include_diversification_nonce,
QuicPacketNumberLength packet_number_length,
quiche::QuicheVariableLengthIntegerLength retry_token_length_length,
QuicByteCount retry_token_length,
quiche::QuicheVariableLengthIntegerLength length_length);
QUICHE_EXPORT size_t GetStartOfEncryptedData(QuicTransportVersion version,
const QuicPacketHeader& header);
QUICHE_EXPORT size_t GetStartOfEncryptedData(
QuicTransportVersion version, uint8_t destination_connection_id_length,
uint8_t source_connection_id_length, bool include_version,
bool include_diversification_nonce,
QuicPacketNumberLength packet_number_length,
quiche::QuicheVariableLengthIntegerLength retry_token_length_length,
QuicByteCount retry_token_length,
quiche::QuicheVariableLengthIntegerLength length_length);
struct QUICHE_EXPORT QuicPacketHeader {
QuicPacketHeader();
QuicPacketHeader(const QuicPacketHeader& other);
~QuicPacketHeader();
QuicPacketHeader& operator=(const QuicPacketHeader& other);
QUICHE_EXPORT friend std::ostream& operator<<(std::ostream& os,
const QuicPacketHeader& header);
QuicConnectionId destination_connection_id;
QuicConnectionIdIncluded destination_connection_id_included;
QuicConnectionId source_connection_id;
QuicConnectionIdIncluded source_connection_id_included;
bool reset_flag;
bool version_flag;
bool has_possible_stateless_reset_token;
QuicPacketNumberLength packet_number_length;
uint8_t type_byte;
ParsedQuicVersion version;
DiversificationNonce* nonce;
QuicPacketNumber packet_number;
PacketHeaderFormat form;
QuicLongHeaderType long_packet_type;
StatelessResetToken possible_stateless_reset_token;
quiche::QuicheVariableLengthIntegerLength retry_token_length_length;
absl::string_view retry_token;
quiche::QuicheVariableLengthIntegerLength length_length;
QuicByteCount remaining_packet_length;
bool operator==(const QuicPacketHeader& other) const;
bool operator!=(const QuicPacketHeader& other) const;
};
struct QUICHE_EXPORT QuicPublicResetPacket {
QuicPublicResetPacket();
explicit QuicPublicResetPacket(QuicConnectionId connection_id);
QuicConnectionId connection_id;
QuicPublicResetNonceProof nonce_proof;
QuicSocketAddress client_address;
std::string endpoint_id;
};
struct QUICHE_EXPORT QuicVersionNegotiationPacket {
QuicVersionNegotiationPacket();
explicit QuicVersionNegotiationPacket(QuicConnectionId connection_id);
QuicVersionNegotiationPacket(const QuicVersionNegotiationPacket& other);
~QuicVersionNegotiationPacket();
QuicConnectionId connection_id;
ParsedQuicVersionVector versions;
};
struct QUICHE_EXPORT QuicIetfStatelessResetPacket {
QuicIetfStatelessResetPacket();
QuicIetfStatelessResetPacket(const QuicPacketHeader& header,
StatelessResetToken token);
QuicIetfStatelessResetPacket(const QuicIetfStatelessResetPacket& other);
~QuicIetfStatelessResetPacket();
QuicPacketHeader header;
StatelessResetToken stateless_reset_token;
};
class QUICHE_EXPORT QuicData {
public:
QuicData(const char* buffer, size_t length);
QuicData(const char* buffer, size_t length, bool owns_buffer);
QuicData(absl::string_view data);
QuicData(const QuicData&) = delete;
QuicData& operator=(const QuicData&) = delete;
virtual ~QuicData();
absl::string_view AsStringPiece() const {
return absl::string_view(data(), length());
}
const char* data() const { return buffer_; }
size_t length() const { return length_; }
private:
const char* buffer_;
size_t length_;
bool owns_buffer_;
};
class QUICHE_EXPORT QuicPacket : public QuicData {
public:
QuicPacket(
char* buffer, size_t length, bool owns_buffer,
uint8_t destination_connection_id_length,
uint8_t source_connection_id_length, bool includes_version,
bool includes_diversification_nonce,
QuicPacketNumberLength packet_number_length,
quiche::QuicheVariableLengthIntegerLength retry_token_length_length,
QuicByteCount retry_token_length,
quiche::QuicheVariableLengthIntegerLength length_length);
QuicPacket(QuicTransportVersion version, char* buffer, size_t length,
bool owns_buffer, const QuicPacketHeader& header);
QuicPacket(const QuicPacket&) = delete;
QuicPacket& operator=(const QuicPacket&) = delete;
absl::string_view AssociatedData(QuicTransportVersion version) const;
absl::string_view Plaintext(QuicTransportVersion version) const;
char* mutable_data() { return buffer_; }
private:
char* buffer_;
const uint8_t destination_connection_id_length_;
const uint8_t source_connection_id_length_;
const bool includes_version_;
const bool includes_diversification_nonce_;
const QuicPacketNumberLength packet_number_length_;
const quiche::QuicheVariableLengthIntegerLength retry_token_length_length_;
const QuicByteCount retry_token_length_;
const quiche::QuicheVariableLengthIntegerLength length_length_;
};
class QUICHE_EXPORT QuicEncryptedPacket : public QuicData {
public:
QuicEncryptedPacket(const char* buffer, size_t length);
QuicEncryptedPacket(const char* buffer, size_t length, bool owns_buffer);
QuicEncryptedPacket(absl::string_view data);
QuicEncryptedPacket(const QuicEncryptedPacket&) = delete;
QuicEncryptedPacket& operator=(const QuicEncryptedPacket&) = delete;
std::unique_ptr<QuicEncryptedPacket> Clone() const;
QUICHE_EXPORT friend std::ostream& operator<<(std::ostream& os,
const QuicEncryptedPacket& s);
};
namespace test {
class QuicReceivedPacketPeer;
}
class QUICHE_EXPORT QuicReceivedPacket : public QuicEncryptedPacket {
public:
QuicReceivedPacket(const char* buffer, size_t length, QuicTime receipt_time);
QuicReceivedPacket(const char* buffer, size_t length, QuicTime receipt_time,
bool owns_buffer);
QuicReceivedPacket(const char* buffer, size_t length, QuicTime receipt_time,
bool owns_buffer, int ttl, bool ttl_valid);
QuicReceivedPacket(const char* buffer, size_t length, QuicTime receipt_time,
bool owns_buffer, int ttl, bool ttl_valid,
char* packet_headers, size_t headers_length,
bool owns_header_buffer);
QuicReceivedPacket(const char* buffer, size_t length, QuicTime receipt_time,
bool owns_buffer, int ttl, bool ttl_valid,
char* packet_headers, size_t headers_length,
bool owns_header_buffer, QuicEcnCodepoint ecn_codepoint);
~QuicReceivedPacket();
QuicReceivedPacket(const QuicReceivedPacket&) = delete;
QuicReceivedPacket& operator=(const QuicReceivedPacket&) = delete;
std::unique_ptr<QuicReceivedPacket> Clone() const;
QuicTime receipt_time() const { return receipt_time_; }
int ttl() const { return ttl_; }
char* packet_headers() const { return packet_headers_; }
int headers_length() const { return headers_length_; }
QUICHE_EXPORT friend std::ostream& operator<<(std::ostream& os,
const QuicReceivedPacket& s);
QuicEcnCodepoint ecn_codepoint() const { return ecn_codepoint_; }
private:
friend class test::QuicReceivedPacketPeer;
const QuicTime receipt_time_;
int ttl_;
char* packet_headers_;
int headers_length_;
bool owns_header_buffer_;
QuicEcnCodepoint ecn_codepoint_;
};
struct QUICHE_EXPORT SerializedPacket {
SerializedPacket(QuicPacketNumber packet_number,
QuicPacketNumberLength packet_number_length,
const char* encrypted_buffer,
QuicPacketLength encrypted_length, bool has_ack,
bool has_stop_waiting);
SerializedPacket(const SerializedPacket& other) = delete;
SerializedPacket& operator=(const SerializedPacket& other) = delete;
SerializedPacket(SerializedPacket&& other);
~SerializedPacket();
const char* encrypted_buffer;
QuicPacketLength encrypted_length;
std::function<void(const char*)> release_encrypted_buffer;
QuicFrames retransmittable_frames;
QuicFrames nonretransmittable_frames;
IsHandshake has_crypto_handshake;
QuicPacketNumber packet_number;
QuicPacketNumberLength packet_number_length;
EncryptionLevel encryption_level;
bool has_ack;
bool has_stop_waiting;
bool has_ack_ecn = false;
TransmissionType transmission_type;
QuicPacketNumber largest_acked;
bool has_ack_frame_copy;
bool has_ack_frequency;
bool has_message;
SerializedPacketFate fate;
QuicSocketAddress peer_address;
std::optional<QuicByteCount> bytes_not_retransmitted;
std::optional<QuicPacketHeader> initial_header;
};
QUICHE_EXPORT SerializedPacket* CopySerializedPacket(
const SerializedPacket& serialized,
quiche::QuicheBufferAllocator* allocator, bool copy_buffer);
QUICHE_EXPORT char* CopyBuffer(const SerializedPacket& packet);
QUICHE_EXPORT char* CopyBuffer(const char* encrypted_buffer,
QuicPacketLength encrypted_length);
struct QUICHE_EXPORT QuicPerPacketContext {
virtual ~QuicPerPacketContext() {}
};
struct QUICHE_EXPORT ReceivedPacketInfo {
ReceivedPacketInfo(const QuicSocketAddress& self_address,
const QuicSocketAddress& peer_address,
const QuicReceivedPacket& packet);
ReceivedPacketInfo(const ReceivedPacketInfo& other) = default;
~ReceivedPacketInfo();
std::string ToString() const;
QUICHE_EXPORT friend std::ostream& operator<<(
std::ostream& os, const ReceivedPacketInfo& packet_info);
const QuicSocketAddress& self_address;
const QuicSocketAddress& peer_address;
const QuicReceivedPacket& packet;
PacketHeaderFormat form;
QuicLongHeaderType long_packet_type;
bool version_flag;
bool use_length_prefix;
QuicVersionLabel version_label;
ParsedQuicVersion version;
QuicConnectionId destination_connection_id;
QuicConnectionId source_connection_id;
std::optional<absl::string_view> retry_token;
};
}
#endif
#include "quiche/quic/core/quic_packets.h"
#include <algorithm>
#include <memory>
#include <ostream>
#include <string>
#include <utility>
#include "absl/strings/escaping.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/quic_connection_id.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/core/quic_versions.h"
#include "quiche/quic/platform/api/quic_flags.h"
namespace quic {
QuicConnectionId GetServerConnectionIdAsRecipient(
const QuicPacketHeader& header, Perspective perspective) {
if (perspective == Perspective::IS_SERVER) {
return header.destination_connection_id;
}
return header.source_connection_id;
}
QuicConnectionId GetClientConnectionIdAsRecipient(
const QuicPacketHeader& header, Perspective perspective) {
if (perspective == Perspective::IS_CLIENT) {
return header.destination_connection_id;
}
return header.source_connection_id;
}
QuicConnectionId GetServerConnectionIdAsSender(const QuicPacketHeader& header,
Perspective perspective) {
if (perspective == Perspective::IS_CLIENT) {
return header.destination_connection_id;
}
return header.source_connection_id;
}
QuicConnectionIdIncluded GetServerConnectionIdIncludedAsSender(
const QuicPacketHeader& header, Perspective perspective) {
if (perspective == Perspective::IS_CLIENT) {
return header.destination_connection_id_included;
}
return header.source_connection_id_included;
}
QuicConnectionId GetClientConnectionIdAsSender(const QuicPacketHeader& header,
Perspective perspective) {
if (perspective == Perspective::IS_CLIENT) {
return header.source_connection_id;
}
return header.destination_connection_id;
}
QuicConnectionIdIncluded GetClientConnectionIdIncludedAsSender(
const QuicPacketHeader& header, Perspective perspective) {
if (perspective == Perspective::IS_CLIENT) {
return header.source_connection_id_included;
}
return header.destination_connection_id_included;
}
uint8_t GetIncludedConnectionIdLength(
QuicConnectionId connection_id,
QuicConnectionIdIncluded connection_id_included) {
QUICHE_DCHECK(connection_id_included == CONNECTION_ID_PRESENT ||
connection_id_included == CONNECTION_ID_ABSENT);
return connection_id_included == CONNECTION_ID_PRESENT
? connection_id.length()
: 0;
}
uint8_t GetIncludedDestinationConnectionIdLength(
const QuicPacketHeader& header) {
return GetIncludedConnectionIdLength(
header.destination_connection_id,
header.destination_connection_id_included);
}
uint8_t GetIncludedSourceConnectionIdLength(const QuicPacketHeader& header) {
return GetIncludedConnectionIdLength(header.source_connection_id,
header.source_connection_id_included);
}
size_t GetPacketHeaderSize(QuicTransportVersion version,
const QuicPacketHeader& header) {
return GetPacketHeaderSize(
version, GetIncludedDestinationConnectionIdLength(header),
GetIncludedSourceConnectionIdLength(header), header.version_flag,
header.nonce != nullptr, header.packet_number_length,
header.retry_token_length_length, header.retry_token.length(),
header.length_length);
}
size_t GetPacketHeaderSize(
QuicTransportVersion version, uint8_t destination_connection_id_length,
uint8_t source_connection_id_length, bool include_version,
bool include_diversification_nonce,
QuicPacketNumberLength packet_number_length,
quiche::QuicheVariableLengthIntegerLength retry_token_length_length,
QuicByteCount retry_token_length,
quiche::QuicheVariableLengthIntegerLength length_length) {
if (include_version) {
size_t size = kPacketHeaderTypeSize + kConnectionIdLengthSize +
destination_connection_id_length +
source_connection_id_length + packet_number_length +
kQuicVersionSize;
if (include_diversification_nonce) {
size += kDiversificationNonceSize;
}
if (VersionHasLengthPrefixedConnectionIds(version)) {
size += kConnectionIdLengthSize;
}
QUICHE_DCHECK(
QuicVersionHasLongHeaderLengths(version) ||
retry_token_length_length + retry_token_length + length_length == 0);
if (QuicVersionHasLongHeaderLengths(version)) {
size += retry_token_length_length + retry_token_length + length_length;
}
return size;
}
return kPacketHeaderTypeSize + destination_connection_id_length +
packet_number_length;
}
size_t GetStartOfEncryptedData(QuicTransportVersion version,
const QuicPacketHeader& header) {
return GetPacketHeaderSize(version, header);
}
size_t GetStartOfEncryptedData(
QuicTransportVersion version, uint8_t destination_connection_id_length,
uint8_t source_connection_id_length, bool include_version,
bool include_diversification_nonce,
QuicPacketNumberLength packet_number_length,
quiche::QuicheVariableLengthIntegerLength retry_token_length_length,
QuicByteCount retry_token_length,
quiche::QuicheVariableLengthIntegerLength length_length) {
return GetPacketHeaderSize(
version, destination_connection_id_length, source_connection_id_length,
include_version, include_diversification_nonce, packet_number_length,
retry_token_length_length, retry_token_length, length_length);
}
QuicPacketHeader::QuicPacketHeader()
: destination_connection_id(EmptyQuicConnectionId()),
destination_connection_id_included(CONNECTION_ID_PRESENT),
source_connection_id(EmptyQuicConnectionId()),
source_connection_id_included(CONNECTION_ID_ABSENT),
reset_flag(false),
version_flag(false),
has_possible_stateless_reset_token(false),
packet_number_length(PACKET_4BYTE_PACKET_NUMBER),
type_byte(0),
version(UnsupportedQuicVersion()),
nonce(nullptr),
form(GOOGLE_QUIC_PACKET),
long_packet_type(INITIAL),
possible_stateless_reset_token({}),
retry_token_length_length(quiche::VARIABLE_LENGTH_INTEGER_LENGTH_0),
retry_token(absl::string_view()),
length_length(quiche::VARIABLE_LENGTH_INTEGER_LENGTH_0),
remaining_packet_length(0) {}
QuicPacketHeader::QuicPacketHeader(const QuicPacketHeader& other) = default;
QuicPacketHeader::~QuicPacketHeader() {}
QuicPacketHeader& QuicPacketHeader::operator=(const QuicPacketHeader& other) =
default;
QuicPublicResetPacket::QuicPublicResetPacket()
: connection_id(EmptyQuicConnectionId()), nonce_proof(0) {}
QuicPublicResetPacket::QuicPublicResetPacket(QuicConnectionId connection_id)
: connection_id(connection_id), nonce_proof(0) {}
QuicVersionNegotiationPacket::QuicVersionNegotiationPacket()
: connection_id(EmptyQuicConnectionId()) {}
QuicVersionNegotiationPacket::QuicVersionNegotiationPacket(
QuicConnectionId connection_id)
: connection_id(connection_id) {}
QuicVersionNegotiationPacket::QuicVersionNegotiationPacket(
const QuicVersionNegotiationPacket& other) = default;
QuicVersionNegotiationPacket::~QuicVersionNegotiationPacket() {}
QuicIetfStatelessResetPacket::QuicIetfStatelessResetPacket()
: stateless_reset_token({}) {}
QuicIetfStatelessResetPacket::QuicIetfStatelessResetPacket(
const QuicPacketHeader& header, StatelessResetToken token)
: header(header), stateless_reset_token(token) {}
QuicIetfStatelessResetPacket::QuicIetfStatelessResetPacket(
const QuicIetfStatelessResetPacket& other) = default;
QuicIetfStatelessResetPacket::~QuicIetfStatelessResetPacket() {}
std::ostream& operator<<(std::ostream& os, const QuicPacketHeader& header) {
os << "{ destination_connection_id: " << header.destination_connection_id
<< " ("
<< (header.destination_connection_id_included == CONNECTION_ID_PRESENT
? "present"
: "absent")
<< "), source_connection_id: " << header.source_connection_id << " ("
<< (header.source_connection_id_included == CONNECTION_ID_PRESENT
? "present"
: "absent")
<< "), packet_number_length: "
<< static_cast<int>(header.packet_number_length)
<< ", reset_flag: " << header.reset_flag
<< ", version_flag: " << header.version_flag;
if (header.version_flag) {
os << ", version: " << ParsedQuicVersionToString(header.version);
if (header.long_packet_type != INVALID_PACKET_TYPE) {
os << ", long_packet_type: "
<< QuicUtils::QuicLongHeaderTypetoString(header.long_packet_type);
}
if (header.retry_token_length_length !=
quiche::VARIABLE_LENGTH_INTEGER_LENGTH_0) {
os << ", retry_token_length_length: "
<< static_cast<int>(header.retry_token_length_length);
}
if (header.retry_token.length() != 0) {
os << ", retry_token_length: " << header.retry_token.length();
}
if (header.length_length != quiche::VARIABLE_LENGTH_INTEGER_LENGTH_0) {
os << ", length_length: " << static_cast<int>(header.length_length);
}
if (header.remaining_packet_length != 0) {
os << ", remaining_packet_length: " << header.remaining_packet_length;
}
}
if (header.nonce != nullptr) {
os << ", diversification_nonce: "
<< absl::BytesToHexString(
absl::string_view(header.nonce->data(), header.nonce->size()));
}
os << ", packet_number: " << header.packet_number << " }\n";
return os;
}
QuicData::QuicData(const char* buffer, size_t length)
: buffer_(buffer), length_(length), owns_buffer_(false) {}
QuicData::QuicData(const char* buffer, size_t length, bool owns_buffer)
: buffer_(buffer), length_(length), owns_buffer_(owns_buffer) {}
QuicData::QuicData(absl::string_view packet_data)
: buffer_(packet_data.data()),
length_(packet_data.length()),
owns_buffer_(false) {}
QuicData::~QuicData() {
if (owns_buffer_) {
delete[] const_cast<char*>(buffer_);
}
}
QuicPacket::QuicPacket(
char* buffer, size_t length, bool owns_buffer,
uint8_t destination_connection_id_length,
uint8_t source_connection_id_length, bool includes_version,
bool includes_diversification_nonce,
QuicPacketNumberLength packet_number_length,
quiche::QuicheVariableLengthIntegerLength retry_token_length_length,
QuicByteCount retry_token_length,
quiche::QuicheVariableLengthIntegerLength length_length)
: QuicData(buffer, length, owns_buffer),
buffer_(buffer),
destination_connection_id_length_(destination_connection_id_length),
source_connection_id_length_(source_connection_id_length),
includes_version_(includes_version),
includes_diversification_nonce_(includes_diversification_nonce),
packet_number_length_(packet_number_length),
retry_token_length_length_(retry_token_length_length),
retry_token_length_(retry_token_length),
length_length_(length_length) {}
QuicPacket::QuicPacket(QuicTransportVersion /*version*/, char* buffer,
size_t length, bool owns_buffer,
const QuicPacketHeader& header)
: QuicPacket(buffer, length, owns_buffer,
GetIncludedDestinationConnectionIdLength(header),
GetIncludedSourceConnectionIdLength(header),
header.version_flag, header.nonce != nullptr,
header.packet_number_length, header.retry_token_length_length,
header.retry_token.length(), header.length_length) {}
QuicEncryptedPacket::QuicEncryptedPacket(const char* buffer, size_t length)
: QuicData(buffer, length) {}
QuicEncryptedPacket::QuicEncryptedPacket(const char* buffer, size_t length,
bool owns_buffer)
: QuicData(buffer, length, owns_buffer) {}
QuicEncryptedPacket::QuicEncryptedPacket(absl::string_view data)
: QuicData(data) {}
std::unique_ptr<QuicEncryptedPacket> QuicEncryptedPacket::Clone() const {
char* buffer = new char[this->length()];
std::copy(this->data(), this->data() + this->length(), buffer);
return std::make_unique<QuicEncryptedPacket>(buffer, this->length(), true);
}
std::ostream& operator<<(std::ostream& os, const QuicEncryptedPacket& s) {
os << s.length() << "-byte data";
return os;
}
QuicReceivedPacket::QuicReceivedPacket(const char* buffer, size_t length,
QuicTime receipt_time)
: QuicReceivedPacket(buffer, length, receipt_time,
false /* owns_buffer */) {}
QuicReceivedPacket::QuicReceivedPacket(const char* buffer, size_t length,
QuicTime receipt_time, bool owns_buffer)
: QuicReceivedPacket(buffer, length, receipt_time, owns_buffer, 0 /* ttl */,
true /* ttl_valid */) {}
QuicReceivedPacket::QuicReceivedPacket(const char* buffer, size_t length,
QuicTime receipt_time, bool owns_buffer,
int ttl, bool ttl_valid)
: quic::QuicReceivedPacket(buffer, length, receipt_time, owns_buffer, ttl,
ttl_valid, nullptr /* packet_headers */,
0 /* headers_length */,
false /* owns_header_buffer */, ECN_NOT_ECT) {}
QuicReceivedPacket::QuicReceivedPacket(const char* buffer, size_t length,
QuicTime receipt_time, bool owns_buffer,
int ttl, bool ttl_valid,
char* packet_heade | #include "quiche/quic/core/quic_packets.h"
#include <memory>
#include <string>
#include "absl/memory/memory.h"
#include "quiche/quic/core/quic_time.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
#include "quiche/common/test_tools/quiche_test_utils.h"
namespace quic {
namespace test {
namespace {
QuicPacketHeader CreateFakePacketHeader() {
QuicPacketHeader header;
header.destination_connection_id = TestConnectionId(1);
header.destination_connection_id_included = CONNECTION_ID_PRESENT;
header.source_connection_id = TestConnectionId(2);
header.source_connection_id_included = CONNECTION_ID_ABSENT;
return header;
}
class QuicPacketsTest : public QuicTest {};
TEST_F(QuicPacketsTest, GetServerConnectionIdAsRecipient) {
QuicPacketHeader header = CreateFakePacketHeader();
EXPECT_EQ(TestConnectionId(1),
GetServerConnectionIdAsRecipient(header, Perspective::IS_SERVER));
EXPECT_EQ(TestConnectionId(2),
GetServerConnectionIdAsRecipient(header, Perspective::IS_CLIENT));
}
TEST_F(QuicPacketsTest, GetServerConnectionIdAsSender) {
QuicPacketHeader header = CreateFakePacketHeader();
EXPECT_EQ(TestConnectionId(2),
GetServerConnectionIdAsSender(header, Perspective::IS_SERVER));
EXPECT_EQ(TestConnectionId(1),
GetServerConnectionIdAsSender(header, Perspective::IS_CLIENT));
}
TEST_F(QuicPacketsTest, GetServerConnectionIdIncludedAsSender) {
QuicPacketHeader header = CreateFakePacketHeader();
EXPECT_EQ(CONNECTION_ID_ABSENT, GetServerConnectionIdIncludedAsSender(
header, Perspective::IS_SERVER));
EXPECT_EQ(CONNECTION_ID_PRESENT, GetServerConnectionIdIncludedAsSender(
header, Perspective::IS_CLIENT));
}
TEST_F(QuicPacketsTest, GetClientConnectionIdIncludedAsSender) {
QuicPacketHeader header = CreateFakePacketHeader();
EXPECT_EQ(CONNECTION_ID_PRESENT, GetClientConnectionIdIncludedAsSender(
header, Perspective::IS_SERVER));
EXPECT_EQ(CONNECTION_ID_ABSENT, GetClientConnectionIdIncludedAsSender(
header, Perspective::IS_CLIENT));
}
TEST_F(QuicPacketsTest, GetClientConnectionIdAsRecipient) {
QuicPacketHeader header = CreateFakePacketHeader();
EXPECT_EQ(TestConnectionId(2),
GetClientConnectionIdAsRecipient(header, Perspective::IS_SERVER));
EXPECT_EQ(TestConnectionId(1),
GetClientConnectionIdAsRecipient(header, Perspective::IS_CLIENT));
}
TEST_F(QuicPacketsTest, GetClientConnectionIdAsSender) {
QuicPacketHeader header = CreateFakePacketHeader();
EXPECT_EQ(TestConnectionId(1),
GetClientConnectionIdAsSender(header, Perspective::IS_SERVER));
EXPECT_EQ(TestConnectionId(2),
GetClientConnectionIdAsSender(header, Perspective::IS_CLIENT));
}
TEST_F(QuicPacketsTest, CopyQuicPacketHeader) {
QuicPacketHeader header;
QuicPacketHeader header2 = CreateFakePacketHeader();
EXPECT_NE(header, header2);
QuicPacketHeader header3(header2);
EXPECT_EQ(header2, header3);
}
TEST_F(QuicPacketsTest, CopySerializedPacket) {
std::string buffer(1000, 'a');
quiche::SimpleBufferAllocator allocator;
SerializedPacket packet(QuicPacketNumber(1), PACKET_1BYTE_PACKET_NUMBER,
buffer.data(), buffer.length(), false,
false);
packet.retransmittable_frames.push_back(QuicFrame(QuicWindowUpdateFrame()));
packet.retransmittable_frames.push_back(QuicFrame(QuicStreamFrame()));
QuicAckFrame ack_frame(InitAckFrame(1));
packet.nonretransmittable_frames.push_back(QuicFrame(&ack_frame));
packet.nonretransmittable_frames.push_back(QuicFrame(QuicPaddingFrame(-1)));
std::unique_ptr<SerializedPacket> copy = absl::WrapUnique<SerializedPacket>(
CopySerializedPacket(packet, &allocator, true));
EXPECT_EQ(quic::QuicPacketNumber(1), copy->packet_number);
EXPECT_EQ(PACKET_1BYTE_PACKET_NUMBER, copy->packet_number_length);
ASSERT_EQ(2u, copy->retransmittable_frames.size());
EXPECT_EQ(WINDOW_UPDATE_FRAME, copy->retransmittable_frames[0].type);
EXPECT_EQ(STREAM_FRAME, copy->retransmittable_frames[1].type);
ASSERT_EQ(2u, copy->nonretransmittable_frames.size());
EXPECT_EQ(ACK_FRAME, copy->nonretransmittable_frames[0].type);
EXPECT_EQ(PADDING_FRAME, copy->nonretransmittable_frames[1].type);
EXPECT_EQ(1000u, copy->encrypted_length);
quiche::test::CompareCharArraysWithHexError(
"encrypted_buffer", copy->encrypted_buffer, copy->encrypted_length,
packet.encrypted_buffer, packet.encrypted_length);
std::unique_ptr<SerializedPacket> copy2 = absl::WrapUnique<SerializedPacket>(
CopySerializedPacket(packet, &allocator, false));
EXPECT_EQ(packet.encrypted_buffer, copy2->encrypted_buffer);
EXPECT_EQ(1000u, copy2->encrypted_length);
}
TEST_F(QuicPacketsTest, CloneReceivedPacket) {
char header[4] = "bar";
QuicReceivedPacket packet("foo", 3, QuicTime::Zero(), false, 0, true, header,
sizeof(header) - 1, false,
QuicEcnCodepoint::ECN_ECT1);
std::unique_ptr<QuicReceivedPacket> copy = packet.Clone();
EXPECT_EQ(packet.ecn_codepoint(), copy->ecn_codepoint());
}
}
}
} |
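Supplementary illustration for the QUIC packets entry above (not part of the original Code/Unit Test pair): a minimal sketch, using only GetIncludedConnectionIdLength and the TestConnectionId helper already used in the tests above, showing how the included/absent flag decides a connection ID's contribution to the header size. The test names are hypothetical.

#include "quiche/quic/core/quic_packets.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/quic_test_utils.h"

namespace quic {
namespace test {
namespace {

class QuicPacketsIllustrationTest : public QuicTest {};

// Hypothetical check: a connection ID marked CONNECTION_ID_PRESENT contributes
// its full length to the wire header, while CONNECTION_ID_ABSENT contributes
// nothing.
TEST_F(QuicPacketsIllustrationTest, IncludedConnectionIdLength) {
  QuicConnectionId connection_id = TestConnectionId(1);
  EXPECT_EQ(connection_id.length(),
            GetIncludedConnectionIdLength(connection_id, CONNECTION_ID_PRESENT));
  EXPECT_EQ(0, GetIncludedConnectionIdLength(connection_id, CONNECTION_ID_ABSENT));
}

}  // namespace
}  // namespace test
}  // namespace quic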
13 | #ifndef AROLLA_UTIL_BINARY_SEARCH_H_
#define AROLLA_UTIL_BINARY_SEARCH_H_
#include <cstddef>
#include <cstdint>
#include <optional>
#include "absl/base/attributes.h"
#include "absl/types/span.h"
namespace arolla {
size_t LowerBound(float value, absl::Span<const float> array);
size_t LowerBound(double value, absl::Span<const double> array);
size_t LowerBound(int32_t value, absl::Span<const int32_t> array);
size_t LowerBound(int64_t value, absl::Span<const int64_t> array);
size_t UpperBound(float value, absl::Span<const float> array);
size_t UpperBound(double value, absl::Span<const double> array);
size_t UpperBound(int32_t value, absl::Span<const int32_t> array);
size_t UpperBound(int64_t value, absl::Span<const int64_t> array);
template <typename T, typename Iter>
Iter GallopingLowerBound(Iter begin, Iter end, const T& value);
}
namespace arolla::binary_search_details {
constexpr size_t kSupremacySizeThreshold = 1'000'000;
template <typename T>
size_t LowerBound(T value, absl::Span<const T> array);
template <typename T>
size_t UpperBound(T value, absl::Span<const T> array);
template <typename T, typename Predicate>
inline ABSL_ATTRIBUTE_ALWAYS_INLINE std::optional<size_t> SmallLinearSearch(
absl::Span<const T> array, Predicate predicate) {
if (array.size() <= 2) {
if (array.empty() || predicate(array[0])) {
return 0;
} else if (array.size() == 1 || predicate(array[1])) {
return 1;
}
return 2;
}
return std::nullopt;
}
size_t UpperBoundImpl(float value, absl::Span<const float> array);
size_t UpperBoundImpl(double value, absl::Span<const double> array);
size_t UpperBoundImpl(int32_t value, absl::Span<const int32_t> array);
size_t UpperBoundImpl(int64_t value, absl::Span<const int64_t> array);
size_t LowerBoundImpl(float value, absl::Span<const float> array);
size_t LowerBoundImpl(double value, absl::Span<const double> array);
size_t LowerBoundImpl(int32_t value, absl::Span<const int32_t> array);
size_t LowerBoundImpl(int64_t value, absl::Span<const int64_t> array);
template <typename T>
inline ABSL_ATTRIBUTE_ALWAYS_INLINE size_t
LowerBound(T value, absl::Span<const T> array) {
if (auto result =
SmallLinearSearch(array, [value](T arg) { return !(arg < value); })) {
return *result;
}
return LowerBoundImpl(value, array);
}
template <typename T>
inline ABSL_ATTRIBUTE_ALWAYS_INLINE size_t
UpperBound(T value, absl::Span<const T> array) {
if (auto result =
SmallLinearSearch(array, [value](T arg) { return value < arg; })) {
return *result;
}
return UpperBoundImpl(value, array);
}
}
namespace arolla {
inline size_t LowerBound(float value, absl::Span<const float> array) {
return binary_search_details::LowerBound<float>(value, array);
}
inline size_t LowerBound(double value, absl::Span<const double> array) {
return binary_search_details::LowerBound<double>(value, array);
}
inline size_t LowerBound(int32_t value, absl::Span<const int32_t> array) {
return binary_search_details::LowerBound<int32_t>(value, array);
}
inline size_t LowerBound(int64_t value, absl::Span<const int64_t> array) {
return binary_search_details::LowerBound<int64_t>(value, array);
}
inline size_t UpperBound(float value, absl::Span<const float> array) {
return binary_search_details::UpperBound<float>(value, array);
}
inline size_t UpperBound(double value, absl::Span<const double> array) {
return binary_search_details::UpperBound<double>(value, array);
}
inline size_t UpperBound(int32_t value, absl::Span<const int32_t> array) {
return binary_search_details::UpperBound<int32_t>(value, array);
}
inline size_t UpperBound(int64_t value, absl::Span<const int64_t> array) {
return binary_search_details::UpperBound<int64_t>(value, array);
}
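// Exponential (galloping) search for the first element not less than `value`:
// probes offsets 1, 2, 4, ... past the last element known to be < value, then
// binary-searches back down. Fast when the answer is close to `begin`.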
template <typename T, typename Iter>
Iter GallopingLowerBound(Iter begin, Iter end, const T& value) {
size_t i = 0;
size_t size = end - begin;
if (begin >= end || !(*begin < value)) {
return std::min<Iter>(begin, end);
}
size_t d = 1;
while (i + d < size && begin[i + d] < value) {
i += d;
d <<= 1;
}
while (d > 1) {
d >>= 1;
if (i + d < size && begin[i + d] < value) {
i += d;
}
}
return begin + i + 1;
}
}
#endif
#include "arolla/util/binary_search.h"
#include <cassert>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include "absl/types/span.h"
#include "arolla/util/bits.h"
#include "arolla/util/switch_index.h"
namespace arolla::binary_search_details {
namespace {
template <size_t kArraySize, typename T, class Predicate>
size_t FastBinarySearchT(const T* const array, Predicate predicate) {
static_assert((kArraySize & (kArraySize + 1)) == 0);
size_t offset = 0;
for (size_t k = kArraySize; k > 0;) {
k >>= 1;
offset = (!predicate(array[offset + k]) ? offset + k + 1 : offset);
}
return offset;
}
template <typename T, typename Predicate>
size_t BinarySearchT(absl::Span<const T> array, Predicate predicate) {
assert(!array.empty());
const int log2_size = BitScanReverse(array.size());
return switch_index<8 * sizeof(size_t)>(
log2_size, [array, predicate](auto constexpr_log2_size) {
constexpr size_t size =
(1ULL << static_cast<int>(constexpr_log2_size)) - 1;
size_t offset = 0;
#if !defined(__clang__) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Warray-bounds"
#endif
offset = (!predicate(array[size]) ? array.size() - size : offset);
#if !defined(__clang__) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
return offset +
FastBinarySearchT<size>(array.begin() + offset, predicate);
});
}
}
size_t LowerBoundImpl(float value, absl::Span<const float> array) {
return BinarySearchT(array, [value](auto arg) { return !(arg < value); });
}
size_t LowerBoundImpl(double value, absl::Span<const double> array) {
return BinarySearchT(array, [value](auto arg) { return !(arg < value); });
}
size_t LowerBoundImpl(int32_t value, absl::Span<const int32_t> array) {
return BinarySearchT(array, [value](auto arg) { return arg >= value; });
}
size_t LowerBoundImpl(int64_t value, absl::Span<const int64_t> array) {
return BinarySearchT(array, [value](auto arg) { return arg >= value; });
}
size_t UpperBoundImpl(float value, absl::Span<const float> array) {
if (std::isnan(value)) {
return array.size();
}
return BinarySearchT(array, [value](auto arg) { return !(arg <= value); });
}
size_t UpperBoundImpl(double value, absl::Span<const double> array) {
if (std::isnan(value)) {
return array.size();
}
return BinarySearchT(array, [value](auto arg) { return !(arg <= value); });
}
size_t UpperBoundImpl(int32_t value, absl::Span<const int32_t> array) {
return BinarySearchT(array, [value](auto arg) { return arg > value; });
}
size_t UpperBoundImpl(int64_t value, absl::Span<const int64_t> array) {
return BinarySearchT(array, [value](auto arg) { return arg > value; });
}
} | #include "arolla/util/binary_search.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <limits>
#include <random>
#include <vector>
#include "gtest/gtest.h"
#include "absl/types/span.h"
namespace arolla {
namespace {
size_t StdLowerBound(float value, absl::Span<const float> array) {
return std::lower_bound(array.begin(), array.end(), value) - array.begin();
}
size_t StdLowerBound(double value, absl::Span<const double> array) {
return std::lower_bound(array.begin(), array.end(), value) - array.begin();
}
size_t StdLowerBound(int32_t value, absl::Span<const int32_t> array) {
return std::lower_bound(array.begin(), array.end(), value) - array.begin();
}
size_t StdLowerBound(int64_t value, absl::Span<const int64_t> array) {
return std::lower_bound(array.begin(), array.end(), value) - array.begin();
}
size_t RlGallopingLowerBound(float value, absl::Span<const float> array) {
return GallopingLowerBound(array.begin(), array.end(), value) - array.begin();
}
TEST(Algorithms, LowerBound_General) {
for (int n : {0, 1, 5, 7, 100, 1000}) {
std::vector<float> thresholds(n);
for (int i = 0; i < n; ++i) {
thresholds[i] = 2 * i + 1;
}
for (int i = 0; i < static_cast<int>(2 * thresholds.size()); ++i) {
size_t expected = StdLowerBound(i, thresholds);
ASSERT_EQ(LowerBound(i, thresholds), expected);
ASSERT_EQ(RlGallopingLowerBound(i, thresholds), expected);
}
ASSERT_EQ(LowerBound(-10 * n, thresholds),
StdLowerBound(-10 * n, thresholds));
ASSERT_EQ(LowerBound(10 * n, thresholds),
StdLowerBound(10 * n, thresholds));
}
}
TEST(Algorithms, LowerBound_Duplicates) {
for (int n : {2, 140}) {
std::vector<float> thresholds(n, 0.);
ASSERT_EQ(LowerBound(-1, thresholds), 0);
ASSERT_EQ(LowerBound(0., thresholds), 0);
ASSERT_EQ(LowerBound(1., thresholds), n);
ASSERT_EQ(RlGallopingLowerBound(-1, thresholds), 0);
ASSERT_EQ(RlGallopingLowerBound(0., thresholds), 0);
ASSERT_EQ(RlGallopingLowerBound(1., thresholds), n);
}
}
TEST(Algorithms, LowerBound_Infs) {
const auto kInf = std::numeric_limits<float>::infinity();
for (int n : {2, 140}) {
std::vector<float> thresholds(n);
for (int i = 0; i < n; ++i) {
thresholds.push_back(i);
}
thresholds.front() = -kInf;
thresholds.back() = kInf;
ASSERT_EQ(LowerBound(-kInf, thresholds), StdLowerBound(-kInf, thresholds));
ASSERT_EQ(LowerBound(kInf, thresholds), StdLowerBound(kInf, thresholds));
ASSERT_EQ(RlGallopingLowerBound(kInf, thresholds),
StdLowerBound(kInf, thresholds));
}
}
TEST(Algorithms, LowerBound_Nan) {
const auto kNan = std::numeric_limits<float>::quiet_NaN();
const auto kInf = std::numeric_limits<float>::infinity();
for (int n : {2, 140}) {
std::vector<float> thresholds;
for (int i = 0; i < n; ++i) {
thresholds.push_back(i);
}
thresholds.front() = -kInf;
thresholds.back() = kInf;
ASSERT_EQ(LowerBound(kNan, thresholds), StdLowerBound(kNan, thresholds));
ASSERT_EQ(RlGallopingLowerBound(kNan, thresholds),
StdLowerBound(kNan, thresholds));
}
}
size_t StdUpperBound(float value, absl::Span<const float> array) {
return std::upper_bound(array.begin(), array.end(), value) - array.begin();
}
size_t StdUpperBound(double value, absl::Span<const double> array) {
return std::upper_bound(array.begin(), array.end(), value) - array.begin();
}
size_t StdUpperBound(int32_t value, absl::Span<const int32_t> array) {
return std::upper_bound(array.begin(), array.end(), value) - array.begin();
}
size_t StdUpperBound(int64_t value, absl::Span<const int64_t> array) {
return std::upper_bound(array.begin(), array.end(), value) - array.begin();
}
TEST(Algorithms, UpperBound_General) {
for (int n : {0, 1, 5, 7, 100, 1000}) {
std::vector<float> thresholds(n);
for (int i = 0; i < n; ++i) {
thresholds[i] = 2 * i + 1;
}
for (int i = 0; i < static_cast<int>(2 * thresholds.size()); ++i) {
ASSERT_EQ(UpperBound(i, thresholds), StdUpperBound(i, thresholds));
}
ASSERT_EQ(UpperBound(-10 * n, thresholds),
StdUpperBound(-10 * n, thresholds));
ASSERT_EQ(UpperBound(10 * n, thresholds),
StdUpperBound(10 * n, thresholds));
}
}
TEST(Algorithms, UpperBound_Duplicates) {
for (int n : {2, 140}) {
std::vector<float> thresholds(n, 0.);
ASSERT_EQ(UpperBound(-1, thresholds), StdUpperBound(-1., thresholds));
ASSERT_EQ(UpperBound(0., thresholds), StdUpperBound(0., thresholds));
}
}
TEST(Algorithms, UpperBound_Infs) {
const auto kInf = std::numeric_limits<float>::infinity();
for (int n : {2, 140}) {
std::vector<float> thresholds(n);
for (int i = 0; i < n; ++i) {
thresholds.push_back(i);
}
thresholds.front() = -kInf;
thresholds.back() = kInf;
ASSERT_EQ(UpperBound(-kInf, thresholds), StdUpperBound(-kInf, thresholds));
ASSERT_EQ(UpperBound(kInf, thresholds), StdUpperBound(kInf, thresholds));
}
}
TEST(Algorithms, UpperBound_Nan) {
const auto kNan = std::numeric_limits<float>::quiet_NaN();
const auto kInf = std::numeric_limits<float>::infinity();
for (int n : {2, 140}) {
std::vector<float> thresholds;
for (int i = 0; i < n; ++i) {
thresholds.push_back(i);
}
thresholds.front() = -kInf;
thresholds.back() = kInf;
ASSERT_EQ(UpperBound(kNan, thresholds), StdUpperBound(kNan, thresholds));
}
}
template <typename T>
std::vector<T> RandomVector(size_t seed, size_t size) {
std::mt19937 gen(seed);
std::vector<T> result(size);
if constexpr (std::is_integral_v<T>) {
std::uniform_int_distribution<T> uniform(0, 1 << 30);
for (auto& x : result) {
x = uniform(gen);
}
} else {
std::uniform_real_distribution<T> uniform01;
for (auto& x : result) {
x = uniform01(gen);
}
}
return result;
}
template <typename T>
std::vector<T> Sorted(std::vector<T> vec) {
std::sort(vec.begin(), vec.end());
return vec;
}
template <typename T>
using AlgoFn = std::function<size_t(T, const std::vector<T>&)>;
template <typename T>
void BinarySearchStressTest(size_t size, AlgoFn<T> algoFn,
AlgoFn<T> referenceAlgoFn) {
const auto seed = 34 + size;
const auto array = Sorted(RandomVector<T>(seed, size));
for (auto value : RandomVector<T>(seed, 2 * size)) {
const auto actual_value = algoFn(value, array);
const auto expected_value = referenceAlgoFn(value, array);
if (actual_value != expected_value) {
ADD_FAILURE() << "Actual value: " << actual_value << '\n'
<< "Expected value: " << expected_value << '\n'
<< "size: " << size;
return;
}
}
}
TEST(Algorithms, LowerBound_Stress) {
for (int size : {10, 100, 1000, 100000}) {
BinarySearchStressTest<float>(
size,
[](float value, absl::Span<const float> array) {
return LowerBound(value, array);
},
[](float value, absl::Span<const float> array) {
return StdLowerBound(value, array);
});
BinarySearchStressTest<float>(
size,
[](float value, absl::Span<const float> array) {
return RlGallopingLowerBound(value, array);
},
[](float value, absl::Span<const float> array) {
return StdLowerBound(value, array);
});
BinarySearchStressTest<double>(
size,
[](double value, absl::Span<const double> array) {
return LowerBound(value, array);
},
[](double value, absl::Span<const double> array) {
return StdLowerBound(value, array);
});
BinarySearchStressTest<int32_t>(
size,
[](int32_t value, absl::Span<const int32_t> array) {
return LowerBound(value, array);
},
[](int32_t value, absl::Span<const int32_t> array) {
return StdLowerBound(value, array);
});
BinarySearchStressTest<int64_t>(
size,
[](int64_t value, absl::Span<const int64_t> array) {
return LowerBound(value, array);
},
[](int64_t value, absl::Span<const int64_t> array) {
return StdLowerBound(value, array);
});
}
}
TEST(Algorithms, UpperBound_Stress) {
for (int size : {10, 100, 1000, 100000}) {
BinarySearchStressTest<float>(
size,
[](float value, absl::Span<const float> array) {
return UpperBound(value, array);
},
[](float value, absl::Span<const float> array) {
return StdUpperBound(value, array);
});
BinarySearchStressTest<double>(
size,
[](double value, absl::Span<const double> array) {
return UpperBound(value, array);
},
[](double value, absl::Span<const double> array) {
return StdUpperBound(value, array);
});
BinarySearchStressTest<int32_t>(
size,
[](int32_t value, absl::Span<const int32_t> array) {
return UpperBound(value, array);
},
[](int32_t value, absl::Span<const int32_t> array) {
return StdUpperBound(value, array);
});
BinarySearchStressTest<int64_t>(
size,
[](int64_t value, absl::Span<const int64_t> array) {
return UpperBound(value, array);
},
[](int64_t value, absl::Span<const int64_t> array) {
return StdUpperBound(value, array);
});
}
}
}
} |
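Supplementary illustration for the binary-search entry above (not part of the original Code/Unit Test pair): a minimal sketch, using only LowerBound, UpperBound, and GallopingLowerBound from the header above, showing that the two bounds bracket the run of equal elements and that the galloping variant agrees on the insertion point. The test name is hypothetical.

#include <cstdint>
#include <vector>

#include "gtest/gtest.h"
#include "absl/types/span.h"
#include "arolla/util/binary_search.h"

namespace arolla {
namespace {

// Hypothetical check on a small sorted array: LowerBound/UpperBound delimit
// the run of elements equal to the probe value, so their difference is the
// number of occurrences; GallopingLowerBound returns the same insertion point.
TEST(BinarySearchIllustration, BoundsBracketEqualRun) {
  const std::vector<int32_t> values = {1, 3, 3, 3, 7, 9};
  const absl::Span<const int32_t> span(values);
  EXPECT_EQ(1u, LowerBound(3, span));
  EXPECT_EQ(4u, UpperBound(3, span));
  EXPECT_EQ(3u, UpperBound(3, span) - LowerBound(3, span));
  EXPECT_EQ(1, GallopingLowerBound(values.begin(), values.end(), 3) -
                   values.begin());
}

}  // namespace
}  // namespace arolla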