| project (string) | commit_id (string) | target (int64) | func (string) | cwe (string) | big_vul_idx (string) | idx (int64) | hash (string) | size (float64) | message (string) | dataset (string) |
|---|---|---|---|---|---|---|---|---|---|---|
mruby
|
27d1e0132a0804581dca28df042e7047fd27eaa8
| 0
|
mrb_ary_reverse_bang(mrb_state *mrb, mrb_value self)
{
  struct RArray *a = mrb_ary_ptr(self);
  mrb_int len = ARY_LEN(a);
  if (len > 1) {
    mrb_value *p1, *p2;
    ary_modify(mrb, a);
    p1 = ARY_PTR(a);
    p2 = p1 + len - 1;
    while (p1 < p2) {
      mrb_value tmp = *p1;
      *p1++ = *p2;
      *p2-- = tmp;
    }
  }
  return self;
}
| null | null | 220,446
|
2226019624007970616646917570754131620
| 20
|
array.c: fix `mrb_ary_shift_m` initialization bug.
The `ARY_PTR` and `ARY_LEN` may be modified in `mrb_get_args`.
|
other
|
mruby
|
27d1e0132a0804581dca28df042e7047fd27eaa8
| 0
|
ary_dup(mrb_state *mrb, struct RArray *a)
{
return ary_new_from_values(mrb, ARY_LEN(a), ARY_PTR(a));
}
| null | null | 220,447
|
159778114224329790309891216657894947255
| 4
|
array.c: fix `mrb_ary_shift_m` initialization bug.
The `ARY_PTR` and `ARY_LEN` may be modified in `mrb_get_args`.
|
other
|
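Both mruby rows above carry the same commit message: `ARY_PTR` and `ARY_LEN` may be modified inside `mrb_get_args`, so values cached before that call can go stale. The following is a minimal illustrative sketch of that hazard, not the actual patch; `ary_example` is a made-up function name, while `mrb_get_args`, `mrb_ary_ptr`, `ARY_PTR`, and `ARY_LEN` are the mruby APIs already visible in the listings above.

```
/* Hypothetical sketch (not the actual patch): pointers and lengths read via
 * ARY_PTR()/ARY_LEN() before mrb_get_args() can be stale, because argument
 * conversion may run Ruby code, modify the receiver, or trigger GC. */
#include <mruby.h>
#include <mruby/array.h>

static mrb_value
ary_example(mrb_state *mrb, mrb_value self)
{
  struct RArray *a = mrb_ary_ptr(self);
  mrb_value blk;

  /* Risky: caching ARY_PTR(a)/ARY_LEN(a) here would pin pre-call state. */
  mrb_get_args(mrb, "&", &blk);   /* may reallocate or shrink the array */

  /* Safe: re-read pointer and length only after mrb_get_args(). */
  mrb_value *p = ARY_PTR(a);
  mrb_int len = ARY_LEN(a);
  (void)blk; (void)p; (void)len;
  return self;
}
```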
tensorflow
|
e7f497570abb6b4ae5af4970620cd880e4c0c904
| 0
|
void operator()(OpKernelContext* ctx, bool use_cudnn, bool cudnn_use_autotune,
const Tensor& input, const Tensor& filter, int row_dilation,
int col_dilation, int row_stride, int col_stride,
const Padding& padding,
const std::vector<int64_t>& explicit_paddings, Tensor* output,
TensorFormat data_format) {
if (data_format != FORMAT_NHWC) {
ctx->SetStatus(
errors::Unimplemented("The Conv2D op currently only supports the "
"NHWC tensor format for integer types. "
"The op was given the format: ",
ToString(data_format)));
return;
}
const int64_t in_depth = GetTensorDim(input, data_format, 'C');
OP_REQUIRES(ctx, in_depth == filter.dim_size(2),
errors::Unimplemented(
"The Conv2D op currently does not support grouped "
"convolutions for integer types. A grouped convolution was "
"attempted to be run because the input depth of ",
in_depth, " does not match the filter input depth of ",
filter.dim_size(2)));
OP_REQUIRES(
ctx, filter.NumElements() > 0,
errors::InvalidArgument("filter must not have zero elements "
"(i.e. all dimensions must be non-zero)"));
for (int64_t explicit_padding : explicit_paddings) {
if (!FastBoundsCheck(explicit_padding, std::numeric_limits<int>::max())) {
ctx->SetStatus(errors::InvalidArgument("filter too large"));
return;
}
}
LaunchGeneric<GPUDevice, int32>()(
ctx, input, filter, row_stride, col_stride, row_dilation, col_dilation,
padding, explicit_paddings, output, data_format);
}
| null | null | 220,448
|
149528448962395516074129353959836990884
| 37
|
Fix segfault on OOM in Conv2D.
PiperOrigin-RevId: 404655317
Change-Id: I33588dbd3f5d0fef980e3c908bf5515a9ee09ce7
|
other
|
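The row above and the TensorFlow rows that follow share the commit message "Fix segfault on OOM in Conv2D". The grouped-convolution code later in this table allocates temporary shuffle buffers with `allocate_temp`; if such an allocation fails under memory pressure and its status is ignored, the kernel dereferences an unallocated tensor. A minimal sketch of that pattern, with a hypothetical helper name (`AllocateScratchOrFail`), assuming the standard `OpKernelContext` API:

```
// Sketch only (hypothetical helper, not the actual patch): check the status
// of allocate_temp() instead of using a tensor whose buffer may never have
// been allocated.
#include "tensorflow/core/framework/op_kernel.h"

namespace tf = tensorflow;

void AllocateScratchOrFail(tf::OpKernelContext* ctx,
                           const tf::TensorShape& shape, tf::Tensor* scratch) {
  // If allocation fails, OP_REQUIRES_OK records the error on the context and
  // returns early; touching `scratch` without this check is what segfaults.
  OP_REQUIRES_OK(ctx, ctx->allocate_temp(tf::DT_FLOAT, shape, scratch));
  auto flat = scratch->flat<float>();  // safe only after the check above
  flat.setZero();
}
```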
tensorflow
|
e7f497570abb6b4ae5af4970620cd880e4c0c904
| 0
|
void operator()(OpKernelContext* ctx, const Tensor& input,
const Tensor& filter, int row_stride, int col_stride,
int row_dilation, int col_dilation, const Padding& padding,
const std::vector<int64_t>& explicit_paddings, Tensor* output,
TensorFormat data_format) {
DCHECK(data_format == FORMAT_NHWC)
<< "Grouped conv implementation only "
"supports NHWC tensor format for now.";
const int64_t in_depth = input.dim_size(3);
const int64_t patch_depth = filter.dim_size(2);
const int64_t num_groups = in_depth / patch_depth;
// Shuffle input/filter tensors to have group as a leading dimension.
std::array<int64_t, 5> shuffle({3, 0, 1, 2, 4});
// Compute pre shuffle dimensions.
auto pre_shuffle = [&](const Tensor& tensor) -> std::array<int64, 5> {
return {tensor.dim_size(0), tensor.dim_size(1), tensor.dim_size(2),
num_groups, tensor.dim_size(3) / num_groups};
};
// Compute post shuffle dimensions.
auto post_shuffle = [&](const Tensor& tensor) -> std::array<int64, 5> {
return {num_groups, tensor.dim_size(0), tensor.dim_size(1),
tensor.dim_size(2), tensor.dim_size(3) / num_groups};
};
auto& device = ctx->eigen_device<CPUDevice>();
absl::BlockingCounter shuffles_completed(2);
auto on_shuffled = [&]() { shuffles_completed.DecrementCount(); };
// Shuffle input into temporary tensor.
Tensor input_shuffled;
OP_REQUIRES_OK(
ctx, ctx->allocate_temp(input.dtype(), TensorShape(post_shuffle(input)),
&input_shuffled));
input_shuffled.tensor<T, 5>().device(device, on_shuffled) =
input.shaped<T, 5>(pre_shuffle(input)).shuffle(shuffle);
// Shuffle filter into temporary tensor.
Tensor filter_shuffled;
OP_REQUIRES_OK(ctx, ctx->allocate_temp(filter.dtype(),
TensorShape(post_shuffle(filter)),
&filter_shuffled));
filter_shuffled.tensor<T, 5>().device(device, on_shuffled) =
filter.shaped<T, 5>(pre_shuffle(filter)).shuffle(shuffle);
// Wait for the completion of input/filter shuffles.
shuffles_completed.Wait();
// Write group convolution results into temporary output tensor.
Tensor output_shuffled;
OP_REQUIRES_OK(ctx, ctx->allocate_temp(output->dtype(),
TensorShape(post_shuffle(*output)),
&output_shuffled));
for (int64_t i = 0; i < num_groups; ++i) {
// TODO(ezhulenev): Run this loop using `parallelFor` (regular parallelFor
// will lead to deadlock, SpatialConvolution has to use async Eigen
// assignment). This requires small changes to Eigen to support async
// execution for tensor chipping operation.
// TODO(ezhulenev): Grouped convolution should also support 1x1 filter
// optimization.
auto input_slice = input_shuffled.tensor<T, 5>().template chip<0>(i);
auto filter_slice = filter_shuffled.tensor<T, 5>().template chip<0>(i);
auto output_slice = output_shuffled.tensor<T, 5>().template chip<0>(i);
if (padding == EXPLICIT) {
functor::SpatialConvolution<CPUDevice, T>()(
ctx->eigen_device<CPUDevice>(), output_slice, input_slice,
filter_slice, row_stride, col_stride, row_dilation, col_dilation,
static_cast<int>(explicit_paddings[2]),
static_cast<int>(explicit_paddings[3]),
static_cast<int>(explicit_paddings[4]),
static_cast<int>(explicit_paddings[5]));
} else {
functor::SpatialConvolution<CPUDevice, T>()(
ctx->eigen_device<CPUDevice>(), output_slice, input_slice,
filter_slice, row_stride, col_stride, row_dilation, col_dilation,
BrainPadding2EigenPadding(padding));
}
}
// Shuffle temporary output back into pre-shuffled shape.
std::array<int64_t, 5> rev_shuffle({1, 2, 3, 0, 4});
output->shaped<T, 5>(pre_shuffle(*output)).device(device) =
output_shuffled.tensor<T, 5>().shuffle(rev_shuffle);
}
| null | null | 220,449
|
177872836233250034881909843106902092557
| 92
|
Fix segfault on OOM in Conv2D.
PiperOrigin-RevId: 404655317
Change-Id: I33588dbd3f5d0fef980e3c908bf5515a9ee09ce7
|
other
|
tensorflow
|
e7f497570abb6b4ae5af4970620cd880e4c0c904
| 0
|
Status ComputeConv2DDimension(const Conv2DParameters& params,
const Tensor& input, const Tensor& filter,
Conv2DDimensions* dimensions) {
// Check that 2D convolution input and filter have exactly 4 dimensions.
TF_REQUIRES(input.dims() == 4,
errors::InvalidArgument("input must be 4-dimensional",
input.shape().DebugString()));
TF_REQUIRES(filter.dims() == 4,
errors::InvalidArgument("filter must be 4-dimensional: ",
filter.shape().DebugString()));
for (int i = 0; i < 3; i++) {
TF_REQUIRES(
FastBoundsCheck(filter.dim_size(i), std::numeric_limits<int>::max()),
errors::InvalidArgument("filter too large"));
}
// The last dimension for input is in_depth. Check that it is the same as the
// filter's in_depth or it is evenly divisible by filter's in_depth.
const int64_t in_depth_raw = GetTensorDim(input, params.data_format, 'C');
const int64_t patch_depth_raw = filter.dim_size(2);
TF_REQUIRES(FastBoundsCheck(in_depth_raw, std::numeric_limits<int>::max()),
errors::InvalidArgument("Input depth too large"));
TF_REQUIRES(FastBoundsCheck(patch_depth_raw, std::numeric_limits<int>::max()),
errors::InvalidArgument("Patch depth too large"));
const int in_depth = static_cast<int>(in_depth_raw);
const int patch_depth = static_cast<int>(patch_depth_raw);
TF_REQUIRES(patch_depth > 0,
errors::InvalidArgument(
"filter depth must be stricly positive, got ", patch_depth));
TF_REQUIRES(in_depth % patch_depth == 0,
errors::InvalidArgument(
"input depth must be evenly divisible by filter depth: ",
in_depth, " vs ", patch_depth));
// The last dimension for filter is out_depth.
const int out_depth = static_cast<int>(filter.dim_size(3));
// The second dimension for input is rows/height.
// The first dimension for filter is rows/height.
const int64_t input_rows_raw = GetTensorDim(input, params.data_format, 'H');
TF_REQUIRES(FastBoundsCheck(input_rows_raw, std::numeric_limits<int>::max()),
errors::InvalidArgument("Input rows too large"));
const int input_rows = static_cast<int>(input_rows_raw);
const int filter_rows = static_cast<int>(filter.dim_size(0));
// The third dimension for input is columns/width.
// The second dimension for filter is columns/width.
const int64_t input_cols_raw = GetTensorDim(input, params.data_format, 'W');
TF_REQUIRES(FastBoundsCheck(input_cols_raw, std::numeric_limits<int>::max()),
errors::InvalidArgument("Input cols too large"));
const int input_cols = static_cast<int>(input_cols_raw);
const int filter_cols = static_cast<int>(filter.dim_size(1));
// The first dimension for input is batch.
const int64_t batch_raw = GetTensorDim(input, params.data_format, 'N');
TF_REQUIRES(FastBoundsCheck(batch_raw, std::numeric_limits<int>::max()),
errors::InvalidArgument("batch is too large"));
const int batch = static_cast<int>(batch_raw);
// Take the stride and dilation from the second and third dimensions only (we
// do not support striding or dilation on the batch or depth dimension).
const int stride_rows = GetTensorDim(params.strides, params.data_format, 'H');
const int stride_cols = GetTensorDim(params.strides, params.data_format, 'W');
const int dilation_rows =
GetTensorDim(params.dilations, params.data_format, 'H');
const int dilation_cols =
GetTensorDim(params.dilations, params.data_format, 'W');
int64_t pad_rows_before, pad_rows_after, pad_cols_before, pad_cols_after;
if (params.padding == Padding::EXPLICIT) {
GetExplicitPaddingForDim(params.explicit_paddings, params.data_format, 'H',
&pad_rows_before, &pad_rows_after);
GetExplicitPaddingForDim(params.explicit_paddings, params.data_format, 'W',
&pad_cols_before, &pad_cols_after);
}
// Compute windowed output sizes for rows and columns.
int64_t out_rows = 0, out_cols = 0;
TF_RETURN_IF_ERROR(GetWindowedOutputSizeVerboseV2(
input_rows, filter_rows, dilation_rows, stride_rows, params.padding,
&out_rows, &pad_rows_before, &pad_rows_after));
TF_RETURN_IF_ERROR(GetWindowedOutputSizeVerboseV2(
input_cols, filter_cols, dilation_cols, stride_cols, params.padding,
&out_cols, &pad_cols_before, &pad_cols_after));
dimensions->batch = batch;
dimensions->input_rows = input_rows;
dimensions->input_cols = input_cols;
dimensions->in_depth = in_depth;
dimensions->filter_rows = filter_rows;
dimensions->filter_cols = filter_cols;
dimensions->patch_depth = patch_depth;
dimensions->out_depth = out_depth;
dimensions->stride_rows = stride_rows;
dimensions->stride_cols = stride_cols;
dimensions->dilation_rows = dilation_rows;
dimensions->dilation_cols = dilation_cols;
dimensions->out_rows = out_rows;
dimensions->out_cols = out_cols;
dimensions->pad_rows_before = pad_rows_before;
dimensions->pad_rows_after = pad_rows_after;
dimensions->pad_cols_before = pad_cols_before;
dimensions->pad_cols_after = pad_cols_after;
return Status::OK();
}
| null | null | 220,450
|
136314051619457390726425560316926455293
| 106
|
Fix segfault on OOM in Conv2D.
PiperOrigin-RevId: 404655317
Change-Id: I33588dbd3f5d0fef980e3c908bf5515a9ee09ce7
|
other
|
tensorflow
|
e7f497570abb6b4ae5af4970620cd880e4c0c904
| 0
|
int64_t GetDnnWorkspaceLimit(const string& envvar_in_mb,
int64_t default_value_in_bytes) {
const char* workspace_limit_in_mb_str = getenv(envvar_in_mb.c_str());
if (workspace_limit_in_mb_str != nullptr &&
strcmp(workspace_limit_in_mb_str, "") != 0) {
int64_t scratch_limit_in_mb = -1;
if (strings::safe_strto64(workspace_limit_in_mb_str,
&scratch_limit_in_mb)) {
return scratch_limit_in_mb * (1 << 20);
} else {
LOG(WARNING) << "Invalid value for env-var " << envvar_in_mb << ": "
<< workspace_limit_in_mb_str;
}
}
return default_value_in_bytes;
}
| null | null | 220,451
|
19528478458073325083315038612480693646
| 16
|
Fix segfault on OOM in Conv2D.
PiperOrigin-RevId: 404655317
Change-Id: I33588dbd3f5d0fef980e3c908bf5515a9ee09ce7
|
other
|
tensorflow
|
e7f497570abb6b4ae5af4970620cd880e4c0c904
| 0
|
static bool Run(OpKernelContext* ctx, const Tensor& input,
const Tensor& filter, int batch, int input_rows,
int input_cols, int in_depth, int filter_rows,
int filter_cols, int pad_rows, int pad_cols, int out_rows,
int out_cols, int out_depth, int dilation_rows,
int dilation_cols, int stride_rows, int stride_cols,
Tensor* output, TensorFormat data_format) {
if (data_format != FORMAT_NHWC || dilation_rows != 1 ||
dilation_cols != 1 ||
!CanUseDeepConv2D(stride_rows, stride_cols, filter_rows, filter_cols,
in_depth, out_depth, out_rows, out_cols)) {
return false;
}
Conv2DArgs args;
args.batch = batch;
args.in_rows = input_rows;
args.in_cols = input_cols;
args.in_depth = in_depth;
args.filter_rows = filter_rows;
args.filter_cols = filter_cols;
args.pad_rows = pad_rows;
args.pad_cols = pad_cols;
args.out_rows = out_rows;
args.out_cols = out_cols;
args.out_depth = out_depth;
auto input_ptr = input.template flat<float>().data();
auto filter_ptr = filter.template flat<float>().data();
auto output_ptr = output->template flat<float>().data();
functor::DeepConv2D<CPUDevice, float>()(ctx, args, input_ptr, filter_ptr,
output_ptr);
return true;
}
| null | null | 220,452
|
147180028757827797186804790613249452079
| 35
|
Fix segfault on OOM in Conv2D.
PiperOrigin-RevId: 404655317
Change-Id: I33588dbd3f5d0fef980e3c908bf5515a9ee09ce7
|
other
|
tensorflow
|
e7f497570abb6b4ae5af4970620cd880e4c0c904
| 0
|
Status InitConv2DParameters(const OpKernelConstruction* context,
Conv2DParameters* params) {
TF_RETURN_IF_ERROR(context->GetAttr("dilations", ¶ms->dilations));
TF_RETURN_IF_ERROR(context->GetAttr("strides", ¶ms->strides));
TF_RETURN_IF_ERROR(context->GetAttr("padding", ¶ms->padding));
if (context->HasAttr("explicit_paddings")) {
TF_RETURN_IF_ERROR(
context->GetAttr("explicit_paddings", ¶ms->explicit_paddings));
}
string data_format_string;
TF_RETURN_IF_ERROR(context->GetAttr("data_format", &data_format_string));
TF_REQUIRES(FormatFromString(data_format_string, &params->data_format),
errors::InvalidArgument("Invalid data format"));
const auto& strides = params->strides;
const auto& dilations = params->dilations;
const auto& data_format = params->data_format;
TF_REQUIRES(dilations.size() == 4,
errors::InvalidArgument("Sliding window dilations field must "
"specify 4 dimensions"));
TF_REQUIRES(strides.size() == 4,
errors::InvalidArgument("Sliding window strides field must "
"specify 4 dimensions"));
const int64_t stride_n = GetTensorDim(strides, data_format, 'N');
const int64_t stride_c = GetTensorDim(strides, data_format, 'C');
const int64_t stride_h = GetTensorDim(strides, data_format, 'H');
const int64_t stride_w = GetTensorDim(strides, data_format, 'W');
TF_REQUIRES(
stride_n == 1 && stride_c == 1,
errors::Unimplemented("Current implementation does not yet support "
"strides in the batch and depth dimensions."));
TF_REQUIRES(stride_h > 0 && stride_w > 0,
errors::InvalidArgument(
"Row and column strides should be larger than 0."));
const int64_t dilation_n = GetTensorDim(dilations, data_format, 'N');
const int64_t dilation_c = GetTensorDim(dilations, data_format, 'C');
const int64_t dilation_h = GetTensorDim(dilations, data_format, 'H');
const int64_t dilation_w = GetTensorDim(dilations, data_format, 'W');
TF_REQUIRES(
dilation_n == 1 && dilation_c == 1,
errors::Unimplemented("Current implementation does not yet support "
"dilations in the batch and depth dimensions."));
TF_REQUIRES(
dilation_h > 0 && dilation_w > 0,
errors::InvalidArgument("Dilated rates should be larger than 0."));
TF_RETURN_IF_ERROR(CheckValidPadding(params->padding,
params->explicit_paddings,
/*num_dims=*/4, data_format));
return Status::OK();
}
| null | null | 220,453
|
304006382926233549116136869417215151696
| 54
|
Fix segfault on OOM in Conv2D.
PiperOrigin-RevId: 404655317
Change-Id: I33588dbd3f5d0fef980e3c908bf5515a9ee09ce7
|
other
|
tensorflow
|
e7f497570abb6b4ae5af4970620cd880e4c0c904
| 0
|
static bool Run(OpKernelContext* ctx, const Tensor& input,
const Tensor& filter, int batch, int input_rows,
int input_cols, int in_depth, int filter_rows,
int filter_cols, int pad_rows, int pad_cols, int out_rows,
int /*out_cols*/, int /*out_depth*/, int /*dilation_rows*/,
int /*dilation_cols*/, int /*stride_rows*/,
int /*stride_cols*/, Tensor* /*output*/,
TensorFormat /*data_format*/) {
return false;
}
| null | null | 220,454
|
70182135733861309684352699898384294757
| 10
|
Fix segfault on OOM in Conv2D.
PiperOrigin-RevId: 404655317
Change-Id: I33588dbd3f5d0fef980e3c908bf5515a9ee09ce7
|
other
|
tensorflow
|
e7f497570abb6b4ae5af4970620cd880e4c0c904
| 0
|
void operator()(OpKernelContext* ctx, const Tensor& input,
const Tensor& filter, int row_stride, int col_stride,
int row_dilation, int col_dilation, const Padding& padding,
const std::vector<int64_t>& explicit_paddings, Tensor* output,
TensorFormat data_format) {
CHECK(data_format == FORMAT_NHWC) << "Generic conv implementation only "
"supports NHWC tensor format for now.";
if (filter.dim_size(0) == 1 && filter.dim_size(1) == 1 && row_stride == 1 &&
col_stride == 1 && (padding == SAME || padding == VALID)) {
// For 1x1 kernel, the 2D convolution is reduced to matrix
// multiplication.
//
// TODO(vrv): We should be able to call SpatialConvolution
// and it will produce the same result, but doing so
// led to NaNs during training. Using matmul instead for now.
int conv_width = 1; // Width for the convolution step.
for (int i = 0; i < 3; ++i) {
conv_width *= output->dim_size(i);
}
Eigen::array<Eigen::IndexPair<Eigen::DenseIndex>, 1> dim_pair;
dim_pair[0] = Eigen::IndexPair<Eigen::DenseIndex>(1, 0);
functor::MatMulConvFunctor<Device, T>()(
ctx->eigen_device<Device>(),
output->shaped<T, 2>({conv_width, filter.dim_size(3)}),
input.shaped<T, 2>({conv_width, filter.dim_size(2)}),
filter.shaped<T, 2>({filter.dim_size(2), filter.dim_size(3)}),
dim_pair);
} else if (filter.dim_size(0) == input.dim_size(1) &&
filter.dim_size(1) == input.dim_size(2) && row_dilation == 1 &&
col_dilation == 1 && padding == VALID) {
// If the input data and filter have the same height/width,
// the 2D convolution is reduced to matrix multiplication.
const int k = // Length of reduction dimension.
filter.dim_size(0) * filter.dim_size(1) * filter.dim_size(2);
Eigen::array<Eigen::IndexPair<Eigen::DenseIndex>, 1> dim_pair;
dim_pair[0] = Eigen::IndexPair<Eigen::DenseIndex>(1, 0);
functor::MatMulConvFunctor<Device, T>()(
ctx->eigen_device<Device>(),
output->shaped<T, 2>({input.dim_size(0), filter.dim_size(3)}),
input.shaped<T, 2>({input.dim_size(0), k}),
filter.shaped<T, 2>({k, filter.dim_size(3)}), dim_pair);
} else {
if (padding == EXPLICIT) {
functor::SpatialConvolution<Device, T>()(
ctx->eigen_device<Device>(), output->tensor<T, 4>(),
input.tensor<T, 4>(), filter.tensor<T, 4>(), row_stride, col_stride,
row_dilation, col_dilation, static_cast<int>(explicit_paddings[2]),
static_cast<int>(explicit_paddings[3]),
static_cast<int>(explicit_paddings[4]),
static_cast<int>(explicit_paddings[5]));
} else {
functor::SpatialConvolution<Device, T>()(
ctx->eigen_device<Device>(), output->tensor<T, 4>(),
input.tensor<T, 4>(), filter.tensor<T, 4>(), row_stride, col_stride,
row_dilation, col_dilation, BrainPadding2EigenPadding(padding));
}
}
}
| null | null | 220,455
|
173836825409105544398245280878124800151
| 60
|
Fix segfault on OOM in Conv2D.
PiperOrigin-RevId: 404655317
Change-Id: I33588dbd3f5d0fef980e3c908bf5515a9ee09ce7
|
other
|
tensorflow
|
e7f497570abb6b4ae5af4970620cd880e4c0c904
| 0
|
void operator()(OpKernelContext* ctx, bool use_cudnn, bool cudnn_use_autotune,
const Tensor& input, const Tensor& filter, int row_dilation,
int col_dilation, int row_stride, int col_stride,
const Padding& padding,
const std::vector<int64_t>& explicit_paddings, Tensor* output,
TensorFormat data_format) {
if (data_format != FORMAT_NHWC) {
ctx->SetStatus(errors::Unimplemented(
"The Conv2D op currently only supports the NHWC tensor format on the "
"CPU. The op was given the format: ",
ToString(data_format)));
return;
}
for (int64_t explicit_padding : explicit_paddings) {
if (!FastBoundsCheck(explicit_padding, std::numeric_limits<int>::max())) {
ctx->SetStatus(errors::InvalidArgument("filter too large"));
return;
}
}
const int64_t in_depth = input.dim_size(3);
const int64_t out_depth = output->dim_size(3);
const int64_t patch_depth = filter.dim_size(2);
if (patch_depth <= 0) {
ctx->SetStatus(errors::InvalidArgument(
"filter depth must be stricly positive, got ", patch_depth));
return;
}
if (in_depth % patch_depth != 0) {
ctx->SetStatus(errors::InvalidArgument(
"input depth must be evenly divisible by filter depth: ", in_depth,
" vs ", patch_depth));
return;
}
if (filter.NumElements() <= 0) {
ctx->SetStatus(
errors::InvalidArgument("filter must not have zero elements "
"(i.e. all dimensions must be non-zero)"));
return;
}
const int64_t num_groups = in_depth / patch_depth;
if (num_groups <= 0) {
ctx->SetStatus(errors::InvalidArgument(
"number of groups must be stricly positive, got ", num_groups));
return;
}
if (out_depth % num_groups != 0 || out_depth < num_groups) {
ctx->SetStatus(errors::InvalidArgument(
"output depth must be evenly divisible by number of groups: ",
out_depth, " vs ", num_groups));
return;
}
if (in_depth != patch_depth) {
LaunchGrouped<T>()(ctx, input, filter, row_stride, col_stride,
row_dilation, col_dilation, padding, explicit_paddings,
output, data_format);
} else {
LaunchGeneric<CPUDevice, T>()(ctx, input, filter, row_stride, col_stride,
row_dilation, col_dilation, padding,
explicit_paddings, output, data_format);
}
}
| null | null | 220,456
|
150311915238051063910635971143876062681
| 66
|
Fix segfault on OOM in Conv2D.
PiperOrigin-RevId: 404655317
Change-Id: I33588dbd3f5d0fef980e3c908bf5515a9ee09ce7
|
other
|
tensorflow
|
e7f497570abb6b4ae5af4970620cd880e4c0c904
| 0
|
void Compute(OpKernelContext* context) override {
// Input tensor is of the following dimensions:
// [ batch, in_rows, in_cols, in_depth ]
const Tensor& input = context->input(0);
// Input filter is of the following dimensions:
// [ filter_rows, filter_cols, in_depth, out_depth]
const Tensor& filter = context->input(1);
Conv2DDimensions dimensions;
OP_REQUIRES_OK(context,
ComputeConv2DDimension(params_, input, filter, &dimensions));
TensorShape out_shape = ShapeFromFormat(
params_.data_format, dimensions.batch, dimensions.out_rows,
dimensions.out_cols, dimensions.out_depth);
// Output tensor is of the following dimensions:
// [ in_batch, out_rows, out_cols, out_depth ]
Tensor* output = nullptr;
OP_REQUIRES_OK(context, context->allocate_output(0, out_shape, &output));
VLOG(2) << "Conv2D: in_depth = " << dimensions.in_depth
<< ", patch_depth = " << dimensions.patch_depth
<< ", input_cols = " << dimensions.input_cols
<< ", filter_cols = " << dimensions.filter_cols
<< ", input_rows = " << dimensions.input_rows
<< ", filter_rows = " << dimensions.filter_rows
<< ", stride_rows = " << dimensions.stride_rows
<< ", stride_cols = " << dimensions.stride_cols
<< ", dilation_rows = " << dimensions.dilation_rows
<< ", dilation_cols = " << dimensions.dilation_cols
<< ", out_depth = " << dimensions.out_depth;
// If there is nothing to compute, return.
if (out_shape.num_elements() == 0) {
return;
}
#ifdef TENSORFLOW_USE_LIBXSMM_CONVOLUTIONS
if (params_.padding != EXPLICIT &&
LaunchXsmmConvOp<Device, T>::Run(
context, input, filter, dimensions.batch, dimensions.input_rows,
dimensions.input_cols, dimensions.in_depth, dimensions.filter_rows,
dimensions.filter_cols, dimensions.pad_rows_before,
dimensions.pad_cols_before, dimensions.out_rows,
dimensions.out_cols, dimensions.out_depth, dimensions.dilation_rows,
dimensions.dilation_cols, dimensions.stride_rows,
dimensions.stride_cols, output, params_.data_format)) {
return;
}
#endif
if (params_.padding != EXPLICIT &&
LaunchDeepConvOp<Device, T>::Run(
context, input, filter, dimensions.batch, dimensions.input_rows,
dimensions.input_cols, dimensions.in_depth, dimensions.filter_rows,
dimensions.filter_cols, dimensions.pad_rows_before,
dimensions.pad_cols_before, dimensions.out_rows,
dimensions.out_cols, dimensions.out_depth, dimensions.dilation_rows,
dimensions.dilation_cols, dimensions.stride_rows,
dimensions.stride_cols, output, params_.data_format)) {
return;
}
launcher_(context, use_cudnn_, cudnn_use_autotune_, input, filter,
dimensions.dilation_rows, dimensions.dilation_cols,
dimensions.stride_rows, dimensions.stride_cols, params_.padding,
params_.explicit_paddings, output, params_.data_format);
}
| null | null | 220,457
|
333071325458884381776774741652897483987
| 70
|
Fix segfault on OOM in Conv2D.
PiperOrigin-RevId: 404655317
Change-Id: I33588dbd3f5d0fef980e3c908bf5515a9ee09ce7
|
other
|
tensorflow
|
e7f497570abb6b4ae5af4970620cd880e4c0c904
| 0
|
void LaunchConv2DOp<GPUDevice, T>::operator()(
OpKernelContext* ctx, bool use_cudnn, bool cudnn_use_autotune,
const Tensor& input_param, const Tensor& filter, int row_dilation,
int col_dilation, int row_stride, int col_stride, const Padding& padding,
const std::vector<int64_t>& explicit_paddings, Tensor* output,
TensorFormat data_format) {
using se::dnn::AlgorithmConfig;
using se::dnn::AlgorithmDesc;
using se::dnn::ProfileResult;
auto* stream = ctx->op_device_context()->stream();
OP_REQUIRES(ctx, stream, errors::Internal("No GPU stream available."));
if (!use_cudnn) {
ctx->SetStatus(
errors::Unimplemented("Conv2D for GPU is not currently supported "
"without cudnn"));
return;
}
Tensor input = input_param;
const int64_t in_batch = GetTensorDim(input, data_format, 'N');
int64_t in_rows = GetTensorDim(input, data_format, 'H');
int64_t in_cols = GetTensorDim(input, data_format, 'W');
const int64_t in_depths = GetTensorDim(input, data_format, 'C');
const int64_t patch_rows = filter.dim_size(0);
const int64_t patch_cols = filter.dim_size(1);
const int64_t patch_depths = filter.dim_size(2);
OP_REQUIRES(
ctx, filter.NumElements() > 0,
errors::InvalidArgument("filter must not have zero elements "
"(i.e. all dimensions must be non-zero)"));
// If the filter in-depth (patch_depths) is 1 and smaller than the input
// depth, it's a depthwise convolution. More generally, if the filter in-depth
// divides but is smaller than the input depth, it is a grouped convolution.
bool is_grouped_convolution = patch_depths != in_depths;
if (patch_rows == 1 && patch_cols == 1 && !is_grouped_convolution &&
row_dilation == 1 && col_dilation == 1 && row_stride == 1 &&
col_stride == 1 && data_format == FORMAT_NHWC &&
(padding == VALID || padding == SAME)) {
// 1x1 filter, so call cublas directly.
const uint64 m = in_batch * in_rows * in_cols;
const uint64 k = patch_depths;
const uint64 n = filter.dim_size(3);
auto a_ptr = AsDeviceMemory(input.template flat<T>().data(),
input.template flat<T>().size());
auto b_ptr = AsDeviceMemory(filter.template flat<T>().data(),
filter.template flat<T>().size());
auto c_ptr = AsDeviceMemory(output->template flat<T>().data(),
output->template flat<T>().size());
auto no_transpose = se::blas::Transpose::kNoTranspose;
OP_REQUIRES_OK(ctx, stream->ThenBlasGemm(no_transpose, no_transpose, n, m,
k, b_ptr, n, a_ptr, k, &c_ptr, n));
return;
} else if (patch_rows == in_rows && patch_cols == in_cols &&
!is_grouped_convolution && row_dilation == 1 &&
col_dilation == 1 && padding == VALID &&
data_format == FORMAT_NHWC) {
// The input data and filter have the same height/width, so call cublas
// directly.
const uint64 m = in_batch;
const uint64 k = patch_rows * patch_cols * patch_depths;
const uint64 n = filter.dim_size(3);
auto a_ptr = AsDeviceMemory(input.template flat<T>().data(),
input.template flat<T>().size());
auto b_ptr = AsDeviceMemory(filter.template flat<T>().data(),
filter.template flat<T>().size());
auto c_ptr = AsDeviceMemory(output->template flat<T>().data(),
output->template flat<T>().size());
auto no_transpose = se::blas::Transpose::kNoTranspose;
OP_REQUIRES_OK(ctx, stream->ThenBlasGemm(no_transpose, no_transpose, n, m,
k, b_ptr, n, a_ptr, k, &c_ptr, n));
return;
}
#if GOOGLE_CUDA
// Tensor Core (NVIDIA Volta+ GPUs) supports efficient convolution with fp16
// in NHWC data layout. In all other configurations it's more efficient to
// run computation in NCHW data format.
const bool compute_in_nhwc = DataTypeToEnum<T>::value == DT_HALF &&
stream->GetCudaComputeCapability().IsAtLeast(
se::CudaComputeCapability::VOLTA);
#else
// fast NHWC implementation is a CUDA only feature
const bool compute_in_nhwc = false;
#endif
// We only do one directional conversion: NHWC->NCHW. We never convert in the
// other direction. Grappler layout optimizer selects preferred layout and
// adds necessary annotations to the graph.
// TODO(ezhulenev): Convert in other direction for fp16?
const TensorFormat compute_data_format =
(compute_in_nhwc && data_format == FORMAT_NHWC) ? FORMAT_NHWC
: FORMAT_NCHW;
VLOG(3) << "Compute Conv2D with cuDNN:"
<< " data_format=" << ToString(data_format)
<< " compute_data_format=" << ToString(compute_data_format);
const int64_t out_batch = GetTensorDim(*output, data_format, 'N');
const int64_t out_rows = GetTensorDim(*output, data_format, 'H');
const int64_t out_cols = GetTensorDim(*output, data_format, 'W');
const int64_t out_depths = GetTensorDim(*output, data_format, 'C');
int64_t padding_top = -1, padding_bottom = -1;
int64_t padding_left = -1, padding_right = -1;
if (padding == EXPLICIT) {
GetExplicitPaddingForDim(explicit_paddings, data_format, 'H', &padding_top,
&padding_bottom);
GetExplicitPaddingForDim(explicit_paddings, data_format, 'W', &padding_left,
&padding_right);
}
int64_t out_rows_check, out_cols_check;
Status status = GetWindowedOutputSizeVerboseV2(
in_rows, patch_rows, row_dilation, row_stride, padding, &out_rows_check,
&padding_top, &padding_bottom);
// The status is guaranteed to be OK because we checked the output and padding
// was valid earlier.
TF_CHECK_OK(status);
DCHECK_EQ(out_rows, out_rows_check);
status = GetWindowedOutputSizeVerboseV2(in_cols, patch_cols, col_dilation,
col_stride, padding, &out_cols_check,
&padding_left, &padding_right);
TF_CHECK_OK(status);
DCHECK_EQ(out_cols, out_cols_check);
const int64_t common_padding_rows = std::min(padding_top, padding_bottom);
const int64_t common_padding_cols = std::min(padding_left, padding_right);
if (padding_top != padding_bottom || padding_left != padding_right) {
// cuDNN only supports padding the same amount on the left and right sides,
// and on the top and bottom sides. So we manually create a new padded
// input tensor such that we can pass it to cuDNN.
VLOG(4) << "Pad input tensor:"
<< " padding_top=" << padding_top
<< " padding_bottom=" << padding_bottom
<< " padding_left=" << padding_left
<< " padding_right=" << padding_right;
// TODO(reedwm): In some cases, we can avoid an allocation even if the two
// padding sides are different. For example, if the input is 2x2, the filter
// is 1x1, the stride is 2, and the padding is (1, 0, 1, 0), the result is
// equivalent to as if the padding is (1, 1, 1, 1). Changing the padding in
// such a way would allow us to avoid the allocation.
Tensor transformed_input;
const int64_t padding_rows_diff = std::abs(padding_bottom - padding_top);
const int64_t padding_cols_diff = std::abs(padding_right - padding_left);
const int64_t new_in_rows = in_rows + padding_rows_diff;
const int64_t new_in_cols = in_cols + padding_cols_diff;
OP_REQUIRES_OK(ctx, ctx->allocate_temp(
DataTypeToEnum<T>::value,
ShapeFromFormat(data_format, in_batch, new_in_rows,
new_in_cols, in_depths),
&transformed_input));
const int64_t input_pad_top = padding_top - common_padding_rows;
const int64_t input_pad_bottom = padding_bottom - common_padding_rows;
const int64_t input_pad_left = padding_left - common_padding_cols;
const int64_t input_pad_right = padding_right - common_padding_cols;
bool in_bounds =
FastBoundsCheck(input_pad_top, std::numeric_limits<int>::max()) &&
FastBoundsCheck(input_pad_bottom, std::numeric_limits<int>::max()) &&
FastBoundsCheck(input_pad_left, std::numeric_limits<int>::max()) &&
FastBoundsCheck(input_pad_right, std::numeric_limits<int>::max());
if (!in_bounds) {
ctx->SetStatus(errors::InvalidArgument("Padding is too large."));
return;
}
functor::PadInput<GPUDevice, T, int, 4>()(
ctx->eigen_device<GPUDevice>(), To32Bit(input_param.tensor<T, 4>()),
{{static_cast<int>(input_pad_top), static_cast<int>(input_pad_left)}},
{{static_cast<int>(input_pad_bottom),
static_cast<int>(input_pad_right)}},
To32Bit(transformed_input.tensor<T, 4>()), data_format, T{});
input = transformed_input;
in_rows = new_in_rows;
in_cols = new_in_cols;
}
if (data_format == FORMAT_NHWC && compute_data_format == FORMAT_NCHW) {
VLOG(4) << "Convert the input tensor from NHWC to NCHW.";
TensorShape nchw_shape =
ShapeFromFormat(FORMAT_NCHW, in_batch, in_rows, in_cols, in_depths);
if (in_depths > 1) {
Tensor transformed_input;
OP_REQUIRES_OK(ctx, ctx->allocate_temp(DataTypeToEnum<T>::value,
nchw_shape, &transformed_input));
functor::NHWCToNCHW<GPUDevice, T, 4>()(
ctx->eigen_device<GPUDevice>(),
const_cast<const Tensor&>(input).tensor<T, 4>(),
transformed_input.tensor<T, 4>());
input = transformed_input;
} else {
// If depth <= 1, then just reshape.
CHECK(input.CopyFrom(input, nchw_shape));
}
} else {
CHECK(data_format == compute_data_format) // Crash OK
<< "Illegal data and compute format pair:"
<< " data_format=" << ToString(data_format)
<< " compute_data_format=" << ToString(compute_data_format);
}
CHECK(common_padding_rows >= 0 && common_padding_cols >= 0) // Crash OK
<< "Negative row or col paddings: (" << common_padding_rows << ", "
<< common_padding_cols << ")";
constexpr auto kComputeInNHWC =
std::make_tuple(se::dnn::DataLayout::kBatchYXDepth,
se::dnn::FilterLayout::kOutputYXInput);
constexpr auto kComputeInNCHW =
std::make_tuple(se::dnn::DataLayout::kBatchDepthYX,
se::dnn::FilterLayout::kOutputInputYX);
se::dnn::DataLayout compute_data_layout;
se::dnn::FilterLayout filter_layout;
std::tie(compute_data_layout, filter_layout) =
compute_data_format == FORMAT_NHWC ? kComputeInNHWC : kComputeInNCHW;
se::dnn::BatchDescriptor input_desc;
input_desc.set_count(in_batch)
.set_feature_map_count(in_depths)
.set_height(in_rows)
.set_width(in_cols)
.set_layout(compute_data_layout);
se::dnn::BatchDescriptor output_desc;
output_desc.set_count(out_batch)
.set_height(out_rows)
.set_width(out_cols)
.set_feature_map_count(out_depths)
.set_layout(compute_data_layout);
se::dnn::FilterDescriptor filter_desc;
filter_desc.set_input_filter_height(patch_rows)
.set_input_filter_width(patch_cols)
.set_input_feature_map_count(patch_depths)
.set_output_feature_map_count(filter.dim_size(3))
.set_layout(filter_layout);
se::dnn::ConvolutionDescriptor conv_desc;
conv_desc.set_vertical_dilation_rate(row_dilation)
.set_horizontal_dilation_rate(col_dilation)
.set_vertical_filter_stride(row_stride)
.set_horizontal_filter_stride(col_stride)
.set_zero_padding_height(common_padding_rows)
.set_zero_padding_width(common_padding_cols)
.set_group_count(in_depths / patch_depths);
Tensor transformed_filter;
const auto transform_filter = [&](FilterTensorFormat dst_format) -> Status {
VLOG(4) << "Transform filter tensor from " << ToString(FORMAT_HWIO)
<< " to " << ToString(dst_format);
TensorShape dst_shape =
dst_format == FORMAT_OIHW
? TensorShape({filter.dim_size(3), filter.dim_size(2),
filter.dim_size(0), filter.dim_size(1)})
: TensorShape({filter.dim_size(3), filter.dim_size(0),
filter.dim_size(1), filter.dim_size(2)});
TF_RETURN_IF_ERROR(ctx->allocate_temp(DataTypeToEnum<T>::value, dst_shape,
&transformed_filter));
functor::TransformFilter<GPUDevice, T, int, 4>()(
ctx->eigen_device<GPUDevice>(), dst_format,
To32Bit(filter.tensor<T, 4>()),
To32Bit(transformed_filter.tensor<T, 4>()));
return Status::OK();
};
if (compute_data_format == FORMAT_NCHW) {
OP_REQUIRES_OK(ctx, transform_filter(FORMAT_OIHW));
} else if (compute_data_format == FORMAT_NHWC) {
OP_REQUIRES_OK(ctx, transform_filter(FORMAT_OHWI));
} else {
ctx->SetStatus(errors::InvalidArgument("Invalid compute data format: ",
ToString(compute_data_format)));
return;
}
Tensor transformed_output;
if (data_format != compute_data_format) {
VLOG(4) << "Allocate temporary memory for output in compute data format";
OP_REQUIRES_OK(
ctx, ctx->allocate_temp(DataTypeToEnum<T>::value,
ShapeFromFormat(compute_data_format, out_batch,
out_rows, out_cols, out_depths),
&transformed_output));
} else {
transformed_output = *output;
}
auto input_ptr = AsDeviceMemory(input.template flat<T>().data(),
input.template flat<T>().size());
auto filter_ptr =
AsDeviceMemory(transformed_filter.template flat<T>().data(),
transformed_filter.template flat<T>().size());
auto output_ptr =
AsDeviceMemory(transformed_output.template flat<T>().data(),
transformed_output.template flat<T>().size());
static int64_t ConvolveScratchSize = GetDnnWorkspaceLimit(
// default value is in bytes despite the name of the environment variable
"TF_CUDNN_WORKSPACE_LIMIT_IN_MB", 1LL << 32 // 4GB
);
int device_id = stream->parent()->device_ordinal();
DataType dtype = input.dtype();
ConvParameters conv_parameters = {in_batch, // batch
in_depths, // in_depths
{{in_rows, // in_rows
in_cols}}, // in_cols
compute_data_format, // compute_data_format
out_depths, // out_depths
{{patch_rows, // filter_rows
patch_cols, // filter_cols
patch_depths}}, // filter_depths
{{row_dilation, // dilation_rows
col_dilation}}, // dilation_cols
{{row_stride, // stride_rows
col_stride}}, // stride_cols
{{common_padding_rows, // padding_rows
common_padding_cols}}, // padding_cols
dtype, // tensor datatype
device_id, // device_id
conv_desc.group_count()};
auto entry_or = AutotuneUnfusedConv(
cudnn_use_autotune, ConvAutotuneMap::GetInstance(), conv_parameters, ctx,
se::dnn::ConvolutionKind::FORWARD, input_desc, input_ptr, filter_desc,
filter_ptr, conv_desc, output_desc, output_ptr, ConvolveScratchSize);
OP_REQUIRES_OK(ctx, entry_or.status());
auto autotune_entry = entry_or.ConsumeValueOrDie();
DnnScratchAllocator scratch_allocator(ConvolveScratchSize, ctx);
Status cudnn_launch_status = LaunchAutotunedConv(
autotune_entry, &scratch_allocator, se::dnn::ConvolutionKind::FORWARD,
stream, input_desc, input_ptr, filter_desc, filter_ptr, conv_desc,
output_desc, output_ptr);
if (!cudnn_launch_status.ok()) {
ctx->SetStatus(cudnn_launch_status);
return;
}
if (data_format == FORMAT_NHWC && compute_data_format == FORMAT_NCHW) {
VLOG(4) << "Convert the output tensor back from NCHW to NHWC.";
functor::NCHWToNHWC<GPUDevice, T, 4>()(
ctx->eigen_device<GPUDevice>(),
const_cast<const Tensor&>(transformed_output).tensor<T, 4>(),
output->tensor<T, 4>());
}
}
| null | null | 220,458
|
289197129014581852682090163667359254798
| 357
|
Fix segfault on OOM in Conv2D.
PiperOrigin-RevId: 404655317
Change-Id: I33588dbd3f5d0fef980e3c908bf5515a9ee09ce7
|
other
|
tensorflow
|
e7f497570abb6b4ae5af4970620cd880e4c0c904
| 0
|
explicit Conv2DOp(OpKernelConstruction* context) : BinaryOp<T>(context) {
OP_REQUIRES_OK(context, InitConv2DParameters(context, &params_));
OP_REQUIRES_OK(context, context->GetAttr("use_cudnn_on_gpu", &use_cudnn_));
cudnn_use_autotune_ = CudnnUseAutotune();
}
| null | null | 220,459
|
247351888042820870121408084903155946684
| 6
|
Fix segfault on OOM in Conv2D.
PiperOrigin-RevId: 404655317
Change-Id: I33588dbd3f5d0fef980e3c908bf5515a9ee09ce7
|
other
|
tensorflow
|
e7f497570abb6b4ae5af4970620cd880e4c0c904
| 0
|
static bool Run(OpKernelContext* ctx, const Tensor& input,
const Tensor& filter, int batch, int input_rows,
int input_cols, int in_depth, int filter_rows,
int filter_cols, int pad_rows, int pad_cols, int out_rows,
int out_cols, int out_depth, int stride_rows, int stride_cols,
int dilation_rows, int dilation_cols, Tensor* output,
TensorFormat data_format) {
return false;
}
| null | null | 220,460
|
335541604954393086285255447821219804615
| 9
|
Fix segfault on OOM in Conv2D.
PiperOrigin-RevId: 404655317
Change-Id: I33588dbd3f5d0fef980e3c908bf5515a9ee09ce7
|
other
|
tensorflow
|
e7f497570abb6b4ae5af4970620cd880e4c0c904
| 0
|
static bool Run(OpKernelContext* ctx, const Tensor& input,
const Tensor& filter, int batch, int input_rows,
int input_cols, int in_depth, int filter_rows,
int filter_cols, int pad_rows, int pad_cols, int out_rows,
int out_cols, int out_depth, int dilation_rows,
int dilation_cols, int stride_rows, int stride_cols,
Tensor* output, TensorFormat data_format) {
auto num_threads =
ctx->device()->tensorflow_cpu_worker_threads()->num_threads;
// See libxsmm_dnn.h for this struct definition.
libxsmm_dnn_conv_desc desc;
desc.N = batch;
desc.C = in_depth;
desc.H = input_rows;
desc.W = input_cols;
desc.K = out_depth;
desc.R = filter_rows;
desc.S = filter_cols;
desc.u = stride_rows;
desc.v = stride_cols;
desc.pad_h = pad_rows;
desc.pad_w = pad_cols;
desc.pad_h_in = 0;
desc.pad_w_in = 0;
desc.pad_h_out = 0;
desc.pad_w_out = 0;
desc.threads = num_threads;
desc.algo = LIBXSMM_DNN_CONV_ALGO_DIRECT;
desc.buffer_format = LIBXSMM_DNN_TENSOR_FORMAT_NHWC;
desc.filter_format = LIBXSMM_DNN_TENSOR_FORMAT_LIBXSMM;
desc.fuse_ops = LIBXSMM_DNN_CONV_FUSE_NONE;
desc.options = LIBXSMM_DNN_CONV_OPTION_OVERWRITE;
desc.datatype_out = LIBXSMM_DNN_DATATYPE_F32;
desc.datatype_in = LIBXSMM_DNN_DATATYPE_F32;
if (dilation_rows != 1 || dilation_cols != 1 ||
!CanUseXsmmConv2D(desc, data_format)) {
return false;
}
auto input_ptr = input.template flat<float>().data();
auto filter_ptr = filter.template flat<float>().data();
auto output_ptr = output->template flat<float>().data();
bool success = functor::XsmmFwdConv2D<CPUDevice, float>()(
ctx, desc, input_ptr, filter_ptr, output_ptr);
return success;
}
| null | null | 220,461
|
259864576458161079688972798922705274708
| 47
|
Fix segfault on OOM in Conv2D.
PiperOrigin-RevId: 404655317
Change-Id: I33588dbd3f5d0fef980e3c908bf5515a9ee09ce7
|
other
|
tensorflow
|
e21af685e1828f7ca65038307df5cc06de4479e8
| 0
|
XlaPlatformInfo XlaPlatformInfoFromDevice(DeviceBase* device_base) {
auto device = static_cast<Device*>(device_base);
se::Platform::Id platform_id = nullptr;
const XlaDevice::Metadata* xla_device_metadata = nullptr;
std::shared_ptr<se::DeviceMemoryAllocator> custom_allocator;
if (device->device_type() == DEVICE_CPU) {
platform_id = se::host::kHostPlatformId;
} else if (device->device_type() == DEVICE_GPU) {
platform_id = device->tensorflow_gpu_device_info()
->stream->parent()
->platform()
->id();
} else if (XlaDevice::GetMetadataFromDevice(device, &xla_device_metadata)
.ok()) {
// If we are on an XlaDevice, use the underlying XLA platform's allocator
// directly. We could use the StreamExecutor's allocator which may
// theoretically be more correct, but XLA returns a nice OOM message in a
// Status and StreamExecutor does not.
//
// Importantly we can't use ctx->device()->GetAllocator() as the allocator
// (which xla_allocator above uses) as on an XlaDevice, this is a dummy
// allocator that returns XlaTensor objects. The XlaCompiler needs a real
// allocator to allocate real buffers.
platform_id = xla_device_metadata->platform()->id();
custom_allocator =
xla_device_metadata->client()->backend().shared_memory_allocator();
}
return XlaPlatformInfo(DeviceType(device->device_type()), platform_id,
xla_device_metadata, custom_allocator);
}
| null | null | 220,462
|
103663775092147067966931900990983992089
| 32
|
Fix Null-pointer dereference in BuildXlaCompilationCache
If ConfigProto is not used, then use the default settings which is to allow all devices.
PiperOrigin-RevId: 420391800
Change-Id: I88161ad7042990aef678e77b597a2fb2c8f815be
|
other
|
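The rows for commit e21af685 share a message about a null-pointer dereference in `BuildXlaCompilationCache`: when no `ConfigProto` is attached, the default (all devices visible) should be used instead of dereferencing `flr->config_proto()`. The `BuildXlaCompilationCache` listing below already shows the guarded form; the sketch here only restates the guard pattern with hypothetical stand-in types (`ConfigProtoLike`, `GpuOptionsLike`, `AllowedDeviceIds`):

```
// Hypothetical sketch: only read GPU options when a config is actually
// attached; otherwise fall back to the default of allowing all devices.
#include <optional>
#include <set>
#include <string>

struct GpuOptionsLike { std::string visible_device_list; };
struct ConfigProtoLike { GpuOptionsLike gpu_options; };

std::optional<std::set<int>> AllowedDeviceIds(const ConfigProtoLike* config) {
  if (config == nullptr) {
    return std::nullopt;  // no ConfigProto: allow all devices
  }
  std::set<int> ids;
  // ... parse config->gpu_options.visible_device_list into ids ...
  return ids;
}
```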
tensorflow
|
e21af685e1828f7ca65038307df5cc06de4479e8
| 0
|
Status BuildXlaCompilationCache(DeviceBase* device, FunctionLibraryRuntime* flr,
const XlaPlatformInfo& platform_info,
XlaCompilationCache** cache) {
if (platform_info.xla_device_metadata()) {
*cache = new XlaCompilationCache(
platform_info.xla_device_metadata()->client(),
platform_info.xla_device_metadata()->jit_device_type());
return Status::OK();
}
auto platform =
se::MultiPlatformManager::PlatformWithId(platform_info.platform_id());
if (!platform.ok()) {
return platform.status();
}
StatusOr<xla::Compiler*> compiler_for_platform =
xla::Compiler::GetForPlatform(platform.ValueOrDie());
if (!compiler_for_platform.ok()) {
// In some rare cases (usually in unit tests with very small clusters) we
// may end up transforming an XLA cluster with at least one GPU operation
// (which would normally force the cluster to be compiled using XLA:GPU)
// into an XLA cluster with no GPU operations (i.e. containing only CPU
// operations). Such a cluster can fail compilation (in a way that
// MarkForCompilation could not have detected) if the CPU JIT is not linked
// in.
//
// So bail out of _XlaCompile in this case, and let the executor handle the
// situation for us.
const Status& status = compiler_for_platform.status();
if (status.code() == error::NOT_FOUND) {
return errors::Unimplemented("Could not find compiler for platform ",
platform.ValueOrDie()->Name(), ": ",
status.ToString());
}
}
xla::LocalClientOptions client_options;
client_options.set_platform(platform.ValueOrDie());
client_options.set_intra_op_parallelism_threads(
device->tensorflow_cpu_worker_threads()->num_threads);
if (flr->config_proto()) {
string allowed_gpus =
flr->config_proto()->gpu_options().visible_device_list();
TF_ASSIGN_OR_RETURN(absl::optional<std::set<int>> gpu_ids,
ParseVisibleDeviceList(allowed_gpus));
client_options.set_allowed_devices(gpu_ids);
}
auto client = xla::ClientLibrary::GetOrCreateLocalClient(client_options);
if (!client.ok()) {
return client.status();
}
const XlaOpRegistry::DeviceRegistration* registration;
if (!XlaOpRegistry::GetCompilationDevice(platform_info.device_type().type(),
®istration)) {
return errors::InvalidArgument("No JIT device registered for ",
platform_info.device_type().type());
}
*cache = new XlaCompilationCache(
client.ValueOrDie(), DeviceType(registration->compilation_device_name));
return Status::OK();
}
| null | null | 220,463
|
54162462653467437305631663164092596903
| 64
|
Fix Null-pointer dereference in BuildXlaCompilationCache
If ConfigProto is not used, then use the default settings which is to allow all devices.
PiperOrigin-RevId: 420391800
Change-Id: I88161ad7042990aef678e77b597a2fb2c8f815be
|
other
|
tensorflow
|
e21af685e1828f7ca65038307df5cc06de4479e8
| 0
|
std::shared_ptr<se::DeviceMemoryAllocator> GetAllocator(
DeviceBase* device, se::Stream* stream,
const XlaPlatformInfo& platform_info) {
if (platform_info.custom_allocator()) {
return platform_info.custom_allocator();
}
auto* alloc = device->GetAllocator({});
if (!stream) {
// Stream is not set for the host platform.
se::Platform* platform =
se::MultiPlatformManager::PlatformWithId(platform_info.platform_id())
.ValueOrDie();
return std::make_shared<se::TfAllocatorAdapter>(alloc, platform);
}
return std::make_shared<se::TfAllocatorAdapter>(alloc, stream);
}
| null | null | 220,464
|
25570211539215501307360281010822183913
| 16
|
Fix Null-pointer dereference in BuildXlaCompilationCache
If ConfigProto is not used, then use the default settings which is to allow all devices.
PiperOrigin-RevId: 420391800
Change-Id: I88161ad7042990aef678e77b597a2fb2c8f815be
|
other
|
tensorflow
|
e21af685e1828f7ca65038307df5cc06de4479e8
| 0
|
xla::StatusOr<absl::optional<std::set<int>>> ParseVisibleDeviceList(
absl::string_view visible_device_list) {
std::set<int> gpu_ids;
if (visible_device_list.empty()) {
return {{absl::nullopt}};
}
const std::vector<string> visible_devices =
absl::StrSplit(visible_device_list, ',');
for (const string& platform_device_id_str : visible_devices) {
int32_t platform_device_id;
if (!absl::SimpleAtoi(platform_device_id_str, &platform_device_id)) {
return errors::InvalidArgument(
"Could not parse entry in 'visible_device_list': '",
platform_device_id_str,
"'. visible_device_list = ", visible_device_list);
}
gpu_ids.insert(platform_device_id);
}
return {{gpu_ids}};
}
| null | null | 220,465
|
66025840875232229799833119662546892000
| 20
|
Fix Null-pointer dereference in BuildXlaCompilationCache
If ConfigProto is not used, then use the default settings which is to allow all devices.
PiperOrigin-RevId: 420391800
Change-Id: I88161ad7042990aef678e77b597a2fb2c8f815be
|
other
|
tensorflow
|
e21af685e1828f7ca65038307df5cc06de4479e8
| 0
|
XlaCompiler::Options GenerateCompilerOptions(
const XlaCompilationCache& cache,
const FunctionLibraryRuntime& function_library, DeviceBase* device,
se::Stream* stream, const XlaPlatformInfo& platform_info,
bool has_ref_vars) {
XlaCompiler::Options options;
options.client = static_cast<xla::LocalClient*>(cache.client());
if (stream != nullptr) {
options.device_ordinal = stream->parent()->device_ordinal();
}
options.device_type = cache.device_type();
options.flib_def = function_library.GetFunctionLibraryDefinition();
options.graph_def_version = function_library.graph_def_version();
options.allow_cpu_custom_calls =
(platform_info.platform_id() == se::host::kHostPlatformId);
options.device_allocator = GetAllocator(device, stream, platform_info);
if (platform_info.xla_device_metadata()) {
options.shape_determination_fns =
platform_info.xla_device_metadata()->default_shape_determination_fns();
}
// If reference variables are not present in the graph, we can safely alias
// passthrough parameters without performing a copy.
options.alias_passthrough_params =
!has_ref_vars && !platform_info.is_on_xla_device();
return options;
}
| null | null | 220,466
|
50107903715813929190637947860861767200
| 26
|
Fix Null-pointer dereference in BuildXlaCompilationCache
If ConfigProto is not used, then use the default settings which is to allow all devices.
PiperOrigin-RevId: 420391800
Change-Id: I88161ad7042990aef678e77b597a2fb2c8f815be
|
other
|
tensorflow
|
2b7100d6cdff36aa21010a82269bc05a6d1cc74a
| 0
|
Status OutputSparse(const BatchedMap<T>& per_batch_counts, int num_values,
bool is_1d, OpKernelContext* context) {
int total_values = 0;
int num_batches = per_batch_counts.size();
for (const auto& per_batch_count : per_batch_counts) {
total_values += per_batch_count.size();
}
Tensor* indices;
int inner_dim = is_1d ? 1 : 2;
TF_RETURN_IF_ERROR(context->allocate_output(
0, TensorShape({total_values, inner_dim}), &indices));
Tensor* values;
TF_RETURN_IF_ERROR(
context->allocate_output(1, TensorShape({total_values}), &values));
auto output_indices = indices->matrix<int64_t>();
auto output_values = values->flat<T>();
int64_t value_loc = 0;
for (int b = 0; b < num_batches; ++b) {
const auto& per_batch_count = per_batch_counts[b];
std::vector<std::pair<int, T>> pairs(per_batch_count.begin(),
per_batch_count.end());
std::sort(pairs.begin(), pairs.end());
for (const auto& x : pairs) {
if (is_1d) {
output_indices(value_loc, 0) = x.first;
} else {
output_indices(value_loc, 0) = b;
output_indices(value_loc, 1) = x.first;
}
output_values(value_loc) = x.second;
++value_loc;
}
}
Tensor* dense_shape;
if (is_1d) {
TF_RETURN_IF_ERROR(
context->allocate_output(2, TensorShape({1}), &dense_shape));
dense_shape->flat<int64_t>().data()[0] = num_values;
} else {
TF_RETURN_IF_ERROR(
context->allocate_output(2, TensorShape({2}), &dense_shape));
dense_shape->flat<int64_t>().data()[0] = num_batches;
dense_shape->flat<int64_t>().data()[1] = num_values;
}
return Status::OK();
}
| null | null | 220,802
|
177390678330180241195899093502529453362
| 50
|
Cleanup and remove duplicate validation in `SparseCount`.
We have validation that is duplicated, checking different conditions, in different formats and failing to capture all cases. This should fix all the previous bugs.
PiperOrigin-RevId: 414886981
Change-Id: Ibf0bba0beb057b76d505324bb9487565daf95f01
|
other
|
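The rows for commit 2b7100d6 describe consolidating duplicated, inconsistent validation across the `SparseCount` family of kernels. A hypothetical sketch of that direction, assuming the standard `OP_REQUIRES_OK`/`errors::InvalidArgument` API and reusing the weight/shape check that appears in the listings below as the shared piece; `ValidateWeights` is a made-up helper name:

```
// Hypothetical sketch: one shared validation helper used by the DenseCount,
// SparseCount, and RaggedCount kernels instead of each repeating a slightly
// different subset of the checks.
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/platform/errors.h"

namespace tf = tensorflow;

tf::Status ValidateWeights(const tf::Tensor& values, const tf::Tensor& weights) {
  if (weights.NumElements() > 0 && weights.shape() != values.shape()) {
    return tf::errors::InvalidArgument(
        "Weights and values must have the same shape. Weight shape: ",
        weights.shape().DebugString(),
        "; values shape: ", values.shape().DebugString());
  }
  return tf::Status::OK();
}

// Usage inside a kernel's Compute():
//   OP_REQUIRES_OK(context, ValidateWeights(values, weights));
```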
tensorflow
|
2b7100d6cdff36aa21010a82269bc05a6d1cc74a
| 0
|
explicit RaggedCount(OpKernelConstruction* context) : OpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("minlength", &minlength_));
OP_REQUIRES_OK(context, context->GetAttr("maxlength", &maxlength_));
OP_REQUIRES_OK(context, context->GetAttr("binary_output", &binary_output_));
}
| null | null | 220,803
|
180085740627781792033015629285690318889
| 5
|
Cleanup and remove duplicate validation in `SparseCount`.
We have validation that is duplicated, checking different conditions, in different formats and failing to capture all cases. This should fix all the previous bugs.
PiperOrigin-RevId: 414886981
Change-Id: Ibf0bba0beb057b76d505324bb9487565daf95f01
|
other
|
tensorflow
|
2b7100d6cdff36aa21010a82269bc05a6d1cc74a
| 0
|
void Compute(OpKernelContext* context) override {
const Tensor& splits = context->input(0);
const Tensor& values = context->input(1);
const Tensor& weights = context->input(2);
bool use_weights = weights.NumElements() > 0;
bool is_1d = false;
if (use_weights) {
OP_REQUIRES(
context, weights.shape() == values.shape(),
errors::InvalidArgument(
"Weights and values must have the same shape. Weight shape: ",
weights.shape().DebugString(),
"; values shape: ", values.shape().DebugString()));
}
const auto splits_values = splits.flat<int64_t>();
const auto values_values = values.flat<T>();
const auto weight_values = weights.flat<W>();
int num_batches = splits.NumElements() - 1;
int num_values = values.NumElements();
OP_REQUIRES(
context, num_batches > 0,
errors::InvalidArgument(
"Must provide at least 2 elements for the splits argument"));
OP_REQUIRES(context, splits_values(0) == 0,
errors::InvalidArgument("Splits must start with 0, not with ",
splits_values(0)));
OP_REQUIRES(context, splits_values(num_batches) == num_values,
errors::InvalidArgument(
"Splits must end with the number of values, got ",
splits_values(num_batches), " instead of ", num_values));
auto per_batch_counts = BatchedMap<W>(num_batches);
T max_value = 0;
int batch_idx = 0;
for (int idx = 0; idx < num_values; ++idx) {
while (idx >= splits_values(batch_idx)) {
batch_idx++;
}
const auto& value = values_values(idx);
if (value >= 0 && (maxlength_ <= 0 || value < maxlength_)) {
if (binary_output_) {
per_batch_counts[batch_idx - 1][value] = 1;
} else if (use_weights) {
per_batch_counts[batch_idx - 1][value] += weight_values(idx);
} else {
per_batch_counts[batch_idx - 1][value]++;
}
if (value > max_value) {
max_value = value;
}
}
}
int num_output_values = GetOutputSize(max_value, maxlength_, minlength_);
OP_REQUIRES_OK(context, OutputSparse<W>(per_batch_counts, num_output_values,
is_1d, context));
}
| null | null | 220,804
|
110768011914469449706699730109526731833
| 61
|
Cleanup and remove duplicate validation in `SparseCount`.
We have validation that is duplicated, checking different conditions, in different formats and failing to capture all cases. This should fix all the previous bugs.
PiperOrigin-RevId: 414886981
Change-Id: Ibf0bba0beb057b76d505324bb9487565daf95f01
|
other
|
tensorflow
|
2b7100d6cdff36aa21010a82269bc05a6d1cc74a
| 0
|
explicit SparseCount(OpKernelConstruction* context) : OpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("minlength", &minlength_));
OP_REQUIRES_OK(context, context->GetAttr("maxlength", &maxlength_));
OP_REQUIRES_OK(context, context->GetAttr("binary_output", &binary_output_));
}
| null | null | 220,805
|
62748649235066084058733023823844036021
| 5
|
Cleanup and remove duplicate validation in `SparseCount`.
We have validation that is duplicated, checking different conditions, in different formats and failing to capture all cases. This should fix all the previous bugs.
PiperOrigin-RevId: 414886981
Change-Id: Ibf0bba0beb057b76d505324bb9487565daf95f01
|
other
|
tensorflow
|
2b7100d6cdff36aa21010a82269bc05a6d1cc74a
| 0
|
explicit DenseCount(OpKernelConstruction* context) : OpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("minlength", &minlength_));
OP_REQUIRES_OK(context, context->GetAttr("maxlength", &maxlength_));
OP_REQUIRES_OK(context, context->GetAttr("binary_output", &binary_output_));
}
| null | null | 220,806
|
288622890649243167380501542391568411173
| 5
|
Cleanup and remove duplicate validation in `SparseCount`.
We have validation that is duplicated, checking different conditions, in different formats and failing to capture all cases. This should fix all the previous bugs.
PiperOrigin-RevId: 414886981
Change-Id: Ibf0bba0beb057b76d505324bb9487565daf95f01
|
other
|
tensorflow
|
2b7100d6cdff36aa21010a82269bc05a6d1cc74a
| 0
|
int GetOutputSize(int max_seen, int max_length, int min_length) {
return max_length > 0 ? max_length : std::max((max_seen + 1), min_length);
}
| null | null | 220,807
|
112686576939291647904335659468670923144
| 3
|
Cleanup and remove duplicate validation in `SparseCount`.
We have validation that is duplicated, checking different conditions, in different formats and failing to capture all cases. This should fix all the previous bugs.
PiperOrigin-RevId: 414886981
Change-Id: Ibf0bba0beb057b76d505324bb9487565daf95f01
|
other
|
tensorflow
|
2b7100d6cdff36aa21010a82269bc05a6d1cc74a
| 0
|
void Compute(OpKernelContext* context) override {
const Tensor& data = context->input(0);
const Tensor& weights = context->input(1);
bool use_weights = weights.NumElements() > 0;
OP_REQUIRES(context,
TensorShapeUtils::IsVector(data.shape()) ||
TensorShapeUtils::IsMatrix(data.shape()),
errors::InvalidArgument(
"Input must be a 1 or 2-dimensional tensor. Got: ",
data.shape().DebugString()));
if (use_weights) {
OP_REQUIRES(
context, weights.shape() == data.shape(),
errors::InvalidArgument(
"Weights and data must have the same shape. Weight shape: ",
weights.shape().DebugString(),
"; data shape: ", data.shape().DebugString()));
}
bool is_1d = TensorShapeUtils::IsVector(data.shape());
int negative_valued_axis = -1;
int num_batch_dimensions = (data.shape().dims() + negative_valued_axis);
int num_batch_elements = 1;
for (int i = 0; i < num_batch_dimensions; ++i) {
OP_REQUIRES(context, data.shape().dim_size(i) != 0,
errors::InvalidArgument(
"Invalid input: Shapes dimension cannot be 0."));
num_batch_elements *= data.shape().dim_size(i);
}
int num_value_elements = data.shape().num_elements() / num_batch_elements;
auto per_batch_counts = BatchedMap<W>(num_batch_elements);
T max_value = 0;
const auto data_values = data.flat<T>();
const auto weight_values = weights.flat<W>();
int i = 0;
for (int b = 0; b < num_batch_elements; ++b) {
for (int v = 0; v < num_value_elements; ++v) {
const auto& value = data_values(i);
if (value >= 0 && (maxlength_ <= 0 || value < maxlength_)) {
if (binary_output_) {
per_batch_counts[b][value] = 1;
} else if (use_weights) {
per_batch_counts[b][value] += weight_values(i);
} else {
per_batch_counts[b][value]++;
}
if (value > max_value) {
max_value = value;
}
}
++i;
}
}
int num_output_values = GetOutputSize(max_value, maxlength_, minlength_);
OP_REQUIRES_OK(context, OutputSparse<W>(per_batch_counts, num_output_values,
is_1d, context));
}
| null | null | 220,808
|
100130268272273832141721632626493790364
| 63
|
Cleanup and remove duplicate validation in `SparseCount`.
We have validation that is duplicated, checking different conditions in different formats, and failing to capture all cases. This should fix all the previous bugs.
PiperOrigin-RevId: 414886981
Change-Id: Ibf0bba0beb057b76d505324bb9487565daf95f01
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
Integer RoundUp(Integer i) {
return RoundDown<Modulus>(i + Modulus - 1);
}
| null | null | 220,809
|
40046896019641251208918125980975088920
| 3
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
inline int32_t MultiplyByQuantizedMultiplier(int32_t x,
int32_t quantized_multiplier,
int shift) {
using gemmlowp::RoundingDivideByPOT;
using gemmlowp::SaturatingRoundingDoublingHighMul;
int left_shift = shift > 0 ? shift : 0;
int right_shift = shift > 0 ? 0 : -shift;
return RoundingDivideByPOT(SaturatingRoundingDoublingHighMul(
x * (1 << left_shift), quantized_multiplier),
right_shift);
}
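A minimal usage sketch, assuming the overload above and the gemmlowp fixedpoint header it already relies on. quantized_multiplier is a Q0.31 fraction (the int64 overload's comments say the fixed point sits at bit 31), so 1 << 30 stands for 0.5, and shift moves the result by a power of two. The values are illustrative only.

#include <cstdint>

inline void MultiplyByQuantizedMultiplierExample() {
  const int32_t half_q31 = 1 << 30;  // 0.5 in Q0.31
  // Roughly x * 0.5 * 2^shift, with rounding, saturating fixed-point arithmetic.
  const int32_t y0 = MultiplyByQuantizedMultiplier(100, half_q31, /*shift=*/0);   // expected: 50
  const int32_t y1 = MultiplyByQuantizedMultiplier(100, half_q31, /*shift=*/-1);  // expected: 25
  (void)y0;
  (void)y1;
}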
| null | null | 220,810
|
261204126687194641065714655407336742514
| 11
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
SaturatingRoundingMultiplyByPOTParam(
gemmlowp::FixedPoint<tRawType, tIntegerBits> a, int exponent) {
return gemmlowp::FixedPoint<tRawType, tIntegerBits>::FromRaw(
SaturatingRoundingMultiplyByPOTParam(a.raw(), exponent));
}
| null | null | 220,811
|
56674790680841916960710893568247611598
| 5
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
inline int32_t MultiplyByQuantizedMultiplier(int64_t x,
int32_t quantized_multiplier,
int shift) {
// Inputs:
// - quantized_multiplier has fixed point at bit 31
// - shift is -31 to +7 (negative for right shift)
//
// Assumptions: The following input ranges are assumed
// - quantize_scale>=0 (the usual range is (1<<30) to (1>>31)-1)
// - scaling is chosen so final scaled result fits in int32_t
// - input x is in the range -(1<<47) <= x < (1<<47)
TFLITE_DCHECK(quantized_multiplier >= 0);
TFLITE_DCHECK(shift >= -31 && shift < 8);
TFLITE_DCHECK(x >= -(static_cast<int64_t>(1) << 47) &&
x < (static_cast<int64_t>(1) << 47));
const int32_t reduced_multiplier =
(quantized_multiplier < 0x7FFF0000)
? ((quantized_multiplier + (1 << 15)) >> 16)
: 0x7FFF;
const int64_t total_shift = 15 - shift;
const int64_t round = static_cast<int64_t>(1) << (total_shift - 1);
int64_t result = x * static_cast<int64_t>(reduced_multiplier) + round;
result = result >> total_shift;
TFLITE_DCHECK(result >= std::numeric_limits<int32_t>::min() &&
result <= std::numeric_limits<int32_t>::max());
return static_cast<int32_t>(result);
}
| null | null | 220,812
|
23594485533233254272355562239490867329
| 29
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
inline void DownScaleInt32ToInt16Multiplier(int32_t multiplier_int32_t,
int16_t* multiplier_int16_t) {
TFLITE_DCHECK_GE(multiplier_int32_t, 0);
static constexpr int32_t kRoundingOffset = 1 << 15;
if (multiplier_int32_t >=
std::numeric_limits<int32_t>::max() - kRoundingOffset) {
*multiplier_int16_t = std::numeric_limits<int16_t>::max();
return;
}
const int32_t result = (multiplier_int32_t + kRoundingOffset) >> 16;
TFLITE_DCHECK_LE(result << 16, multiplier_int32_t + kRoundingOffset);
TFLITE_DCHECK_GT(result << 16, multiplier_int32_t - kRoundingOffset);
*multiplier_int16_t = result;
TFLITE_DCHECK_EQ(*multiplier_int16_t, result);
}
| null | null | 220,813
|
317923766780515904126986121909632263551
| 15
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
Integer CeilQuotient(Integer a, Integer b) {
return (a + b - 1) / b;
}
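A brief sketch of the rounding-up division above, assuming the template definition just shown and <cassert>:

#include <cassert>

inline void CeilQuotientExample() {
  assert(CeilQuotient(7, 3) == 3);  // rounds up
  assert(CeilQuotient(6, 3) == 2);  // exact division is unchanged
  assert(CeilQuotient(1, 4) == 1);
}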
| null | null | 220,814
|
159471423190680242283507380326320561543
| 3
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
constexpr int lut_size() {
static_assert(std::is_same<LutInT, int8_t>::value ||
std::is_same<LutInT, int16_t>::value,
"Only LUTs with int8 or int16 inputs are supported.");
return std::is_same<LutInT, int8_t>::value ? 256 : 513;
}
| null | null | 220,815
|
247514650328691029626514992488789598251
| 6
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
inline int SubscriptToIndex(const NdArrayDesc<5>& desc, int indexes[5]) {
return indexes[0] * desc.strides[0] + indexes[1] * desc.strides[1] +
indexes[2] * desc.strides[2] + indexes[3] * desc.strides[3] +
indexes[4] * desc.strides[4];
}
| null | null | 220,816
|
268701888151039726268813830330015850168
| 5
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
inline int LegacyHowManyThreads(int max_num_threads, int rows, int cols,
int depth) {
// Early-exit in the default case where multi-threading is disabled.
if (max_num_threads == 1) {
return 1;
}
// Ensure that each thread has KernelRows rows to process, if at all possible.
int thread_count = std::min(max_num_threads, rows / KernelRows);
// Limit the number of threads according to the overall size of the problem.
if (thread_count > 1) {
// Empirically determined value.
static constexpr std::uint64_t min_cubic_size_per_thread = 64 * 1024;
// We can only multiply two out of three sizes without risking overflow
const std::uint64_t cubic_size =
std::uint64_t(rows) * std::uint64_t(cols) * std::uint64_t(depth);
thread_count = std::min(
thread_count, static_cast<int>(cubic_size / min_cubic_size_per_thread));
}
if (thread_count < 1) {
thread_count = 1;
}
assert(thread_count > 0 && thread_count <= max_num_threads);
return thread_count;
}
| null | null | 220,817
|
246775599609519178673944784183973350767
| 30
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
inline int8_t lut_lookup(int8_t value, const int8_t* lut) {
return lut[128 + value];
}
| null | null | 220,818
|
227527435671032590279724142500511082218
| 3
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
void optimized_ops_preload_l1_keep(const T* ptr) {
#ifdef __GNUC__
// builtin offered by GCC-compatible compilers including clang
__builtin_prefetch(ptr, /* 0 means read */ 0, /* 3 means high locality */ 3);
#else
(void)ptr;
#endif
}
| null | null | 220,819
|
265852236439693612181999757336477589663
| 8
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
inline void gen_lut(FloatT (*func)(FloatT), FloatT input_min, FloatT input_max,
FloatT output_min, FloatT output_max, LutOutT* lut) {
static_assert(std::is_same<LutInT, int8_t>::value ||
std::is_same<LutInT, int16_t>::value,
"Only LUTs with int8 or int16 inputs are supported.");
static_assert(std::is_same<LutOutT, int8_t>::value ||
std::is_same<LutOutT, int16_t>::value,
"Only LUTs with int8 or int16 outputs are supported.");
static_assert(std::is_floating_point<FloatT>::value,
"FloatT must be a floating-point type.");
const int nb_steps = std::is_same<LutInT, int8_t>::value ? 256 : 512;
const FloatT step = (input_max - input_min) / nb_steps;
const FloatT half_step = step / 2;
const FloatT output_scaling_inv =
static_cast<FloatT>(std::numeric_limits<LutOutT>::max() -
std::numeric_limits<LutOutT>::min() + 1) /
(output_max - output_min);
const FloatT table_min =
static_cast<FloatT>(std::numeric_limits<LutOutT>::min());
const FloatT table_max =
static_cast<FloatT>(std::numeric_limits<LutOutT>::max());
for (int i = 0; i < nb_steps; i++) {
const FloatT val = func(input_min + i * step);
const FloatT val_midpoint = func(input_min + i * step + half_step);
const FloatT val_next = func(input_min + (i + 1) * step);
const FloatT sample_val = TfLiteRound(val * output_scaling_inv);
const FloatT midpoint_interp_val =
TfLiteRound((val_next * output_scaling_inv +
TfLiteRound(val * output_scaling_inv)) /
2);
const FloatT midpoint_val = TfLiteRound(val_midpoint * output_scaling_inv);
const FloatT midpoint_err = midpoint_interp_val - midpoint_val;
const FloatT bias = TfLiteRound(midpoint_err / 2);
lut[i] = static_cast<LutOutT>(std::min<FloatT>(
std::max<FloatT>(sample_val - bias, table_min), table_max));
}
const bool with_extra_interpolation_value =
std::is_same<LutInT, int16_t>::value;
if (with_extra_interpolation_value) {
lut[nb_steps] = static_cast<LutOutT>(std::min<FloatT>(
std::max<FloatT>(TfLiteRound(func(input_max) * output_scaling_inv),
table_min),
table_max));
}
}
| null | null | 220,820
|
74324879010582546576437023430267164097
| 50
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
float ActivationFunction(float x) {
float output_activation_min, output_activation_max;
GetActivationMinMax(Ac, &output_activation_min, &output_activation_max);
return ActivationFunctionWithMinMax(x, output_activation_min,
output_activation_max);
}
| null | null | 220,821
|
202385988353461168619550882700480478001
| 6
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
inline void NdArrayDescsForElementwiseBroadcast(
const RuntimeShape& input0_shape, const RuntimeShape& input1_shape,
NdArrayDesc<N>* desc0_out, NdArrayDesc<N>* desc1_out) {
TFLITE_DCHECK(desc0_out != nullptr);
TFLITE_DCHECK(desc1_out != nullptr);
auto extended_input0_shape = RuntimeShape::ExtendedShape(N, input0_shape);
auto extended_input1_shape = RuntimeShape::ExtendedShape(N, input1_shape);
// Copy dims to desc, calculating strides.
CopyDimsToDesc<N>(extended_input0_shape, desc0_out);
CopyDimsToDesc<N>(extended_input1_shape, desc1_out);
// Walk over each dimension. If the extents are equal do nothing.
// Otherwise, set the desc with extent 1 to have extent equal to the other and
// stride 0.
for (int i = 0; i < N; ++i) {
const int extent0 = extended_input0_shape.Dims(i);
const int extent1 = extended_input1_shape.Dims(i);
if (extent0 != extent1) {
if (extent0 == 1) {
desc0_out->strides[i] = 0;
desc0_out->extents[i] = extent1;
} else {
TFLITE_DCHECK_EQ(extent1, 1);
desc1_out->strides[i] = 0;
desc1_out->extents[i] = extent0;
}
}
}
}
| null | null | 220,822
|
115912483632591017285563643565313645693
| 31
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
IntegerType SaturatingAddNonGemmlowp(IntegerType a, IntegerType b) {
static_assert(std::is_same<IntegerType, void>::value, "unimplemented");
return a;
}
| null | null | 220,823
|
266399585836589077378566493535924156180
| 4
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
IntegerType SaturatingSub(IntegerType a, IntegerType b) {
static_assert(std::is_same<IntegerType, void>::value, "unimplemented");
return a;
}
| null | null | 220,824
|
81190786828421063993694095460407513929
| 4
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
inline std::int32_t SaturatingAddNonGemmlowp(std::int32_t a, std::int32_t b) {
std::int64_t a64 = a;
std::int64_t b64 = b;
std::int64_t sum = a64 + b64;
return static_cast<std::int32_t>(std::min(
static_cast<std::int64_t>(std::numeric_limits<std::int32_t>::max()),
std::max(
static_cast<std::int64_t>(std::numeric_limits<std::int32_t>::min()),
sum)));
}
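A minimal sketch of the saturation behaviour, assuming the int32 overload above and the standard headers named below:

#include <cassert>
#include <cstdint>
#include <limits>

inline void SaturatingAddNonGemmlowpExample() {
  const std::int32_t int_max = std::numeric_limits<std::int32_t>::max();
  assert(SaturatingAddNonGemmlowp(int_max, 1) == int_max);  // clamps instead of overflowing
  assert(SaturatingAddNonGemmlowp(-5, 3) == -2);            // ordinary addition otherwise
}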
| null | null | 220,825
|
68207203068925591194900797605950531695
| 10
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
inline int32_t MultiplyByQuantizedMultiplier(int64_t x,
int32_t quantized_multiplier,
int shift) {
// Inputs:
// - quantized_multiplier has fixed point at bit 31
// - shift is -31 to +7 (negative for right shift)
//
// Assumptions: The following input ranges are assumed
// - quantize_scale>=0 (the usual range is (1<<30) to (1>>31)-1)
// - scaling is chosen so final scaled result fits in int32_t
// - input x is in the range -(1<<47) <= x < (1<<47)
assert(quantized_multiplier >= 0);
assert(shift >= -31 && shift < 8);
assert(x >= -(static_cast<int64_t>(1) << 47) &&
x < (static_cast<int64_t>(1) << 47));
int32_t reduced_multiplier = (quantized_multiplier < 0x7FFF0000)
? ((quantized_multiplier + (1 << 15)) >> 16)
: 0x7FFF;
int total_shift = 15 - shift;
x = (x * (int64_t)reduced_multiplier) + ((int64_t)1 << (total_shift - 1));
int32_t result = x >> total_shift;
return result;
}
| null | null | 220,826
|
45540297947294216363461734246201791915
| 24
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
gemmlowp::FixedPoint<tRawType, tIntegerBits> SaturatingSub(
gemmlowp::FixedPoint<tRawType, tIntegerBits> a,
gemmlowp::FixedPoint<tRawType, tIntegerBits> b) {
return gemmlowp::FixedPoint<tRawType, tIntegerBits>::FromRaw(
SaturatingSub(a.raw(), b.raw()));
}
| null | null | 220,827
|
209957506217025820503508897706768031737
| 6
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
inline void NdArrayDescsForElementwiseBroadcast(const Dims<N>& input0_dims,
const Dims<N>& input1_dims,
NdArrayDesc<N>* desc0_out,
NdArrayDesc<N>* desc1_out) {
TFLITE_DCHECK(desc0_out != nullptr);
TFLITE_DCHECK(desc1_out != nullptr);
// Copy dims to desc.
for (int i = 0; i < N; ++i) {
desc0_out->extents[i] = input0_dims.sizes[i];
desc0_out->strides[i] = input0_dims.strides[i];
desc1_out->extents[i] = input1_dims.sizes[i];
desc1_out->strides[i] = input1_dims.strides[i];
}
// Walk over each dimension. If the extents are equal do nothing.
// Otherwise, set the desc with extent 1 to have extent equal to the other and
// stride 0.
for (int i = 0; i < N; ++i) {
const int extent0 = ArraySize(input0_dims, i);
const int extent1 = ArraySize(input1_dims, i);
if (extent0 != extent1) {
if (extent0 == 1) {
desc0_out->strides[i] = 0;
desc0_out->extents[i] = extent1;
} else {
TFLITE_DCHECK_EQ(extent1, 1);
desc1_out->strides[i] = 0;
desc1_out->extents[i] = extent0;
}
}
}
}
| null | null | 220,828
|
184808821099006979381881470835622859779
| 33
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
log_x_for_x_greater_than_or_equal_to_1(
gemmlowp::FixedPoint<int32_t, InputIntegerBits> input_val) {
static_assert(
OutputIntegerBits >= min_log_x_output_bits(InputIntegerBits),
"Output integer bits must be sufficient to accommodate logs of inputs.");
return log_x_for_x_greater_than_or_equal_to_1_impl<OutputIntegerBits,
InputIntegerBits>(
input_val);
}
| null | null | 220,829
|
259671052873654435456299669654538128612
| 9
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
inline int32_t MultiplyByQuantizedMultiplier(int32_t x,
int32_t quantized_multiplier,
int shift) {
TFLITE_DCHECK(quantized_multiplier >= 0);
TFLITE_DCHECK(shift >= -31 && shift <= 30);
const int64_t total_shift = 31 - shift;
const int64_t round = static_cast<int64_t>(1) << (total_shift - 1);
int64_t result = x * static_cast<int64_t>(quantized_multiplier) + round;
result = result >> total_shift;
TFLITE_DCHECK(result >= std::numeric_limits<int32_t>::min() &&
result <= std::numeric_limits<int32_t>::max());
return static_cast<int32_t>(result);
}
| null | null | 220,830
|
48168224722131196195371301507276631196
| 15
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
gemmlowp::FixedPoint<tRawType, tIntegerBits> SaturatingAddNonGemmlowp(
gemmlowp::FixedPoint<tRawType, tIntegerBits> a,
gemmlowp::FixedPoint<tRawType, tIntegerBits> b) {
return gemmlowp::FixedPoint<tRawType, tIntegerBits>::FromRaw(
SaturatingAddNonGemmlowp(a.raw(), b.raw()));
}
| null | null | 220,831
|
293661064396094414146709591487589328754
| 6
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
inline int SubscriptToIndex(const NdArrayDesc<4>& desc, int i0, int i1, int i2,
int i3) {
TFLITE_DCHECK(i0 >= 0 && i0 < desc.extents[0]);
TFLITE_DCHECK(i1 >= 0 && i1 < desc.extents[1]);
TFLITE_DCHECK(i2 >= 0 && i2 < desc.extents[2]);
TFLITE_DCHECK(i3 >= 0 && i3 < desc.extents[3]);
return i0 * desc.strides[0] + i1 * desc.strides[1] + i2 * desc.strides[2] +
i3 * desc.strides[3];
}
| null | null | 220,832
|
294476551875924914447610455306368188721
| 9
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
typename std::enable_if<DIM != N - 1, void>::type NDOpsHelperImpl(
const NdArrayDesc<N>& output, const Calc& calc, int indexes[N]) {
for (indexes[DIM] = 0; indexes[DIM] < output.extents[DIM]; ++indexes[DIM]) {
NDOpsHelperImpl<N, DIM + 1, Calc>(output, calc, indexes);
}
}
| null | null | 220,833
|
131725818670892389989356160840166828904
| 6
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
void optimized_ops_preload_l1_stream(const T* ptr) {
#ifdef __GNUC__
// builtin offered by GCC-compatible compilers including clang
__builtin_prefetch(ptr, /* 0 means read */ 0, /* 0 means no locality */ 0);
#else
(void)ptr;
#endif
}
| null | null | 220,834
|
225675338897087398501957268846280069854
| 8
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
typename std::enable_if<DIM == N - 1, void>::type NDOpsHelperImpl(
const NdArrayDesc<N>& output, const Calc& calc, int indexes[N]) {
for (indexes[DIM] = 0; indexes[DIM] < output.extents[DIM]; ++indexes[DIM]) {
calc(indexes);
}
}
| null | null | 220,835
|
222445125449900373539201569249898661155
| 6
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
int CountLeadingZeros(T integer_input) {
static_assert(std::is_unsigned<T>::value,
"Only unsigned integer types handled.");
#if defined(__GNUC__)
return integer_input ? __builtin_clz(integer_input)
: std::numeric_limits<T>::digits;
#else
if (integer_input == 0) {
return std::numeric_limits<T>::digits;
}
const T one_in_leading_positive = static_cast<T>(1)
<< (std::numeric_limits<T>::digits - 1);
int leading_zeros = 0;
while (integer_input < one_in_leading_positive) {
integer_input <<= 1;
++leading_zeros;
}
return leading_zeros;
#endif
}
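A minimal sketch, assuming the template above; the argument must be unsigned, and zero maps to the full digit count instead of hitting the builtin's undefined case.

#include <cassert>
#include <cstdint>

inline void CountLeadingZerosExample() {
  assert(CountLeadingZeros(std::uint32_t{1}) == 31);
  assert(CountLeadingZeros(std::uint32_t{0}) == 32);           // zero is handled explicitly
  assert(CountLeadingZeros(std::uint32_t{0x80000000u}) == 0);  // top bit already set
}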
| null | null | 220,836
|
236252747147032092403944052938982726459
| 21
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
inline int32_t GetReciprocal(int32_t x, int x_integer_digits,
int* num_bits_over_unit) {
int headroom_plus_one = CountLeadingZeros(static_cast<uint32_t>(x));
// This is the number of bits to the left of the binary point above 1.0.
// Consider x=1.25. In that case shifted_scale=0.8 and
// no later adjustment will be needed.
*num_bits_over_unit = x_integer_digits - headroom_plus_one;
const int32_t shifted_sum_minus_one =
static_cast<int32_t>((static_cast<uint32_t>(x) << headroom_plus_one) -
(static_cast<uint32_t>(1) << 31));
gemmlowp::FixedPoint<int32_t, 0> shifted_scale =
gemmlowp::one_over_one_plus_x_for_x_in_0_1(
gemmlowp::FixedPoint<int32_t, 0>::FromRaw(shifted_sum_minus_one));
return shifted_scale.raw();
}
| null | null | 220,837
|
117752959692607637828485618418438638827
| 16
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
inline void GetInvSqrtQuantizedMultiplierExp(int32_t input, int reverse_shift,
int32_t* output_inv_sqrt,
int* output_shift) {
TFLITE_DCHECK_GE(input, 0);
if (input <= 1) {
// Handle the input value 1 separately to avoid overflow in that case
// in the general computation below (b/143972021). Also handle 0 as if it
// were a 1. 0 is an invalid input here (divide by zero) and 1 is a valid
// but rare/unrealistic input value. We can expect both to occur in some
// incompletely trained models, but probably not in fully trained models.
*output_inv_sqrt = std::numeric_limits<std::int32_t>::max();
*output_shift = 0;
return;
}
TFLITE_DCHECK_GT(input, 1);
*output_shift = 11;
while (input >= (1 << 29)) {
input /= 4;
++*output_shift;
}
const unsigned max_left_shift_bits =
CountLeadingZeros(static_cast<uint32_t>(input)) - 1;
const unsigned max_left_shift_bit_pairs = max_left_shift_bits / 2;
const unsigned left_shift_bit_pairs = max_left_shift_bit_pairs - 1;
*output_shift -= left_shift_bit_pairs;
input <<= 2 * left_shift_bit_pairs;
TFLITE_DCHECK_GE(input, (1 << 27));
TFLITE_DCHECK_LT(input, (1 << 29));
using gemmlowp::FixedPoint;
using gemmlowp::Rescale;
using gemmlowp::SaturatingRoundingMultiplyByPOT;
// Using 3 integer bits gives us enough room for the internal arithmetic in
// this Newton-Raphson iteration.
using F3 = FixedPoint<int32_t, 3>;
using F0 = FixedPoint<int32_t, 0>;
const F3 fixedpoint_input = F3::FromRaw(input >> 1);
const F3 fixedpoint_half_input =
SaturatingRoundingMultiplyByPOT<-1>(fixedpoint_input);
const F3 fixedpoint_half_three =
GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(F3, (1 << 28) + (1 << 27), 1.5);
// Newton-Raphson iteration
// Naive unoptimized starting guess: x = 1
F3 x = F3::One();
// Naive unoptimized number of iterations: 5
for (int i = 0; i < 5; i++) {
const F3 x3 = Rescale<3>(x * x * x);
x = Rescale<3>(fixedpoint_half_three * x - fixedpoint_half_input * x3);
}
const F0 fixedpoint_half_sqrt_2 =
GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(F0, 1518500250, std::sqrt(2.) / 2.);
x = x * fixedpoint_half_sqrt_2;
*output_inv_sqrt = x.raw();
if (*output_shift < 0) {
*output_inv_sqrt <<= -*output_shift;
*output_shift = 0;
}
// Convert right shift (right is positive) to left shift.
*output_shift *= reverse_shift;
}
| null | null | 220,838
|
42340128513957336098112414332903163293
| 59
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
void optimized_ops_prefetch_write_l1_keep(const T* ptr) {
#ifdef __GNUC__
// builtin offered by GCC-compatible compilers including clang
__builtin_prefetch(ptr, /* 1 means write */ 1, /* 3 means high locality */ 3);
#else
(void)ptr;
#endif
}
| null | null | 220,839
|
284956209896441058853928558522128778230
| 8
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
inline void CopyDimsToDesc(const RuntimeShape& input_shape,
NdArrayDesc<N>* desc_out) {
int desc_stride = 1;
for (int i = N - 1; i >= 0; --i) {
desc_out->extents[i] = input_shape.Dims(i);
desc_out->strides[i] = desc_stride;
desc_stride *= input_shape.Dims(i);
}
}
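A worked example of the stride computation above (hypothetical shape, shown as comments because constructing a RuntimeShape needs the rest of the TFLite headers):

// For a RuntimeShape of {2, 3, 4} and N == 3 this would fill:
//   extents = {2, 3, 4}
//   strides = {12, 4, 1}   // row-major: the last dimension is contiguous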
| null | null | 220,840
|
245744225330047446888177073242029009443
| 9
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
inline void BiasAndClamp(float clamp_min, float clamp_max, int bias_size,
const float* bias_data, int array_size,
float* array_data) {
if (bias_size == 0) return;
// Note: see b/132215220: in May 2019 we thought it would be OK to replace
// this with the Eigen one-liner:
  // return (array.colwise() + bias).cwiseMax(clamp_min).cwiseMin(clamp_max).
// This turned out to severely regress performance: +4ms (i.e. 8%) on
// MobileNet v2 / 1.0 / 224. So we keep custom NEON code for now.
TFLITE_DCHECK_EQ((array_size % bias_size), 0);
#ifdef USE_NEON
float* array_ptr = array_data;
float* array_end_ptr = array_ptr + array_size;
const auto clamp_min_vec = vdupq_n_f32(clamp_min);
const auto clamp_max_vec = vdupq_n_f32(clamp_max);
for (; array_ptr != array_end_ptr; array_ptr += bias_size) {
int i = 0;
for (; i <= bias_size - 16; i += 16) {
auto b0 = vld1q_f32(bias_data + i);
auto b1 = vld1q_f32(bias_data + i + 4);
auto b2 = vld1q_f32(bias_data + i + 8);
auto b3 = vld1q_f32(bias_data + i + 12);
auto a0 = vld1q_f32(array_ptr + i);
auto a1 = vld1q_f32(array_ptr + i + 4);
auto a2 = vld1q_f32(array_ptr + i + 8);
auto a3 = vld1q_f32(array_ptr + i + 12);
auto x0 = vaddq_f32(a0, b0);
auto x1 = vaddq_f32(a1, b1);
auto x2 = vaddq_f32(a2, b2);
auto x3 = vaddq_f32(a3, b3);
x0 = vmaxq_f32(clamp_min_vec, x0);
x1 = vmaxq_f32(clamp_min_vec, x1);
x2 = vmaxq_f32(clamp_min_vec, x2);
x3 = vmaxq_f32(clamp_min_vec, x3);
x0 = vminq_f32(clamp_max_vec, x0);
x1 = vminq_f32(clamp_max_vec, x1);
x2 = vminq_f32(clamp_max_vec, x2);
x3 = vminq_f32(clamp_max_vec, x3);
vst1q_f32(array_ptr + i, x0);
vst1q_f32(array_ptr + i + 4, x1);
vst1q_f32(array_ptr + i + 8, x2);
vst1q_f32(array_ptr + i + 12, x3);
}
for (; i <= bias_size - 4; i += 4) {
auto b = vld1q_f32(bias_data + i);
auto a = vld1q_f32(array_ptr + i);
auto x = vaddq_f32(a, b);
x = vmaxq_f32(clamp_min_vec, x);
x = vminq_f32(clamp_max_vec, x);
vst1q_f32(array_ptr + i, x);
}
for (; i < bias_size; i++) {
array_ptr[i] = ActivationFunctionWithMinMax(array_ptr[i] + bias_data[i],
clamp_min, clamp_max);
}
}
#else // not NEON
for (int array_offset = 0; array_offset < array_size;
array_offset += bias_size) {
for (int i = 0; i < bias_size; i++) {
array_data[array_offset + i] = ActivationFunctionWithMinMax(
array_data[array_offset + i] + bias_data[i], clamp_min, clamp_max);
}
}
#endif
}
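A minimal usage sketch, assuming the definition above (and ActivationFunctionWithMinMax from the same header). It also exercises the bias_size == 0 early return that the commit message in this record refers to; the values are illustrative only.

inline void BiasAndClampExample() {
  float bias[2] = {1.0f, -1.0f};
  float array[4] = {0.0f, 0.0f, 10.0f, -10.0f};  // two chunks of bias_size elements
  BiasAndClamp(/*clamp_min=*/0.0f, /*clamp_max=*/6.0f, /*bias_size=*/2, bias,
               /*array_size=*/4, array);
  // array is now {1, 0, 6, 0}: the bias is added to each bias_size chunk,
  // then the result is clamped to [0, 6].
  BiasAndClamp(0.0f, 6.0f, /*bias_size=*/0, /*bias_data=*/nullptr, 4, array);
  // bias_size == 0 returns immediately, so the array is left untouched.
}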
| null | null | 220,841
|
314033549322071930204667162912602626561
| 66
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
log_x_for_x_greater_than_or_equal_to_1_impl(
gemmlowp::FixedPoint<int32_t, InputIntegerBits> input_val) {
// assert(__builtin_clz(0u) >= std::numeric_limits<uint32_t>::digits - 1);
// assert(__builtin_clz(0u) <= std::numeric_limits<uint32_t>::digits);
using FixedPoint0 = gemmlowp::FixedPoint<int32_t, 0>;
// The reason for accumulating the result with an extra bit of headroom is
// that z_pow_2_adj * log_2 might be saturated, and adding num_scaled *
// recip_denom will otherwise introduce an error.
static constexpr int kAccumIntegerBits = OutputIntegerBits + 1;
using FixedPointAccum = gemmlowp::FixedPoint<int32_t, kAccumIntegerBits>;
const FixedPoint0 log_2 = GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(
FixedPoint0, 1488522236, std::log(2.0));
const FixedPoint0 sqrt_sqrt_half = GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(
FixedPoint0, 1805811301, std::sqrt(std::sqrt(0.5)));
const FixedPoint0 sqrt_half = GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(
FixedPoint0, 1518500250, std::sqrt(0.5));
const FixedPoint0 one_quarter =
GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(FixedPoint0, 536870912, 1.0 / 4.0);
const FixedPoint0 alpha_n = GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(
FixedPoint0, 117049297, 11.0 / 240.0 * std::sqrt(std::sqrt(2.0)));
const FixedPoint0 alpha_d = GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(
FixedPoint0, 127690142, 1.0 / 20.0 * std::sqrt(std::sqrt(2.0)));
const FixedPoint0 alpha_i = GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(
FixedPoint0, 1057819769,
2.0 / std::sqrt(std::sqrt(2.0)) - std::sqrt(std::sqrt(2.0)));
const FixedPoint0 alpha_f = GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(
FixedPoint0, 638450708, 1.0 / 4.0 * std::sqrt(std::sqrt(2.0)));
const FixedPointAccum shifted_quarter =
gemmlowp::Rescale<kAccumIntegerBits>(one_quarter);
// Reinterpret the input value as Q0.31, because we will figure out the
// required shift "ourselves" instead of using, say, Rescale.
FixedPoint0 z_a = FixedPoint0::FromRaw(input_val.raw());
// z_a_pow_2 = input_integer_bits - z_a_headroom;
int z_a_headroom_plus_1 = CountLeadingZeros(static_cast<uint32_t>(z_a.raw()));
FixedPoint0 r_a_tmp =
SaturatingRoundingMultiplyByPOTParam(z_a, (z_a_headroom_plus_1 - 1));
const int32_t r_a_raw =
SaturatingRoundingMultiplyByPOTParam((r_a_tmp * sqrt_half).raw(), 1);
// z_pow_2_adj = max(z_pow_2_a - 0.75, z_pow_2_b - 0.25);
// z_pow_2_adj = max(InputIntegerBits - z_a_headroom_plus_1 + 0.25,
// InputIntegerBits - z_b_headroom - 0.25);
const FixedPointAccum z_a_pow_2_adj = SaturatingAddNonGemmlowp(
FixedPointAccum::FromRaw(SaturatingRoundingMultiplyByPOTParam(
static_cast<int32_t>(InputIntegerBits - z_a_headroom_plus_1),
31 - kAccumIntegerBits)),
shifted_quarter);
// z_b is treated like z_a, but premultiplying by sqrt(0.5).
FixedPoint0 z_b = z_a * sqrt_half;
int z_b_headroom = CountLeadingZeros(static_cast<uint32_t>(z_b.raw())) - 1;
const int32_t r_b_raw =
SaturatingRoundingMultiplyByPOTParam(z_a.raw(), z_b_headroom);
const FixedPointAccum z_b_pow_2_adj = SaturatingSub(
FixedPointAccum::FromRaw(SaturatingRoundingMultiplyByPOTParam(
static_cast<int32_t>(InputIntegerBits - z_b_headroom),
31 - kAccumIntegerBits)),
shifted_quarter);
const FixedPoint0 r = FixedPoint0::FromRaw(std::min(r_a_raw, r_b_raw));
const FixedPointAccum z_pow_2_adj = FixedPointAccum::FromRaw(
std::max(z_a_pow_2_adj.raw(), z_b_pow_2_adj.raw()));
const FixedPoint0 p = gemmlowp::RoundingHalfSum(r, sqrt_sqrt_half);
FixedPoint0 q = r - sqrt_sqrt_half;
q = q + q;
const FixedPoint0 common_sq = q * q;
const FixedPoint0 num = q * r + q * common_sq * alpha_n;
const FixedPoint0 denom_minus_one_0 =
p * (alpha_i + q + alpha_d * common_sq) + alpha_f * q;
const FixedPoint0 recip_denom =
one_over_one_plus_x_for_x_in_0_1(denom_minus_one_0);
const FixedPointAccum num_scaled = gemmlowp::Rescale<kAccumIntegerBits>(num);
return gemmlowp::Rescale<OutputIntegerBits>(z_pow_2_adj * log_2 +
num_scaled * recip_denom);
}
| null | null | 220,842
|
334300092735192581541408839577293531795
| 81
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
inline int32x4x4_t MultiplyByQuantizedMultiplier4Rows(
int32x4x4_t input_val, int32_t quantized_multiplier, int shift) {
TFLITE_DCHECK(quantized_multiplier >= 0);
const int right_shift = std::min(-1, shift);
const int left_shift = shift - right_shift;
const int32x4_t multiplier_dup = vdupq_n_s32(quantized_multiplier);
const int32x4_t left_shift_dup = vdupq_n_s32(left_shift);
const int32x4_t right_shift_dup = vdupq_n_s32(right_shift);
int32x4x4_t result;
result.val[0] = vrshlq_s32(
vqdmulhq_s32(vshlq_s32(input_val.val[0], left_shift_dup), multiplier_dup),
right_shift_dup);
result.val[1] = vrshlq_s32(
vqdmulhq_s32(vshlq_s32(input_val.val[1], left_shift_dup), multiplier_dup),
right_shift_dup);
result.val[2] = vrshlq_s32(
vqdmulhq_s32(vshlq_s32(input_val.val[2], left_shift_dup), multiplier_dup),
right_shift_dup);
result.val[3] = vrshlq_s32(
vqdmulhq_s32(vshlq_s32(input_val.val[3], left_shift_dup), multiplier_dup),
right_shift_dup);
return result;
}
| null | null | 220,843
|
155148051893702773944024781076714703916
| 30
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
inline Integer FloorLog2(Integer n) {
static_assert(std::is_integral<Integer>::value, "");
static_assert(std::is_signed<Integer>::value, "");
static_assert(sizeof(Integer) == 4 || sizeof(Integer) == 8, "");
TFLITE_CHECK_GT(n, 0);
if (sizeof(Integer) == 4) {
return 30 - CountLeadingSignBits(n);
} else {
return 62 - CountLeadingSignBits(n);
}
}
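A minimal sketch, assuming the definition above; the input must be strictly positive.

#include <cassert>
#include <cstdint>

inline void FloorLog2Example() {
  assert(FloorLog2(std::int32_t{1}) == 0);
  assert(FloorLog2(std::int32_t{8}) == 3);
  assert(FloorLog2(std::int32_t{9}) == 3);  // rounds down to the nearest power of two
}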
| null | null | 220,844
|
320460874830161415509995506403174095511
| 11
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
inline std::int32_t SaturatingSub(std::int32_t a, std::int32_t b) {
std::int64_t a64 = a;
std::int64_t b64 = b;
std::int64_t diff = a64 - b64;
return static_cast<std::int32_t>(std::min(
static_cast<std::int64_t>(std::numeric_limits<std::int32_t>::max()),
std::max(
static_cast<std::int64_t>(std::numeric_limits<std::int32_t>::min()),
diff)));
}
| null | null | 220,845
|
205506390823499732120680583455281530543
| 10
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
Integer RoundDown(Integer i) {
return i - (i % Modulus);
}
| null | null | 220,846
|
334040631951836110274764991801933467599
| 3
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
inline void GetActivationMinMax(FusedActivationFunctionType ac,
float* output_activation_min,
float* output_activation_max) {
switch (ac) {
case FusedActivationFunctionType::kNone:
*output_activation_min = std::numeric_limits<float>::lowest();
*output_activation_max = std::numeric_limits<float>::max();
break;
case FusedActivationFunctionType::kRelu:
*output_activation_min = 0.f;
*output_activation_max = std::numeric_limits<float>::max();
break;
case FusedActivationFunctionType::kRelu1:
*output_activation_min = -1.f;
*output_activation_max = 1.f;
break;
case FusedActivationFunctionType::kRelu6:
*output_activation_min = 0.f;
*output_activation_max = 6.f;
break;
}
}
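A minimal usage sketch, assuming the enum and definition above:

inline void GetActivationMinMaxExample() {
  float lo = 0.0f, hi = 0.0f;
  GetActivationMinMax(FusedActivationFunctionType::kRelu6, &lo, &hi);
  // lo == 0.0f and hi == 6.0f; kNone would yield the full float range instead.
}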
| null | null | 220,847
|
99443928356634090891201714924603043692
| 22
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
inline int CountLeadingSignBits(T integer_input) {
static_assert(std::is_signed<T>::value, "Only signed integer types handled.");
#if defined(__GNUC__) && !defined(__clang__)
return integer_input ? __builtin_clrsb(integer_input)
: std::numeric_limits<T>::digits;
#else
using U = typename std::make_unsigned<T>::type;
return integer_input >= 0
? CountLeadingZeros(static_cast<U>(integer_input)) - 1
: integer_input != std::numeric_limits<T>::min()
? CountLeadingZeros(2 * static_cast<U>(-integer_input) - 1)
: 0;
#endif
}
| null | null | 220,848
|
283680310917308807163763575806389631310
| 14
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
inline int32x4x4_t MultiplyByQuantizedMultiplier4Rows(
int32x4x4_t input_val, int32_t quantized_multiplier, int shift) {
const int left_shift = std::max(shift, 0);
const int right_shift = std::min(shift, 0);
int32x4x4_t result;
int32x4_t multiplier_dup = vdupq_n_s32(quantized_multiplier);
int32x4_t left_shift_dup = vdupq_n_s32(left_shift);
int32x4_t right_shift_dup = vdupq_n_s32(right_shift);
result.val[0] =
vrshlq_s32(vqrdmulhq_s32(vshlq_s32(input_val.val[0], left_shift_dup),
multiplier_dup),
right_shift_dup);
result.val[1] =
vrshlq_s32(vqrdmulhq_s32(vshlq_s32(input_val.val[1], left_shift_dup),
multiplier_dup),
right_shift_dup);
result.val[2] =
vrshlq_s32(vqrdmulhq_s32(vshlq_s32(input_val.val[2], left_shift_dup),
multiplier_dup),
right_shift_dup);
result.val[3] =
vrshlq_s32(vqrdmulhq_s32(vshlq_s32(input_val.val[3], left_shift_dup),
multiplier_dup),
right_shift_dup);
return result;
}
| null | null | 220,849
|
31866879239790865581171750961888628085
| 32
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
inline LutOutT lut_lookup_with_interpolation(int16_t value,
const LutOutT* lut) {
static_assert(std::is_same<LutOutT, int8_t>::value ||
std::is_same<LutOutT, int16_t>::value,
"Only LUTs with int8 or int16 outputs are supported.");
  // 512 base values; the extra entry lut[512] is only used to calculate the slope
const uint16_t index = static_cast<uint16_t>(256 + (value >> 7));
assert(index < 512 && "LUT index out of range.");
const int16_t offset = value & 0x7f;
// Base and slope are Q0.x
const LutOutT base = lut[index];
const LutOutT slope = lut[index + 1] - lut[index];
// Q0.x * Q0.7 = Q0.(x + 7)
// Round and convert from Q0.(x + 7) to Q0.x
const int delta = (slope * offset + 64) >> 7;
// Q0.15 + Q0.15
return static_cast<LutOutT>(base + delta);
}
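To make the index/offset split above concrete, a minimal sketch of the decomposition only (hypothetical value; it does not build a LUT): the top nine bits of the int16 input select one of the 512 base entries, and the low seven bits interpolate toward the next entry.

#include <cstdint>

inline void LutIndexDecompositionExample() {
  const int16_t value = 129;             // == 1 * 128 + 1
  const int index = 256 + (value >> 7);  // == 257: one entry above the midpoint
  const int offset = value & 0x7f;       // == 1: 1/128 of the way to lut[258]
  (void)index;
  (void)offset;
}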
| null | null | 220,850
|
135496053780885826804079167400581248139
| 21
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
inline int32_t MultiplyByQuantizedMultiplierSmallerThanOneExp(
int32_t x, int32_t quantized_multiplier, int shift) {
TFLITE_DCHECK_LE(shift, 0);
return MultiplyByQuantizedMultiplier(x, quantized_multiplier, shift);
}
| null | null | 220,851
|
233260956671320999553994013542924374307
| 5
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
inline int32_t MultiplyByQuantizedMultiplierSmallerThanOneExp(
int32_t x, int32_t quantized_multiplier, int left_shift) {
using gemmlowp::RoundingDivideByPOT;
using gemmlowp::SaturatingRoundingDoublingHighMul;
return RoundingDivideByPOT(
SaturatingRoundingDoublingHighMul(x, quantized_multiplier), -left_shift);
}
| null | null | 220,852
|
319219100089482412702775846070713304288
| 7
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
IntegerType SaturatingRoundingMultiplyByPOTParam(IntegerType x, int exponent) {
if (exponent == 0) {
return x;
}
using ScalarIntegerType =
typename gemmlowp::FixedPointRawTypeTraits<IntegerType>::ScalarRawType;
const IntegerType min =
gemmlowp::Dup<IntegerType>(std::numeric_limits<ScalarIntegerType>::min());
const IntegerType max =
gemmlowp::Dup<IntegerType>(std::numeric_limits<ScalarIntegerType>::max());
const int ScalarIntegerTypeBits = 8 * sizeof(ScalarIntegerType);
const std::int32_t threshold =
((1 << (ScalarIntegerTypeBits - 1 - exponent)) - 1);
const IntegerType positive_mask =
gemmlowp::MaskIfGreaterThan(x, gemmlowp::Dup<IntegerType>(threshold));
const IntegerType negative_mask =
gemmlowp::MaskIfLessThan(x, gemmlowp::Dup<IntegerType>(-threshold));
IntegerType result = gemmlowp::ShiftLeft(x, exponent);
result = gemmlowp::SelectUsingMask(positive_mask, max, result);
result = gemmlowp::SelectUsingMask(negative_mask, min, result);
return result;
}
| null | null | 220,853
|
162834921159998692631471316767031873141
| 24
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
inline void NDOpsHelper(const NdArrayDesc<N>& output, const Calc& calc) {
int indexes[N] = {0};
NDOpsHelperImpl<N, 0, Calc>(output, calc, indexes);
}
| null | null | 220,854
|
299225907598377702980362014890741261540
| 4
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
inline int8_t lut_lookup(int16_t value, const int8_t* lut) {
return lut_lookup_with_interpolation(value, lut);
}
| null | null | 220,855
|
116899028413505780062960153766129951103
| 3
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
inline std::int16_t SaturatingSub(std::int16_t a, std::int16_t b) {
std::int32_t a32 = a;
std::int32_t b32 = b;
std::int32_t diff = a32 - b32;
return static_cast<std::int16_t>(
std::min(static_cast<int32_t>(32767),
std::max(static_cast<int32_t>(-32768), diff)));
}
| null | null | 220,856
|
6755230372672395766956850491278523645
| 8
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
inline int SubscriptToIndex(const NdArrayDesc<8>& desc, int indexes[8]) {
return indexes[0] * desc.strides[0] + indexes[1] * desc.strides[1] +
indexes[2] * desc.strides[2] + indexes[3] * desc.strides[3] +
indexes[4] * desc.strides[4] + indexes[5] * desc.strides[5] +
indexes[6] * desc.strides[6] + indexes[7] * desc.strides[7];
}
| null | null | 220,857
|
119546929465394705663675055731881669216
| 6
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
inline int16_t lut_lookup(int8_t value, const int16_t* lut) {
return lut[128 + value];
}
| null | null | 220,858
|
219089792869907585829818027295600938162
| 3
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
inline int32_t MultiplyByQuantizedMultiplierGreaterThanOne(
int32_t x, int32_t quantized_multiplier, int left_shift) {
using gemmlowp::SaturatingRoundingDoublingHighMul;
return SaturatingRoundingDoublingHighMul(x * (1 << left_shift),
quantized_multiplier);
}
| null | null | 220,859
|
138502093428826278031937267859049675794
| 6
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
inline T ActivationFunctionWithMinMax(T x, T output_activation_min,
T output_activation_max) {
using std::max;
using std::min;
return min(max(x, output_activation_min), output_activation_max);
}
| null | null | 220,860
|
225584642578707997304548918684221605728
| 6
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
constexpr int min_log_x_output_bits(int input_bits) {
return input_bits > 90 ? 7
: input_bits > 44 ? 6
: input_bits > 21 ? 5
: input_bits > 10 ? 4
: input_bits > 4 ? 3
: input_bits > 1 ? 2
: 1;
}
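Since the function above is constexpr, its thresholds can be checked at compile time; a minimal sketch assuming only that definition:

static_assert(min_log_x_output_bits(4) == 2, "2 to 4 integer bits need 2 output bits");
static_assert(min_log_x_output_bits(5) == 3, "5 to 10 integer bits need 3 output bits");
static_assert(min_log_x_output_bits(22) == 5, "22 to 44 integer bits need 5 output bits");
static_assert(min_log_x_output_bits(91) == 7, "more than 90 integer bits need 7 output bits");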
| null | null | 220,861
|
251895988386844645694128667795150607446
| 9
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
inline int16_t lut_lookup(int16_t value, const int16_t* lut) {
return lut_lookup_with_interpolation(value, lut);
}
| null | null | 220,862
|
249713576572800211684814423934379319906
| 3
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
inline void NdArrayDescsForElementwiseBroadcast(
const RuntimeShape& input0_shape, const RuntimeShape& input1_shape,
const RuntimeShape& input2_shape, NdArrayDesc<N>* desc0_out,
NdArrayDesc<N>* desc1_out, NdArrayDesc<N>* desc2_out) {
TFLITE_DCHECK(desc0_out != nullptr);
TFLITE_DCHECK(desc1_out != nullptr);
TFLITE_DCHECK(desc2_out != nullptr);
auto extended_input0_shape = RuntimeShape::ExtendedShape(N, input0_shape);
auto extended_input1_shape = RuntimeShape::ExtendedShape(N, input1_shape);
auto extended_input2_shape = RuntimeShape::ExtendedShape(N, input2_shape);
// Copy dims to desc, calculating strides.
CopyDimsToDesc<N>(extended_input0_shape, desc0_out);
CopyDimsToDesc<N>(extended_input1_shape, desc1_out);
CopyDimsToDesc<N>(extended_input2_shape, desc2_out);
// Walk over each dimension. If the extents are equal do nothing.
// Otherwise, set the desc with extent 1 to have extent equal to the other and
// stride 0.
for (int i = 0; i < N; ++i) {
const int extent0 = extended_input0_shape.Dims(i);
const int extent1 = extended_input1_shape.Dims(i);
const int extent2 = extended_input2_shape.Dims(i);
int extent = extent0;
if (extent1 != 1) extent = extent1;
if (extent2 != 1) extent = extent2;
TFLITE_DCHECK(extent0 == 1 || extent0 == extent);
TFLITE_DCHECK(extent1 == 1 || extent1 == extent);
TFLITE_DCHECK(extent2 == 1 || extent2 == extent);
if (!(extent0 == extent1 && extent1 == extent2)) {
if (extent0 == 1) {
desc0_out->strides[i] = 0;
desc0_out->extents[i] = extent;
}
if (extent1 == 1) {
desc1_out->strides[i] = 0;
desc1_out->extents[i] = extent;
}
if (extent2 == 1) {
desc2_out->strides[i] = 0;
desc2_out->extents[i] = extent;
}
}
}
}
| null | null | 220,863
|
207174376548085910428774356456543207854
| 49
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 0
|
inline int32_t MultiplyByQuantizedMultiplierGreaterThanOne(
int32_t x, int32_t quantized_multiplier, int shift) {
TFLITE_DCHECK_GE(shift, 0);
return MultiplyByQuantizedMultiplier(x, quantized_multiplier, shift);
}
| null | null | 220,864
|
200610934503643596322506226899659912283
| 5
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
|
other
|
tensorflow
|
92dba16749fae36c246bec3f9ba474d9ddeb7662
| 0
|
Status DependencyOptimizer::OptimizeDependencies() {
SetVector<int> nodes_to_simplify;
std::set<int> nodes_to_delete;
for (int i = 0; i < optimized_graph_->node_size(); ++i) {
const NodeDef& node = optimized_graph_->node(i);
if (IsNoOp(node) || IsIdentity(node) || IsIdentityN(node) ||
IsConstant(node) || SafeToConvertToNoOp(node)) {
nodes_to_simplify.PushBack(i);
}
}
while (!nodes_to_simplify.Empty()) {
int node_to_simplify = nodes_to_simplify.PopBack();
// Discard nodes that were marked for deletion already.
while (nodes_to_delete.find(node_to_simplify) != nodes_to_delete.end()) {
node_to_simplify = nodes_to_simplify.PopBack();
}
OptimizeNode(node_to_simplify, &nodes_to_simplify, &nodes_to_delete);
}
if (fetch_nodes_known_) {
VLOG(1) << "Deleted " << nodes_to_delete.size() << " out of "
<< optimized_graph_->node_size() << " nodes.";
EraseNodesFromGraph(nodes_to_delete, optimized_graph_);
node_map_.reset(new NodeMap(optimized_graph_));
BuildNodeToIdx();
}
return Status::OK();
}
| null | null | 220,900
|
130706505612888941050171561381082021593
| 28
|
Prevent a null-pointer dereference / `CHECK`-fail in grappler.
PiperOrigin-RevId: 409187354
Change-Id: I369c249cca32e6c56ec193f0ebbf2f2768fc7d43
|
other
|
tensorflow
|
92dba16749fae36c246bec3f9ba474d9ddeb7662
| 0
|
bool RemoveControlInput(NodeDef* node, const string& control_input_to_remove,
NodeMap* node_map) {
for (int pos = node->input_size() - 1; pos >= 0; --pos) {
const string& input = node->input(pos);
if (input[0] != '^') break;
if (input == control_input_to_remove) {
node->mutable_input()->SwapElements(pos, node->input_size() - 1);
node->mutable_input()->RemoveLast();
node_map->RemoveOutput(NodeName(input), node->name());
return true;
}
}
return false;
}
| null | null | 220,901
|
80445113311466334814961648074976447773
| 14
|
Prevent a null-pointer dereference / `CHECK`-fail in grappler.
PiperOrigin-RevId: 409187354
Change-Id: I369c249cca32e6c56ec193f0ebbf2f2768fc7d43
|
other
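RemoveControlInput above deletes an element from a protobuf repeated field with the swap-with-last-then-RemoveLast idiom. A standalone sketch of that O(1), order-destroying removal on a plain std::vector (not the protobuf API):

// Swap the victim with the last element, then pop the back: constant-time
// removal at the cost of element order.
#include <cassert>
#include <string>
#include <utility>
#include <vector>

bool RemoveFirstMatch(std::vector<std::string>* v, const std::string& target) {
  for (size_t pos = 0; pos < v->size(); ++pos) {
    if ((*v)[pos] == target) {
      std::swap((*v)[pos], v->back());  // move the victim to the end
      v->pop_back();                    // then drop it in O(1)
      return true;
    }
  }
  return false;
}

int main() {
  std::vector<std::string> inputs = {"^a", "^b", "^c"};
  assert(RemoveFirstMatch(&inputs, "^b"));
  assert(inputs.size() == 2);
  assert(!RemoveFirstMatch(&inputs, "^z"));
  return 0;
}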
|
tensorflow
|
92dba16749fae36c246bec3f9ba474d9ddeb7662
| 0
|
Status DependencyOptimizer::TransitiveReduction() {
// PRECONDITION: optimized_graph_ must be sorted topologically.
const int num_nodes = optimized_graph_->node_size();
// Set up a compressed version of the graph to save a constant factor in the
// expensive algorithm below. Also cache the set of control outputs and the
// highest index of a target of any control output from each node.
int num_controls = 0;
std::vector<std::vector<int>> outputs(num_nodes);
std::vector<gtl::InlinedVector<std::pair<int, int>, 2>> control_outputs(
num_nodes);
// target_range[i] contains the range of node indices for which to compute
// longest paths starting from node i.
std::vector<std::pair<int, int>> target_range(num_nodes, {num_nodes, -1});
for (int node_idx = 0; node_idx < num_nodes; ++node_idx) {
const NodeDef& node = optimized_graph_->node(node_idx);
if (ModifiesFrameInfo(node) || !HasOpDef(node)) {
// Ignore function nodes and nodes that modify frame info.
continue;
}
for (int input_slot = 0; input_slot < node.input_size(); ++input_slot) {
const string& input = node.input(input_slot);
const NodeDef* input_node = node_map_->GetNode(input);
if (ModifiesFrameInfo(*input_node) || IsMerge(*input_node)) {
// Ignore edges from nodes that modify frame info and from Merge nodes,
        // because we cannot know which of its input paths executes.
continue;
}
const int input_node_idx = node_to_idx_[input_node];
outputs[input_node_idx].push_back(node_idx);
target_range[input_node_idx].first =
std::min(target_range[input_node_idx].first, node_idx);
if (IsControlInput(input)) {
++num_controls;
control_outputs[input_node_idx].emplace_back(node_idx, input_slot);
target_range[input_node_idx].second =
std::max(target_range[input_node_idx].second, node_idx);
}
}
}
// Run the longest path in DAG algorithm for each source node that has control
// outputs. If, for any target node of a control output, there exists a path
// of length > 1, we can drop that control dependency.
int num_controls_removed = 0;
std::vector<DistanceFromSource> longest_distance(num_nodes);
// Map from target_index -> set of (input_slot, source_index), representing
// the control edges to remove. We sort them in reverse order by input slot,
  // such that, when we swap them out, we don't clobber the
// node(target).input() repeated field.
typedef std::pair<int, int> InputSlotAndSource;
absl::flat_hash_map<
int, std::set<InputSlotAndSource, std::greater<InputSlotAndSource>>>
control_edges_to_remove;
for (int source = 0; source < num_nodes; ++source) {
if (target_range[source].first >= target_range[source].second ||
target_range[source].second <= source) {
continue;
}
// Compute the set of nodes in the transitive fanout of source with
    // topological sort index in [target_range.first : target_range.second]
// to which there exists a path of length 2 or more from source.
std::fill(longest_distance.begin() + target_range[source].first,
longest_distance.begin() + target_range[source].second + 1, ZERO);
LongestPathsLowerBounds(source, target_range[source], outputs,
&longest_distance);
// If the longest path from source to target of a control dependency is
// longer than 1, there exists an alternate path, and we can eliminate the
// redundant direct control dependency.
for (const auto& control_output : control_outputs[source]) {
const int target = control_output.first;
if (longest_distance[target] == TWO_OR_GREATER) {
const int input_slot = control_output.second;
control_edges_to_remove[target].emplace(input_slot, source);
}
}
}
for (const auto& it : control_edges_to_remove) {
const int target = it.first;
NodeDef* target_node = optimized_graph_->mutable_node(target);
for (const InputSlotAndSource& slot_and_source : it.second) {
const int input_slot = slot_and_source.first;
const int source = slot_and_source.second;
const NodeDef& source_node = optimized_graph_->node(source);
CHECK_LT(input_slot, target_node->input_size());
target_node->mutable_input()->SwapElements(input_slot,
target_node->input_size() - 1);
node_map_->RemoveOutput(source_node.name(), target_node->name());
target_node->mutable_input()->RemoveLast();
++num_controls_removed;
}
}
VLOG(1) << "Removed " << num_controls_removed << " out of " << num_controls
<< " control dependencies";
return Status::OK();
}
| null | null | 220,902
|
179563495225180125614095211127677070095
| 96
|
Prevent a null-pointer dereference / `CHECK`-fail in grappler.
PiperOrigin-RevId: 409187354
Change-Id: I369c249cca32e6c56ec193f0ebbf2f2768fc7d43
|
other
|
tensorflow
|
92dba16749fae36c246bec3f9ba474d9ddeb7662
| 0
|
int DependencyOptimizer::NumEdgesIfBypassed(
const NodeDef& node, const std::vector<NodeDef*>& output_nodes) const {
const bool is_multi_input_identity_n =
IsIdentityN(node) && !IsIdentityNSingleInput(node);
const int num_outputs = output_nodes.size();
const int num_inputs = node.input_size();
if (is_multi_input_identity_n) {
// multi-input identity_n with input/output control dependencies will likely
    // increase the number of edges after optimization.
int num_edges_if_bypassed(0);
for (const string& input_node_name : node.input()) {
if (IsControlInput(input_node_name)) {
num_edges_if_bypassed += num_outputs;
} else {
++num_edges_if_bypassed;
}
}
for (auto consumer : output_nodes) {
for (int j = 0; j < consumer->input_size(); ++j) {
const TensorId consumer_input = ParseTensorName(consumer->input(j));
if (consumer_input.node() == node.name()) {
if (IsControlInput(consumer_input)) {
num_edges_if_bypassed += num_inputs;
} else {
++num_edges_if_bypassed;
}
}
}
}
return num_edges_if_bypassed;
} else {
return num_inputs * num_outputs;
}
}
| null | null | 220,903
|
44537672593893563572742922570619760255
| 36
|
Prevent a null-pointer dereference / `CHECK`-fail in grappler.
PiperOrigin-RevId: 409187354
Change-Id: I369c249cca32e6c56ec193f0ebbf2f2768fc7d43
|
other
|
tensorflow
|
92dba16749fae36c246bec3f9ba474d9ddeb7662
| 0
|
void LongestPathsLowerBounds(
int source, const std::pair<int, int>& target_range,
const std::vector<std::vector<int>>& outputs,
std::vector<DistanceFromSource>* longest_distance) {
std::deque<int> queue;
queue.emplace_front(source);
while (!queue.empty()) {
int node = queue.front();
queue.pop_front();
for (int fanout : outputs[node]) {
// 1) Only nodes in the target range can be on paths from source to one of
// its control outputs.
// 2) Since we only need a lower bound on the longest distance, we can
      // skip nodes for which we have already proven that there is a path of
      // length > 1 from the source.
if (fanout >= target_range.first && fanout <= target_range.second &&
(*longest_distance)[fanout] != TWO_OR_GREATER) {
(*longest_distance)[fanout] =
(*longest_distance)[fanout] == ZERO ? ONE : TWO_OR_GREATER;
queue.emplace_front(fanout);
}
}
}
}
| null | null | 220,904
|
146808720697295052559126548705416293688
| 24
|
Prevent a null-pointer dereference / `CHECK`-fail in grappler.
PiperOrigin-RevId: 409187354
Change-Id: I369c249cca32e6c56ec193f0ebbf2f2768fc7d43
|
other
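LongestPathsLowerBounds above propagates a saturating distance class from source: a node is marked ONE the first time it is reached and upgraded to TWO_OR_GREATER when it is reached again, which is what the optimizer uses as evidence of a path of length two or more. A self-contained sketch of that propagation on a toy adjacency list (local stand-ins, not the grappler types):

#include <cassert>
#include <deque>
#include <vector>

enum Dist { ZERO = 0, ONE = 1, TWO_OR_GREATER = 2 };

std::vector<Dist> Classify(int source, const std::vector<std::vector<int>>& out) {
  std::vector<Dist> dist(out.size(), ZERO);
  std::deque<int> queue = {source};
  while (!queue.empty()) {
    int node = queue.front();
    queue.pop_front();
    for (int fanout : out[node]) {
      if (dist[fanout] != TWO_OR_GREATER) {  // already saturated: skip
        // First reach -> ONE; reached again -> TWO_OR_GREATER, then revisit.
        dist[fanout] = (dist[fanout] == ZERO) ? ONE : TWO_OR_GREATER;
        queue.push_front(fanout);
      }
    }
  }
  return dist;
}

int main() {
  // Edges 0->1, 0->2, 1->2: node 2 is reachable directly and via node 1.
  std::vector<std::vector<int>> out = {{1, 2}, {2}, {}};
  std::vector<Dist> d = Classify(0, out);
  assert(d[1] == ONE);
  assert(d[2] == TWO_OR_GREATER);
  return 0;
}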
|
tensorflow
|
92dba16749fae36c246bec3f9ba474d9ddeb7662
| 0
|
void DependencyOptimizer::OptimizeNode(int node_idx,
SetVector<int>* nodes_to_simplify,
std::set<int>* nodes_to_delete) {
NodeDef* node = optimized_graph_->mutable_node(node_idx);
const bool is_noop = IsNoOp(*node);
const bool is_identity = IsIdentity(*node) || IsIdentityNSingleInput(*node);
const bool is_multi_input_identity =
IsIdentityN(*node) && !IsIdentityNSingleInput(*node);
const string node_name = node->name();
// Constant nodes with no input control dependency are always executed early,
// so we can prune all their output control dependencies.
if (IsConstant(*node) && node->input_size() == 0) {
const auto output_nodes = node_map_->GetOutputs(node_name);
for (NodeDef* fanout : output_nodes) {
bool optimize_fanout = false;
bool data_connection = false;
for (int i = fanout->input_size() - 1; i >= 0; --i) {
const TensorId input_tensor = ParseTensorName(fanout->input(i));
if (input_tensor.node() == node_name) {
if (input_tensor.index() < 0) {
fanout->mutable_input()->SwapElements(i, fanout->input_size() - 1);
fanout->mutable_input()->RemoveLast();
optimize_fanout = true;
} else {
data_connection = true;
}
}
}
if (optimize_fanout) {
nodes_to_simplify->PushBack(node_to_idx_[fanout]);
if (!data_connection) {
node_map_->RemoveOutput(node_name, fanout->name());
}
}
}
if (node_map_->GetOutputs(node_name).empty() && fetch_nodes_known_ &&
nodes_to_preserve_.find(node_name) == nodes_to_preserve_.end()) {
// Mark the node for deletion.
nodes_to_delete->insert(node_to_idx_[node]);
}
return;
}
// Change ops that only have control dependencies as outputs to NoOps.
if (!is_noop && SafeToConvertToNoOp(*node)) {
VLOG(2) << "***** Replacing " << node_name << " (" << node->op()
<< ") with NoOp.";
// The outputs of this node are not consumed. Replace its inputs with
// control dependencies and replace the op itself with the NoOp op.
std::unordered_set<string> ctrl_inputs;
int pos = 0;
while (pos < node->input_size()) {
const string old_input = node->input(pos);
if (IsControlInput(old_input)) {
if (!ctrl_inputs.insert(old_input).second) {
// We found a duplicate control input. Remove it.
node->mutable_input()->SwapElements(pos, node->input_size() - 1);
node->mutable_input()->RemoveLast();
} else {
++pos;
}
continue;
}
// Replace a normal input with a control input.
const string ctrl_input = ConstantFolding::AddControlDependency(
old_input, optimized_graph_, node_map_.get());
ctrl_inputs.insert(ctrl_input);
node->set_input(pos, ctrl_input);
node_map_->UpdateInput(node_name, old_input, ctrl_input);
const NodeDef* old_input_node = node_map_->GetNode(old_input);
nodes_to_simplify->PushBack(node_to_idx_[old_input_node]);
++pos;
}
node->set_op("NoOp");
EraseRegularNodeAttributes(node);
DedupControlInputs(node);
nodes_to_simplify->PushBack(node_to_idx_[node]);
return;
}
// Remove NoOp nodes if the product of their fan-in and fan-out is less than
// or equal to the sum of the fan-in and fan-out. The non-trivial rewrites
// take the following form:
//
// Case a)
// x --^> +------+ x --^> +---+
// y --^> | NoOp | --^> a ==> y --^> | a |
// ... | | ... | |
// z --^> +------+ z --^> +---+
//
// Case b)
// +------+ --^> a +---+ --^> a
// x --^> | NoOp | --^> b ==> | x | --^> b
// | | ... | | ...
// +------+ --^> c +---+ --^> c
// Case c)
// +------+ x ---^> a
// x --^> | NoOp | --^> a ==> \/
// y --^> | | --^> b /\
// +------+ y ---^> b
//
// We only apply this optimization if we don't increase the number of control
// edges across device boundaries, e.g. in cases a) and b) if NoOp and
// a and x, respectively, are on the same device. Control edges across device
// boundaries require inter-device communication (Send/Recv pairs to be
// inserted in the graph), which is very costly.
//
// We also remove identity nodes, subject to the same constraints on number of
// resulting control edges and device boundary crossings:
//
// Case a)
// +----------+ ---> a +---+ ---> a
// x --> | Identity | --^> b ==> | x | --^> b
// | | ... | | ...
// +----------+ --^> c +---+ --^> c
//
// Case b)
// x ---> +----------+ ---> a x ---> +---+
// y --^> | Identity | ==> y --^> | a |
// ... | | ... | |
// z --^> +----------+ z --^> +---+
//
// Case c)
// +----------+ x ---> +---+
// x ---> | Identity | ---> a ==> \--^> | a |
// y --^> | | --^> b /\ +---+
// +----------+ y --^> b
if (is_noop || ((is_identity || is_multi_input_identity) &&
SafeToRemoveIdentity(*node))) {
const int num_inputs = node->input_size();
std::vector<NodeDef*> input_nodes;
for (int i = 0; i < num_inputs; ++i) {
NodeDef* input_node = node_map_->GetNode(node->input(i));
if (input_node == nullptr) {
LOG(ERROR) << "Invalid input " << node->input(i);
return;
}
input_nodes.push_back(input_node);
}
const auto& output_node_set = node_map_->GetOutputs(node_name);
const std::vector<NodeDef*> output_nodes(output_node_set.begin(),
output_node_set.end());
if (!BypassingNodeIsBeneficial(*node, input_nodes, output_nodes)) {
return;
}
VLOG(2) << "***** Rerouting input around\n" << node->DebugString();
// Now remove the node and re-wire its inputs to its outputs.
for (auto consumer : output_nodes) {
bool updated_consumer = false;
VLOG(2) << "consumer before:\n" << consumer->DebugString();
// Remove dependency on node from consumer.
for (int i = 0; i < num_inputs; ++i) {
const NodeDef* input = input_nodes[i];
// Forward dependency from input to consumer if it doesn't already
// depend on it.
if ((is_identity && i == 0) ||
(is_multi_input_identity && !IsControlInput(node->input(i)))) {
// Replace regular input from Identity node.
string new_input;
const string& input_to_forward = node->input(i);
CHECK(!IsControlInput(input_to_forward));
for (int j = 0; j < consumer->input_size(); ++j) {
const TensorId old_input = ParseTensorName(consumer->input(j));
if (old_input.node() == node_name) {
if (old_input.index() == i) {
// Regular input
new_input = input_to_forward;
node_map_->UpdateInput(consumer->name(),
string(old_input.node()), new_input);
consumer->set_input(j, new_input);
} else if (old_input.index() == -1) {
// Control dependency
new_input = AsControlDependency(NodeName(input_to_forward));
node_map_->UpdateInput(consumer->name(),
string(old_input.node()), new_input);
consumer->set_input(j, new_input);
}
}
}
updated_consumer = true;
} else {
// Forward dependency from input to consumer if it doesn't already
// depend on it.
if (node_map_->GetOutputs(input->name()).count(consumer) == 0) {
consumer->add_input(AsControlDependency(input->name()));
node_map_->AddOutput(input->name(), consumer->name());
nodes_to_simplify->PushBack(node_to_idx_[input]);
updated_consumer = true;
}
}
}
updated_consumer |= RemoveControlInput(
consumer, AsControlDependency(node_name), node_map_.get());
if (updated_consumer) {
nodes_to_simplify->PushBack(node_to_idx_[consumer]);
}
VLOG(2) << "consumer after:\n" << consumer->DebugString();
}
node_map_->RemoveOutputs(node_name);
if (fetch_nodes_known_ &&
nodes_to_preserve_.find(node_name) == nodes_to_preserve_.end()) {
// Mark the node for deletion.
nodes_to_delete->insert(node_idx);
// Disconnect the node from its inputs to enable further optimizations.
node_map_->RemoveInputs(node_name);
node->clear_input();
}
}
}
| null | null | 220,905
|
164476998576730710914635998729209152047
| 213
|
Prevent a null-pointer dereference / `CHECK`-fail in grappler.
PiperOrigin-RevId: 409187354
Change-Id: I369c249cca32e6c56ec193f0ebbf2f2768fc7d43
|
other
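The nullptr check on input_node in OptimizeNode above is the guard the commit message refers to: a by-name lookup can fail on a malformed graph, so the result must be tested before it is dereferenced. A small sketch of the same pattern with stand-in Graph/Node types, not the grappler NodeMap API:

#include <cassert>
#include <iostream>
#include <string>
#include <unordered_map>

struct Node { std::string device; };

const Node* GetNode(const std::unordered_map<std::string, Node>& graph,
                    const std::string& name) {
  auto it = graph.find(name);
  return it == graph.end() ? nullptr : &it->second;
}

bool RerouteInput(const std::unordered_map<std::string, Node>& graph,
                  const std::string& input_name) {
  const Node* input = GetNode(graph, input_name);
  if (input == nullptr) {            // malformed graph: report and bail out
    std::cerr << "Invalid input " << input_name << "\n";
    return false;
  }
  return !input->device.empty();     // safe to dereference from here on
}

int main() {
  std::unordered_map<std::string, Node> graph = {{"a", {"/cpu:0"}}};
  assert(RerouteInput(graph, "a"));
  assert(!RerouteInput(graph, "missing"));  // handled, no null dereference
  return 0;
}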
|
tensorflow
|
92dba16749fae36c246bec3f9ba474d9ddeb7662
| 0
|
void DependencyOptimizer::GroupCrossDeviceControlEdges(bool host_granularity) {
VLOG(1)
<< "DependencyOptimizer::GroupCrossDeviceControlEdges host_granularity="
<< host_granularity;
const int num_nodes = optimized_graph_->node_size();
for (int i = 0; i < num_nodes; ++i) {
NodeDef* node = optimized_graph_->mutable_node(i);
if (node->device().empty()) continue;
string rest, node_device = node->device();
if (host_granularity) {
DeviceNameUtils::SplitDeviceName(node->device(), &node_device, &rest);
}
// Creates new noop nodes for devices on which multiple control inputs are
// located.
// Map keyed by device name to the newly introduced Noop node for that
// device. A nullptr value means that we have only seen a single node on
// that device.
std::map<string, NodeDef*> noops;
int num_noops = 0;
for (int j = 0; j < node->input_size(); ++j) {
if (IsControlInput(node->input(j))) {
const NodeDef* input = node_map_->GetNode(node->input(j));
if (input == nullptr || input->device().empty()) continue;
string input_device = input->device();
if (host_granularity) {
DeviceNameUtils::SplitDeviceName(input->device(), &input_device,
&rest);
}
if (input_device != node_device) {
VLOG(2) << "Cross-device " << node->name() << " " << input->device()
<< " -> " << node->device();
auto emplace_result = noops.emplace(input_device, nullptr);
if (!emplace_result.second &&
emplace_result.first->second == nullptr) {
VLOG(2) << "Duplicate input device from " << node->name();
// This is the second cross-device control input from the same
// device. Creates an intermediate noop node on that device.
string group_name;
NodeDef* noop;
// Creates a fresh node name; there may be conflicting names from
// a previous iteration of the optimizer.
do {
group_name = AddPrefixToNodeName(
node->name(),
strings::StrCat("GroupCrossDeviceControlEdges_", num_noops));
noop = node_map_->GetNode(group_name);
++num_noops;
} while (noop != nullptr);
noop = optimized_graph_->add_node();
noop->set_name(group_name);
noop->set_device(input->device());
noop->set_op("NoOp");
node_map_->AddNode(noop->name(), noop);
emplace_result.first->second = noop;
VLOG(1) << "GroupCrossDeviceControlEdges: Added "
<< SummarizeNodeDef(*noop);
}
}
}
}
// Reroute existing control edges to go via the newly introduced NoOp nodes.
int pos = 0;
while (pos < node->input_size()) {
const string& input_name = node->input(pos);
if (IsControlInput(input_name)) {
NodeDef* input = node_map_->GetNode(input_name);
if (input == nullptr) {
++pos;
} else {
string input_device = input->device();
if (host_granularity) {
DeviceNameUtils::SplitDeviceName(input->device(), &input_device,
&rest);
}
auto it = noops.find(input_device);
if (it == noops.end() || it->second == nullptr) {
++pos;
} else {
VLOG(2) << "Rewriting input from " << input_name;
node->mutable_input()->SwapElements(pos, node->input_size() - 1);
node->mutable_input()->RemoveLast();
it->second->add_input(AsControlDependency(*input));
node_map_->UpdateOutput(input_name, node->name(),
it->second->name());
}
}
} else {
++pos;
}
}
for (const auto& entry : noops) {
if (entry.second) {
node->add_input(AsControlDependency(*entry.second));
node_map_->AddOutput(entry.second->name(), node->name());
}
}
}
}
| null | null | 220,906
|
84279980906591605300247496632262614007
| 101
|
Prevent a null-pointer dereference / `CHECK`-fail in grappler.
PiperOrigin-RevId: 409187354
Change-Id: I369c249cca32e6c56ec193f0ebbf2f2768fc7d43
|
other
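GroupCrossDeviceControlEdges above picks a fresh NoOp name with a do/while loop that retries until the candidate is unused. A minimal sketch of that loop with a plain std::set standing in for the NodeMap lookup:

#include <cassert>
#include <set>
#include <string>

std::string FreshName(const std::set<std::string>& taken,
                      const std::string& base) {
  int counter = 0;
  std::string candidate;
  do {
    candidate = base + "_GroupCrossDeviceControlEdges_" + std::to_string(counter);
    ++counter;
  } while (taken.count(candidate) > 0);  // retry while a node already owns it
  return candidate;
}

int main() {
  std::set<std::string> taken = {"n_GroupCrossDeviceControlEdges_0"};
  assert(FreshName(taken, "n") == "n_GroupCrossDeviceControlEdges_1");
  assert(FreshName({}, "n") == "n_GroupCrossDeviceControlEdges_0");
  return 0;
}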
|
tensorflow
|
92dba16749fae36c246bec3f9ba474d9ddeb7662
| 0
|
void DependencyOptimizer::CleanControlInputs() {
for (int i = 0; i < optimized_graph_->node_size(); ++i) {
DedupControlInputs(optimized_graph_->mutable_node(i));
}
}
| null | null | 220,907
|
215375382894070548193265871875406046854
| 5
|
Prevent a null-pointer dereference / `CHECK`-fail in grappler.
PiperOrigin-RevId: 409187354
Change-Id: I369c249cca32e6c56ec193f0ebbf2f2768fc7d43
|
other
|
tensorflow
|
92dba16749fae36c246bec3f9ba474d9ddeb7662
| 0
|
bool DependencyOptimizer::BypassingNodeIsBeneficial(
const NodeDef& node, const std::vector<NodeDef*>& input_nodes,
const std::vector<NodeDef*>& output_nodes) const {
const bool is_identity = IsIdentity(node) || IsIdentityNSingleInput(node);
const bool is_multi_input_identity_n =
IsIdentityN(node) && !IsIdentityNSingleInput(node);
const int num_outputs = output_nodes.size();
const int num_inputs = node.input_size();
if (NumEdgesIfBypassed(node, output_nodes) > num_inputs + num_outputs) {
return false;
}
// Make sure that we don't increase the number of edges that cross
// device boundaries.
if ((num_inputs == 1 && num_outputs > 1 &&
input_nodes[0]->device() != node.device()) ||
(num_inputs > 1 && num_outputs == 1 &&
output_nodes[0]->device() != node.device())) {
return false;
}
// TODO(rmlarsen): Not all device crossings are equally expensive.
// Assign a cost to each based on device affinity and compute a
// cost before and after.
const string& node_dev = node.device();
int num_cross_in = 0;
for (NodeDef* input_node : input_nodes) {
num_cross_in += static_cast<int>(input_node->device() != node_dev);
}
int num_cross_out = 0;
for (NodeDef* output_node : output_nodes) {
num_cross_out += static_cast<int>(output_node->device() != node_dev);
}
// Make sure we do not increase the number of device crossings.
const int num_cross_before = num_cross_in + num_cross_out;
int num_cross_after = 0;
for (NodeDef* input_node : input_nodes) {
for (NodeDef* output_node : output_nodes) {
num_cross_after +=
static_cast<int>(input_node->device() != output_node->device());
}
}
if (num_cross_after > num_cross_before) {
return false;
}
if ((is_identity || is_multi_input_identity_n) && num_cross_in > 0 &&
num_cross_out > 0 && num_cross_after > 0) {
// This identity node follows a device crossing, so it might be
// following a _Recv node after partitioning. Do not remove such nodes,
// unless they only have consumers on the same device as themselves.
return false;
}
return true;
}
| null | null | 220,908
|
312544264129674724211053206537639836134
| 58
|
Prevent a null-pointer dereference / `CHECK`-fail in grappler.
PiperOrigin-RevId: 409187354
Change-Id: I369c249cca32e6c56ec193f0ebbf2f2768fc7d43
|
other
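BypassingNodeIsBeneficial above compares the number of cross-device edges before and after bypassing a node, where bypassing replaces the node's in/out edges by every input-to-output pair. A toy version of that comparison with devices as plain strings, not NodeDef fields:

#include <cassert>
#include <string>
#include <vector>

bool FewerOrEqualCrossings(const std::string& node_dev,
                           const std::vector<std::string>& input_devs,
                           const std::vector<std::string>& output_devs) {
  int before = 0;
  for (const auto& d : input_devs) before += (d != node_dev);
  for (const auto& d : output_devs) before += (d != node_dev);
  int after = 0;
  for (const auto& in : input_devs)
    for (const auto& out : output_devs) after += (in != out);
  return after <= before;  // only bypass when crossings do not increase
}

int main() {
  // Input and output on the node's own device: no crossings before or after.
  assert(FewerOrEqualCrossings("/cpu:0", {"/cpu:0"}, {"/cpu:0"}));
  // Node sits between two GPU nodes: 2 crossings before, 0 after bypassing.
  assert(FewerOrEqualCrossings("/cpu:0", {"/gpu:0"}, {"/gpu:0"}));
  return 0;
}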
|
tensorflow
|
92dba16749fae36c246bec3f9ba474d9ddeb7662
| 0
|
bool DependencyOptimizer::SafeToRemoveIdentity(const NodeDef& node) const {
if (!IsIdentity(node) && !IsIdentityN(node)) {
return true;
}
if (nodes_to_preserve_.find(node.name()) != nodes_to_preserve_.end()) {
return false;
}
if (!fetch_nodes_known_) {
// The output values of this node may be needed.
return false;
}
if (node.input_size() < 1) {
// Node lacks input, is invalid
return false;
}
const NodeDef* input = node_map_->GetNode(NodeName(node.input(0)));
if (input == nullptr) {
VLOG(1) << "node = " << node.name() << " input = " << node.input(0);
return false;
}
// Don't remove Identity nodes corresponding to Variable reads or following
// Recv.
if (IsVariable(*input) || IsRecv(*input)) {
return false;
}
for (const auto& consumer : node_map_->GetOutputs(node.name())) {
if (node.input_size() > 1 && (IsRetval(*consumer) || IsMerge(*consumer))) {
return false;
}
if (IsSwitch(*input)) {
for (const string& consumer_input : consumer->input()) {
if (consumer_input == AsControlDependency(node.name())) {
return false;
}
}
}
}
return true;
}
| null | null | 220,909
|
31084999862529364047221013067634307621
| 42
|
Prevent a null-pointer dereference / `CHECK`-fail in grappler.
PiperOrigin-RevId: 409187354
Change-Id: I369c249cca32e6c56ec193f0ebbf2f2768fc7d43
|
other
|
tensorflow
|
92dba16749fae36c246bec3f9ba474d9ddeb7662
| 0
|
Status DependencyOptimizer::Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* optimized_graph) {
optimized_graph_ = optimized_graph;
*optimized_graph_ = item.graph;
nodes_to_preserve_ = item.NodesToPreserve();
fetch_nodes_known_ = !item.fetch.empty();
CleanControlInputs();
const int num_iterations = 2;
for (int iteration = 0; iteration < num_iterations; ++iteration) {
GRAPPLER_RETURN_IF_DEADLINE_EXCEEDED();
Status topo_sort_status;
// Perform topological sort to prepare the graph for transitive reduction.
topo_sort_status = TopologicalSort(optimized_graph_);
// Set up index-based graph datastructures to speed up analysis steps below.
node_map_.reset(new NodeMap(optimized_graph_));
BuildNodeToIdx();
if (topo_sort_status.ok()) {
// Remove redundant control dependencies.
TF_RETURN_IF_ERROR(TransitiveReduction());
} else {
LOG(ERROR) << "Iteration = " << iteration
<< ", topological sort failed with message: "
<< topo_sort_status.error_message();
}
// Turn nodes with only control outputs into NoOps, prune NoOp and Identity
// nodes.
TF_RETURN_IF_ERROR(OptimizeDependencies());
// Dedup control inputs.
CleanControlInputs();
// Merge multiple control edges from the same device.
GroupCrossDeviceControlEdges(/*host_granularity=*/false);
// Merge control edges from the same host to reduce RPC traffic.
GroupCrossDeviceControlEdges(/*host_granularity=*/true);
}
return Status::OK();
}
| null | null | 220,910
|
161189725961773544933778262135947949654
| 42
|
Prevent a null-pointer dereference / `CHECK`-fail in grappler.
PiperOrigin-RevId: 409187354
Change-Id: I369c249cca32e6c56ec193f0ebbf2f2768fc7d43
|
other
|
tensorflow
|
92dba16749fae36c246bec3f9ba474d9ddeb7662
| 0
|
bool DependencyOptimizer::SafeToConvertToNoOp(const NodeDef& node) const {
if (HasRegularOutputs(node, *node_map_)) {
// The output values of this node may be needed.
VLOG(3) << "Not safe to convert '" << node.name()
<< " to NoOp. Node has outputs.";
return false;
}
if (!fetch_nodes_known_) {
VLOG(3) << "Not safe to convert '" << node.name()
<< " to NoOp. Fetches unknown.";
return false;
}
if (nodes_to_preserve_.find(node.name()) != nodes_to_preserve_.end()) {
VLOG(3) << "Not safe to convert to NoOp: " << node.name()
<< " is in preserve set.";
return false;
}
if (IsMerge(node) || IsSwitch(node) || ModifiesFrameInfo(node)) {
VLOG(3) << "Not safe to convert '" << node.name()
<< " to NoOp. Node modifies frame info.";
return false;
}
// Ops reading variables are marked as stateful, but are safe to remove if
// redundant.
static const absl::flat_hash_set<string>* gather_ops =
new absl::flat_hash_set<string>{"Gather", "GatherV2", "GatherNd",
"ResourceGather", "ResourceGatherNd"};
const bool is_variable_read =
IsReadVariableOp(node) || IsReadVariablesOp(node) ||
gather_ops->find(node.op()) != gather_ops->end();
if (!is_variable_read && !IsFreeOfSideEffect(node)) {
VLOG(3) << "Not safe to convert '" << node.name()
<< " to NoOp. Node has side effect.";
return false;
}
if (node.op().rfind("Submodel", 0) == 0) {
return false;
}
const OpDef* op_def = nullptr;
Status status = OpRegistry::Global()->LookUpOpDef(node.op(), &op_def);
if (!status.ok() || op_def->output_arg_size() == 0) {
return false;
}
const std::unordered_set<string> do_not_rewrite_ops{
"Assert", "CheckNumerics", "_Retval",
"_Arg", "_ParallelConcatUpdate", "TPUExecute",
"TPUCompile", "ControlTrigger"};
if (do_not_rewrite_ops.find(node.op()) != do_not_rewrite_ops.end()) {
return false;
}
if (!SafeToRemoveIdentity(node)) {
return false;
}
return true;
}
| null | null | 220,911
|
73449684201122921265997791684539555602
| 55
|
Prevent a null-pointer dereference / `CHECK`-fail in grappler.
PiperOrigin-RevId: 409187354
Change-Id: I369c249cca32e6c56ec193f0ebbf2f2768fc7d43
|
other
|
tensorflow
|
92dba16749fae36c246bec3f9ba474d9ddeb7662
| 0
|
void DependencyOptimizer::BuildNodeToIdx() {
// Set up &node -> index map.
node_to_idx_.clear();
for (int i = 0; i < optimized_graph_->node_size(); ++i) {
const NodeDef& node = optimized_graph_->node(i);
node_to_idx_[&node] = i;
}
}
| null | null | 220,912
|
335261170638367569289610209327566997120
| 8
|
Prevent a null-pointer dereference / `CHECK`-fail in grappler.
PiperOrigin-RevId: 409187354
Change-Id: I369c249cca32e6c56ec193f0ebbf2f2768fc7d43
|
other
|
gpac
|
5f2c2a16d30229b6241f02fa28e3d6b810d64858
| 0
|
GF_Err mpgviddmx_process(GF_Filter *filter)
{
GF_MPGVidDmxCtx *ctx = gf_filter_get_udta(filter);
GF_FilterPacket *pck, *dst_pck;
u64 byte_offset;
s64 vosh_start = -1;
s64 vosh_end = -1;
GF_Err e;
char *data;
u8 *start;
u32 pck_size;
s32 remain;
//always reparse duration
if (!ctx->duration.num)
mpgviddmx_check_dur(filter, ctx);
pck = gf_filter_pid_get_packet(ctx->ipid);
if (!pck) {
if (gf_filter_pid_is_eos(ctx->ipid)) {
mpgviddmx_enqueue_or_dispatch(ctx, NULL, GF_TRUE, GF_TRUE);
if (ctx->opid)
gf_filter_pid_set_eos(ctx->opid);
if (ctx->src_pck) gf_filter_pck_unref(ctx->src_pck);
ctx->src_pck = NULL;
return GF_EOS;
}
return GF_OK;
}
data = (char *) gf_filter_pck_get_data(pck, &pck_size);
byte_offset = gf_filter_pck_get_byte_offset(pck);
start = data;
remain = pck_size;
	//input pid sets some timescale - we flushed pending data, update cts
if (!ctx->resume_from && ctx->timescale) {
u64 ts = gf_filter_pck_get_cts(pck);
if (ts != GF_FILTER_NO_TS) {
if (!ctx->cts || !ctx->recompute_cts)
ctx->cts = ts;
}
ts = gf_filter_pck_get_dts(pck);
if (ts != GF_FILTER_NO_TS) {
if (!ctx->dts || !ctx->recompute_cts)
ctx->dts = ts;
if (!ctx->prev_dts) ctx->prev_dts = ts;
else if (ctx->prev_dts != ts) {
u64 diff = ts;
diff -= ctx->prev_dts;
if (!ctx->cur_fps.den) ctx->cur_fps.den = (u32) diff;
else if (ctx->cur_fps.den > diff)
ctx->cur_fps.den = (u32) diff;
}
}
gf_filter_pck_get_framing(pck, &ctx->input_is_au_start, &ctx->input_is_au_end);
		//this will force CTS recomputation of each frame
if (ctx->recompute_cts) ctx->input_is_au_start = GF_FALSE;
if (ctx->src_pck) gf_filter_pck_unref(ctx->src_pck);
ctx->src_pck = pck;
gf_filter_pck_ref_props(&ctx->src_pck);
}
//we stored some data to find the complete vosh, aggregate this packet with current one
if (!ctx->resume_from && ctx->hdr_store_size) {
if (ctx->hdr_store_alloc < ctx->hdr_store_size + pck_size) {
ctx->hdr_store_alloc = ctx->hdr_store_size + pck_size;
ctx->hdr_store = gf_realloc(ctx->hdr_store, sizeof(char)*ctx->hdr_store_alloc);
}
memcpy(ctx->hdr_store + ctx->hdr_store_size, data, sizeof(char)*pck_size);
if (byte_offset != GF_FILTER_NO_BO) {
if (byte_offset >= ctx->hdr_store_size)
byte_offset -= ctx->hdr_store_size;
else
byte_offset = GF_FILTER_NO_BO;
}
ctx->hdr_store_size += pck_size;
start = data = ctx->hdr_store;
remain = pck_size = ctx->hdr_store_size;
}
if (ctx->resume_from) {
if (gf_filter_pid_would_block(ctx->opid))
return GF_OK;
//resume from data copied internally
if (ctx->hdr_store_size) {
assert(ctx->resume_from <= ctx->hdr_store_size);
start = data = ctx->hdr_store + ctx->resume_from;
remain = pck_size = ctx->hdr_store_size - ctx->resume_from;
} else {
assert(remain >= (s32) ctx->resume_from);
start += ctx->resume_from;
remain -= ctx->resume_from;
}
ctx->resume_from = 0;
}
if (!ctx->bs) {
ctx->bs = gf_bs_new(start, remain, GF_BITSTREAM_READ);
} else {
gf_bs_reassign_buffer(ctx->bs, start, remain);
}
if (!ctx->vparser) {
ctx->vparser = gf_m4v_parser_bs_new(ctx->bs, ctx->is_mpg12);
}
while (remain) {
Bool full_frame;
u8 *pck_data;
s32 current;
u8 sc_type, forced_sc_type=0;
Bool sc_type_forced = GF_FALSE;
Bool skip_pck = GF_FALSE;
u8 ftype;
u32 tinc;
u64 size=0;
u64 fstart;
Bool is_coded;
u32 bytes_from_store = 0;
u32 hdr_offset = 0;
Bool copy_last_bytes = GF_FALSE;
//not enough bytes to parse start code
if (remain<5) {
memcpy(ctx->hdr_store, start, remain);
ctx->bytes_in_header = remain;
break;
}
current = -1;
//we have some potential bytes of a start code in the store, copy some more bytes and check if valid start code.
//if not, dispatch these bytes as continuation of the data
if (ctx->bytes_in_header) {
memcpy(ctx->hdr_store + ctx->bytes_in_header, start, 8 - ctx->bytes_in_header);
current = mpgviddmx_next_start_code(ctx->hdr_store, 8);
//no start code in stored buffer
if ((current<0) || (current >= (s32) ctx->bytes_in_header) ) {
if (ctx->opid) {
dst_pck = gf_filter_pck_new_alloc(ctx->opid, ctx->bytes_in_header, &pck_data);
if (!dst_pck) return GF_OUT_OF_MEM;
if (ctx->src_pck) gf_filter_pck_merge_properties(ctx->src_pck, dst_pck);
gf_filter_pck_set_cts(dst_pck, GF_FILTER_NO_TS);
gf_filter_pck_set_dts(dst_pck, GF_FILTER_NO_TS);
memcpy(pck_data, ctx->hdr_store, ctx->bytes_in_header);
gf_filter_pck_set_framing(dst_pck, GF_FALSE, GF_FALSE);
if (byte_offset != GF_FILTER_NO_BO) {
gf_filter_pck_set_byte_offset(dst_pck, byte_offset - ctx->bytes_in_header);
}
mpgviddmx_enqueue_or_dispatch(ctx, dst_pck, GF_FALSE, GF_FALSE);
}
if (current<0) current = -1;
else current -= ctx->bytes_in_header;
ctx->bytes_in_header = 0;
} else {
//we have a valid start code, check which byte in our store or in the packet payload is the start code type
//and remember its location to reinit the parser from there
hdr_offset = 4 - ctx->bytes_in_header + current;
//bytes still to dispatch
bytes_from_store = ctx->bytes_in_header;
ctx->bytes_in_header = 0;
if (!hdr_offset) {
forced_sc_type = ctx->hdr_store[current+3];
} else {
forced_sc_type = start[hdr_offset-1];
}
sc_type_forced = GF_TRUE;
}
}
		//no start code in store, look for start code in packet
if (current == -1) {
//locate next start code
current = mpgviddmx_next_start_code(start, remain);
//no start code, dispatch the block
if (current<0) {
u8 b3, b2, b1;
if (! ctx->frame_started) {
GF_LOG(GF_LOG_DEBUG, GF_LOG_MEDIA, ("[MPGVid] no start code in block and no frame started, discarding data\n" ));
break;
}
size = remain;
b3 = start[remain-3];
b2 = start[remain-2];
b1 = start[remain-1];
//we may have a startcode at the end of the packet, store it and don't dispatch the last 3 bytes !
if (!b1 || !b2 || !b3) {
copy_last_bytes = GF_TRUE;
assert(size >= 3);
size -= 3;
ctx->bytes_in_header = 3;
}
dst_pck = gf_filter_pck_new_alloc(ctx->opid, (u32) size, &pck_data);
if (!dst_pck) return GF_OUT_OF_MEM;
if (ctx->src_pck) gf_filter_pck_merge_properties(ctx->src_pck, dst_pck);
memcpy(pck_data, start, (size_t) size);
gf_filter_pck_set_framing(dst_pck, GF_FALSE, GF_FALSE);
gf_filter_pck_set_cts(dst_pck, GF_FILTER_NO_TS);
gf_filter_pck_set_dts(dst_pck, GF_FILTER_NO_TS);
if (byte_offset != GF_FILTER_NO_BO) {
gf_filter_pck_set_byte_offset(dst_pck, byte_offset);
}
mpgviddmx_enqueue_or_dispatch(ctx, dst_pck, GF_FALSE, GF_FALSE);
if (copy_last_bytes) {
memcpy(ctx->hdr_store, start+remain-3, 3);
}
break;
}
}
assert(current>=0);
//if we are in the middle of parsing the vosh, skip over bytes remaining from previous obj not parsed
if ((vosh_start>=0) && current) {
assert(remain>=current);
start += current;
remain -= current;
current = 0;
}
//also skip if no output pid
if (!ctx->opid && current) {
assert(remain>=current);
start += current;
remain -= current;
current = 0;
}
//dispatch remaining bytes
if (current>0) {
//flush remaining
dst_pck = gf_filter_pck_new_alloc(ctx->opid, current, &pck_data);
if (!dst_pck) return GF_OUT_OF_MEM;
if (ctx->src_pck) gf_filter_pck_merge_properties(ctx->src_pck, dst_pck);
gf_filter_pck_set_cts(dst_pck, GF_FILTER_NO_TS);
gf_filter_pck_set_dts(dst_pck, GF_FILTER_NO_TS);
gf_filter_pck_set_framing(dst_pck, GF_FALSE, GF_TRUE);
//bytes were partly in store, partly in packet
if (bytes_from_store) {
if (byte_offset != GF_FILTER_NO_BO) {
gf_filter_pck_set_byte_offset(dst_pck, byte_offset - bytes_from_store);
}
assert(bytes_from_store>=(u32) current);
bytes_from_store -= current;
memcpy(pck_data, ctx->hdr_store, current);
} else {
//bytes were only in packet
if (byte_offset != GF_FILTER_NO_BO) {
gf_filter_pck_set_byte_offset(dst_pck, byte_offset);
}
memcpy(pck_data, start, current);
assert(remain>=current);
start += current;
remain -= current;
current = 0;
}
gf_filter_pck_set_carousel_version(dst_pck, 1);
mpgviddmx_enqueue_or_dispatch(ctx, dst_pck, GF_FALSE, GF_FALSE);
}
//not enough bytes to parse start code
if (remain<5) {
memcpy(ctx->hdr_store, start, remain);
ctx->bytes_in_header = remain;
break;
}
//parse headers
//we have a start code loaded, eg the data packet does not have a full start code at the beginning
if (sc_type_forced) {
gf_bs_reassign_buffer(ctx->bs, start + hdr_offset, remain - hdr_offset);
sc_type = forced_sc_type;
} else {
gf_bs_reassign_buffer(ctx->bs, start, remain);
gf_bs_read_int(ctx->bs, 24);
sc_type = gf_bs_read_int(ctx->bs, 8);
}
if (ctx->is_mpg12) {
switch (sc_type) {
case M2V_SEQ_START_CODE:
case M2V_EXT_START_CODE:
gf_bs_reassign_buffer(ctx->bs, start, remain);
e = gf_m4v_parse_config(ctx->vparser, &ctx->dsi);
//not enough data, accumulate until we can parse the full header
if (e==GF_EOS) {
if (vosh_start<0) vosh_start = 0;
if (data == ctx->hdr_store) {
memmove(ctx->hdr_store, start, remain);
ctx->hdr_store_size = remain;
} else {
if (ctx->hdr_store_alloc < ctx->hdr_store_size + pck_size - vosh_start) {
ctx->hdr_store_alloc = (u32) (ctx->hdr_store_size + pck_size - vosh_start);
ctx->hdr_store = gf_realloc(ctx->hdr_store, sizeof(char)*ctx->hdr_store_alloc);
}
memcpy(ctx->hdr_store + ctx->hdr_store_size, data + vosh_start, (size_t) (pck_size - vosh_start) );
ctx->hdr_store_size += pck_size - (u32) vosh_start;
}
gf_filter_pid_drop_packet(ctx->ipid);
return GF_OK;
} else if (e != GF_OK) {
GF_LOG(GF_LOG_ERROR, GF_LOG_MEDIA, ("[MPGVid] Failed to parse VOS header: %s\n", gf_error_to_string(e) ));
} else {
mpgviddmx_check_pid(filter, ctx, 0, NULL);
}
break;
case M2V_PIC_START_CODE:
break;
default:
break;
}
} else {
u8 PL;
switch (sc_type) {
case M4V_VOS_START_CODE:
ctx->dsi.VideoPL = (u8) gf_bs_read_u8(ctx->bs);
vosh_start = start - (u8 *)data;
skip_pck = GF_TRUE;
assert(remain>=5);
start += 5;
remain -= 5;
break;
case M4V_VOL_START_CODE:
gf_bs_reassign_buffer(ctx->bs, start, remain);
PL = ctx->dsi.VideoPL;
e = gf_m4v_parse_config(ctx->vparser, &ctx->dsi);
ctx->dsi.VideoPL = PL;
//not enough data, accumulate until we can parse the full header
if (e==GF_EOS) {
if (vosh_start<0) vosh_start = 0;
if (data == ctx->hdr_store) {
memmove(ctx->hdr_store, start, remain);
ctx->hdr_store_size = remain;
} else {
if (ctx->hdr_store_alloc < ctx->hdr_store_size + pck_size - vosh_start) {
ctx->hdr_store_alloc = (u32) (ctx->hdr_store_size + pck_size - (u32) vosh_start);
ctx->hdr_store = gf_realloc(ctx->hdr_store, sizeof(char)*ctx->hdr_store_alloc);
}
memcpy(ctx->hdr_store + ctx->hdr_store_size, data + vosh_start, (size_t) (pck_size - vosh_start) );
ctx->hdr_store_size += pck_size - (u32) vosh_start;
}
gf_filter_pid_drop_packet(ctx->ipid);
return GF_OK;
} else if (e != GF_OK) {
GF_LOG(GF_LOG_ERROR, GF_LOG_MEDIA, ("[MPGVid] Failed to parse VOS header: %s\n", gf_error_to_string(e) ));
} else {
u32 obj_size = (u32) gf_m4v_get_object_start(ctx->vparser);
if (vosh_start<0) vosh_start = 0;
vosh_end = start - (u8 *)data + obj_size;
vosh_end -= vosh_start;
mpgviddmx_check_pid(filter, ctx,(u32) vosh_end, data+vosh_start);
skip_pck = GF_TRUE;
assert(remain>=(s32) obj_size);
start += obj_size;
remain -= obj_size;
}
break;
case M4V_VOP_START_CODE:
case M4V_GOV_START_CODE:
break;
case M4V_VO_START_CODE:
case M4V_VISOBJ_START_CODE:
default:
if (vosh_start>=0) {
skip_pck = GF_TRUE;
assert(remain>=4);
start += 4;
remain -= 4;
}
break;
}
}
if (skip_pck) {
continue;
}
if (!ctx->opid) {
assert(remain>=4);
start += 4;
remain -= 4;
continue;
}
if (!ctx->is_playing) {
ctx->resume_from = (u32) ((char *)start - (char *)data);
return GF_OK;
}
//at this point, we no longer reaggregate packets
ctx->hdr_store_size = 0;
if (ctx->in_seek) {
u64 nb_frames_at_seek = (u64) (ctx->start_range * ctx->cur_fps.num);
if (ctx->cts + ctx->cur_fps.den >= nb_frames_at_seek) {
//u32 samples_to_discard = (ctx->cts + ctx->dts_inc) - nb_samples_at_seek;
ctx->in_seek = GF_FALSE;
}
}
//may happen that after all our checks, only 4 bytes are left, continue to store these 4 bytes
if (remain<5)
continue;
//good to go
gf_m4v_parser_reset(ctx->vparser, sc_type_forced ? forced_sc_type + 1 : 0);
size = 0;
e = gf_m4v_parse_frame(ctx->vparser, &ctx->dsi, &ftype, &tinc, &size, &fstart, &is_coded);
//true if we strip VO and VISOBJ assert(!fstart);
//we skipped bytes already in store + end of start code present in packet, so the size of the first object
		//needs adjustment
if (bytes_from_store) {
size += bytes_from_store + hdr_offset;
}
if ((e == GF_EOS) && !ctx->input_is_au_end) {
u8 b3 = start[remain-3];
u8 b2 = start[remain-2];
u8 b1 = start[remain-1];
//we may have a startcode at the end of the packet, store it and don't dispatch the last 3 bytes !
if (!b1 || !b2 || !b3) {
copy_last_bytes = GF_TRUE;
assert(size >= 3);
size -= 3;
ctx->bytes_in_header = 3;
}
full_frame = GF_FALSE;
} else {
full_frame = GF_TRUE;
}
if (!is_coded) {
/*if prev is B and we're parsing a packed bitstream discard n-vop*/
if (ctx->forced_packed && ctx->b_frames) {
ctx->is_packed = GF_TRUE;
assert(remain>=size);
start += size;
remain -= (s32) size;
continue;
}
/*policy is to import at variable frame rate, skip*/
if (ctx->vfr) {
ctx->is_vfr = GF_TRUE;
mpgviddmx_update_time(ctx);
assert(remain>=size);
start += size;
remain -= (s32) size;
continue;
}
/*policy is to keep non coded frame (constant frame rate), add*/
}
if (ftype==2) {
//count number of B-frames since last ref
ctx->b_frames++;
ctx->nb_b++;
} else {
//flush all pending packets
mpgviddmx_enqueue_or_dispatch(ctx, NULL, GF_TRUE, GF_FALSE);
			//remember the CTS of the last ref
ctx->last_ref_cts = ctx->cts;
if (ctx->max_b < ctx->b_frames) ctx->max_b = ctx->b_frames;
ctx->b_frames = 0;
if (ftype)
ctx->nb_p++;
else
ctx->nb_i++;
}
ctx->nb_frames++;
dst_pck = gf_filter_pck_new_alloc(ctx->opid, (u32) size, &pck_data);
if (!dst_pck) return GF_OUT_OF_MEM;
if (ctx->src_pck) gf_filter_pck_merge_properties(ctx->src_pck, dst_pck);
//bytes come from both our store and the data packet
if (bytes_from_store) {
memcpy(pck_data, ctx->hdr_store+current, bytes_from_store);
assert(size >= bytes_from_store);
size -= bytes_from_store;
if (byte_offset != GF_FILTER_NO_BO) {
gf_filter_pck_set_byte_offset(dst_pck, byte_offset - bytes_from_store);
}
memcpy(pck_data + bytes_from_store, start, (size_t) size);
} else {
//bytes only come the data packet
memcpy(pck_data, start, (size_t) size);
if (byte_offset != GF_FILTER_NO_BO) {
gf_filter_pck_set_byte_offset(dst_pck, byte_offset + start - (u8 *) data);
}
}
assert(pck_data[0] == 0);
assert(pck_data[1] == 0);
assert(pck_data[2] == 0x01);
gf_filter_pck_set_framing(dst_pck, GF_TRUE, (full_frame || ctx->input_is_au_end) ? GF_TRUE : GF_FALSE);
gf_filter_pck_set_cts(dst_pck, ctx->cts);
gf_filter_pck_set_dts(dst_pck, ctx->dts);
if (ctx->input_is_au_start) {
ctx->input_is_au_start = GF_FALSE;
} else {
			//we use the carousel flag temporarily to indicate the cts must be recomputed
gf_filter_pck_set_carousel_version(dst_pck, 1);
}
gf_filter_pck_set_sap(dst_pck, ftype ? GF_FILTER_SAP_NONE : GF_FILTER_SAP_1);
gf_filter_pck_set_duration(dst_pck, ctx->cur_fps.den);
if (ctx->in_seek) gf_filter_pck_set_seek_flag(dst_pck, GF_TRUE);
ctx->frame_started = GF_TRUE;
mpgviddmx_enqueue_or_dispatch(ctx, dst_pck, GF_FALSE, GF_FALSE);
mpgviddmx_update_time(ctx);
if (!full_frame) {
if (copy_last_bytes) {
memcpy(ctx->hdr_store, start+remain-3, 3);
}
break;
}
assert(remain>=size);
start += size;
remain -= (s32) size;
}
gf_filter_pid_drop_packet(ctx->ipid);
return GF_OK;
}
| null | null | 220,924
|
34216555234361695012653916844531420458
| 541
|
fixed #1905
|
other
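mpgviddmx_process above keeps a side buffer (hdr_store) that grows via gf_realloc so that a header split across packets can be re-parsed once complete. A minimal sketch of that grow-and-append pattern using std::vector, not the GPAC allocator:

#include <cassert>
#include <cstdint>
#include <vector>

struct HeaderStore {
  std::vector<uint8_t> bytes;

  // Append a chunk that did not yet contain a complete header.
  void Append(const uint8_t* data, size_t size) {
    bytes.insert(bytes.end(), data, data + size);
  }

  // Caller decides when enough bytes have accumulated to retry parsing.
  bool ReadyToParse(size_t needed) const { return bytes.size() >= needed; }
};

int main() {
  const uint8_t part1[] = {0x00, 0x00};
  const uint8_t part2[] = {0x01, 0xB6};
  HeaderStore store;
  store.Append(part1, sizeof(part1));
  assert(!store.ReadyToParse(4));  // still waiting for more input
  store.Append(part2, sizeof(part2));
  assert(store.ReadyToParse(4));   // 4 bytes accumulated across two packets
  return 0;
}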
|
gpac
|
5f2c2a16d30229b6241f02fa28e3d6b810d64858
| 0
|
const GF_FilterRegister *mpgviddmx_register(GF_FilterSession *session)
{
return &MPGVidDmxRegister;
}
| null | null | 220,925
|
333220729260302784488651844755614671333
| 4
|
fixed #1905
|
other
|
gpac
|
5f2c2a16d30229b6241f02fa28e3d6b810d64858
| 0
|
static s32 mpgviddmx_next_start_code(u8 *data, u32 size)
{
u32 v, bpos, found;
s64 start, end;
bpos = 0;
found = 0;
start = 0;
end = 0;
v = 0xffffffff;
while (!end) {
if (bpos == size)
return -1;
v = ( (v<<8) & 0xFFFFFF00) | data[bpos];
bpos++;
if ((v & 0xFFFFFF00) == 0x00000100) {
end = start + bpos - 4;
found = 1;
break;
}
}
if (!found)
return -1;
assert(end >= start);
return (s32) (end - start);
}
| null | null | 220,926
|
296530263199573113491341901644707258494
| 28
|
fixed #1905
|
other
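mpgviddmx_next_start_code above slides a 32-bit window over the buffer and reports a hit once the three bytes before the current one are 00 00 01, so it only fires after the start-code type byte has been read. A standalone sketch of the same search on a std::vector (not the GPAC demuxer state):

#include <cassert>
#include <cstdint>
#include <vector>

int NextStartCode(const std::vector<uint8_t>& data) {
  uint32_t v = 0xffffffff;
  for (size_t pos = 0; pos < data.size(); ++pos) {
    v = (v << 8) | data[pos];                // slide one byte into the window
    if ((v & 0xFFFFFF00u) == 0x00000100u) {  // 00 00 01 xx just completed
      return static_cast<int>(pos) - 3;      // offset of the first 0x00 byte
    }
  }
  return -1;  // no start code (or it lacks its type byte) in this buffer
}

int main() {
  assert(NextStartCode({0x12, 0x00, 0x00, 0x01, 0xB3}) == 1);
  assert(NextStartCode({0x00, 0x00, 0x02, 0x01}) == -1);
  return 0;
}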
|