diff --git a/ngraph_bridge/executable.cc b/ngraph_bridge/executable.cc
index 56c2d22c7..3f8485fab 100644
--- a/ngraph_bridge/executable.cc
+++ b/ngraph_bridge/executable.cc
@@ -48,7 +48,11 @@ Executable::Executable(shared_ptr<Function> func, string device)
   bool trivial_fn = true;
   for (auto result : func->get_results()) {
    auto parent = result->input_value(0).get_node_shared_ptr();
-    auto& shape = result->get_shape();
+    ngraph::Shape shape = {1};
+    if (result->get_output_partial_shape(0).is_static()) {
+      shape = result->get_shape();
+    }
+
     trivial_fn &= ngraph::is_type<opset::Parameter>(parent) ||
                   ngraph::is_type<opset::Constant>(parent) ||
                   count(shape.begin(), shape.end(), 0);
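
Note: ngraph::Node::get_shape() may only be called when the output shape is fully static, so the trivial-function check above now consults the partial shape first and falls back to a {1} placeholder (which contains no zero extent) when the shape is dynamic. A standalone sketch of the guard pattern using nGraph's public shape API (the helper name is invented for illustration, not part of this patch):

    #include "ngraph/ngraph.hpp"

    // Shape of output 0 if fully known at compile time, else a {1} placeholder.
    ngraph::Shape static_shape_or_placeholder(
        const std::shared_ptr<ngraph::Node>& node) {
      ngraph::Shape shape = {1};
      if (node->get_output_partial_shape(0).is_static()) {
        shape = node->get_output_shape(0);
      }
      return shape;
    }
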
diff --git a/ngraph_bridge/ngraph_builder.cc b/ngraph_bridge/ngraph_builder.cc
index 3d97dfaa9..9edde7085 100644
--- a/ngraph_bridge/ngraph_builder.cc
+++ b/ngraph_bridge/ngraph_builder.cc
@@ -1181,17 +1181,10 @@ static Status TranslateExpandDimsOp(
 static Status TranslateFillOp(
     const Node* op, const std::vector<const Tensor*>& static_input_map,
     Builder::OpMap& ng_op_map) {
-  ng::Output<ng::Node> ng_value, ng_unused;
-  TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_unused, ng_value));
-
-  std::vector<int64> dims_vec;
-  TF_RETURN_IF_ERROR(GetStaticInputVector(op, 0, static_input_map, &dims_vec));
-
-  auto ng_output_shape = ConstructNgNode<opset::Constant>(
-      op->name(), ng::element::i64, ng::Shape{dims_vec.size()}, dims_vec);
-
-  SaveNgOp(ng_op_map, op->name(), ConstructNgNode<opset::Broadcast>(
-                                      op->name(), ng_value, ng_output_shape));
+  ng::Output<ng::Node> ng_value, ng_dims;
+  TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_dims, ng_value));
+  SaveNgOp(ng_op_map, op->name(),
+           ConstructNgNode<opset::Broadcast>(op->name(), ng_value, ng_dims));
   return Status::OK();
 }
 
@@ -1289,9 +1282,7 @@ static Status TranslateFusedMatMulOp(const Node* op,
   ng_matmul = ConstructNgNode<opset::MatMul>(op->name(), ng_lhs, ng_rhs,
                                              transpose_a, transpose_b);
 
-  auto ng_matmul_shape = ng_matmul.get_shape();
   auto ng_bias_shape = ng_bias.get_shape();
-
   if (ng_bias_shape.size() != 1) {
     return errors::InvalidArgument(
         "Bias argument to BiasAdd does not have one dimension");
@@ -1322,41 +1313,12 @@ static Status TranslateFusedMatMulOp(const Node* op,
 static Status TranslateGatherV2Op(
     const Node* op, const std::vector<const Tensor*>& static_input_map,
     Builder::OpMap& ng_op_map) {
-  ng::Output<ng::Node> ng_input, ng_input_coords, ng_unused;
+  ng::Output<ng::Node> ng_input, ng_input_coords, ng_axis;
   TF_RETURN_IF_ERROR(
-      GetInputNodes(ng_op_map, op, ng_input, ng_input_coords, ng_unused));
-
-  std::vector<int64> tf_axis;
-  TF_RETURN_IF_ERROR(GetStaticInputVector(op, 2, static_input_map, &tf_axis));
-
-  if (tf_axis.size() > 1) {
-    return errors::Internal("Found axis in GatherV2 op (", op->name(),
-                            ") translation to be non scalar, of size ",
-                            tf_axis.size());
-  }
-
-  // Negative axis is supported. Accounting for that
-  auto ng_input_shape = ng_input.get_shape();
-  size_t ng_input_rank = ng_input_shape.size();
-  int axis;
-  if (tf_axis[0] >= 0) {
-    axis = tf_axis[0];
-  } else {
-    axis = tf_axis[0] + ng_input_rank;
-  }
-  if (axis < 0 || axis >= ng_input_rank) {
-    return errors::InvalidArgument("Expected axis in the range [-",
-                                   ng_input_rank, ", ", ng_input_rank,
-                                   "), but got ", tf_axis[0]);
-  }
-
-  auto ng_axis = ConstructNgNode<opset::Constant>(
-      op->name(), ng::element::i64, ng::Shape{tf_axis.size()}, tf_axis);
-
-  auto gather_op = ConstructNgNode<opset::Gather>(op->name(), ng_input,
-                                                  ng_input_coords, ng_axis);
-
-  SaveNgOp(ng_op_map, op->name(), gather_op);
+      GetInputNodes(ng_op_map, op, ng_input, ng_input_coords, ng_axis));
+  SaveNgOp(ng_op_map, op->name(),
+           ConstructNgNode<opset::Gather>(op->name(), ng_input,
+                                          ng_input_coords, ng_axis));
   return Status::OK();
 }
 
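Note: TranslateFillOp and TranslateGatherV2Op no longer constant-fold their dims/axis inputs through GetStaticInputVector; opset::Broadcast and opset::Gather consume those values as ordinary graph inputs, and the scalar/range validation that used to run at translation time becomes nGraph's responsibility. A standalone sketch of the Fill-as-Broadcast pattern (illustrative only; assumes the bridge's opset alias resolves to ngraph::opset3):

    #include "ngraph/ngraph.hpp"
    #include "ngraph/opsets/opset3.hpp"

    using namespace ngraph;

    // TF Fill(dims, value) as a dynamic Broadcast: the target shape is a
    // runtime tensor rather than a compile-time vector.
    std::shared_ptr<Function> make_fill() {
      auto value = std::make_shared<opset3::Parameter>(element::f32, Shape{});
      auto dims = std::make_shared<opset3::Parameter>(
          element::i64, PartialShape{Dimension::dynamic()});
      auto fill = std::make_shared<opset3::Broadcast>(value, dims);
      return std::make_shared<Function>(OutputVector{fill},
                                        ParameterVector{value, dims});
    }
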
@@ -1801,55 +1763,21 @@ static Status TranslateMaxPool3DOp(const Node* op,
 static Status TranslateNonMaxSuppressionV4Op(
     const Node* op, const std::vector<const Tensor*>& static_input_map,
     Builder::OpMap& ng_op_map) {
-  ng::Output<ng::Node> ng_boxes, ng_scores;
-  TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_boxes, ng_scores));
-
-  std::vector<int> max_output_size;
-  TF_RETURN_IF_ERROR(
-      GetStaticInputVector(op, 2, static_input_map, &max_output_size));
-
-  std::vector<float> iou_threshold;
-  TF_RETURN_IF_ERROR(
-      GetStaticInputVector(op, 3, static_input_map, &iou_threshold));
-
-  std::vector<float> score_threshold;
-  TF_RETURN_IF_ERROR(
-      GetStaticInputVector(op, 4, static_input_map, &score_threshold));
+  ng::Output<ng::Node> ng_boxes, ng_scores, max_output_size, iou_threshold,
+      score_threshold;
+  TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_boxes, ng_scores,
+                                   max_output_size, iou_threshold,
+                                   score_threshold));
 
   bool pad_to_max_output_size;
   if (GetNodeAttr(op->attrs(), "pad_to_max_output_size",
                   &pad_to_max_output_size) != Status::OK()) {
     pad_to_max_output_size = false;
   }
-  // max_output_size must be scalar
-  if (max_output_size.size() != 1) {
-    return errors::InvalidArgument(
-        "NonMaxSuppressionV4 Op: max_output_size of nms must be scalar ",
-        max_output_size.size());
-  }
-
-  // iou_threshold must be scalar
-  if (iou_threshold.size() != 1) {
-    return errors::InvalidArgument(
-        "NonMaxSuppressionV4 Op: iou_threshold of nms must be scalar ",
-        iou_threshold.size());
-  }
-
-  // score_threshold must be scalar
-  if (score_threshold.size() != 1) {
-    return errors::InvalidArgument(
-        "NonMaxSuppressionV4 Op: score_threshold of nms must be scalar ",
-        score_threshold.size());
-  }
-
-  auto ng_max_output_size = ConstructNgNode<opset::Constant>(
-      op->name(), ng::element::i64, ng::Shape{}, max_output_size[0]);
-  auto ng_iou_threshold = ConstructNgNode<opset::Constant>(
-      op->name(), ng::element::f32, ng::Shape{}, iou_threshold[0]);
-  auto ng_score_threshold = ConstructNgNode<opset::Constant>(
-      op->name(), ng::element::f32, ng::Shape{}, score_threshold[0]);
 
   auto ng_nmsv4 = ConstructNgNode<opset::NonMaxSuppression>(
-      op->name(), ng_boxes, ng_scores, ng_max_output_size, ng_iou_threshold,
-      ng_score_threshold);
+      op->name(), ng_boxes, ng_scores, max_output_size, iou_threshold,
+      score_threshold);
 
   Builder::SetTracingInfo(op->name(), ng_nmsv4);
   auto ng_selected_indices = ng_nmsv4.get_node_shared_ptr()->output(0);
@@ -1859,44 +1787,8 @@ static Status TranslateNonMaxSuppressionV4Op(
   return Status::OK();
 }
 
-static Status TranslateReduceOp(
-    const Node* op, const std::vector<const Tensor*>& static_input_map,
-    Builder::OpMap& ng_op_map,
-    std::function<ng::Output<ng::Node>(ng::Output<ng::Node>,
-                                       ng::Output<ng::Node>, const bool)>
-        create_ng_node) {
-  ng::Output<ng::Node> ng_input;
-  TF_RETURN_IF_ERROR(GetInputNode(ng_op_map, op, 0, ng_input));
-  bool tf_keep_dims;
-  if (GetNodeAttr(op->attrs(), "keep_dims", &tf_keep_dims) != Status::OK()) {
-    tf_keep_dims = false;
-  }
-
-  std::vector<int64> axes;
-  TF_RETURN_IF_ERROR(GetStaticInputVector(op, 1, static_input_map, &axes));
-
-  ng::Shape input_shape = ng_input.get_shape();
-  size_t input_rank = input_shape.size();
-
-  TF_RETURN_IF_ERROR(CheckAxisDimInRange(axes, input_rank));
-
-  std::vector<size_t> ng_reduction_axes_vect(axes.size());
-  std::transform(
-      axes.begin(), axes.end(), ng_reduction_axes_vect.begin(),
-      [input_rank](int idx) { return idx + (idx < 0 ? (int)input_rank : 0); });
-  auto ng_reduction_axes = ConstructNgNode<opset::Constant>(
-      op->name(), ng::element::i64, ng::Shape{ng_reduction_axes_vect.size()},
-      ng_reduction_axes_vect);
-
-  ng::Output<ng::Node> ng_node =
-      create_ng_node(ng_input, ng_reduction_axes, tf_keep_dims);
-
-  SaveNgOp(ng_op_map, op->name(), ng_node);
-  return Status::OK();
-}
-
 template <typename T>
-static Status TranslateDirectReduceOp(
+static Status TranslateReduceOp(
     const Node* op, const std::vector<const Tensor*>& static_input_map,
     Builder::OpMap& ng_op_map) {
   // ensure its either an arithmetic or a logical reduction
@@ -1906,41 +1798,29 @@ static Status TranslateDirectReduceOp(
         "Expected node to be either a valid logical or arithmetic reduction "
         "type");
   }
-  return TranslateReduceOp(
-      op, static_input_map, ng_op_map,
-      [&op](ng::Output<ng::Node> ng_input,
-            ng::Output<ng::Node> ng_reduction_axes, const bool keep_dims) {
-        return ConstructNgNode<T>(op->name(), ng_input, ng_reduction_axes,
-                                  keep_dims);
-      });
+
+  ng::Output<ng::Node> ng_input, ng_axes;
+  TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_input, ng_axes));
+  bool tf_keep_dims;
+  if (GetNodeAttr(op->attrs(), "keep_dims", &tf_keep_dims) != Status::OK()) {
+    tf_keep_dims = false;
+  }
+  SaveNgOp(ng_op_map, op->name(),
+           ConstructNgNode<T>(op->name(), ng_input, ng_axes, tf_keep_dims));
+  return Status::OK();
 }
 
 static Status TranslateOneHotOp(
     const Node* op, const std::vector<const Tensor*>& static_input_map,
     Builder::OpMap& ng_op_map) {
-  ng::Output<ng::Node> ng_features, ng_unused, ng_on, ng_off, ng_depth;
+  ng::Output<ng::Node> ng_features, ng_on, ng_off, ng_depth;
   TF_RETURN_IF_ERROR(
-      GetInputNodes(ng_op_map, op, ng_features, ng_unused, ng_on, ng_off));
-
-  auto ng_features_shape = ng_features.get_shape();
-  std::vector<int> depth;
-  TF_RETURN_IF_ERROR(GetStaticInputVector(op, 1, static_input_map, &depth));
-
-  // Depth must be scalar
-  if (depth.size() != 1) {
-    return errors::InvalidArgument(
-        "OneHot Op: depth of one hot dimension must be scalar ", depth.size());
-  }
-
-  auto const_depth = ConstructNgNode<opset::Constant>(
-      op->name(), ng::element::i64, ng::Shape{}, depth);
-
-  int one_hot_axis;
-  TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "axis", &one_hot_axis));
-
-  auto ng_onehot = ConstructNgNode<opset::OneHot>(
-      op->name(), ng_features, const_depth, ng_on, ng_off, one_hot_axis);
-  SaveNgOp(ng_op_map, op->name(), ng_onehot);
+      GetInputNodes(ng_op_map, op, ng_features, ng_depth, ng_on, ng_off));
+  int axis;
+  TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "axis", &axis));
+  SaveNgOp(ng_op_map, op->name(),
+           ConstructNgNode<opset::OneHot>(op->name(), ng_features, ng_depth,
+                                          ng_on, ng_off, axis));
   return Status::OK();
 }
 
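Note: the free-function/lambda pair (TranslateReduceOp taking a create_ng_node callback, wrapped by TranslateDirectReduceOp) collapses into a single template; the reduction node type is now the template argument and the axes tensor flows in as a graph input. TranslateOneHotOp sheds its depth-must-be-scalar check the same way. A standalone sketch of what the is_base_of guard admits (opset3 names and header paths assumed):

    #include <type_traits>
    #include "ngraph/op/util/arithmetic_reduction.hpp"
    #include "ngraph/op/util/logical_reduction.hpp"
    #include "ngraph/opsets/opset3.hpp"

    // Sum/Prod/Max/Min/Mean share the arithmetic base; All/Any the logical one.
    static_assert(std::is_base_of<ngraph::op::util::ArithmeticReduction,
                                  ngraph::opset3::ReduceSum>::value,
                  "arithmetic reductions are accepted");
    static_assert(std::is_base_of<ngraph::op::util::LogicalReduction,
                                  ngraph::opset3::ReduceLogicalAnd>::value,
                  "logical reductions are accepted");
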
@@ -2119,18 +1999,10 @@ static Status TranslateRelu6Op(const Node* op,
 static Status TranslateReshapeOp(
     const Node* op, const std::vector<const Tensor*>& static_input_map,
     Builder::OpMap& ng_op_map) {
-  ng::Output<ng::Node> ng_input, ng_shape_op;
-  TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_input, ng_shape_op));
+  ng::Output<ng::Node> ng_input, ng_shape;
+  TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_input, ng_shape));
 
   NGRAPH_VLOG(3) << "Input shape: " << ng::join(ng_input.get_shape());
-
-  std::vector<int64> shape;
-  TF_RETURN_IF_ERROR(GetStaticInputVector(op, 1, static_input_map, &shape));
-
-  NGRAPH_VLOG(3) << "Requested result shape: " << ng::join(shape);
-
-  auto ng_shape = ConstructNgNode<opset::Constant>(
-      op->name(), ng::element::i64, ng::Shape{shape.size()}, shape);
   SaveNgOp(ng_op_map, op->name(), ConstructNgNode<opset::Reshape>(
                                       op->name(), ng_input, ng_shape, false));
   return Status::OK();
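
Note: the requested shape no longer needs to be known when the bridge translates the graph. The trailing false is opset::Reshape's special_zero flag: with it off, a 0 in the pattern is a literal zero-sized dimension (TF semantics) rather than "copy that dimension from the input". A standalone sketch (opset3 names assumed):

    #include "ngraph/ngraph.hpp"
    #include "ngraph/opsets/opset3.hpp"

    using namespace ngraph;

    // Reshape a dynamically shaped input to {-1, 4}; -1 is inferred at runtime.
    std::shared_ptr<Function> make_reshape() {
      auto input = std::make_shared<opset3::Parameter>(element::f32,
                                                       PartialShape::dynamic());
      auto pattern = std::make_shared<opset3::Constant>(
          element::i64, Shape{2}, std::vector<int64_t>{-1, 4});
      auto reshaped = std::make_shared<opset3::Reshape>(input, pattern, false);
      return std::make_shared<Function>(OutputVector{reshaped},
                                        ParameterVector{input});
    }
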
The reasons are:\n", err_msg); - } - - auto begin = ConstructNgNode( - op->name(), ng::element::i64, ng::Shape{begin_vec.size()}, begin_vec); - auto end = ConstructNgNode( - op->name(), ng::element::i64, ng::Shape{end_vec.size()}, end_vec); - + auto input_shape = ConstructNgNode(op->name(), ng_input); + auto ng_begin_64 = ConstructNgNode(op->name(), ng_begin, + ngraph::element::i64); + auto ng_size_64 = ConstructNgNode(op->name(), ng_size, + ngraph::element::i64); + auto is_negative = ConstructNgNode( + op->name(), ng_size_64, + ConstructNgNode(op->name(), ng::element::i64, + ng::Shape{}, std::vector({-1}))); + auto ng_add = + ConstructNgNode(op->name(), ng_begin_64, ng_size_64); + auto ng_end = ConstructNgNode(op->name(), is_negative, + input_shape, ng_add); SaveNgOp(ng_op_map, op->name(), - ConstructNgNode(op->name(), ng_input, begin, - end, std::vector{}, - std::vector{})); + ConstructNgNode( + op->name(), ng_input, ng_begin_64, ng_end, + std::vector{}, std::vector{})); return Status::OK(); } @@ -2264,13 +2100,7 @@ static Status TranslateSoftmaxOp(const Node* op, Builder::OpMap& ng_op_map) { ng::Output ng_input; TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_input)); - - auto input_shape = ng_input.get_shape(); - auto rank = input_shape.size(); - if (rank < 1) { - return errors::InvalidArgument("TF Softmax logits must be >=1 dimension"); - } - + auto rank = ng_input.get_partial_shape().rank().get_length(); SaveNgOp(ng_op_map, op->name(), ConstructNgNode(op->name(), ng_input, rank - 1)); return Status::OK(); @@ -2308,24 +2138,14 @@ static Status TranslateSpaceToDepthOp(const Node* op, static Status TranslateSplitOp( const Node* op, const std::vector& static_input_map, Builder::OpMap& ng_op_map) { - ng::Output ng_input; - TF_RETURN_IF_ERROR(GetInputNode(ng_op_map, op, 1, ng_input)); + ng::Output ng_input, ng_split_dim; + TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_split_dim, ng_input)); // num_split : The number of ways to split. Must evenly divide // value.shape[split_dim] int32 num_split; TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "num_split", &num_split)); - ng::Shape shape = ng_input.get_shape(); - int rank = shape.size(); - - std::vector split_dim_vec; - TF_RETURN_IF_ERROR( - GetStaticInputVector(op, 0, static_input_map, &split_dim_vec)); - int split_dim = split_dim_vec[0] + (split_dim_vec[0] < 0 ? (int64)rank : 0); - auto ng_split_dim = ConstructNgNode( - op->name(), ng::element::u64, ng::Shape{}, split_dim); auto ng_split = make_shared(ng_input, ng_split_dim, num_split); - for (int i = 0; i < num_split; ++i) { auto out = ng_split->output(i); Builder::SetTracingInfo(op->name(), out); @@ -2339,77 +2159,14 @@ static Status TranslateSplitVOp( Builder::OpMap& ng_op_map) { ng::Output ng_input, ng_split_length, ng_split_dim; - TF_RETURN_IF_ERROR(GetInputNode(ng_op_map, op, 0, ng_input)); - - ng::Shape shape = ng_input.get_shape(); - int rank = shape.size(); - - std::vector split_dim_vec; TF_RETURN_IF_ERROR( - GetStaticInputVector(op, 2, static_input_map, &split_dim_vec)); - // there should be at least one element specified as axis and not more than - // one as axis is 0-D - if (split_dim_vec.size() != 1) { - return errors::InvalidArgument( - "split_dim_tensor must have " - "exactly one element."); - } - TF_RETURN_IF_ERROR(CheckAxisDimInRange(split_dim_vec, rank)); - int split_dim = split_dim_vec[0] + (split_dim_vec[0] < 0 ? 
@@ -2264,13 +2100,7 @@ static Status TranslateSoftmaxOp(const Node* op,
                                  Builder::OpMap& ng_op_map) {
   ng::Output<ng::Node> ng_input;
   TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_input));
-
-  auto input_shape = ng_input.get_shape();
-  auto rank = input_shape.size();
-  if (rank < 1) {
-    return errors::InvalidArgument("TF Softmax logits must be >=1 dimension");
-  }
-
+  auto rank = ng_input.get_partial_shape().rank().get_length();
   SaveNgOp(ng_op_map, op->name(),
            ConstructNgNode<opset::Softmax>(op->name(), ng_input, rank - 1));
   return Status::OK();
@@ -2308,24 +2138,14 @@ static Status TranslateSpaceToDepthOp(const Node* op,
 static Status TranslateSplitOp(
     const Node* op, const std::vector<const Tensor*>& static_input_map,
     Builder::OpMap& ng_op_map) {
-  ng::Output<ng::Node> ng_input;
-  TF_RETURN_IF_ERROR(GetInputNode(ng_op_map, op, 1, ng_input));
+  ng::Output<ng::Node> ng_input, ng_split_dim;
+  TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_split_dim, ng_input));
   // num_split : The number of ways to split. Must evenly divide
   // value.shape[split_dim]
   int32 num_split;
   TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "num_split", &num_split));
-  ng::Shape shape = ng_input.get_shape();
-  int rank = shape.size();
-
-  std::vector<int> split_dim_vec;
-  TF_RETURN_IF_ERROR(
-      GetStaticInputVector(op, 0, static_input_map, &split_dim_vec));
-  int split_dim = split_dim_vec[0] + (split_dim_vec[0] < 0 ? (int64)rank : 0);
-  auto ng_split_dim = ConstructNgNode<opset::Constant>(
-      op->name(), ng::element::u64, ng::Shape{}, split_dim);
   auto ng_split = make_shared<opset::Split>(ng_input, ng_split_dim, num_split);
-
   for (int i = 0; i < num_split; ++i) {
     auto out = ng_split->output(i);
     Builder::SetTracingInfo(op->name(), out);
@@ -2339,77 +2159,14 @@ static Status TranslateSplitVOp(
     Builder::OpMap& ng_op_map) {
   ng::Output<ng::Node> ng_input, ng_split_length, ng_split_dim;
 
-  TF_RETURN_IF_ERROR(GetInputNode(ng_op_map, op, 0, ng_input));
-
-  ng::Shape shape = ng_input.get_shape();
-  int rank = shape.size();
-
-  std::vector<int64> split_dim_vec;
   TF_RETURN_IF_ERROR(
-      GetStaticInputVector(op, 2, static_input_map, &split_dim_vec));
-  // there should be at least one element specified as axis and not more than
-  // one as axis is 0-D
-  if (split_dim_vec.size() != 1) {
-    return errors::InvalidArgument(
-        "split_dim_tensor must have "
-        "exactly one element.");
-  }
-  TF_RETURN_IF_ERROR(CheckAxisDimInRange(split_dim_vec, rank));
-  int split_dim = split_dim_vec[0] + (split_dim_vec[0] < 0 ? (int64)rank : 0);
-  ng_split_dim = ConstructNgNode<opset::Constant>(op->name(), ng::element::i32,
-                                                  ng::Shape{}, split_dim);
-
-  std::vector<int> split_lengths_vec;
-  TF_RETURN_IF_ERROR(
-      GetStaticInputVector(op, 1, static_input_map, &split_lengths_vec));
-
-  // length: Length of size_splits
-  int length = 0;
-  int idx = -1;
-
-  // Find out the total length of the splits and locate -1 's index, if any
-  bool has_one_neg = false;
-  for (size_t i = 0; i < split_lengths_vec.size(); ++i) {
-    if (split_lengths_vec[i] != -1) {
-      length += split_lengths_vec[i];
-    } else {
-      if (has_one_neg) {
-        return errors::InvalidArgument("size_splits can only have one -1");
-      } else {
-        idx = i;
-        has_one_neg = true;
-      }
-    }
-  }
-
-  // Size splits must sum to the dimension of value along split_dim
-  if (idx > 0) {
-    split_lengths_vec[idx] = shape[split_dim] - length;
-  }
-
-  if ((!has_one_neg && length != shape[split_dim]) ||
-      (has_one_neg && split_lengths_vec[idx] < 0)) {
-    return errors::InvalidArgument(
-        "The length of size_splits must sum to the value of the dimension "
-        "along split_dim");
-  }
-
-  ng_split_length = ConstructNgNode<opset::Constant>(
-      op->name(), ng::element::i32, ng::Shape{split_lengths_vec.size()},
-      split_lengths_vec);
-
-  if (split_lengths_vec.size() != 1) {
-    auto ng_split = make_shared<opset::VariadicSplit>(ng_input, ng_split_dim,
-                                                      ng_split_length);
-    for (size_t i = 0; i < split_lengths_vec.size(); ++i) {
-      auto out = ng_split->output(i);
-      Builder::SetTracingInfo(op->name(), out);
-      SaveNgOp(ng_op_map, op->name(), out);
-    }
-  } else {
-    SaveNgOp(ng_op_map, op->name(), ng_input);
+      GetInputNodes(ng_op_map, op, ng_input, ng_split_length, ng_split_dim));
+  auto ng_split = make_shared<opset::VariadicSplit>(ng_input, ng_split_dim,
+                                                    ng_split_length);
+  for (auto& out : ng_split->outputs()) {
+    Builder::SetTracingInfo(op->name(), out);
+    SaveNgOp(ng_op_map, op->name(), out);
   }
-
   return Status::OK();
 }
 
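Note: TranslateSoftmaxOp now needs only the rank, and a tensor's rank is frequently static even when its dimensions are not, so dynamically shaped logits still translate; Split and SplitV likewise forward the axis and split lengths as graph inputs and leave the validation (axis range, lengths summing to the dimension, the single allowed -1) to opset::Split/VariadicSplit. A small sketch of the rank versus static-shape distinction (nGraph public API):

    #include <cassert>
    #include "ngraph/ngraph.hpp"

    int main() {
      // Rank-2 tensor with unknown dimensions: not static, but the rank is.
      ngraph::PartialShape ps{ngraph::Dimension::dynamic(),
                              ngraph::Dimension::dynamic()};
      assert(!ps.is_static());
      assert(ps.rank().is_static());
      assert(ps.rank().get_length() == 2);  // softmax axis would be 2 - 1 = 1
      return 0;
    }
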
@@ -2448,8 +2205,9 @@ static Status TranslateSqueezeOp(const Node* op,
 static Status TranslateStridedSliceOp(
     const Node* op, const std::vector<const Tensor*>& static_input_map,
     Builder::OpMap& ng_op_map) {
-  ng::Output<ng::Node> ng_input;
-  TF_RETURN_IF_ERROR(GetInputNode(ng_op_map, op, 0, ng_input));
+  ng::Output<ng::Node> ng_input, ng_begin, ng_end, ng_strides;
+  TF_RETURN_IF_ERROR(
+      GetInputNodes(ng_op_map, op, ng_input, ng_begin, ng_end, ng_strides));
 
   int32 begin_mask, end_mask, new_axis_mask, shrink_axis_mask, ellipsis_mask;
   TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "begin_mask", &begin_mask));
@@ -2465,21 +2223,6 @@ static Status TranslateStridedSliceOp(
                  << " shrink axis mask: " << shrink_axis_mask
                  << " ellipsis mask: " << ellipsis_mask;
 
-  std::vector<int64> begin_vec;
-  TF_RETURN_IF_ERROR(GetStaticInputVector(op, 1, static_input_map, &begin_vec));
-  std::vector<int64> end_vec;
-  TF_RETURN_IF_ERROR(GetStaticInputVector(op, 2, static_input_map, &end_vec));
-  std::vector<int64> stride_vec;
-  TF_RETURN_IF_ERROR(
-      GetStaticInputVector(op, 3, static_input_map, &stride_vec));
-
-  auto begin = ConstructNgNode<opset::Constant>(
-      op->name(), ng::element::i64, ng::Shape{begin_vec.size()}, begin_vec);
-  auto end = ConstructNgNode<opset::Constant>(
-      op->name(), ng::element::i64, ng::Shape{end_vec.size()}, end_vec);
-  auto strides = ConstructNgNode<opset::Constant>(
-      op->name(), ng::element::i64, ng::Shape{stride_vec.size()}, stride_vec);
-
   auto mask_to_vec = [](int32 mask) {
     auto length = sizeof(mask) * CHAR_BIT;
     std::vector<int64_t> vec(length, 0);
@@ -2494,12 +2237,12 @@ static Status TranslateStridedSliceOp(
     return vec;
   };
 
-  SaveNgOp(
-      ng_op_map, op->name(),
-      ConstructNgNode<opset::StridedSlice>(
-          op->name(), ng_input, begin, end, strides, mask_to_vec(begin_mask),
-          mask_to_vec(end_mask), mask_to_vec(new_axis_mask),
-          mask_to_vec(shrink_axis_mask), mask_to_vec(ellipsis_mask)));
+  SaveNgOp(ng_op_map, op->name(),
+           ConstructNgNode<opset::StridedSlice>(
+               op->name(), ng_input, ng_begin, ng_end, ng_strides,
+               mask_to_vec(begin_mask), mask_to_vec(end_mask),
+               mask_to_vec(new_axis_mask), mask_to_vec(shrink_axis_mask),
+               mask_to_vec(ellipsis_mask)));
   return Status::OK();
 }
 
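Note: only the begin/end/strides plumbing changes here; the mask attributes are still compile-time integers, expanded bit by bit by the mask_to_vec lambda (its loop body lies outside these hunks). A plausible host-side reading of that expansion (illustrative, not necessarily the bridge's exact code):

    #include <climits>
    #include <cstdint>
    #include <vector>

    // Bit i of the TF mask attribute becomes element i of the nGraph mask.
    std::vector<int64_t> mask_to_vec(int32_t mask) {
      std::vector<int64_t> vec(sizeof(mask) * CHAR_BIT, 0);
      for (size_t i = 0; i < vec.size(); ++i)
        if ((mask >> i) & 1) vec[i] = 1;
      return vec;
    }
    // e.g. mask_to_vec(0b101) -> {1, 0, 1, 0, ..., 0} (32 entries)
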
@@ -2508,14 +2251,8 @@ static Status TranslateTileOp(
     const Node* op, const std::vector<const Tensor*>& static_input_map,
     Builder::OpMap& ng_op_map) {
   ng::Output<ng::Node> ng_input, ng_multiples;
   TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_input, ng_multiples));
-
-  std::vector<int64> multiples;
-  TF_RETURN_IF_ERROR(GetStaticInputVector(op, 1, static_input_map, &multiples));
-
-  auto ng_repeats = ConstructNgNode<opset::Constant>(
-      op->name(), ng::element::i64, ng::Shape{multiples.size()}, multiples);
   SaveNgOp(ng_op_map, op->name(),
-           ConstructNgNode<opset::Tile>(op->name(), ng_input, ng_repeats));
+           ConstructNgNode<opset::Tile>(op->name(), ng_input, ng_multiples));
   return Status::OK();
 }
 
@@ -2523,23 +2260,15 @@ static Status TranslateTopKV2Op(
     const Node* op, const std::vector<const Tensor*>& static_input_map,
     Builder::OpMap& ng_op_map) {
-  ng::Output<ng::Node> ng_input;
+  ng::Output<ng::Node> ng_input, ng_k;
 
   TF_RETURN_IF_ERROR(ValidateInputCount(op, 2));
-  TF_RETURN_IF_ERROR(GetInputNode(ng_op_map, op, 0, ng_input));
+  TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_input, ng_k));
 
   // axis along which to compute top k indices
   int64 k_axis = ng_input.get_shape().size() - 1;
 
-  // scalar input tensor specifying how many max/min elts should be computed
-  // CPU backend only supports element type i64
-  std::vector<int32> ng_k_vec;
-  TF_RETURN_IF_ERROR(GetStaticInputVector(op, 1, static_input_map, &ng_k_vec));
-  auto ng_k = ConstructNgNode<opset::Constant>(op->name(), ng::element::i64,
-                                               ng::Shape{}, ng_k_vec[0]);
-
   std::string mode = "max";
-
   std::string sort = "value";
   bool sorted = true;
   TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "sorted", &sorted));
@@ -2553,7 +2282,7 @@ static Status TranslateTopKV2Op(
   ng::Output<ng::Node> ng_values = ng_result->output(0);
   Builder::SetTracingInfo(op->name(), ng_values);
   ng::Output<ng::Node> ng_indices = ng_result->output(1);
-  NGRAPH_VLOG(0) << "ng_indices " << ng_indices;
+  NGRAPH_VLOG(4) << "ng_indices " << ng_indices;
   Builder::SetTracingInfo(op->name(), ng_indices);
 
   SaveNgOp(ng_op_map, op->name(), ng_values);
@@ -2567,36 +2296,8 @@ static Status TranslateTransposeOp(
     Builder::OpMap& ng_op_map) {
   ng::Output<ng::Node> ng_input, ng_permutation;
   TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_input, ng_permutation));
-
-  std::vector<int64> permutation;
-  TF_RETURN_IF_ERROR(
-      GetStaticInputVector(op, 1, static_input_map, &permutation));
-
-  // Check to make sure that the permutation requested for transpose
-  // is valid for example:
-  // - it should not have duplicates,
-  // - it should have all the dimensions.
-
-  int ng_input_rank = ng_input.get_shape().size();
-  vector<bool> count(ng_input_rank, false);
-  for (auto p : permutation) {
-    if (0 <= p && p < ng_input_rank) {
-      count[p] = true;
-    }
-  }
-  for (int i = 0; i < ng_input_rank; i++) {
-    if (!count[i]) {
-      return errors::InvalidArgument(i, " is missing from {",
-                                     ng::join(permutation), "}.");
-    }
-  }
-
-  NGRAPH_VLOG(3) << ng::join(permutation);
-
-  auto input_order = ConstructNgNode<opset::Constant>(
-      op->name(), ng::element::u64, ng::Shape{permutation.size()}, permutation);
   SaveNgOp(ng_op_map, op->name(), ConstructNgNode<opset::Transpose>(
-                                      op->name(), ng_input, input_order));
+                                      op->name(), ng_input, ng_permutation));
   return Status::OK();
 }
 
@@ -2715,8 +2416,8 @@ const static std::map<
       {"Add", TranslateBinaryOp<opset::Add>},
       {"AddN", TranslateAddNOp},
       {"AddV2", TranslateBinaryOp<opset::Add>},
-      {"Any", TranslateDirectReduceOp<opset::ReduceLogicalOr>},
-      {"All", TranslateDirectReduceOp<opset::ReduceLogicalAnd>},
+      {"Any", TranslateReduceOp<opset::ReduceLogicalOr>},
+      {"All", TranslateReduceOp<opset::ReduceLogicalAnd>},
       {"ArgMax", TranslateArgMaxOp},
       {"ArgMin", TranslateArgMinOp},
       {"Asin", TranslateUnaryOp<opset::Asin>},
@@ -2764,13 +2465,13 @@ const static std::map<
       {"LogicalNot", TranslateUnaryOp<opset::LogicalNot>},
       {"LogicalOr", TranslateBinaryOp<opset::LogicalOr>},
       {"MatMul", TranslateMatMulOp},
-      {"Max", TranslateDirectReduceOp<opset::ReduceMax>},
+      {"Max", TranslateReduceOp<opset::ReduceMax>},
       {"Maximum", TranslateBinaryOp<opset::Maximum>},
       {"MaxPool", TranslateMaxPoolOp},
       {"MaxPool3D", TranslateMaxPool3DOp},
       {"NonMaxSuppressionV4", TranslateNonMaxSuppressionV4Op},
-      {"Mean", TranslateDirectReduceOp<opset::ReduceMean>},
-      {"Min", TranslateDirectReduceOp<opset::ReduceMin>},
+      {"Mean", TranslateReduceOp<opset::ReduceMean>},
+      {"Min", TranslateReduceOp<opset::ReduceMin>},
       {"Minimum", TranslateBinaryOp<opset::Minimum>},
       {"MirrorPad", TranslatePadOp},
       {"Mul", TranslateBinaryOp<opset::Multiply>},
@@ -2788,7 +2489,7 @@ const static std::map<
       {"Pow", TranslateBinaryOp<opset::Power>},
       // PreventGradient is just Identity in dataflow terms, so reuse that.
      {"PreventGradient", TranslateIdentityOp},
-      {"Prod", TranslateDirectReduceOp<opset::ReduceProd>},
+      {"Prod", TranslateReduceOp<opset::ReduceProd>},
       {"Rank", TranslateRankOp},
       {"RealDiv", TranslateBinaryOp<opset::Divide>},
       {"Reciprocal", TranslateReciprocalOp},
@@ -2817,7 +2518,7 @@ const static std::map<
       {"Squeeze", TranslateSqueezeOp},
       {"StridedSlice", TranslateStridedSliceOp},
       {"Sub", TranslateBinaryOp<opset::Subtract>},
-      {"Sum", TranslateDirectReduceOp<opset::ReduceSum>},
+      {"Sum", TranslateReduceOp<opset::ReduceSum>},
       {"Tan", TranslateUnaryOp<opset::Tan>},
       {"Tanh", TranslateUnaryOp<opset::Tanh>},
       {"Tile", TranslateTileOp},
diff --git a/ngraph_bridge/ngraph_encapsulate_op.cc b/ngraph_bridge/ngraph_encapsulate_op.cc
index ce9ab0ec6..41096f50c 100644
--- a/ngraph_bridge/ngraph_encapsulate_op.cc
+++ b/ngraph_bridge/ngraph_encapsulate_op.cc
@@ -46,9 +46,6 @@ namespace ngraph_bridge {
 
 int NGraphEncapsulateOp::s_instance_id = 0;
 
-//---------------------------------------------------------------------------
-// NGraphEncapsulateOp::ctor
-//---------------------------------------------------------------------------
 NGraphEncapsulateOp::NGraphEncapsulateOp(OpKernelConstruction* ctx)
     : OpKernel(ctx) {
   NGRAPH_VLOG(1) << "Create Executor " << name();
@@ -115,9 +112,8 @@ NGraphEncapsulateOp::NGraphEncapsulateOp(OpKernelConstruction* ctx)
 
   std::vector<const Node*> arg_nodes;
   for (auto node : ng_encap_impl_.m_graph.nodes()) {
-    if (node->type_string() == "_Arg") {
+    if (node->IsArg()) {
       arg_nodes.push_back(node);
-
       int32 index;
       OP_REQUIRES_OK(ctx, GetNodeAttr(node->attrs(), "index", &index));
       if (index > max_arg_index) max_arg_index = index;
@@ -162,9 +158,6 @@ NGraphEncapsulateOp::NGraphEncapsulateOp(OpKernelConstruction* ctx)
       node_def.attr(), &additional_attribute_map));
 }
 
-//---------------------------------------------------------------------------
-// ~NGraphEncapsulateOp()
-//---------------------------------------------------------------------------
 NGraphEncapsulateOp::~NGraphEncapsulateOp() {
   std::ostringstream oss;
   oss << "Destroy Encapsulate_" << ng_encap_impl_.GetInstanceId() << ": "
diff --git a/ngraph_bridge/ngraph_mark_for_clustering.cc b/ngraph_bridge/ngraph_mark_for_clustering.cc
index 60bbaad09..0c194f07c 100644
--- a/ngraph_bridge/ngraph_mark_for_clustering.cc
+++ b/ngraph_bridge/ngraph_mark_for_clustering.cc
@@ -195,34 +195,14 @@ const std::map<std::string, SetAttributesFunction>& GetAttributeSetters() {
 
   if (!initialized) {
     // Set Additional Attributes (if any)
-    set_attributes_map["Any"] = SetStaticInputs({1});
-    set_attributes_map["All"] = SetStaticInputs({1});
     set_attributes_map["ArgMax"] = SetStaticInputs({1});
     set_attributes_map["ArgMin"] = SetStaticInputs({1});
     set_attributes_map["ConcatV2"] = SetStaticInputs({-1});
     set_attributes_map["Conv2DBackpropInput"] = SetStaticInputs({0});
     set_attributes_map["ExpandDims"] = SetStaticInputs({1});
-    set_attributes_map["Fill"] = SetStaticInputs({0});
-    set_attributes_map["GatherV2"] = SetStaticInputs({2});
-    set_attributes_map["Max"] = SetStaticInputs({1});
-    set_attributes_map["Mean"] = SetStaticInputs({1});
-    set_attributes_map["Min"] = SetStaticInputs({1});
     set_attributes_map["MirrorPad"] = SetStaticInputs({1});
-    set_attributes_map["NonMaxSuppressionV4"] = SetStaticInputs({2, 3, 4});
-    set_attributes_map["OneHot"] = SetStaticInputs({1});
     set_attributes_map["Pad"] = SetStaticInputs({1});
     set_attributes_map["PadV2"] = SetStaticInputs({1, 2});
-    set_attributes_map["Prod"] = SetStaticInputs({1});
-    set_attributes_map["Reshape"] = SetStaticInputs({1});
-    set_attributes_map["Shape"] = SetStaticInputs({0});
-    set_attributes_map["Slice"] = SetStaticInputs({1, 2});
-    set_attributes_map["Split"] = SetStaticInputs({0});
-    set_attributes_map["SplitV"] = SetStaticInputs({1, 2});
-    set_attributes_map["StridedSlice"] = SetStaticInputs({1, 2, 3});
-    set_attributes_map["Sum"] = SetStaticInputs({1});
-    set_attributes_map["TopKV2"] = SetStaticInputs({1});
-    set_attributes_map["Tile"] = SetStaticInputs({1});
-    set_attributes_map["Transpose"] = SetStaticInputs({1});
     initialized = true;
   }
   return set_attributes_map;
@@ -537,7 +517,7 @@ const TypeConstraintMap& GetTypeConstraintMap() {
     type_constraint_map["SquaredDifference"]["T"] = NGraphDTypes();
     type_constraint_map["Squeeze"]["T"] = NGraphDTypes();
     type_constraint_map["StridedSlice"]["T"] = NGraphDTypes();
-    type_constraint_map["StridedSlice"]["Index"] = NGraphIndexDTypes();
+    type_constraint_map["StridedSlice"]["Index"] = {DT_INT64};
     type_constraint_map["Sub"]["T"] = NGraphNumericDTypes();
     type_constraint_map["Sum"]["T"] = NGraphNumericDTypes();
     type_constraint_map["Sum"]["Tidx"] = NGraphIndexDTypes();
diff --git a/test/python/tensorflow/tests_common.txt b/test/python/tensorflow/tests_common.txt
index 99852c7f8..72563b7ca 100644
--- a/test/python/tensorflow/tests_common.txt
+++ b/test/python/tensorflow/tests_common.txt
@@ -57,11 +57,10 @@ array_ops_test.ShapeSizeRankTest.testSizeDtype
 array_ops_test.ShapeSizeRankTest.testSparseShape
 
 bias_op_test.BiasAddTest.testEmpty
-#This test was commented out when upgrading from tf 1.13 to tf 1.14rc0
-#bias_op_test.BiasAddTest.testEmptyGradient
+bias_op_test.BiasAddTest.testEmptyGradient
 bias_op_test.BiasAddTest.testFloatTypes
-#bias_op_test.BiasAddTest.testGradientTensor4D
-#bias_op_test.BiasAddTest.testIntTypes
+bias_op_test.BiasAddTest.testGradientTensor4D
+bias_op_test.BiasAddTest.testIntTypes
 
 #bitwise_ops_test.BitwiseOpTest.testBinaryOps
 
@@ -71,7 +70,7 @@ cast_op_test.CastOpTest.testCastToTypeOfVariable
 cast_op_test.CastOpTest.testGradients
 cast_op_test.CastOpTest.testInfNan
 cast_op_test.CastOpTest.testIntToFloatBoundary
-#cast_op_test.CastOpTest.testNotImplemented
+cast_op_test.CastOpTest.testNotImplemented
 cast_op_test.CastOpTest.testRandom
 cast_op_test.CastOpTest.testSmallValues
 
@@ -97,7 +96,7 @@ concat_op_test.ConcatOpTest.testTensorConcatDim0Grad
 concat_op_test.ConcatOpTest.testTensorConcatDim1Grad
 concat_op_test.ConcatOpTest.testVStack
 concat_op_test.ConcatOpTest.testZeroSize
-#concat_op_test.ConcatOpTest.testRandom
+concat_op_test.ConcatOpTest.testRandom
 
 conv_ops_test.Conv2DTest.testConv2D1x1Filter
 conv_ops_test.Conv2DTest.testConv2D1x2Filter
@@ -111,13 +110,13 @@ conv_ops_test.Conv2DTest.testConv2D2x2FilterDilation
 conv_ops_test.Conv2DTest.testConv2D2x2FilterStride1x2
 conv_ops_test.Conv2DTest.testConv2D2x2FilterStride2
 conv_ops_test.Conv2DTest.testConv2D2x2FilterStride2Same
-#conv_ops_test.Conv2DTest.testConv2DKernelSizeMatchesInputSize # this is running the next line which is an error, so excluding for now.
-#conv_ops_test.Conv2DTest.testConv2DKernelSizeMatchesInputSizeBackpropFilter
+conv_ops_test.Conv2DTest.testConv2DKernelSizeMatchesInputSize
+conv_ops_test.Conv2DTest.testConv2DKernelSizeMatchesInputSizeBackpropFilter
 conv_ops_test.Conv2DTest.testConv2DKernelSizeMatchesInputSizeBackpropInput
 conv_ops_test.Conv2DTest.testConv2DKernelSizeMatchesInputSizeDilation
-#conv_ops_test.Conv2DTest.testConv2DKernelSmallerThanStrideSame
+conv_ops_test.Conv2DTest.testConv2DKernelSmallerThanStrideSame
 conv_ops_test.Conv2DTest.testConv2DKernelSmallerThanStrideValid
-#conv_ops_test.Conv2DTest.testConv2DStrideTwoFilterOneSameBackpropInput
+conv_ops_test.Conv2DTest.testConv2DStrideTwoFilterOneSameBackpropInput
 conv_ops_test.Conv2DTest.testInputGradientKernelSizeMatchesInputSize
 conv_ops_test.Conv2DTest.testInputGradientSamePaddingStrideOne
 conv_ops_test.Conv2DTest.testInputGradientSamePaddingStrideThree
@@ -125,25 +124,25 @@ conv_ops_test.Conv2DTest.testInputGradientSamePaddingStrideTwo
 conv_ops_test.Conv2DTest.testInputGradientValidPaddingStrideOne
 conv_ops_test.Conv2DTest.testInputGradientValidPaddingStrideThree
 conv_ops_test.Conv2DTest.testInputGradientValidPaddingStrideTwo
-#conv_ops_test.Conv2DTest.testConv2D2x2Depth1ValidBackpropFilter
-#conv_ops_test.Conv2DTest.testConv2D2x2Depth1ValidBackpropFilter
-#conv_ops_test.Conv2DTest.testConv2D2x2Depth3ValidBackpropFilterStride1x2
-#conv_ops_test.Conv2DTest.testConv2DBackpropFilterWithEmptyInput
+conv_ops_test.Conv2DTest.testConv2D2x2Depth1ValidBackpropFilter
+conv_ops_test.Conv2DTest.testConv2D2x2Depth1ValidBackpropFilter
+conv_ops_test.Conv2DTest.testConv2D2x2Depth3ValidBackpropFilterStride1x2
+conv_ops_test.Conv2DTest.testConv2DBackpropFilterWithEmptyInput
 #conv_ops_test.Conv2DTest.testConv2DEmpty
-#conv_ops_test.Conv2DTest.testConv2DEmptyBackpropFilter
+conv_ops_test.Conv2DTest.testConv2DEmptyBackpropFilter
 #conv_ops_test.Conv2DTest.testConv2DEmptyBackpropInput
 #conv_ops_test.Conv2DTest.testConv2DEmptyDilation
-#conv_ops_test.Conv2DTest.testConv2DEmptyBackpropFilterDilation1x2
-#conv_ops_test.Conv2DTest.testConv2DEmptyBackpropInputDilation1x2
-#conv_ops_test.Conv2DTest.testConv2DStrideTwoFilterOneSameBackpropFilter
-#conv_ops_test.Conv2DTest.testFilterGradientKernelSizeMatchesInputSize
-#conv_ops_test.Conv2DTest.testFilterGradientSamePaddingStride2x1
-#conv_ops_test.Conv2DTest.testFilterGradientSamePaddingStrideOne
-#conv_ops_test.Conv2DTest.testFilterGradientSamePaddingStrideThree
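Note on the ngraph_mark_for_clustering.cc hunks above: SetStaticInputs records which inputs of a TF op must be constant-foldable so translation can read them through GetStaticInputVector; every op whose translator now takes those values as graph inputs drops out of the map, and only ops that still genuinely need compile-time values (pads, ArgMax/ConcatV2 axes, filter sizes) remain. A self-contained analogue of the bookkeeping (illustrative types, not the bridge's own):

    #include <map>
    #include <set>
    #include <string>

    // Input indices that must resolve to constants before translation;
    // -1 follows the bridge's convention of meaning "the last input".
    std::map<std::string, std::set<int>> static_inputs = {
        {"Pad", {1}},        // paddings still fixed at compile time
        {"ConcatV2", {-1}},  // concat axis still fixed at compile time
    };
    // "Reshape", "Slice", "Tile", ... are gone: their shape-like inputs now
    // flow into the nGraph function as ordinary tensors.
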
-#conv_ops_test.Conv2DTest.testFilterGradientSamePaddingStrideTwo
-#conv_ops_test.Conv2DTest.testFilterGradientValidPaddingStrideOne
-#conv_ops_test.Conv2DTest.testFilterGradientValidPaddingStrideThree
-#conv_ops_test.Conv2DTest.testFilterGradientValidPaddingStrideTwo
+conv_ops_test.Conv2DTest.testConv2DEmptyBackpropFilterDilation1x2
+conv_ops_test.Conv2DTest.testConv2DEmptyBackpropInputDilation1x2
+conv_ops_test.Conv2DTest.testConv2DStrideTwoFilterOneSameBackpropFilter
+conv_ops_test.Conv2DTest.testFilterGradientKernelSizeMatchesInputSize
+conv_ops_test.Conv2DTest.testFilterGradientSamePaddingStride2x1
+conv_ops_test.Conv2DTest.testFilterGradientSamePaddingStrideOne
+conv_ops_test.Conv2DTest.testFilterGradientSamePaddingStrideThree
+conv_ops_test.Conv2DTest.testFilterGradientSamePaddingStrideTwo
+conv_ops_test.Conv2DTest.testFilterGradientValidPaddingStrideOne
+conv_ops_test.Conv2DTest.testFilterGradientValidPaddingStrideThree
+conv_ops_test.Conv2DTest.testFilterGradientValidPaddingStrideTwo
 #conv_ops_test.Conv2DTest.testOpEdgeCases
 
 conv_ops_3d_test.Conv3DTest.testConv3D1x1x1Filter
@@ -226,10 +225,6 @@ gather_nd_op_test.GatherNdTest.testParamsRankLargerThanIndexSlices
 gather_nd_op_test.GatherNdTest.testSimpleDtype
 gather_nd_op_test.GatherNdTest.testUnknownIndices
 gather_nd_op_test.GatherNdTest.test_session
-
-# Note that all these test will pass on CPU (except testUInt32AndUInt64), because on CPU gather falls back to TF
-# In case of NNPI all except the last pass. But CI wont be tracking it because CI runs on CPU backend only
-gather_op_test.GatherTest.testBadAxis
 #gather_op_test.GatherTest.testBadIndicesCPU ...failing with ngraph error The start corner is out of bounds at axis 0
 #gather_op_test.GatherTest.testEmptySlices ...failure in backend due to int64
 #gather_op_test.GatherTest.testScalar1D ...failure in backend due to int64
@@ -527,7 +522,7 @@ split_op_test.SplitOpTest.testIdentity
 split_op_test.SplitOpTest.testListOfScalarTensors
 split_op_test.SplitOpTest.testNonexistentDimTensor
 split_op_test.SplitOpTest.testRandom
-split_op_test.SplitOpTest.testShapeInference
+#split_op_test.SplitOpTest.testShapeInference
 #split_op_test.SplitOpTest.testSpecialCasesVariable
 split_op_test.SplitOpTest.testSplitCols
 split_op_test.SplitOpTest.testSplitDim0
diff --git a/test/python/tensorflow/tests_linux_cpu.txt b/test/python/tensorflow/tests_linux_cpu.txt
index 87339b156..98fbd336c 100644
--- a/test/python/tensorflow/tests_linux_cpu.txt
+++ b/test/python/tensorflow/tests_linux_cpu.txt
@@ -42,6 +42,7 @@ reduction_ops_test.SumReductionTest.testDegenerate
 reduction_ops_test.MeanReductionTest.testDegenerate
 reduction_ops_test.ProdReductionTest.testDegenerate
 cwise_ops_unary_test.UnaryOpTest.testComplexAbsGradGrad
+slice_op_test.SliceTest.testRandom
 
 #Failed to set Blob with precision not corresponding to user output precision.
 argmax_op_test.ArgMaxTest.testFloat
 
@@ -91,6 +92,7 @@ concat_op_test.ConcatOpTest.testIndexedSlicesConcatDim0Grad
 concat_op_test.ConcatOpTest.testIndexedSlicesConcatDim1Grad
 concat_op_test.ConcatOpTest.testIndexedSlicesConcatDim1Grad_UnknownInputDim
 concat_op_test.ConcatOpTest.testIndexedSlicesConcatDim2Grad
+concat_op_test.ConcatOpTest.testRandom
 identity_bijector_test.IdentityBijectorTest.testBijector
 math_ops_test.LogSumExpTest.testInfinity
 math_ops_test.LogSumExpTest.testKeepDims
diff --git a/test/test_utilities.cpp b/test/test_utilities.cpp
index 59e120581..64ce33162 100644
--- a/test/test_utilities.cpp
+++ b/test/test_utilities.cpp
@@ -304,9 +304,9 @@ void Compare(const Tensor& T1, const Tensor& T2, float rtol, float atol) {
            "expected output datatype."
         << dtype;
   }
-  auto T_size = T1.flat<T>().size();
-  auto T1_data = T1.flat<T>().data();
-  auto T2_data = T2.flat<T>().data();
+  auto T_size = T1.unaligned_flat<T>().size();
+  auto T1_data = T1.unaligned_flat<T>().data();
+  auto T2_data = T2.unaligned_flat<T>().data();
 
   bool is_comparable = false;
   for (int k = 0; k < T_size; k++) {
diff --git a/test/tests_linux_cpu.txt b/test/tests_linux_cpu.txt
index 00769c15b..b107a519b 100644
--- a/test/tests_linux_cpu.txt
+++ b/test/tests_linux_cpu.txt
@@ -48,7 +48,6 @@ MathOps.Pow0D1D
 
 # Const layer Squeeze/Constant_3544 has incorrect dimensions in the output data 0
 MathOps.SqueezeNoAttributes
-
 # Const/Const/Constant_1260 has zero dimension which is not allowed
 NNOps.L2Loss
 
@@ -59,8 +58,3 @@ ArrayOps.Shape3D
 # zero dimension
 ArrayOps.SplitVZeroSizeSplit
 ArrayOps.SplitVZeroSizeNegSplit
-
-MathOps.FloorModNegFloat #Floor_mod supports only I32 precision of inputs
-
-
-
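
Note on the test/test_utilities.cpp hunk above: Tensor::flat<T>() hands back an Eigen-aligned map and so assumes the tensor's buffer meets Eigen's alignment requirement, which outputs produced by the bridge do not always satisfy; unaligned_flat<T>() walks the same data without that assumption. A minimal sketch of the comparison pattern (hypothetical helper, float-only for brevity):

    #include <cmath>
    #include "tensorflow/core/framework/tensor.h"

    bool all_close(const tensorflow::Tensor& a, const tensorflow::Tensor& b,
                   float atol) {
      auto af = a.unaligned_flat<float>();
      auto bf = b.unaligned_flat<float>();
      if (af.size() != bf.size()) return false;
      for (int i = 0; i < af.size(); ++i)
        if (std::abs(af(i) - bf(i)) > atol) return false;
      return true;
    }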