//===- TFUtilsTest.cpp - test for TFUtils ---------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

9#include "llvm/Analysis/Utils/TFUtils.h"
10#include "llvm/Analysis/ModelUnderTrainingRunner.h"
11#include "llvm/Analysis/TensorSpec.h"
12#include "llvm/AsmParser/Parser.h"
13#include "llvm/IR/Dominators.h"
14#include "llvm/IR/Instructions.h"
15#include "llvm/IR/LLVMContext.h"
16#include "llvm/IR/Module.h"
17#include "llvm/Support/Path.h"
18#include "llvm/Support/SourceMgr.h"
19#include "llvm/Testing/Support/SupportHelpers.h"
20#include "gtest/gtest.h"
21
22using namespace llvm;
23
24extern const char *TestMainArgv0;
25
26// NOTE! This test model is currently also used by test/Transforms/Inline/ML tests
27//- relevant if updating this model.
28static std::string getModelPath() {
29 SmallString<128> InputsDir = unittest::getInputFileDirectory(Argv0: TestMainArgv0);
30 llvm::sys::path::append(path&: InputsDir, a: "ir2native_x86_64_model");
31 return std::string(InputsDir);
32}
33
34// Test observable behavior when no model is provided.
35TEST(TFUtilsTest, NoModel) {
36 TFModelEvaluator Evaluator("", {}, {});
37 EXPECT_FALSE(Evaluator.isValid());
38}
39
40// Test we can correctly load a savedmodel and evaluate it.
41TEST(TFUtilsTest, LoadAndExecuteTest) {
42 // We use the ir2native model for test. We know it has one feature of
43 // dimension (1, 214)
44 const static int64_t KnownSize = 214;
45 std::vector<TensorSpec> InputSpecs{TensorSpec::createSpec<int32_t>(
46 Name: "serving_default_input_1", Shape: {1, KnownSize})};
47 std::vector<TensorSpec> OutputSpecs{
48 TensorSpec::createSpec<float>(Name: "StatefulPartitionedCall", Shape: {1})};
49
50 TFModelEvaluator Evaluator(getModelPath(), InputSpecs, OutputSpecs);
51 EXPECT_TRUE(Evaluator.isValid());
52
53 int32_t *V = Evaluator.getInput<int32_t>(0);
54 // Fill it up with 1's, we know the output.
55 for (auto I = 0; I < KnownSize; ++I) {
56 V[I] = 1;
57 }
58 {
59 auto ER = Evaluator.evaluate();
60 EXPECT_TRUE(ER.has_value());
61 float Ret = *ER->getTensorValue<float>(0);
62 EXPECT_EQ(static_cast<int64_t>(Ret), 80);
63 EXPECT_EQ(ER->getUntypedTensorValue(0),
64 reinterpret_cast<const void *>(ER->getTensorValue<float>(0)));
65 }
66 // The input vector should be unchanged
67 for (auto I = 0; I < KnownSize; ++I) {
68 EXPECT_EQ(V[I], 1);
69 }
70 // Zero-out the unused position '0' of the instruction histogram, which is
71 // after the first 9 calculated values. Should the the same result.
72 V[9] = 0;
73 {
74 auto ER = Evaluator.evaluate();
75 EXPECT_TRUE(ER.has_value());
76 float Ret = *ER->getTensorValue<float>(0);
77 EXPECT_EQ(static_cast<int64_t>(Ret), 80);
78 }
79}
80
81// Test incorrect input setup
82TEST(TFUtilsTest, EvalError) {
83 // We use the ir2native model for test. We know it has one feature of
84 // dimension (1, 214)
85 const static int64_t KnownSize = 213;
86 std::vector<TensorSpec> InputSpecs{TensorSpec::createSpec<int32_t>(
87 Name: "serving_default_input_1", Shape: {1, KnownSize})};
88 std::vector<TensorSpec> OutputSpecs{
89 TensorSpec::createSpec<float>(Name: "StatefulPartitionedCall", Shape: {1})};
90
91 TFModelEvaluator Evaluator(getModelPath(), InputSpecs, OutputSpecs);
92 EXPECT_FALSE(Evaluator.isValid());
93}
94
95TEST(TFUtilsTest, UnsupportedFeature) {
96 const static int64_t KnownSize = 214;
97 std::vector<TensorSpec> InputSpecs{
98 TensorSpec::createSpec<int32_t>(Name: "serving_default_input_1",
99 Shape: {1, KnownSize}),
100 TensorSpec::createSpec<float>(Name: "this_feature_does_not_exist", Shape: {2, 5})};
101
102 LLVMContext Ctx;
103 ModelUnderTrainingRunner Evaluator(
104 Ctx, getModelPath(), InputSpecs,
105 {TensorSpec::createSpec<float>("StatefulPartitionedCall", {1})});
106 EXPECT_TRUE(Evaluator.isValid());
107 int32_t *V = Evaluator.getTensor<int32_t>(0);
108 // Fill it up with 1s, we know the output.
109 for (auto I = 0; I < KnownSize; ++I)
110 V[I] = 1;
111
112 float *F = Evaluator.getTensor<float>(1);
113 for (auto I = 0; I < 2 * 5; ++I)
114 F[I] = 3.14 + I;
115 float Ret = Evaluator.evaluate<float>();
116 EXPECT_EQ(static_cast<int64_t>(Ret), 80);
117 // The input vector should be unchanged
118 for (auto I = 0; I < KnownSize; ++I)
119 EXPECT_EQ(V[I], 1);
120 for (auto I = 0; I < 2 * 5; ++I)
121 EXPECT_FLOAT_EQ(F[I], 3.14 + I);
122}
123
124TEST(TFUtilsTest, MissingFeature) {
125 std::vector<TensorSpec> InputSpecs{};
126 std::vector<TensorSpec> OutputSpecs{
127 TensorSpec::createSpec<float>(Name: "StatefulPartitionedCall", Shape: {1})};
128
129 TFModelEvaluator Evaluator(getModelPath(), InputSpecs, OutputSpecs);
130 EXPECT_FALSE(Evaluator.isValid());
131}
132

source code of llvm/unittests/Analysis/TFUtilsTest.cpp