Linear operator issue in course8 #28

Open
IsUpsAndDowns opened this issue Aug 5, 2024 · 0 comments

In linear.cpp, the output tensor is allocated with output = std::make_shared<Tensor<float>>(1, out_features_, feature_dims); Are the positions of out_features_ and feature_dims in this call written in reverse?

    std::shared_ptr<Tensor<float>> output = outputs.at(i);
    if (output == nullptr || output->empty()) {
      output = std::make_shared<Tensor<float>>(1, out_features_, feature_dims);
      outputs.at(i) = output;
    }

    CHECK(output->channels() == 1 && output->rows() == feature_dims &&
          output->cols() == out_features_)
        << "The row of output tensor should be same to feature_dims_ and the "
           "col of output tensor should be same to output_features_ "
        << i << " th";
    const auto& output_raw_shapes = output->raw_shapes();
    if (output_raw_shapes.size() == 2) {
      CHECK(output_raw_shapes.at(0) == feature_dims &&
            output_raw_shapes.at(1) == out_features_);
    }
    if (output_raw_shapes.size() == 1) {
      CHECK(output_raw_shapes.at(0) == out_features_);
    }
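
In this snippet the CHECK expects rows == feature_dims and cols == out_features_, while the constructor call passes (1, out_features_, feature_dims). If the CHECK reflects the intended layout, the last two constructor arguments would presumably need to be swapped. A minimal sketch of that reading, for illustration only, assuming the Tensor<float>(channels, rows, cols) argument order used elsewhere in the course:

    // Sketch only: assuming Tensor<float>(channels, rows, cols) and that the
    // CHECK states the intended layout (rows = feature_dims, cols = out_features_),
    // the allocation would presumably read:
    if (output == nullptr || output->empty()) {
      output = std::make_shared<Tensor<float>>(1, feature_dims, out_features_);
      outputs.at(i) = output;
    }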

Unit test for the linear operator:

TEST(test_register, test_linear1) {
    std::shared_ptr<RuntimeOperator> op = std::make_shared<RuntimeOperator>();
    op->type = "nn.Linear";

    // bias parameter
    bool bias = true;
    std::shared_ptr<RuntimeParameter> bias_param = std::make_shared<RuntimeParameterBool>(bias);
    op->params.insert({"bias", bias_param});

    const uint32_t batch_size = 1;
    std::vector<sftensor> inputs(batch_size);
    std::vector<sftensor> outputs(batch_size);

    const uint32_t in_channel = 1;
    // 2 x 4 input filled with ones (feature_dims = 2, in_features = 4)
    std::shared_ptr<Tensor<float>> input = std::make_shared<Tensor<float>>(in_channel, 2, 4);
    input->data().slice(0) = "1,1,1,1;"
                             "1,1,1,1;";
    inputs.at(0) = input;

    const uint32_t kernel_h = 3;
    const uint32_t kernel_w = 4;

    // 3 x 4 weight matrix filled with 2 (out_features x in_features)
    std::vector<sftensor> weights;
    std::shared_ptr<Tensor<float>> kernel = std::make_shared<Tensor<float>>(in_channel, kernel_h, kernel_w);
    kernel->slice(0) = arma::fmat("2,2,2,2;"
                                  "2,2,2,2;"
                                  "2,2,2,2;");

    weights.push_back(kernel);

    // 1 x 3 bias of ones (one value per output feature)
    std::vector<sftensor> Bias;
    std::shared_ptr<Tensor<float>> b = std::make_shared<Tensor<float>>(in_channel, 1, 3);
    b->slice(0) = arma::fmat("1, 1, 1;");

    Bias.push_back(b);

    const uint32_t in_features = 4;
    const uint32_t out_features = 3;
    LinearLayer linear_layer(in_features, out_features, true);
    linear_layer.set_weights(weights);
    linear_layer.set_bias(Bias);

    linear_layer.Forward(inputs, outputs);
    ASSERT_EQ(outputs.size(), 1);

    outputs.at(0)->Show();

    std::shared_ptr<Tensor<float>> output1 = outputs.front();
    // expected: every element is 4 * 1 * 2 + 1 = 9, in a 2 x 3 matrix
    const arma::fmat output2 = arma::fmat("9,9,9;"
                                          "9,9,9;");
    ASSERT_TRUE(arma::approx_equal(output1->data().slice(0), output2, "absdiff", 1e-5));
}
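
For the expected values: with a 2 x 4 input of ones, a 3 x 4 weight filled with 2 and a bias of 1 per output feature, every output element is 4 * 1 * 2 + 1 = 9, i.e. a 2 x 3 matrix of nines (feature_dims = 2 rows, out_features = 3 columns). A standalone Armadillo sketch of the same arithmetic, separate from the test above:

    // Standalone sketch of the expected math, not part of the unit test.
    #include <armadillo>

    int main() {
      arma::fmat X(2, 4, arma::fill::ones);  // input: feature_dims x in_features
      arma::fmat W(3, 4);                    // weight: out_features x in_features
      W.fill(2.0f);
      arma::fmat B(2, 3, arma::fill::ones);  // bias of 1 per output feature, per row
      arma::fmat Y = X * W.t() + B;          // every element: 4 * 1 * 2 + 1 = 9
      Y.print("Y");                          // prints a 2 x 3 matrix of nines
      return 0;
    }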