From 2458ba5f1d01d45221099864359ff09c4e1c75f2 Mon Sep 17 00:00:00 2001
From: zjhellofss
Date: Sat, 10 Feb 2024 12:16:05 +0800
Subject: [PATCH] Add unit tests for the matmul operator
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 test/test_layer/test_matmul.cpp | 67 +++++++++++++++++++++++++++++++++
 1 file changed, 67 insertions(+)
 create mode 100644 test/test_layer/test_matmul.cpp

diff --git a/test/test_layer/test_matmul.cpp b/test/test_layer/test_matmul.cpp
new file mode 100644
index 00000000..fa157188
--- /dev/null
+++ b/test/test_layer/test_matmul.cpp
@@ -0,0 +1,67 @@
+//
+// Created by fss on 24-2-10.
+//
+#include <gtest/gtest.h>
+#include <memory>
+#include <vector>
+#include "../../source/layer/details/matmul.hpp"
+
+TEST(test_layer, forward_matmul) {
+  using namespace kuiper_infer;
+  std::vector<float> weights;
+  for (int i = 0; i < 3 * 4; ++i) {
+    weights.push_back(float(i));
+  }
+  LLamaMatmulLayer llama_matmul(3, 4);
+  std::shared_ptr<Tensor<float>> weight = std::make_shared<Tensor<float>>(weights.data(), 3, 4);
+  llama_matmul.set_weights({weight});
+
+  std::vector<float> inputs;
+  for (int i = 0; i < 4 * 5; ++i) {
+    inputs.push_back(float(i));
+  }
+
+  std::shared_ptr<Tensor<float>> input = std::make_shared<Tensor<float>>(inputs.data(), 4, 5);
+  std::shared_ptr<Tensor<float>> output = std::make_shared<Tensor<float>>(3, 5);
+  std::vector<std::shared_ptr<Tensor<float>>> outputs;
+  outputs.push_back(output);
+
+  llama_matmul.Forward({input}, outputs);
+  ASSERT_EQ(output->at(0, 0, 0), 70);
+  ASSERT_EQ(output->at(0, 0, 1), 76);
+  ASSERT_EQ(output->at(0, 0, 2), 82);
+  ASSERT_EQ(output->at(0, 0, 3), 88);
+  ASSERT_EQ(output->at(0, 0, 4), 94);
+
+  ASSERT_EQ(output->at(0, 2, 0), 310);
+  ASSERT_EQ(output->at(0, 2, 1), 348);
+  ASSERT_EQ(output->at(0, 2, 2), 386);
+  ASSERT_EQ(output->at(0, 2, 3), 424);
+  ASSERT_EQ(output->at(0, 2, 4), 462);
+}
+
+TEST(test_layer, forward_matmul2) {
+  using namespace kuiper_infer;
+  std::vector<float> weights;
+  for (int i = 0; i < 3 * 4; ++i) {
+    weights.push_back(float(i));
+  }
+  LLamaMatmulLayer llama_matmul(3, 4);
+  std::shared_ptr<Tensor<float>> weight = std::make_shared<Tensor<float>>(weights.data(), 3, 4);
+  llama_matmul.set_weights({weight});
+
+  std::vector<float> inputs;
+  for (int i = 0; i < 4; ++i) {
+    inputs.push_back(float(i));
+  }
+
+  std::shared_ptr<Tensor<float>> input = std::make_shared<Tensor<float>>(inputs.data(), 4, 1);
+  std::shared_ptr<Tensor<float>> output = std::make_shared<Tensor<float>>(3, 1);
+  std::vector<std::shared_ptr<Tensor<float>>> outputs;
+  outputs.push_back(output);
+
+  llama_matmul.Forward({input}, outputs);
+  ASSERT_EQ(output->at(0, 0, 0), 14);
+  ASSERT_EQ(output->at(0, 1, 0), 38);
+  ASSERT_EQ(output->at(0, 2, 0), 62);
+}
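
Note: the hardcoded expectations in both tests can be reproduced by hand.
Assuming LLamaMatmulLayer computes a plain row-major product
output = weight (3x4) * input (4x5), entry (0, 0) is
0*0 + 1*5 + 2*10 + 3*15 = 70, and the remaining values follow the same
pattern. A minimal standalone sketch under that assumption (it does not
depend on kuiper_infer; the shapes and sequential fill mirror the tests)
that checks every asserted value:

    // Reference computation for the values asserted in test_matmul.cpp.
    // Assumption: the layer is a plain row-major matmul, Y = W * X,
    // with no transpose or scaling.
    #include <cassert>

    int main() {
      const int M = 3, K = 4, N = 5;
      float W[M][K], X[K][N], Y[M][N] = {};
      for (int i = 0; i < M * K; ++i) W[i / K][i % K] = float(i);  // 0..11
      for (int i = 0; i < K * N; ++i) X[i / N][i % N] = float(i);  // 0..19
      for (int m = 0; m < M; ++m)  // naive triple loop
        for (int n = 0; n < N; ++n)
          for (int k = 0; k < K; ++k) Y[m][n] += W[m][k] * X[k][n];
      // Rows 0 and 2 of Y, as asserted in forward_matmul.
      const float r0[N] = {70, 76, 82, 88, 94};
      const float r2[N] = {310, 348, 386, 424, 462};
      for (int n = 0; n < N; ++n) assert(Y[0][n] == r0[n] && Y[2][n] == r2[n]);
      // forward_matmul2: same weight times the column vector [0 1 2 3]^T.
      const float x[K] = {0, 1, 2, 3}, y[M] = {14, 38, 62};
      for (int m = 0; m < M; ++m) {
        float sum = 0.f;
        for (int k = 0; k < K; ++k) sum += W[m][k] * x[k];
        assert(sum == y[m]);
      }
      return 0;
    }

The exact float comparisons are safe here because every product and sum
involves small integers that are exactly representable in float.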