diff --git a/candle-onnx/src/eval.rs b/candle-onnx/src/eval.rs
index 10a3b9377b..f7203b36f7 100644
--- a/candle-onnx/src/eval.rs
+++ b/candle-onnx/src/eval.rs
@@ -1274,6 +1274,34 @@ fn simple_eval_(
                 let output = candle_nn::ops::leaky_relu(input, alpha.into())?;
                 values.insert(node.output[0].clone(), output);
             }
+            // General matrix multiply: Y = alpha * A' * B' + beta * C
+            // https://github.com/onnx/onnx/blob/main/docs/Operators.md#Gemm
+            "Gemm" => {
+                let a = get(&node.input[0])?;
+                let b = get(&node.input[1])?;
+                let c = get(&node.input[2])?;
+
+                // Float attributes default to 1.0 per the ONNX spec.
+                let alpha = get_attr_opt::<f32>(node, "alpha")?.copied().unwrap_or(1.0);
+                let beta = get_attr_opt::<f32>(node, "beta")?.copied().unwrap_or(1.0);
+
+                // Int attributes: non-zero means "transpose this operand".
+                let trans_a = get_attr_opt::<i64>(node, "transA")?.copied().unwrap_or(0);
+                let trans_b = get_attr_opt::<i64>(node, "transB")?.copied().unwrap_or(0);
+
+                let a = if trans_a == 0 { a.clone() } else { a.t()? };
+                let b = if trans_b == 0 { b.clone() } else { b.t()? };
+
+                // Build the scalar multipliers *after* transA/transB so their shapes
+                // match the (possibly transposed) operands, and on the inputs' own
+                // device rather than hard-coding Device::Cpu.
+                let alpha = Tensor::full(alpha, a.shape(), a.device())?;
+                let beta = Tensor::full(beta, c.shape(), c.device())?;
+
+                let output = a
+                    .broadcast_mul(&alpha)?
+                    .broadcast_matmul(&b)?
+                    .broadcast_add(&c.broadcast_mul(&beta)?)?;
+                values.insert(node.output[0].clone(), output);
+            }
             op_type => bail!("unsupported op_type {op_type} for op {node:?}"),
         }
     }