Skip to content

Commit

Permalink
Add pad support.
Browse files Browse the repository at this point in the history
  • Loading branch information
liuliu committed Jan 24, 2024
1 parent 525ac82 commit 3869208
Show file tree
Hide file tree
Showing 3 changed files with 102 additions and 0 deletions.
53 changes: 53 additions & 0 deletions lib/nnc/ccv_cnnp_model_addons.c
Original file line number Diff line number Diff line change
Expand Up @@ -547,6 +547,59 @@ static ccv_cnnp_model_t* _ccv_cnnp_reshape_copy(const ccv_cnnp_model_t* const su
return ccv_cnnp_reshape(self->format, self->dim, self->ofs, self->stride, self->super.name);
}

// Model that zero-pads its single input tensor by a fixed amount at the
// beginning and end of each dimension (see _ccv_cnnp_pad_build).
typedef struct {
ccv_cnnp_model_t super; // Base model; must be first so this struct casts to ccv_cnnp_model_t*.
ccv_nnc_tensor_symbol_t output; // Backing storage pointed to by super.outputs (exactly one output).
int begin[CCV_NNC_MAX_DIM_ALLOC]; // Elements of zero-padding added before each dimension.
int end[CCV_NNC_MAX_DIM_ALLOC]; // Elements of zero-padding added after each dimension.
} ccv_cnnp_model_pad_t;

static void _ccv_cnnp_pad_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
{
	// Build the symbolic graph for zero padding: create a zero-initialized
	// tensor of the enlarged shape, then copy the input into an alias view
	// that starts at the `begin` offsets. Everything outside that view keeps
	// its initial zeros.
	assert(input_size == 1);
	assert(output_size == 1);
	ccv_cnnp_model_pad_t* const pad = (ccv_cnnp_model_pad_t*)super;
	const ccv_nnc_tensor_param_t input_params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
	const int nd = ccv_nnc_tensor_nd(input_params.dim);
	// Enlarged shape: each dimension grows by begin + end padding.
	ccv_nnc_tensor_param_t padded_params = input_params;
	int i;
	for (i = 0; i < nd; i++)
		padded_params.dim[i] = input_params.dim[i] + pad->begin[i] + pad->end[i];
	const ccv_nnc_tensor_symbol_t padded = ccv_nnc_tensor_symbol_new(graph, padded_params, 0);
	ccv_nnc_tensor_symbol_set_flags(graph, padded, CCV_NNC_TENSOR_SYMBOL_INIT_ZEROS);
	// Alias into the padded tensor, offset by `begin`, shaped like the input,
	// strided as the padded tensor so writes land in the interior region.
	int stride[CCV_NNC_MAX_DIM_ALLOC];
	ccv_nnc_tensor_get_stride(padded_params.dim, stride);
	const ccv_nnc_tensor_symbol_t interior = ccv_nnc_tensor_symbol_alias_new(graph, padded, pad->begin, stride, input_params, 0);
	ccv_nnc_graph_exec_symbol_new(graph, CMD_FORMAT_TRANSFORM_FORWARD(), TENSOR_SYMBOL_LIST(inputs[0]), TENSOR_SYMBOL_LIST(interior), "pad");
	outputs[0] = padded;
}

// Forward declaration: copy is defined after the constructor it reuses.
static ccv_cnnp_model_t* _ccv_cnnp_pad_copy(const ccv_cnnp_model_t* const super, void* const context);

// Virtual table for the pad model: graph construction and deep-copy hooks.
static const ccv_cnnp_model_vtab_t ccv_cnnp_pad_isa = {
.build = _ccv_cnnp_pad_build,
.copy = _ccv_cnnp_pad_copy,
};

ccv_cnnp_model_t* ccv_cnnp_pad(const int begin[CCV_NNC_MAX_DIM_ALLOC], const int end[CCV_NNC_MAX_DIM_ALLOC], const char* const name)
{
	// Construct a pad layer model. `begin` / `end` give the number of zero
	// elements to add before / after each dimension; both arrays are copied
	// in full, so the caller's buffers need not outlive this call.
	// cccalloc zero-initializes, so any field not assigned below starts at 0.
	ccv_cnnp_model_pad_t* const pad = (ccv_cnnp_model_pad_t*)cccalloc(1, sizeof(ccv_cnnp_model_pad_t));
	pad->super.isa = &ccv_cnnp_pad_isa;
	pad->super.input_size = 1;
	pad->super.output_size = 1;
	pad->super.outputs = &pad->output;
	memcpy(pad->begin, begin, sizeof(pad->begin));
	memcpy(pad->end, end, sizeof(pad->end));
	ccv_cnnp_model_copy_name(&pad->super, name);
	return (ccv_cnnp_model_t*)pad;
}

static ccv_cnnp_model_t* _ccv_cnnp_pad_copy(const ccv_cnnp_model_t* const super, void* const context)
{
	// A pad model carries no learned state, so copying it is simply
	// re-constructing from the stored begin/end paddings and the name.
	const ccv_cnnp_model_pad_t* const pad = (const ccv_cnnp_model_pad_t*)super;
	return ccv_cnnp_pad(pad->begin, pad->end, pad->super.name);
}

typedef struct {
ccv_cnnp_model_t super;
ccv_nnc_tensor_symbol_t output;
Expand Down
8 changes: 8 additions & 0 deletions lib/nnc/ccv_nnc.h
Original file line number Diff line number Diff line change
Expand Up @@ -4272,6 +4272,14 @@ CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_average_pool(const int kdim[CCV_NNC_
* @return A reshape layer model.
*/
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_reshape(const int format, const int dim[CCV_NNC_MAX_DIM_ALLOC], const int ofs[CCV_NNC_MAX_DIM_ALLOC], const int stride[CCV_NNC_MAX_DIM_ALLOC], const char* const name);
/**
 * Pad the input with extra elements at the beginning or the end of each dimension. Note that for now, these are 0 paddings.
 * @param begin How many elements to add at the beginning of each dimension.
 * @param end How many elements to add at the end of each dimension.
 * @param name The unique name of the model.
 * @return A pad layer model.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_pad(const int begin[CCV_NNC_MAX_DIM_ALLOC], const int end[CCV_NNC_MAX_DIM_ALLOC], const char* const name);
/**
* Identity op that simply copy from input to output without using any data transfer / format conversion methods.
* @param name The unique name of the model.
Expand Down
41 changes: 41 additions & 0 deletions test/unit/nnc/cnnp.core.tests.c
Original file line number Diff line number Diff line change
Expand Up @@ -1791,4 +1791,45 @@ TEST_CASE("two models, one with LoRA, one with not, share the same parameters")
ccv_cnnp_model_free(final1);
}

TEST_CASE("pad a tensor with padding")
{
// Graph under test: pad(input0) + input1, where pad zero-extends
// 1x3x3x10 to 1x6x7x11 (begin = (0,2,2,0), end = (0,1,2,1)).
const ccv_cnnp_model_io_t input0 = ccv_cnnp_input();
const ccv_cnnp_model_io_t input1 = ccv_cnnp_input();
ccv_cnnp_model_t* const pad = ccv_cnnp_pad(DIM_ALLOC(0, 2, 2, 0), DIM_ALLOC(0, 1, 2, 1), "pad");
ccv_cnnp_model_io_t out0 = ccv_cnnp_model_apply(pad, MODEL_IO_LIST(input0));
ccv_cnnp_model_t* const add = ccv_cnnp_sum("sum");
ccv_cnnp_model_io_t out = ccv_cnnp_model_apply(add, MODEL_IO_LIST(out0, input1));
ccv_cnnp_model_t* const final = ccv_cnnp_model_new(MODEL_IO_LIST(input0, input1), MODEL_IO_LIST(out), 0, "tiny");

// x0: random values in (-1, 1] with a fixed seed for reproducibility.
ccv_nnc_tensor_t* const x0 = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 1, 3, 3, 10), 0);
dsfmt_t dsfmt;
int i;
dsfmt_init_gen_rand(&dsfmt, 1);
for (i = 0; i < 3 * 3 * 10; i++)
x0->data.f32[i] = dsfmt_genrand_open_close(&dsfmt) * 2 - 1;
// x1: all ones, already at the padded shape, summed onto the padded x0.
ccv_nnc_tensor_t* const x1 = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 1, 6, 7, 11), 0);
for (i = 0; i < 6 * 7 * 11; i++)
x1->data.f32[i] = 1;
ccv_nnc_tensor_t* const y = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 1, 6, 7, 11), 0);
ccv_nnc_tensor_param_t input0_params = CPU_TENSOR_NHWC(32F, 1, 3, 3, 10);
ccv_nnc_tensor_param_t input1_params = CPU_TENSOR_NHWC(32F, 1, 6, 7, 11);
// No training here, so minimizer and loss are both no-ops.
ccv_cnnp_model_compile(final, TENSOR_PARAM_LIST(input0_params, input1_params), CMD_NOOP(), CMD_NOOP());
ccv_cnnp_model_evaluate(final, (ccv_cnnp_evaluate_param_t){
.requires_grad = 0,
}, TENSOR_LIST(x0, x1), TENSOR_LIST(y), 0, 0);
int j, k;
// y0: expected result computed directly — interior region (rows 2..4,
// cols 2..4, channels 0..9) is 1 + x0, everything else is the 1 from x1.
ccv_nnc_tensor_t* const y0 = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 1, 6, 7, 11), 0);
for (i = 0; i < 6; i++)
for (j = 0; j < 7; j++)
for (k = 0; k < 11; k++)
y0->data.f32[i * 7 * 11 + j * 11 + k] = (i >= 2 && i < 5 && j >=2 && j < 5 && k < 10) ? 1 + x0->data.f32[(i - 2) * 3 * 10 + (j - 2) * 10 + k] : 1;
REQUIRE_TENSOR_EQ(y, y0, "it should be padded");
CNNP_MODEL_GEN(pad, CCV_NNC_LONG_DOT_GRAPH);
ccv_nnc_tensor_free(x0);
ccv_nnc_tensor_free(x1);
ccv_nnc_tensor_free(y);
ccv_nnc_tensor_free(y0);
ccv_cnnp_model_free(final);
}

#include "case_main.h"

0 comments on commit 3869208

Please sign in to comment.