coverity
mzegla committed Jul 24, 2024
1 parent 53b317c commit 5864059
Showing 8 changed files with 15 additions and 15 deletions.
@@ -56,13 +56,13 @@ class OPENVINO_GENAI_EXPORTS ContinuousBatchingPipeline {

PipelineMetrics get_metrics() const;

- GenerationHandle add_request(uint64_t request_id, std::string prompt, ov::genai::GenerationConfig sampling_params);
+ GenerationHandle add_request(uint64_t request_id, const std::string& prompt, ov::genai::GenerationConfig sampling_params);

void step();

bool has_non_finished_requests();

// more high level interface, which can process multiple prompts in continuous batching manner
- std::vector<GenerationResult> generate(const std::vector<std::string>& prompts, std::vector<ov::genai::GenerationConfig> sampling_params);
+ std::vector<GenerationResult> generate(const std::vector<std::string>& prompts, const std::vector<ov::genai::GenerationConfig>& sampling_params);
};
}
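
The two signature changes above replace pass-by-value std::string and std::vector parameters with const references, so the pipeline no longer copies the caller's prompts and configs at the API boundary; this is the kind of redundant copy a static analyzer such as Coverity typically reports. A minimal sketch of the difference, using hypothetical helper names rather than the real pipeline API:

#include <string>

// Hypothetical helpers, not OpenVINO GenAI code: they only contrast the two
// parameter-passing conventions touched by this commit.
void add_by_value(std::string prompt) {        // copies the whole string on every call
    (void)prompt;
}

void add_by_cref(const std::string& prompt) {  // binds to the caller's string, no copy
    (void)prompt;
}

int main() {
    std::string prompt(1 << 20, 'x');          // ~1 MB prompt
    add_by_value(prompt);                      // allocates and copies ~1 MB
    add_by_cref(prompt);                       // no allocation, no copy
    return 0;
}
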
2 changes: 1 addition & 1 deletion src/cpp/include/openvino/genai/generation_handle.hpp
@@ -47,7 +47,7 @@ class OPENVINO_GENAI_EXPORTS GenerationHandleImpl {

public:
GenerationHandleImpl(std::shared_ptr<GenerationStream> generation_stream, const ov::genai::GenerationConfig& sampling_params) :
- m_generation_stream(generation_stream),
+ m_generation_stream(std::move(generation_stream)),
m_sampling_params(sampling_params) {};

~GenerationHandleImpl();
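
The GenerationHandleImpl constructor now moves its std::shared_ptr parameter into the member instead of copying it, which saves an extra atomic ref-count increment/decrement pair; the same sink-parameter idiom appears in model_runner.hpp and sampler.hpp below. A rough sketch with made-up type names:

#include <memory>
#include <utility>

struct Stream {};                              // stand-in for GenerationStream

class Handle {
    std::shared_ptr<Stream> m_stream;
public:
    // The parameter is already a copy (one ref-count increment at the call site);
    // moving it into the member avoids a second increment/decrement pair.
    explicit Handle(std::shared_ptr<Stream> stream)
        : m_stream(std::move(stream)) {}
};

int main() {
    auto stream = std::make_shared<Stream>();
    Handle handle(stream);                     // copy into the parameter, then a move
    return 0;
}
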
4 changes: 2 additions & 2 deletions src/cpp/src/block_manager.hpp
@@ -257,7 +257,7 @@ class BlockManager {
}

bool can_append_slots(SequenceGroup::CPtr seq_group) {
- return required_blocks_count(seq_group) <= m_allocator.num_free_blocks();
+ return required_blocks_count(std::move(seq_group)) <= m_allocator.num_free_blocks();
}

size_t required_blocks_count(SequenceGroup::CPtr seq_group) {
@@ -336,7 +336,7 @@ class BlockManager {
// write information about block forking for later usage in CacheManager
copy_blocks_map[last_block->get_index()].push_back(new_block->get_index());
// release `last_block` usage
- m_allocator.free(last_block);
+ m_allocator.free(std::move(last_block));
} else {
// nothing to do, because we are the only users of this block
}
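
In block_manager.hpp the smart-pointer arguments are forwarded with std::move at their last use, so the callee's by-value parameter takes over the existing reference instead of creating and immediately destroying another one. Illustrative only, with placeholder names:

#include <cstddef>
#include <memory>
#include <utility>

struct SeqGroup {};                                      // stand-in for SequenceGroup
using SeqGroupCPtr = std::shared_ptr<const SeqGroup>;    // stand-in for SequenceGroup::CPtr

// The callee takes the pointer by value, as required_blocks_count does.
size_t required_blocks(SeqGroupCPtr seq_group) {
    (void)seq_group;
    return 1;
}

bool can_append(SeqGroupCPtr seq_group, size_t num_free_blocks) {
    // seq_group is not used again after this call, so moving it hands the existing
    // reference to the callee instead of bumping the ref count and dropping it again.
    return required_blocks(std::move(seq_group)) <= num_free_blocks;
}
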
8 changes: 4 additions & 4 deletions src/cpp/src/continuous_batching_pipeline.cpp
@@ -238,7 +238,7 @@ class ContinuousBatchingPipeline::Impl {
return !m_awaiting_requests.empty() || !m_requests.empty();
}

- std::vector<GenerationResult> generate(const std::vector<std::string> prompts, std::vector<ov::genai::GenerationConfig> sampling_params) {
+ std::vector<GenerationResult> generate(const std::vector<std::string>& prompts, const std::vector<ov::genai::GenerationConfig>& sampling_params) {
OPENVINO_ASSERT(!has_non_finished_requests(), "Generate cannot be called while ContinuousBatchingPipeline is already in running state. Use ContinuousBatchingPipeline::add_request");
OPENVINO_ASSERT(prompts.size() == sampling_params.size());

@@ -307,8 +307,8 @@ PipelineMetrics ContinuousBatchingPipeline::get_metrics() const{
return m_impl->get_metrics();
}

- GenerationHandle ContinuousBatchingPipeline::add_request(uint64_t request_id, std::string prompt, ov::genai::GenerationConfig sampling_params) {
- return m_impl->add_request(request_id, prompt, sampling_params);
+ GenerationHandle ContinuousBatchingPipeline::add_request(uint64_t request_id, const std::string& prompt, ov::genai::GenerationConfig sampling_params) {
+ return m_impl->add_request(request_id, prompt, std::move(sampling_params));
}

void ContinuousBatchingPipeline::step() {
@@ -319,6 +319,6 @@ bool ContinuousBatchingPipeline::has_non_finished_requests() {
return m_impl->has_non_finished_requests();
}

- std::vector<GenerationResult> ContinuousBatchingPipeline::generate(const std::vector<std::string>& prompts, std::vector<ov::genai::GenerationConfig> sampling_params) {
+ std::vector<GenerationResult> ContinuousBatchingPipeline::generate(const std::vector<std::string>& prompts, const std::vector<ov::genai::GenerationConfig>& sampling_params) {
return m_impl->generate(prompts, sampling_params);
}
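
Note that add_request keeps sampling_params as a by-value parameter but now moves it into the implementation, so at most one copy is made at the call boundary and none when the caller passes a temporary. A sketch of the idiom with placeholder types (not the real GenerationConfig):

#include <string>
#include <utility>
#include <vector>

struct Config { std::vector<int> stop_token_ids; };    // stand-in for GenerationConfig

struct Impl {
    void add_request(int id, const std::string& prompt, Config params) {
        (void)id; (void)prompt; (void)params;
    }
};

struct Pipeline {
    Impl impl;
    // Sink parameter: the caller pays for at most one copy (none for a temporary),
    // and the facade moves it onward instead of copying it a second time.
    void add_request(int id, const std::string& prompt, Config params) {
        impl.add_request(id, prompt, std::move(params));
    }
};

int main() {
    Pipeline pipeline;
    pipeline.add_request(0, "hello", Config{{1, 2}});   // temporary: moved, never copied
    return 0;
}
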
2 changes: 1 addition & 1 deletion src/cpp/src/generation_stream.hpp
@@ -27,7 +27,7 @@ class GenerationStream {
}

void push(GenerationOutputs outputs) {
- m_output_queue.push(outputs);
+ m_output_queue.push(std::move(outputs));
}

// Retrieving vector of pairs <sequence_id, token_id> as we can generate multiple outputs for a single prompt
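
GenerationStream::push receives its outputs object by value and now moves it into the queue, transferring the container's storage instead of copying every entry; the two push(std::move(outputs)) calls in sequence_group.hpp below rely on the same reasoning. Illustrative stand-ins, not the real GenerationOutputs type:

#include <map>
#include <queue>
#include <string>
#include <utility>

using Outputs = std::map<int, std::string>;   // stand-in for GenerationOutputs

struct Stream {
    std::queue<Outputs> m_output_queue;
    void push(Outputs outputs) {
        // `outputs` is a by-value parameter that dies when push() returns, so moving
        // it transfers the map's storage into the queue instead of deep-copying it.
        m_output_queue.push(std::move(outputs));
    }
};

int main() {
    Stream stream;
    Outputs outputs{{0, "token"}};
    stream.push(std::move(outputs));          // moved all the way into the queue
    return 0;
}
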
2 changes: 1 addition & 1 deletion src/cpp/src/model_runner.hpp
@@ -19,7 +19,7 @@ class ModelRunner {
SchedulerConfig m_scheduler_config;
public:
ModelRunner(ov::InferRequest request, const SchedulerConfig& scheduler_config) :
- m_request(request),
+ m_request(std::move(request)),
m_scheduler_config(scheduler_config) { }

ov::InferRequest get_infer_request() const {
2 changes: 1 addition & 1 deletion src/cpp/src/sampler.hpp
@@ -96,7 +96,7 @@ struct Beam {
float m_score = -std::numeric_limits<float>::infinity();

Beam(Sequence::Ptr sequence)
- : m_sequence(sequence) { }
+ : m_sequence(std::move(sequence)) { }

size_t get_generated_len() const {
return m_sequence->get_generated_len();
6 changes: 3 additions & 3 deletions src/cpp/src/sequence_group.hpp
@@ -371,7 +371,7 @@ class SequenceGroup {
}

Sequence::Ptr fork_sequence(Sequence::CPtr sequence) {
- m_sequences.emplace_back(Sequence::fork(sequence, m_next_sequence_id++));
+ m_sequences.emplace_back(Sequence::fork(std::move(sequence), m_next_sequence_id++));
return m_sequences.back();
}

@@ -433,7 +433,7 @@ class SequenceGroup {
output.score = sequence->get_beam_search_score(m_sampling_params);
outputs.emplace(sequence->get_grouped_id(), output);
}
- m_generation_stream->push(outputs);
+ m_generation_stream->push(std::move(outputs));
}

void push_partial_outputs() {
@@ -445,7 +445,7 @@ class SequenceGroup {
const auto last_gen_token = sequence->get_last_generation_output();
outputs.emplace(sequence->get_grouped_id(), last_gen_token);
}
- m_generation_stream->push(outputs);
+ m_generation_stream->push(std::move(outputs));
}

void notify_handle() {
