diff --git a/.buildinfo b/.buildinfo new file mode 100644 index 000000000..b5398b052 --- /dev/null +++ b/.buildinfo @@ -0,0 +1,4 @@ +# Sphinx build info version 1 +# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. +config: 4b4aa23cef1ca7c44ae22a4a808b9899 +tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/.doctrees/about/authors.doctree b/.doctrees/about/authors.doctree new file mode 100644 index 000000000..38daa2d83 Binary files /dev/null and b/.doctrees/about/authors.doctree differ diff --git a/.doctrees/about/changelog.doctree b/.doctrees/about/changelog.doctree new file mode 100644 index 000000000..c0a627d84 Binary files /dev/null and b/.doctrees/about/changelog.doctree differ diff --git a/.doctrees/about/index.doctree b/.doctrees/about/index.doctree new file mode 100644 index 000000000..f046a2e2c Binary files /dev/null and b/.doctrees/about/index.doctree differ diff --git a/.doctrees/autoapi/index.doctree b/.doctrees/autoapi/index.doctree new file mode 100644 index 000000000..8ced3bef6 Binary files /dev/null and b/.doctrees/autoapi/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/args/index.doctree b/.doctrees/autoapi/lmflow/args/index.doctree new file mode 100644 index 000000000..a29bcd278 Binary files /dev/null and b/.doctrees/autoapi/lmflow/args/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/datasets/dataset/index.doctree b/.doctrees/autoapi/lmflow/datasets/dataset/index.doctree new file mode 100644 index 000000000..863a6b3bd Binary files /dev/null and b/.doctrees/autoapi/lmflow/datasets/dataset/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/datasets/index.doctree b/.doctrees/autoapi/lmflow/datasets/index.doctree new file mode 100644 index 000000000..1383c8fbb Binary files /dev/null and b/.doctrees/autoapi/lmflow/datasets/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/datasets/multi_modal_dataset/index.doctree b/.doctrees/autoapi/lmflow/datasets/multi_modal_dataset/index.doctree new file mode 100644 index 000000000..c75017594 Binary files /dev/null and b/.doctrees/autoapi/lmflow/datasets/multi_modal_dataset/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/index.doctree b/.doctrees/autoapi/lmflow/index.doctree new file mode 100644 index 000000000..0d7e50f76 Binary files /dev/null and b/.doctrees/autoapi/lmflow/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/models/auto_model/index.doctree b/.doctrees/autoapi/lmflow/models/auto_model/index.doctree new file mode 100644 index 000000000..a0a9bbb58 Binary files /dev/null and b/.doctrees/autoapi/lmflow/models/auto_model/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/models/base_model/index.doctree b/.doctrees/autoapi/lmflow/models/base_model/index.doctree new file mode 100644 index 000000000..387097620 Binary files /dev/null and b/.doctrees/autoapi/lmflow/models/base_model/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/models/decoder_model/index.doctree b/.doctrees/autoapi/lmflow/models/decoder_model/index.doctree new file mode 100644 index 000000000..3c965d450 Binary files /dev/null and b/.doctrees/autoapi/lmflow/models/decoder_model/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/models/encoder_decoder_model/index.doctree b/.doctrees/autoapi/lmflow/models/encoder_decoder_model/index.doctree new file mode 100644 index 000000000..098752563 Binary files /dev/null and b/.doctrees/autoapi/lmflow/models/encoder_decoder_model/index.doctree differ diff --git 
a/.doctrees/autoapi/lmflow/models/hf_decoder_model/index.doctree b/.doctrees/autoapi/lmflow/models/hf_decoder_model/index.doctree new file mode 100644 index 000000000..dba960132 Binary files /dev/null and b/.doctrees/autoapi/lmflow/models/hf_decoder_model/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/models/hf_encoder_decoder_model/index.doctree b/.doctrees/autoapi/lmflow/models/hf_encoder_decoder_model/index.doctree new file mode 100644 index 000000000..4be36d01a Binary files /dev/null and b/.doctrees/autoapi/lmflow/models/hf_encoder_decoder_model/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/models/hf_model_mixin/index.doctree b/.doctrees/autoapi/lmflow/models/hf_model_mixin/index.doctree new file mode 100644 index 000000000..dd11e4c06 Binary files /dev/null and b/.doctrees/autoapi/lmflow/models/hf_model_mixin/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/models/hf_text_regression_model/index.doctree b/.doctrees/autoapi/lmflow/models/hf_text_regression_model/index.doctree new file mode 100644 index 000000000..668ca6233 Binary files /dev/null and b/.doctrees/autoapi/lmflow/models/hf_text_regression_model/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/models/index.doctree b/.doctrees/autoapi/lmflow/models/index.doctree new file mode 100644 index 000000000..71b31e41c Binary files /dev/null and b/.doctrees/autoapi/lmflow/models/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/models/interfaces/index.doctree b/.doctrees/autoapi/lmflow/models/interfaces/index.doctree new file mode 100644 index 000000000..bdc0c84ee Binary files /dev/null and b/.doctrees/autoapi/lmflow/models/interfaces/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/models/interfaces/tunable/index.doctree b/.doctrees/autoapi/lmflow/models/interfaces/tunable/index.doctree new file mode 100644 index 000000000..0b99c4ec1 Binary files /dev/null and b/.doctrees/autoapi/lmflow/models/interfaces/tunable/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/models/regression_model/index.doctree b/.doctrees/autoapi/lmflow/models/regression_model/index.doctree new file mode 100644 index 000000000..4d393bdc1 Binary files /dev/null and b/.doctrees/autoapi/lmflow/models/regression_model/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/models/text_regression_model/index.doctree b/.doctrees/autoapi/lmflow/models/text_regression_model/index.doctree new file mode 100644 index 000000000..174260788 Binary files /dev/null and b/.doctrees/autoapi/lmflow/models/text_regression_model/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/models/vision2seq_model/index.doctree b/.doctrees/autoapi/lmflow/models/vision2seq_model/index.doctree new file mode 100644 index 000000000..b2fd72492 Binary files /dev/null and b/.doctrees/autoapi/lmflow/models/vision2seq_model/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/models/vision_encoder/clip_encoder/index.doctree b/.doctrees/autoapi/lmflow/models/vision_encoder/clip_encoder/index.doctree new file mode 100644 index 000000000..36aa1d931 Binary files /dev/null and b/.doctrees/autoapi/lmflow/models/vision_encoder/clip_encoder/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/models/vision_encoder/index.doctree b/.doctrees/autoapi/lmflow/models/vision_encoder/index.doctree new file mode 100644 index 000000000..0ebf1b999 Binary files /dev/null and b/.doctrees/autoapi/lmflow/models/vision_encoder/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/optim/adabelief/index.doctree 
b/.doctrees/autoapi/lmflow/optim/adabelief/index.doctree new file mode 100644 index 000000000..75cf873b9 Binary files /dev/null and b/.doctrees/autoapi/lmflow/optim/adabelief/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/optim/adabound/index.doctree b/.doctrees/autoapi/lmflow/optim/adabound/index.doctree new file mode 100644 index 000000000..918eecbcf Binary files /dev/null and b/.doctrees/autoapi/lmflow/optim/adabound/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/optim/adadelta/index.doctree b/.doctrees/autoapi/lmflow/optim/adadelta/index.doctree new file mode 100644 index 000000000..63b938a3e Binary files /dev/null and b/.doctrees/autoapi/lmflow/optim/adadelta/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/optim/adagrad/index.doctree b/.doctrees/autoapi/lmflow/optim/adagrad/index.doctree new file mode 100644 index 000000000..059e4ef6a Binary files /dev/null and b/.doctrees/autoapi/lmflow/optim/adagrad/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/optim/adam/index.doctree b/.doctrees/autoapi/lmflow/optim/adam/index.doctree new file mode 100644 index 000000000..39dd2faed Binary files /dev/null and b/.doctrees/autoapi/lmflow/optim/adam/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/optim/adamax/index.doctree b/.doctrees/autoapi/lmflow/optim/adamax/index.doctree new file mode 100644 index 000000000..5f50c4050 Binary files /dev/null and b/.doctrees/autoapi/lmflow/optim/adamax/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/optim/adamp/index.doctree b/.doctrees/autoapi/lmflow/optim/adamp/index.doctree new file mode 100644 index 000000000..7a98f1d47 Binary files /dev/null and b/.doctrees/autoapi/lmflow/optim/adamp/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/optim/adamw_schedule_free/index.doctree b/.doctrees/autoapi/lmflow/optim/adamw_schedule_free/index.doctree new file mode 100644 index 000000000..6def979ba Binary files /dev/null and b/.doctrees/autoapi/lmflow/optim/adamw_schedule_free/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/optim/adan/index.doctree b/.doctrees/autoapi/lmflow/optim/adan/index.doctree new file mode 100644 index 000000000..ca86034c9 Binary files /dev/null and b/.doctrees/autoapi/lmflow/optim/adan/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/optim/dummy/index.doctree b/.doctrees/autoapi/lmflow/optim/dummy/index.doctree new file mode 100644 index 000000000..d187431ce Binary files /dev/null and b/.doctrees/autoapi/lmflow/optim/dummy/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/optim/index.doctree b/.doctrees/autoapi/lmflow/optim/index.doctree new file mode 100644 index 000000000..39608258e Binary files /dev/null and b/.doctrees/autoapi/lmflow/optim/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/optim/lamb/index.doctree b/.doctrees/autoapi/lmflow/optim/lamb/index.doctree new file mode 100644 index 000000000..28c2be190 Binary files /dev/null and b/.doctrees/autoapi/lmflow/optim/lamb/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/optim/lars/index.doctree b/.doctrees/autoapi/lmflow/optim/lars/index.doctree new file mode 100644 index 000000000..2b3541f6d Binary files /dev/null and b/.doctrees/autoapi/lmflow/optim/lars/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/optim/nadam/index.doctree b/.doctrees/autoapi/lmflow/optim/nadam/index.doctree new file mode 100644 index 000000000..fb8cbf324 Binary files /dev/null and b/.doctrees/autoapi/lmflow/optim/nadam/index.doctree differ diff --git 
a/.doctrees/autoapi/lmflow/optim/novograd/index.doctree b/.doctrees/autoapi/lmflow/optim/novograd/index.doctree new file mode 100644 index 000000000..cf705d3f6 Binary files /dev/null and b/.doctrees/autoapi/lmflow/optim/novograd/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/optim/optimizers/index.doctree b/.doctrees/autoapi/lmflow/optim/optimizers/index.doctree new file mode 100644 index 000000000..f91a1b42f Binary files /dev/null and b/.doctrees/autoapi/lmflow/optim/optimizers/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/optim/radam/index.doctree b/.doctrees/autoapi/lmflow/optim/radam/index.doctree new file mode 100644 index 000000000..06366ec7d Binary files /dev/null and b/.doctrees/autoapi/lmflow/optim/radam/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/optim/sgd_schedule_free/index.doctree b/.doctrees/autoapi/lmflow/optim/sgd_schedule_free/index.doctree new file mode 100644 index 000000000..a4daad2d4 Binary files /dev/null and b/.doctrees/autoapi/lmflow/optim/sgd_schedule_free/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/optim/sgdp/index.doctree b/.doctrees/autoapi/lmflow/optim/sgdp/index.doctree new file mode 100644 index 000000000..dc4be3e25 Binary files /dev/null and b/.doctrees/autoapi/lmflow/optim/sgdp/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/optim/sophia/index.doctree b/.doctrees/autoapi/lmflow/optim/sophia/index.doctree new file mode 100644 index 000000000..1d3c5cc94 Binary files /dev/null and b/.doctrees/autoapi/lmflow/optim/sophia/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/optim/yogi/index.doctree b/.doctrees/autoapi/lmflow/optim/yogi/index.doctree new file mode 100644 index 000000000..2bc5b97b7 Binary files /dev/null and b/.doctrees/autoapi/lmflow/optim/yogi/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/pipeline/auto_pipeline/index.doctree b/.doctrees/autoapi/lmflow/pipeline/auto_pipeline/index.doctree new file mode 100644 index 000000000..97ddfa8d9 Binary files /dev/null and b/.doctrees/autoapi/lmflow/pipeline/auto_pipeline/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/pipeline/base_aligner/index.doctree b/.doctrees/autoapi/lmflow/pipeline/base_aligner/index.doctree new file mode 100644 index 000000000..872ffa0a6 Binary files /dev/null and b/.doctrees/autoapi/lmflow/pipeline/base_aligner/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/pipeline/base_pipeline/index.doctree b/.doctrees/autoapi/lmflow/pipeline/base_pipeline/index.doctree new file mode 100644 index 000000000..46a585542 Binary files /dev/null and b/.doctrees/autoapi/lmflow/pipeline/base_pipeline/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/pipeline/base_tuner/index.doctree b/.doctrees/autoapi/lmflow/pipeline/base_tuner/index.doctree new file mode 100644 index 000000000..cbdd7b530 Binary files /dev/null and b/.doctrees/autoapi/lmflow/pipeline/base_tuner/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/pipeline/dpo_aligner/index.doctree b/.doctrees/autoapi/lmflow/pipeline/dpo_aligner/index.doctree new file mode 100644 index 000000000..0dcf5b7fa Binary files /dev/null and b/.doctrees/autoapi/lmflow/pipeline/dpo_aligner/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/pipeline/dpov2_aligner/index.doctree b/.doctrees/autoapi/lmflow/pipeline/dpov2_aligner/index.doctree new file mode 100644 index 000000000..f92df8a68 Binary files /dev/null and b/.doctrees/autoapi/lmflow/pipeline/dpov2_aligner/index.doctree differ diff --git 
a/.doctrees/autoapi/lmflow/pipeline/evaluator/index.doctree b/.doctrees/autoapi/lmflow/pipeline/evaluator/index.doctree new file mode 100644 index 000000000..e497c569a Binary files /dev/null and b/.doctrees/autoapi/lmflow/pipeline/evaluator/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/pipeline/finetuner/index.doctree b/.doctrees/autoapi/lmflow/pipeline/finetuner/index.doctree new file mode 100644 index 000000000..a9bf33442 Binary files /dev/null and b/.doctrees/autoapi/lmflow/pipeline/finetuner/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/pipeline/index.doctree b/.doctrees/autoapi/lmflow/pipeline/index.doctree new file mode 100644 index 000000000..ce5f05554 Binary files /dev/null and b/.doctrees/autoapi/lmflow/pipeline/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/pipeline/inferencer/index.doctree b/.doctrees/autoapi/lmflow/pipeline/inferencer/index.doctree new file mode 100644 index 000000000..95c424057 Binary files /dev/null and b/.doctrees/autoapi/lmflow/pipeline/inferencer/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/pipeline/iterative_dpo_aligner/index.doctree b/.doctrees/autoapi/lmflow/pipeline/iterative_dpo_aligner/index.doctree new file mode 100644 index 000000000..6ec03ce4e Binary files /dev/null and b/.doctrees/autoapi/lmflow/pipeline/iterative_dpo_aligner/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/pipeline/raft_aligner/index.doctree b/.doctrees/autoapi/lmflow/pipeline/raft_aligner/index.doctree new file mode 100644 index 000000000..e11e4d3b5 Binary files /dev/null and b/.doctrees/autoapi/lmflow/pipeline/raft_aligner/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/pipeline/rm_inferencer/index.doctree b/.doctrees/autoapi/lmflow/pipeline/rm_inferencer/index.doctree new file mode 100644 index 000000000..a255bfb44 Binary files /dev/null and b/.doctrees/autoapi/lmflow/pipeline/rm_inferencer/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/pipeline/rm_tuner/index.doctree b/.doctrees/autoapi/lmflow/pipeline/rm_tuner/index.doctree new file mode 100644 index 000000000..564a3ae66 Binary files /dev/null and b/.doctrees/autoapi/lmflow/pipeline/rm_tuner/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/pipeline/utils/dpov2_dataprocessor/index.doctree b/.doctrees/autoapi/lmflow/pipeline/utils/dpov2_dataprocessor/index.doctree new file mode 100644 index 000000000..b745a9529 Binary files /dev/null and b/.doctrees/autoapi/lmflow/pipeline/utils/dpov2_dataprocessor/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/pipeline/utils/dpov2_trainer/index.doctree b/.doctrees/autoapi/lmflow/pipeline/utils/dpov2_trainer/index.doctree new file mode 100644 index 000000000..0e726b78d Binary files /dev/null and b/.doctrees/autoapi/lmflow/pipeline/utils/dpov2_trainer/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/pipeline/utils/index.doctree b/.doctrees/autoapi/lmflow/pipeline/utils/index.doctree new file mode 100644 index 000000000..3e53dab83 Binary files /dev/null and b/.doctrees/autoapi/lmflow/pipeline/utils/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/pipeline/utils/memory_safe_dpov2_align/index.doctree b/.doctrees/autoapi/lmflow/pipeline/utils/memory_safe_dpov2_align/index.doctree new file mode 100644 index 000000000..142c90661 Binary files /dev/null and b/.doctrees/autoapi/lmflow/pipeline/utils/memory_safe_dpov2_align/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/pipeline/utils/memory_safe_vllm_inference/index.doctree 
b/.doctrees/autoapi/lmflow/pipeline/utils/memory_safe_vllm_inference/index.doctree new file mode 100644 index 000000000..4d1a43cad Binary files /dev/null and b/.doctrees/autoapi/lmflow/pipeline/utils/memory_safe_vllm_inference/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/pipeline/utils/peft_trainer/index.doctree b/.doctrees/autoapi/lmflow/pipeline/utils/peft_trainer/index.doctree new file mode 100644 index 000000000..a1d870b5c Binary files /dev/null and b/.doctrees/autoapi/lmflow/pipeline/utils/peft_trainer/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/pipeline/utils/raft_trainer/index.doctree b/.doctrees/autoapi/lmflow/pipeline/utils/raft_trainer/index.doctree new file mode 100644 index 000000000..1ec8ba503 Binary files /dev/null and b/.doctrees/autoapi/lmflow/pipeline/utils/raft_trainer/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/pipeline/utils/rm_dataprocessor/index.doctree b/.doctrees/autoapi/lmflow/pipeline/utils/rm_dataprocessor/index.doctree new file mode 100644 index 000000000..bd607e81b Binary files /dev/null and b/.doctrees/autoapi/lmflow/pipeline/utils/rm_dataprocessor/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/pipeline/utils/rm_trainer/index.doctree b/.doctrees/autoapi/lmflow/pipeline/utils/rm_trainer/index.doctree new file mode 100644 index 000000000..c43206867 Binary files /dev/null and b/.doctrees/autoapi/lmflow/pipeline/utils/rm_trainer/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/pipeline/vllm_inferencer/index.doctree b/.doctrees/autoapi/lmflow/pipeline/vllm_inferencer/index.doctree new file mode 100644 index 000000000..47a23e4d9 Binary files /dev/null and b/.doctrees/autoapi/lmflow/pipeline/vllm_inferencer/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/tokenization/hf_decoder_model/index.doctree b/.doctrees/autoapi/lmflow/tokenization/hf_decoder_model/index.doctree new file mode 100644 index 000000000..6d2291ef3 Binary files /dev/null and b/.doctrees/autoapi/lmflow/tokenization/hf_decoder_model/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/tokenization/hf_text_regression_model/index.doctree b/.doctrees/autoapi/lmflow/tokenization/hf_text_regression_model/index.doctree new file mode 100644 index 000000000..a616af053 Binary files /dev/null and b/.doctrees/autoapi/lmflow/tokenization/hf_text_regression_model/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/tokenization/index.doctree b/.doctrees/autoapi/lmflow/tokenization/index.doctree new file mode 100644 index 000000000..e2aab6285 Binary files /dev/null and b/.doctrees/autoapi/lmflow/tokenization/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/utils/common/index.doctree b/.doctrees/autoapi/lmflow/utils/common/index.doctree new file mode 100644 index 000000000..c2fdbef8a Binary files /dev/null and b/.doctrees/autoapi/lmflow/utils/common/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/utils/constants/index.doctree b/.doctrees/autoapi/lmflow/utils/constants/index.doctree new file mode 100644 index 000000000..9e6dd7096 Binary files /dev/null and b/.doctrees/autoapi/lmflow/utils/constants/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/utils/conversation_template/base/index.doctree b/.doctrees/autoapi/lmflow/utils/conversation_template/base/index.doctree new file mode 100644 index 000000000..9ce21bffd Binary files /dev/null and b/.doctrees/autoapi/lmflow/utils/conversation_template/base/index.doctree differ diff --git 
a/.doctrees/autoapi/lmflow/utils/conversation_template/chatglm/index.doctree b/.doctrees/autoapi/lmflow/utils/conversation_template/chatglm/index.doctree new file mode 100644 index 000000000..81dd3d4b1 Binary files /dev/null and b/.doctrees/autoapi/lmflow/utils/conversation_template/chatglm/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/utils/conversation_template/chatml/index.doctree b/.doctrees/autoapi/lmflow/utils/conversation_template/chatml/index.doctree new file mode 100644 index 000000000..34f63ce2a Binary files /dev/null and b/.doctrees/autoapi/lmflow/utils/conversation_template/chatml/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/utils/conversation_template/deepseek/index.doctree b/.doctrees/autoapi/lmflow/utils/conversation_template/deepseek/index.doctree new file mode 100644 index 000000000..9a6f5698c Binary files /dev/null and b/.doctrees/autoapi/lmflow/utils/conversation_template/deepseek/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/utils/conversation_template/fox/index.doctree b/.doctrees/autoapi/lmflow/utils/conversation_template/fox/index.doctree new file mode 100644 index 000000000..f8cbb6e84 Binary files /dev/null and b/.doctrees/autoapi/lmflow/utils/conversation_template/fox/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/utils/conversation_template/gemma/index.doctree b/.doctrees/autoapi/lmflow/utils/conversation_template/gemma/index.doctree new file mode 100644 index 000000000..74de85972 Binary files /dev/null and b/.doctrees/autoapi/lmflow/utils/conversation_template/gemma/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/utils/conversation_template/index.doctree b/.doctrees/autoapi/lmflow/utils/conversation_template/index.doctree new file mode 100644 index 000000000..69da675b7 Binary files /dev/null and b/.doctrees/autoapi/lmflow/utils/conversation_template/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/utils/conversation_template/internlm/index.doctree b/.doctrees/autoapi/lmflow/utils/conversation_template/internlm/index.doctree new file mode 100644 index 000000000..6c0ccdcaf Binary files /dev/null and b/.doctrees/autoapi/lmflow/utils/conversation_template/internlm/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/utils/conversation_template/llama/index.doctree b/.doctrees/autoapi/lmflow/utils/conversation_template/llama/index.doctree new file mode 100644 index 000000000..32fcb359d Binary files /dev/null and b/.doctrees/autoapi/lmflow/utils/conversation_template/llama/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/utils/conversation_template/phi/index.doctree b/.doctrees/autoapi/lmflow/utils/conversation_template/phi/index.doctree new file mode 100644 index 000000000..88c5ec96e Binary files /dev/null and b/.doctrees/autoapi/lmflow/utils/conversation_template/phi/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/utils/conversation_template/qwen/index.doctree b/.doctrees/autoapi/lmflow/utils/conversation_template/qwen/index.doctree new file mode 100644 index 000000000..c76aa9140 Binary files /dev/null and b/.doctrees/autoapi/lmflow/utils/conversation_template/qwen/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/utils/conversation_template/yi/index.doctree b/.doctrees/autoapi/lmflow/utils/conversation_template/yi/index.doctree new file mode 100644 index 000000000..b933c1194 Binary files /dev/null and b/.doctrees/autoapi/lmflow/utils/conversation_template/yi/index.doctree differ diff --git 
a/.doctrees/autoapi/lmflow/utils/conversation_template/zephyr/index.doctree b/.doctrees/autoapi/lmflow/utils/conversation_template/zephyr/index.doctree new file mode 100644 index 000000000..0d3bbcb78 Binary files /dev/null and b/.doctrees/autoapi/lmflow/utils/conversation_template/zephyr/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/utils/data_utils/index.doctree b/.doctrees/autoapi/lmflow/utils/data_utils/index.doctree new file mode 100644 index 000000000..4871cee95 Binary files /dev/null and b/.doctrees/autoapi/lmflow/utils/data_utils/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/utils/flash_attention/bloom_flash_attention/index.doctree b/.doctrees/autoapi/lmflow/utils/flash_attention/bloom_flash_attention/index.doctree new file mode 100644 index 000000000..60435a31b Binary files /dev/null and b/.doctrees/autoapi/lmflow/utils/flash_attention/bloom_flash_attention/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/utils/flash_attention/gpt2_flash_attention/index.doctree b/.doctrees/autoapi/lmflow/utils/flash_attention/gpt2_flash_attention/index.doctree new file mode 100644 index 000000000..065c7440c Binary files /dev/null and b/.doctrees/autoapi/lmflow/utils/flash_attention/gpt2_flash_attention/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/utils/flash_attention/gpt_neo_flash_attention/index.doctree b/.doctrees/autoapi/lmflow/utils/flash_attention/gpt_neo_flash_attention/index.doctree new file mode 100644 index 000000000..ed4806e1e Binary files /dev/null and b/.doctrees/autoapi/lmflow/utils/flash_attention/gpt_neo_flash_attention/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/utils/flash_attention/index.doctree b/.doctrees/autoapi/lmflow/utils/flash_attention/index.doctree new file mode 100644 index 000000000..0f6338311 Binary files /dev/null and b/.doctrees/autoapi/lmflow/utils/flash_attention/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/utils/flash_attention/llama_flash_attention/index.doctree b/.doctrees/autoapi/lmflow/utils/flash_attention/llama_flash_attention/index.doctree new file mode 100644 index 000000000..2bcb0c96a Binary files /dev/null and b/.doctrees/autoapi/lmflow/utils/flash_attention/llama_flash_attention/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/utils/flash_attention/triton_flash_attention/index.doctree b/.doctrees/autoapi/lmflow/utils/flash_attention/triton_flash_attention/index.doctree new file mode 100644 index 000000000..90c4b1214 Binary files /dev/null and b/.doctrees/autoapi/lmflow/utils/flash_attention/triton_flash_attention/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/utils/index.doctree b/.doctrees/autoapi/lmflow/utils/index.doctree new file mode 100644 index 000000000..785297a4c Binary files /dev/null and b/.doctrees/autoapi/lmflow/utils/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/utils/llava_conversation_lib/index.doctree b/.doctrees/autoapi/lmflow/utils/llava_conversation_lib/index.doctree new file mode 100644 index 000000000..476792c38 Binary files /dev/null and b/.doctrees/autoapi/lmflow/utils/llava_conversation_lib/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/utils/model/index.doctree b/.doctrees/autoapi/lmflow/utils/model/index.doctree new file mode 100644 index 000000000..f6769116c Binary files /dev/null and b/.doctrees/autoapi/lmflow/utils/model/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/utils/multimodal/index.doctree b/.doctrees/autoapi/lmflow/utils/multimodal/index.doctree new file mode 100644 index 
000000000..a70cf445e Binary files /dev/null and b/.doctrees/autoapi/lmflow/utils/multimodal/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/utils/position_interpolation/index.doctree b/.doctrees/autoapi/lmflow/utils/position_interpolation/index.doctree new file mode 100644 index 000000000..7f407ebe4 Binary files /dev/null and b/.doctrees/autoapi/lmflow/utils/position_interpolation/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/utils/position_interpolation/llama_rope_scaled_monkey_patch/index.doctree b/.doctrees/autoapi/lmflow/utils/position_interpolation/llama_rope_scaled_monkey_patch/index.doctree new file mode 100644 index 000000000..b0dfe5f4e Binary files /dev/null and b/.doctrees/autoapi/lmflow/utils/position_interpolation/llama_rope_scaled_monkey_patch/index.doctree differ diff --git a/.doctrees/autoapi/lmflow/version/index.doctree b/.doctrees/autoapi/lmflow/version/index.doctree new file mode 100644 index 000000000..0fa69371b Binary files /dev/null and b/.doctrees/autoapi/lmflow/version/index.doctree differ diff --git a/.doctrees/blogs/benchmark.doctree b/.doctrees/blogs/benchmark.doctree new file mode 100644 index 000000000..e32ffbf25 Binary files /dev/null and b/.doctrees/blogs/benchmark.doctree differ diff --git a/.doctrees/blogs/index.doctree b/.doctrees/blogs/index.doctree new file mode 100644 index 000000000..54e3260e4 Binary files /dev/null and b/.doctrees/blogs/index.doctree differ diff --git a/.doctrees/environment.pickle b/.doctrees/environment.pickle new file mode 100644 index 000000000..1524be247 Binary files /dev/null and b/.doctrees/environment.pickle differ diff --git a/.doctrees/examples/DATASETS.doctree b/.doctrees/examples/DATASETS.doctree new file mode 100644 index 000000000..9769046ba Binary files /dev/null and b/.doctrees/examples/DATASETS.doctree differ diff --git a/.doctrees/examples/TASK_GUIDE.doctree b/.doctrees/examples/TASK_GUIDE.doctree new file mode 100644 index 000000000..a13594629 Binary files /dev/null and b/.doctrees/examples/TASK_GUIDE.doctree differ diff --git a/.doctrees/examples/checkpoints.doctree b/.doctrees/examples/checkpoints.doctree new file mode 100644 index 000000000..844a2fb7f Binary files /dev/null and b/.doctrees/examples/checkpoints.doctree differ diff --git a/.doctrees/examples/customize_conversation_template.doctree b/.doctrees/examples/customize_conversation_template.doctree new file mode 100644 index 000000000..745c71d9e Binary files /dev/null and b/.doctrees/examples/customize_conversation_template.doctree differ diff --git a/.doctrees/examples/finetuning.doctree b/.doctrees/examples/finetuning.doctree new file mode 100644 index 000000000..484d0d45f Binary files /dev/null and b/.doctrees/examples/finetuning.doctree differ diff --git a/.doctrees/examples/index.doctree b/.doctrees/examples/index.doctree new file mode 100644 index 000000000..1c3b71b91 Binary files /dev/null and b/.doctrees/examples/index.doctree differ diff --git a/.doctrees/examples/medical_finetune.doctree b/.doctrees/examples/medical_finetune.doctree new file mode 100644 index 000000000..7fdf3ae23 Binary files /dev/null and b/.doctrees/examples/medical_finetune.doctree differ diff --git a/.doctrees/examples/raft.doctree b/.doctrees/examples/raft.doctree new file mode 100644 index 000000000..9c79cdfba Binary files /dev/null and b/.doctrees/examples/raft.doctree differ diff --git a/.doctrees/examples/reward_modeling.doctree b/.doctrees/examples/reward_modeling.doctree new file mode 100644 index 000000000..dbc381a8b Binary files /dev/null and 
b/.doctrees/examples/reward_modeling.doctree differ diff --git a/.doctrees/examples/supported_conversation_template.doctree b/.doctrees/examples/supported_conversation_template.doctree new file mode 100644 index 000000000..1048d1412 Binary files /dev/null and b/.doctrees/examples/supported_conversation_template.doctree differ diff --git a/.doctrees/index.doctree b/.doctrees/index.doctree new file mode 100644 index 000000000..999353f46 Binary files /dev/null and b/.doctrees/index.doctree differ diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 000000000..e69de29bb diff --git a/_images/IT_sample1.png b/_images/IT_sample1.png new file mode 100644 index 000000000..705705509 Binary files /dev/null and b/_images/IT_sample1.png differ diff --git a/_images/IT_sample2.png b/_images/IT_sample2.png new file mode 100644 index 000000000..27b0bda7f Binary files /dev/null and b/_images/IT_sample2.png differ diff --git a/_images/IT_sample3.png b/_images/IT_sample3.png new file mode 100644 index 000000000..b8c7b8dba Binary files /dev/null and b/_images/IT_sample3.png differ diff --git a/_images/IT_sample4.png b/_images/IT_sample4.png new file mode 100644 index 000000000..19caded9f Binary files /dev/null and b/_images/IT_sample4.png differ diff --git a/_images/IT_sample5.png b/_images/IT_sample5.png new file mode 100644 index 000000000..b27c3eb11 Binary files /dev/null and b/_images/IT_sample5.png differ diff --git a/_images/IT_sample6.png b/_images/IT_sample6.png new file mode 100644 index 000000000..bdb535463 Binary files /dev/null and b/_images/IT_sample6.png differ diff --git a/_images/IT_sample7.png b/_images/IT_sample7.png new file mode 100644 index 000000000..6cafa761d Binary files /dev/null and b/_images/IT_sample7.png differ diff --git a/_images/benchmark-1.png b/_images/benchmark-1.png new file mode 100644 index 000000000..0c4ab31f1 Binary files /dev/null and b/_images/benchmark-1.png differ diff --git a/_images/benchmark-2.png b/_images/benchmark-2.png new file mode 100644 index 000000000..30f537cd0 Binary files /dev/null and b/_images/benchmark-2.png differ diff --git a/_images/nll.png b/_images/nll.png new file mode 100644 index 000000000..f5f7d0b7d Binary files /dev/null and b/_images/nll.png differ diff --git a/_images/ppl.png b/_images/ppl.png new file mode 100644 index 000000000..a40a8289b Binary files /dev/null and b/_images/ppl.png differ diff --git a/_images/raft_idea.PNG b/_images/raft_idea.PNG new file mode 100644 index 000000000..b3fe93c86 Binary files /dev/null and b/_images/raft_idea.PNG differ diff --git a/_images/raft_reward.PNG b/_images/raft_reward.PNG new file mode 100644 index 000000000..f69f56e81 Binary files /dev/null and b/_images/raft_reward.PNG differ diff --git a/_modules/index.html b/_modules/index.html new file mode 100644 index 000000000..ba8bc735d --- /dev/null +++ b/_modules/index.html @@ -0,0 +1,540 @@ + + + + + + + + + + Overview: module code — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+All modules for which code is available
diff --git a/_modules/lmflow.html b/_modules/lmflow.html new file mode 100644 index 000000000..12fb85283 --- /dev/null +++ b/_modules/lmflow.html @@ -0,0 +1,473 @@
+lmflow — LMFlow documentation
+Source code for lmflow
+from .version import __version__ as internal_version
+
+
+__version__ = internal_version
+
+
+from transformers.utils import check_min_version
+from transformers.utils.versions import require_version
+
+from lmflow import args, datasets, models, pipeline, utils
+
+# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
+check_min_version("4.27.0.dev0")
+
+require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")
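The package __init__ above re-exports the version string and eagerly imports the subpackages, including lmflow.args whose source follows below. A quick import-level sanity check (a minimal sketch, assuming lmflow and a sufficiently new transformers are installed) looks like:

    import lmflow
    from lmflow.args import ModelArguments, DatasetArguments  # defined in lmflow/args.py below

    print(lmflow.__version__)  # re-exported from lmflow.version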
diff --git a/_modules/lmflow/args.html b/_modules/lmflow/args.html new file mode 100644 index 000000000..e9387bfca --- /dev/null +++ b/_modules/lmflow/args.html @@ -0,0 +1,2615 @@
+lmflow.args — LMFlow documentation
+Source code for lmflow.args
+#!/usr/bin/env python
+# coding=utf-8
+"""This script defines dataclasses: ModelArguments and DatasetArguments,
+that contain the arguments for the model and dataset used in training.
+
+It imports several modules, including dataclasses, field from typing, Optional from typing,
+require_version from transformers.utils.versions, MODEL_FOR_CAUSAL_LM_MAPPING,
+and TrainingArguments from transformers.
+
+MODEL_CONFIG_CLASSES is assigned a list of the model config classes from
+MODEL_FOR_CAUSAL_LM_MAPPING. MODEL_TYPES is assigned a tuple of the model types
+extracted from the MODEL_CONFIG_CLASSES.
+"""
+import logging
+from dataclasses import dataclass, field, fields, Field, make_dataclass
+from pathlib import Path
+from typing import Optional, List, Union, Dict
+
+from transformers import (
+    MODEL_FOR_CAUSAL_LM_MAPPING,
+    TrainingArguments,
+)
+from transformers.utils.versions import require_version
+
+
+MODEL_CONFIG_CLASSES = list(MODEL_FOR_CAUSAL_LM_MAPPING.keys())
+MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
+
+logger = logging.getLogger(__name__)
+
+
+class OptimizerNames():
+    DUMMY = "dummy"
+    ADABELIEF = "adabelief"
+    ADABOUND = "adabound"
+    LARS = "lars"
+    LAMB = "lamb"
+    ADAMAX = "adamax"
+    NADAM = "nadam"
+    RADAM = "radam"
+    ADAMP = "adamp"
+    SGDP = "sgdp"
+    YOGI = "yogi"
+    SOPHIA = "sophia"
+    ADAN = "adan"
+    ADAM = "adam"
+    NOVOGRAD = "novograd"
+    ADADELTA = "adadelta"
+    ADAGRAD = "adagrad"
+    ADAMW_SCHEDULE_FREE = "adamw_schedule_free"
+    SGD_SCHEDULE_FREE = "sgd_schedule_free"
+
+
+@dataclass
+class ModelArguments:
+    """
+    Define a class ModelArguments using the dataclass decorator.
+    The class contains several optional parameters that can be used to configure a model.
+
+    model_name_or_path : str
+        a string representing the path or name of a pretrained
+        model checkpoint for weights initialization. If None, a model will be trained from scratch.
+
+    model_type : str
+        a string representing the type of model to use if training from
+        scratch. If not provided, a pretrained model will be used.
+
+    config_overrides : str
+        a string representing the default config settings to override
+        when training a model from scratch.
+
+    config_name : str
+        a string representing the name or path of the pretrained config to
+        use, if different from the model_name_or_path.
+
+    tokenizer_name : str
+        a string representing the name or path of the pretrained tokenizer
+        to use, if different from the model_name_or_path.
+
+    cache_dir : str
+        a string representing the path to the directory where pretrained models
+        downloaded from huggingface.co will be stored.
+
+    use_fast_tokenizer : bool
+        a boolean indicating whether to use a fast tokenizer (backed by the
+        tokenizers library) or not.
+
+    model_revision : str
+        a string representing the specific model version to use (can be a
+        branch name, tag name, or commit id).
+
+    use_auth_token : bool
+        a boolean indicating whether to use the token generated when running
+        huggingface-cli login (necessary to use this script with private models).
+
+    torch_dtype : str
+        a string representing the dtype to load the model under. If auto is
+        passed, the dtype will be automatically derived from the model's weights.
+
+    use_ram_optimized_load : bool
+        a boolean indicating whether to use disk mapping when memory is not
+        enough.
+
+    use_int8 : bool
+        a boolean indicating whether to load int8 quantization for inference.
+
+    load_in_4bit : bool
+        whether to load the model in 4bit
+
+    model_max_length : int
+        The maximum length of the model.
+
+    truncation_side : str
+        The side on which the model should have truncation applied.
+
+    arch_type : str
+        Model architecture type.
+    padding_side : str
+        The side on which the tokenizer should have padding applied.
+    eos_padding : bool
+        whether to pad with eos token instead of pad token.
+    ignore_bias_buffers : bool
+        fix for DDP issues with LM bias/mask buffers - invalid scalar type,`inplace operation.
+    """
+
+    model_name_or_path: Optional[str] = field(
+        default=None,
+        metadata={
+            "help": (
+                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
+            )
+        },
+    )
+ +
+[docs] + lora_model_path: Optional[str] = field( + default=None, + metadata={ + "help": ( + "The incremental model diff introduced by LoRA finetuning." + " Along with the original non-finetuned model forms the whole" + " finetuned model." + ) + } + )
+ +
+[docs] + model_type: Optional[str] = field( + default=None, + metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)}, + )
+ +
+[docs] + config_overrides: Optional[str] = field( + default=None, + metadata={ + "help": ( + "Override some existing default config settings when a model is trained from scratch. Example: " + "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index" + ) + }, + )
+ +
+[docs] + arch_type: Optional[str] = field( + default="decoder_only", + metadata={ + "help": ("Model architecture type."), + "choices": ["decoder_only", "encoder_decoder", "text_regression", "vision_encoder_decoder"], + }, + )
+ +
+[docs] + config_name: Optional[str] = field( + default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} + )
+ +
+[docs] + tokenizer_name: Optional[str] = field( + default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} + )
+ +
+[docs] + cache_dir: Optional[str] = field( + default=None, + metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, + )
+ +
+[docs] + use_fast_tokenizer: bool = field( + default=True, + metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, + )
+ +
+[docs] + model_revision: str = field( + default="main", + metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, + )
+ +
+[docs] + use_auth_token: bool = field( + default=False, + metadata={ + "help": ( + "Will use the token generated when running `huggingface-cli login` (necessary to use this script " + "with private models)." + ) + }, + )
+ +
+[docs] + trust_remote_code: bool = field( + default=False, + metadata={ + "help": ( + "Whether to trust remote code when loading model." + ) + }, + )
+ +
+[docs] + torch_dtype: Optional[str] = field( + default=None, + metadata={ + "help": ( + "Override the default `torch.dtype` and load the model under this dtype. If `auto` is passed, the " + "dtype will be automatically derived from the model's weights." + ), + "choices": ["auto", "bfloat16", "float16", "float32"], + }, + )
+ +
+[docs] + use_lora: bool = field( + default=False, + metadata={"help": "Whether to lora."}, + )
+ +
+[docs] + use_qlora: bool = field( + default=False, + metadata={"help": "Whether to use qlora."}, + )
+ +
+[docs] + bits: int = field( + default=4, + metadata={"help": "The number of bits for quantization.", + "choices": [4, 8], }, + )
+ +
+[docs] + quant_type: str = field( + default='nf4', + metadata={"help": "The quantization type for quantization.", + "choices": ["nf4", "fp4"], }, + )
+ +
+[docs] + double_quant: bool = field( + default=True, + metadata={"help": "Whether to use double quantization."}, + )
+ +
+[docs] + lora_r: int = field( + default=8, + metadata={"help": "the rank of the lora parameters. The smaller lora_r is , the fewer parameters lora has."}, + )
+ +
+[docs] + lora_alpha: int = field( + default=32, + metadata={ + "help": "Merging ratio between the fine-tuned model and the original. This is controlled by a parameter called alpha in the paper."}, + )
+ +
+[docs] + lora_target_modules: List[str] = field( + default=None, metadata={"help": "Pretrained config name or path if not the same as model_name",} + )
+ +
+[docs] + lora_dropout: float = field( + default=0.1, + metadata={"help": "The dropout rate in lora.linear."}, + )
+ +
+[docs] + save_aggregated_lora: bool = field( + default=False, + metadata={"help": "Whether to save aggregated lora."}, + )
+ +
+[docs] + use_ram_optimized_load: bool = field( + default=True, + metadata={"help": "Whether use disk mapping when memory is not enough."} + )
+ +
+[docs] + use_flash_attention: bool = field( + default=False, + metadata={ + "help": ( + "whether use flash attention layer to reduce GPU memory with" + " higher time cost." + ) + } + )
+ +
+[docs] + truncate_to_model_max_length: bool = field( + default=True, + metadata={ + "help": ( + "whether truncate the dataset to model max length." + ) + } + )
+ +
+[docs] + do_rope_scaling: bool = field( + default=False, + metadata={ + "help": ( + "whether do ROPE scaling for llama model." + "Linear_scaling credits to the Reddit user /u/kaiokendev." + "https://arxiv.org/abs/2306.15595" + "NTK_scaling credits to the Reddit users /u/bloc97 and /u/emozilla." + "https://www.reddit.com/r/LocalLLaMA/comments/14lz7j5/ntkaware_scaled_rope_allows_llama_models_to_have/" + ) + } + )
+ +
+[docs] + rope_pi_ratio: int = field( + default=1, + metadata={ + "help": ( + "the ratio of pi in RoPE scaling." + ) + } + )
+ +
+[docs] + rope_ntk_ratio: int = field( + default=1, + metadata={ + "help": ( + "the ratio of NTK in RoPE scaling." + ) + } + )
+ +
+[docs] + use_int8: bool = field( + default=False, + metadata={"help": "whether to load int8 quantization for inference"} + )
+ +
+[docs] + load_in_4bit: Optional[bool] = field( + default=True, + metadata={ + "help": "whether to load the model in 4bit" + }, + )
+ +
+[docs] + model_max_length: Optional[int] = field( + default=None, + metadata={"help": ( + "The maximum length of the model. When not specified, " + "will follow the model's default max length. (i.e., tokenizer.model_max_length)") + }, + )
+ +
+[docs] + truncation_side: str = field( + default=None, + metadata={ + "help": ( + "The side on which the tokenizer should have truncation applied. " + "When not specified, will follow the tokenizer's default truncation strategy. " + "(i.e., tokenizer.truncation_side)"), + "choices": [None, "left", "right"], + }, + )
+ +
+[docs] + padding_side: str = field( + default='right', + metadata={ + "help": ( + "The side on which the tokenizer should have padding applied. " + "LMFlow uses right padding by default. When set to `auto`, will " + "use padding_side from tokenizer.padding_side."), + "choices": ["right", "left", "auto"], + } + )
+ +
+[docs] + eos_padding: Optional[bool] = field( + default=False, + metadata={"help": "whether to pad with eos token"} + )
+ +
+[docs] + ignore_bias_buffers: Optional[bool] = field( + default=False, + metadata={ + # debug argument for distributed training + "help": "fix for DDP issues with LM bias/mask buffers - invalid scalar type,`inplace operation. See" + "https://github.com/huggingface/transformers/issues/22482#issuecomment-1595790992" + }, + )
+ + + +
+    def __post_init__(self):
+        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
+            raise ValueError(
+                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
+            )
+
+        if self.use_qlora:
+            if not self.use_lora:
+                logger.warning("use_qlora is set to True, but use_lora is not set to True. Setting use_lora to True.")
+                self.use_lora = True
+
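As a rough illustration of how these arguments compose (a hypothetical sketch, not code taken from the LMFlow repository), a LoRA-style configuration can be built directly; the __post_init__ hook above then switches use_lora on whenever use_qlora is requested. The checkpoint name is a placeholder.

    from lmflow.args import ModelArguments

    model_args = ModelArguments(
        model_name_or_path="facebook/opt-125m",  # placeholder checkpoint; any HF causal LM works
        torch_dtype="bfloat16",
        use_qlora=True,   # __post_init__ warns and forces use_lora=True
        lora_r=8,
        lora_alpha=32,
        lora_dropout=0.1,
    )
    assert model_args.use_lora is True  # set by __post_init__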
+
+
+@dataclass
+class VisModelArguments(ModelArguments):
+[docs] + low_resource: Optional[bool] = field( + default=False, + metadata={ + "help": "Use 8 bit and float16 when loading llm" + } + )
+ +
+[docs] + custom_model: bool = field( + default=False, + metadata={"help": "flag for the model from huggingface or not"} + )
+ +
+[docs] + pretrained_language_projection_path: str = field( + default=None, + metadata={"help": "path for model pretrained_language_projection_path"} + )
+ +
+[docs] + custom_vision_model: bool = field( + default=False, + metadata={"help": "flag for the model from huggingface or not"} + )
+ +
+[docs] + image_encoder_name_or_path: Optional[str] = field( + default=None, + metadata={ + "help": ( + "The name or path of the image encoder to use." + ) + }, + )
+ +
+[docs] + qformer_name_or_path: Optional[str] = field( + default=None, + metadata={ + "help": ( + "llm model in multi-modality model" + ) + }, + )
+ +
+[docs] + llm_model_name_or_path: Optional[str] = field( + default=None, + metadata={ + "help": ( + "llm model in multi-modality model" + ) + }, + )
+ +
+[docs] + use_prompt_cache: bool = field( + default=False, + metadata={"help": "Whether to use prompt cache."}, + )
+ +
+[docs] + prompt_cache_path: Optional[str] = field( + default=None, + metadata={"help": "Path to prompt cache."}, + )
+ +
+[docs] + llava_loading: Optional[bool] = field( + default=False, + metadata={"help": "Whether to load module by module from pretrained model."}, + )
+ +
+[docs] + with_qformer: Optional[bool] = field( + default=False, + metadata={"help": "Whether to use qformer."}, + )
+ +
+[docs] + vision_select_layer: Optional[int] = field( + default=-2, + metadata={"help": "Which layer to select in vision model."}, + )
+ +
+[docs] + llava_pretrain_model_path: Optional[str] = field( + default=None, + metadata={"help": "Path to llava pretrained model."}, + )
+ +
+[docs] + save_pretrain_model_path: Optional[str] = field( + default=None, + metadata={"help": "Path to pretrained model."}, + )
+
+
+
+@dataclass
+class DatasetArguments:
+    """
+    Define a class DatasetArguments using the dataclass decorator.
+    The class contains several optional parameters that can be used to configure a dataset for a language model.
+
+    dataset_path : str
+        a string representing the path of the dataset to use.
+
+    dataset_name : str
+        a string representing the name of the dataset to use. The default value is "customized".
+
+    is_custom_dataset : bool
+        a boolean indicating whether to use custom data. The default value is False.
+
+    customized_cache_dir : str
+        a string representing the path to the directory where customized dataset caches will be stored.
+
+    dataset_config_name : str
+        a string representing the configuration name of the dataset to use (via the datasets library).
+
+    train_file : str
+        a string representing the path to the input training data file (a text file).
+
+    validation_file : str
+        a string representing the path to the input evaluation data file to evaluate the perplexity on (a text file).
+
+    max_train_samples : int
+        an integer indicating the maximum number of training examples to use for debugging or quicker training.
+        If set, the training dataset will be truncated to this number.
+
+    max_eval_samples: int
+        an integer indicating the maximum number of evaluation examples to use for debugging or quicker training.
+        If set, the evaluation dataset will be truncated to this number.
+
+    streaming : bool
+        a boolean indicating whether to enable streaming mode.
+
+    block_size: int
+        an integer indicating the optional input sequence length after tokenization. The training dataset will be
+        truncated in blocks of this size for training.
+
+    train_on_prompt: bool
+        a boolean indicating whether to train on prompt for conversation datasets such as ShareGPT.
+
+    conversation_template: str
+        a string representing the template for conversation datasets.
+
+    The class also includes some additional parameters that can be used to configure the dataset further, such as `overwrite_cache`,
+    `validation_split_percentage`, `preprocessing_num_workers`, `disable_group_texts`, `demo_example_in_prompt`, `explanation_in_prompt`,
+    `keep_linebreaks`, and `prompt_structure`.
+
+    The field function is used to set default values and provide help messages for each parameter. The Optional type hint is
+    used to indicate that a parameter is optional. The metadata argument is used to provide additional information about
+    each parameter, such as a help message.
+    """
+
+[docs] + dataset_path: Optional[str] = field( + default=None, metadata={"help": "The path of the dataset to use."} + )
+ +
+[docs] + dataset_name: Optional[str] = field( + default="customized", metadata={"help": "Should be \"customized\""} + )
+ +
+[docs] + is_custom_dataset: Optional[bool] = field( + default=False, metadata={"help": "whether to use custom data"} + )
+ +
+[docs] + customized_cache_dir: Optional[str] = field( + default=".cache/llm-ft/datasets", + metadata={"help": "Where do you want to store the customized dataset caches"}, + )
+ +
+[docs] + dataset_config_name: Optional[str] = field( + default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} + )
+ +
+[docs] + train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
+ +
+[docs] + validation_file: Optional[str] = field( + default=None, + metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, + )
+ +
+[docs] + max_train_samples: Optional[int] = field( + default=None, + metadata={ + "help": ( + "For debugging purposes or quicker training, truncate the number of training examples to this " + "value if set." + ) + }, + )
+ +
+[docs] + max_eval_samples: Optional[int] = field( + default=1e10, + metadata={ + "help": ( + "For debugging purposes or quicker training, truncate the number of evaluation examples to this " + "value if set." + ) + }, + )
+ +
+[docs] + streaming: bool = field(default=False, metadata={"help": "Enable streaming mode"})
+ +
+[docs] + block_size: Optional[int] = field( + default=None, + metadata={ + "help": ( + "Optional input sequence length after tokenization. " + "The training dataset will be truncated in block of this size for training. " + "Default to the model max input length for single sentence inputs (take into account special tokens)." + ) + }, + )
+ +
+[docs] + overwrite_cache: bool = field( + default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} + )
+ +
+[docs] + validation_split_percentage: Optional[int] = field( + default=5, + metadata={ + "help": "The percentage of the train set used as validation set in case there's no validation split" + }, + )
+ +
+[docs] + preprocessing_num_workers: Optional[int] = field( + default=None, + metadata={"help": "The number of processes to use for the preprocessing."}, + )
+ +
+[docs] + group_texts_batch_size: int = field( + default=1000, + metadata={ + "help": ( + "Number of samples that will be grouped together to go though" + " `group_texts` operation. See `--disable_group_texts` for" + " detailed explanation of this operation." + ) + } + )
+ +
+    disable_group_texts: bool = field(
+        default=True,
+        metadata={
+            "help": (
+                "Whether we disable grouping of original samples together to"
+                " generate sample sequences of length `block_size`."
+                " By default, it is True, which means the long samples"
+                " are truncated to `block_size` tokens"
+                " and short samples are padded to `block_size` tokens."
+                " If set to False, we group every 1000 tokenized"
+                " sequences together, divide them into"
+                " [{total_num_tokens} / {block_size}] sequences,"
+                " each with `block_size` tokens"
+                " (the remaining tokens are omitted)."
+                " This group text behavior is useful"
+                " for continual pretrain or pretrain."
+            )
+        },
+    )
+ +
+[docs] + keep_linebreaks: bool = field( + default=True, metadata={"help": "Whether to keep line breaks when using TXT files or not."} + )
+ +
+[docs] + test_file: Optional[str] = field( + default=None, + metadata={"help": "Evaluation File Path"}, + )
+ +
+[docs] + train_on_prompt: bool = field( + default=False, + metadata={"help": "Whether to train on prompt for conversation datasets such as ShareGPT."} + )
+ +
+[docs] + conversation_template: Optional[str] = field( + default=None, + metadata={"help": "The template for conversation datasets."} + )
+ + +
+    def __post_init__(self):
+        if self.streaming:
+            require_version("datasets>=2.0.0", "The streaming feature requires `datasets>=2.0.0`")
+
+        if self.dataset_name is None and self.train_file is None and self.validation_file is None:
+            raise ValueError("Need either a dataset name or a training/validation file.")
+        else:
+            if self.train_file is not None:
+                extension = self.train_file.split(".")[-1]
+                assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
+            if self.validation_file is not None:
+                extension = self.validation_file.split(".")[-1]
+                assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
+
+ + + +@dataclass +
+[docs] +class MultiModalDatasetArguments(DatasetArguments): +
+[docs] + image_folder: Optional[str] = field( + default=None, metadata={"help": "The folder of the image file."} + )
+ +
+[docs] + image_aspect_ratio: Optional[str] = field( + default="pad", metadata={"help": "How to handle the image aspect ratio, e.g. 'pad'."} + )
+ +
+[docs] + is_multimodal: Optional[bool] = field( + default=True, metadata={"help": "Whether the dataset is multi-modal."} + )
+ +
+[docs] + use_image_start_end: Optional[bool] = field( + default=True, metadata={"help": "Whether to wrap image tokens with image start/end tokens."} + )
+ +
+[docs] + sep_style: Optional[str] = field( + default="plain", metadata={"help": "Sep style in multi_modality dataset."} + )
+
+ + + +@dataclass +
+[docs] +class FinetunerArguments(TrainingArguments): + """ + Adapt transformers.TrainingArguments + """ +
+[docs] + eval_dataset_path: Optional[str] = field( + default=None, metadata={"help": "The path of the eval dataset to use."} + )
+ +
+[docs] + remove_unused_columns: Optional[bool] = field( + default=False, + metadata={ + "help": "whether to remove the unused columns in collate fn"} + )
+ +
+[docs] + finetune_part: Optional[str] = field( + default="language_projection", + metadata={ + "help": "the module to finetune." + } + )
+ +
+[docs] + save_language_projection: Optional[bool] = field( + default=False, + metadata={ + "help": "whether to save the language projection layer in multi-modal models." + } + )
+ +
+[docs] + use_lisa: bool = field( + default=False, + metadata={ + "help": "whether to use LISA training strategy." + } + )
+ +
+[docs] + lisa_activated_layers: int = field( + default=2, + metadata={ + "help": "the number of activated layers in LISA." + } + )
+ +
+[docs] + lisa_interval_steps: int = field( + default=20, + metadata={ + "help": "the number of steps in each freezing interval of LISA, i.e. the selected unfrozen layers are randomly switched every {lisa_interval_steps} steps." + } + )
+ +
+[docs] + lisa_layers_attribute: str = field( + default="model.model.layers", + metadata={ + "help": "the attribute path where the model's layers are stored, e.g. model.model.layers" + } + )
+ +
+[docs] + use_customized_optim: bool = field( + default=False, + metadata={ + "help": "whether to use customized optimizers." + } + )
+ +
+[docs] + customized_optim: str = field( + default="sign_sgd", + metadata={ + "help": "name of the customized optimizer." + } + )
+ +
+[docs] + customized_optim_args: Optional[str] = field( + default=None, + metadata={ + "help": "optional arguments supplied to the customized optimizer." + } + )
+ +
+[docs] + optim_dummy_beta1: float = field( + default=0.9, + metadata={ + "help": "A useless argument for dummy optimizer, just for tutorial" + } + )
+ +
+[docs] + optim_dummy_beta2: float = field( + default=0.999, + metadata={ + "help": "A useless argument for dummy optimizer, just for tutorial" + } + )
+ +
+[docs] + optim_adam_beta1: float = field( + default=0.9, + metadata={ + "help": "Coefficient used for computing running averages of gradient" + } + )
+ +
+[docs] + optim_adam_beta2: float = field( + default=0.999, + metadata={ + "help": "Coefficient used for computing running averages of squared gradient" + } + )
+ +
+[docs] + optim_beta1: float = field( + default=0.9, + metadata={ + "help": "Coefficient used for computing running averages of gradient" + } + )
+ +
+[docs] + optim_beta2: float = field( + default=0.999, + metadata={ + "help": "Coefficient used for computing running averages of squared gradient" + } + )
+ +
+[docs] + optim_beta3: float = field( + default=0.9, + metadata={ + "help": "Coefficient used for computing running averages of gradient" + } + )
+ +
+[docs] + optim_momentum: float = field( + default=0.999, + metadata={ + "help": "Coefficient used for the momentum term in optimizers like SGD with momentum" + } + )
+ +
+[docs] + optim_weight_decay: float = field( + default=0, + metadata={ + "help": "Weight decay (L2 penalty) added to the loss to prevent overfitting" + } + )
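+
+# --- illustrative sketch (not part of the original lmflow.args source): how the LISA
+# arguments above are typically consumed. Every `lisa_interval_steps` optimizer steps,
+# `lisa_activated_layers` transformer layers (located via `lisa_layers_attribute`) are
+# randomly unfrozen while all other layers stay frozen. This helper is a simplified
+# assumption of that behaviour, not LMFlow's actual LISA implementation.
+import random as _random
+
+def _lisa_switch_active_layers(model, lisa_activated_layers, lisa_layers_attribute):
+    # Resolve e.g. "model.model.layers" relative to the wrapped model object.
+    layers = model
+    for attr in lisa_layers_attribute.split(".")[1:]:
+        layers = getattr(layers, attr)
+    # Freeze every layer, then unfreeze a random subset of `lisa_activated_layers` layers.
+    for layer in layers:
+        for param in layer.parameters():
+            param.requires_grad = False
+    for idx in _random.sample(range(len(layers)), k=min(lisa_activated_layers, len(layers))):
+        for param in layers[idx].parameters():
+            param.requires_grad = True
+# --- end of sketch ---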
+
+ + +@dataclass +
+[docs] +class RewardModelTunerArguments(FinetunerArguments): + """ + Arguments for reward modeling. + """ + pass
+ + + +@dataclass +
+[docs] +class EvaluatorArguments: + """ + Define a class EvaluatorArguments using the dataclass decorator. The class contains several optional + parameters that can be used to configure an evaluator. + + local_rank : int + For distributed training: local_rank + + random_shuffle : bool + + use_wandb : bool + + random_seed : int, default = 1 + + output_dir : str, default = './output_dir' + + mixed_precision : str, choice from ["bf16","fp16"]. + mixed precision mode, whether to use bf16 or fp16 + + deepspeed : + Enable deepspeed and pass the path to deepspeed json config file (e.g. ds_config.json) or an already + loaded json file as a dict + + temperature : float + An argument of model.generate in huggingface to control the diversity of generation. + + repetition_penalty : float + An argument of model.generate in huggingface to penalize repetitions. + """ +
+[docs] + local_rank: int = field( + default=-1, + metadata={"help": "For distributed training: local_rank" + } + )
+ + +
+[docs] + random_shuffle: Optional[bool] = field( + default=False, + metadata={"help": "" + } + )
+ + +
+[docs] + use_wandb: Optional[bool] = field( + default=False, + metadata={ + "help": ( + "When this flag is True, wandb will be enabled" + ) + }, + )
+ +
+[docs] + random_seed: Optional[int] = field( + default=1, + metadata={ + "help": ( + "used to set random seed" + ) + }, + )
+ +
+[docs] + output_dir: Optional[str] = field( + default="./output_dir", + metadata={"help": "Output path for the inference results"}, + )
+ +
+[docs] + mixed_precision: Optional[str] = field( + default="bf16", + metadata={ + "help": ( + "mixed precision mode, whether to use bf16 or fp16" + ), + "choices": ["bf16", "fp16"], + }, + )
+ +
+[docs] + deepspeed: Optional[str] = field( + default=None, + metadata={ + "help": ( + "Enable deepspeed and pass the path to deepspeed json config file (e.g. ds_config.json) or an already" + " loaded json file as a dict" + ) + }, + )
+ +
+[docs] + answer_type: Optional[str] = field( + default="text", + metadata={ + "help": ( + 'Question type for answer extraction from the decoder output.' + ' Supported types: \n' + ' 1) "multiple_choice", e.g. A, B, C, D, ...\n' + ' 2) "binary_choice", e.g. yes, no, maybe\n' + ' 3) "math", e.g. 1.0, -3.52\n' + ' 4) "text", e.g. "I think that it is okay"\n' + ' 5) Special treatment for several datasets\n' + ' - "gsm8k"\n' + ' - "svamp"\n' + ' - "asdiv"\n' + ' - "addsub"\n' + ' - "singleeq"\n' + ' - "multiarith"\n' + ' - "aqua"\n' + ' - "csqa"\n' + ' - "strategyqa"\n' + ' - "pubmedqa"\n' + ' - "medmcqa"\n' + ' - "usmle"\n' + ) + }, + )
+ +
+[docs] + prompt_structure: Optional[str] = field( + default="{input}", + metadata={ + "help": ( + 'Prompt structure to facilitate prompt engineering during' + ' inference. The model will receive' + ' `prompt_structure.format(input=input)` as its input.' + ) + }, + )
+ +
+[docs] + evaluate_block_size: Optional[int] = field( + default=512, + metadata={ + "help": ( + "the model will have at least block_size tokens for context when calculating the conditional likelihood of any one token" + " (provided there are block_size preceding tokens available to condition on)" + ) + }, + )
+ +
+[docs] + metric: Optional[str] = field( + default="accuracy", + metadata={ + "help": "the metric the model will be evaluated on", + "choices": ["ppl", "perplexity", "acc", "accuracy", "nll", "neg_log_likelihood"], + }, + )
+ +
+[docs] + inference_batch_size_per_device: Optional[int] = field( + default=1, + metadata={ + "help": ( + "every device will infer {inference_batch_size_per_device}" + " samples in parallel. The inferred results will be concatenated" + " with inputs and attached with a reward." + ), + }, + )
+ +
+[docs] + use_accelerator_for_evaluator: bool = field( + default=False, metadata={"help": "Whether to use Huggingface Accelerator instead of Deepspeed"}, + )
+ + +
+[docs] + temperature: float = field( + default=0, + metadata={"help": "Temperature during inference."}, + )
+ + +
+[docs] + repetition_penalty: float = field( + default=1, + metadata={"help": "Repetition_penalty during inference."}, + )
+ + +
+[docs] + max_new_tokens: int = field( + default=100, + metadata={"help": "Maximum length during inference."}, + )
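+
+# --- illustrative sketch (not part of the original lmflow.args source): how
+# `prompt_structure` is applied at inference time; the template is a made-up example.
+_example_prompt_structure = "Question: {input}\nAnswer:"
+_example_model_input = _example_prompt_structure.format(input="What is 2 + 3?")
+# _example_model_input == "Question: What is 2 + 3?\nAnswer:"
+# --- end of sketch ---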
+
+ + + +@dataclass +
+[docs] +class InferencerArguments: + """ + Define a class InferencerArguments using the dataclass decorator. The class contains several optional + parameters that can be used to configure an inferencer. + + local_rank : int + For distributed training: local_rank + random_seed : int, default = 1 + inference_batch_size : int, default = 1 + deepspeed : + Enable deepspeed and pass the path to deepspeed json config file (e.g. ds_config.json) or an already + loaded json file as a dict + mixed_precision : str, choice from ["bf16","fp16"]. + mixed precision mode, whether to use bf16 or fp16 + temperature : float + An argument of model.generate in huggingface to control the diversity of generation. + repetition_penalty : float + An argument of model.generate in huggingface to penalize repetitions. + use_beam_search : Optional[bool] + Whether to use beam search during inference, by default False. + num_output_sequences : Optional[int] + Number of output sequences to return for the given prompt, + currently only used in vllm inference, by default 8. + top_p : Optional[float] + top_p for sampling, by default 1.0. + top_k : Optional[int] + top_k for sampling, by default -1 (no top_k). + additional_stop_token_ids : Optional[List[int]] + the ids of the end of sentence tokens, by default []. + apply_chat_template : Optional[bool] + Whether to apply chat template, by default True. + save_results : Optional[bool] + Whether to save inference results, by default False. + results_path : Optional[str] + The **json file** path of inference results, by default None. + enable_decode_inference_result : Optional[bool] + Whether to detokenize the inference results. + + NOTE: For iterative align pipelines, whether to detokenize depends on + the homogeneity of the policy model and the reward model + (i.e., if they have the same tokenizer). + use_vllm: bool, optional + Whether to use VLLM for inference, by default False. + vllm_tensor_parallel_size: int, optional + The tensor parallel size for VLLM inference. + vllm_gpu_memory_utilization: float, optional + The GPU memory utilization for VLLM inference. The proportion of GPU + memory (per GPU) to use for VLLM inference. + """ +
+[docs] + device: str = field( + default="gpu", + metadata={ + "help": "device of chatbot", + "choices": ["gpu", "cpu"], + }, + )
+ +
+[docs] + local_rank: int = field( + default=-1, + metadata={"help": "For distributed training: local_rank" + }, + )
+ +
+[docs] + inference_batch_size: int = field( + default=1, + metadata={"help": "batch size for inference"}, + )
+ +
+[docs] + vllm_inference_batch_size: int = field( + default=1, + metadata={"help": "The batch size for VLLM inference."} + )
+ +
+[docs] + temperature: float = field( + default=0.0, + metadata={"help": "Temperature during inference."}, + )
+ + +
+[docs] + repetition_penalty: float = field( + default=1, + metadata={"help": "Repetition_penalty during inference."}, + )
+ + +
+[docs] + max_new_tokens: int = field( + default=100, + metadata={"help": "Maximum length during inference."}, + )
+ + +
+[docs] + random_seed: Optional[int] = field( + default=1, + metadata={ + "help": ( + "used to set random seed" + ) + }, + )
+ +
+[docs] + deepspeed: Optional[str] = field( + default=None, + metadata={ + "help": ( + "Enable deepspeed and pass the path to deepspeed json config file (e.g. ds_config.json) or an already" + " loaded json file as a dict" + ) + }, + )
+ +
+[docs] + mixed_precision: Optional[str] = field( + default="bf16", + metadata={ + "help": ( + "mixed precision mode, whether to use bf16 or fp16" + ), + "choices": ["bf16", "fp16"], + }, + )
+ +
+[docs] + do_sample: Optional[bool] = field( + default=False, + metadata={ + "help": "whether to turn on true random sampling during inference." + }, + )
+ +
+[docs] + use_accelerator: bool = field( + default=False, metadata={"help": "Whether to use Huggingface Accelerator instead of Deepspeed"}, + )
+ + + +
+[docs] + num_output_sequences: Optional[int] = field( + default=8, + metadata={"help": ( + "number of output sequences to return for the given prompt, " + "currently only used in vllm inference." + )}, + )
+ +
+[docs] + top_p: Optional[float] = field( + default=1.0, + metadata={"help": "top_p for sampling."}, + )
+ +
+[docs] + top_k: Optional[int] = field( + default=-1, + metadata={"help": "top_k for sampling."}, + )
+ +
+[docs] + additional_stop_token_ids: Optional[List[int]] = field( + default_factory=lambda: [], + metadata={"help": "the ids of the end of sentence tokens"}, + )
+ +
+[docs] + apply_chat_template: Optional[bool] = field( + default=True, + metadata={"help": "whether to apply chat template"}, + )
+ +
+[docs] + enable_decode_inference_result: Optional[bool] = field( + default=False, + metadata={"help": "Whether to decode the inference results."}, + )
+ +
+[docs] + tensor_parallel_size: Optional[int] = field( + default=1, + metadata={"help": "The tp size for distributed (multi-instance) inference."} + )
+ +
+[docs] + enable_distributed_inference: Optional[bool] = field( + default=False, + metadata={"help": "Whether to use multi-instance VLLM inference."} + )
+ +
+[docs] + distributed_inference_num_instances: Optional[int] = field( + default=1, + metadata={"help": "The number of instances for multi-instance VLLM inference."} + )
+ + + # vllm inference args +
+[docs] + use_vllm: bool = field( + default=False, + metadata={"help": "Whether to use VLLM for inference, By default False."} + )
+ +
+[docs] + vllm_tensor_parallel_size: Optional[int] = field( + default=1, + metadata={"help": "The tensor parallel size for VLLM inference."} + )
+ +
+[docs] + vllm_gpu_memory_utilization: Optional[float] = field( + default=0.95, + metadata={"help": "The GPU memory utilization for VLLM inference."} + )
+ + + # Args for result saving +
+[docs] + save_results: Optional[bool] = field( + default=False, metadata={"help": "Whether to save inference results."} + )
+ +
+[docs] + results_path: Optional[str] = field( + default=None, metadata={"help": "The path of inference results."} + )
+ + +
+[docs] + def __post_init__(self): + if self.save_results: + if self.results_path is None: + raise ValueError("Need to specify results_path when save_results is True.") + else: + if not self.results_path.endswith(".json"): + raise ValueError("The results_path must be a json file.") + else: + Path(self.results_path).parent.mkdir(parents=True, exist_ok=True)
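+
+# --- illustrative note (not part of the original lmflow.args source): the
+# __post_init__ check above means that when save_results=True, results_path must
+# point to a .json file, and its parent directory is created automatically, e.g.
+# (the path is a placeholder):
+#     InferencerArguments(save_results=True, results_path="output/results.json")
+# whereas results_path="output/results.txt" would raise a ValueError.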
+
+ + + +@dataclass +
+[docs] +class RaftAlignerArguments(TrainingArguments): + """ + Define a class RaftAlignerArguments to configure raft aligner. + """ +
+[docs] + output_reward_path: Optional[str] = field( + default="tmp/raft_aligner/", + metadata={ + "help": "The path of output rewards." + } + )
+ +
+[docs] + output_min_length: Optional[int] = field( + default=64, + metadata={ + "help": ( + "minimum length of the output token sequence generated from" + " model given an input." + ), + }, + )
+ +
+[docs] + output_max_length: Optional[int] = field( + default=128, + metadata={ + "help": ( + "maximum length of the output token sequence generated from" + " model given an input." + ), + }, + )
+ +
+[docs] + num_raft_iteration: Optional[int] = field( + default=20, + metadata={ + "help": "number of iterations of the raft aligner." + }, + )
+ +
+[docs] + raft_batch_size: Optional[int] = field( + default=1024, + metadata={ + "help": ( + "only select {raft_batch_size} samples each time for SFT training." + ) + }, + )
+ +
+[docs] + top_reward_percentage: Optional[float] = field( + default=0.2, + metadata={ + "help": ( + "only the top {top_reward_percentage} samples in the raft batch" + " (in terms of rewards) will be used for SFT of the model." + ), + }, + )
+ +
+[docs] + inference_batch_size_per_device: Optional[int] = field( + default=1, + metadata={ + "help": ( + "every device will infer {inference_batch_size_per_device}" + " samples in parallel. The inferred results will be concatenated" + " with inputs and attached with a reward." + ), + }, + )
+ +
+[docs] + collection_strategy: Optional[str] = field( + default="top", + metadata={ + "help": ( + "{collection_strategy} is either top or local." + " top means that we rank the samples globally regardless of the prompts;" + " local means that we only rank the samples with the same prompt." + ), + }, + )
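+
+# --- illustrative sketch (not part of the original lmflow.args source): the difference
+# between the two `collection_strategy` values described above. `samples` is assumed to
+# be a list of (prompt, response, reward) triples; this is not LMFlow's actual code.
+def _collect_raft_samples(samples, top_reward_percentage, collection_strategy="top"):
+    def _keep_top(group):
+        return group[: max(1, int(len(group) * top_reward_percentage))]
+    if collection_strategy == "top":
+        # "top": rank all samples globally, regardless of their prompts.
+        return _keep_top(sorted(samples, key=lambda s: s[2], reverse=True))
+    elif collection_strategy == "local":
+        # "local": rank samples only against others that share the same prompt.
+        by_prompt = {}
+        for prompt, response, reward in samples:
+            by_prompt.setdefault(prompt, []).append((prompt, response, reward))
+        selected = []
+        for group in by_prompt.values():
+            selected.extend(_keep_top(sorted(group, key=lambda s: s[2], reverse=True)))
+        return selected
+    raise ValueError(f"unknown collection_strategy: {collection_strategy}")
+# --- end of sketch ---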
+
+ + + +@dataclass +
+[docs] +class BenchmarkingArguments: +
+[docs] + dataset_name: Optional[str] = field( + default=None, + metadata={ + "help": "benchmark dataset name provided by lmflow" + }, + )
+ +
+[docs] + lm_evaluation_metric: Optional[str] = field( + default="accuracy", + metadata={ + "help": "the metric the model will be evaluated on", + "choices": ["acc", "acc_norm", "bleu", "chrf", "em", "f1", "ppl", \ + "ter", "r@1", "r@2", "mrr", "mc1", "mc2", "word_perplexity", \ + "byte_perplexity", "bits_per_byte"], + }, + )
+
+ + + +@dataclass +
+[docs] +class DPOAlignerArguments: + """ + The arguments for the DPO training script. + """ +
+[docs] + local_rank: int = field( + default=-1, + metadata={"help": "For distributed training: local_rank" + }, + )
+ + # data parameters +
+[docs] + beta: Optional[float] = field( + default=0.1, + metadata={ + "help": "the beta parameter for DPO loss" + } + )
+ + # # training parameters +
+[docs] + learning_rate: Optional[float] = field( + default=5e-4, + metadata={ + "help": "optimizer learning rate" + } + )
+ +
+[docs] + lr_scheduler_type: Optional[str] = field( + default="cosine", + metadata={ + "help": "the lr scheduler type" + } + )
+ +
+[docs] + warmup_steps: Optional[int] = field( + default=100, metadata={ + "help": "the number of warmup steps" + } + )
+ +
+[docs] + weight_decay: Optional[float] = field( + default=0.05, metadata={ + "help": "the weight decay" + } + )
+ +
+[docs] + optimizer_type: Optional[str] = field( + default="paged_adamw_32bit", + metadata={ + "help": "the optimizer type" + } + )
+ + +
+[docs] + per_device_train_batch_size: Optional[int] = field( + default=4, + metadata={ + "help": "train batch size per device" + } + )
+ +
+[docs] + per_device_eval_batch_size: Optional[int] = field( + default=1, metadata={ + "help": "eval batch size per device" + } + )
+ +
+[docs] + gradient_accumulation_steps: Optional[int] = field( + default=4, + metadata={ + "help": "the number of gradient accumulation steps" + }, + )
+ +
+[docs] + gradient_checkpointing: Optional[bool] = field( + default=True, + metadata={ + "help": "whether to use gradient checkpointing" + }, + )
+ + +
+[docs] + gradient_checkpointing_use_reentrant: Optional[bool] = field( + default=False, + metadata={ + "help": "whether to use reentrant for gradient checkpointing" + }, + )
+ +
+[docs] + max_prompt_length: Optional[int] = field( + default=512, + metadata={ + "help": "the maximum prompt length" + }, + )
+ +
+[docs] + max_length: Optional[int] = field( + default=1024, + metadata={ + "help": "the maximum sequence length" + }, + )
+ +
+[docs] + max_steps: Optional[int] = field( + default=1000, + metadata={ + "help": "max number of training steps" + }, + )
+ +
+[docs] + logging_steps: Optional[int] = field( + default=10, + metadata={ + "help": "the logging frequency" + }, + )
+ +
+[docs] + save_steps: Optional[int] = field( + default=100, + metadata={ + "help": "the saving frequency" + }, + )
+ +
+[docs] + eval_steps: Optional[int] = field( + default=100, + metadata={ + "help": "the evaluation frequency" + }, + )
+ +
+[docs] + output_dir: Optional[str] = field( + default="./results", + metadata={ + "help": "the output directory" + }, + )
+ +
+[docs] + log_freq: Optional[int] = field( + default=1, + metadata={ + "help": "the logging frequency" + }, + )
+ +
+[docs] + sanity_check: Optional[bool] = field( + default=False, + metadata={ + "help": "only train on 1000 samples" + } + )
+ +
+[docs] + report_to: Optional[str] = field( + default="wandb", + metadata={ + "help": 'The list of integrations to report the results and logs to. Supported platforms are `"azure_ml"`,' + '`"comet_ml"`, `"mlflow"`, `"neptune"`, `"tensorboard"`,`"clearml"` and `"wandb"`. ' + 'Use `"all"` to report to all integrations installed, `"none"` for no integrations.' + }, + )
+ +
+[docs] + seed: Optional[int] = field( + default=0, metadata={"help": "Random seed that will be set at the beginning of training."} + )
+ +
+[docs] + run_name: Optional[str] = field( + default="dpo", metadata={"help": "The name of the run."} + )
+
+ + + +@dataclass +
+[docs] +class DPOv2AlignerArguments(FinetunerArguments): + """ + The arguments for the DPOv2 training script. + """ + # general args +
+[docs] + random_seed: Optional[int] = field(default=42, metadata={"help": "the random seed"})
+ +
+[docs] + accelerate_config_file: Optional[str] = field( + default=None, + metadata={"help": "file path for accelerate config file, only used in memory safe dpov2 align."} + )
+ + # pair sampling args +
+[docs] + margin_scale: Optional[float] = field(default=1.0, metadata={"help": "the margin scale"})
+ +
+[docs] + sampling_paired_method: Optional[str] = field(default="max_random", metadata={"help": "the strategy used to sample preference pairs, e.g. max_random"})
+ +
+[docs] + length_penalty: Optional[float] = field(default=0, metadata={"help": "the length penalty"})
+ + # data collator args +
+[docs] + max_length: Optional[int] = field(default=2048, metadata={"help": "the maximum sequence length, prompt + output"})
+ +
+[docs] + max_prompt_length: Optional[int] = field(default=1000, metadata={"help": "the maximum prompt length"})
+ +
+[docs] + mask_prompt: Optional[bool] = field(default=False, metadata={"help": "mask prompt"})
+ + # dpov2 aligner args +
+[docs] + beta: Optional[float] = field(default=0.1, metadata={"help": "the beta parameter for DPO loss"})
+ +
+[docs] + loss_type: Optional[str] = field(default="sigmoid", metadata={"help": "the loss type"})
+
+ + + +@dataclass +
+[docs] +class IterativeAlignerArguments(InferencerArguments): + """ + Arguments for iterative aligners. + """ +
+[docs] + dataset_path_list: List[str] = field( + default_factory=list, + metadata={"help": "The list of dataset paths for iterative aligners."} + )
+ +
+[docs] + initial_iter_idx: int = field( + default=0, + metadata={"help": "The initial iteration index, 0 refers to the first dataset in dataset_path_list."} + )
+
+ + + + +@dataclass +
+[docs] +class IterativeDPOAlignerArguments(IterativeAlignerArguments, DPOv2AlignerArguments): + """ + Arguments for iterative DPO aligners. + """ +
+[docs] + output_dir: Optional[str] = field( + default="./runs", + metadata={"help": "Output path for the inference results"}, + )
+ +
+[docs] + reward_model_inference_batch_size: int = field( + default=1, + metadata={"help": "The batch size for reward model inference."} + )
+ +
+[docs] + reward_model_inference_block_size: int = field( + default=2048, + metadata={"help": "The block size for reward model inference."} + )
+ +
+[docs] + do_response_generation: bool = field( + default=True, + metadata={"help": "Whether to generate responses using the model."} + )
+ +
+[docs] + do_scoring: bool = field( + default=True, + metadata={"help": "Whether to score the responses using the reward model."} + )
+ +
+[docs] + do_dpo_align: bool = field( + default=True, + metadata={"help": "Whether to perform DPO alignment."} + )
+
+ + + +
+[docs] +PIPELINE_ARGUMENT_MAPPING = { + "finetuner": FinetunerArguments, + "evaluator": EvaluatorArguments, + "inferencer": InferencerArguments, + "vllm_inferencer": InferencerArguments, + "rm_inferencer": InferencerArguments, + "raft_aligner": RaftAlignerArguments, + "dpo_aligner": DPOAlignerArguments, + "rm_tuner": RewardModelTunerArguments, + "dpov2_aligner": DPOv2AlignerArguments, + "iterative_dpo_aligner": IterativeDPOAlignerArguments, +}
+ + + +
+[docs] +class AutoArguments: + """ + Automatically choose arguments from FinetunerArguments or EvaluatorArguments. + """ + +
+[docs] + def get_pipeline_args_class(pipeline_name: str): + return PIPELINE_ARGUMENT_MAPPING[pipeline_name]
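+
+# --- usage sketch (illustrative, not part of the original lmflow.args source) ---
+# AutoArguments simply maps a pipeline name onto its argument dataclass, which can
+# then be combined with model/dataset arguments in an HfArgumentParser.
+_example_pipeline_args_class = AutoArguments.get_pipeline_args_class("finetuner")
+assert _example_pipeline_args_class is FinetunerArguments
+# --- end of sketch ---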
+
+ +
+ +
+ + + + + +
+ +
+
+
+ +
+ + + + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_modules/lmflow/datasets/dataset.html b/_modules/lmflow/datasets/dataset.html new file mode 100644 index 000000000..d8deb8e2b --- /dev/null +++ b/_modules/lmflow/datasets/dataset.html @@ -0,0 +1,1141 @@ + + + + + + + + + + lmflow.datasets.dataset — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + + + +
+ + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +

Source code for lmflow.datasets.dataset

+#!/usr/bin/env python
+# coding=utf-8
+"""This Python code defines a class Dataset with methods for initializing, loading,
+and manipulating datasets from different backends such as Hugging Face and JSON.
+ 
+The `Dataset` class includes methods for loading datasets from a dictionary and a Hugging
+Face dataset, mapping datasets, and retrieving the backend dataset and arguments.
+"""
+
+
+
+# Importing necessary libraries and modules
+import copy
+import json
+import logging
+from pathlib import Path
+
+from typing import Optional
+
+from datasets import load_dataset
+from datasets import Dataset as HFDataset
+
+from lmflow.args import DatasetArguments
+from lmflow.utils.constants import (
+    DATASET_DESCRIPTION_MAP,
+    TEXT_ONLY_DATASET_DESCRIPTION,
+    TEXT2TEXT_DATASET_DESCRIPTION,
+    FLOAT_ONLY_DATASET_DESCRIPTION,
+    INSTANCE_FIELDS_MAP,
+)
+
+from .multi_modal_dataset import CustomMultiModalDataset
+
+
+
+[docs] +logger = logging.getLogger(__name__)
+ + + +
+[docs] +DATASET_TYPES = [ + "text_only", + "text2text", + "float_only", + "image_text", + "conversation", + "paired_conversation", + "paired_text_to_text", + "text_to_textlist", + "text_to_scored_textlist" +]
+ + +
+[docs] +KEY_TYPE = "type"
+ +
+[docs] +KEY_INSTANCES = "instances"
+ +
+[docs] +KEY_SCORE = "score"
+ + +
+[docs] +class Dataset: + r""" + Initializes the Dataset object with the given parameters. + + Parameters + ------------ + data_args : DatasetArguments object. + Contains the arguments required to load the dataset. + + backend : str, default="huggingface" + A string representing the dataset backend. Defaults to "huggingface". + + args : Optional. + Positional arguments. + + kwargs : Optional. + Keyword arguments. + """ + def __init__(self, data_args: DatasetArguments=None, backend: str="huggingface", *args, **kwargs): +
+[docs] + self.data_args = data_args
+ +
+[docs] + self.backend = backend
+ +
+[docs] + self.backend_dataset = None
+ +
+[docs] + self.type = None # Original type of the dataset
+ +
+[docs] + self.dataset_path = data_args.dataset_path
+ + + if data_args.dataset_path is None: + return + + if backend == "huggingface": + data_files = [ + x.absolute().as_posix() + for x in Path(self.dataset_path).glob("*.json") + ] + + # Iterate through all the files and ensure they have the same data type + for single_file in data_files: + with open(single_file) as fin: + json_data = json.load(fin) + if KEY_TYPE not in json_data.keys(): + raise ValueError( + f'"{KEY_TYPE}" field must be specified for data, e.g.' + '{\n' + f' "{KEY_TYPE}: "text_only",\n' + f' "{KEY_INSTANCES}": [\n' + ' { "text": "Sentence 1: This is a sentence." }\n' + ' { "text": "Sentence 2: This is another sentence." }\n' + f' ]\n' + '}' + ) + if self.type is None: + self.type = json_data[KEY_TYPE] + elif self.type != json_data[KEY_TYPE]: + raise ValueError( + 'All task files must have same data types. Previous' + f' files have type "{self.type}", but in file' + f' {single_file}, it has type "{self.type}".' + ) + + # Load the dataset using the HuggingFace dataset library + extensions = "json" + raw_dataset = load_dataset( + extensions, + data_files=data_files, + field=KEY_INSTANCES, + split="train", + use_auth_token=None, + ) + self.backend_dataset = raw_dataset + self._check_data_format() + elif backend == "json": + # TODO (@Jiachun) + pass + elif backend == "custom_multi_modal": + # FIXME refactor the backend name + raw_dataset = CustomMultiModalDataset(self.dataset_path, data_args) + self.backend_dataset = raw_dataset + else: + raise NotImplementedError(f'Unsupported dataset backend "{backend}"') + +
+[docs] + def __len__(self): + return len(self.backend_dataset)
+ + +
+[docs] + def _check_data_format(self): + """Checks if data type and data structure matches + + Raise messages with hints if not matched. + """ + data_dict = self.to_dict() + if KEY_TYPE not in data_dict: + raise ValueError( + f'"{KEY_TYPE}" must be provided to initialize a dataset,' + f' e.g.\n' + f' {TEXT_ONLY_DATASET_DESCRIPTION}' + ) + if KEY_INSTANCES not in data_dict: + raise ValueError( + f'"{KEY_INSTANCES}" must be provided to initialize a' + f' dataset, e.g.\n' + f' {TEXT_ONLY_DATASET_DESCRIPTION}' + ) + + data_type = data_dict[KEY_TYPE] + fields = self.get_backend_dataset().features + correct_fields = INSTANCE_FIELDS_MAP[data_type] + if not set(correct_fields).issubset(set(fields)): + raise ValueError( + f'data instance fields incorrect' + f' {list(correct_fields)} are required.' + )
+ + + +
+[docs] + def from_dict(self, dict_obj: dict, *args, **kwargs): + r""" + Create a Dataset object from a dictionary. + + Return a Dataset given a dict with format: + { + "type": TYPE, + "instances": [ + { + "key_1": VALUE_1.1, + "key_2": VALUE_1.2, + ... + }, + { + "key_1": VALUE_2.1, + "key_2": VALUE_2.2, + ... + }, + ... + ] + } + + Parameters + ----------- + + dict_obj : dict. + A dictionary containing the dataset information. + + args : Optional. + Positional arguments. + + kwargs : Optional. + Keyword arguments. + + Returns + --------- + + self : Dataset object. + """ + if self.backend == "huggingface": + if KEY_TYPE not in dict_obj: + raise ValueError( + f'"{KEY_TYPE}" must be provided to initialize a dataset,' + f' e.g.\n' + f' {TEXT_ONLY_DATASET_DESCRIPTION}' + ) + if KEY_INSTANCES not in dict_obj: + raise ValueError( + f'"{KEY_INSTANCES}" must be provided to initialize a' + f' dataset, e.g.\n' + f' {TEXT_ONLY_DATASET_DESCRIPTION}' + ) + + self.type = dict_obj[KEY_TYPE] + if not self.type in INSTANCE_FIELDS_MAP: + raise ValueError(f'type "{self.type}" is not supported') + + correct_fields = INSTANCE_FIELDS_MAP[self.type] + + for i, instance in enumerate(dict_obj[KEY_INSTANCES]): + fields = instance.keys() + if not set(correct_fields).issubset(set(fields)): + raise ValueError( + f'data instance fields incorrect' + f' {list(correct_fields)} are required.' + ) + + try: + hf_dict = {} + if len(dict_obj[KEY_INSTANCES]) > 0: + for key in dict_obj[KEY_INSTANCES][0].keys(): + hf_dict[key] = [ + instance[key] for instance in dict_obj[KEY_INSTANCES] + ] + + self.backend_dataset = HFDataset.from_dict(hf_dict, *args, **kwargs) + except AttributeError as ex: + raise ValueError( + f"Error occurs: {ex}. Failed to convert dict to" + f" \"{self.type}\" dataset," f" the standard format is as" + f" follows:\n" + f" {DATASET_DESCRIPTION_MAP[self.type]}" + ) + self._check_data_format() + + return self + elif self.backend == "dict": + self.backend_dataset = dict_obj + self.type = dict_obj[KEY_TYPE] + return self + else: + raise NotImplementedError( + f'Currently .from_dict is not supported for backend "{self.backend}"' + )
+ + + + @classmethod +
+[docs] + def create_from_dict(cls, dict_obj, *args, **kwargs): + r""" + Returns + -------- + + Returns a Dataset object given a dict. + """ + empty_data_args = DatasetArguments(dataset_path=None) + dataset = Dataset(empty_data_args) + return dataset.from_dict(dict_obj)
+ + + +
+[docs] + def to_dict(self): + r""" + Returns + --------- + + Return a dict represents the dataset: + { + "type": TYPE, + "instances": [ + { + "key_1": VALUE_1.1, + "key_2": VALUE_1.2, + ... + }, + { + "key_1": VALUE_2.1, + "key_2": VALUE_2.2, + ... + }, + ... + ] + } + + A python dict object represents the content of this dataset. + """ + if self.backend == "huggingface": + dict_obj = {} + dict_obj[KEY_TYPE] = self.get_type() + hf_dict = self.backend_dataset.to_dict() + dict_obj[KEY_INSTANCES] = [] + + first_key = None + for key in hf_dict.keys(): + first_key = key + break + + if first_key is not None: + num_instances = len(hf_dict[first_key]) + dict_obj[KEY_INSTANCES] = [ + { + key: hf_dict[key][i] for key in hf_dict.keys() + } + for i in range(num_instances) + ] + + return dict_obj + elif self.backend == "dict": + dict_obj = self.backend_dataset + return dict_obj + else: + raise NotImplementedError( + f'Current .to_dict is not supported for backend "{self.backend}"' + )
+ + + +
+[docs] + def to_list(self): + """Returns a list of instances.""" + if self.backend == "huggingface": + instance_list = [self.backend_dataset.__getitem__(idx) + for idx in range(len(self.backend_dataset))] + return instance_list + elif self.backend == "dict": + instance_list = copy.deepcopy(self.backend_dataset[KEY_INSTANCES]) + # TODO: should be a list of instances, instance should be huggingface datasets row format + return instance_list + else: + raise NotImplementedError( + f'Current .to_list is not supported for backend "{self.backend}"' + )
+ + + +
+[docs] + def map(self, *args, **kwargs): + r""" + Parameters + ------------ + args : Optional. + Positional arguments. + + kwargs : Optional. + Keyword arguments. + + Returns + --------- + + self : Dataset object. + """ + # If the dataset uses Hugging Face as the backend, + # call the `map()` function of the Hugging Face backend dataset + if self.backend == "huggingface": + # Set the mapped dataset as the backend dataset of the current dataset + mapped_backend_dataset = self.backend_dataset.map(*args, **kwargs) + self.backend_dataset = mapped_backend_dataset + return self + else: + # If the backend is not Hugging Face, raise a NotImplementedError + raise NotImplementedError( + f'Currently .map is not supported for backend "{self.backend}"' + )
+ + + +
+[docs] + def get_backend(self) -> Optional[str]: + r""" + Returns + --------- + + self.backend + """ + return self.backend
+ + + +
+[docs] + def get_backend_dataset(self): + r""" + Returns + --------- + + self.backend_dataset + """ + return self.backend_dataset
+ + + +
+[docs] + def get_fingerprint(self): + r""" + Returns + --------- + + Fingerprint of the backend_dataset which controls the cache + """ + return self.backend_dataset._fingerprint
+ + + +
+[docs] + def get_data_args(self): + r""" + Returns + --------- + + self.data_args + """ + return self.data_args
+ + + +
+[docs] + def get_type(self) -> str: + r""" + Returns + --------- + + self.type + """ + return self.type
+ + + +
+[docs] + def save( + self, + file_path: str, + format: str="json" + ): + r""" + Save the dataset to a json file. + + Parameters + ------------ + file_path : str. + The path to the file where the dataset will be saved. + """ + if format == "json": + assert Path(file_path).suffix == ".json", "The file path must have a .json extension." + with open(file_path, "w", encoding='utf-8') as fout: + json.dump(self.to_dict(), fout, indent=4, ensure_ascii=False) + + else: + logger.error(f"Unsupported format when saving the dataset: {format}.")
+ + + +
+[docs] + def sample(self, n: int, seed: int=42): + r""" + Sample n instances from the dataset. + + Parameters + ------------ + n : int. + The number of instances to sample from the dataset. + + Returns + --------- + + sample_dataset : Dataset object. + A new dataset object containing the sampled instances. + """ + if self.backend == "huggingface": + sampled_dataset = self.backend_dataset.shuffle(seed=seed).select(range(n)) + output_dataset = self.create_from_dict( + { + "type": self.get_type(), + "instances": [ + { + col_name: sampled_dataset[col_name][i] for col_name in sampled_dataset.column_names + } for i in range(n) + ] + } + ) + return output_dataset + else: + raise NotImplementedError( + f'Currently .sample is not supported for backend "{self.backend}"' + )
+ + + +
+[docs] + def train_test_split(self, test_size: float=0.2, shuffle: bool=True, seed: int=42): + r""" + Split the dataset into training and testing sets. + + Parameters + ------------ + test_size : float, default=0.2. + The proportion of the dataset that will be used for testing. + + Returns + --------- + + train_dataset : Dataset object. + A new dataset object containing the training instances. + + test_dataset : Dataset object. + A new dataset object containing the testing instances. + """ + if self.backend == "huggingface": + splited = self.backend_dataset.train_test_split( + test_size=test_size, shuffle=shuffle, seed=seed + ) + train_dataset = self.create_from_dict( + { + "type": self.get_type(), + "instances": [ + { + col_name: splited["train"][col_name][i] for col_name in splited["train"].column_names + } for i in range(len(splited["train"])) + ] + } + ) + test_dataset = self.create_from_dict( + { + "type": self.get_type(), + "instances": [ + { + col_name: splited["test"][col_name][i] for col_name in splited["test"].column_names + } for i in range(len(splited["test"])) + ] + } + ) + return train_dataset, test_dataset + else: + raise NotImplementedError( + f'Currently .train_test_split is not supported for backend "{self.backend}"' + )
+ + + +
+[docs] + def drop_instances(self, indices: list): + r""" + Drop instances from the dataset. + + Parameters + ------------ + indices : list. + A list of indices of the instances to drop from the dataset. + """ + if self.backend == "huggingface": + # keep only the rows whose index is not listed in `indices` + indices_to_drop = set(indices) + keep_indices = [i for i in range(len(self.backend_dataset)) if i not in indices_to_drop] + self.backend_dataset = self.backend_dataset.select(keep_indices) + else: + raise NotImplementedError( + f'Currently .drop_instances is not supported for backend "{self.backend}"' + )
+ + + +
+[docs] + def sanity_check( + self, + drop_invalid: bool=True, + ): + r""" + Perform a sanity check on the dataset. + """ + if self.backend == "huggingface": + self.hf_dataset_sanity_check(drop_invalid) + else: + raise NotImplementedError( + f'Currently .sanity_check is not supported for backend "{self.backend}"' + )
+ + + +
+[docs] + def hf_dataset_sanity_check( + self, + drop_invalid: bool=True, + ): + r""" + Perform a sanity check on the HuggingFace dataset. + """ + if self.backend_dataset is None or len(self.backend_dataset) == 0: + raise ValueError("Dataset is empty.") + + if self.type == 'text_to_textlist': + num_output_per_instance = len(self.backend_dataset['output'][0]) + # apply the checks cumulatively so that all three conditions are enforced + dataset_cache = self.backend_dataset.filter(lambda x: len(x['input']) != 0) + dataset_cache = dataset_cache.filter(lambda x: len(x['output']) == num_output_per_instance) + dataset_cache = dataset_cache.filter(lambda x: not all([len(output) == 0 for output in x['output']])) + + if len(dataset_cache) != len(self.backend_dataset): + warning_info = ( + f"Found {len(self.backend_dataset) - len(dataset_cache)} invalid instances " + "during hf_dataset_sanity_check, please check:\n" + " 1. input strings should not be empty\n" + " 2. output strings should not be all empty\n" + " 3. number of output strings should be consistent\n" # since we will use tensor reshape later + ) + if drop_invalid: + self.backend_dataset = dataset_cache + logger.warning(warning_info+"Invalid instances are dropped.") + else: + raise ValueError(warning_info) + + else: + logger.warning(f"No sanity check for {self.type} dataset.")
+
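+
+# --- usage sketch (illustrative, not part of the original lmflow.datasets.dataset
+# source): building a small in-memory "text_only" dataset and splitting it; the
+# sentences are placeholders.
+_example_dataset = Dataset.create_from_dict({
+    "type": "text_only",
+    "instances": [
+        {"text": "This is the first example sentence."},
+        {"text": "This is the second example sentence."},
+        {"text": "This is the third example sentence."},
+    ],
+})
+_example_train, _example_test = _example_dataset.train_test_split(test_size=0.34, seed=0)
+# --- end of sketch ---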
+ +
+ +
+ + + + + +
+ +
+
+
+ +
+ + + + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_modules/lmflow/datasets/multi_modal_dataset.html b/_modules/lmflow/datasets/multi_modal_dataset.html new file mode 100644 index 000000000..298d196e7 --- /dev/null +++ b/_modules/lmflow/datasets/multi_modal_dataset.html @@ -0,0 +1,802 @@ + + + + + + + + + + lmflow.datasets.multi_modal_dataset — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + + + +
+ + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +

Source code for lmflow.datasets.multi_modal_dataset

+#!/usr/bin/env python
+# coding=utf-8
+# FIXME update the doc string.
+"""This Python code defines a class Multi Modal Dataset.
+"""
+import copy
+from dataclasses import dataclass, field
+import json
+from PIL import Image
+import os.path as osp
+import transformers
+import torch
+from torch.utils.data import Dataset
+
+from lmflow.args import DatasetArguments
+from lmflow.utils import llava_conversation_lib as conversation_lib
+
+from lmflow.utils.constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
+
+
+[docs] +class CustomMultiModalDataset(Dataset): + """Dataset for Multi Modal data""" + + def __init__(self, dataset_path: str, + data_args: DatasetArguments): + super(CustomMultiModalDataset, self).__init__() +
+[docs] + data_dict = json.load(open(dataset_path, "r"))
+ + self.data_dict = data_dict + print("Finished loading json file in dataset.") +
+[docs] + self.data_args = data_args
+ +
+[docs] + self.image_folder = data_args.image_folder
+ + +
+[docs] + def __len__(self): + return len(self.data_dict)
+ + +
+[docs] + def register_tokenizer(self, tokenizer, image_processor=None): + self.tokenizer = tokenizer + self.image_processor = getattr( + tokenizer, "image_processor", image_processor)
+ + +
+[docs] + def __getitem__(self, i): + data = self.data_dict[i] + if isinstance(i, int): + data = [data] + assert len(data) == 1 + processor = self.image_processor + if 'image' in data[0]: + image_file = data[0]['image'] + image = Image.open( + osp.join(self.image_folder, image_file)).convert("RGB") + if self.data_args.image_aspect_ratio == 'pad': + def expand2square(pil_img, background_color): + width, height = pil_img.size + if width == height: + return pil_img + elif width > height: + result = Image.new(pil_img.mode, (width, width), background_color) + result.paste(pil_img, (0, (width - height) // 2)) + return result + else: + result = Image.new(pil_img.mode, (height, height), background_color) + result.paste(pil_img, ((height - width) // 2, 0)) + return result + image = expand2square(image, tuple(int(x*255) for x in processor.image_mean)) + image = processor.preprocess(image, return_tensors='pt')['pixel_values'][0] + else: + image = processor.preprocess(image, return_tensors='pt')['pixel_values'][0] + data = preprocess_multimodal_llava( + copy.deepcopy([e["conversations"] for e in data]), + self.data_args) + else: + data = copy.deepcopy([e["conversations"] for e in data]) + if self.data_args.sep_style == "plain": + data_dict = preprocess_llama_from_llava_plain( + data, + self.tokenizer, + has_image=('image' in self.data_dict[i]) + ) + else: + data_dict = preprocess_llama_from_llava_v1( + data, + self.tokenizer, + has_image=('image' in self.data_dict[i]) + ) + if isinstance(i, int): + data_dict = dict(input_ids=data_dict["input_ids"][0], + labels=data_dict["labels"][0]) + + # image exist in the data + if 'image' in self.data_dict[i]: + data_dict['image'] = image + else: + # image does not exist in the data, but the model is multimodal + crop_size = self.image_processor.crop_size + data_dict['image'] = torch.zeros( + 3, crop_size['height'], crop_size['width']) + return data_dict
+
+ + + + +
+[docs] +def preprocess_multimodal_llava(sources, data_args): + is_multimodal = data_args.is_multimodal + if not is_multimodal: + return sources + + for source in sources: + for sentence in source: + if DEFAULT_IMAGE_TOKEN in sentence['value']: + sentence['value'] = sentence['value'].replace( + DEFAULT_IMAGE_TOKEN, '').strip() + sentence['value'] = DEFAULT_IMAGE_TOKEN + '\n' + sentence['value'] + sentence['value'] = sentence['value'].strip() + if "mmtag" in conversation_lib.default_conversation.version: + sentence['value'] = sentence['value'].replace( + DEFAULT_IMAGE_TOKEN, '<Image>' + DEFAULT_IMAGE_TOKEN + '</Image>') + replace_token = DEFAULT_IMAGE_TOKEN + if data_args.use_image_start_end: + replace_token = DEFAULT_IM_START_TOKEN + \ + replace_token + DEFAULT_IM_END_TOKEN + sentence["value"] = sentence["value"].replace( + DEFAULT_IMAGE_TOKEN, replace_token) + return sources
+ + + +
+[docs] +def tokenizer_image_token(prompt, + tokenizer, + image_token_index=IMAGE_TOKEN_INDEX, + return_tensors=None): + prompt_chunks = [tokenizer(chunk).input_ids for chunk in prompt.split('<image>')] + + def insert_separator(X, sep): + return [ele for sublist in zip(X, [sep]*len(X)) for ele in sublist][:-1] + + input_ids = [] + offset = 0 + if len(prompt_chunks) > 0 and len(prompt_chunks[0]) > 0 and prompt_chunks[0][0] == tokenizer.bos_token_id: + offset = 1 + input_ids.append(prompt_chunks[0][0]) + + for x in insert_separator(prompt_chunks, [image_token_index] * (offset + 1)): + input_ids.extend(x[offset:]) + + if return_tensors is not None: + if return_tensors == 'pt': + return torch.tensor(input_ids, dtype=torch.long) + raise ValueError(f'Unsupported tensor type: {return_tensors}') + return input_ids
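+
+# --- illustrative note (not part of the original source): for a prompt such as
+# "<image>\nDescribe the picture.", tokenizer_image_token() tokenizes the text chunks
+# around "<image>" separately and splices IMAGE_TOKEN_INDEX in between, producing
+# roughly (token ids are made up):
+#     [bos_token_id, IMAGE_TOKEN_INDEX, *ids("\nDescribe the picture.")]
+# so that image features can later be substituted at that position.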
+ + + +
+[docs] +def preprocess_llama_from_llava_plain( + sources, + tokenizer: transformers.PreTrainedTokenizer, + has_image: bool = False): + """ + This function just adds the image token to the front of the text + and doesn't add any prompt. + Args: + sources: The input data with text and images. + tokenizer: The tokenizer used to process text. + has_image: Whether the input data contains images. + Returns: + The input_ids and labels for the model. + """ + conversations = [] + for source in sources: + assert len(source) == 2 + assert DEFAULT_IMAGE_TOKEN in source[0]['value'] + source[0]['value'] = DEFAULT_IMAGE_TOKEN + conversation = source[0]['value'] + source[1]['value'] + conversation_lib.default_conversation.sep + conversations.append(conversation) + # tokenize conversations + input_ids = [tokenizer_image_token(prompt, tokenizer, return_tensors='pt') for prompt in conversations] + targets = copy.deepcopy(input_ids) + for target, source in zip(targets, sources): + tokenized_len = len(tokenizer_image_token(source[0]['value'], tokenizer)) + target[:tokenized_len] = IGNORE_INDEX + + return dict(input_ids=input_ids, labels=targets)
+ + +
+[docs] +def preprocess_llama_from_llava_v1( + sources, + tokenizer: transformers.PreTrainedTokenizer, + has_image: bool = False): + """ + This function add the prompt and then put the image after the prompt. + So it needs additional code to generate the target label. + Args: + sources: The input data with text and image. + tokenizer: The tokenizer to process text. + has_image: Whether the input data has image. + Returns: + The input_ids and labels for the model. + """ + + conv = conversation_lib.default_conversation.copy() + roles = {"human": conv.roles[0], "gpt": conv.roles[1]} + + # Apply prompt templates + conversations = [] + for i, source in enumerate(sources): + if roles[source[0]["from"]] != conv.roles[0]: + # Skip the first one if it is not from human + source = source[1:] + + conv.messages = [] + for j, sentence in enumerate(source): + role = roles[sentence["from"]] + assert role == conv.roles[j % 2], f"{i}" + conv.append_message(role, sentence["value"]) + conversations.append(conv.get_prompt()) + + # Tokenize conversations + + if has_image: + input_ids = torch.stack([tokenizer_image_token(prompt, tokenizer, return_tensors='pt') for prompt in conversations], dim=0) + else: + input_ids = tokenizer( + conversations, + return_tensors="pt", + padding="longest", + max_length=tokenizer.model_max_length, + truncation=True, + ).input_ids + + targets = input_ids.clone() + assert conv.sep_style == conversation_lib.SeparatorStyle.TWO + + # Mask targets + sep = conv.sep + conv.roles[1] + ": " + for conversation, target in zip(conversations, targets): + total_len = int(target.ne(tokenizer.pad_token_id).sum()) + + rounds = conversation.split(conv.sep2) + cur_len = 1 + target[:cur_len] = IGNORE_INDEX + for i, rou in enumerate(rounds): + if rou == "": + break + + parts = rou.split(sep) + if len(parts) != 2: + break + parts[0] += sep + + if has_image: + round_len = len(tokenizer_image_token(rou, tokenizer)) + instruction_len = len(tokenizer_image_token(parts[0], tokenizer)) - 2 + else: + round_len = len(tokenizer(rou).input_ids) + instruction_len = len(tokenizer(parts[0]).input_ids) - 2 + + target[cur_len : cur_len + instruction_len] = IGNORE_INDEX + + cur_len += round_len + target[cur_len:] = IGNORE_INDEX + + if cur_len < tokenizer.model_max_length: + if cur_len != total_len: + target[:] = IGNORE_INDEX + print( + f"WARNING: tokenization mismatch: {cur_len} vs. {total_len}." + f" (ignored)" + ) + + return dict( + input_ids=input_ids, + labels=targets, + )
+ + + +@dataclass +
+[docs] +class DataCollatorForSupervisedDataset(object): + """Collate examples for supervised fine-tuning.""" + +
+[docs] + tokenizer: transformers.PreTrainedTokenizer
+ + +
+[docs] + def __call__(self, instances): + input_ids, labels = tuple([instance[key] for instance in instances] + for key in ("input_ids", "labels")) + input_ids = torch.nn.utils.rnn.pad_sequence( + input_ids, + batch_first=True, + padding_value=self.tokenizer.pad_token_id) + labels = torch.nn.utils.rnn.pad_sequence(labels, + batch_first=True, + padding_value=IGNORE_INDEX) + input_ids = input_ids[:, :self.tokenizer.model_max_length] + labels = labels[:, :self.tokenizer.model_max_length] + batch = dict( + input_ids=input_ids, + labels=labels, + attention_mask=input_ids.ne(self.tokenizer.pad_token_id), + ) + + if 'image' in instances[0]: + images = [instance['image'] for instance in instances] + if all(x is not None and x.shape == images[0].shape for x in images): + batch['images'] = torch.stack(images) + else: + batch['images'] = images + return batch
+
+ +
+ +
+ + + + + +
+ +
+
+
+ +
+ + + + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_modules/lmflow/models/auto_model.html b/_modules/lmflow/models/auto_model.html new file mode 100644 index 000000000..ce7f97bba --- /dev/null +++ b/_modules/lmflow/models/auto_model.html @@ -0,0 +1,491 @@ + + + + + + + + + + lmflow.models.auto_model — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + + + +
+ + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +

Source code for lmflow.models.auto_model

+#!/usr/bin/env python
+# coding=utf-8
+"""Automatically get correct model type.
+"""
+
+from lmflow.models.hf_decoder_model import HFDecoderModel
+from lmflow.models.hf_text_regression_model import HFTextRegressionModel
+from lmflow.models.hf_encoder_decoder_model import HFEncoderDecoderModel
+
+
+[docs] +class AutoModel: + + @classmethod +
+[docs] + def get_model(self, model_args, *args, **kwargs): + arch_type = model_args.arch_type + if arch_type == "decoder_only": + return HFDecoderModel(model_args, *args, **kwargs) + elif arch_type == "text_regression": + return HFTextRegressionModel(model_args, *args, **kwargs) + elif arch_type == "encoder_decoder" or \ + arch_type == "vision_encoder_decoder": + return HFEncoderDecoderModel(model_args, *args, **kwargs) + else: + raise NotImplementedError( + f"model architecture type \"{arch_type}\" is not supported" + )
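+
+# --- usage sketch (illustrative, not part of the original lmflow.models.auto_model
+# source; it assumes lmflow.args.ModelArguments exposes `model_name_or_path` and
+# `arch_type`, and the model name below is a placeholder):
+#     from lmflow.args import ModelArguments
+#     model_args = ModelArguments(model_name_or_path="gpt2", arch_type="decoder_only")
+#     model = AutoModel.get_model(model_args)   # dispatches to HFDecoderModel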
+
+ +
+ +
+ + + + + +
+ +
+
+
+ +
+ + + + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_modules/lmflow/models/base_model.html b/_modules/lmflow/models/base_model.html new file mode 100644 index 000000000..93206b204 --- /dev/null +++ b/_modules/lmflow/models/base_model.html @@ -0,0 +1,475 @@ + + + + + + + + + + lmflow.models.base_model — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + + + +
+ + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +

Source code for lmflow.models.base_model

+#!/usr/bin/env python
+# coding=utf-8
+"""Base model class.
+"""
+
+from abc import ABC
+
+
+
+[docs] +class BaseModel(ABC): + + def __init__(self, *args, **kwargs): + pass
+ +
+ +
+ + + + + +
+ +
+
+
+ +
+ + + + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_modules/lmflow/models/decoder_model.html b/_modules/lmflow/models/decoder_model.html new file mode 100644 index 000000000..7c31f14d1 --- /dev/null +++ b/_modules/lmflow/models/decoder_model.html @@ -0,0 +1,485 @@ + + + + + + + + + + lmflow.models.decoder_model — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + + + +
+ + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +

Source code for lmflow.models.decoder_model

+#!/usr/bin/env python
+# coding=utf-8
+"""A one-line summary of the module or program, terminated by a period.
+
+Leave one blank line.  The rest of this docstring should contain an
+overall description of the module or program.  Optionally, it may also
+contain a brief description of exported classes and functions and/or usage
+examples.
+
+Typical usage example:
+
+  foo = ClassFoo()
+  bar = foo.FunctionBar()
+"""
+
+from lmflow.models.base_model import BaseModel
+
+
+
+[docs] +class DecoderModel(BaseModel): + + def __init__(self, *args, **kwargs): + pass
+ +
+ +
+ + + + + +
+ +
+
+
+ +
+ + + + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_modules/lmflow/models/encoder_decoder_model.html b/_modules/lmflow/models/encoder_decoder_model.html new file mode 100644 index 000000000..73247a871 --- /dev/null +++ b/_modules/lmflow/models/encoder_decoder_model.html @@ -0,0 +1,485 @@ + + + + + + + + + + lmflow.models.encoder_decoder_model — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + + + +
+ + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +

Source code for lmflow.models.encoder_decoder_model

+#!/usr/bin/env python
+# coding=utf-8
+"""A one-line summary of the module or program, terminated by a period.
+
+Leave one blank line.  The rest of this docstring should contain an
+overall description of the module or program.  Optionally, it may also
+contain a brief description of exported classes and functions and/or usage
+examples.
+
+Typical usage example:
+
+  foo = ClassFoo()
+  bar = foo.FunctionBar()
+"""
+
+from lmflow.models.base_model import BaseModel
+
+
+
+[docs] +class EncoderDecoderModel(BaseModel): + + def __init__(self, *args, **kwargs): + pass
+ +
+ +
+ + + + + +
+ +
+
+
+ +
+ + + + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_modules/lmflow/models/hf_decoder_model.html b/_modules/lmflow/models/hf_decoder_model.html new file mode 100644 index 000000000..d0116fe28 --- /dev/null +++ b/_modules/lmflow/models/hf_decoder_model.html @@ -0,0 +1,1212 @@ + + + + + + + + + + lmflow.models.hf_decoder_model — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + + + +
+ + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +

Source code for lmflow.models.hf_decoder_model

+#!/usr/bin/env python
+# coding=utf-8
+"""This is a class called HFDecoderModel which is a wrapper around transformers model and
+tokenizer classes. It has several methods such as __init__, tokenize, and train that are 
+used for training and fine-tuning the model. The __init__ method takes in several arguments
+such as model_args, tune_strategy, and ds_config, which are used to load the pretrained 
+model and tokenizer, and initialize the training settings.
+
+The tokenize method is used to tokenize the input text and return the input IDs and attention
+masks that can be fed to the model for training or inference.
+
+This class supports different tune_strategy options such as 'normal', 'none', 'lora', and
+'adapter', which allow for different fine-tuning settings of the model. However, the 'lora'
+and 'adapter' strategies are not yet implemented.
+
+Overall, this class provides a convenient interface for loading and fine-tuning transformer
+models and can be used for various NLP tasks such as language modeling, text classification,
+and question answering.
+"""
+
+import hashlib
+import logging
+import os, shutil
+from typing import List, Union, Optional, Dict
+from pathlib import Path
+
+import ray
+import ray.data
+import torch
+import transformers
+import bitsandbytes
+import deepspeed
+from transformers.deepspeed import HfDeepSpeedConfig
+from transformers import BitsAndBytesConfig
+from transformers import (
+    CONFIG_MAPPING,
+    AutoConfig,
+    AutoTokenizer,
+    AutoModelForCausalLM,
+)
+from peft import (
+    LoraConfig,
+    PeftModel,
+    TaskType,
+    get_peft_config,
+    get_peft_model,
+    prepare_model_for_kbit_training
+)
+from vllm import SamplingParams
+
+from lmflow.datasets.dataset import Dataset
+from lmflow.models.hf_model_mixin import HFModelMixin
+from lmflow.models.decoder_model import DecoderModel
+from lmflow.models.interfaces.tunable import Tunable
+from lmflow.utils.constants import (
+    TEXT_ONLY_DATASET_DESCRIPTION,
+    TEXT2TEXT_DATASET_DESCRIPTION,
+    CONVERSATION_DATASET_DESCRIPTION,
+)
+from lmflow.utils.conversation_template import PRESET_TEMPLATES
+from lmflow.utils.data_utils import VLLMInferenceResultWithInput
+from lmflow.tokenization.hf_decoder_model import (
+    tokenize_function, 
+    conversation_tokenize_function
+)
+
+
+
+[docs] +logger = logging.getLogger(__name__)
+ + + +
+[docs] +MODELS_SUPPORT_FLASH_ATTENTION = [ + "LlamaForCausalLM", + "GPTNeoForCausalLM", + "GPT2ForCausalLM", + "BloomForCausalLM" +]
+ + +GPU_SUPPORT_FLASH_ATTENTION = { + "A100": ["LlamaForCausalLM", "GPTNeoForCausalLM", "GPT2ForCausalLM", "BloomForCausalLM"], + "A40": ["GPTNeoForCausalLM", "GPT2ForCausalLM", "BloomForCausalLM"], + "A6000": ["LlamaForCausalLM", "GPTNeoForCausalLM", "GPT2ForCausalLM", "BloomForCausalLM"] +} + +try: + import flash_attn + if int(flash_attn.__version__.split(".")[0]) == 2: +
+[docs] + GPU_SUPPORT_FLASH_ATTENTION = { + "A100": ["LlamaForCausalLM", "GPTNeoForCausalLM", "GPT2ForCausalLM", "BloomForCausalLM"], + "A40": ["LlamaForCausalLM","GPTNeoForCausalLM", "GPT2ForCausalLM", "BloomForCausalLM"], + "A6000": ["LlamaForCausalLM", "GPTNeoForCausalLM", "GPT2ForCausalLM", "BloomForCausalLM"] + }
+ +except Exception as e: + if e.__class__ == ModuleNotFoundError: + logger.warning( + "flash_attn is not installed. Install flash_attn for better performance." + ) + else: + logger.warning(f'An error occurred when importing flash_attn, flash attention is disabled: {e}') + + +
+[docs] +class HFDecoderModel(DecoderModel, HFModelMixin, Tunable): + r""" + Initializes a HFDecoderModel instance. + + Parameters + ------------ + + model_args : + Model arguments such as model name, path, revision, etc. + + tune_strategy : str or none, default="normal". + A string representing the dataset backend. Defaults to "huggingface". + + ds_config : + Deepspeed configuations. + + args : Optional. + Positional arguments. + + kwargs : Optional. + Keyword arguments. + """ + + def __init__( + self, + model_args, + tune_strategy='normal', + ds_config=None, + device="gpu", + use_accelerator=False, + *args, + **kwargs + ): + """ + Initializes a HFDecoderModel instance. + :param model_args: dictionary with model arguments such as model name, path, revision, etc. + :param tune_strategy: tuning strategy: normal, none, lora or adapter + :param ds_config: deepspeed configuration for distributed training + """ + HFModelMixin.__init__( + self, + model_args=model_args, + do_train=True if tune_strategy == "normal" else False, + ds_config=ds_config, + device=device, + use_accelerator=use_accelerator, + *args, + **kwargs + ) + + +
+[docs] + def tokenize( + self, + dataset, + add_special_tokens=True, + *args, + **kwargs + ) -> Dataset: + """ + Tokenize the full dataset. + + Parameters + ------------ + dataset : lmflow.datasets.Dataset. + + args : Optional. + Positional arguments. + + kwargs : Optional. + Keyword arguments. + + Returns + ------------ + tokenized_datasets : + The tokenized dataset, without any leading or trailing special + tokens (normally they are Begin-Of-Sentence or End-Of-Sentence + tokens). + """ + # Preprocessing the datasets. + # First we tokenize all the texts. + if dataset.get_backend() != "huggingface": + raise NotImplementedError( + "tokenization of datasets with non-huggingface backend are" + "not supported yet" + ) + + dataset_type = dataset.get_type() + model_args = self.model_args + raw_datasets = dataset + hf_raw_datasets = dataset.get_backend_dataset() + column_names = list(hf_raw_datasets.features) + data_args = raw_datasets.get_data_args() + + # Requires three types of information for tokenizing different datasets + # 1) Which fields require tokenization, e.g. + # "text2float": "text", but not "float" + # "text2text": both "input" and "output" + # 2) How will there tokenized sequence concatenated together, e.g. + # "text_only": "text" -> "text" + # "text2text": "input", "output" -> "input" + "output" + # 3) Which fields require loss in final computation, e.g. + # "text_only": "text" + # "text2text": "output" only + tokenized_column_order = None # Handles 1) and 2) + label_columns = None # Handles 3) + if dataset_type == "text_only": + tokenized_column_order = ["text"] + label_columns = ["text"] + elif dataset_type == "text2text": + tokenized_column_order = ["input", "output"] + label_columns = ["output"] + add_special_tokens = False + elif dataset_type == "conversation": + if data_args.conversation_template: + if data_args.conversation_template in PRESET_TEMPLATES.keys(): + conversation_template = PRESET_TEMPLATES[data_args.conversation_template] + else: + raise NotImplementedError( + f"Conversation template {data_args.conversation_template} is not supported yet." + ) + else: + logger.warning("No conversation template provided. 
Using default template.") + conversation_template = PRESET_TEMPLATES['empty'] + + logger.warning(f"Conversation template: {conversation_template}") + else: + raise NotImplementedError( + f"dataset type \"{dataset_type}\" is not supported, currently" + " only support following data types:\n" + f" 1) {TEXT_ONLY_DATASET_DESCRIPTION}\n" + f" 2) {TEXT2TEXT_DATASET_DESCRIPTION}\n" + f" 3) {CONVERSATION_DATASET_DESCRIPTION}\n" + ) + + # Whether to truncate long sequences to fit into max_length + use_truncation = False + if model_args.use_lora or data_args.disable_group_texts: + use_truncation = True + + tokenize_fn = conversation_tokenize_function if "conversation" in dataset_type else tokenize_function + tokenize_fn_kwargs = { + "data_args": data_args, + "tokenizer": self.tokenizer, + "column_names": column_names, + } + if "conversation" in dataset_type: + tokenize_fn_kwargs["conversation_template"] = conversation_template + else: + tokenize_fn_kwargs["label_columns"] = label_columns + tokenize_fn_kwargs["tokenized_column_order"] = tokenized_column_order + tokenize_fn_kwargs["add_special_tokens"] = add_special_tokens + tokenize_fn_kwargs["use_truncation"] = use_truncation + + tokenize_kwargs = {} + if not data_args.streaming: + fingerprint = hashlib.md5( + ( + raw_datasets.get_fingerprint() + + str(self.tokenizer) + + f'###padding_side={self.tokenizer.padding_side}' + + ('###conversation_template=' + str(conversation_template) if "conversation" in dataset_type else "") + + f'###disable_group_texts={data_args.disable_group_texts}' + + f'###block_size={data_args.block_size}' + ).encode("utf-8") + ).hexdigest() + tokenize_kwargs = { + "num_proc": data_args.preprocessing_num_workers, + "load_from_cache_file": not data_args.overwrite_cache, + "desc": "Running tokenizer on dataset", + "new_fingerprint": fingerprint, + } + + tokenized_datasets = raw_datasets.map( + tokenize_fn, + batched=True, + remove_columns=column_names, + fn_kwargs=tokenize_fn_kwargs, + **tokenize_kwargs + ) + + return tokenized_datasets
+ + + +
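+    # Illustrative sketch (not part of the original source): tokenizing a
+    # "text_only" dataset before fine-tuning, assuming `dataset` is an
+    # lmflow.datasets.Dataset with a huggingface backend:
+    #
+    #   tokenized_dataset = model.tokenize(dataset)
+    #
+    # The returned Dataset carries the token columns produced by
+    # tokenize_function above (e.g. input_ids / attention_mask / labels).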
+[docs] + def encode(self, input: Union[str, List[str]], *args, **kwargs ) -> Union[List[int], List[List[int]]]: + """ + Perform encoding process of the tokenizer. + + Parameters + ------------ + inputs : str or list. + The text sequence. + + args : Optional. + Positional arguments. + + kwargs : Optional. + Keyword arguments. + + Returns + ------------ + outputs : + if string input,return the tokenized inputs. + "Hello,world!"-> [101, 7592, 1010, 2088, 102] + if batch input,return {input_ids,attention_mask,token_type_ids} + ["Hello,world!","Hello!"]-> {'input_ids': tensor([[ 101, 7592, 1010, 2088, 102],...),'attention_mask': tensor([[1, 1, 1, 1, 1],[0,0,1,1,1]])} + """ + if isinstance(input, list): + return self.tokenizer(text=input, *args, **kwargs)#batch encode,will automatically do left padding + elif isinstance(input, str): + return self.tokenizer.encode(text=input, *args, **kwargs) + else: + raise NotImplementedError(f'type "{type(input)}" cannot be encoded')
+ + + +
+[docs] + def decode(self, input, *args, **kwargs ) -> Union[str, List[str]]: + """ + Perform decoding process of the tokenizer. + + Parameters + ------------ + inputs : list or tensor. + The token sequence. + + args : Optional. + Positional arguments. + + kwargs : Optional. + Keyword arguments. + + Returns + ------------ + outputs : + The text decoded from the token inputs. + if batch input,return the list of text + [[101, 7592, 1010, 2088, 102],[101, 7592, 1010, 2088, 102]]-> ["Hello,world!","Hello,world!" + if single input,return the text + [101, 7592, 1010, 2088, 102]-> "Hello,world!" + """ + if isinstance(input, List): + input=torch.tensor(input) + if input.dim()==2: + return self.tokenizer.batch_decode(input, *args, **kwargs)#batch_decode + else: + # Can be list of ints or a Tensor + return self.tokenizer.decode(input, *args, **kwargs)
+ + + +
+[docs] + def inference( + self, + inputs, + release_gpu: bool = False, + use_vllm: bool = False, + **kwargs + ): + """ + Perform generation process of the model. + + Parameters + ------------ + inputs : + The sequence used as a prompt for the generation or as model inputs to the model. + When using vllm inference, this should be a string or a list of strings. + When using normal inference, this should be a tensor. + release_gpu : bool, optional + Whether to release the GPU resource after inference, by default False. + use_vllm : bool, optional + Whether to use VLLM for inference, by default False. + kwargs : Optional. + Keyword arguments. + + Returns + ------------ + outputs : + The generated sequence output + """ + if not self._activated: + self.activate_model_for_inference( + use_vllm=use_vllm, + **kwargs, + ) + + if use_vllm: + res = self.__vllm_inference(inputs, **kwargs) + else: + res = self.__inference(inputs, **kwargs) + + if release_gpu: + self.deactivate_model_for_inference(use_vllm=use_vllm) + + return res
+ + + +
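+    # Illustrative vLLM generation sketch (not part of the original source);
+    # sampling values and prompts are placeholders:
+    #
+    #   from vllm import SamplingParams
+    #   params = SamplingParams(n=1, temperature=0.7, max_tokens=128)
+    #   results = model.inference(
+    #       ["Hello, how are you?"],
+    #       use_vllm=True,
+    #       sampling_params=params,
+    #       vllm_gpu_memory_utilization=0.9,
+    #       vllm_tensor_parallel_size=1,
+    #   )
+    #   print(results[0]["output"][0])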
+[docs] + def __inference(self, inputs, *args, **kwargs): + """ + Perform generation process of the model. + + Parameters + ------------ + inputs : + The **tokenized** sequence used as a prompt for the generation or as model inputs to the model. + + args : Optional. + Positional arguments. + + kwargs : Optional. + Keyword arguments. + + Returns + ------------ + outputs : + The generated sequence output + """ + with torch.no_grad(): + if self.use_accelerator: + outputs = self.backend_model.generate( + input_ids=inputs, + pad_token_id=self.tokenizer.pad_token_id, + *args, + **kwargs + ) + else: + if self.device == "gpu": + outputs = self.ds_engine.module.generate( + input_ids=inputs, + synced_gpus=True, + pad_token_id=self.tokenizer.pad_token_id, + *args, + **kwargs + ) + elif self.device == "cpu": + outputs = self.backend_model.generate( + input_ids=inputs, + synced_gpus=True, + pad_token_id=self.tokenizer.pad_token_id, + *args, + **kwargs + ) + else: + raise NotImplementedError( + f"device \"{self.device}\" is not supported" + ) + return outputs
+ + + +
+[docs] + def __vllm_inference( + self, + inputs: Union[str, List[str]], + sampling_params: Optional[SamplingParams] = None, + **kwargs, + ) -> List[VLLMInferenceResultWithInput]: + """Perform VLLM inference process of the model. + + Parameters + ---------- + inputs : Union[str, List[str]] + Prompt(s), string or a list of strings. + sampling_params : Optional[SamplingParams], optional + vllm SamplingParams object, by default None. + + Returns + ------- + List[VLLMInferenceResultWithInput] + Return a list of VLLMInferenceResultWithInput, where each + element contains the input prompt and the corresponding output. + + When `sampling_params.detokenize = True`, the output would be a list of strings, + contains sampling_params.n samples for the corresponding prompt. + + When `sampling_params.detokenize = False`, return a list of list of ints + (token ids, no decoding after generation). + """ + vllm_outputs = self.backend_model_for_inference.generate( + inputs, + sampling_params=sampling_params, + use_tqdm=True, + ) + final_output = [] + for output in vllm_outputs: + if sampling_params.detokenize: + output_list = [sentence.text for sentence in output.outputs] + else: + output_list = [sentence.token_ids for sentence in output.outputs] + + final_output.append({"input": output.prompt, "output": output_list}) + + return final_output
+ + + +
+[docs] + def prepare_inputs_for_inference( + self, + dataset: Dataset, + apply_chat_template: bool = True, + enable_distributed_inference: bool = False, + use_vllm: bool = False, + **kwargs, + ) -> Union[List[str], ray.data.Dataset, Dict[str, torch.Tensor]]: + """ + Prepare inputs for inference. + + Parameters + ------------ + dataset : lmflow.datasets.Dataset. + The dataset used for inference. + + args : Optional. + Positional arguments. + + kwargs : Optional. + Keyword arguments. + + Returns + ------------ + outputs : + The prepared inputs for inference. + """ + if use_vllm: + inference_inputs = self.__prepare_inputs_for_vllm_inference( + dataset=dataset, + apply_chat_template=apply_chat_template, + enable_distributed_inference=enable_distributed_inference, + ) + else: + inference_inputs = self.__prepare_inputs_for_inference( + dataset, + apply_chat_template=apply_chat_template, + enable_distributed_inference=enable_distributed_inference, + ) + + return inference_inputs
+ + + +
+[docs] + def __prepare_inputs_for_vllm_inference( + self, + dataset: Dataset, + apply_chat_template: bool = True, + enable_distributed_inference: bool = False, + ) -> Union[List[str], ray.data.Dataset]: + if dataset.get_type() == 'text_only': + if apply_chat_template: + dataset = dataset.map( + lambda sample: { + "templated": self.tokenizer.apply_chat_template( + [{"role":"user", "content": sample['text']}], + tokenize=False, + add_generation_prompt=True + ) + }, + num_proc=dataset.data_args.preprocessing_num_workers, + ) + inference_inputs = dataset.get_backend_dataset()['templated'] + else: + inference_inputs = dataset.get_backend_dataset()['text'] + + elif dataset.get_type() == "text2text": + logger.warning(f"For a text2text dataset, only `input` will be used as the model input.") + if apply_chat_template: + dataset = dataset.map( + lambda sample: { + "templated": self.tokenizer.apply_chat_template( + conversation=[{"role":"user", "content": sample['input']}], + tokenize=False, + add_generation_prompt=True + ) + }, + num_proc=dataset.data_args.preprocessing_num_workers, + ) + inference_inputs = dataset.get_backend_dataset()['templated'] + else: + inference_inputs = dataset.get_backend_dataset()['input'] + + elif dataset.get_type() == 'conversation': + if apply_chat_template: + def preprocess_conversation(sample): + conversation = sample['messages'][:-1] if len(sample['messages'])%2 == 0 else sample['messages'] + + if sample['messages'][-1]['role'] != 'user': + logger.warning( + "Not a valid conversation for generation, since the conversation " + "doesn't end up with an user message. Skip." + ) + sample_out = {"templated": ""} + else: + sample_out = {"templated": self.tokenizer.apply_chat_template( + conversation=conversation, + tokenize=False, + add_generation_prompt=True, + )} + + return sample_out + dataset = dataset.map( + preprocess_conversation, + num_proc=dataset.data_args.preprocessing_num_workers, + ) + inference_inputs = dataset.get_backend_dataset()['templated'] + else: + logger.warning( + "Your dataset is `conversation` type but `apply_chat_template` is set to False. " + "Will use the first user input in conversation as model input." + ) + inference_inputs = [conversation[0]['content'] for conversation in dataset.get_backend_dataset()['messages']] + + else: + raise NotImplementedError( + f"Currently `{dataset.get_type()}` data are not supported for vllm inference." + ) + + inference_inputs = [sentence for sentence in inference_inputs if len(sentence) > 0] + + if enable_distributed_inference: + inference_inputs = ray.data.from_items(inference_inputs) # -> Dict[str, np.ndarray], {"item": array(['...', '...', '...'])} + + return inference_inputs
+ + + +
+[docs] + def __prepare_inputs_for_inference( + self, + dataset: Dataset, + **kwargs, + ): + raise NotImplementedError("prepare_inputs_for_inference is not implemented")
+ + + +
+[docs] + def merge_lora_weights(self): + if self.model_args.use_lora and not self.model_args.use_qlora: + self.get_backend_model().merge_and_unload() + elif self.model_args.use_qlora: + logger.warning("Reloading base model in 16-bit precision to merge adapter weights. NOTE: Your device must have" + "sufficient memory to reload the model in half-precision without quantization.") + self.get_peft_without_qlora() + self.get_backend_model().merge_and_unload() + else: + logger.warning("LoRA training is NOT enabled. Merging LoRA weights is not applicable.")
+ + +
+[docs] + def get_peft_without_qlora(self): + import tempfile + + with tempfile.TemporaryDirectory() as tmpdirname: + print('created temporary directory', tmpdirname) + + + self.get_backend_model().save_pretrained(tmpdirname) + + torch_dtype = ( + self.model_args.torch_dtype + if self.model_args.torch_dtype in ["auto", None] + else getattr(torch, self.model_args.torch_dtype) + ) + config_kwargs = { + "cache_dir": self.model_args.cache_dir, + "revision": self.model_args.model_revision, + "use_auth_token": True if self.model_args.use_auth_token else None, + } + config = AutoConfig.from_pretrained(self.model_args.model_name_or_path, **config_kwargs) + device_map = "auto" + if os.environ.get('LOCAL_RANK') is not None: + local_rank = int(os.environ.get('LOCAL_RANK','0')) + device_map = {'': local_rank} + + self.backend_model_full = AutoModelForCausalLM.from_pretrained( + self.model_args.model_name_or_path, + from_tf=bool(".ckpt" in self.model_args.model_name_or_path), + config=config, + cache_dir=self.model_args.cache_dir, + revision=self.model_args.model_revision, + use_auth_token=True if self.model_args.use_auth_token else None, + torch_dtype=torch_dtype, + device_map=device_map, + trust_remote_code = self.model_args.trust_remote_code, + attn_implementation="flash_attention_2" if self.model_args.use_flash_attention else None, + ) + + self.backend_model = PeftModel.from_pretrained(self.backend_model_full, tmpdirname)
+ + +
+[docs] + def save(self, dir, save_full_model=False, *args, **kwargs): + """ + Perform generation process of the model. + + Parameters + ------------ + dir : + The directory to save model and tokenizer + + save_full_model : Optional. + Whether to save full model. + + kwargs : Optional. + Keyword arguments. + + Returns + ------------ + outputs : + The generated sequence output + """ + self.get_tokenizer().save_pretrained(dir) + if save_full_model and self.model_args.use_lora: + save_dtype = ( + torch.float16 + if self.model_args.torch_dtype in ["auto", None] + else getattr(torch, self.model_args.torch_dtype) + ) + self.backend_model_full.to(dtype=save_dtype).save_pretrained(dir) + logger.warning(f"Save full model with dtype: {save_dtype}") + else: + self.get_backend_model().save_pretrained(dir)
+
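+    # Illustrative end-of-training sketch (not part of the original source):
+    # merge trained LoRA weights into the base model, then save the full
+    # checkpoint together with the tokenizer.
+    #
+    #   model.merge_lora_weights()
+    #   model.save("path/to/output_dir", save_full_model=True)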
\ No newline at end of file diff --git a/_modules/lmflow/models/hf_encoder_decoder_model.html b/_modules/lmflow/models/hf_encoder_decoder_model.html new file mode 100644 index 000000000..b9a3dc7db --- /dev/null +++ b/_modules/lmflow/models/hf_encoder_decoder_model.html @@ -0,0 +1,1015 @@
+lmflow.models.hf_encoder_decoder_model — LMFlow documentation

Source code for lmflow.models.hf_encoder_decoder_model

+#!/usr/bin/env python
+# coding=utf-8
+"""This is a class called HFDecoderModel which is a wrapper around transformers model and
+tokenizer classes. It has several methods such as __init__, tokenize, and train that are
+used for training and fine-tuning the model. The __init__ method takes in several arguments
+such as model_args, tune_strategy, and ds_config, which are used to load the pretrained
+model and tokenizer, and initialize the training settings.
+
+The tokenize method is used to tokenize the input text and return the input IDs and attention
+masks that can be fed to the model for training or inference.
+
+This class supports different tune_strategy options such as 'normal', 'none', 'lora', and
+'adapter', which allow for different fine-tuning settings of the model. However, the 'lora'
+and 'adapter' strategies are not yet implemented.
+
+Overall, this class provides a convenient interface for loading and fine-tuning transformer
+models and can be used for various NLP tasks such as language modeling, text classification,
+and question answering.
+"""
+
+import copy
+import logging
+import time
+from typing import List, Union
+
+import deepspeed
+from peft import (
+    LoraConfig,
+    PeftModel,
+    TaskType,
+    get_peft_config,
+    get_peft_model,
+)
+
+import torch
+from transformers.deepspeed import HfDeepSpeedConfig, HfTrainerDeepSpeedConfig
+
+from transformers.testing_utils import CaptureLogger
+
+from transformers import (
+    CONFIG_MAPPING,
+    AutoConfig,
+    AutoTokenizer,
+    AutoModelForSeq2SeqLM,
+    AutoModelForVision2Seq,
+    AutoModel,
+    AutoProcessor,
+    LlamaConfig
+)
+
+from lmflow.datasets.dataset import Dataset
+from lmflow.models.encoder_decoder_model import EncoderDecoderModel
+from lmflow.models.interfaces.tunable import Tunable
+from lmflow.models.vision2seq_model import CustomAutoVision2SeqModel
+from lmflow.utils.multimodal import update_custom_config, load_llava_pretrain_model
+
+
+[docs] +logger = logging.getLogger(__name__)
+ + + +
+[docs] +class HFEncoderDecoderModel(EncoderDecoderModel, Tunable): + r""" + Initializes a HFEncoderDecoderModel instance. + + Parameters + ------------ + + model_args : + Model arguments such as model name, path, revision, etc. + + tune_strategy : str or none, default="normal". + A string representing the dataset backend. Defaults to "huggingface". + + ds_config : + Deepspeed configuations. + + args : Optional. + Positional arguments. + + kwargs : Optional. + Keyword arguments. + """ + + def __init__( + self, + model_args, + tune_strategy='normal', + ds_config=None, + device="gpu", + use_accelerator=False, + custom_model=False, + with_deepspeed=True, + pipeline_args=None, + *args, + **kwargs + ): + """ + Initializes a HFDecoderModel instance. + :param model_args: dictionary with model arguments such as model name, path, revision, etc. + :param tune_strategy: tuning strategy: normal, none, lora or adapter + :param ds_config: deepspeed configuration for distributed training + """ + + # See more about loading any type of standard or custom dataset (from + # files, python dict, pandas DataFrame, etc) at + # https://huggingface.co/docs/datasets/loading_datasets.html. + + # Load pretrained model and tokenizer + # + # Distributed training: The .from_pretrained methods guarantee that + # only one local process can concurrently download model & vocab. + +
+[docs] + self.device = device
+ + + if tune_strategy == 'normal': + raise NotImplementedError( + f"tune_strategy \"{tune_strategy}\" is not supported" + ) + elif tune_strategy == 'none': + if use_accelerator: + raise NotImplementedError( + f"Currently encoder2decoder model is not supported with accelerator" + ) + # dschf = HfDeepSpeedConfig(ds_config) + dschf = HfTrainerDeepSpeedConfig(ds_config) + if pipeline_args is not None: + dschf.trainer_config_process(pipeline_args) + peft_model_id = model_args.lora_model_path + # NOTE: Currently offload is not supported by llama + if "llama" in model_args.model_name_or_path and model_args.use_ram_optimized_load: + logger.warning( + "llama does not support RAM optimized load. Automatically" + " use original load instead." + ) + model_args.use_ram_optimized_load = False + + # get model register + self.arch_type = model_args.arch_type + if self.arch_type == "encoder_decoder": + if model_args.model_name_or_path == 'THUDM/chatglm-6b': + model_register = AutoModel + else: + model_register = AutoModelForSeq2SeqLM + elif self.arch_type == "vision_encoder_decoder": + if not custom_model: + model_register = AutoModelForVision2Seq + else: + model_register = CustomAutoVision2SeqModel + else: + raise NotImplementedError + if not custom_model: + if model_args.model_name_or_path == 'THUDM/chatglm-6b': + self.backend_model = model_register.from_pretrained(model_args.model_name_or_path, trust_remote_code=True) + + elif model_args.use_ram_optimized_load and peft_model_id is None: + try: + # RAM-optimized load + self.backend_model = model_register.from_pretrained( + model_args.model_name_or_path, + device_map="auto", + offload_folder="offload", + offload_state_dict=True, + ) + except: + logger.warning( + "Failed to use RAM optimized load. Automatically" + " use original load instead." + ) + # Normal load + self.backend_model = model_register.from_pretrained( + model_args.model_name_or_path, + ) + else: + if peft_model_id is not None: + logger.warning( + "LoRA does not support RAM optimized load currently." + " Automatically use original load instead." + ) + self.backend_model = model_register.from_pretrained( + model_args.model_name_or_path, + ) + # else: + # self.backend_model = model_register.from_pretrained( + # model_args.model_name_or_path) + else: + if model_args.llava_loading is False: + # FIXME remove the following from_pretrained code by + # creating a unified pretrained model. 
+ model = CustomAutoVision2SeqModel.from_pretrained(model_args.model_name_or_path) + if model_args.llm_model_name_or_path is not None: + text_config = LlamaConfig.from_pretrained(model_args.llm_model_name_or_path) + model.config.text_config = text_config + model.language_model_from_pretrained(model_args.llm_model_name_or_path, + low_resource=model_args.low_resource) + state_dict = torch.load( + model_args.pretrained_language_projection_path, + map_location="cpu") + model.load_state_dict(state_dict, strict=False) + else: + config = AutoConfig.from_pretrained( + model_args.model_name_or_path) + if model_args.low_resource: + kwargs = dict( + torch_dtype=torch.float16, + load_in_8bit=True, + device_map="auto", + ) + else: + # kwargs = dict(torch_dtype=torch.float16) + kwargs = dict(device_map="auto") + if (model_args.image_encoder_name_or_path is None and + model_args.qformer_name_or_path is None and + model_args.llm_model_name_or_path is None): + config = AutoConfig.from_pretrained( + model_args.model_name_or_path) + model = CustomAutoVision2SeqModel.from_pretrained( + model_args.model_name_or_path, **kwargs) + else: + config = update_custom_config(config, model_args) + model = CustomAutoVision2SeqModel( + config, + image_encoder_name_or_path=model_args.image_encoder_name_or_path, + qformer_name_or_path=model_args.qformer_name_or_path, + language_model_name_or_path=model_args.llm_model_name_or_path, + low_resource=model_args.low_resource) + if model_args.pretrained_language_projection_path is not None: + state_dict = torch.load( + model_args.pretrained_language_projection_path, map_location="cpu") + new_state_dict = {} + new_state_dict['model.language_projection.weight'] = \ + state_dict['model.mm_projector.weight'] + new_state_dict['model.language_projection.bias'] = \ + state_dict['model.mm_projector.bias'] + if model_args.llava_pretrain_model_path is not None: + # used for inference that directly load the preatrain model + model = load_llava_pretrain_model( + model, model_args.llava_pretrain_model_path) + if model_args.save_pretrain_model_path is not None: + model.save_pretrained( + model_args.save_pretrain_model_path) + self.backend_model = model + # init tokenizer + if self.arch_type == "encoder_decoder": + self.tokenizer = AutoTokenizer.from_pretrained( + model_args.model_name_or_path, trust_remote_code=True) + elif self.arch_type == "vision_encoder_decoder": + if model_args.llava_loading is False: + # blip2 image and token processor + self.tokenizer = AutoProcessor.from_pretrained( + model_args.model_name_or_path, trust_remote_code=True) + if model_args.llm_model_name_or_path is not None: + # update the tokenizer from the custom llm. + self.tokenizer.tokenizer = ( + AutoTokenizer.from_pretrained( + model_args.llm_model_name_or_path) + ) + self.image_processor = self.tokenizer.image_processor + + else: + # image processor is stored in the vision encoder + if model_args.llm_model_name_or_path is not None: + self.tokenizer = AutoTokenizer.from_pretrained( + model_args.llm_model_name_or_path) + else: + self.tokenizer = AutoTokenizer.from_pretrained( + config.text_config._name_or_path) + self.image_processor = self.backend_model.image_processor + else: + raise NotImplementedError + + self.backend_model_full = self.backend_model + if peft_model_id is not None: + self.backend_model = PeftModel.from_pretrained( + self.backend_model, peft_model_id + ) + if tune_strategy == "none" and with_deepspeed is True: + # when load the model with 4bit / 8bit. + # fail to use deepspeed. 
+ if device == "gpu": + deepspeed.init_distributed() + self.ds_engine = deepspeed.initialize(model=self.backend_model, config_params=ds_config)[0] + self.ds_engine.module.eval() + + self.tokenizer.padding_side = "left" # necessary for auto-gressive inference + + elif tune_strategy == 'adapter': + raise NotImplementedError('adapter tune strategy not implemented') + + if self.arch_type == "encoder_decoder": + if self.tokenizer.eos_token_id is None: + self.tokenizer.eos_token_id = self.backend_model.config.eos_token_id + if self.tokenizer.pad_token is None: + self.tokenizer.pad_token_id = self.tokenizer.eos_token_id + +
+[docs] + def tokenize(self, dataset, *args, **kwargs): + """ + Tokenize the full dataset. + + Parameters + ------------ + dataset : + Text dataset. + + args : Optional. + Positional arguments. + + kwargs : Optional. + Keyword arguments. + + Returns + ------------ + tokenized_datasets : + The tokenized dataset. + """ + raise NotImplementedError('tokenize not implemented')
+ + +
+[docs] + def encode(self, input: Union[str, List[str]], *args, **kwargs ) -> Union[List[int], List[List[int]]]: + """ + Perform encoding process of the tokenizer. + + Parameters + ------------ + inputs : str or list. + The text sequence. + + args : Optional. + Positional arguments. + + kwargs : Optional. + Keyword arguments. + + Returns + ------------ + outputs : + The tokenized inputs. + """ + # check how to handle the image processor + if isinstance(input, dict): + # TODO refactor the input type to make it elegant. + kwargs.update(input) + if "images" not in input: + tokens = self.tokenizer(*args, **kwargs) + else: + if getattr(self.tokenizer, "image_processor", None) is not None: + tokens = self.tokenizer(*args, **kwargs) + elif getattr(self, "image_processor", None) is not None: + images = kwargs.pop("images") + tokens = self.tokenizer(*args, **kwargs) + images = self.image_processor.preprocess( + images, return_tensors='pt')['pixel_values'][0] + tokens['pixel_values'] = images + else: + print("Can not find the image processor") + raise NotImplementedError + return tokens + elif isinstance(input, list): + return self.tokenizer(text=input, *args, **kwargs)#batch encode,will automatically do left padding + elif isinstance(input, str): + return self.tokenizer.encode(text=input, *args, **kwargs) + else: + raise NotImplementedError(f'type "{type(input)}" cannot be encoded')
+ + + +
+[docs] + def decode(self, input, *args, **kwargs ) -> Union[str, List[str]]: + """ + Perform decoding process of the tokenizer. + + Parameters + ------------ + inputs : list. + The token sequence. + + args : Optional. + Positional arguments. + + kwargs : Optional. + Keyword arguments. + + Returns + ------------ + outputs : + The text decoded from the token inputs. + """ + if isinstance(input, List): + input=torch.tensor(input) + if input.dim()==2: + return self.tokenizer.batch_decode(input, *args, **kwargs)#batch_decode + else: + # Can be list of ints or a Tensor + return self.tokenizer.decode(input, *args, **kwargs)
+ + + +
+[docs] + def inference(self, inputs, *args, **kwargs): + """ + Perform generation process of the model. + + Parameters + ------------ + inputs : + The sequence used as a prompt for the generation or as model inputs to the model. + + args : Optional. + Positional arguments. + + kwargs : Optional. + Keyword arguments. + + Returns + ------------ + outputs : + The generated sequence output + """ + # current_time = time.strftime("%H:%M:%S", time.localtime()) + # print(f"{current_time}: model.inference: start", flush=True) + + # TODO need to discuss how to handle pad_token_id + if self.arch_type == "encoder_decoder": + kwargs.update(pad_token_id=self.tokenizer.pad_token_id) + elif self.arch_type == "vision_encoder_decoder": + # TODO disucss how to modify the interface to remove this part. + inputs = copy.deepcopy(inputs) + input_ids = inputs.pop('input_ids') + kwargs.update(**inputs) + inputs = input_ids + + # current_time = time.strftime("%H:%M:%S", time.localtime()) + # print(f"{current_time}: model.inference: kwargs update end", flush=True) + + with torch.no_grad(): + if self.device == "gpu": + if getattr(self, "ds_engine", None) is not None: + outputs = self.ds_engine.module.generate( + input_ids=inputs, + synced_gpus=True, + *args, + **kwargs + ) + else: + outputs = self.backend_model.generate( + input_ids=inputs, + synced_gpus=True, + *args, + **kwargs, + ) + elif self.device == "cpu": + outputs = self.backend_model.generate( + input_ids=inputs, + synced_gpus=True, + *args, + **kwargs + ) + else: + raise NotImplementedError( + f"device \"{self.device}\" is not supported" + ) + + # current_time = time.strftime("%H:%M:%S", time.localtime()) + # print(f"{current_time}: model.inference: end", flush=True) + + return outputs
+ + + +
+[docs] + def merge_lora_weights(self): + if self.model_args.use_lora: + self.get_backend_model().merge_and_unload() + else: + logger.warning("LoRA training is NOT enabled. Merging LoRA weights is not applicable.")
+ + + +
+[docs] + def save(self, dir, save_full_model=False, *args, **kwargs): + """ + Perform generation process of the model. + + Parameters + ------------ + dir : + The directory to save model and tokenizer + + save_full_model : Optional. + Whether to save full model. + + kwargs : Optional. + Keyword arguments. + + Returns + ------------ + outputs : + The generated sequence output + """ + self.get_tokenizer().save_pretrained(dir) + if save_full_model and self.model_args.use_lora: + self.backend_model_full.save_pretrained(dir) + else: + self.get_backend_model().save_pretrained(dir)
+ + + +
+[docs] + def get_max_length(self): + """ + Return max acceptable input length in terms of tokens. + """ + if "tokenizer" not in self.tokenizer.__dict__: + return self.tokenizer.model_max_length + else: + # for the multi-modality processor, + # the max length is stored in the inner text tokenizer + return self.tokenizer.tokenizer.model_max_length
+ + + +
+[docs] + def get_tokenizer(self): + """ + Return the tokenizer of the model. + """ + return self.tokenizer
+ + + +
+[docs] + def get_backend_model(self): + """ + Return the backend model. + """ + return self.backend_model
+
\ No newline at end of file diff --git a/_modules/lmflow/models/hf_model_mixin.html b/_modules/lmflow/models/hf_model_mixin.html new file mode 100644 index 000000000..d768e3896 --- /dev/null +++ b/_modules/lmflow/models/hf_model_mixin.html @@ -0,0 +1,1084 @@
+lmflow.models.hf_model_mixin — LMFlow documentation

Source code for lmflow.models.hf_model_mixin

+#!/usr/bin/env python
+# coding=utf-8
+# Copyright 2024 Statistics and Machine Learning Research Group. All rights reserved.
+import gc
+import os
+import logging
+from typing import Union, Optional, Dict, List
+import copy
+
+import torch
+import deepspeed
+from transformers import (
+    CONFIG_MAPPING,
+    AutoConfig,
+    BitsAndBytesConfig,
+    AutoTokenizer,
+    PreTrainedTokenizer,
+    PreTrainedTokenizerFast,
+    AutoModelForCausalLM,
+    AutoModelForSequenceClassification,
+)
+from peft import (
+    LoraConfig,
+    PeftModel,
+    TaskType,
+    get_peft_model,
+    prepare_model_for_kbit_training
+)
+from peft.utils.constants import TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING
+from vllm import LLM, SamplingParams
+from vllm.distributed.parallel_state import destroy_model_parallel
+
+from lmflow.models.base_model import BaseModel
+from lmflow.utils.constants import (
+    LMFLOW_LORA_TARGET_MODULES_MAPPING
+)
+from lmflow.args import ModelArguments
+
+
+
+[docs] +logger = logging.getLogger(__name__)
+ + + +
+[docs] +HF_AUTOMODEL_MAPPING = { + "decoder_only": AutoModelForCausalLM, + "text_regression": AutoModelForSequenceClassification +}
+ + +
+[docs] +HF_AUTOMODEL_TYPE = Union[AutoModelForCausalLM, AutoModelForSequenceClassification]
+ + +
+[docs] +LORA_TARGET_MODULES_MAPPING = { + k: TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING.get(k, LMFLOW_LORA_TARGET_MODULES_MAPPING.get(k)) + for k in set(TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING) | set(LMFLOW_LORA_TARGET_MODULES_MAPPING) +}
+ + + +
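+# For illustration (hypothetical entries): if the transformers mapping contains
+# {"llama": ["q_proj", "v_proj"]} and the LMFlow mapping adds
+# {"qwen2": ["q_proj", "v_proj"]}, the merged dict exposes both keys, and the
+# transformers entry takes precedence for any model type present in both.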
+[docs] +class HFModelMixin(BaseModel): + def __init__( + self, + model_args: ModelArguments, + do_train: bool, + ds_config=None, + device: Optional[str]="gpu", + use_accelerator: bool=False, + hf_auto_model_additional_args: Optional[Dict]=None, + *args, + **kwargs + ): + """Initializes a HFModel instance. + + Parameters + ---------- + model_args : + Dictionary with model arguments such as model name, path, revision, etc. + do_train : bool + To prepare the model for training or inference. + ds_config : optional + Deepspeed configuration for distributed training, by default None + device : str, optional + By default "gpu" + use_accelerator : bool, optional + By default False + """ + + # See more about loading any type of standard or custom dataset (from + # files, python dict, pandas DataFrame, etc) at + # https://huggingface.co/docs/datasets/loading_datasets.html. + + # Load pretrained model and tokenizer + # + # Distributed training: The .from_pretrained methods guarantee that + # only one local process can concurrently download model & vocab. + +
+[docs] + self.device = device
+ +
+[docs] + self.model_args = model_args
+ +
+[docs] + self.hf_auto_model = HF_AUTOMODEL_MAPPING[model_args.arch_type]
+ +
+[docs] + self.use_accelerator = use_accelerator
+ +
+[docs] + self.ds_config = ds_config
+ +
+[docs] + self.do_train = do_train
+ + +
+[docs] + self.tokenizer = self.__prepare_tokenizer(model_args)
+ +
+[docs] + self.torch_dtype = self.__prepare_dtype(model_args)
+ +
+[docs] + self.hf_model_config = self.__prepare_model_config(model_args, hf_auto_model_additional_args)
+ +
+[docs] + self.quant_config = self.__prepare_quant_config(model_args)
+ +
+[docs] + self.peft_config = self.__prepare_peft_config(model_args)
+ +
+[docs] + self._activated = False # for inference load and offload
+ + + # Some implementations require custom modules to be injected into the model. + self.__model_module_inject(model_args) + + if self.do_train: + self.__prepare_model_for_training(model_args, self.hf_auto_model) + + +
+[docs] + def __prepare_tokenizer( + self, + model_args: ModelArguments, + ) -> Union[PreTrainedTokenizer, PreTrainedTokenizerFast]: + tokenizer_name = model_args.tokenizer_name or model_args.model_name_or_path + if not tokenizer_name: + raise ValueError( + "You are instantiating a new tokenizer from scratch. This is" + " not supported by this script. You can do it from another" + " script, save it, and load it from here, using" + " --tokenizer_name." + ) + + tokenizer_kwargs = { + "cache_dir": model_args.cache_dir, + "use_fast": model_args.use_fast_tokenizer, + "revision": model_args.model_revision, + "use_auth_token": True if model_args.use_auth_token else None, + "trust_remote_code": model_args.trust_remote_code, + } + if model_args.padding_side != 'auto': + tokenizer_kwargs["padding_side"] = model_args.padding_side + + try: + tokenizer = AutoTokenizer.from_pretrained(tokenizer_name, **tokenizer_kwargs) + except RecursionError: + logger.warning( + "The tokenizer_config.json file doesn't set the special tokens. Using default values: " + "<unk>, <s>, </s> for unknown token, bos token and eos token respectively.") + tokenizer = AutoTokenizer.from_pretrained(tokenizer_name, unk_token="<unk>", + bos_token="<s>", + eos_token="</s>", + **tokenizer_kwargs) + + tokenizer.truncation_side = model_args.truncation_side or tokenizer.truncation_side + tokenizer.model_max_length = model_args.model_max_length or tokenizer.model_max_length + + return tokenizer
+ + + +
+[docs] + def __prepare_dtype( + self, + model_args: ModelArguments, + ) -> torch.dtype: + if model_args.arch_type == 'text_regression': + if model_args.torch_dtype in ["auto", None, "bf16", "bfloat16"]: + torch_dtype = torch.bfloat16 + else: + torch_dtype = getattr(torch, model_args.torch_dtype) + logger.warning( + f"If you are doing reward modeling," + f" InstructGPT uses torch.bfloat16 for reward model, but you" + f" are using {torch_dtype} for your reward model init. Ignore" + f" this warning if it is intended.") + else: + torch_dtype = ( + model_args.torch_dtype + if model_args.torch_dtype in ["auto", None] + else getattr(torch, model_args.torch_dtype) + ) + + logger.debug(f"torch_dtype on init: {torch_dtype}") + + return torch_dtype
+ + + +
+[docs] + def __prepare_model_config( + self, + model_args: ModelArguments, + hf_auto_model_additional_args: Optional[Dict]=None, + ): + """Prepare model configuration for hf auto register, + Parameters + ---------- + model_args : ModelArguments + LMFlow model arguments. + hf_auto_model_additional_args : Optional[Dict], optional + Special configurations such as `num_labels` in `AutoModelForSequenceClassification` + (commonly used in reward modeling) will not preset in __prepare_model_config, + so it should be passed in hf_auto_model_additional_args. + Returns + ------- + config : ModelConfig + hf model config. + """ + config_kwargs = { + "attn_implementation": "flash_attention_2" if model_args.use_flash_attention else None, + "cache_dir": model_args.cache_dir, + "revision": model_args.model_revision, + "use_auth_token": True if model_args.use_auth_token else None, + "trust_remote_code": model_args.trust_remote_code, + "from_tf": bool(".ckpt" in model_args.model_name_or_path), + } + if hf_auto_model_additional_args is not None: + config_kwargs.update(hf_auto_model_additional_args) + + if model_args.config_name: + config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs) + elif model_args.model_name_or_path: + config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs) + else: + config = CONFIG_MAPPING[model_args.model_type]() + logger.warning("You are instantiating a new config instance from scratch.") + if model_args.config_overrides is not None: + logger.info(f"Overriding config: {model_args.config_overrides}") + config.update_from_string(model_args.config_overrides) + logger.info(f"New config: {config}") + + return config
+ + + +
+[docs] + def __prepare_quant_config( + self, + model_args: ModelArguments, + ): + quant_config = None + if self.do_train: + if model_args.use_qlora: + quant_config = BitsAndBytesConfig( + load_in_4bit=model_args.bits == 4, + load_in_8bit=model_args.bits == 8, + llm_int8_threshold=6.0, + llm_int8_has_fp16_weight=False, + bnb_4bit_compute_dtype=self.torch_dtype, + bnb_4bit_use_double_quant=model_args.double_quant, + bnb_4bit_quant_type=model_args.quant_type, + ) + else: # inference + if model_args.use_int8: + quant_config = BitsAndBytesConfig( + load_in_8bit = model_args.use_int8, + ) + + return quant_config
+ + + +
+[docs] + def __prepare_peft_config( + self, + model_args: ModelArguments, + ): + peft_config = None + if model_args.use_lora: + if model_args.lora_target_modules: + lora_target_modules = model_args.lora_target_modules + else: + model_config = self.hf_model_config + if hasattr(model_config, "to_dict"): + model_config = model_config.to_dict() + if "model_type" not in model_config or not model_config["model_type"]: + logger.warning("It seems that your base model is a custom model, since " + "model_type is not found in model_config when preparing peft config. " + "Setting model_type to 'custom' as a fallback.") + model_config["model_type"] = "custom" + lora_target_modules = LORA_TARGET_MODULES_MAPPING.get(model_config["model_type"], None) + + peft_config = LoraConfig( + task_type=TaskType.CAUSAL_LM, + inference_mode=False, + r=model_args.lora_r, + lora_alpha=model_args.lora_alpha, + lora_dropout=model_args.lora_dropout, + target_modules=lora_target_modules, + ) + + return peft_config
+ + + +
+[docs] + def __model_module_inject( + self, + model_args: ModelArguments, + ) -> None: + """Override some model modules with custom implementations. + + Current implementations: + - Position interpolation (model_args.do_rope_scaling): + replace llama embeddings with condense embeddings. + """ + # position interpolation + if model_args.do_rope_scaling: + if "LlamaForCausalLM" in self.model_config.architectures: + from lmflow.utils.position_interpolation.llama_rope_scaled_monkey_patch import ( + replace_llama_with_condense, + ) + replace_llama_with_condense(model_args.rope_pi_ratio, model_args.rope_ntk_ratio)
+ + + +
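+    # Illustrative note (not part of the original source): position interpolation
+    # is opt-in through ModelArguments, e.g. (hypothetical values)
+    #
+    #   model_args.do_rope_scaling = True
+    #   model_args.rope_pi_ratio = 4
+    #   model_args.rope_ntk_ratio = 1.0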
+[docs] + def __prepare_model_for_training( + self, + model_args: ModelArguments, + hf_auto_model: HF_AUTOMODEL_TYPE, + ): + assert self.do_train, "To prepare the model for training, please set do_train=True." + # TODO: change to accelerate + logger.info("Preparing model for training") + if model_args.model_name_or_path: + model = hf_auto_model.from_pretrained( + model_args.model_name_or_path, + torch_dtype=self.torch_dtype, + config=self.hf_model_config, + quantization_config=self.quant_config, + ) + + if model_args.use_qlora: + model.gradient_checkpointing_enable() + model = prepare_model_for_kbit_training(model) + else: + model = hf_auto_model.from_config(self.hf_model_config) + n_params = sum(dict((p.data_ptr(), p.numel()) for p in model.parameters()).values()) + logger.info(f"Training new model from scratch - Total size={n_params/2**20:.2f}M params") + self.backend_model_full = model + + if model_args.ignore_bias_buffers: + # torch distributed hack + # fix for DDP issues with LM bias/mask buffers - invalid scalar type, inplace operation. + # See: https://github.com/huggingface/transformers/issues/22482#issuecomment-1595790992 + model._ddp_params_and_buffers_to_ignore = [ + name for name, buffer in model.named_buffers() if buffer.dtype == torch.bool + ] + + if model_args.use_lora: + model.enable_input_require_grads() + model = get_peft_model(model, self.peft_config) + model.print_trainable_parameters() + + # We resize the embeddings only when necessary to avoid index errors. + # If you are creating a model from scratch on a small vocab and want a + # smaller embedding size, remove this test. + with deepspeed.zero.GatheredParameters(model.get_input_embeddings().weight, modifier_rank=None): + weights = model.get_input_embeddings().weight + embedding_size = weights.shape[0] + if len(self.tokenizer) > embedding_size: + model.resize_token_embeddings(len(self.tokenizer)) + + self.backend_model = model + self.__prepare_model_post_process()
+ + + +
+[docs] + def __prepare_model_for_inference( + self, + model_args: ModelArguments, + hf_auto_model: HF_AUTOMODEL_TYPE, + use_accelerator: bool, + ds_config + ): + logger.info(f"Backend model already initialized, moving to device: {self.device}") + if hasattr(self, "backend_model"): + if self.backend_model.device == torch.device("cpu"): + self.backend_model.to(self.device) + return + + # TODO: change to accelerate + logger.info("Preparing model for inference") + inference_load_kwargs = {} + inference_load_kwargs_bak = copy.deepcopy(inference_load_kwargs) + ram_optimized_load_kwargs = { + "device_map": "auto", + "offload_folder": "offload", + "offload_state_dict": True, + } + + if use_accelerator or model_args.use_ram_optimized_load: + inference_load_kwargs.update(ram_optimized_load_kwargs) + + if not use_accelerator: + from transformers.integrations import HfDeepSpeedConfig + dschf = HfDeepSpeedConfig(ds_config) + + try: + self.backend_model = hf_auto_model.from_pretrained( + model_args.model_name_or_path, + torch_dtype=self.torch_dtype, + config=self.hf_model_config, + quantization_config=self.quant_config, + **inference_load_kwargs, + ) + except: + logger.warning( + "Failed to use RAM optimized load. Using original load instead." + ) + self.backend_model = hf_auto_model.from_pretrained( + model_args.model_name_or_path, + torch_dtype=self.torch_dtype, + config=self.hf_model_config, + quantization_config=self.quant_config, + **inference_load_kwargs_bak, + ) + + self.backend_model_full = self.backend_model + + if model_args.lora_model_path is not None: + self.backend_model = PeftModel.from_pretrained( + self.backend_model, + model_args.lora_model_path, + ) + + if (not use_accelerator) and self.device == "gpu": + deepspeed.init_distributed() + self.ds_engine = deepspeed.initialize(model=self.backend_model, config_params=ds_config)[0] + self.ds_engine.module.eval() + + self.__prepare_model_post_process()
+ + + +
+[docs] + def __prepare_model_for_vllm_inference( + self, + model_args: ModelArguments, + vllm_gpu_memory_utilization: float, + vllm_tensor_parallel_size: int, + ): + self.backend_model_for_inference = LLM( + model=model_args.model_name_or_path, + tokenizer=model_args.model_name_or_path, + dtype=model_args.torch_dtype if model_args.torch_dtype else "auto", + load_format="auto", + gpu_memory_utilization=vllm_gpu_memory_utilization, + tensor_parallel_size=vllm_tensor_parallel_size, + )
+ + + +
+[docs] + def __prepare_model_post_process(self): + # old models/tokenizers may not have these attributes, fixing + if self.tokenizer.eos_token is None: + self.tokenizer.eos_token = self.backend_model.config.eos_token + if self.tokenizer.eos_token_id is None: + self.tokenizer.eos_token_id = self.backend_model.config.eos_token_id + + if self.tokenizer.pad_token is None: + self.tokenizer.pad_token = self.tokenizer.eos_token + if self.tokenizer.pad_token_id is None: + self.tokenizer.pad_token_id = self.tokenizer.eos_token_id + + if self.model_args.eos_padding: + self.tokenizer.pad_token = self.tokenizer.eos_token + + if not hasattr(self.backend_model.config, "pad_token_id"): + logger.warning("pad_token_id not found in model config. Setting pad_token_id to eos_token_id.") + self.backend_model.config.pad_token_id = self.backend_model.config.eos_token_id + elif self.backend_model.config.pad_token_id is None: + logger.warning("pad_token_id is None in model config. Setting pad_token_id to eos_token_id.") + self.backend_model.config.pad_token_id = self.backend_model.config.eos_token_id
+ + + +
+[docs] + def activate_model_for_inference( + self, + use_vllm: bool=False, + **kwargs, + ): + if self._activated: + logger.warning("You are trying to activate the model for inference, but it is already activated.") + return + + if use_vllm: + self.__prepare_model_for_vllm_inference( + model_args=self.model_args, + vllm_gpu_memory_utilization=kwargs.get("vllm_gpu_memory_utilization"), + vllm_tensor_parallel_size=kwargs.get("vllm_tensor_parallel_size"), + ) + else: + self.__prepare_model_for_inference( + model_args=self.model_args, + hf_auto_model=self.hf_auto_model, + use_accelerator=self.use_accelerator, + ds_config=self.ds_config, + ) + + self._activated = True
+ + + +
+[docs] + def deactivate_model_for_inference( + self, + use_vllm: bool=False, + ): + """Deactivate the model and release the resources. + + NOTE: Currently, VLLM doesn't have an official way to do this, and the + implementation below cannot release all gpu resources by our observation. + Thus this method is just a placeholder for future implementation. See: + [Github issue](https://github.com/vllm-project/vllm/issues/1908) + """ + if not self._activated: + logger.warning("You are trying to deactivate the model for inference, but it is already deactivated.") + return + + if use_vllm: + destroy_model_parallel() + del self.backend_model_for_inference.llm_engine.model_executor.driver_worker + del self.backend_model_for_inference + gc.collect() + torch.cuda.empty_cache() + else: + self.backend_model.to("cpu") + pass + + self._activated = False
+ + + +
+[docs] + def get_max_length(self): + """ + Return max acceptable input length in terms of tokens. + """ + return self.tokenizer.model_max_length
+ + + +
+[docs] + def get_tokenizer(self): + """ + Return the tokenizer of the model. + """ + return self.tokenizer
+ + + +
+[docs] + def get_backend_model(self): + """ + Return the backend model. + """ + return self.backend_model
+
\ No newline at end of file diff --git a/_modules/lmflow/models/hf_text_regression_model.html b/_modules/lmflow/models/hf_text_regression_model.html new file mode 100644 index 000000000..3bae19f20 --- /dev/null +++ b/_modules/lmflow/models/hf_text_regression_model.html @@ -0,0 +1,970 @@
+lmflow.models.hf_text_regression_model — LMFlow documentation

Source code for lmflow.models.hf_text_regression_model

+#!/usr/bin/env python
+# coding=utf-8
+# Copyright 2024 Statistics and Machine Learning Research Group. All rights reserved.
+import os
+import copy
+import hashlib
+import logging
+from pathlib import Path
+from typing import List, Union, Dict, Optional
+
+import ray
+import ray.data
+import torch
+from peft import (
+    LoraConfig,
+    PeftModel,
+    TaskType,
+    get_peft_config,
+    get_peft_model,
+    prepare_model_for_kbit_training
+)
+from transformers.modeling_outputs import SequenceClassifierOutputWithPast
+from vllm import SamplingParams
+
+from lmflow.args import ModelArguments
+from lmflow.datasets.dataset import Dataset, KEY_SCORE
+from lmflow.models.interfaces.tunable import Tunable
+from lmflow.models.hf_model_mixin import HFModelMixin
+from lmflow.models.text_regression_model import TextRegressionModel
+from lmflow.tokenization.hf_text_regression_model import (
+    paired_conversation_tokenize_function, 
+    conversation_tokenize_function,
+    tokenize_function,
+    text_to_textlist_tokenize_function,
+)
+from lmflow.utils.conversation_template import PRESET_TEMPLATES
+from lmflow.utils.constants import (
+    PAIRED_CONVERSATION_DATASET_DESCRIPTION, 
+    TEXT2TEXT_DATASET_DESCRIPTION,
+    TEXT_ONLY_DATASET_DESCRIPTION,
+    TEXT_TO_TEXTLIST_DATASET_DESCRIPTION,
+    CONVERSATION_DATASET_DESCRIPTION, 
+)
+from lmflow.utils.data_utils import RewardModelInferenceResultWithInput
+
+
+
+[docs] +logger = logging.getLogger(__name__)
+ + + +
+[docs] +class HFTextRegressionModel(TextRegressionModel, HFModelMixin, Tunable): + r""" + Initializes a HFTextRegressionModel instance. + + Parameters + ------------ + + model_args : + Model arguments such as model name, path, revision, etc. + + tune_strategy : str or none, default="normal". + A string representing the dataset backend. Defaults to "huggingface". + + ds_config : + Deepspeed configuations. + + args : Optional. + Positional arguments. + + kwargs : Optional. + Keyword arguments. + """ + + def __init__( + self, + model_args: ModelArguments, + tune_strategy: str='normal', + ds_config=None, + device="gpu", + use_accelerator=False, + *args, + **kwargs + ): + """ + Initializes a HFTextRegressionModel instance. + :param model_args: dictionary with model arguments such as model name, path, revision, etc. + :param tune_strategy: tuning strategy: normal, none, lora or adapter + :param ds_config: deepspeed configuration for distributed training + """ + assert model_args.arch_type == "text_regression", ( + f"Invalid model architecture type: {model_args.arch_type}. " + f"Expected: text_regression" + ) +
+[docs] + config_additional_args = {"num_labels": 1}
+ + HFModelMixin.__init__( + self, + model_args=model_args, + do_train=True if tune_strategy == "normal" else False, + ds_config=ds_config, + device=device, + use_accelerator=use_accelerator, + hf_auto_model_additional_args=config_additional_args, + *args, + **kwargs + ) + + +
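A minimal construction sketch (not part of the listing above, illustrative only): it assumes lmflow.args.ModelArguments exposes a model_name_or_path field alongside arch_type, and that a text-regression (reward) checkpoint exists at the hypothetical path below.

from lmflow.args import ModelArguments
from lmflow.models.hf_text_regression_model import HFTextRegressionModel

model_args = ModelArguments(
    model_name_or_path="path/to/reward-model",  # hypothetical checkpoint path
    arch_type="text_regression",                # required by the assert in __init__
)
# tune_strategy="none" skips training-time setup; "normal" prepares the model for fine-tuning
model = HFTextRegressionModel(model_args, tune_strategy="none", device="gpu")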
+[docs] + def tokenize( + self, + dataset: Dataset, + add_special_tokens=True, + *args, + **kwargs + ): + """ + Tokenize the full dataset. + + Parameters + ------------ + dataset : lmflow.datasets.Dataset. + + args : Optional. + Positional arguments. + + kwargs : Optional. + Keyword arguments. + + Returns + ------------ + tokenized_datasets : + The tokenized dataset, without any leading or trailing special + tokens (normally they are Begin-Of-Sentence or End-Of-Sentence + tokens). + """ + # Preprocessing the datasets. + # First we tokenize all the texts. + if dataset.get_backend() != "huggingface": + raise NotImplementedError( + "tokenization of datasets with non-huggingface backend are" + "not supported yet" + ) + + dataset_type = dataset.get_type() + model_args = self.model_args + raw_datasets = dataset + hf_raw_datasets = dataset.get_backend_dataset() + column_names = list(hf_raw_datasets.features) # in paired conversation, for example, would be 'chosen' and 'rejected' + data_args = raw_datasets.get_data_args() + + # Whether to truncate long sequences to fit into max_length + use_truncation = False + if model_args.use_lora or data_args.disable_group_texts: + use_truncation = True + + # Requires three types of information for tokenizing different datasets + # 1) Which fields require tokenization, e.g. + # "text2float": "text", but not "float" + # "text2text": both "input" and "output" + # 2) How will there tokenized sequence concatenated together, e.g. + # "text_only": "text" -> "text" + # "text2text": "input", "output" -> "input" + "output" + # 3) Which fields require loss in final computation, e.g. + # "text_only": "text" + # "text2text": "output" only + tokenize_fn = None + tokenize_fn_kwargs = { + "data_args": data_args, + "tokenizer": self.tokenizer, + "column_names": column_names, + } + if dataset_type == "text_only": + tokenize_fn = tokenize_function + text_only_tokenize_fn_kwargs = { + "tokenized_column_order": ["text"], + "label_columns": ["text"], + "add_special_tokens": add_special_tokens, + "use_truncation": use_truncation, + } + tokenize_fn_kwargs.update(text_only_tokenize_fn_kwargs) + + elif dataset_type == "text2text": + tokenize_fn = tokenize_function + text2text_tokenize_fn_kwargs = { + "tokenized_column_order": ["input", "output"], + "label_columns": ["output"], + "add_special_tokens": False, + "use_truncation": use_truncation, + } + tokenize_fn_kwargs.update(text2text_tokenize_fn_kwargs) + + elif dataset_type in ["conversation", "paired_conversation"]: + if dataset_type == "conversation": + tokenize_fn = conversation_tokenize_function + elif dataset_type == "paired_conversation": + tokenize_fn = paired_conversation_tokenize_function + + if data_args.conversation_template: + if data_args.conversation_template in PRESET_TEMPLATES.keys(): + conversation_template = PRESET_TEMPLATES[data_args.conversation_template] + else: + raise NotImplementedError( + f"Conversation template {data_args.conversation_template} is not supported yet." + ) + else: + logger.warning("No conversation template provided. 
Using default template.") + conversation_template = PRESET_TEMPLATES['empty'] + tokenize_fn_kwargs["conversation_template"] = conversation_template + logger.warning(f"Conversation template: {conversation_template}") + + elif dataset_type == "text_to_textlist": + tokenize_fn = text_to_textlist_tokenize_function + text_to_textlist_tokenize_fn_kwargs = { + "add_special_tokens": add_special_tokens, + "use_truncation": use_truncation, + } + tokenize_fn_kwargs.update(text_to_textlist_tokenize_fn_kwargs) + + else: + raise NotImplementedError( + f"Dataset type \"{dataset_type}\" is not supported, currently" + " only support following data types for HFTextRegressionModel:\n" + f" 1) [Inference]{TEXT_ONLY_DATASET_DESCRIPTION}\n" + f" 2) [Inference]{TEXT2TEXT_DATASET_DESCRIPTION}\n" + f" 3) [Training]{PAIRED_CONVERSATION_DATASET_DESCRIPTION}\n" + f" 4) [Inference]{CONVERSATION_DATASET_DESCRIPTION}\n" + f" 5) [Inference]{TEXT_TO_TEXTLIST_DATASET_DESCRIPTION}\n" + ) + + tokenize_kwargs = {} + if not data_args.streaming: + fingerprint = hashlib.md5( + ( + raw_datasets.get_fingerprint() + + str(self.tokenizer) + + f'###padding_side={self.tokenizer.padding_side}' + + ('###conversation_template=' + str(conversation_template) if "conversation" in dataset_type else "") + + f'###disable_group_texts={data_args.disable_group_texts}' + + f'###block_size={data_args.block_size}' + ).encode("utf-8") + ).hexdigest() + tokenize_kwargs = { + "num_proc": data_args.preprocessing_num_workers, + "load_from_cache_file": not data_args.overwrite_cache, + "desc": "Running tokenizer on dataset", + "new_fingerprint": fingerprint, + } + + tokenized_datasets = raw_datasets.map( + tokenize_fn, + batched=True, + remove_columns=column_names, + fn_kwargs=tokenize_fn_kwargs, + **tokenize_kwargs + ) + return tokenized_datasets
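A hedged usage sketch for tokenize: it assumes lmflow.args.DatasetArguments and a dataset directory in one of the supported formats (for reward-model training, "paired_conversation"); the path and argument names are assumptions, not taken from this listing.

from lmflow.args import DatasetArguments
from lmflow.datasets.dataset import Dataset

data_args = DatasetArguments(dataset_path="data/paired_conversation")  # hypothetical path
train_dataset = Dataset(data_args)
tokenized_dataset = model.tokenize(train_dataset)  # Dataset backed by a tokenized HF dataset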
+ + + +
+[docs] + def inference( + self, + inputs, + release_gpu: bool = False, + use_vllm: bool = False, + **kwargs + ) -> Union[List[float], SequenceClassifierOutputWithPast]: + """ + Perform generation process of the model. + + Parameters + ------------ + inputs : + The sequence used as a prompt for the generation or as model inputs to the model. + When using vllm inference, this should be a string or a list of strings. + When using normal inference, this should be a tensor. + release_gpu : bool, optional + Whether to release the GPU resource after inference, by default False. + use_vllm : bool, optional + Whether to use VLLM for inference, by default False. + kwargs : Optional. + Keyword arguments. + + Returns + ------------ + outputs : + The generated sequence output + """ + if use_vllm: + logger.warning( + "VLLM inference is not supported for text regression model, using normal inference instead." + ) + use_vllm = False + + if not self._activated: + self.activate_model_for_inference( + use_vllm=use_vllm, + **kwargs, + ) + + if use_vllm: + res = self.__vllm_inference(inputs, **kwargs) + else: + res = self.__inference(inputs, **kwargs) + + if release_gpu: + self.deactivate_model_for_inference(use_vllm=use_vllm) + + return res
+ + + +
+[docs] + def __inference( + self, + inputs, + **kwargs + ): + """ + Perform generation process of the model. + + Parameters + ------------ + inputs : + The **tokenized** sequence used as a prompt for the generation or as model inputs to the model. + kwargs : Optional. + Keyword arguments. + + Returns + ------------ + outputs : + The generated sequence output + """ + with torch.no_grad(): + if self.use_accelerator: + outputs = self.backend_model( + input_ids=inputs, + **kwargs, + ) + else: + if self.device == "gpu": + outputs = self.ds_engine.module( + input_ids=inputs, + synced_gpus=True, + **kwargs, + ) + elif self.device == "cpu": + outputs = self.backend_model( + input_ids=inputs, + synced_gpus=True, + **kwargs, + ) + else: + raise NotImplementedError( + f"device \"{self.device}\" is not supported" + ) + + if kwargs.get('return_input', False): + outputs = {"input": inputs, "output": outputs} + + return outputs
+ + + +
+[docs] + def __vllm_inference( + self, + inputs: Union[str, List[str]], + sampling_params: Optional[SamplingParams] = None, + **kwargs, + ) -> Union[List[List[str]], List[List[List[int]]]]: + """Perform VLLM inference process of the model. + + Parameters + ---------- + inputs : Union[str, List[str]] + Prompt(s), string or a list of strings. + sampling_params : Optional[SamplingParams], optional + vllm SamplingParams object, by default None. + + Returns + ------- + """ + raise NotImplementedError( + "VLLM inference is not supported for text regression model." + )
+ + + +
+[docs] + def prepare_inputs_for_inference( + self, + dataset: Dataset, + enable_distributed_inference: bool = False, + use_vllm: bool = False, + **kwargs, + ) -> Union[Dataset, ray.data.Dataset]: + if use_vllm: + raise NotImplementedError( + "VLLM inference is not supported for text regression model." + ) + + inference_inputs = self.tokenize(dataset) + + if enable_distributed_inference: + inference_inputs.sanity_check(drop_invalid=True) + inference_inputs = inference_inputs.get_backend_dataset() + inference_inputs = ray.data.from_items(inference_inputs) + # -> Dict[str, np.ndarray] + # Example (batch size=2): + # {'input': array(['...','...'], dtype=object), + # 'output': array([array(["...", "..."], dtype=object), array(['...','...'], dtype=object)], dtype=object), + # 'input_ids': array( + # [ + # array([array([ 27, 91, 882, ..., 128256, 128256, 128256]), + # array([ 27, 91, 882, ..., 128256, 128256, 128256])], + # dtype=object), + # array([array([ 27, 91, 882, ..., 128256, 128256, 128256]), + # array([ 27, 91, 882, ..., 128256, 128256, 128256])], + # dtype=object) + # ], dtype=object)} + + return inference_inputs
+ + + + @staticmethod +
+[docs] + def postprocess_inference_outputs( + dataset: Dataset, + scores: Union[List[float], List[List[float]]], + ): + output_dict = {"type": "", "instances": []} + if dataset.get_type() == "text_to_textlist": + output_dict["type"] = "text_to_scored_textlist" + for idx, instance in enumerate(dataset.get_backend_dataset()): + if len(instance["output"]) < 2: + logger.warning(f"Instance {idx} has less than 2 outputs, skipping.") + output_dict["instances"].append( + { + "input": instance["input"], + "output": [{"text": text} for text in instance["output"]], + } + ) + else: + raise NotImplementedError(f"Dataset type {dataset.get_type()} is not supported for reward model inference.") + + for i, instance_scores in enumerate(scores): + for j, score in enumerate(instance_scores): + output_dict["instances"][i]["output"][j][KEY_SCORE] = score + + output_dataset_args = copy.deepcopy(dataset.data_args) + output_dataset_args.dataset_path = None + output_dataset_args.dataset_name = f"{output_dataset_args.dataset_name}_scored" + output_dataset = Dataset(output_dataset_args) + output_dataset = output_dataset.from_dict(output_dict) + + return output_dataset
+ + + + @staticmethod +
+[docs] + def postprocess_distributed_inference_outputs( + dataset: Dataset, + inference_result: List[RewardModelInferenceResultWithInput], + ): + output_dict = {"type": "text_to_scored_textlist", "instances": inference_result} + output_dataset_args = copy.deepcopy(dataset.data_args) + output_dataset_args.dataset_path = None + output_dataset_args.dataset_name = f"{output_dataset_args.dataset_name}_scored" + output_dataset = Dataset(output_dataset_args) + output_dataset = output_dataset.from_dict(output_dict) + + return output_dataset
+ + + +
+[docs] + def save(self, dir, *args, **kwargs): + """ + Save the model and tokenizer to the given directory. + + Parameters + ------------ + dir : + The directory to save model and tokenizer + + kwargs : Optional. + Keyword arguments. + """ + self.get_tokenizer().save_pretrained(dir) + self.get_backend_model().save_pretrained(dir)
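A rough end-to-end scoring sketch under the same assumptions, not a definitive recipe: with num_labels=1 the backend sequence classifier returns one scalar per input, which serves as the reward score.

encoded = model.get_tokenizer()("How do I sort a list in Python?", return_tensors="pt")
outputs = model.inference(encoded["input_ids"])  # SequenceClassifierOutputWithPast
reward = outputs.logits[0].item()                # a single scalar because num_labels == 1
model.save("output_models/my_reward_model")      # writes tokenizer and backend model weights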
\ No newline at end of file
diff --git a/_modules/lmflow/models/interfaces/tunable.html b/_modules/lmflow/models/interfaces/tunable.html new file mode 100644 index 000000000..ad8564e6c --- /dev/null +++ b/_modules/lmflow/models/interfaces/tunable.html @@ -0,0 +1,473 @@
+ lmflow.models.interfaces.tunable — LMFlow documentation

Source code for lmflow.models.interfaces.tunable

+#!/usr/bin/env python
+# coding=utf-8
+"""Tunable class
+"""
+
+from abc import ABC
+
+
+
+[docs] +class Tunable(ABC): + pass
\ No newline at end of file
diff --git a/_modules/lmflow/models/regression_model.html b/_modules/lmflow/models/regression_model.html new file mode 100644 index 000000000..fd4a90385 --- /dev/null +++ b/_modules/lmflow/models/regression_model.html @@ -0,0 +1,474 @@
+ lmflow.models.regression_model — LMFlow documentation

Source code for lmflow.models.regression_model

+#!/usr/bin/env python
+# coding=utf-8
+"""General regression model."""
+
+from lmflow.models.base_model import BaseModel
+
+
+
+[docs] +class RegressionModel(BaseModel): + + def __init__(self, *args, **kwargs): + pass
\ No newline at end of file
diff --git a/_modules/lmflow/models/text_regression_model.html b/_modules/lmflow/models/text_regression_model.html new file mode 100644 index 000000000..5a86e8001 --- /dev/null +++ b/_modules/lmflow/models/text_regression_model.html @@ -0,0 +1,529 @@
+ lmflow.models.text_regression_model — LMFlow documentation

Source code for lmflow.models.text_regression_model

+#!/usr/bin/env python
+# coding=utf-8
+"""
+A model that maps "text_only" data to float.
+"""
+
+from lmflow.models.regression_model import RegressionModel
+from lmflow.datasets.dataset import Dataset
+
+
+
+[docs] +class TextRegressionModel(RegressionModel): + r""" + Initializes a TextRegressionModel instance. + + Parameters + ------------ + + model_args : + Model arguments such as model name, path, revision, etc. + + args : Optional. + Positional arguments. + + kwargs : Optional. + Keyword arguments. + """ + + def __init__( + self, + model_args, + *args, + **kwargs + ): + """ + Initializes a TextRegressionModel instance. + :param model_args: dictionary with model arguments such as model name, path, revision, etc. + """ +
+[docs] + self.inference_func = None
+ + + +
+[docs] + def register_inference_function(self, inference_func): + """ + Registers a regression function. + """ + self.inference_func = inference_func
+ + + +
+[docs] + def inference(self, inputs: Dataset): + """ + Gets regression results of a given dataset. + + :inputs: Dataset object, only accept type "text_only". + """ + if self.inference_func is not None: + return self.inference_func(inputs) + else: + pass
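A small sketch of the registration mechanism above; the scorer below is a toy, and model_args and text_only_dataset are assumed to be defined elsewhere.

from lmflow.models.text_regression_model import TextRegressionModel

def length_scorer(dataset):
    # toy scorer: score each instance by the length of its text field
    return [float(len(sample["text"])) for sample in dataset.get_backend_dataset()]

reg_model = TextRegressionModel(model_args)  # model_args constructed elsewhere; not used by this base class
reg_model.register_inference_function(length_scorer)
scores = reg_model.inference(text_only_dataset)  # returns whatever the registered scorer returns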
\ No newline at end of file
diff --git a/_modules/lmflow/models/vision2seq_model.html b/_modules/lmflow/models/vision2seq_model.html new file mode 100644 index 000000000..8e588e732 --- /dev/null +++ b/_modules/lmflow/models/vision2seq_model.html @@ -0,0 +1,1004 @@
+ lmflow.models.vision2seq_model — LMFlow documentation

Source code for lmflow.models.vision2seq_model

+#!/usr/bin/env python
+# coding=utf-8
+# TODO update the doc
+
+import copy
+import logging
+import time
+import torch
+import torch.nn as nn
+from typing import List, Optional, Tuple, Union
+from torch.nn import CrossEntropyLoss
+
+from transformers import (
+    AutoModelForCausalLM,
+    AutoModelForSeq2SeqLM,
+    AutoModel,
+    Blip2ForConditionalGeneration,
+    Blip2Config,
+    Blip2QFormerModel,
+    Blip2VisionModel,
+    Blip2PreTrainedModel,
+    PreTrainedModel,
+)
+from transformers.modeling_outputs import CausalLMOutputWithPast
+from transformers.deepspeed import is_deepspeed_zero3_enabled
+
+from lmflow.models.base_model import BaseModel
+from lmflow.models.vision_encoder import build_vision_tower
+
+
+
+[docs] +class CustomAutoVision2SeqModel(Blip2ForConditionalGeneration, BaseModel): + def __init__(self, + config: Blip2Config, + image_encoder_name_or_path=None, + qformer_name_or_path=None, + language_model_name_or_path=None, + low_resource=False,): + ''' + TODO update the docs + Args: + config: + # the below variables are used to overwrite the model in config + image_encoder_name_or_path: + qformer_name_or_path: + language_model_name_or_path: + Returns: + ''' + super(Blip2PreTrainedModel, self).__init__(config) +
+[docs] + self.custom_vision_model = getattr( + config, "custom_vision_model", False)
+ +
+[docs] + self.with_qformer = getattr(config, "with_qformer", True)
+ + # vision model + if self.custom_vision_model: + # custom vision model means the vit model customized from llava. + # vision_model_args = getattr(config, "vision_model_args", dict()) + self.vision_model = build_vision_tower(config) + config.vision_config = self.vision_model.config + self.image_processor = self.vision_model.image_processor + elif image_encoder_name_or_path is not None: + # use the model from transformers + self.vision_model = AutoModel.from_pretrained( + image_encoder_name_or_path) + config.vision_config = self.vision_model.config + else: + # the default vit in Blip2 + self.vision_model = Blip2VisionModel(config.vision_config) + if self.with_qformer: + # check if with qformer, the blip series model use qformer + # and the llava based models don't use qformer. + if qformer_name_or_path is not None: + self.query_tokens = nn.Parameter( + torch.zeros(1, config.num_query_tokens, + config.qformer_config.hidden_size)) + self.qformer = AutoModel.from_pretrained( + qformer_name_or_path) + else: + self.query_tokens = nn.Parameter( + torch.zeros(1, config.num_query_tokens, + config.qformer_config.hidden_size)) + self.qformer = Blip2QFormerModel(config.qformer_config) +
+[docs] + kwargs = dict()
+ + if language_model_name_or_path is not None: + if low_resource: + kwargs = dict( + torch_dtype=torch.float16, + load_in_8bit=True, + device_map="auto", + low_cpu_mem_usage=True) + else: + if not is_deepspeed_zero3_enabled(): + kwargs = dict(device_map="auto", + torch_dtype=torch.float16) + language_model = AutoModelForCausalLM.from_pretrained( + language_model_name_or_path, **kwargs) + config.text_config = language_model.config + else: + if config.use_decoder_only_language_model: + language_model = AutoModelForCausalLM.from_config( + config.text_config, **kwargs) + else: + language_model = AutoModelForSeq2SeqLM.from_config( + config.text_config, **kwargs) + # Update _tied_weights_keys using the base model used. + if getattr(language_model, "_tied_weights_keys", None) is not None: + self._tied_weights_keys = [f"language_model.{k}" for k in language_model._tied_weights_keys] + +
+[docs] + self.language_model = language_model
+ + if self.with_qformer: + self.language_projection = nn.Linear( + self.qformer.config.hidden_size, + self.language_model.config.hidden_size) + else: + self.language_projection = nn.Linear( + self.vision_model.hidden_size, + self.language_model.config.hidden_size) + if image_encoder_name_or_path is None and \ + language_model_name_or_path is None: + self.post_init() + # for deepspeed +
+[docs] + self.hidden_size = self.language_model.config.hidden_size
+ + self.config.hidden_size = self.language_model.config.hidden_size + +
+[docs] + def get_backend_model(self): + return self
+ + +
+[docs] + def vision_model_from_pretrained(self, pretrained_path): + self.vision_model = self.vision_model.from_pretrained( + pretrained_path, + config=self.config.vision_config)
+ + +
+[docs] + def qformer_from_pretrained(self, pretrained_path): + self.qformer = self.qformer.from_pretrained( + pretrained_path, + config=self.config.qformer_config)
+ + +
+[docs] + def language_model_from_pretrained(self, + pretrained_path, + low_resource=False, + use_prompt_cache=False): + # TODO remove the low resource related loading in the future + self.use_prompt_cache = use_prompt_cache + if low_resource: + kwargs = dict( + torch_dtype=torch.float16, + load_in_8bit=True, + device_map="auto" + ) + else: + kwargs = {} + past_model_dim = self.language_model.model_dim + self.language_model = AutoModelForCausalLM.from_pretrained( + pretrained_path, + config=self.config.text_config, + **kwargs) + if self.config.text_config.hidden_size != past_model_dim: + # should update the language projection layer + in_channels = self.language_projection.in_features + self.language_projection = nn.Linear(in_channels, + self.config.text_config.hidden_size, + bias=True)
+ + +
+[docs] + def vision_feature_select(self, image_forward_outs): + image_features = image_forward_outs.hidden_states[self.vision_feature_select_layer] + if self.select_vision_feature_type == "patch": + image_features = image_features[:, 1:] + elif self.select_vision_feature_type == "cls_patch": + image_features = image_features + else: + raise ValueError(f'Unexpected select feature: {self.select_feature}') + return image_features
+ + +
+[docs] + def register_prompt_cache(self, prompt_ids, prompt_keys_values): + """ + Update the prompt id and embedding for reuse in the future + + Args: + prompt_ids (torch.LongTensor): The id of the prompt. + prompt_keys_values (torch.FloatTensor): The embedding of the prompt. + + Returns: + None + """ + self.prompt_ids = prompt_ids + self.prompt_keys_values = prompt_keys_values + self.with_prompt_cache = True
+ + +
+[docs] + def save_prompt_cache(self, path): + """ + Save prompt embedding and id. + + Args: + path: The path to save the prompt embedding and id. + + Returns: + None + """ + + torch.save( + dict( + prompt_ids=self.prompt_ids, + prompt_keys_values=self.prompt_keys_values + ), + path)
+ + +
+[docs] + def load_prompt_cache(self, path): + """ + Load prompt embedding and id. + Args: + path: The path to load the prompt embedding and id. + + Returns: + None + """ + prompt_cache = torch.load(path) + self.register_prompt_cache(prompt_cache["prompt_ids"], + prompt_cache["prompt_keys_values"])
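A hedged sketch of the prompt-cache round trip defined above: prompt_ids and prompt_kv are assumed to come from an earlier forward pass over a fixed prompt with use_cache=True, and the file path is hypothetical.

model.register_prompt_cache(prompt_ids, prompt_kv)  # cache the fixed prompt's ids and key/value states
model.save_prompt_cache("cache/system_prompt.pt")   # persists both via torch.save
# ... later, restore the cache before reusing the prompt ...
model.load_prompt_cache("cache/system_prompt.pt")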
+ + +
+[docs] + def get_tokenizer(self): + return self.tokenizer
+ + +
+[docs] + def forward( + self, + input_ids: torch.LongTensor = None, + pixel_values: Optional[torch.FloatTensor] = None, + images: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + image_token_indexes: Optional[List] = [0], + one_sample_multiple_images: bool = False, + ) -> Union[Tuple, CausalLMOutputWithPast]: + if pixel_values is None and images is not None: + pixel_values = images + + if not one_sample_multiple_images: + batch_size = pixel_values.shape[0] + else: + batch_size = 1 + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + if not self.custom_vision_model: + # do the processing as blip2 and mini gpt-4; + if past_key_values is not None and input_ids.shape[1] == 1: + # no need to recompute the key values + attention_mask = torch.ones(( + attention_mask.shape[0], + past_key_values[-1][-1].shape[-2] + 1), + dtype=attention_mask.dtype, + device=attention_mask.device) + else: + image_embeds = self.vision_model( + pixel_values, return_dict=True).last_hidden_state + image_attention_mask = torch.ones( + image_embeds.size()[:-1], + dtype=torch.long, + device=image_embeds.device) + if self.with_qformer: + query_tokens = self.query_tokens.expand( + image_embeds.shape[0], -1, -1) + query_outputs = self.qformer( + query_embeds=query_tokens, + encoder_hidden_states=image_embeds, + encoder_attention_mask=image_attention_mask, + return_dict=True, + ) + else: + query_outputs = image_embeds + query_output = query_outputs.last_hidden_state + language_model_inputs = self.language_projection(query_output) + inputs_embeds, attention_mask = \ + self.processor_image_token_in_minigpt4( + input_ids, + language_model_inputs, + attention_mask, + image_token_indexes, + pixel_values, + batch_size) + input_ids = None + else: + # do the processing in the vision model + # language is the causallm model. 
+ # so use language model.model to do the embed_tokens + input_ids, attention_mask, past_key_values, inputs_embeds, labels = \ + self.vision_model.prepare_inputs_labels_for_multimodal( + input_ids, attention_mask, + past_key_values, labels, + pixel_values, + self.language_projection, + self.language_model.model) + # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) + # TODO check how to generate the labels with image embeddings + # print(input_ids, attention_mask) + # if inputs_embeds is not None: + # print("input_embeds", inputs_embeds.shape) + # attention_mask.shape, inputs_embeds.shape) + # TODO remove this code by fixing the ddp training issue + inputs_embeds = inputs_embeds.to( + self.language_model.lm_head.weight.dtype) + outputs = self.language_model( + input_ids=input_ids, + attention_mask=attention_mask, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict + ) + if labels is not None: + logits = outputs[0] + shift_logits = logits[..., :-1, :].contiguous() + shift_labels = labels[..., 1:].contiguous() + # Flatten the tokens + loss_fct = CrossEntropyLoss() + shift_logits = shift_logits.view( + -1, self.config.text_config.vocab_size) + shift_labels = shift_labels.view(-1) + # Enable model/pipeline parallelism + shift_labels = shift_labels.to(shift_logits.device) + loss = loss_fct(shift_logits, shift_labels) + + if not return_dict: + output = (shift_logits,) + outputs[1:] + return ((loss,) + output) if loss is not None else output + + return CausalLMOutputWithPast( + loss=loss, + logits=logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + )
+ + +
+[docs] + def processor_image_token_in_minigpt4(self, + input_ids, + language_model_inputs, + attention_mask, + image_token_indexes, + pixel_values, + batch_size=1): + language_attention_mask = torch.ones( + language_model_inputs.size()[:-1], + dtype=torch.long, device=language_model_inputs.device + ) + if input_ids is None: + input_ids = ( + torch.LongTensor([[self.config.text_config.bos_token_id]]) + .repeat(batch_size, 1) + .to(language_model_inputs.device) + ) + if attention_mask is None: + attention_mask = torch.ones_like(input_ids) + attention_mask = attention_mask.to(language_attention_mask.device) + + # concatenate query embeddings with prompt embeddings + inputs_embeds = self.get_input_embeddings()(input_ids) + inputs_embeds = inputs_embeds.to(device=language_model_inputs.device) + # concatenate the text embeddings with image embeddings + inputs_embeds_with_images = [] + attention_mask_with_images = [] + # currently we only support with one image + start_index, end_index = 0, 0 + assert len(image_token_indexes) == pixel_values.shape[0] + # token format: (# text, # image)xN, # text + + for idx, image_token_index in enumerate(image_token_indexes): + end_index += image_token_index + inputs_embeds_with_images.append( + inputs_embeds[:, start_index:end_index]) + inputs_embeds_with_images.append(language_model_inputs[idx][None]) + attention_mask_with_images.append( + attention_mask[:, start_index:end_index]) + attention_mask_with_images.append( + language_attention_mask[idx][None]) + start_index = end_index + + inputs_embeds_with_images.append( + inputs_embeds[:, image_token_indexes[-1]:]) + inputs_embeds = torch.cat( + inputs_embeds_with_images, dim=1) + attention_mask_with_images.append( + attention_mask[:, image_token_indexes[-1]:]) + attention_mask = torch.cat(attention_mask_with_images, dim=1) + # comebine the embeds + inputs_embeds = inputs_embeds.to( + self.language_model.lm_head.weight.dtype) + attention_mask = attention_mask.to( + self.language_model.lm_head.weight.dtype) + return inputs_embeds, attention_mask
+ + + @torch.no_grad() +
+[docs] + def generate( + self, + pixel_values: torch.FloatTensor, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.LongTensor] = None, + image_token_indexes: Optional[List] = [0], + one_sample_multiple_images: Optional[bool] = False, + images: Optional[torch.LongTensor] = None, + **generate_kwargs, + ) -> torch.LongTensor: + """ + Overrides `generate` function to be able to use the model as a conditional generator. + + Args: + pixel_values (`torch.FloatTensor` of shape (batch_size, num_channels, height, width)): + Input images to be processed. + input_ids (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*): + The sequence used as a prompt for the generation. + attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*): + Mask to avoid performing attention on padding token indices + image_token_indexes (bool, *optional*): + The index for inserting the image tokens. + one_sample_multiple_images: (bool, *optional*): + The flag for inference that the input batch size is 1 and contain multiple images. + + Returns: + captions (list): A list of strings of length batch_size * num_captions. + """ + if pixel_values is None and images is not None: + pixel_values = images + + if not one_sample_multiple_images: + batch_size = pixel_values.shape[0] + else: + batch_size = 1 + if not self.custom_vision_model: + # do the processing as blip2 and mini gpt-4; + image_embeds = self.vision_model( + pixel_values, return_dict=True).last_hidden_state + image_attention_mask = torch.ones( + image_embeds.size()[:-1], + dtype=torch.long, + device=image_embeds.device) + if self.with_qformer: + query_tokens = self.query_tokens.expand( + image_embeds.shape[0], -1, -1) + query_outputs = self.qformer( + query_embeds=query_tokens, + encoder_hidden_states=image_embeds, + encoder_attention_mask=image_attention_mask, + return_dict=True, + ) + else: + query_outputs = image_embeds + query_output = query_outputs.last_hidden_state + language_model_inputs = self.language_projection(query_output) + inputs_embeds, attention_mask = \ + self.processor_image_token_in_minigpt4( + input_ids, + language_model_inputs, + attention_mask, + image_token_indexes, + pixel_values, + batch_size) + input_ids = None + else: + # do the processing in the vision model + # language is the causallm model. + # so use language model.model to do the embed_tokens + if pixel_values.dim() == 3: + # the batch dim is missing; + pixel_values = pixel_values[None] + input_ids, attention_mask, past_key_values, inputs_embeds, labels = \ + self.vision_model.prepare_inputs_labels_for_multimodal( + input_ids, attention_mask, + None, None, + pixel_values, + self.language_projection, + self.language_model.model) + # convert the dtype. + # FIXME check when need to do this + inputs_embeds = inputs_embeds.to( + device=self.language_model.lm_head.weight.device) + inputs_embeds = inputs_embeds.to( + self.language_model.lm_head.weight.dtype) + outputs = self.language_model.generate( + inputs_embeds=inputs_embeds, + attention_mask=attention_mask, + **generate_kwargs, + ) + return outputs
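A hedged generation sketch, not the project's documented workflow: it assumes a transformers processor matching the checkpoint (Blip2Processor and the example checkpoint name are assumptions) and that model is a CustomAutoVision2SeqModel built for that checkpoint; max_new_tokens is simply forwarded to the language model's generate.

from PIL import Image
from transformers import Blip2Processor

processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")  # assumed matching processor
image = Image.open("example.jpg")  # hypothetical image file
inputs = processor(images=image, text="Describe the image.", return_tensors="pt")
generated_ids = model.generate(
    pixel_values=inputs["pixel_values"],
    input_ids=inputs["input_ids"],
    attention_mask=inputs["attention_mask"],
    image_token_indexes=[0],  # splice the image embeddings before the text tokens
    max_new_tokens=64,
)
caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]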
\ No newline at end of file
diff --git a/_modules/lmflow/models/vision_encoder/clip_encoder.html b/_modules/lmflow/models/vision_encoder/clip_encoder.html new file mode 100644 index 000000000..c793d9d45 --- /dev/null +++ b/_modules/lmflow/models/vision_encoder/clip_encoder.html @@ -0,0 +1,738 @@
+ lmflow.models.vision_encoder.clip_encoder — LMFlow documentation

Source code for lmflow.models.vision_encoder.clip_encoder

+from webbrowser import get
+import torch
+import torch.nn as nn
+
+from transformers import CLIPVisionModel, CLIPImageProcessor, CLIPVisionConfig
+
+from lmflow.utils.constants import (IGNORE_INDEX,
+                                    IMAGE_TOKEN_INDEX,
+                                    DEFAULT_IMAGE_PATCH_TOKEN,
+                                    DEFAULT_IM_START_TOKEN,
+                                    DEFAULT_IM_END_TOKEN)
+
+
+[docs] +def build_vision_tower(vision_tower_cfg, **kwargs): + vision_tower = getattr(vision_tower_cfg, 'image_encoder_name_or_path', "openai/clip-vit-large-patch14") + if vision_tower.startswith("openai") or vision_tower.startswith("laion"): + return CLIPVisionTower(vision_tower, args=vision_tower_cfg, **kwargs) + + raise ValueError(f'Unknown vision tower: {vision_tower}')
+ + +# FIXME check if can directly use the BlipVisionEncoder +
+[docs] +class CLIPVisionTower(nn.Module): + def __init__(self, vision_tower, args, delay_load=False): + super().__init__() + +
+[docs] + self.is_loaded = False
+ + +
+[docs] + self.vision_tower_name = vision_tower
+ +
+[docs] + self.select_layer = args.vision_select_layer
+ +
+[docs] + self.select_feature = getattr(args, 'vision_select_feature', 'patch')
+ + if not delay_load: + self.load_model() + else: + self.cfg_only = CLIPVisionConfig.from_pretrained(self.vision_tower_name) + +
+[docs] + def load_model(self): + self.image_processor = CLIPImageProcessor.from_pretrained(self.vision_tower_name) + self.vision_tower = CLIPVisionModel.from_pretrained(self.vision_tower_name) + self.vision_tower.requires_grad_(False) + self.is_loaded = True
+ + +
+[docs] + def encode_images(self, images, language_projection): + image_features = self(images) + # FIXME the language projection is register in the CustomAutoVision2SeqModel + # check how to move this code to improve the readability + if language_projection is not None: + image_features = language_projection(image_features) + return image_features
+ + +
+[docs] + def feature_select(self, image_forward_outs): + image_features = image_forward_outs.hidden_states[self.select_layer] + if self.select_feature == 'patch': + image_features = image_features[:, 1:] + elif self.select_feature == 'cls_patch': + image_features = image_features + else: + raise ValueError(f'Unexpected select feature: {self.select_feature}') + return image_features
+ + + @torch.no_grad() +
+[docs] + def forward(self, images): + if type(images) is list: + image_features = [] + for image in images: + image_forward_out = self.vision_tower(image.to(device=self.device, dtype=self.dtype).unsqueeze(0), output_hidden_states=True) + image_feature = self.feature_select(image_forward_out).to(image.dtype) + image_features.append(image_feature) + else: + image_forward_outs = self.vision_tower(images.to(device=self.device, dtype=self.dtype), output_hidden_states=True) + image_features = self.feature_select(image_forward_outs).to(images.dtype) + + return image_features
+ + + @property +
+[docs] + def dummy_feature(self): + return torch.zeros(1, self.hidden_size, device=self.device, dtype=self.dtype)
+ + + @property +
+[docs] + def dtype(self): + return self.vision_tower.dtype
+ + + @property +
+[docs] + def device(self): + return self.vision_tower.device
+ + + @property +
+[docs] + def config(self): + if self.is_loaded: + return self.vision_tower.config + else: + return self.cfg_only
+ + + @property +
+[docs] + def hidden_size(self): + return self.config.hidden_size
+ + + @property +
+[docs] + def num_patches(self): + return (self.config.image_size // self.config.patch_size) ** 2
+ + +
+[docs] + def prepare_inputs_labels_for_multimodal( + self, input_ids, attention_mask, past_key_values, labels, images, + language_projection=None, + language_model=None, + **kwargs + ): + ''' + Copy from the LLAVA code base. + Should be polished. + ''' + vision_tower = self.vision_tower + # commonly used in model.generate (past_key_values is not None) + # to avoid forward the image multiple time + if vision_tower is None or images is None or input_ids.shape[1] == 1: + if (past_key_values is not None and + vision_tower is not None and + images is not None and + input_ids.shape[1] == 1): + attention_mask = torch.ones(( + attention_mask.shape[0], + past_key_values[-1][-1].shape[-2] + 1), + dtype=attention_mask.dtype, device=attention_mask.device) + return input_ids, attention_mask, past_key_values, None, labels + if type(images) is list or images.ndim == 5: + concat_images = torch.cat([image for image in images], dim=0) + image_features = self.encode_images(concat_images, language_projection) + split_sizes = [image.shape[0] for image in images] + image_features = torch.split(image_features, split_sizes, dim=0) + image_features = [x.flatten(0, 1) for x in image_features] + else: + image_features = self.encode_images(images, language_projection) + new_input_embeds = [] + new_labels = [] if labels is not None else None + cur_image_idx = 0 + for batch_idx, cur_input_ids in enumerate(input_ids): + if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0: + # multimodal LLM, but the current sample is not multimodal + cur_input_embeds = language_model.embed_tokens(cur_input_ids) + cur_input_embeds = cur_input_embeds + (0. * language_projection(vision_tower.dummy_feature)).sum() + new_input_embeds.append(cur_input_embeds) + if labels is not None: + new_labels.append(labels[batch_idx]) + cur_image_idx += 1 + continue + image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0] + cur_new_input_embeds = [] + if labels is not None: + cur_labels = labels[batch_idx] + cur_new_labels = [] + assert cur_labels.shape == cur_input_ids.shape + while image_token_indices.numel() > 0: + cur_image_features = image_features[cur_image_idx] + image_token_start = image_token_indices[0] + # print("image token_start", image_token_start, + # "curr_input_ids", cur_input_ids.shape) + if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False): + cur_new_input_embeds.append(language_model.embed_tokens(cur_input_ids[:image_token_start-1]).detach()) + cur_new_input_embeds.append(language_model.embed_tokens(cur_input_ids[image_token_start-1:image_token_start])) + cur_new_input_embeds.append(cur_image_features) + cur_new_input_embeds.append(language_model.embed_tokens(cur_input_ids[image_token_start+1:image_token_start+2])) + if labels is not None: + cur_new_labels.append(cur_labels[:image_token_start]) + cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype)) + cur_new_labels.append(cur_labels[image_token_start:image_token_start+1]) + cur_labels = cur_labels[image_token_start+2:] + else: + cur_input_ids = cur_input_ids.to(device=language_model.device) + cur_new_input_embeds.append(language_model.embed_tokens(cur_input_ids[:image_token_start])) + cur_new_input_embeds.append(cur_image_features) + if labels is not None: + cur_new_labels.append(cur_labels[:image_token_start]) + cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype)) + cur_labels 
= cur_labels[image_token_start+1:] + cur_image_idx += 1 + if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False): + cur_input_ids = cur_input_ids[image_token_start+2:] + else: + cur_input_ids = cur_input_ids[image_token_start+1:] + image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0] + if cur_input_ids.numel() > 0: + if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False): + cur_new_input_embeds.append(language_model.embed_tokens(cur_input_ids).detach()) + else: + cur_new_input_embeds.append(language_model.embed_tokens(cur_input_ids)) + if labels is not None: + cur_new_labels.append(cur_labels) + cur_new_input_embeds = [x.to(device=self.device) for x in cur_new_input_embeds] + cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0) + new_input_embeds.append(cur_new_input_embeds) + if labels is not None: + cur_new_labels = torch.cat(cur_new_labels, dim=0) + new_labels.append(cur_new_labels) + + if any(x.shape != new_input_embeds[0].shape for x in new_input_embeds): + max_len = max(x.shape[0] for x in new_input_embeds) + + new_input_embeds_align = [] + for cur_new_embed in new_input_embeds: + cur_new_embed = torch.cat((cur_new_embed, torch.zeros((max_len - cur_new_embed.shape[0], cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0) + new_input_embeds_align.append(cur_new_embed) + new_input_embeds = torch.stack(new_input_embeds_align, dim=0) + + if labels is not None: + new_labels_align = [] + _new_labels = new_labels + for cur_new_label in new_labels: + cur_new_label = torch.cat((cur_new_label, torch.full((max_len - cur_new_label.shape[0],), IGNORE_INDEX, dtype=cur_new_label.dtype, device=cur_new_label.device)), dim=0) + new_labels_align.append(cur_new_label) + new_labels = torch.stack(new_labels_align, dim=0) + if attention_mask is not None: + new_attention_mask = [] + for cur_attention_mask, cur_new_labels, cur_new_labels_align in zip(attention_mask, _new_labels, new_labels): + new_attn_mask_pad_left = torch.full((cur_new_labels.shape[0] - labels.shape[1],), True, dtype=attention_mask.dtype, device=attention_mask.device) + new_attn_mask_pad_right = torch.full((cur_new_labels_align.shape[0] - cur_new_labels.shape[0],), False, dtype=attention_mask.dtype, device=attention_mask.device) + cur_new_attention_mask = torch.cat((new_attn_mask_pad_left, cur_attention_mask, new_attn_mask_pad_right), dim=0) + new_attention_mask.append(cur_new_attention_mask) + attention_mask = torch.stack(new_attention_mask, dim=0) + assert attention_mask.shape == new_labels.shape + else: + new_input_embeds = torch.stack(new_input_embeds, dim=0) + if labels is not None: + new_labels = torch.stack(new_labels, dim=0) + if attention_mask is not None: + new_attn_mask_pad_left = torch.full((attention_mask.shape[0], new_input_embeds.shape[1] - input_ids.shape[1]), True, dtype=attention_mask.dtype, device=attention_mask.device) + attention_mask = torch.cat((new_attn_mask_pad_left, attention_mask), dim=1) + assert attention_mask.shape == new_input_embeds.shape[:2] + return None, attention_mask, past_key_values, \ + new_input_embeds, new_labels
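A standalone usage sketch for the tower above, assuming only the attributes read in __init__ (vision_select_layer and, optionally, vision_select_feature); the SimpleNamespace config and the dummy tensor are illustrative.

from types import SimpleNamespace
import torch
from lmflow.models.vision_encoder.clip_encoder import CLIPVisionTower

args = SimpleNamespace(vision_select_layer=-2, vision_select_feature="patch")
tower = CLIPVisionTower("openai/clip-vit-large-patch14", args=args)
pixel_values = torch.randn(1, 3, 224, 224)  # dummy image batch
features = tower(pixel_values.to(device=tower.device, dtype=tower.dtype))
print(features.shape)  # (1, num_patches, hidden_size)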
\ No newline at end of file
diff --git a/_modules/lmflow/optim/adabelief.html b/_modules/lmflow/optim/adabelief.html new file mode 100644 index 000000000..ca9d484c2 --- /dev/null +++ b/_modules/lmflow/optim/adabelief.html @@ -0,0 +1,671 @@
+ lmflow.optim.adabelief — LMFlow documentation

Source code for lmflow.optim.adabelief

+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import math
+import torch
+from torch.optim.optimizer import Optimizer
+
+
+[docs] +class AdaBelief(Optimizer): + r"""Implements AdaBelief algorithm. Modified from Adam in PyTorch + reference: AdaBelief Optimizer, adapting stepsizes by the belief in observed gradients, NeurIPS 2020 + """ + + def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-16, + weight_decay=0, amsgrad=False, weight_decouple=True, fixed_decay=False, rectify=True, + degenerated_to_sgd=True, print_change_log = True): + + if not 0.0 <= lr: + raise ValueError("Invalid learning rate: {}".format(lr)) + if not 0.0 <= eps: + raise ValueError("Invalid epsilon value: {}".format(eps)) + if not 0.0 <= betas[0] < 1.0: + raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) + if not 0.0 <= betas[1] < 1.0: + raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) + +
+[docs] + self.degenerated_to_sgd = degenerated_to_sgd
+ + if isinstance(params, (list, tuple)) and len(params) > 0 and isinstance(params[0], dict): + for param in params: + if 'betas' in param and (param['betas'][0] != betas[0] or param['betas'][1] != betas[1]): + param['buffer'] = [[None, None, None] for _ in range(10)] + +
+[docs] + defaults = dict(lr=lr, betas=betas, eps=eps, + weight_decay=weight_decay, amsgrad=amsgrad, buffer=[[None, None, None] for _ in range(10)])
+ + super(AdaBelief, self).__init__(params, defaults) + + self.degenerated_to_sgd = degenerated_to_sgd +
+[docs] + self.weight_decouple = weight_decouple
+ +
+[docs] + self.rectify = rectify
+ +
+[docs] + self.fixed_decay = fixed_decay
+ + if self.weight_decouple: + print('Weight decoupling enabled in AdaBelief') + if self.fixed_decay: + print('Weight decay fixed') + if self.rectify: + print('Rectification enabled in AdaBelief') + if amsgrad: + print('AMSGrad enabled in AdaBelief') + +
+[docs] + def __setstate__(self, state): + super(AdaBelief, self).__setstate__(state) + for group in self.param_groups: + group.setdefault('amsgrad', False)
+ + +
+[docs] + def reset(self): + for group in self.param_groups: + for p in group['params']: + state = self.state[p] + amsgrad = group['amsgrad'] + + # State initialization + state['step'] = 0 + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p.data) + # Exponential moving average of squared gradient values + state['exp_avg_var'] = torch.zeros_like(p.data) + + if amsgrad: + # Maintains max of all exp. moving avg. of sq. grad. values + state['max_exp_avg_var'] = torch.zeros_like(p.data)
+ + +
+[docs] + def step(self, closure=None): + """Performs a single optimization step. + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + if closure is not None: + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + + # cast data type + half_precision = False + if p.data.dtype == torch.float16: + half_precision = True + p.data = p.data.float() + p.grad = p.grad.float() + + grad = p.grad.data + if grad.is_sparse: + raise RuntimeError( + 'AdaBelief does not support sparse gradients, please consider SparseAdam instead') + amsgrad = group['amsgrad'] + + state = self.state[p] + + beta1, beta2 = group['betas'] + + # State initialization + if len(state) == 0: + state['step'] = 0 + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p.data) + # Exponential moving average of squared gradient values + state['exp_avg_var'] = torch.zeros_like(p.data) + if amsgrad: + # Maintains max of all exp. moving avg. of sq. grad. values + state['max_exp_avg_var'] = torch.zeros_like(p.data) + + # perform weight decay, check if decoupled weight decay + if self.weight_decouple: + if not self.fixed_decay: + p.data.mul_(1.0 - group['lr'] * group['weight_decay']) + else: + p.data.mul_(1.0 - group['weight_decay']) + else: + if group['weight_decay'] != 0: + grad.add_(p.data, alpha=group['weight_decay']) + + # get current state variable + exp_avg, exp_avg_var = state['exp_avg'], state['exp_avg_var'] + + state['step'] += 1 + bias_correction1 = 1 - beta1 ** state['step'] + bias_correction2 = 1 - beta2 ** state['step'] + + # Update first and second moment running average + exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) + grad_residual = grad - exp_avg + exp_avg_var.mul_(beta2).addcmul_( grad_residual, grad_residual, value=1 - beta2) + + if amsgrad: + max_exp_avg_var = state['max_exp_avg_var'] + # Maintains the maximum of all 2nd moment running avg. till now + torch.max(max_exp_avg_var, exp_avg_var.add_(group['eps']), out=max_exp_avg_var) + + # Use the max. for normalizing running avg. of gradient + denom = (max_exp_avg_var.sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) + else: + denom = (exp_avg_var.add_(group['eps']).sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) + + # update + if not self.rectify: + # Default update + step_size = group['lr'] / bias_correction1 + p.data.addcdiv_( exp_avg, denom, value=-step_size) + + else: # Rectified update, forked from RAdam + buffered = group['buffer'][int(state['step'] % 10)] + if state['step'] == buffered[0]: + N_sma, step_size = buffered[1], buffered[2] + else: + buffered[0] = state['step'] + beta2_t = beta2 ** state['step'] + N_sma_max = 2 / (1 - beta2) - 1 + N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t) + buffered[1] = N_sma + + # more conservative since it's an approximated value + if N_sma >= 5: + step_size = math.sqrt( + (1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / ( + N_sma_max - 2)) / (1 - beta1 ** state['step']) + elif self.degenerated_to_sgd: + step_size = 1.0 / (1 - beta1 ** state['step']) + else: + step_size = -1 + buffered[2] = step_size + + if N_sma >= 5: + denom = exp_avg_var.sqrt().add_(group['eps']) + p.data.addcdiv_(exp_avg, denom, value=-step_size * group['lr']) + elif step_size > 0: + p.data.add_( exp_avg, alpha=-step_size * group['lr']) + + if half_precision: + p.data = p.data.half() + p.grad = p.grad.half() + + return loss
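A minimal training-loop sketch (the tiny model and data are placeholders): AdaBelief is used here as a drop-in replacement for Adam.

import torch
import torch.nn as nn
from lmflow.optim.adabelief import AdaBelief

net = nn.Linear(10, 1)
optimizer = AdaBelief(net.parameters(), lr=1e-3, eps=1e-16, rectify=True)

for _ in range(100):
    x, y = torch.randn(32, 10), torch.randn(32, 1)
    loss = nn.functional.mse_loss(net(x), y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()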
\ No newline at end of file
diff --git a/_modules/lmflow/optim/adabound.html b/_modules/lmflow/optim/adabound.html new file mode 100644 index 000000000..1e15869ff --- /dev/null +++ b/_modules/lmflow/optim/adabound.html @@ -0,0 +1,633 @@
+ lmflow.optim.adabound — LMFlow documentation

Source code for lmflow.optim.adabound

+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import math
+
+import torch
+from torch.optim.optimizer import Optimizer
+
+
+[docs] +class AdaBound(Optimizer): + r"""Implements AdaBound algorithm. + + It has been proposed in `Adaptive Gradient Methods with Dynamic Bound of + Learning Rate + https://arxiv.org/abs/1902.09843 + Note: + Reference code: https://github.com/Luolc/AdaBound + """ + + def __init__( + self, + params, + lr: float = 1e-3, + betas = (0.9, 0.999), + final_lr: float = 0.1, + gamma: float = 1e-3, + eps: float = 1e-8, + weight_decay: float = 0, + amsbound: bool = False, + ) -> None: + if lr <= 0.0: + raise ValueError("Invalid learning rate: {}".format(lr)) + if eps < 0.0: + raise ValueError("Invalid epsilon value: {}".format(eps)) + if not 0.0 <= betas[0] < 1.0: + raise ValueError( + "Invalid beta parameter at index 0: {}".format(betas[0]) + ) + if not 0.0 <= betas[1] < 1.0: + raise ValueError( + "Invalid beta parameter at index 1: {}".format(betas[1]) + ) + if final_lr < 0.0: + raise ValueError( + "Invalid final learning rate: {}".format(final_lr) + ) + if not 0.0 <= gamma < 1.0: + raise ValueError("Invalid gamma parameter: {}".format(gamma)) + if weight_decay < 0: + raise ValueError( + "Invalid weight_decay value: {}".format(weight_decay) + ) +
+[docs] + defaults = dict( + lr=lr, + betas=betas, + final_lr=final_lr, + gamma=gamma, + eps=eps, + weight_decay=weight_decay, + amsbound=amsbound, + )
+ + super(AdaBound, self).__init__(params, defaults) +
+[docs] + self.base_lrs = [group["lr"] for group in self.param_groups]
+ + +
+[docs] + def __setstate__(self, state) -> None: + super(AdaBound, self).__setstate__(state) + for group in self.param_groups: + group.setdefault("amsbound", False)
+ + +
+[docs] + def step(self, closure = None): + r"""Performs a single optimization step. + + Arguments: + closure: A closure that reevaluates the model and returns the loss. + """ + loss = None + if closure is not None: + loss = closure() + + for group, base_lr in zip(self.param_groups, self.base_lrs): + for p in group["params"]: + if p.grad is None: + continue + grad = p.grad.data + if grad.is_sparse: + msg = ( + "AdaBound does not support sparse gradients, " + "please consider SparseAdam instead" + ) + raise RuntimeError(msg) + amsbound = group["amsbound"] + + state = self.state[p] + + # State initialization + if len(state) == 0: + state["step"] = 0 + # Exponential moving average of gradient values + state["exp_avg"] = torch.zeros_like( + p, memory_format=torch.preserve_format + ) + # Exponential moving average of squared gradient values + state["exp_avg_sq"] = torch.zeros_like( + p, memory_format=torch.preserve_format + ) + if amsbound: + # Maintains max of all exp. moving avg. of + # sq. grad. values + state["max_exp_avg_sq"] = torch.zeros_like( + p, memory_format=torch.preserve_format + ) + + exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"] + if amsbound: + max_exp_avg_sq = state["max_exp_avg_sq"] + beta1, beta2 = group["betas"] + + state["step"] += 1 + + if group["weight_decay"] != 0: + grad = grad.add(p.data, alpha=group["weight_decay"]) + + # Decay the first and second moment running average coefficient + exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) + if amsbound: + # Maintains the maximum of all 2nd moment running + # avg. till now + torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq) + # Use the max. for normalizing running avg. of gradient + denom = max_exp_avg_sq.sqrt().add_(group["eps"]) + else: + denom = exp_avg_sq.sqrt().add_(group["eps"]) + + bias_correction1 = 1 - beta1 ** state["step"] + bias_correction2 = 1 - beta2 ** state["step"] + step_size = ( + group["lr"] + * math.sqrt(bias_correction2) + / bias_correction1 + ) + + # Applies bounds on actual learning rate + # lr_scheduler cannot affect final_lr, this is a workaround + # to apply lr decay + final_lr = group["final_lr"] * group["lr"] / base_lr + lower_bound = final_lr * ( + 1 - 1 / (group["gamma"] * state["step"] + 1) + ) + upper_bound = final_lr * ( + 1 + 1 / (group["gamma"] * state["step"]) + ) + step_size = torch.full_like(denom, step_size) + step_size.div_(denom).clamp_(lower_bound, upper_bound).mul_( + exp_avg + ) + + p.data.add_(-step_size) + return loss
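An illustrative sketch: AdaBound behaves like Adam early on, and as the step count grows it clips each parameter's step into [lower_bound, upper_bound], both of which converge toward final_lr, so training gradually transitions toward SGD-like updates.

import torch
import torch.nn as nn
from lmflow.optim.adabound import AdaBound

net = nn.Sequential(nn.Linear(10, 32), nn.ReLU(), nn.Linear(32, 1))
optimizer = AdaBound(net.parameters(), lr=1e-3, final_lr=0.1, gamma=1e-3)

for _ in range(10):
    loss = net(torch.randn(8, 10)).pow(2).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()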
\ No newline at end of file
diff --git a/_modules/lmflow/optim/adadelta.html b/_modules/lmflow/optim/adadelta.html new file mode 100644 index 000000000..9f97bd4a7 --- /dev/null +++ b/_modules/lmflow/optim/adadelta.html @@ -0,0 +1,512 @@
+ lmflow.optim.adadelta — LMFlow documentation

Source code for lmflow.optim.adadelta

+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import torch
+from torch.optim.optimizer import Optimizer
+
+
+[docs] +class Adadelta(Optimizer): + def __init__(self, params, lr=1.0, rho=0.95, eps=1e-6): +
+[docs] + defaults = dict(lr=lr, rho=rho, eps=eps)
+ + super(Adadelta, self).__init__(params, defaults) + +
+[docs] + def step(self, closure=None): + loss = None + if closure is not None: + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + grad = p.grad.data + state = self.state[p] + + if len(state) == 0: + state['step'] = 0 + state['square_avg'] = torch.zeros_like(p.data) + state['acc_delta'] = torch.zeros_like(p.data) + + square_avg, acc_delta = state['square_avg'], state['acc_delta'] + rho, eps = group['rho'], group['eps'] + + state['step'] += 1 + + square_avg.mul_(rho).addcmul_(1 - rho, grad, grad) + + std = square_avg.add(eps).sqrt_() + delta = acc_delta.add(eps).sqrt_().div_(std).mul_(grad) + + p.data.add_(-delta) + + acc_delta.mul_(rho).addcmul_(1 - rho, delta, delta) + + return loss
\ No newline at end of file
diff --git a/_modules/lmflow/optim/adagrad.html b/_modules/lmflow/optim/adagrad.html new file mode 100644 index 000000000..c2adf18b4 --- /dev/null +++ b/_modules/lmflow/optim/adagrad.html @@ -0,0 +1,504 @@
+ lmflow.optim.adagrad — LMFlow documentation
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + + + +
+ + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +

Source code for lmflow.optim.adagrad

+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import torch
+from torch.optim.optimizer import Optimizer
+
+
+[docs] +class AdaGrad(torch.optim.Optimizer): + def __init__(self, params, lr=0.001, eps=1e-8, weight_decay=0): +
+[docs] + defaults = dict(lr=lr, eps=eps, weight_decay=weight_decay)
+ + super(AdaGrad, self).__init__(params, defaults) + +
+[docs]
+    def step(self, closure=None):
+        loss = None
+        if closure is not None:
+            loss = closure()
+
+        for group in self.param_groups:
+            for p in group['params']:
+                if p.grad is None:
+                    continue
+                grad = p.grad.data
+                if group['weight_decay'] != 0:
+                    # Keyword form replaces the deprecated add(Number, Tensor) overload.
+                    grad = grad.add(p.data, alpha=group['weight_decay'])
+
+                state = self.state[p]
+
+                if len(state) == 0:
+                    state['sum'] = torch.zeros_like(p.data)
+
+                # Accumulate squared gradients and scale the step by their root.
+                sq_sum = state['sum']
+                sq_sum.addcmul_(grad, grad, value=1)
+                std = sq_sum.sqrt().add_(group['eps'])
+                p.data.addcdiv_(grad, std, value=-group['lr'])
+
+        return loss
\ No newline at end of file
diff --git a/_modules/lmflow/optim/adam.html b/_modules/lmflow/optim/adam.html
new file mode 100644
index 000000000..6c9063d6f
--- /dev/null
+++ b/_modules/lmflow/optim/adam.html
@@ -0,0 +1,515 @@
+lmflow.optim.adam — LMFlow documentation

Source code for lmflow.optim.adam

+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import torch
+from torch.optim.optimizer import Optimizer
+
+
+[docs] +class Adam(Optimizer): + def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=1e-8): +
+[docs] + defaults = dict(lr=lr, betas=betas, eps=eps)
+ + super(Adam, self).__init__(params, defaults) + +
+[docs]
+    def step(self, closure=None):
+        loss = None
+        if closure is not None:
+            loss = closure()
+
+        for group in self.param_groups:
+            for p in group['params']:
+                if p.grad is None:
+                    continue
+                grad = p.grad.data
+
+                state = self.state[p]
+
+                if len(state) == 0:
+                    state['step'] = 0
+                    state['exp_avg'] = torch.zeros_like(p.data)
+                    state['exp_avg_sq'] = torch.zeros_like(p.data)
+
+                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
+                beta1, beta2 = group['betas']
+
+                state['step'] += 1
+
+                # First and second moment estimates (keyword forms replace the
+                # deprecated add_/addcmul_ Number overloads).
+                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
+                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
+
+                bias_correction1 = 1 - beta1 ** state['step']
+                bias_correction2 = 1 - beta2 ** state['step']
+
+                step_size = group['lr'] * (bias_correction2 ** 0.5) / bias_correction1
+                denom = exp_avg_sq.sqrt().add_(group['eps'])
+                p.data.addcdiv_(exp_avg, denom, value=-step_size)
+
+        return loss
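These optimizer classes follow the standard torch.optim.Optimizer interface, so they drop into an ordinary training loop. A minimal usage sketch (toy model and data; the import path is taken from this page's module name):

    import torch
    import torch.nn.functional as F
    from lmflow.optim.adam import Adam  # module shown in this page's title

    model = torch.nn.Linear(4, 1)
    optimizer = Adam(model.parameters(), lr=1e-3)

    for _ in range(5):
        x, y = torch.randn(16, 4), torch.randn(16, 1)
        loss = F.mse_loss(model(x), y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()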
\ No newline at end of file
diff --git a/_modules/lmflow/optim/adamax.html b/_modules/lmflow/optim/adamax.html
new file mode 100644
index 000000000..112f9d1d9
--- /dev/null
+++ b/_modules/lmflow/optim/adamax.html
@@ -0,0 +1,540 @@
+lmflow.optim.adamax — LMFlow documentation

Source code for lmflow.optim.adamax

+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import torch
+from torch.optim.optimizer import Optimizer
+
+
+[docs] +class Adamax(Optimizer): + def __init__(self, params, lr=2e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0): + if not 0.0 <= lr: + raise ValueError("Invalid learning rate: {}".format(lr)) + if not 0.0 <= eps: + raise ValueError("Invalid epsilon value: {}".format(eps)) + if not 0.0 <= betas[0] < 1.0: + raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) + if not 0.0 <= betas[1] < 1.0: + raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) + if not 0.0 <= weight_decay: + raise ValueError("Invalid weight_decay value: {}".format(weight_decay)) +
+[docs] + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
+ + super(Adamax, self).__init__(params, defaults) + +
+[docs] + def __setstate__(self, state): + super(Adamax, self).__setstate__(state)
+ + +
+[docs]
+    def step(self, closure=None):
+        loss = None
+        if closure is not None:
+            loss = closure()
+
+        for group in self.param_groups:
+            for p in group['params']:
+                if p.grad is None:
+                    continue
+                grad = p.grad.data
+                if grad.is_sparse:
+                    raise RuntimeError('Adamax does not support sparse gradients')
+                state = self.state[p]
+
+                # State initialization
+                if len(state) == 0:
+                    state['step'] = 0
+                    state['exp_avg'] = torch.zeros_like(p.data)
+                    state['exp_inf'] = torch.zeros_like(p.data)
+
+                exp_avg, exp_inf = state['exp_avg'], state['exp_inf']
+                beta1, beta2 = group['betas']
+
+                state['step'] += 1
+
+                if group['weight_decay'] != 0:
+                    grad = grad.add(p.data, alpha=group['weight_decay'])
+
+                # Update biased first moment estimate
+                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
+
+                # Update the exponentially weighted infinity norm
+                # (torch.amax replaces the legacy torch.max(..., out=(values, indices)) idiom).
+                norm_buf = torch.cat([
+                    exp_inf.mul_(beta2).unsqueeze(0),
+                    grad.abs().unsqueeze_(0)
+                ], 0)
+                torch.amax(norm_buf, 0, keepdim=False, out=exp_inf)
+
+                bias_correction = 1 - beta1 ** state['step']
+                clr = group['lr'] / bias_correction
+
+                p.data.addcdiv_(exp_avg, exp_inf + group['eps'], value=-clr)
+
+        return loss
\ No newline at end of file
diff --git a/_modules/lmflow/optim/adamp.html b/_modules/lmflow/optim/adamp.html
new file mode 100644
index 000000000..cec3efaeb
--- /dev/null
+++ b/_modules/lmflow/optim/adamp.html
@@ -0,0 +1,654 @@
+lmflow.optim.adamp — LMFlow documentation

Source code for lmflow.optim.adamp

+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import math
+import torch
+from torch.optim.optimizer import Optimizer
+
+
+
+[docs] +class AdamP(Optimizer): + r"""Implements AdamP algorithm. + + It has been proposed in `Slowing Down the Weight Norm Increase in + Momentum-based Optimizers` + https://arxiv.org/abs/2006.08217 + + Note: + Reference code: https://github.com/clovaai/AdamP + """ + + def __init__( + self, + params, + lr: float = 1e-3, + betas = (0.9, 0.999), + eps: float = 1e-8, + weight_decay: float = 0, + delta: float = 0.1, + wd_ratio: float = 0.1, + nesterov: bool = False, + ) -> None: + if lr <= 0.0: + raise ValueError("Invalid learning rate: {}".format(lr)) + if eps < 0.0: + raise ValueError("Invalid epsilon value: {}".format(eps)) + if not 0.0 <= betas[0] < 1.0: + raise ValueError( + "Invalid beta parameter at index 0: {}".format(betas[0]) + ) + if not 0.0 <= betas[1] < 1.0: + raise ValueError( + "Invalid beta parameter at index 1: {}".format(betas[1]) + ) + if weight_decay < 0: + raise ValueError( + "Invalid weight_decay value: {}".format(weight_decay) + ) + if delta < 0: + raise ValueError("Invalid delta value: {}".format(delta)) + if wd_ratio < 0: + raise ValueError("Invalid wd_ratio value: {}".format(wd_ratio)) + +
+[docs] + defaults = dict( + lr=lr, + betas=betas, + eps=eps, + weight_decay=weight_decay, + delta=delta, + wd_ratio=wd_ratio, + nesterov=nesterov, + )
+ + super(AdamP, self).__init__(params, defaults) + + @staticmethod +
+[docs] + def _channel_view(x): + return x.view(x.size(0), -1)
+ + + @staticmethod +
+[docs] + def _layer_view(x): + return x.view(1, -1)
+ + + @staticmethod +
+[docs] + def _cosine_similarity(x, y, eps, view_func): + x = view_func(x) + y = view_func(y) + + x_norm = x.norm(dim=1).add_(eps) + y_norm = y.norm(dim=1).add_(eps) + dot = (x * y).sum(dim=1) + + return dot.abs() / x_norm / y_norm
+ + +
+[docs] + def _projection(self, p, grad, perturb, delta, wd_ratio, eps): + wd = 1 + expand_size = [-1] + [1] * (len(p.shape) - 1) + for view_func in [self._channel_view, self._layer_view]: + cosine_sim = self._cosine_similarity(grad, p.data, eps, view_func) + + if cosine_sim.max() < delta / math.sqrt(view_func(p.data).size(1)): + p_n = p.data / view_func(p.data).norm(dim=1).view( + expand_size + ).add_(eps) + perturb -= p_n * view_func(p_n * perturb).sum(dim=1).view( + expand_size + ) + wd = wd_ratio + + return perturb, wd + + return perturb, wd
+ + +
+[docs] + def step(self, closure = None): + r"""Performs a single optimization step. + + Arguments: + closure: A closure that reevaluates the model and returns the loss. + """ + loss = None + if closure is not None: + loss = closure() + + for group in self.param_groups: + for p in group["params"]: + if p.grad is None: + continue + + grad = p.grad.data + beta1, beta2 = group["betas"] + nesterov = group["nesterov"] + + state = self.state[p] + + # State initialization + if len(state) == 0: + state["step"] = 0 + state["exp_avg"] = torch.zeros_like( + p.data, memory_format=torch.preserve_format + ) + state["exp_avg_sq"] = torch.zeros_like( + p.data, memory_format=torch.preserve_format + ) + + # Adam + exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"] + + state["step"] += 1 + bias_correction1 = 1 - beta1 ** state["step"] + bias_correction2 = 1 - beta2 ** state["step"] + + exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) + + denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_( + group["eps"] + ) + step_size = group["lr"] / bias_correction1 + + if nesterov: + perturb = (beta1 * exp_avg + (1 - beta1) * grad) / denom + else: + perturb = exp_avg / denom + + # Projection + wd_ratio = 1 + if len(p.shape) > 1: + perturb, wd_ratio = self._projection( + p, + grad, + perturb, + group["delta"], + group["wd_ratio"], + group["eps"], + ) + + # Weight decay + if group["weight_decay"] > 0: + p.data.mul_( + 1 - group["lr"] * group["weight_decay"] * wd_ratio + ) + + # Step + p.data.add_(perturb, alpha=-step_size) + + return loss
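The projection above only fires for tensors with more than one dimension, and only when the gradient is nearly radial, i.e. its channel-wise or layer-wise cosine similarity to the weights falls below delta / sqrt(fan-in). A standalone sketch of that criterion on toy tensors (names and values are illustrative, not from LMFlow):

    import math
    import torch

    p = torch.randn(16, 32)                  # a 2-D weight tensor
    grad = 0.5 * p + torch.randn_like(p)     # gradient partly aligned with p

    x = grad.view(grad.size(0), -1)          # channel view, as in _channel_view
    y = p.view(p.size(0), -1)
    cosine = (x * y).sum(dim=1).abs() / (x.norm(dim=1).add(1e-8) * y.norm(dim=1).add(1e-8))

    delta = 0.1
    print("project:", bool(cosine.max() < delta / math.sqrt(y.size(1))))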
\ No newline at end of file
diff --git a/_modules/lmflow/optim/adamw_schedule_free.html b/_modules/lmflow/optim/adamw_schedule_free.html
new file mode 100644
index 000000000..5052f435c
--- /dev/null
+++ b/_modules/lmflow/optim/adamw_schedule_free.html
@@ -0,0 +1,654 @@
+lmflow.optim.adamw_schedule_free — LMFlow documentation

Source code for lmflow.optim.adamw_schedule_free

+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+# 
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import torch
+import torch.optim
+import math
+
+
+[docs] +class AdamWScheduleFree(torch.optim.Optimizer): + r""" + Schedule-Free AdamW + As the name suggests, no scheduler is needed with this optimizer. + To add warmup, rather than using a learning rate schedule you can just + set the warmup_steps parameter. + + This optimizer requires that .train() and .eval() be called before the + beginning of training and evaluation respectively. The optimizer should + also be placed in eval mode when saving checkpoints. + """ + def __init__(self, + params, + lr=0.0025, + betas=(0.9, 0.999), + eps=1e-8, + weight_decay=0, + warmup_steps=0, + r=0.0, + weight_lr_power=2.0, + foreach=hasattr(torch, "_foreach_mul_") + ): + +
+[docs] + defaults = dict(lr=lr, + betas=betas, + eps=eps, + r=r, + k=0, + warmup_steps=warmup_steps, + train_mode=True, + weight_sum=0.0, + lr_max=-1.0, + weight_lr_power=weight_lr_power, + weight_decay=weight_decay, + foreach=foreach)
+ + super().__init__(params, defaults) + +
+[docs] + def eval(self): + for group in self.param_groups: + train_mode = group['train_mode'] + beta1, _ = group['betas'] + if train_mode: + for p in group['params']: + state = self.state[p] + if 'z' in state: + # Set p.data to x + p.data.lerp_(end=state['z'], weight=1-1/beta1) + group['train_mode'] = False
+ + +
+[docs] + def train(self): + for group in self.param_groups: + train_mode = group['train_mode'] + beta1, _ = group['betas'] + if not train_mode: + for p in group['params']: + state = self.state[p] + if 'z' in state: + # Set p.data to y + p.data.lerp_(end=state['z'], weight=1-beta1) + group['train_mode'] = True
+ + +
+[docs] + def step(self, closure=None): + """Performs a single optimization step. + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + """ + + loss = None + if closure is not None: + loss = closure() + + for group in self.param_groups: + eps = group['eps'] + beta1, beta2 = group['betas'] + decay = group['weight_decay'] + k = group['k'] + r = group['r'] + warmup_steps = group['warmup_steps'] + weight_lr_power = group['weight_lr_power'] + + if k < warmup_steps: + sched = (k+1) / warmup_steps + else: + sched = 1.0 + + bias_correction2 = 1 - beta2 ** (k+1) + lr = group['lr']*sched*math.sqrt(bias_correction2) + + lr_max = group['lr_max'] = max(lr, group['lr_max']) + + weight = ((k+1)**r) * (lr_max**weight_lr_power) + weight_sum = group['weight_sum'] = group['weight_sum'] + weight + + try: + ckp1 = weight/weight_sum + except ZeroDivisionError: + ckp1 = 0 + + if not group['train_mode']: + raise Exception("Not in train mode!") + + active_p = [p for p in group['params'] if p.grad is not None] + + for p in active_p: + if 'z' not in self.state[p]: + self.state[p]['z'] = torch.clone(p.data) + self.state[p]['exp_avg_sq'] = torch.zeros_like(p.data) + + if group['foreach'] and len(active_p) > 0: + y, grad, exp_avg_sq, z = zip(*[(p.data, + p.grad, + self.state[p]['exp_avg_sq'], + self.state[p]['z']) + for p in active_p]) + + # Decay the first and second moment running average coefficient + torch._foreach_mul_(exp_avg_sq, beta2) + torch._foreach_addcmul_(exp_avg_sq, grad, grad, value=1-beta2) + denom = torch._foreach_sqrt(exp_avg_sq) + torch._foreach_add_(denom, eps) + + # Normalize grad in-place for memory efficiency + torch._foreach_div_(grad, denom) + + # Weight decay calculated at y + if decay != 0: + torch._foreach_add_(grad, y, alpha=decay) + + # These operations update y in-place, + # without computing x explicitly. + torch._foreach_lerp_(y, z, weight=ckp1) + torch._foreach_add_(y, grad, alpha=lr*(beta1*(1-ckp1)-1)) + + # z step + torch._foreach_sub_(z, grad, alpha=lr) + else: + for p in active_p: + y = p.data # Notation to match theory + grad = p.grad.data + + state = self.state[p] + + z = state['z'] + exp_avg_sq = state['exp_avg_sq'] + + exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1-beta2) + denom = exp_avg_sq.sqrt().add_(eps) + + # Reuse grad buffer for memory efficiency + grad_normalized = grad.div_(denom) + + # Weight decay calculated at y + if decay != 0: + grad_normalized.add_(y, alpha=decay) + + # These operations update y in-place, + # without computing x explicitly. + y.lerp_(end=z, weight=ckp1) + y.add_(grad_normalized, alpha=lr*(beta1*(1-ckp1)-1)) + + # z step + z.sub_(grad_normalized, alpha=lr) + + group['k'] = k+1 + return loss
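Because the stored weights interpolate between the y and x sequences, the train()/eval() calls mentioned in the class docstring are required, including before saving checkpoints. A minimal usage sketch (toy model; the import path is taken from this page's module name):

    import torch
    import torch.nn.functional as F
    from lmflow.optim.adamw_schedule_free import AdamWScheduleFree  # module shown in this page's title

    model = torch.nn.Linear(10, 1)
    optimizer = AdamWScheduleFree(model.parameters(), lr=2.5e-3, warmup_steps=100)

    optimizer.train()                 # must be called before training
    for _ in range(3):
        x, y = torch.randn(8, 10), torch.randn(8, 1)
        loss = F.mse_loss(model(x), y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    optimizer.eval()                  # switch before evaluation or checkpointing
    torch.save(model.state_dict(), "checkpoint.pt")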
\ No newline at end of file
diff --git a/_modules/lmflow/optim/adan.html b/_modules/lmflow/optim/adan.html
new file mode 100644
index 000000000..560307fe9
--- /dev/null
+++ b/_modules/lmflow/optim/adan.html
@@ -0,0 +1,761 @@
+lmflow.optim.adan — LMFlow documentation

Source code for lmflow.optim.adan

+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import math
+from typing import List
+import torch
+from torch import Tensor
+from torch.optim.optimizer import Optimizer
+
+
+[docs] +class Adan(Optimizer): + """Implements a pytorch variant of Adan. + + Adan was proposed in + Adan : Adaptive Nesterov Momentum Algorithm for Faster Optimizing Deep Models. + https://arxiv.org/abs/2208.06677 + + """ + + def __init__(self, + params, + lr=1e-3, + betas=(0.98, 0.92, 0.99), + eps=1e-8, + weight_decay=0.0, + max_grad_norm=0.0, + no_prox=False, + foreach: bool = True): + if not 0.0 <= max_grad_norm: + raise ValueError('Invalid Max grad norm: {}'.format(max_grad_norm)) + if not 0.0 <= lr: + raise ValueError('Invalid learning rate: {}'.format(lr)) + if not 0.0 <= eps: + raise ValueError('Invalid epsilon value: {}'.format(eps)) + if not 0.0 <= betas[0] < 1.0: + raise ValueError('Invalid beta parameter at index 0: {}'.format( + betas[0])) + if not 0.0 <= betas[1] < 1.0: + raise ValueError('Invalid beta parameter at index 1: {}'.format( + betas[1])) + if not 0.0 <= betas[2] < 1.0: + raise ValueError('Invalid beta parameter at index 2: {}'.format( + betas[2])) +
+[docs] + defaults = dict( + lr=lr, + betas=betas, + eps=eps, + weight_decay=weight_decay, + max_grad_norm=max_grad_norm, + no_prox=no_prox, + foreach=foreach)
+ + super().__init__(params, defaults) + +
+[docs] + def __setstate__(self, state): + super(Adan, self).__setstate__(state) + for group in self.param_groups: + group.setdefault('no_prox', False)
+ + + @torch.no_grad() +
+[docs] + def restart_opt(self): + for group in self.param_groups: + group['step'] = 0 + for p in group['params']: + if p.requires_grad: + state = self.state[p] + # State initialization + + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p) + # Exponential moving average of squared gradient values + state['exp_avg_sq'] = torch.zeros_like(p) + # Exponential moving average of gradient difference + state['exp_avg_diff'] = torch.zeros_like(p)
+ + + @torch.no_grad() +
+[docs] + def step(self): + """Performs a single optimization step.""" + if self.defaults['max_grad_norm'] > 0: + device = self.param_groups[0]['params'][0].device + global_grad_norm = torch.zeros(1, device=device) + + max_grad_norm = torch.tensor( + self.defaults['max_grad_norm'], device=device) + for group in self.param_groups: + + for p in group['params']: + if p.grad is not None: + grad = p.grad + global_grad_norm.add_(grad.pow(2).sum()) + + global_grad_norm = torch.sqrt(global_grad_norm) + group['eps'] + + clip_global_grad_norm = \ + torch.clamp(max_grad_norm / global_grad_norm, max=1.0) + else: + clip_global_grad_norm = 1.0 + + for group in self.param_groups: + params_with_grad = [] + grads = [] + exp_avgs = [] + exp_avg_sqs = [] + exp_avg_diffs = [] + pre_grads = [] + + beta1, beta2, beta3 = group['betas'] + # assume same step across group now to simplify things + # per parameter step can be easily support + # by making it tensor, or pass list into kernel + if 'step' in group: + group['step'] += 1 + else: + group['step'] = 1 + + bias_correction1 = 1.0 - beta1**group['step'] + bias_correction2 = 1.0 - beta2**group['step'] + bias_correction3 = 1.0 - beta3**group['step'] + + for p in group['params']: + if p.grad is None: + continue + params_with_grad.append(p) + grads.append(p.grad) + + state = self.state[p] + if len(state) == 0: + state['exp_avg'] = torch.zeros_like(p) + state['exp_avg_sq'] = torch.zeros_like(p) + state['exp_avg_diff'] = torch.zeros_like(p) + + if 'pre_grad' not in state or group['step'] == 1: + # at first step grad wouldn't be clipped + # by `clip_global_grad_norm` + # this is only to simplify implementation + state['pre_grad'] = p.grad + + exp_avgs.append(state['exp_avg']) + exp_avg_sqs.append(state['exp_avg_sq']) + exp_avg_diffs.append(state['exp_avg_diff']) + pre_grads.append(state['pre_grad']) + + kwargs = dict( + params=params_with_grad, + grads=grads, + exp_avgs=exp_avgs, + exp_avg_sqs=exp_avg_sqs, + exp_avg_diffs=exp_avg_diffs, + pre_grads=pre_grads, + beta1=beta1, + beta2=beta2, + beta3=beta3, + bias_correction1=bias_correction1, + bias_correction2=bias_correction2, + bias_correction3_sqrt=math.sqrt(bias_correction3), + lr=group['lr'], + weight_decay=group['weight_decay'], + eps=group['eps'], + no_prox=group['no_prox'], + clip_global_grad_norm=clip_global_grad_norm, + ) + if group['foreach']: + copy_grads = _multi_tensor_adan(**kwargs) + else: + copy_grads = _single_tensor_adan(**kwargs) + + for p, copy_grad in zip(params_with_grad, copy_grads): + self.state[p]['pre_grad'] = copy_grad
+
+ + + +
+[docs] +def _single_tensor_adan( + params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_avg_sqs: List[Tensor], + exp_avg_diffs: List[Tensor], + pre_grads: List[Tensor], + *, + beta1: float, + beta2: float, + beta3: float, + bias_correction1: float, + bias_correction2: float, + bias_correction3_sqrt: float, + lr: float, + weight_decay: float, + eps: float, + no_prox: bool, + clip_global_grad_norm: Tensor, +): + copy_grads = [] + for i, param in enumerate(params): + grad = grads[i] + exp_avg = exp_avgs[i] + exp_avg_sq = exp_avg_sqs[i] + exp_avg_diff = exp_avg_diffs[i] + pre_grad = pre_grads[i] + + grad = grad.mul_(clip_global_grad_norm) + copy_grads.append(grad.clone()) + + diff = grad - pre_grad + update = grad + beta2 * diff + + exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) # m_t + exp_avg_diff.mul_(beta2).add_(diff, alpha=1 - beta2) # diff_t + exp_avg_sq.mul_(beta3).addcmul_(update, update, value=1 - beta3) # n_t + + denom = (exp_avg_sq.sqrt() / bias_correction3_sqrt).add_(eps) + update = exp_avg / bias_correction1 + update.add_(beta2 * exp_avg_diff / bias_correction2).div_(denom) + + if no_prox: + param.mul_(1 - lr * weight_decay) + param.add_(update, alpha=-lr) + else: + param.add_(update, alpha=-lr) + param.div_(1 + lr * weight_decay) + return copy_grads
+ + + +
+[docs]
+def _multi_tensor_adan(
+    params: List[Tensor],
+    grads: List[Tensor],
+    exp_avgs: List[Tensor],
+    exp_avg_sqs: List[Tensor],
+    exp_avg_diffs: List[Tensor],
+    pre_grads: List[Tensor],
+    *,
+    beta1: float,
+    beta2: float,
+    beta3: float,
+    bias_correction1: float,
+    bias_correction2: float,
+    bias_correction3_sqrt: float,
+    lr: float,
+    weight_decay: float,
+    eps: float,
+    no_prox: bool,
+    clip_global_grad_norm: Tensor,
+):
+    if clip_global_grad_norm < 1.0:
+        torch._foreach_mul_(grads, clip_global_grad_norm.item())
+    copy_grads = [g.clone() for g in grads]
+
+    diff = torch._foreach_sub(grads, pre_grads)
+    # NOTE: the commented line below, while looking identical, gives a different
+    # result due to float precision: mul+add matches the single-tensor path,
+    # add with alpha does not.
+    # update = torch._foreach_add(grads, torch._foreach_mul(diff, beta2))
+    update = torch._foreach_add(grads, diff, alpha=beta2)
+
+    torch._foreach_mul_(exp_avgs, beta1)
+    torch._foreach_add_(exp_avgs, grads, alpha=1 - beta1)  # m_t
+
+    torch._foreach_mul_(exp_avg_diffs, beta2)
+    torch._foreach_add_(exp_avg_diffs, diff, alpha=1 - beta2)  # diff_t
+
+    torch._foreach_mul_(exp_avg_sqs, beta3)
+    torch._foreach_addcmul_(
+        exp_avg_sqs, update, update, value=1 - beta3)  # n_t
+
+    denom = torch._foreach_sqrt(exp_avg_sqs)
+    torch._foreach_div_(denom, bias_correction3_sqrt)
+    torch._foreach_add_(denom, eps)
+
+    update = torch._foreach_div(exp_avgs, bias_correction1)
+    # NOTE: same precision issue as above:
+    # beta2 * diff / bias_correction2 != diff * (beta2 / bias_correction2)
+    # The faster version is used by default; uncomment the next line for tests to pass.
+    # torch._foreach_add_(update, torch._foreach_div(torch._foreach_mul(exp_avg_diffs, beta2), bias_correction2))  # noqa
+    torch._foreach_add_(
+        update, torch._foreach_mul(exp_avg_diffs, beta2 / bias_correction2))
+    torch._foreach_div_(update, denom)
+
+    if no_prox:
+        # Decoupled weight decay followed by the parameter update,
+        # matching _single_tensor_adan.
+        torch._foreach_mul_(params, 1 - lr * weight_decay)
+        torch._foreach_add_(params, update, alpha=-lr)
+    else:
+        torch._foreach_add_(params, update, alpha=-lr)
+        torch._foreach_div_(params, 1 + lr * weight_decay)
+    return copy_grads
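A minimal usage sketch for Adan (toy model and data; hyperparameters are illustrative, the import path is taken from this page's module name). Note that step() here takes no closure and applies global gradient clipping internally via max_grad_norm:

    import torch
    import torch.nn.functional as F
    from lmflow.optim.adan import Adan  # module shown in this page's title

    model = torch.nn.Linear(8, 2)
    optimizer = Adan(model.parameters(), lr=1e-3, betas=(0.98, 0.92, 0.99),
                     weight_decay=0.02, max_grad_norm=1.0)

    for _ in range(3):
        x, y = torch.randn(32, 8), torch.randint(0, 2, (32,))
        loss = F.cross_entropy(model(x), y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()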
\ No newline at end of file
diff --git a/_modules/lmflow/optim/dummy.html b/_modules/lmflow/optim/dummy.html
new file mode 100644
index 000000000..bad2d5b8b
--- /dev/null
+++ b/_modules/lmflow/optim/dummy.html
@@ -0,0 +1,549 @@
+lmflow.optim.dummy — LMFlow documentation

Source code for lmflow.optim.dummy

+#!/usr/bin/env python
+# coding=utf-8
+"""Dummy Optimizer.
+"""
+import math
+import warnings
+from typing import Callable, Iterable, Tuple
+
+import torch
+from torch import nn
+from torch.optim import Optimizer
+
+
+[docs] +class Dummy(Optimizer): + """ + An dummy optimizer that does nothing. + + Parameters: + params (:obj:`Iterable[nn.parameter.Parameter]`): + Iterable of parameters to optimize or dictionaries defining parameter groups. + lr (:obj:`float`, `optional`, defaults to 0): + The learning rate to use. + """ + + def __init__( + self, + params: Iterable[nn.parameter.Parameter], + lr: float = 0., + betas: Tuple[float, float] = (0.9, 0.999), + weight_decay: float = 0.0, + ): + if lr < 0.0: + raise ValueError(f"Invalid learning rate: {lr} - should be >= 0.0") + if not 0.0 <= betas[0] < 1.0: + raise ValueError(f"Invalid beta parameter: {betas[0]} - should be in [0.0, 1.0)") + if not 0.0 <= betas[1] < 1.0: + raise ValueError(f"Invalid beta parameter: {betas[1]} - should be in [0.0, 1.0)") +
+[docs] + defaults = {"lr": lr, "betas": betas, "weight_decay": weight_decay}
+ + super().__init__(params, defaults) + + + @torch.no_grad() +
+[docs] + def step(self, closure: Callable=None): + """ + Performs a single optimization step. + + Arguments: + closure (:obj:`Callable`, `optional`): A closure that reevaluates the model and returns the loss. + """ + loss = None + if closure is not None: + loss = closure() + + for group in self.param_groups: + for p in group["params"]: + if p.grad is None: + continue + grad = p.grad + if grad.is_sparse: + raise RuntimeError("Dummy does not support sparse gradients yet") + + state = self.state[p] + + # State initialization + if len(state) == 0: + state["step"] = 0 + state["exp_avg"] = torch.zeros_like(p) + state["exp_avg2"] = torch.zeros_like(p) + + # v := exp_avg + # m := double_exp_avg + v, m = state["exp_avg"], state["exp_avg2"] + beta1, beta2 = group["betas"] + step_size = group["lr"] + + state["step"] += 1 + + p.add_(m, alpha=-0.0) + if group["weight_decay"] > 0.0: + p.add_(p, alpha=(-group["lr"] * group["weight_decay"])) + return loss
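Since the update is scaled by -0.0 and weight_decay defaults to 0, step() leaves the parameters untouched, which makes Dummy handy when a pipeline insists on an optimizer object but no update should actually be applied. A quick sketch verifying the no-op behaviour (toy model; import path from this page's module name):

    import torch
    from lmflow.optim.dummy import Dummy  # module shown in this page's title

    model = torch.nn.Linear(4, 4)
    before = [p.detach().clone() for p in model.parameters()]

    optimizer = Dummy(model.parameters(), lr=0.0)
    model(torch.randn(2, 4)).sum().backward()
    optimizer.step()

    print(all(torch.equal(b, p) for b, p in zip(before, model.parameters())))  # True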
\ No newline at end of file
diff --git a/_modules/lmflow/optim/lamb.html b/_modules/lmflow/optim/lamb.html
new file mode 100644
index 000000000..488f18482
--- /dev/null
+++ b/_modules/lmflow/optim/lamb.html
@@ -0,0 +1,610 @@
+lmflow.optim.lamb — LMFlow documentation

Source code for lmflow.optim.lamb

+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import math
+import torch
+from torch.optim.optimizer import Optimizer
+
+
+[docs] +class Lamb(Optimizer): + r"""Implements Lamb algorithm. + + It has been proposed in `Large Batch Optimization for Deep Learning: + Training BERT in 76 minutes` + https://arxiv.org/abs/1904.00962 + + Note: + Reference code: https://github.com/cybertronai/pytorch-lamb + """ + + def __init__( + self, + params, + lr: float = 1e-3, + betas = (0.9, 0.999), + eps: float = 1e-6, + weight_decay: float = 0, + clamp_value: float = 10, + adam: bool = False, + debias: bool = False, + ) -> None: + if lr <= 0.0: + raise ValueError("Invalid learning rate: {}".format(lr)) + if eps < 0.0: + raise ValueError("Invalid epsilon value: {}".format(eps)) + if not 0.0 <= betas[0] < 1.0: + raise ValueError( + "Invalid beta parameter at index 0: {}".format(betas[0]) + ) + if not 0.0 <= betas[1] < 1.0: + raise ValueError( + "Invalid beta parameter at index 1: {}".format(betas[1]) + ) + if weight_decay < 0: + raise ValueError( + "Invalid weight_decay value: {}".format(weight_decay) + ) + if clamp_value < 0.0: + raise ValueError("Invalid clamp value: {}".format(clamp_value)) + +
+[docs] + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
+ +
+[docs] + self.clamp_value = clamp_value
+ +
+[docs] + self.adam = adam
+ +
+[docs] + self.debias = debias
+ + + super(Lamb, self).__init__(params, defaults) + +
+[docs] + def step(self, closure = None): + r"""Performs a single optimization step. + + Arguments: + closure: A closure that reevaluates the model and returns the loss. + """ + loss = None + if closure is not None: + loss = closure() + + for group in self.param_groups: + for p in group["params"]: + if p.grad is None: + continue + grad = p.grad.data + if grad.is_sparse: + msg = ( + "Lamb does not support sparse gradients, " + "please consider SparseAdam instead" + ) + raise RuntimeError(msg) + + state = self.state[p] + + # State initialization + if len(state) == 0: + state["step"] = 0 + # Exponential moving average of gradient values + state["exp_avg"] = torch.zeros_like( + p, memory_format=torch.preserve_format + ) + # Exponential moving average of squared gradient values + state["exp_avg_sq"] = torch.zeros_like( + p, memory_format=torch.preserve_format + ) + + exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"] + beta1, beta2 = group["betas"] + + state["step"] += 1 + + # Decay the first and second moment running average coefficient + # m_t + exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) + # v_t + exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) + + # Paper v3 does not use debiasing. + if self.debias: + bias_correction = math.sqrt(1 - beta2 ** state["step"]) + bias_correction /= 1 - beta1 ** state["step"] + else: + bias_correction = 1 + + # Apply bias to lr to avoid broadcast. + step_size = group["lr"] * bias_correction + + weight_norm = torch.norm(p.data).clamp(0, self.clamp_value) + + adam_step = exp_avg / exp_avg_sq.sqrt().add(group["eps"]) + if group["weight_decay"] != 0: + adam_step.add_(p.data, alpha=group["weight_decay"]) + + adam_norm = torch.norm(adam_step) + if weight_norm == 0 or adam_norm == 0: + trust_ratio = 1 + else: + trust_ratio = weight_norm / adam_norm + state["weight_norm"] = weight_norm + state["adam_norm"] = adam_norm + state["trust_ratio"] = trust_ratio + if self.adam: + trust_ratio = 1 + + p.data.add_(adam_step, alpha=-step_size * trust_ratio) + + return loss
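The layer-wise scaling is the heart of LAMB: the Adam-style update is rescaled by the ratio of the (clamped) weight norm to the update norm. A standalone sketch of that trust ratio on toy tensors (values are illustrative):

    import torch

    weight = torch.randn(256, 128)
    adam_step = 1e-3 * torch.randn_like(weight)   # stand-in for exp_avg / (sqrt(exp_avg_sq) + eps)

    weight_norm = torch.norm(weight).clamp(0, 10)  # clamp_value defaults to 10
    adam_norm = torch.norm(adam_step)
    trust_ratio = 1.0 if weight_norm == 0 or adam_norm == 0 else (weight_norm / adam_norm).item()
    print(f"trust ratio: {trust_ratio:.1f}")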
\ No newline at end of file
diff --git a/_modules/lmflow/optim/lars.html b/_modules/lmflow/optim/lars.html
new file mode 100644
index 000000000..ea249df0a
--- /dev/null
+++ b/_modules/lmflow/optim/lars.html
@@ -0,0 +1,615 @@
+lmflow.optim.lars — LMFlow documentation

Source code for lmflow.optim.lars

+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import torch
+from torch.optim.optimizer import Optimizer
+
+
+[docs] +class LARS(Optimizer): + r"""Extends SGD in PyTorch with LARS scaling from the paper + `Large batch training of Convolutional Networks`__. + .. note:: + The application of momentum in the SGD part is modified according to + the PyTorch standards. LARS scaling fits into the equation in the + following fashion. + + .. math:: + \begin{aligned} + g_{t+1} & = \text{lars_lr} * (\beta * p_{t} + g_{t+1}), \\ + v_{t+1} & = \\mu * v_{t} + g_{t+1}, \\ + p_{t+1} & = p_{t} - \text{lr} * v_{t+1}, + \\end{aligned} + + where :math:`p`, :math:`g`, :math:`v`, :math:`\\mu` and :math:`\beta` + denote the parameters, gradient, velocity, momentum, and weight decay + respectively. The :math:`lars_lr` is defined by Eq. 6 in the paper. + The Nesterov version is analogously modified. + + .. warning:: + Parameters with weight decay set to 0 will automatically be excluded + from layer-wise LR scaling. This is to ensure consistency with papers + like SimCLR and BYOL. + + + __ https://arxiv.org/pdf/1708.03888.pdf + + Note: + Reference code: https://github.com/PyTorchLightning/lightning-bolts/ + """ + + def __init__( + self, + params, + lr: float = 1e-2, + momentum: float = 0.0, + dampening: float = 0.0, + weight_decay: float = 0.0, + nesterov: bool = False, + trust_coefficient: float = 0.01, + eps: float = 1e-8, + ): + if lr <= 0.0: + raise ValueError("Invalid learning rate: {}".format(lr)) + if eps < 0.0: + raise ValueError("Invalid epsilon value: {}".format(eps)) + if momentum < 0.0: + raise ValueError("Invalid momentum value: {}".format(momentum)) + if dampening < 0.0: + raise ValueError("Invalid dampening value: {}".format(dampening)) + if weight_decay < 0.0: + raise ValueError( + "Invalid weight_decay value: {}".format(weight_decay) + ) + if trust_coefficient < 0.0: + raise ValueError( + "Invalid trust_coefficient value: {}".format(trust_coefficient) + ) + +
+[docs] + defaults = dict( + lr=lr, + momentum=momentum, + dampening=dampening, + weight_decay=weight_decay, + nesterov=nesterov, + trust_coefficient=trust_coefficient, + eps=eps, + )
+ + if nesterov and (momentum <= 0 or dampening != 0): + raise ValueError( + "Nesterov momentum requires a momentum and zero dampening" + ) + + super().__init__(params, defaults) + +
+[docs] + def __setstate__(self, state) -> None: + super().__setstate__(state) + + for group in self.param_groups: + group.setdefault("nesterov", False)
+ + + @torch.no_grad() +
+[docs] + def step(self, closure = None): + r"""Performs a single optimization step. + + Arguments: + closure: A closure that reevaluates the model and returns the loss. + """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + # exclude scaling for params with 0 weight decay + for group in self.param_groups: + weight_decay = group["weight_decay"] + momentum = group["momentum"] + dampening = group["dampening"] + nesterov = group["nesterov"] + + for p in group["params"]: + if p.grad is None: + continue + + d_p = p.grad + p_norm = torch.norm(p.data) + g_norm = torch.norm(p.grad.data) + + # lars scaling + weight decay part + if weight_decay != 0: + if p_norm != 0 and g_norm != 0: + lars_lr = p_norm / ( + g_norm + p_norm * weight_decay + group["eps"] + ) + lars_lr *= group["trust_coefficient"] + + d_p = d_p.add(p, alpha=weight_decay) + d_p *= lars_lr + + if momentum != 0: + param_state = self.state[p] + if "momentum_buffer" not in param_state: + buf = param_state["momentum_buffer"] = torch.clone( + d_p + ).detach() + else: + buf = param_state["momentum_buffer"] + buf.mul_(momentum).add_(d_p, alpha=1 - dampening) + if nesterov: + d_p = d_p.add(buf, alpha=momentum) + else: + d_p = buf + + p.add_(d_p, alpha=-group["lr"]) + + return loss
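Following the docstring, the local learning-rate multiplier is trust_coefficient * ||p|| / (||g|| + weight_decay * ||p|| + eps), and parameters whose weight decay is 0 skip the scaling entirely. A standalone sketch of that multiplier (toy tensors, illustrative values):

    import torch

    p = torch.randn(64, 64)
    g = torch.randn_like(p)
    weight_decay, trust_coefficient, eps = 1e-4, 0.01, 1e-8

    lars_lr = trust_coefficient * p.norm() / (g.norm() + p.norm() * weight_decay + eps)
    print(f"local lr multiplier: {lars_lr.item():.4f}")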
\ No newline at end of file
diff --git a/_modules/lmflow/optim/nadam.html b/_modules/lmflow/optim/nadam.html
new file mode 100644
index 000000000..b22519f73
--- /dev/null
+++ b/_modules/lmflow/optim/nadam.html
@@ -0,0 +1,545 @@
+lmflow.optim.nadam — LMFlow documentation

Source code for lmflow.optim.nadam

+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import torch
+import math
+
+
+[docs] +class NAdam(torch.optim.Optimizer): + def __init__(self, params, lr=2e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, momentum_decay=4e-3): + if not 0.0 <= lr: + raise ValueError("Invalid learning rate: {}".format(lr)) + if not 0.0 <= eps: + raise ValueError("Invalid epsilon value: {}".format(eps)) + if not 0.0 <= betas[0] < 1.0: + raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) + if not 0.0 <= betas[1] < 1.0: + raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) + if not 0.0 <= weight_decay: + raise ValueError("Invalid weight_decay value: {}".format(weight_decay)) + if not 0.0 <= momentum_decay: + raise ValueError("Invalid momentum_decay value: {}".format(momentum_decay)) +
+[docs] + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, momentum_decay=momentum_decay)
+ + super(NAdam, self).__init__(params, defaults) + +
+[docs] + def __setstate__(self, state): + super(NAdam, self).__setstate__(state)
+ + +
+[docs]
+    def step(self, closure=None):
+        loss = None
+        if closure is not None:
+            loss = closure()
+
+        for group in self.param_groups:
+            for p in group['params']:
+                if p.grad is None:
+                    continue
+                grad = p.grad.data
+                if grad.is_sparse:
+                    raise RuntimeError('NAdam does not support sparse gradients')
+                state = self.state[p]
+
+                # State initialization
+                if len(state) == 0:
+                    state['step'] = 0
+                    state['m_prev'] = torch.zeros_like(p.data)
+                    state['v'] = torch.zeros_like(p.data)
+
+                m_prev, v = state['m_prev'], state['v']
+                beta1, beta2 = group['betas']
+
+                state['step'] += 1
+                bias_correction1 = 1 - beta1 ** state['step']
+                bias_correction2 = 1 - beta2 ** state['step']
+
+                if group['weight_decay'] != 0:
+                    grad = grad.add(p.data, alpha=group['weight_decay'])
+
+                # Keyword forms replace the deprecated add_/addcmul_/addcdiv_
+                # Number overloads.
+                m = beta1 * m_prev + (1 - beta1) * grad
+                v.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
+
+                m_hat = m / bias_correction1
+                v_hat = v / bias_correction2
+
+                denom = v_hat.sqrt().add_(group['eps'])
+
+                momentum_decay = group['momentum_decay']
+                m_prev.mul_(beta1).add_(grad, alpha=1 - beta1)
+                m_prev_hat = m_prev / bias_correction1
+
+                step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
+
+                p.data.addcdiv_(m_hat + momentum_decay * m_prev_hat, denom, value=-step_size)
+
+        return loss
\ No newline at end of file
diff --git a/_modules/lmflow/optim/novograd.html b/_modules/lmflow/optim/novograd.html
new file mode 100644
index 000000000..4df667298
--- /dev/null
+++ b/_modules/lmflow/optim/novograd.html
@@ -0,0 +1,556 @@
+lmflow.optim.novograd — LMFlow documentation

Source code for lmflow.optim.novograd

+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import torch
+import torch.optim as optim
+
+
+[docs] +class NovoGrad(optim.Optimizer): + def __init__(self, params, lr=0.01, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, grad_averaging=False, amsgrad=False): + if not 0.0 <= lr: + raise ValueError("Invalid learning rate: {}".format(lr)) + if not 0.0 <= eps: + raise ValueError("Invalid epsilon value: {}".format(eps)) + if not 0.0 <= betas[0] < 1.0: + raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) + if not 0.0 <= betas[1] < 1.0: + raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) + if not 0.0 <= weight_decay: + raise ValueError("Invalid weight_decay value: {}".format(weight_decay)) +
+[docs] + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, grad_averaging=grad_averaging, amsgrad=amsgrad)
+ + super(NovoGrad, self).__init__(params, defaults) + +
+[docs] + def __setstate__(self, state): + super(NovoGrad, self).__setstate__(state) + for group in self.param_groups: + group.setdefault('amsgrad', False)
+ + +
+[docs]
+    def step(self, closure=None):
+        loss = None
+        if closure is not None:
+            loss = closure()
+
+        for group in self.param_groups:
+            for p in group['params']:
+                if p.grad is None:
+                    continue
+                grad = p.grad.data
+                if grad.is_sparse:
+                    raise RuntimeError('NovoGrad does not support sparse gradients')
+                amsgrad = group['amsgrad']
+
+                state = self.state[p]
+
+                # State initialization
+                if len(state) == 0:
+                    state['step'] = 0
+                    state['exp_avg'] = torch.zeros_like(p.data)
+                    state['exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device)
+                    if amsgrad:
+                        state['max_exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device)
+
+                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
+                if amsgrad:
+                    max_exp_avg_sq = state['max_exp_avg_sq']
+                beta1, beta2 = group['betas']
+
+                state['step'] += 1
+
+                # Per-layer squared gradient norm (a scalar, unlike Adam's
+                # element-wise second moment).
+                norm = torch.sum(torch.pow(grad, 2))
+
+                if exp_avg_sq == 0:
+                    exp_avg_sq.copy_(norm)
+                else:
+                    exp_avg_sq.mul_(beta2).add_(norm, alpha=1 - beta2)
+
+                if amsgrad:
+                    # Maintains the maximum of all 2nd moment running avg. till now
+                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
+                    # Use the max. for normalizing running avg. of gradient
+                    denom = max_exp_avg_sq.sqrt().add_(group['eps'])
+                else:
+                    denom = exp_avg_sq.sqrt().add_(group['eps'])
+
+                grad.div_(denom)
+                if group['weight_decay'] != 0:
+                    grad.add_(p.data, alpha=group['weight_decay'])
+
+                if group['grad_averaging']:
+                    grad.mul_(1 - beta1)
+
+                exp_avg.mul_(beta1).add_(grad)
+
+                p.data.add_(exp_avg, alpha=-group['lr'])
+
+        return loss
\ No newline at end of file
diff --git a/_modules/lmflow/optim/radam.html b/_modules/lmflow/optim/radam.html
new file mode 100644
index 000000000..94ecd78ca
--- /dev/null
+++ b/_modules/lmflow/optim/radam.html
@@ -0,0 +1,645 @@
+lmflow.optim.radam — LMFlow documentation

Source code for lmflow.optim.radam

+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import math
+import warnings
+import torch
+from torch.optim.optimizer import Optimizer
+
+
+[docs] +class RAdam(Optimizer): + r"""Implements RAdam optimization algorithm. + + Note: + Deprecated, please use version provided by PyTorch_. + + It has been proposed in `On the Variance of the Adaptive Learning + Rate and Beyond`. + https://arxiv.org/abs/1908.03265 + + Note: + Reference code: https://github.com/LiyuanLucasLiu/RAdam + """ + + def __init__( + self, + params, + lr: float = 1e-3, + betas = (0.9, 0.999), + eps: float = 1e-8, + weight_decay: float = 0, + ) -> None: + warnings.warn( + "RAdam optimizer is deprecated, since it is included " + "in pytorch natively.", + DeprecationWarning, + stacklevel=2, + ) + if lr <= 0.0: + raise ValueError("Invalid learning rate: {}".format(lr)) + if eps < 0.0: + raise ValueError("Invalid epsilon value: {}".format(eps)) + if not 0.0 <= betas[0] < 1.0: + raise ValueError( + "Invalid beta parameter at index 0: {}".format(betas[0]) + ) + if not 0.0 <= betas[1] < 1.0: + raise ValueError( + "Invalid beta parameter at index 1: {}".format(betas[1]) + ) + if weight_decay < 0: + raise ValueError( + "Invalid weight_decay value: {}".format(weight_decay) + ) + + if ( + isinstance(params, (list, tuple)) + and len(params) > 0 + and isinstance(params[0], dict) + ): + for param in params: + if "betas" in param and ( + param["betas"][0] != betas[0] + or param["betas"][1] != betas[1] + ): + param["buffer"] = [[None, None, None] for _ in range(10)] + +
+[docs] + defaults = dict( + lr=lr, + betas=betas, + eps=eps, + weight_decay=weight_decay, + buffer=[[None, None, None] for _ in range(10)], + )
+ + super(RAdam, self).__init__(params, defaults) + +
+[docs] + def __setstate__(self, state): + super(RAdam, self).__setstate__(state)
+ + +
+[docs] + def step(self, closure = None): + r"""Performs a single optimization step. + + Arguments: + closure: A closure that reevaluates the model and returns the loss. + """ + + loss = None + if closure is not None: + loss = closure() + + for group in self.param_groups: + lr = group["lr"] + weight_decay = group["weight_decay"] + beta1, beta2 = group["betas"] + eps = group["eps"] + + for p in group["params"]: + if p.grad is None: + continue + grad = p.grad.data.float() + if grad.is_sparse: + msg = ( + "RAdam does not support sparse gradients, " + "please consider SparseAdam instead" + ) + raise RuntimeError(msg) + + p_data_fp32 = p.data.float() + + state = self.state[p] + + if len(state) == 0: + state["step"] = 0 + state["exp_avg"] = torch.zeros_like( + p_data_fp32, memory_format=torch.preserve_format + ) + state["exp_avg_sq"] = torch.zeros_like( + p_data_fp32, memory_format=torch.preserve_format + ) + else: + state["exp_avg"] = state["exp_avg"].type_as(p_data_fp32) + state["exp_avg_sq"] = state["exp_avg_sq"].type_as( + p_data_fp32 + ) + + exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"] + + exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) + exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) + + state["step"] += 1 + buffered = group["buffer"][int(state["step"] % 10)] + if state["step"] == buffered[0]: + N_sma, step_size = buffered[1], buffered[2] + else: + buffered[0] = state["step"] + beta2_t = beta2 ** state["step"] + N_sma_max = 2 / (1 - beta2) - 1 + N_sma = N_sma_max - 2 * state["step"] * beta2_t / ( + 1 - beta2_t + ) + buffered[1] = N_sma + + # more conservative since it's an approximated value + if N_sma >= 5: + step_size = ( + lr + * math.sqrt( + (1 - beta2_t) + * (N_sma - 4) + / (N_sma_max - 4) + * (N_sma - 2) + / N_sma + * N_sma_max + / (N_sma_max - 2) + ) + / (1 - beta1 ** state["step"]) + ) + else: + step_size = lr / (1 - beta1 ** state["step"]) + buffered[2] = step_size + + if weight_decay != 0: + p_data_fp32.add_(p_data_fp32, alpha=-weight_decay * lr) + + # more conservative since it's an approximated value + if N_sma >= 5: + denom = exp_avg_sq.sqrt().add_(eps) + p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size) + else: + p_data_fp32.add_(exp_avg, alpha=-step_size) + + p.data.copy_(p_data_fp32) + + return loss
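As the docstring notes, this class is deprecated in favour of the optimizer that ships with PyTorch. A sketch of the drop-in replacement (toy model):

    import torch

    model = torch.nn.Linear(4, 1)
    # torch.optim.RAdam is available in PyTorch >= 1.10
    optimizer = torch.optim.RAdam(model.parameters(), lr=1e-3, betas=(0.9, 0.999), weight_decay=0)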
\ No newline at end of file
diff --git a/_modules/lmflow/optim/sgd_schedule_free.html b/_modules/lmflow/optim/sgd_schedule_free.html
new file mode 100644
index 000000000..359e3df00
--- /dev/null
+++ b/_modules/lmflow/optim/sgd_schedule_free.html
@@ -0,0 +1,633 @@
+lmflow.optim.sgd_schedule_free — LMFlow documentation

Source code for lmflow.optim.sgd_schedule_free

+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+# 
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import torch
+import torch.optim
+
+
+[docs] +class SGDScheduleFree(torch.optim.Optimizer): + r""" + Schedule-Free SGD + As the name suggests, no scheduler is needed with this optimizer. + To add warmup, rather than using a learning rate schedule you can just + set the warmup_steps parameter. + + This optimizer requires that .train() and .eval() be called before the + beginning of training and evaluation respectively. The optimizer should + also be placed in eval mode when saving checkpoints. + """ + def __init__(self, + params, + lr=1.0, + momentum=0.9, + weight_decay=0, + warmup_steps=0, + r=0.0, + weight_lr_power=2, + foreach=hasattr(torch, "_foreach_mul_"), + ): + if lr < 0.0: + raise ValueError("Invalid learning rate: {}".format(lr)) + if weight_decay < 0.0: + raise ValueError("Invalid weight_decay value: {}".format(weight_decay)) + if momentum <= 0 or momentum >= 1: + raise ValueError("Momentum must be between 0 and 1 exclusive: {}".format(momentum)) + +
+[docs] + defaults = dict(lr=lr, + momentum=momentum, + r=r, + k=0, + warmup_steps=warmup_steps, + train_mode=True, + weight_sum=0.0, + lr_max=-1.0, + weight_lr_power=weight_lr_power, + weight_decay=weight_decay, + foreach=foreach)
+ + super().__init__(params, defaults) + +
+[docs] + def eval(self): + for group in self.param_groups: + train_mode = group['train_mode'] + momentum = group['momentum'] + if train_mode: + for p in group['params']: + state = self.state[p] + if 'z' in state: + # Set p.data to x + p.data.lerp_(end=state['z'], weight=1-1/momentum) + group['train_mode'] = False
+ + +
+[docs] + def train(self): + for group in self.param_groups: + train_mode = group['train_mode'] + momentum = group['momentum'] + if not train_mode: + for p in group['params']: + state = self.state[p] + if 'z' in state: + # Set p.data to y + p.data.lerp_(end=state['z'], weight=1-momentum) + group['train_mode'] = True
+ + +
+[docs] + def step(self, closure=None): + """Performs a single optimization step. + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + """ + + loss = None + if closure is not None: + loss = closure() + + for group in self.param_groups: + momentum = group['momentum'] + lr = group['lr'] + weight_decay = group['weight_decay'] + k = group['k'] + warmup_steps = group['warmup_steps'] + + if k < warmup_steps: + sched = (k+1) / warmup_steps + else: + sched = 1.0 + lr = group['lr']*sched + + weight_lr_power = group['weight_lr_power'] + + r = group['r'] + lr_max = group['lr_max'] = max(lr, group['lr_max']) + + weight = ((k+1)**r) * (lr_max**weight_lr_power) + weight_sum = group['weight_sum'] = group['weight_sum'] + weight + + try: + ckp1 = weight/weight_sum + except ZeroDivisionError: + ckp1 = 0 + + if not group['train_mode']: + raise Exception("Not in train mode!") + + active_p = [p for p in group['params'] if p.grad is not None] + + for p in active_p: + if 'z' not in self.state[p]: + self.state[p]['z'] = torch.clone(p.data) + + if group['foreach'] and len(active_p) > 0: + y, grad, z = zip(*[(p.data, p.grad, self.state[p]['z']) + for p in active_p]) + + # Apply weight decay + if weight_decay != 0: + torch._foreach_add_(grad, y, alpha=weight_decay) + + # These operations update y in-place, + # without computing x explicitly. + torch._foreach_lerp_(y, z, weight=ckp1) + torch._foreach_add_(y, grad, alpha=lr*(momentum*(1-ckp1)-1)) + + # SGD step + torch._foreach_sub_(z, grad, alpha=lr) + else: + for p in active_p: + y = p.data # Notation to match theory + grad = p.grad.data + z = self.state[p]['z'] + + # Apply weight decay + if weight_decay != 0: + grad.add_(y, alpha=weight_decay) + + # These operations update y in-place, + # without computing x explicitly. + y.lerp_(end=z, weight=ckp1) + y.add_(grad, alpha=lr*(momentum*(1-ckp1)-1)) + + # SGD step + z.sub_(grad, alpha=lr) + + group['k'] = k+1 + return loss
\ No newline at end of file
diff --git a/_modules/lmflow/optim/sgdp.html b/_modules/lmflow/optim/sgdp.html
new file mode 100644
index 000000000..fb9913ada
--- /dev/null
+++ b/_modules/lmflow/optim/sgdp.html
@@ -0,0 +1,642 @@
+lmflow.optim.sgdp — LMFlow documentation

Source code for lmflow.optim.sgdp

+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import math
+import torch
+from torch.optim.optimizer import Optimizer
+
+
+
+[docs] +class SGDP(Optimizer): + r"""Implements SGDP algorithm. + + It has been proposed in `Slowing Down the Weight Norm Increase in + Momentum-based Optimizers`. + https://arxiv.org/abs/2006.08217 + + Note: + Reference code: https://github.com/clovaai/AdamP + """ + + def __init__( + self, + params, + lr: float = 1e-3, + momentum: float = 0, + dampening: float = 0, + eps: float = 1e-8, + weight_decay: float = 0, + delta: float = 0.1, + wd_ratio: float = 0.1, + nesterov: bool = False, + ) -> None: + if lr <= 0.0: + raise ValueError("Invalid learning rate: {}".format(lr)) + if eps < 0.0: + raise ValueError("Invalid epsilon value: {}".format(eps)) + if momentum < 0.0: + raise ValueError("Invalid momentum value: {}".format(momentum)) + if dampening < 0.0: + raise ValueError("Invalid dampening value: {}".format(dampening)) + if weight_decay < 0: + raise ValueError( + "Invalid weight_decay value: {}".format(weight_decay) + ) + if delta < 0: + raise ValueError("Invalid delta value: {}".format(delta)) + if wd_ratio < 0: + raise ValueError("Invalid wd_ratio value: {}".format(wd_ratio)) + +
+[docs] + defaults = dict( + lr=lr, + momentum=momentum, + dampening=dampening, + eps=eps, + weight_decay=weight_decay, + delta=delta, + wd_ratio=wd_ratio, + nesterov=nesterov, + )
+ + super(SGDP, self).__init__(params, defaults) + + @staticmethod +
+[docs] + def _channel_view(x): + return x.view(x.size(0), -1)
+ + + @staticmethod +
+[docs] + def _layer_view(x): + return x.view(1, -1)
+ + + @staticmethod +
+[docs] + def _cosine_similarity(x, y, eps, view_func): + x = view_func(x) + y = view_func(y) + + x_norm = x.norm(dim=1).add_(eps) + y_norm = y.norm(dim=1).add_(eps) + dot = (x * y).sum(dim=1) + + return dot.abs() / x_norm / y_norm
+ + +
+[docs] + def _projection(self, p, grad, perturb, delta, wd_ratio, eps): + wd = 1 + expand_size = [-1] + [1] * (len(p.shape) - 1) + for view_func in [self._channel_view, self._layer_view]: + cosine_sim = self._cosine_similarity(grad, p.data, eps, view_func) + + if cosine_sim.max() < delta / math.sqrt(view_func(p.data).size(1)): + p_n = p.data / view_func(p.data).norm(dim=1).view( + expand_size + ).add_(eps) + perturb -= p_n * view_func(p_n * perturb).sum(dim=1).view( + expand_size + ) + wd = wd_ratio + + return perturb, wd + + return perturb, wd
+ + +
+[docs] + def step(self, closure = None): + r"""Performs a single optimization step. + + Arguments: + closure: A closure that reevaluates the model and returns the loss. + """ + loss = None + if closure is not None: + loss = closure() + + for group in self.param_groups: + weight_decay = group["weight_decay"] + momentum = group["momentum"] + dampening = group["dampening"] + nesterov = group["nesterov"] + + for p in group["params"]: + if p.grad is None: + continue + + grad = p.grad.data + state = self.state[p] + + # State initialization + if len(state) == 0: + state["momentum"] = torch.zeros_like( + p.data, memory_format=torch.preserve_format + ) + + # SGD + buf = state["momentum"] + buf.mul_(momentum).add_(grad, alpha=1 - dampening) + if nesterov: + d_p = grad + momentum * buf + else: + d_p = buf + + # Projection + wd_ratio = 1 + if len(p.shape) > 1: + d_p, wd_ratio = self._projection( + p, + grad, + d_p, + group["delta"], + group["wd_ratio"], + group["eps"], + ) + + # Weight decay + if weight_decay != 0: + p.data.mul_( + 1 + - group["lr"] + * group["weight_decay"] + * wd_ratio + / (1 - momentum) + ) + + # Step + p.data.add_(d_p, alpha=-group["lr"]) + + return loss
\ No newline at end of file
diff --git a/_modules/lmflow/optim/sophia.html b/_modules/lmflow/optim/sophia.html
new file mode 100644
index 000000000..22e7149cd
--- /dev/null
+++ b/_modules/lmflow/optim/sophia.html
@@ -0,0 +1,593 @@
+lmflow.optim.sophia — LMFlow documentation

Source code for lmflow.optim.sophia

+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import torch
+from torch.optim.optimizer import Optimizer
+
+
+[docs] +class SophiaG(Optimizer): + """ + Sophia: A Scalable Stochastic Second-order Optimizer for Language Model Pre-training. + Code from: https://github.com/Liuhong99/Sophia/ + """ + + def __init__(self, params, lr=1e-4, betas=(0.965, 0.99), rho = 0.04, + weight_decay=1e-1, *, maximize: bool = False, + capturable: bool = False): + if not 0.0 <= lr: + raise ValueError("Invalid learning rate: {}".format(lr)) + if not 0.0 <= betas[0] < 1.0: + raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) + if not 0.0 <= betas[1] < 1.0: + raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) + if not 0.0 <= rho: + raise ValueError("Invalid rho parameter at index 1: {}".format(rho)) + if not 0.0 <= weight_decay: + raise ValueError("Invalid weight_decay value: {}".format(weight_decay)) +
+[docs] + defaults = dict(lr=lr, betas=betas, rho=rho, + weight_decay=weight_decay, + maximize=maximize, capturable=capturable)
+ + super(SophiaG, self).__init__(params, defaults) + +
+[docs] + def __setstate__(self, state): + super().__setstate__(state) + for group in self.param_groups: + group.setdefault('maximize', False) + group.setdefault('capturable', False) + state_values = list(self.state.values()) + step_is_tensor = (len(state_values) != 0) and torch.is_tensor(state_values[0]['step']) + if not step_is_tensor: + for s in state_values: + s['step'] = torch.tensor(float(s['step']))
+ + + @torch.no_grad() +
+[docs] + def update_hessian(self): + for group in self.param_groups: + beta1, beta2 = group['betas'] + for p in group['params']: + if p.grad is None: + continue + state = self.state[p] + + if len(state) == 0: + state['step'] = torch.zeros((1,), dtype=torch.float, device=p.device) \ + if self.defaults['capturable'] else torch.tensor(0.) + state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format) + state['hessian'] = torch.zeros_like(p, memory_format=torch.preserve_format) + + if 'hessian' not in state.keys(): + state['hessian'] = torch.zeros_like(p, memory_format=torch.preserve_format) + + state['hessian'].mul_(beta2).addcmul_(p.grad, p.grad, value=1 - beta2)
+ + + @torch.no_grad() +
+[docs] + def step(self, closure=None, bs=5120): + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + params_with_grad = [] + grads = [] + exp_avgs = [] + state_steps = [] + hessian = [] + beta1, beta2 = group['betas'] + + for p in group['params']: + if p.grad is None: + continue + params_with_grad.append(p) + + if p.grad.is_sparse: + raise RuntimeError('SophiaG does not support sparse gradients') + grads.append(p.grad) + state = self.state[p] + # State initialization + if len(state) == 0: + state['step'] = torch.zeros((1,), dtype=torch.float, device=p.device) \ + if self.defaults['capturable'] else torch.tensor(0.) + state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format) + state['hessian'] = torch.zeros_like(p, memory_format=torch.preserve_format) + + if 'hessian' not in state.keys(): + state['hessian'] = torch.zeros_like(p, memory_format=torch.preserve_format) + + exp_avgs.append(state['exp_avg']) + state_steps.append(state['step']) + hessian.append(state['hessian']) + + if self.defaults['capturable']: + bs = torch.ones((1,), dtype=torch.float, device=p.device) * bs + + # Perform the actual update step here instead of calling SophiaG again + for p, grad, exp_avg, h, step in zip(params_with_grad, grads, exp_avgs, hessian, state_steps): + if group['weight_decay'] != 0: + grad = grad.add(p, alpha=group['weight_decay']) + + # Decay the first and second moment running average coefficient + exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) + step.add_(1) + + # Compute the update using the hessian information + update = exp_avg.div(1 - beta1 ** step.item()) + h_sqrt = h.sqrt().add_(group['rho']) + p.addcdiv_(update, h_sqrt, value=-group['lr']) + + return loss
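Unlike Adam-style optimizers, SophiaG keeps a separate diagonal Hessian estimate that the caller refreshes every few steps via update_hessian(). A minimal, hypothetical usage sketch on a toy classifier (the every-k-steps refresh with labels sampled from the model follows the recipe of the upstream Sophia repository; the model, data, and interval k are made up for illustration):

import torch
import torch.nn as nn
from lmflow.optim.sophia import SophiaG

model = nn.Linear(16, 4)
opt = SophiaG(model.parameters(), lr=1e-4, betas=(0.965, 0.99), rho=0.04, weight_decay=1e-1)
k = 10  # how often to refresh the Hessian estimate (assumed value)

for step in range(100):
    x, y = torch.randn(32, 16), torch.randint(0, 4, (32,))
    loss = nn.functional.cross_entropy(model(x), y)
    loss.backward()
    opt.step()
    opt.zero_grad(set_to_none=True)

    if step % k == k - 1:
        # refresh the diagonal Hessian (an EMA of squared gradients in this implementation)
        # using labels sampled from the model's own predictions
        logits = model(torch.randn(32, 16))
        sampled_y = torch.distributions.Categorical(logits=logits).sample()
        nn.functional.cross_entropy(logits, sampled_y).backward()
        opt.update_hessian()
        opt.zero_grad(set_to_none=True)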
+ + + + + + + + \ No newline at end of file diff --git a/_modules/lmflow/optim/yogi.html b/_modules/lmflow/optim/yogi.html new file mode 100644 index 000000000..83bdc0d64 --- /dev/null +++ b/_modules/lmflow/optim/yogi.html @@ -0,0 +1,595 @@ + + + + + + + + + + lmflow.optim.yogi — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for lmflow.optim.yogi

+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import math
+import torch
+import torch.nn as nn
+from torch.optim.optimizer import Optimizer
+
+
+[docs] +class Yogi(Optimizer): + r"""Implements Yogi Optimizer Algorithm. + It has been proposed in `Adaptive methods for Nonconvex Optimization`. + + https://papers.nips.cc/paper/8186-adaptive-methods-for-nonconvex-optimization # noqa + + Note: + Reference code: https://github.com/4rtemi5/Yogi-Optimizer_Keras + """ + + def __init__( + self, + params, + lr: float = 1e-2, + betas = (0.9, 0.999), + eps: float = 1e-3, + initial_accumulator: float = 1e-6, + weight_decay: float = 0, + ) -> None: + if lr <= 0.0: + raise ValueError("Invalid learning rate: {}".format(lr)) + if eps < 0.0: + raise ValueError("Invalid epsilon value: {}".format(eps)) + if not 0.0 <= betas[0] < 1.0: + raise ValueError( + "Invalid beta parameter at index 0: {}".format(betas[0]) + ) + if not 0.0 <= betas[1] < 1.0: + raise ValueError( + "Invalid beta parameter at index 1: {}".format(betas[1]) + ) + if weight_decay < 0: + raise ValueError( + "Invalid weight_decay value: {}".format(weight_decay) + ) + +
+[docs] + defaults = dict( + lr=lr, + betas=betas, + eps=eps, + initial_accumulator=initial_accumulator, + weight_decay=weight_decay, + )
+ + super(Yogi, self).__init__(params, defaults) + +
+[docs] + def step(self, closure = None): + r"""Performs a single optimization step. + + Arguments: + closure: A closure that reevaluates the model and returns the loss. + """ + loss = None + if closure is not None: + loss = closure() + + for group in self.param_groups: + for p in group["params"]: + if p.grad is None: + continue + grad = p.grad.data + if grad.is_sparse: + raise RuntimeError( + "Yogi does not support sparse gradients, " + "please consider SparseAdam instead" + ) + + state = self.state[p] + + # State initialization + # Followed from official implementation in tensorflow addons: + # https://github.com/tensorflow/addons/blob/master/tensorflow_addons/optimizers/yogi.py#L118 # noqa + # For more details refer to the discussion: + # https://github.com/jettify/pytorch-optimizer/issues/77 + if len(state) == 0: + state["step"] = 0 + # Exponential moving average of gradient values + state["exp_avg"] = nn.init.constant_( + torch.empty_like( + p.data, memory_format=torch.preserve_format + ), + group["initial_accumulator"], + ) + # Exponential moving average of squared gradient values + state["exp_avg_sq"] = nn.init.constant_( + torch.empty_like( + p.data, memory_format=torch.preserve_format + ), + group["initial_accumulator"], + ) + + exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"] + beta1, beta2 = group["betas"] + + state["step"] += 1 + bias_correction1 = 1 - beta1 ** state["step"] + bias_correction2 = 1 - beta2 ** state["step"] + + if group["weight_decay"] != 0: + grad = grad.add(p.data, alpha=group["weight_decay"]) + + # Decay the first and second moment running average coefficient + exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) + + grad_squared = grad.mul(grad) + + exp_avg_sq.addcmul_( + torch.sign(exp_avg_sq - grad_squared), + grad_squared, + value=-(1 - beta2), + ) + + denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_( + group["eps"] + ) + step_size = group["lr"] / bias_correction1 + p.data.addcdiv_(exp_avg, denom, value=-step_size) + + return loss
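The key difference from Adam is the second-moment update above: exp_avg_sq changes additively, by at most (1 - beta2) * grad**2 per step, with the sign of (exp_avg_sq - grad**2) deciding the direction. A standalone numerical sketch (values made up) of why a stale, large second moment shrinks far more slowly under Yogi than under Adam's multiplicative decay:

import torch

beta2 = 0.999
v = torch.tensor(100.0)   # stale, large second-moment estimate
g2 = torch.tensor(0.01)   # current squared gradient

v_adam = beta2 * v + (1 - beta2) * g2                            # 99.90001: shrinks quickly
v_yogi = v.addcmul(torch.sign(v - g2), g2, value=-(1 - beta2))   # 99.99999: shrinks slowly
print(v_adam.item(), v_yogi.item())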
+ + + + + + + + \ No newline at end of file diff --git a/_modules/lmflow/pipeline/auto_pipeline.html b/_modules/lmflow/pipeline/auto_pipeline.html new file mode 100644 index 000000000..ff25f4d15 --- /dev/null +++ b/_modules/lmflow/pipeline/auto_pipeline.html @@ -0,0 +1,540 @@ + + + + + + + + + + lmflow.pipeline.auto_pipeline — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for lmflow.pipeline.auto_pipeline

+#!/usr/bin/env python
+# coding=utf-8
+"""Return a pipeline automatically based on its name.
+"""
+import pkg_resources
+
+
+[docs] +def is_package_version_at_least(package_name, min_version): + try: + package_version = pkg_resources.get_distribution(package_name).version + if (pkg_resources.parse_version(package_version) + < pkg_resources.parse_version(min_version)): + return False + except pkg_resources.DistributionNotFound: + return False + return True
+ + +from lmflow.pipeline.evaluator import Evaluator +from lmflow.pipeline.finetuner import Finetuner +from lmflow.pipeline.inferencer import Inferencer +from lmflow.pipeline.vllm_inferencer import VLLMInferencer +from lmflow.pipeline.dpo_aligner import DPOAligner +from lmflow.pipeline.dpov2_aligner import DPOv2Aligner +from lmflow.pipeline.rm_tuner import RewardModelTuner +from lmflow.pipeline.rm_inferencer import RewardModelInferencer +from lmflow.pipeline.iterative_dpo_aligner import IterativeDPOAligner +
+[docs] +PIPELINE_MAPPING = { + "evaluator": Evaluator, + "finetuner": Finetuner, + "inferencer": Inferencer, + "vllm_inferencer": VLLMInferencer, + "rm_inferencer": RewardModelInferencer, + "dpo_aligner": DPOAligner, + "dpov2_aligner": DPOv2Aligner, + "rm_tuner": RewardModelTuner, + "iterative_dpo_aligner": IterativeDPOAligner, +}
+ + +if not is_package_version_at_least('transformers', '4.35.0'): + from lmflow.pipeline.raft_aligner import RaftAligner + PIPELINE_MAPPING['raft_aligner'] = RaftAligner + + +
+[docs] +class AutoPipeline: + """ + The class designed to return a pipeline automatically based on its name. + """ + @classmethod +
+[docs] + def get_pipeline(self, + pipeline_name, + model_args, + data_args, + pipeline_args, + *args, + **kwargs + ): + if pipeline_name not in PIPELINE_MAPPING: + raise NotImplementedError( + f'Pipeline "{pipeline_name}" is not supported' + ) + + pipeline = PIPELINE_MAPPING[pipeline_name]( + model_args, + data_args, + pipeline_args, + *args, + **kwargs + ) + return pipeline
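A hypothetical usage sketch of the factory: the argument dataclasses come from lmflow.args, and the field names below are the ones referenced elsewhere in this documentation (in practice they are usually populated via HfArgumentParser rather than constructed by hand):

from lmflow.args import ModelArguments, DatasetArguments, EvaluatorArguments
from lmflow.pipeline.auto_pipeline import AutoPipeline

model_args = ModelArguments(model_name_or_path="gpt2")
data_args = DatasetArguments(dataset_path="data/alpaca/test")
pipeline_args = EvaluatorArguments(output_dir="output_dir/evaluation")

evaluator = AutoPipeline.get_pipeline(
    pipeline_name="evaluator",   # must be a key of PIPELINE_MAPPING above
    model_args=model_args,
    data_args=data_args,
    pipeline_args=pipeline_args,
)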
+ + + + + + + + \ No newline at end of file diff --git a/_modules/lmflow/pipeline/base_aligner.html b/_modules/lmflow/pipeline/base_aligner.html new file mode 100644 index 000000000..92961b529 --- /dev/null +++ b/_modules/lmflow/pipeline/base_aligner.html @@ -0,0 +1,490 @@ + + + + + + + + + + lmflow.pipeline.base_aligner — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for lmflow.pipeline.base_aligner

+#!/usr/bin/env python
+# coding=utf-8
+""" BaseTuner: a subclass of BasePipeline.
+"""
+
+from lmflow.pipeline.base_pipeline import BasePipeline
+
+
+
+[docs] +class BaseAligner(BasePipeline): + """ A subclass of BasePipeline which is alignable. + """ + def __init__(self, *args, **kwargs): + pass + +
+[docs] + def _check_if_alignable(self, model, dataset, reward_model): + # TODO: check if the model is alignable and dataset is compatible + # TODO: add reward_model + pass
+ + +
+[docs] + def align(self, model, dataset, reward_model): + raise NotImplementedError(".align is not implemented")
+ + + + + + + + \ No newline at end of file diff --git a/_modules/lmflow/pipeline/base_pipeline.html b/_modules/lmflow/pipeline/base_pipeline.html new file mode 100644 index 000000000..fa6a7df05 --- /dev/null +++ b/_modules/lmflow/pipeline/base_pipeline.html @@ -0,0 +1,472 @@ + + + + + + + + + + lmflow.pipeline.base_pipeline — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for lmflow.pipeline.base_pipeline

+#!/usr/bin/env python
+# coding=utf-8
+""" BasePipeline.
+"""
+
+from abc import ABC         # abstract class
+
+
+[docs] +class BasePipeline(ABC): + pass
+ + + + + + + + \ No newline at end of file diff --git a/_modules/lmflow/pipeline/base_tuner.html b/_modules/lmflow/pipeline/base_tuner.html new file mode 100644 index 000000000..6271b2fd6 --- /dev/null +++ b/_modules/lmflow/pipeline/base_tuner.html @@ -0,0 +1,489 @@ + + + + + + + + + + lmflow.pipeline.base_tuner — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for lmflow.pipeline.base_tuner

+#!/usr/bin/env python
+# coding=utf-8
+""" BaseTuner: a subclass of BasePipeline.
+"""
+
+from lmflow.pipeline.base_pipeline import BasePipeline
+
+
+
+[docs] +class BaseTuner(BasePipeline): + """ A subclass of BasePipeline which is tunable. + """ + def __init__(self, *args, **kwargs): + pass + +
+[docs] + def _check_if_tunable(self, model, dataset): + # TODO: check if the model is tunable and dataset is compatible + pass
+ + +
+[docs] + def tune(self, model, dataset): + raise NotImplementedError(".tune is not implemented")
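A hypothetical minimal subclass, only to show the contract: concrete tuners such as the Finetuner (lmflow.pipeline.finetuner, shown later in this diff) override tune() with a real training loop.

from lmflow.pipeline.base_tuner import BaseTuner

class PrintingTuner(BaseTuner):
    def _check_if_tunable(self, model, dataset):
        return True   # a real tuner would validate model/dataset compatibility

    def tune(self, model, dataset):
        # a real implementation would train `model` on `dataset` and return it
        print(f"would tune {model.__class__.__name__} here")
        return model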
+ + + + + + + + \ No newline at end of file diff --git a/_modules/lmflow/pipeline/dpo_aligner.html b/_modules/lmflow/pipeline/dpo_aligner.html new file mode 100644 index 000000000..e052ecd5a --- /dev/null +++ b/_modules/lmflow/pipeline/dpo_aligner.html @@ -0,0 +1,650 @@ + + + + + + + + + + lmflow.pipeline.dpo_aligner — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for lmflow.pipeline.dpo_aligner

+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# @Time    : 7/4/2024 21:12
+# @Author  : Yu Li
+# @Site    :
+# @File    : dpo_pipeline.py
+import os
+from pathlib import Path
+from typing import Dict, Optional
+
+from trl import DPOTrainer
+from datasets import Dataset, load_dataset
+from peft import LoraConfig
+from transformers import TrainingArguments
+
+from lmflow.pipeline.base_aligner import BaseAligner
+
+
+
+[docs] +def get_paired_dataset( + data_root: str, + data_dir: str, + sanity_check: bool = False, + cache_dir: Optional[str] = None, + num_proc=24, +) -> Dataset: + """Load dataset and convert it to the necessary format. + + The dataset is converted to a dictionary with the following structure: + { + 'prompt': List[str], + 'chosen': List[str], + 'rejected': List[str], + } + + Prompts are structured as follows: + "Question: " + <prompt> + "\n\nAnswer: " + """ + data_path = Path(data_root) / data_dir + data_files = [ + x.absolute().as_posix() + for x in data_path.glob("*.json") + ] + dataset = load_dataset( + path=data_root, + split="train", + data_files=data_files, + cache_dir=cache_dir, + ) + original_columns = dataset.column_names + + if sanity_check: + dataset = dataset.select(range(min(len(dataset), 1000))) + + def return_prompt_and_responses(samples) -> Dict[str, str]: + return { + "prompt": ["Question: " + question + "\n\nAnswer: " for question in samples["question"]], + "chosen": samples["response_j"], + "rejected": samples["response_k"], + } + + return dataset.map( + return_prompt_and_responses, + batched=True, + num_proc=num_proc, + remove_columns=original_columns, + )
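For reference, one record of the expected *.json files and the fields produced by return_prompt_and_responses (the question and answer text is made up; the field names question, response_j, response_k are the ones read above):

raw = {
    "question": "What is 2 + 2?",
    "response_j": "4",   # preferred response
    "response_k": "5",   # rejected response
}

paired = {
    "prompt": "Question: " + raw["question"] + "\n\nAnswer: ",
    "chosen": raw["response_j"],
    "rejected": raw["response_k"],
}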
+ + + +
+[docs] +class DPOAligner(BaseAligner): + def __init__(self, model_args, data_args, aligner_args): +
+[docs] + self.model_args = model_args
+ +
+[docs] + self.data_args = data_args
+ +
+[docs] + self.aligner_args = aligner_args
+ + +
+[docs] + def _initialize_trainer(self, model, tokenizer): + peft_config = LoraConfig( + r=self.model_args.lora_r, + lora_alpha=self.model_args.lora_alpha, + lora_dropout=self.model_args.lora_dropout, + target_modules=[ + "q_proj", + "v_proj", + "k_proj", + "out_proj", + "fc_in", + "fc_out", + "wte", + ], + bias="none", + task_type="CAUSAL_LM", + ) + training_args = TrainingArguments( + per_device_train_batch_size=self.aligner_args.per_device_train_batch_size, + per_device_eval_batch_size=self.aligner_args.per_device_eval_batch_size, + max_steps=self.aligner_args.max_steps, + logging_steps=self.aligner_args.logging_steps, + save_steps=self.aligner_args.save_steps, + gradient_accumulation_steps=self.aligner_args.gradient_accumulation_steps, + gradient_checkpointing=self.aligner_args.gradient_checkpointing, + learning_rate=self.aligner_args.learning_rate, + evaluation_strategy="steps", + eval_steps=self.aligner_args.eval_steps, + output_dir=self.aligner_args.output_dir, + report_to=self.aligner_args.report_to, + lr_scheduler_type=self.aligner_args.lr_scheduler_type, + warmup_steps=self.aligner_args.warmup_steps, + optim=self.aligner_args.optimizer_type, + bf16=True, + remove_unused_columns=False, + run_name=self.aligner_args.run_name, + ddp_find_unused_parameters=False, + # gradient_checkpointing_kwargs=dict(use_reentrant=self.aligner_args.gradient_checkpointing_use_reentrant), + seed=self.aligner_args.seed, + ) + dpo_trainer = DPOTrainer( + model, + ref_model=None, + args=training_args, + beta=self.aligner_args.beta, + train_dataset=self.train_dataset, + eval_dataset=self.eval_dataset, + tokenizer=tokenizer, + peft_config=peft_config, + max_prompt_length=self.aligner_args.max_prompt_length, + max_length=self.aligner_args.max_length, + ) + return dpo_trainer
+ + +
+[docs] + def _load_dataset(self): + # load training set + self.train_dataset = get_paired_dataset(data_root=self.data_args.dataset_path, + data_dir="train", + sanity_check=self.aligner_args.sanity_check) + self.train_dataset = self.train_dataset.filter( + lambda x: len(x["prompt"]) + len(x["chosen"]) <= self.aligner_args.max_length + and len(x["prompt"]) + len(x["rejected"]) <= self.aligner_args.max_length + ) + # load evaluation set + self.eval_dataset = get_paired_dataset(data_root=self.data_args.dataset_path, + data_dir="test", + sanity_check=True) + self.eval_dataset = self.eval_dataset.filter( + lambda x: len(x["prompt"]) + len(x["chosen"]) <= self.aligner_args.max_length + and len(x["prompt"]) + len(x["rejected"]) <= self.aligner_args.max_length + )
+ + +
+[docs] + def align(self, model, dataset, reward_model): + tokenizer = model.get_tokenizer() + tokenizer.pad_token = tokenizer.eos_token + tokenizer.pad_token_id = tokenizer.eos_token_id + self._load_dataset() + + wrapped_model = model + model = model.get_backend_model() + + dpo_trainer = self._initialize_trainer(model, tokenizer) + dpo_trainer.train() + dpo_trainer.save_model(self.aligner_args.output_dir) + + # 7. save + output_dir = os.path.join(self.aligner_args.output_dir, "final_checkpoint") + dpo_trainer.model.save_pretrained(output_dir)
+ + + + + + + + \ No newline at end of file diff --git a/_modules/lmflow/pipeline/dpov2_aligner.html b/_modules/lmflow/pipeline/dpov2_aligner.html new file mode 100644 index 000000000..305d380ec --- /dev/null +++ b/_modules/lmflow/pipeline/dpov2_aligner.html @@ -0,0 +1,875 @@ + + + + + + + + + + lmflow.pipeline.dpov2_aligner — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for lmflow.pipeline.dpov2_aligner

+import copy
+import importlib.resources as pkg_resources
+import logging
+import os
+import subprocess
+import sys
+from typing import Optional, List, Tuple, Dict, Union
+
+import numpy as np
+from tqdm import tqdm
+import torch
+from transformers import TrainingArguments
+
+from lmflow.pipeline.utils.dpov2_trainer import DPOv2Trainer
+from lmflow.pipeline.base_aligner import BaseAligner
+from lmflow.args import (
+    ModelArguments,
+    DatasetArguments,
+    DPOv2AlignerArguments
+)
+from lmflow.utils.common import (
+    make_shell_args_from_dataclass, 
+    add_dataclass_attr_prefix, 
+    create_copied_dataclass
+)
+from lmflow.models.hf_decoder_model import HFDecoderModel
+from lmflow.datasets.dataset import Dataset, KEY_SCORE, KEY_TYPE, KEY_INSTANCES
+from lmflow.utils.constants import MEMORY_SAFE_DPOV2_ALIGN_ENV_VAR_TO_REMOVE
+
+
+
+[docs] +logger = logging.getLogger(__name__)
+ + + +
+[docs] +ReferenceModelArguments = create_copied_dataclass( + original_dataclass=ModelArguments, + field_prefix="reference_", + class_prefix="Reference" +)
+ + + +
+[docs] +class DPOv2Aligner(BaseAligner): + def __init__( + self, + model_args: ModelArguments, + data_args: DatasetArguments, + aligner_args: DPOv2AlignerArguments, + ref_model_args: ModelArguments, + ): +
+[docs] + self.model_args = model_args
+ +
+[docs] + self.ref_model_args = ref_model_args
+ +
+[docs] + self.data_args = data_args
+ +
+[docs] + self.aligner_args = aligner_args
+ + + +
+[docs] + def align( + self, + model: HFDecoderModel, + ref_model: HFDecoderModel, + train_dataset: Dataset, + eval_dataset: Dataset, + transform_dataset_in_place: bool=True, + ): + if (train_dataset.get_type() not in ["text_to_scored_textlist", "paired_text_to_text"]) or \ + (eval_dataset.get_type() not in ["text_to_scored_textlist", "paired_text_to_text"]): + raise ValueError( + f"Unsupported dataset type {train_dataset.get_type()} for DPOv2 aligner." + ) + + # step 0. setting up + if self.aligner_args.gradient_checkpointing: + logger.warning( + "Setting backend_model.config.use_cache to False since using gradient checkpointing" + ) + model.get_backend_model().config.use_cache = False + ref_model.get_backend_model().config.use_cache = False + + # step 1. prepare datasets + if train_dataset.get_type() == "text_to_scored_textlist": + train_dataset = self.convert_to_paired_dataset( + source_dataset=train_dataset, + sampling_paired_method=self.aligner_args.sampling_paired_method, + length_penalty=self.aligner_args.length_penalty, + margin_scale=self.aligner_args.margin_scale, + use_fast=False, + ) + if self.data_args.max_train_samples: + train_dataset.backend_dataset = train_dataset.backend_dataset.select(range(self.data_args.max_train_samples)) + + if eval_dataset.get_type() == "text_to_scored_textlist": + eval_dataset = self.convert_to_paired_dataset( + source_dataset=eval_dataset, + sampling_paired_method=self.aligner_args.sampling_paired_method, + margin_scale=self.aligner_args.margin_scale, + use_fast=False, + ) + + # step 2. prepare trainer + dpo_trainer = DPOv2Trainer( + model.get_backend_model(), + ref_model.get_backend_model(), + train_dataset=train_dataset.get_backend_dataset(), # tokenization is done in the trainer + eval_dataset=eval_dataset.get_backend_dataset(), + tokenizer=model.tokenizer, + args=self.__prepare_training_args(self.aligner_args), + beta=self.aligner_args.beta, + loss_type=self.aligner_args.loss_type, + max_prompt_length=self.aligner_args.max_prompt_length, + max_length=self.aligner_args.max_length, + mask_prompt=self.aligner_args.mask_prompt, + len_penalty=self.aligner_args.length_penalty, + # preprocessing_num_workers=self.data_args.preprocessing_num_workers, # will trigger TypeError: cannot pickle 'torch._C._distributed_c10d.ProcessGroup' object + ) + + # step 3. train + dpo_trainer.train() + dpo_trainer.save_model(self.aligner_args.output_dir) + + # step 4. save + output_dir = os.path.join(self.aligner_args.output_dir, "final_checkpoint") + dpo_trainer.model.save_pretrained(output_dir) + + # step 5. release resources + with torch.no_grad(): + torch.cuda.empty_cache()
+ + + +
+[docs] + def __prepare_training_args( + self, + args: DPOv2AlignerArguments, + ) -> TrainingArguments: + training_args = TrainingArguments( + per_device_train_batch_size=args.per_device_train_batch_size, + per_device_eval_batch_size=args.per_device_eval_batch_size, + num_train_epochs=args.num_train_epochs, + save_strategy=args.save_strategy, + logging_steps=args.logging_steps, + save_steps=args.save_steps, + gradient_accumulation_steps=args.gradient_accumulation_steps, + gradient_checkpointing=args.gradient_checkpointing, + learning_rate=args.learning_rate, + evaluation_strategy=args.evaluation_strategy, + eval_steps=args.eval_steps, + output_dir=args.output_dir, + lr_scheduler_type=args.lr_scheduler_type, + warmup_steps=args.warmup_steps, + optim=args.optim, + bf16=args.bf16, + report_to=args.report_to, + run_name=args.run_name, + remove_unused_columns=False, # DO NOT CHANGE THIS, may cause error https://discuss.huggingface.co/t/indexerror-invalid-key-16-is-out-of-bounds-for-size-0/14298/3 + ) + logger.debug(f"Actual training arguments for dpo trainer: {training_args}") + + return training_args
+ + + +
+[docs] + def convert_to_paired_dataset( + self, + source_dataset: Dataset, + sampling_paired_method: str="random", + length_penalty: float=0.0, + margin_scale: float=1.0, + use_fast: bool=False, + ) -> Dataset: + """Convert a scored one to multiple (text_to_scored_textlist) to a paired dataset by rejection sampling. + """ + output_dict = { + KEY_INSTANCES: [] + } + if source_dataset.get_type() in ["text_to_scored_textlist"]: + output_dict[KEY_TYPE] = "paired_text_to_text" + + for sample in tqdm(source_dataset.get_backend_dataset(), desc="Converting to paired dataset"): + sample_output_dict = {} + lengths = self._calc_response_lengths(sample["output"], source_dataset.get_type()) + penalized_rewards = self._calc_reward_with_length_penalty( + rewards=[content[KEY_SCORE] for content in sample["output"]], + lengths=lengths, + length_penalty=length_penalty + ) + chosen_idx, rejected_idx = self.sampling_paired_idx_from_rewards( + rewards=penalized_rewards, + sampling_paired_method=sampling_paired_method, + use_fast=use_fast + ) + + sample_output_dict["prompt"] = sample["input"] + sample_output_dict["chosen"] = sample["output"][chosen_idx]["text"] + sample_output_dict["rejected"] = sample["output"][rejected_idx]["text"] + sample_output_dict["margin"] = (sample["output"][chosen_idx][KEY_SCORE] - sample["output"][rejected_idx][KEY_SCORE]) * margin_scale + output_dict[KEY_INSTANCES].append(sample_output_dict) + + output_dataset_args = copy.deepcopy(source_dataset.data_args) + output_dataset_args.dataset_path = None + output_dataset_args.dataset_name = f"paired_{output_dataset_args.dataset_name}" + output_dataset = Dataset(output_dataset_args) + output_dataset = output_dataset.from_dict(output_dict) + + return output_dataset
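An illustration of the conversion above on one made-up "text_to_scored_textlist" sample, assuming KEY_SCORE resolves to a "score" field, max_min sampling, zero length penalty, and margin_scale = 1.0:

sample = {
    "input": "Question: What is 2 + 2?\n\nAnswer: ",
    "output": [
        {"text": "4", "score": 1.2},
        {"text": "5", "score": -0.3},
        {"text": "four", "score": 0.8},
    ],
}

paired_instance = {
    "prompt": sample["input"],
    "chosen": "4",                     # highest reward
    "rejected": "5",                   # lowest reward
    "margin": (1.2 - (-0.3)) * 1.0,    # (chosen score - rejected score) * margin_scale
}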
+ + + +
+[docs] + def _calc_response_lengths( + self, + outputs: List[Union[str, Dict[str, str]]], + dataset_type: str, + ) -> List[int]: + all_lengths = [] + if dataset_type == "text_to_scored_textlist": + all_lengths = [len(output["text"]) for output in outputs] + + else: + raise NotImplementedError( + f"Unknown dataset type {dataset_type} when calculating the response length." + ) + + return all_lengths
+ + + +
+[docs] + def _calc_reward_with_length_penalty( + self, + rewards: List[float], + lengths: List[int], + length_penalty: float, + ) -> List[float]: + """When length_penalty > 0, penalize the longer sequence by subtracting + length_penalty * length from the reward. Vice versa when length_penalty < 0. + """ + assert len(rewards) == len(lengths), "The number of rewards and lengths should be the same." + return [reward - length_penalty * length for reward, length in zip(rewards, lengths)]
+ + + +
+[docs] + def sampling_paired_idx_from_rewards( + self, + rewards: List[float], + sampling_paired_method: str="random", + use_fast: bool=False, + ) -> Tuple[int, int]: + """Prepare the dataset for DPO training by rejection sampling. + We implement different strategies to select pairs, including + random: randomly select two instances + max_min: best v.s. worst + max_max: best v.s. second best + max_random: best v.s. random from the remaining + """ + if use_fast: + return self._sampling_paired_idx_from_rewards_fast(rewards, sampling_paired_method) + else: + return self._sampling_paired_idx_from_rewards(rewards, sampling_paired_method)
+ + + +
+[docs] + def _sampling_paired_idx_from_rewards( + self, + rewards: List[float], + sampling_paired_method: str="random" + ) -> Tuple[int, int]: + idx_0, idx_1 = -1, -1 + + if sampling_paired_method == "random": + idx_0, idx_1 = np.random.choice(len(rewards), size=2, replace=False) + elif sampling_paired_method == "max_min": + idx_0, idx_1 = np.argmax(rewards), np.argmin(rewards) + elif sampling_paired_method == "max_max": + sorted_indices = np.argsort(rewards) + idx_0, idx_1 = sorted_indices[-1], sorted_indices[-2] + elif sampling_paired_method == "max_random": + idx_0 = np.argmax(rewards) + idx_1 = np.random.choice([i for i in range(len(rewards)) if i != idx_0]) + else: + raise ValueError(f"Unknown sampling method: {sampling_paired_method}") + + chosen_idx, rejected_idx = (idx_0, idx_1) if rewards[idx_0] > rewards[idx_1] else (idx_1, idx_0) + + return chosen_idx, rejected_idx
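A worked example, on toy rewards, of how the four strategies pick indices; whichever index has the higher reward becomes "chosen":

import numpy as np

rewards = [0.1, 0.9, 0.4, 0.7]
sorted_idx = np.argsort(rewards)   # array([0, 2, 3, 1])

# max_min:    best vs. worst              -> chosen=1, rejected=0
# max_max:    best vs. second best        -> chosen=1, rejected=3
# max_random: best vs. a random other idx -> chosen=1, rejected in {0, 2, 3}
# random:     two distinct random indices -> ordered afterwards by reward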
+ + + +
+[docs] + def _sampling_paired_idx_from_rewards_fast( + self, + rewards: List[float], + sampling_paired_method: str="random" + ) -> Tuple[int, int]: + idx_0, idx_1 = -1, -1 + + if sampling_paired_method == "random": + idx_0, idx_1 = 0, 1 + elif sampling_paired_method == "max_min": + idx_0, idx_1 = np.argmax(rewards), np.argmin(rewards) + elif sampling_paired_method == "max_max": + sorted_indices = np.argsort(rewards) + idx_0, idx_1 = sorted_indices[-1], sorted_indices[-2] + elif sampling_paired_method == "max_random": + idx_0 = np.argmax(rewards) + idx_1 = 0 if idx_0 != 0 else 1 + else: + raise ValueError(f"Unknown sampling method: {sampling_paired_method}") + + chosen_idx, rejected_idx = (idx_0, idx_1) if rewards[idx_0] > rewards[idx_1] else (idx_1, idx_0) + + return chosen_idx, rejected_idx
+
+ + + +
+[docs] +class MemorySafeDPOv2Aligner: + def __init__( + self, + model_args: ModelArguments, + data_args: DatasetArguments, + aligner_args: DPOv2AlignerArguments, + ref_model_args: ModelArguments, + ): +
+[docs] + self.model_args = model_args
+ +
+[docs] + self.ref_model_args = ReferenceModelArguments(**add_dataclass_attr_prefix(ref_model_args, 'reference_'))
+ +
+[docs] + self.data_args = data_args
+ +
+[docs] + self.aligner_args = aligner_args
+ +
+[docs] + self.aligner_file_path = pkg_resources.files("lmflow.pipeline.utils") / "memory_safe_dpov2_align.py"
+ + +
+[docs] + def align(self): + aligner_args = make_shell_args_from_dataclass( + dataclass_objects=[ + self.model_args, + self.data_args, + self.aligner_args, + self.ref_model_args + ], + format="shell", + ignored_args_list=['accelerator_config', 'fsdp_config', '_n_gpu'], + ) + cmd = ( + f"accelerate launch --config_file {self.aligner_args.accelerate_config_file}" + + " " + + str(self.aligner_file_path) + + " " + + aligner_args + ) + current_env = os.environ.copy() + for var in MEMORY_SAFE_DPOV2_ALIGN_ENV_VAR_TO_REMOVE: + current_env.pop(var, None) + + cli_res = subprocess.run( + args=cmd, + stdout=sys.stdout, + stderr=sys.stdout, + shell=True, + preexec_fn=os.setsid, + env=current_env, + ) + logger.info(f"MemorySafeDPOv2Aligner subprocess run finished, info at finish: {cli_res}") + + if cli_res.returncode != 0: + print(cli_res.stderr) + raise RuntimeError(f"Error during MemorySafeDPOv2Aligner: {cli_res}")
+ + + + + + + + \ No newline at end of file diff --git a/_modules/lmflow/pipeline/evaluator.html b/_modules/lmflow/pipeline/evaluator.html new file mode 100644 index 000000000..43ed20b13 --- /dev/null +++ b/_modules/lmflow/pipeline/evaluator.html @@ -0,0 +1,1004 @@ + + + + + + + + + + lmflow.pipeline.evaluator — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for lmflow.pipeline.evaluator

+"""The Evaluator class simplifies the process of running evaluation on a language model provided by a HFDecoderModel instance imported from the lmflow package. The class constructor takes three dictionaries as arguments: model_args containing arguments related to the language model, data_args containing arguments related to the data used for evaluation, and evaluator_args containing other arguments for the evaluation process.
+
+The class has two methods: create_dataloader() that loads the data from the test file, creates a data loader, and returns it with the size of the data, and evaluate(model) that generates output text given input text. It uses the create_dataloader() method to load the data, iterates over the data in mini-batches, and encodes the input text with the encode() method of the HFDecoderModel class. Then, it generates output text using the evaluate() method of the HFDecoderModel class, decodes the generated output text using the decode() method of the HFDecoderModel class, and writes the output to a file in the output directory. The method also logs some information to the console and Weights and Biases if the use_wandb argument is True.
+"""
+import os
+import torch
+import wandb
+import deepspeed
+import sys
+import numpy as np
+import datetime
+import json
+# TODO: remove later
+from accelerate import Accelerator
+from transformers import AutoConfig
+import torch.distributed as dist
+
+from lmflow.datasets.dataset import Dataset
+from lmflow.pipeline.base_pipeline import BasePipeline
+from lmflow.models.hf_decoder_model import HFDecoderModel
+from lmflow.utils.data_utils import set_random_seed, batchlize, answer_extraction
+os.environ["TOKENIZERS_PARALLELISM"] = "false"  # To avoid warnings about parallelism in tokenizers
+
+
+[docs] +class Evaluator(BasePipeline): + """ + Initializes the `Evaluator` class with given arguments. + + Parameters + ------------ + model_args : ModelArguments object. + Contains the arguments required to load the model. + + data_args : DatasetArguments object. + Contains the arguments required to load the dataset. + + evaluator_args : EvaluatorArguments object. + Contains the arguments required to perform evaluation. + + + """ + def __init__(self, model_args, data_args, evaluator_args): + # our method +
+[docs] + self.data_args = data_args
+ +
+[docs] + self.evaluator_args = evaluator_args
+ +
+[docs] + self.model_args = model_args
+ + + # logger + if(self.evaluator_args.use_wandb == True): + wandb.init(project="lmflow_evaluation") + # random seed + set_random_seed(self.evaluator_args.random_seed) +
+[docs] + self.local_rank = int(os.getenv("LOCAL_RANK", "0"))
+ +
+[docs] + self.world_size = int(os.getenv("WORLD_SIZE", "1"))
+ + torch.cuda.set_device(self.local_rank) # NOTE: cpu-only machine will have error + + if evaluator_args.use_accelerator_for_evaluator: + self.accelerator = Accelerator() + self.accelerator.wait_for_everyone() + else: + deepspeed.init_distributed() + +
+[docs] + self.config = AutoConfig.from_pretrained(model_args.model_name_or_path)
+ + try: + self.model_hidden_size = self.config.hidden_size + except: + print("Error in setting hidden size, use the default size 1024") + self.model_hidden_size = 1024 # gpt2 seems do not have hidden_size in config + + print(f"model_hidden_size = {self.model_hidden_size}") + # batch size has to be divisible by world_size, but can be bigger than world_size +
+[docs] + train_batch_size = self.evaluator_args.inference_batch_size_per_device * self.world_size
+ +
+[docs] + self.evaluator_args.minibatch_size = train_batch_size
+ +
+[docs] + self.block_size = evaluator_args.evaluate_block_size
+ + # dataloader, data_size = create_dataloader(args) # load dataset + + +
+[docs] + def create_dataloader(self, dataset: Dataset): + data_dict = dataset.to_dict() + inputs = [ instance["input"] for instance in data_dict["instances"] ] + outputs = [ instance["output"] for instance in data_dict["instances"] ] + dataset_size = len(outputs) + dataset_buf = [] + for idx in range(dataset_size): + dataset_buf.append({ + "input": inputs[idx], + "output": outputs[idx], + "input_idx": idx + }) + + dataloader = batchlize( + dataset_buf, + self.evaluator_args.minibatch_size, + self.evaluator_args.random_shuffle + ) + print(f"Successfully create dataloader with size {len(dataloader)},batch_size {self.evaluator_args.minibatch_size}.") + + return dataloader, dataset_size
+ + + + # TODO: Split for better unittest + +
+[docs] + def _match(self, predicted_answer, groundtruth, answer_type=None): + case_insensitive_types = [ + "strategyqa", + "coin_flip", + "pubmedqa", + "binary_choice", + "medmcqa", + "usmle", + ] + if answer_type in case_insensitive_types: + return predicted_answer.lower() == groundtruth.lower() + else: + return predicted_answer == groundtruth
+ + + +
+[docs] + def evaluate( + self, + model, + dataset: Dataset, + metric = "accuracy", + verbose=True, + ): + """ + Perform Evaluation for a model + + Parameters + ------------ + model : TunableModel object. + TunableModel to perform inference + + dataset : Dataset object. + + + """ + if metric in ["acc", "accuracy"]: + if self.evaluator_args.use_accelerator_for_evaluator: + acc = self._evaluate_acc_with_accelerator(model, dataset, verbose=verbose) + else: + acc = self._evaluate_acc_with_deepspeed(model, dataset, verbose=verbose) + print(f"Evaluating final accuracy: {acc}") + return acc + elif metric in ["ppl", "perplexity"]: + ppl = self._evaluate_ppl(model, dataset, verbose=verbose) + print(f"Evaluating final perplexity: {ppl}") + return ppl + elif metric in ["nll", "neg_log_likelihood"]: + nll = self._evaluate_nll(model, dataset, verbose=verbose) + print(f"Evaluating final negative log likelihood: {nll}") + return nll + else: + raise NotImplementedError(f"metric {metric} is not supported")
+ + + +
+[docs] + def _evaluate_acc_with_accelerator(self, model, dataset, verbose=True): + dataloader, data_size = self.create_dataloader(dataset) + if self.accelerator.is_local_main_process: + if not os.path.exists(self.evaluator_args.output_dir): + os.makedirs(self.evaluator_args.output_dir) + output_writer = open(f"{self.evaluator_args.output_dir}/evaluation.json", "w") + + correct_number_list = [] + for batch_index, batch in enumerate(dataloader): + if batch_index * self.world_size >= self.data_args.max_eval_samples: + break + if self.local_rank*self.evaluator_args.inference_batch_size_per_device >= len(batch): + current_batch = batch[:self.evaluator_args.inference_batch_size_per_device] + else: + current_batch = batch[self.local_rank*self.evaluator_args.inference_batch_size_per_device:(self.local_rank+1)*self.evaluator_args.inference_batch_size_per_device] + prompt_structure = self.evaluator_args.prompt_structure + input = [prompt_structure.format(input=i['input']) for i in current_batch] + output = [i['output'] for i in current_batch] + + batch_input = model.encode(input, return_tensors="pt",padding=True).to(self.accelerator.device) + inputs = batch_input['input_ids'] + mask = batch_input['attention_mask'] + with self.accelerator.autocast(): + outputs = model.inference(inputs, max_new_tokens=self.evaluator_args.max_new_tokens,attention_mask=mask,temperature=self.evaluator_args.temperature, repetition_penalty=self.evaluator_args.repetition_penalty,use_accelerator=self.evaluator_args.use_accelerator_for_evaluator) + text_out = model.decode(outputs, skip_special_tokens=True) + decoded_input = model.decode(inputs, skip_special_tokens=True,) + prompt_length = [len(i) for i in decoded_input] + text_out = [text_out[i][prompt_length[i]:] for i in range(len(text_out))] + answer_type = self.evaluator_args.answer_type + pred_answer = [] + for i in text_out: + pred_answer.append(answer_extraction( + i, + answer_type=answer_type, + )) + if verbose: + print(f"batch_index{batch_index} rank{self.local_rank}:\n question={input}\n prediction={text_out}\n") + print(f"predicted answer: {pred_answer} \n") + print(f"groundtruth answer: {output} \n") + + if self.local_rank * self.evaluator_args.inference_batch_size_per_device >= len(batch): + correct_ = 0 + else: + correct_ = 0 + for i in range(len(pred_answer)): + if self._match(pred_answer[i], output[i], answer_type): + correct_ += 1 + + # collect accuracy from all gpus + all_process = torch.tensor([correct_], dtype=torch.float32, device=self.local_rank) + all_process = self.accelerator.gather(all_process) + correct_ = sum(all_process.tolist()) + correct_number_list.append(correct_) + + # collect predictions from all gpus + output_dict = {"question": input, + "prediction": text_out, + "pred_answer": pred_answer, + "answer": output} + if(self.world_size > 1): + all_process_list = [{}] * self.world_size + dist.gather_object(output_dict, all_process_list if dist.get_rank() == 0 else None, dst=0) + else: + all_process_list = [output_dict] + + if self.accelerator.is_local_main_process: + current_total = (batch_index+1) * self.world_size * self.evaluator_args.inference_batch_size_per_device + current_accuracy = np.sum(correct_number_list) / current_total if int(current_total) < data_size else np.sum(correct_number_list) / data_size + print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), f"{int(current_total) if int(current_total) < data_size else data_size} / {data_size} has been finished, # correct = { np.sum(correct_number_list)}, current accuracy = 
{current_accuracy}") + + if(self.evaluator_args.use_wandb == True): + wandb.log({"Accuracy": current_accuracy}) + + for index, output in enumerate(all_process_list): + output_json = json.dumps(output) + output_writer.write(output_json + '\n') + + if self.accelerator.is_local_main_process: + current_accuracy = np.sum(correct_number_list) / data_size + print(f"# Correct = {np.sum(correct_number_list)}, # Total = {data_size}, Final accuracy = ", current_accuracy) + output_writer.close() + return np.sum(correct_number_list) / data_size
+ + +
+[docs] + def _evaluate_acc_with_deepspeed(self, model, dataset, verbose=True): + dataloader, data_size = self.create_dataloader(dataset) + if not dist.is_initialized() or dist.get_rank() == 0: + if not os.path.exists(self.evaluator_args.output_dir): + os.makedirs(self.evaluator_args.output_dir) + output_writer = open(f"{self.evaluator_args.output_dir}/evaluation.json", "w") + + correct_number_list = [] + for batch_index, batch in enumerate(dataloader): + if batch_index * self.world_size >= self.data_args.max_eval_samples: + break + if self.local_rank*self.evaluator_args.inference_batch_size_per_device >= len(batch): + current_batch = batch[:self.evaluator_args.inference_batch_size_per_device] + else: + current_batch = batch[self.local_rank*self.evaluator_args.inference_batch_size_per_device:(self.local_rank+1)*self.evaluator_args.inference_batch_size_per_device] + prompt_structure = self.evaluator_args.prompt_structure + input = [prompt_structure.format(input=i['input']) for i in current_batch] + output = [i['output'] for i in current_batch] + input_idx = [i['input_idx'] for i in current_batch] + batch_input = model.encode(input, return_tensors="pt",padding=True).to(device=self.local_rank) + inputs = batch_input['input_ids'] + mask = batch_input['attention_mask'] + outputs = model.inference(inputs, max_new_tokens=self.evaluator_args.max_new_tokens, attention_mask=mask,temperature=self.evaluator_args.temperature, repetition_penalty=self.evaluator_args.repetition_penalty) + text_out = model.decode(outputs, skip_special_tokens=True) + # # only return the generation, trucating the input + decoded_input = model.decode(inputs, skip_special_tokens=True,) + prompt_length = [len(i) for i in decoded_input] + text_out = [text_out[i][prompt_length[i]:] for i in range(len(text_out))] + answer_type = self.evaluator_args.answer_type + pred_answer = [] + for i in text_out: + pred_answer.append(answer_extraction( + i, + answer_type=answer_type, + )) + if verbose: + print(f"batch_index{batch_index} rank{self.local_rank}:\n question={input}\n prediction={text_out}\n") + print(f"predicted answer: {pred_answer} \n") + print(f"groundtruth answer: {output} \n") + + if self.local_rank * self.evaluator_args.inference_batch_size_per_device >= len(batch): + correct_ = 0 + else: + correct_ = 0 + for i in range(len(pred_answer)): + if self._match(pred_answer[i], output[i], answer_type): + correct_ += 1 + + # collect accuracy from all gpus + all_process = torch.tensor([correct_], dtype=torch.float32, device=self.local_rank) + dist.all_reduce(all_process, dist.ReduceOp.SUM, async_op=False) + correct_ = all_process.tolist() + correct_number_list.append(correct_) + + # collect predictions from all gpus + output_dict = {"question": input, + "prediction": text_out, + "pred_answer": pred_answer, + "answer": output} + all_process_list = [{}] * self.world_size + + dist.gather_object(output_dict, all_process_list if dist.get_rank() == 0 else None, dst=0) + if not dist.is_initialized() or dist.get_rank() == 0: + current_total = (batch_index+1) * self.world_size * self.evaluator_args.inference_batch_size_per_device + current_accuracy = np.sum(correct_number_list) / current_total if int(current_total) < data_size else np.sum(correct_number_list) / data_size + print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), f"{int(current_total) if int(current_total) < data_size else data_size} / {data_size} has been finished, # correct = { np.sum(correct_number_list)}, current accuracy = {current_accuracy}") + + 
if(self.evaluator_args.use_wandb == True): + wandb.log({"Accuracy": current_accuracy}) + + for index, output in enumerate(all_process_list): + output_json = json.dumps(output) + output_writer.write(output_json + '\n') + + if not dist.is_initialized() or dist.get_rank() == 0: + current_accuracy = np.sum(correct_number_list) / data_size + print(f"# Correct = {np.sum(correct_number_list)}, # Total = {data_size}, Final accuracy = ", current_accuracy) + output_writer.close() + return np.sum(correct_number_list) / data_size
+ + +
+[docs] + def _evaluate_ppl(self, model, dataset: Dataset, verbose=True): + data_dict = dataset.to_dict() + if data_dict['type'] == 'text2text': + raise NotImplementedError("ppl evaluation is currently not supported for text2text dataset, please use text_only dataset.") + texts = [ instance["text"] for instance in data_dict["instances"] ] + encodings = model.get_tokenizer()("\n\n".join(texts), return_tensors="pt") + # Define some constant + if self.model_args.truncate_to_model_max_length: + try: + max_length = min(model.get_backend_model().config.n_positions, model.get_max_length()) + except: + max_length = min(1024, model.get_max_length()) + else: + max_length = self.block_size + + if verbose: + print(f"The maximum sequence length : {max_length}") + seq_len = encodings.input_ids.size(1) + + nlls = [] + prev_end_loc = 0 + for begin_loc in range(0, seq_len, self.block_size): + end_loc = min(begin_loc + max_length, seq_len) + trg_len = end_loc - prev_end_loc # may be different from block_size on last loop + input_ids = encodings.input_ids[:, begin_loc:end_loc].to(device=self.local_rank) + target_ids = input_ids.clone() + target_ids[:, :-trg_len] = -100 + + with torch.no_grad(): + outputs = model.get_backend_model()(input_ids, labels=target_ids) + # loss is calculated using CrossEntropyLoss which averages over valid labels + # N.B. the model only calculates loss over trg_len - 1 labels, because it internally shifts the labels + # to the left by 1. + neg_log_likelihood = outputs.loss + + nlls.append(neg_log_likelihood) + prev_end_loc = end_loc + if verbose: + print(f"Evaluating PPL: {int(begin_loc/self.block_size) + 1} / {int(seq_len/self.block_size)} Complete, current ppl : {torch.exp(torch.stack(nlls).mean())}") + if end_loc == seq_len: + break + ppl = torch.exp(torch.stack(nlls).mean()) + return ppl
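The loop above is a standard sliding-window perplexity evaluation; a standalone sketch (made-up lengths) of how begin_loc, end_loc, and trg_len move over a long sequence before ppl = exp(mean of the per-window NLLs):

seq_len, block_size, max_length = 2500, 1024, 1024
prev_end_loc = 0
for begin_loc in range(0, seq_len, block_size):
    end_loc = min(begin_loc + max_length, seq_len)
    trg_len = end_loc - prev_end_loc    # tokens that actually receive labels this window
    print(begin_loc, end_loc, trg_len)  # (0, 1024, 1024), (1024, 2048, 1024), (2048, 2500, 452)
    prev_end_loc = end_loc
    if end_loc == seq_len:
        break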
+ + + +
+[docs] + def _evaluate_nll( + self, + model, + dataset: Dataset, + verbose=True, + ): + """ + Evaluates negative log likelihood of the model over a dataset. + + NLL = -1/N sum_{i=1}^N sum_{j=1}^|w_i| ln(p(w_{i,j}|context_window)), + + where N is the number of data samples, w_{i,j} is the j-th token in + i-th sample. Here "context_window" = p(w_{i,start}, w_{i,start+1}, ..., + p_{i,j-1} with start = max(0, j - window_length + 1). "window_length" + is normally the maximum length accepted by the model. + + Returns: + A float which represents the negative log likelihood. + """ + data_dict = dataset.to_dict() + + # Handles prompt structure + if dataset.get_type() == "text2text": + prompt = self.evaluator_args.prompt_structure + data_dict["instances"] = [ + { + "input": prompt.format(input=instance["input"]), + "output": instance["output"] + } + for instance in data_dict["instances"] + ] + + dataset = dataset.from_dict(data_dict) + tokenized_dataset = model.tokenize(dataset, add_special_tokens=False) + tokenized_dataset = tokenized_dataset.get_backend_dataset() + encoding_list = [ + { + "input_ids": torch.tensor([input_ids]), + "labels": torch.tensor([labels]), + } + for input_ids, labels in zip(tokenized_dataset["input_ids"], + tokenized_dataset["labels"]) + ] + + # Gets context window length + try: + max_length = min(model.get_backend_model().config.n_positions, + model.get_max_length()) + except: + max_length = min(1024, model.get_max_length()) + + nlls = [] + full_nlls = [] + num_samples = len(encoding_list) + for sample_idx, encodings in enumerate(encoding_list): + seq_len = encodings["input_ids"].size(1) + + prev_end_loc = 0 + for begin_loc in range(0, seq_len, self.block_size): + end_loc = min(begin_loc + max_length, seq_len) + + # may be different from block_size on last loop + trg_len = end_loc - prev_end_loc + input_ids = encodings["input_ids"][:, begin_loc:end_loc] + input_ids = input_ids.to(device=self.local_rank) + + labels = encodings["labels"][:, begin_loc:end_loc] + target_ids = labels.clone() + full_target_ids = input_ids.clone() + + def get_nll(label_ids, nll_list): + label_ids[:, :-trg_len] = -100 + label_ids = label_ids.to(device=self.local_rank) + + # Valid labels are from 0 to `vocab_size` + num_valid_labels = torch.count_nonzero(label_ids >= 0) + if label_ids[0, 0] != -100: + num_valid_labels -= 1 + + if not torch.all(label_ids == -100): + with torch.no_grad(): + outputs = model.get_backend_model()( + input_ids, labels=label_ids + ) + # loss is calculated using CrossEntropyLoss which + # sums over valid labels N.B. the model only + # calculates loss over trg_len - 1 labels, because + # it internally shifts the labels to the left by 1. 
+ neg_log_likelihood = outputs.loss * num_valid_labels + else: + neg_log_likelihood = torch.zeros([]).to( + device=self.local_rank + ) + + nll_list.append(neg_log_likelihood) + + get_nll(target_ids, nlls) + get_nll(full_target_ids, full_nlls) + + current_output_nll = torch.stack(nlls).sum() / (sample_idx + 1) + current_full_nll = torch.stack(full_nlls).sum() / (sample_idx + 1) + + prev_end_loc = end_loc + if verbose: + if dataset.get_type() == "text_only": + print( + f"Evaluating negative log likelihood:" + f" {sample_idx + 1} / {num_samples} Complete," + f" current nll: {current_full_nll}" + ) + elif dataset.get_type() == "text2text": + print( + f"Evaluating negative log likelihood:" + f" {sample_idx + 1} / {num_samples} Complete," + f" current full nll / input nll / output nll:" + f" {current_full_nll} /" + f" {current_full_nll - current_output_nll} /" + f" {current_output_nll}" + ) + else: + raise NotImplementedError( + "f{dataset.get_type()} typed datasets are not" + " supported" + ) + + if end_loc == seq_len: + break + + mean_nll = torch.stack(nlls).sum() / num_samples + return mean_nll
+
+ + + + + + + + \ No newline at end of file diff --git a/_modules/lmflow/pipeline/finetuner.html b/_modules/lmflow/pipeline/finetuner.html new file mode 100644 index 000000000..dd2ffc5f6 --- /dev/null +++ b/_modules/lmflow/pipeline/finetuner.html @@ -0,0 +1,1122 @@ + + + + + + + + + + lmflow.pipeline.finetuner — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for lmflow.pipeline.finetuner

+#!/usr/bin/env python
+# coding=utf-8
+"""The Finetuner class simplifies the process of running finetuning process on a language model for a TunableModel instance with given dataset.
+"""
+
+import copy
+import logging
+import os
+import sys
+from typing import Any, Iterable, Optional, Tuple
+
+import datasets
+import transformers
+import evaluate
+from itertools import chain
+from transformers import (
+    Trainer,
+    default_data_collator,
+    set_seed,
+)
+from copy import deepcopy
+from transformers import PreTrainedModel, TrainingArguments
+from transformers.trainer_utils import get_last_checkpoint
+from transformers.trainer_callback import (
+    TrainerCallback,
+    TrainerControl,
+    TrainerState,
+)
+from transformers.utils import (
+    is_sagemaker_mp_enabled,
+    send_example_telemetry,
+)
+import numpy as np
+
+import lmflow.optim.optimizers as optim
+from lmflow.args import OptimizerNames
+from lmflow.datasets.dataset import Dataset
+from lmflow.pipeline.base_tuner import BaseTuner
+from lmflow.pipeline.utils.peft_trainer import PeftTrainer, PeftSavingCallback
+
+
+
+[docs] +logger = logging.getLogger(__name__)
+ + + +
+[docs] +class Finetuner(BaseTuner): + """ + Initializes the `Finetuner` class with given arguments. + + Parameters + ------------ + model_args : ModelArguments object. + Contains the arguments required to load the model. + + data_args : DatasetArguments object. + Contains the arguments required to load the dataset. + + finetuner_args : FinetunerArguments object. + Contains the arguments required to perform finetuning. + + args : Optional. + Positional arguments. + + kwargs : Optional. + Keyword arguments. + + """ + def __init__(self, model_args, data_args, finetuner_args, *args, **kwargs): + +
+[docs] + self.model_args = model_args
+ +
+[docs] + self.data_args = data_args
+ +
+[docs] + self.finetuner_args = finetuner_args
+ + + # Sending telemetry. Tracking the example usage helps us better + # allocate resources to maintain them. The information sent is the one + # passed as arguments along with your Python/PyTorch versions. + send_example_telemetry("run_clm", model_args, data_args) + + # Setup logging + logging.basicConfig( + format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", + datefmt="%m/%d/%Y %H:%M:%S", + handlers=[logging.StreamHandler(sys.stdout)], + ) + +
+[docs] + log_level = finetuner_args.get_process_log_level()
+ + logger.setLevel(log_level) + datasets.utils.logging.set_verbosity(log_level) + transformers.utils.logging.set_verbosity(log_level) + transformers.utils.logging.enable_default_handler() + transformers.utils.logging.enable_explicit_format() + + # Log on each process the small summary: + logger.warning( + f"Process rank: {finetuner_args.local_rank}," + f" device: {finetuner_args.device}," + f" n_gpu: {finetuner_args.n_gpu}," + f"distributed training: {bool(finetuner_args.local_rank != -1)}," + f" 16-bits training: {finetuner_args.fp16}" + ) + logger.info(f"Training/evaluation parameters {finetuner_args}") + + # Detecting last checkpoint. +
+[docs] + last_checkpoint = None
+ + if os.path.isdir(finetuner_args.output_dir) and finetuner_args.do_train and not finetuner_args.overwrite_output_dir: + last_checkpoint = get_last_checkpoint(finetuner_args.output_dir) + if last_checkpoint is None and len(os.listdir(finetuner_args.output_dir)) > 0: + raise ValueError( + f"Output directory ({finetuner_args.output_dir}) already" + " exists and is not empty. " + "Use --overwrite_output_dir to overcome." + ) + elif last_checkpoint is not None and finetuner_args.resume_from_checkpoint is None: + logger.info( + f"Checkpoint detected, resuming training at" + f" {last_checkpoint}. To avoid this behavior, change" + " the `--output_dir` or add `--overwrite_output_dir` to" + " train from scratch." + ) + self.last_checkpoint = last_checkpoint + + # Set seed before initializing model. + set_seed(finetuner_args.seed) + + +
+[docs] + def group_text(self, tokenized_datasets, model_max_length): + """ + Groups texts together to form blocks of maximum length `model_max_length` and returns the processed data as + a dictionary. + """ + data_args = self.data_args + finetuner_args = self.finetuner_args + + if data_args.block_size is None: + block_size = model_max_length + if block_size > 1024: + logger.warning( + "The chosen tokenizer supports a `model_max_length` that is" + " longer than the default `block_size` value" + " of 1024. If you would like to use a longer `block_size`" + " up to `tokenizer.model_max_length` you can override this " + " default with `--block_size xxx`." + ) + block_size = 1024 + else: + if data_args.block_size > model_max_length: + if self.model_args.truncate_to_model_max_length: + logger.warning( + f"The block_size passed ({data_args.block_size}) is larger" + f" than the maximum length for the model" + f"({model_max_length})." + f" Using block_size={model_max_length}." + f"If you would like to use a longer 'block_size' that is" + f" longer than the maximum length supported by the model," + f" you can override this behavior with" + f"default with `--truncate_to_model_max_length False`." + ) + block_size = model_max_length + else: + logger.warning( + f"The block_size passed ({data_args.block_size}) is larger" + f"than the maximum length for the model" + f"({model_max_length})." + f"Using block_size={data_args.block_size}.") + block_size = data_args.block_size + else: + block_size = data_args.block_size + # Main data processing function that will concatenate all texts from + # our dataset and generate chunks of block_size. + def group_texts(examples): + # Concatenate all texts. + concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()} + total_length = len(concatenated_examples[list(examples.keys())[0]]) + # We drop the small remainder, we could add padding if the model + # supported it instead of this drop, you can customize this part to + # your needs. + total_length = (total_length // block_size) * block_size + # Split by chunks of max_len. + result = { + k: [t[i : i + block_size] for i in range(0, total_length, block_size)] + for k, t in concatenated_examples.items() + } + return result + + # Note that with `batched=True`, this map processes 1,000 texts + # together, so group_texts throws away a remainder for each of those + # groups of 1,000 texts. You can adjust that batch_size here but a + # higher value might be slower to preprocess. + # + # To speed up this part, we use multiprocessing. See the documentation + # of the map method for more information: + # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map + with finetuner_args.main_process_first(desc="grouping texts together"): + group_batch_size = data_args.group_texts_batch_size + if data_args.disable_group_texts: + group_batch_size = 1 + if not data_args.streaming: + lm_datasets = tokenized_datasets.map( + group_texts, + batched=True, + batch_size=group_batch_size, + num_proc=data_args.preprocessing_num_workers, + load_from_cache_file=not data_args.overwrite_cache, + desc=f"Grouping texts in chunks of {block_size}", + ) + else: + lm_datasets = tokenized_datasets.map( + group_texts, + batched=True, + batch_size=group_batch_size, + ) + + return lm_datasets
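What group_texts does to a tokenized batch, shown on toy token ids (the remainder that does not fill a full block is dropped, exactly as in the function above):

from itertools import chain

examples = {"input_ids": [[1, 2, 3], [4, 5], [6, 7, 8, 9]]}
block_size = 4

concatenated = {k: list(chain(*v)) for k, v in examples.items()}            # [1, ..., 9]
total_length = (len(concatenated["input_ids"]) // block_size) * block_size  # 8
chunks = {
    k: [t[i:i + block_size] for i in range(0, total_length, block_size)]
    for k, t in concatenated.items()
}
print(chunks)   # {'input_ids': [[1, 2, 3, 4], [5, 6, 7, 8]]}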
+ + +
+[docs] + def create_customized_optimizer(self, base_trainer_class, model_args): + class CustomizedOptimTrainer(base_trainer_class): + + @staticmethod + def get_optimizer_cls_and_kwargs( + args: TrainingArguments, + model: Optional[PreTrainedModel] = None, + ) -> Tuple[Any, Any]: + # parse args.optim_args + optim_args = {} + if args.customized_optim_args: + for mapping in args.customized_optim_args.replace(" ", "").split(","): + key, value = mapping.split("=") + optim_args[key] = value + + optimizer_kwargs = {"lr": args.learning_rate} + + if args.customized_optim == OptimizerNames.DUMMY: + optimizer_cls = optim.Dummy + dummy_kwargs = { + "betas": (args.optim_dummy_beta1, args.optim_dummy_beta2), + } + optimizer_kwargs.update(dummy_kwargs) + elif args.customized_optim == OptimizerNames.ADABELIEF: + optimizer_cls = optim.AdaBelief + adabelief_kwargs = { + "betas": (args.optim_beta1, args.optim_beta2), + "weight_decay": (args.optim_weight_decay) + } + optimizer_kwargs.update(adabelief_kwargs) + elif args.customized_optim == OptimizerNames.ADABOUND: + optimizer_cls = optim.AdaBound + adabound_kwargs = { + "betas": (args.optim_beta1, args.optim_beta2), + "weight_decay": (args.optim_weight_decay) + } + optimizer_kwargs.update(adabound_kwargs) + elif args.customized_optim == OptimizerNames.LARS: + optimizer_cls = optim.LARS + lars_kwargs = { + "momentum": (args.optim_momentum), + "weight_decay": (args.optim_weight_decay), + } + optimizer_kwargs.update(lars_kwargs) + elif args.customized_optim == OptimizerNames.LAMB: + optimizer_cls = optim.Lamb + lamb_kwargs = { + "betas": (args.optim_beta1, args.optim_beta2), + "weight_decay": (args.optim_weight_decay), + } + optimizer_kwargs.update(lamb_kwargs) + elif args.customized_optim == OptimizerNames.ADAMAX: + optimizer_cls = optim.Adamax + adamax_kwargs = { + "betas": (args.optim_beta1, args.optim_beta2), + "weight_decay": (args.optim_weight_decay), + } + optimizer_kwargs.update(adamax_kwargs) + elif args.customized_optim == OptimizerNames.NADAM: + optimizer_cls = optim.NAdam + nadam_kwargs = { + "betas": (args.optim_beta1, args.optim_beta2), + "weight_decay": (args.optim_weight_decay), + } + optimizer_kwargs.update(nadam_kwargs) + elif args.customized_optim == OptimizerNames.RADAM: + optimizer_cls = optim.RAdam + radam_kwargs = { + "betas": (args.optim_beta1, args.optim_beta2), + "weight_decay": (args.optim_weight_decay), + } + optimizer_kwargs.update(radam_kwargs) + elif args.customized_optim == OptimizerNames.ADAMP: + optimizer_cls = optim.AdamP + adamp_kwargs = { + "betas": (args.optim_beta1, args.optim_beta2), + "weight_decay": (args.optim_weight_decay), + } + optimizer_kwargs.update(adamp_kwargs) + elif args.customized_optim == OptimizerNames.SGDP: + optimizer_cls = optim.SGDP + sgdp_kwargs = { + "momentum": (args.optim_momentum), + "weight_decay": (args.optim_weight_decay), + } + optimizer_kwargs.update(sgdp_kwargs) + elif args.customized_optim == OptimizerNames.YOGI: + optimizer_cls = optim.Yogi + yogi_kwargs = { + "betas": (args.optim_beta1, args.optim_beta2), + "weight_decay": (args.optim_weight_decay), + } + optimizer_kwargs.update(yogi_kwargs) + elif args.customized_optim == OptimizerNames.SOPHIA: + optimizer_cls = optim.SophiaG + sophia_kwargs = { + "betas": (args.optim_beta1, args.optim_beta2), + "weight_decay": (args.optim_weight_decay), + } + optimizer_kwargs.update(sophia_kwargs) + elif args.customized_optim == OptimizerNames.ADAM: + optimizer_cls = optim.Adam + adam_kwargs = { + "betas": (args.optim_beta1, args.optim_beta2), + } + 
optimizer_kwargs.update(adam_kwargs) + elif args.customized_optim == OptimizerNames.NOVOGRAD: + optimizer_cls = optim.NovoGrad + novograd_kwargs = { + "betas": (args.optim_beta1, args.optim_beta2), + "weight_decay": (args.optim_weight_decay), + } + optimizer_kwargs.update(novograd_kwargs) + elif args.customized_optim == OptimizerNames.ADADELTA: + optimizer_cls = optim.Adadelta + adadelta_kwargs = { + } + optimizer_kwargs.update(adadelta_kwargs) + elif args.customized_optim == OptimizerNames.ADAGRAD: + optimizer_cls = optim.AdaGrad + adagrad_kwargs = { + } + optimizer_kwargs.update(adagrad_kwargs) + elif args.customized_optim == OptimizerNames.ADAMW_SCHEDULE_FREE: + optimizer_cls = optim.AdamWScheduleFree + adamw_schedule_free_kwargs = { + "betas": (args.optim_beta1, args.optim_beta2), + "weight_decay": (args.optim_weight_decay), + } + optimizer_kwargs.update(adamw_schedule_free_kwargs) + elif args.customized_optim == OptimizerNames.SGD_SCHEDULE_FREE: + optimizer_cls = optim.SGDScheduleFree + sgd_schedule_free_kwargs = { + "momentum": (args.optim_momentum), + "weight_decay": (args.optim_weight_decay), + } + optimizer_kwargs.update(sgd_schedule_free_kwargs) + elif args.customized_optim == OptimizerNames.ADAN: + optimizer_cls = optim.Adan + adan_kwargs = { + "betas": (args.optim_beta1, args.optim_beta2, args.optim_beta3), + "weight_decay": (args.optim_weight_decay), + } + optimizer_kwargs.update(adan_kwargs) + else: + raise ValueError( + f"Trainer cannot instantiate unsupported optimizer: " + f" {args.customized_optim}" + ) + return optimizer_cls, optimizer_kwargs + + def create_optimizer(self): + opt_model = self.model_wrapped if is_sagemaker_mp_enabled() else self.model + + if self.optimizer is None: + decay_parameters = self.get_decay_parameter_names(opt_model) + optimizer_grouped_parameters = [ + { + "params": [ + p for n, p in opt_model.named_parameters() + if (n in decay_parameters and p.requires_grad) + ], + "weight_decay": self.args.weight_decay, + }, + { + "params": [ + p for n, p in opt_model.named_parameters() + if (n not in decay_parameters and p.requires_grad) + ], + "weight_decay": 0.0, + }, + ] + + optimizer_cls, optimizer_kwargs = CustomizedOptimTrainer.get_optimizer_cls_and_kwargs(self.args, opt_model) + + # Overwrite `params` in case it's created by + # `get_optimizer_cls_and_kwargs` e.g. for GaLore optimizer. + if "params" in optimizer_kwargs: + optimizer_grouped_parameters = optimizer_kwargs.pop( + "params" + ) + + # For layer-wise dummy optimizers we overwrite + # optimizer_grouped_parameters with `optimizer_dict` to + # avoid arguments conflicts. + if "optimizer_dict" in optimizer_kwargs: + optimizer_grouped_parameters = optimizer_kwargs.pop( + "optimizer_dict" + ) + + self.optimizer = optimizer_cls( + optimizer_grouped_parameters, + **optimizer_kwargs + ) + if is_sagemaker_mp_enabled(): + self.optimizer = smp.DistributedOptimizer(self.optimizer) + + return CustomizedOptimTrainer
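+
+ # A standalone sketch (not part of LMFlow) of how `--customized_optim_args` is parsed at
+ # the top of `get_optimizer_cls_and_kwargs` above: a comma-separated "key=value" string
+ # becomes a keyword dict. Note the values stay strings; the chosen optimizer class is
+ # expected to coerce them.
+ def parse_optim_args(arg_string):
+     optim_args = {}
+     if arg_string:
+         for mapping in arg_string.replace(" ", "").split(","):
+             key, value = mapping.split("=")
+             optim_args[key] = value
+     return optim_args
+
+ print(parse_optim_args("momentum=0.9, weight_decay=0.01"))
+ # {'momentum': '0.9', 'weight_decay': '0.01'}
+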
+ + +
+[docs] + def tune(self, + model, + dataset, + transform_dataset_in_place=True, + data_collator=None): + """ + Perform tuning for a model + + Parameters + ------------ + model : TunableModel object. + TunableModel to perform tuning. + + dataset: + dataset to train model. + + """ + model_args = self.model_args + data_args = self.data_args + finetuner_args = self.finetuner_args + if not transform_dataset_in_place: + dataset = copy.deepcopy(dataset) + + # Tokenization and text grouping must be done in the main process + if dataset.backend == "custom_multi_modal": + dataset.backend_dataset.register_tokenizer( + model.tokenizer, model.image_processor) + lm_dataset = dataset + else: + with finetuner_args.main_process_first(desc="dataset map tokenization"): + tokenized_dataset = model.tokenize(dataset) + if data_args.disable_group_texts: + lm_dataset = tokenized_dataset + else: + lm_dataset = self.group_text( + tokenized_dataset, + model_max_length=model.get_max_length(), + ) + + train_dataset = lm_dataset.get_backend_dataset() + logger.info(f"Number of train samples: {len(train_dataset)}") + + if finetuner_args.do_eval: + eval_dataset_args = deepcopy(data_args) + eval_dataset_args.dataset_path = finetuner_args.eval_dataset_path + eval_dataset = Dataset(eval_dataset_args) + with finetuner_args.main_process_first(desc="dataset map tokenization"): + tokenized_dataset = model.tokenize(eval_dataset) + if data_args.disable_group_texts: + lm_dataset = tokenized_dataset + else: + lm_dataset = self.group_text( + tokenized_dataset, + model_max_length=model.get_max_length(), + ) + eval_dataset = lm_dataset.get_backend_dataset() + logger.info(f"Number of eval samples: {len(eval_dataset)}") + + def preprocess_logits_for_metrics(logits, labels): + if isinstance(logits, tuple): + # Depending on the model and config, logits may contain extra tensors, + # like past_key_values, but logits always come first + logits = logits[0] + return logits.argmax(dim=-1) + + metric = evaluate.load("accuracy") + + def compute_metrics(eval_preds): + preds, labels = eval_preds + # preds have the same shape as the labels, after the argmax(-1) has been calculated + # by preprocess_logits_for_metrics but we need to shift the labels + labels = labels[:, 1:].reshape(-1) + preds = preds[:, :-1].reshape(-1) + return metric.compute(predictions=preds, references=labels) + + if finetuner_args.do_train: + if data_args.max_train_samples is not None: + max_train_samples = min(len(train_dataset), data_args.max_train_samples) + train_dataset = train_dataset.select(range(max_train_samples)) + + # Initialize our Trainer + training_args = finetuner_args + + if model_args.use_lora: + FinetuningTrainer = PeftTrainer + trainer_callbacks = [PeftSavingCallback] + else: + FinetuningTrainer = Trainer + trainer_callbacks = [] + if data_collator is None: + data_collator = default_data_collator + + if training_args.use_customized_optim: + BaseTrainer = FinetuningTrainer + FinetuningTrainer = self.create_customized_optimizer( + BaseTrainer, model_args + ) + + if training_args.use_lisa: + class DynamicLayerActivationCallback(TrainerCallback): + def __init__(self, n_layers, interval_steps, model): + super().__init__() + self.n_layers = n_layers + self.interval_steps = interval_steps + self.model = model + + # Determine the way to access layers based on the model type + class_to_layers_map = { + 'LlamaForCausalLM': 'model.model.layers', + 'Qwen2ForCausalLM': 'model.model.layers', + 'MistralForCausalLM': 'model.model.layers', + 'MixtralForCausalLM': 
'model.model.layers', + 'GemmaForCausalLM': 'model.model.layers', + 'GPT2LMHeadModel': 'model.transformer.h', + } + model_class_name = self.model.__class__.__name__ + if model_class_name in class_to_layers_map: + self.layers_attribute = class_to_layers_map[model_class_name] + else: + self.layers_attribute = training_args.lisa_layers_attribute + self.total_layers = len(eval('self.' + self.layers_attribute)) # Dynamically execute to get the number of layers + + self.active_layers_indices = [] + + def freeze_all_layers(self): + layers = eval('self.' + self.layers_attribute) # Dynamically execute to get layers + for layer in layers: + for param in layer.parameters(): + param.requires_grad = False + + def on_step_begin(self, args, state, control, **kwargs): + # Check if it's time to switch active layers, including at step 0 + if state.global_step % self.interval_steps == 0: + self.switch_active_layers() + + def switch_active_layers(self): + # First, disable gradients for all layers + self.freeze_all_layers() + + # Randomly select n_layers to activate + layers = eval('self.' + self.layers_attribute) # Re-fetch layer references + self.active_layers_indices = np.random.choice(range(self.total_layers), self.n_layers, replace=False) + print(f"Activating layers at indices: {self.active_layers_indices} for the next steps.", flush=True) + + # Enable gradients only for the selected layers + for idx in self.active_layers_indices: + for param in layers[idx].parameters(): + param.requires_grad = True + + # Instantiate the callback + dynamic_layer_activation_callback = DynamicLayerActivationCallback( + n_layers=training_args.lisa_activated_layers, # Number of layers to activate + interval_steps=training_args.lisa_interval_steps, # Step interval to update active layers + model=model.get_backend_model() + ) + + trainer_callbacks.append(dynamic_layer_activation_callback) + + trainer = FinetuningTrainer( + model=model.get_backend_model(), + args=training_args, + train_dataset=train_dataset if training_args.do_train else None, + eval_dataset=eval_dataset if training_args.do_eval else None, + tokenizer=model.get_tokenizer(), + # Data collator will default to DataCollatorWithPadding, so we change it. 
+ data_collator=data_collator,
+ compute_metrics=compute_metrics if training_args.do_eval else None,
+ preprocess_logits_for_metrics=preprocess_logits_for_metrics if training_args.do_eval else None,
+ callbacks=trainer_callbacks
+ )
+ # Training
+ if training_args.do_train:
+ checkpoint = None
+ last_checkpoint = self.last_checkpoint
+ if training_args.resume_from_checkpoint is not None:
+ checkpoint = training_args.resume_from_checkpoint
+ elif last_checkpoint is not None:
+ checkpoint = last_checkpoint
+ train_result = trainer.train(resume_from_checkpoint=checkpoint)
+
+ if not model_args.use_lora:
+ trainer.save_model() # Saves the tokenizer too for easy upload
+ else:
+ if model_args.save_aggregated_lora:
+ model.merge_lora_weights()
+ model.save(finetuner_args.output_dir, model_args.save_aggregated_lora)
+ # save language_projection for multi-modal model;
+ if self.finetuner_args.save_language_projection:
+ language_projection_state = trainer.model.language_projection.state_dict()
+ torch.save(
+ language_projection_state,
+ osp.join(
+ self.finetuner_args.output_dir,
+ "language_projection.pth"))
+ metrics = train_result.metrics
+
+ max_train_samples = (
+ data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
+ )
+ metrics["train_samples"] = min(max_train_samples, len(train_dataset))
+
+ trainer.log_metrics("train", metrics)
+ trainer.save_metrics("train", metrics)
+ trainer.save_state()
+
+ kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-generation"}
+ if data_args.dataset_name is not None:
+ kwargs["dataset_tags"] = data_args.dataset_name
+ if data_args.dataset_config_name is not None:
+ kwargs["dataset_args"] = data_args.dataset_config_name
+ kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
+ else:
+ kwargs["dataset"] = data_args.dataset_name
+
+ if training_args.push_to_hub:
+ trainer.push_to_hub(**kwargs)
+ else:
+ trainer.create_model_card(**kwargs)
+
+ return model
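+
+ # A hedged usage sketch (not taken from the LMFlow docs; the argument objects are assumed
+ # to be already-parsed model/data/finetuner argument dataclasses, and HFDecoderModel is
+ # used as the TunableModel, as elsewhere in this package):
+ #
+ #   model = HFDecoderModel(model_args)
+ #   dataset = Dataset(data_args)
+ #   finetuner = Finetuner(model_args=model_args, data_args=data_args, finetuner_args=finetuner_args)
+ #   tuned_model = finetuner.tune(model=model, dataset=dataset)
+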
+ + + + + + + + \ No newline at end of file diff --git a/_modules/lmflow/pipeline/inferencer.html b/_modules/lmflow/pipeline/inferencer.html new file mode 100644 index 000000000..99143c1fe --- /dev/null +++ b/_modules/lmflow/pipeline/inferencer.html @@ -0,0 +1,1200 @@ + + + + + + + + + + lmflow.pipeline.inferencer — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for lmflow.pipeline.inferencer

+#!/usr/bin/env python
+# coding=utf-8
+"""The Inferencer class simplifies the process of model inferencing."""
+
+import copy
+import os
+import torch
+import wandb
+import deepspeed
+import sys
+import numpy as np
+import datetime
+import json
+import time
+import logging
+from typing import Dict, List
+from concurrent.futures import ThreadPoolExecutor
+import subprocess
+
+from accelerate import Accelerator
+from transformers import AutoConfig
+import torch.distributed as dist
+import torch.nn.functional as F
+
+from lmflow.args import DatasetArguments
+from lmflow.datasets.dataset import Dataset
+from lmflow.pipeline.base_pipeline import BasePipeline
+from lmflow.models.hf_decoder_model import HFDecoderModel
+from lmflow.utils.data_utils import (set_random_seed, batchlize,
+                                     answer_extraction, process_image_flag)
+from lmflow.utils.constants import IMAGE_TOKEN_INDEX
+os.environ["TOKENIZERS_PARALLELISM"] = "false"  # To avoid warnings about parallelism in tokenizers
+
+[docs] +def rstrip_partial_utf8(string): + return string.replace("\ufffd", "")
+ + +
+[docs] +supported_dataset_type = [ + "text_only", + "image_text", +]
+ + +
+[docs] +logger = logging.getLogger(__name__)
+ + +
+[docs] +class Inferencer(BasePipeline): + """ + Initializes the `Inferencer` class with given arguments. + + Parameters + ------------ + model_args : ModelArguments object. + Contains the arguments required to load the model. + + data_args : DatasetArguments object. + Contains the arguments required to load the dataset. + + inferencer_args : InferencerArguments object. + Contains the arguments required to perform inference. + + + """ + def __init__(self, model_args, data_args, inferencer_args): +
+[docs] + self.data_args = data_args
+ +
+[docs] + self.inferencer_args = inferencer_args
+ +
+[docs] + self.model_args = model_args
+ + + set_random_seed(self.inferencer_args.random_seed) + +
+[docs] + self.local_rank = int(os.getenv("LOCAL_RANK", "0"))
+ +
+[docs] + self.world_size = int(os.getenv("WORLD_SIZE", "1"))
+ + if inferencer_args.device == "gpu": + torch.cuda.set_device(self.local_rank) # NOTE: cpu-only machine will have error + deepspeed.init_distributed() + else: + os.environ["MASTER_ADDR"] = "localhost" + os.environ["MASTER_PORT"] = "15000" + dist.init_process_group( + "gloo", rank=self.local_rank, world_size=self.world_size + ) + +
+[docs] + self.config = AutoConfig.from_pretrained(model_args.model_name_or_path, trust_remote_code=True)
+
+ try:
+ self.model_hidden_size = self.config.hidden_size
+ except AttributeError:
+ print("Error in getting hidden size, using the default size 1024")
+ self.model_hidden_size = 1024  # some configs (e.g. gpt2) do not expose hidden_size
+
+ if inferencer_args.use_accelerator:
+ self.accelerator = Accelerator()
+ self.accelerator.wait_for_everyone()
+
+
+[docs] + def create_dataloader(self, dataset: Dataset): + r"""Batchlize dataset and format it to dataloader. + + Args: + dataset (Dataset): the dataset object + + Output: + dataloader (batchlize): the dataloader object + dataset_size (int): the length of the dataset + + """ + if dataset.get_type() == "text_only": + data_dict = dataset.to_dict() + inputs = [instance["text"] for instance in data_dict["instances"] ] + elif dataset.get_type() == "image_text": + inputs = dataset.to_list() + + dataset_size = len(inputs) + dataset_buf = [] + for idx in range(dataset_size): + dataset_buf.append({ + "input": inputs[idx], + "input_idx": idx + }) + + dataloader = batchlize( + dataset_buf, + batch_size=1, + random_shuffle=False, + ) + return dataloader, dataset_size
+ + + +
+[docs] + def inference( + self, + model, + dataset: Dataset, + max_new_tokens: int=100, + temperature: float=0.0, + prompt_structure: str='{input}', + remove_image_flag: bool=False, + chatbot_type: str="mini_gpt", + ): + """ + Perform inference for a model + + Parameters + ------------ + model : TunableModel object. + TunableModel to perform inference + + dataset : Dataset object. + + + Returns: + + output_dataset: Dataset object. + """ + if dataset.get_type() not in supported_dataset_type: + raise NotImplementedError( + 'input dataset should have type {}'.format( + supported_dataset_type)) + dataloader, data_size = self.create_dataloader(dataset) + + # The output dataset + output_dict = { + "type": "text_only", + "instances": [ + ] + } + + for batch_index, batch in enumerate(dataloader): + current_batch = batch[0] # batch size is 1 + if isinstance(current_batch['input'], str): + input = prompt_structure.format(input=current_batch['input']) + else: + input = current_batch['input'] + input['text'] = prompt_structure.format(input=input['text']) + + if False and 'images' in input and isinstance(input['images'], list): + input['images'] = np.array(input['images']) + if remove_image_flag: + # remove the image flag <ImageHere> in tokenization; + if chatbot_type == "mini_gpt": + image_split_flag = "<ImageHere>" + elif chatbot_type: + image_split_flag = "<image>" + else: + raise NotImplementedError + input['text'] = input['text'].split(image_split_flag) + # TODO remove this code by update the tokenizer + input_ids = [] + attention_mask = [] + image_token_indexes = [] + temp_input = copy.deepcopy(input) + for idx in range(len(input['text'])): + temp_input['text'] = input['text'][idx] + temp_inputs = model.encode( + temp_input, + return_tensors="pt", + add_special_tokens=idx == 0 + ).to(device=self.local_rank) + input_ids.append(temp_inputs['input_ids']) + attention_mask.append(temp_inputs['attention_mask']) + if chatbot_type == "llava": + # add the flag for inserting the image. + # TODO should merge the way of handling image flag in minigpt and llava. 
+ index_tensor = torch.tensor( + [IMAGE_TOKEN_INDEX] + ).to(device=self.local_rank) + index_tensor = index_tensor.reshape(1, 1) + input_ids.append(index_tensor) + attention_mask.append( + torch.ones(1,1).to(device=self.local_rank)) + image_token_indexes.append( + temp_inputs["input_ids"].shape[1]) + if len(image_token_indexes) > 1: + image_token_indexes = image_token_indexes[:-1] + if chatbot_type == "llava": + input_ids = input_ids[:-1] + attention_mask = attention_mask[:-1] + inputs = temp_inputs + inputs["input_ids"] = torch.cat(input_ids, dim=1) + inputs["attention_mask"] = torch.cat(attention_mask, dim=1) + else: + if self.inferencer_args.device == "gpu": + inputs = model.encode( + input, return_tensors="pt" + ).to(device=self.local_rank) + elif self.inferencer_args.device == "cpu": + inputs = model.encode( + input, return_tensors="pt" + ).to(device='cpu') + else: + raise NotImplementedError( + f"device \"{self.inferencer_args.device}\" is not supported" + ) + + if self.inferencer_args.use_accelerator: + inputs = inputs.to(self.accelerator.device) + + + if remove_image_flag: + inputs["image_token_indexes"] = image_token_indexes + inputs["one_sample_multiple_images"] = True + + if self.inferencer_args.use_accelerator: + with self.accelerator.autocast(): + outputs = model.inference( + inputs, + max_new_tokens=max_new_tokens, + temperature=self.inferencer_args.temperature, + repetition_penalty=self.inferencer_args.repetition_penalty, + do_sample=self.inferencer_args.do_sample, + use_accelerator=True, + ) + else: + outputs = model.inference( + inputs, + max_new_tokens=max_new_tokens, + temperature=self.inferencer_args.temperature, + repetition_penalty=self.inferencer_args.repetition_penalty, + do_sample=self.inferencer_args.do_sample, + ) + + # only return the generation, trucating the input + if self.model_args.arch_type != "vision_encoder_decoder": + text_out = model.decode(outputs[0], skip_special_tokens=True) + prompt_length = len(model.decode(inputs[0], skip_special_tokens=True,)) + text_out = text_out[prompt_length:] + else: + # to avoid redundant/missing leading space problem, we use a + # part of the input text + input_text = inputs['input_ids'][0][-1:] + text_out = model.decode(torch.cat([input_text, outputs[0]]), skip_special_tokens=True) + prompt_length = len(model.decode(input_text, skip_special_tokens=True,)) + text_out = text_out[prompt_length:] + + output_dict["instances"].append({ "text": text_out }) + + output_dataset = Dataset(DatasetArguments(dataset_path = None)) + output_dataset = output_dataset.from_dict(output_dict) + + return output_dataset
+ + +
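+
+ # A hedged usage sketch (not from the LMFlow docs): build a one-prompt "text_only" dataset
+ # the same way the output dataset is built in `inference` above, then run inference. The
+ # argument objects are assumed to be already-parsed LMFlow argument dataclasses and
+ # `model` a loaded HFDecoderModel.
+ #
+ #   prompt_dataset = Dataset(DatasetArguments(dataset_path=None)).from_dict({
+ #       "type": "text_only",
+ #       "instances": [{"text": "What is LMFlow?"}],
+ #   })
+ #   inferencer = Inferencer(model_args, data_args, inferencer_args)
+ #   result = inferencer.inference(model, prompt_dataset, max_new_tokens=64)
+ #   print(result.to_dict()["instances"][0]["text"])
+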
+[docs] + def stream_inference( + self, + context, + model, + max_new_tokens, + token_per_step, + temperature, + end_string, + input_dataset, + remove_image_flag: bool=False, + ): + response = "" + history = [] + if "ChatGLMModel" in self.config.architectures: + for response, history in model.get_backend_model().stream_chat(model.get_tokenizer(), context, history=history): + response = rstrip_partial_utf8(response) + yield response, False + else: + for _ in range(0, self.inferencer_args.max_new_tokens // token_per_step): + output_dataset = self.inference( + model=model, + dataset=input_dataset, + max_new_tokens=token_per_step, + temperature=self.inferencer_args.temperature, + remove_image_flag=remove_image_flag, + ) + + new_append_text = output_dataset.to_dict()["instances"][0]["text"] + new_append_text = rstrip_partial_utf8(new_append_text) + response += new_append_text + + input_dict = input_dataset.to_dict() + input_dict["instances"][0]["text"] += new_append_text + input_dataset = input_dataset.from_dict(input_dict) + + flag_break = False + try: + index = response.index(end_string) + flag_break = True + except ValueError: + response += end_string + index = response.index(end_string) + + response = response[:index] + + yield response, flag_break
+
+ + + +
+[docs] +class SpeculativeInferencer(Inferencer): + """ + Ref: [arXiv:2211.17192v2](https://arxiv.org/abs/2211.17192) + + Parameters + ------------ + target_model_args : ModelArguments object. + Contains the arguments required to load the target model. + + draft_model_args : ModelArguments object. + Contains the arguments required to load the draft model. + + data_args : DatasetArguments object. + Contains the arguments required to load the dataset. + + inferencer_args : InferencerArguments object. + Contains the arguments required to perform inference. + + + """ + def __init__(self, model_args, draft_model_args, data_args, inferencer_args): + super().__init__(model_args, data_args, inferencer_args) +
+[docs] + self.draft_model_args = draft_model_args
+ + +
+[docs] + self.draft_config = AutoConfig.from_pretrained(draft_model_args.model_name_or_path, trust_remote_code=True)
+
+ try:
+ self.draft_model_hidden_size = self.draft_config.hidden_size
+ except AttributeError:
+ print("Error in getting hidden size for draft model, using the default size 768")
+ self.draft_model_hidden_size = 768
+
+
+ @staticmethod
+
+[docs] + def score_to_prob(scores: torch.Tensor, + temperature: float = 0., + top_p: float = 1.,) -> torch.Tensor: + """Convert scores (NOT softmaxed tensor) to probabilities with support for temperature, top-p sampling, and argmax. + + Parameters + ---------- + scores : torch.Tensor + Input scores. + temperature : float, optional + Temperature parameter for controlling randomness. Higher values make the distribution more uniform, + lower values make it peakier. When temperature <= 1e-6, argmax is used. by default 0.0 + top_p : float, optional + Top-p sampling parameter for controlling the cumulative probability threshold, by default 1.0 (no threshold) + + Returns + ------- + torch.Tensor + Probability distribution after adjustments. + """ + assert temperature >= 0.0 + assert 0.0 < top_p <= 1.0 + + if temperature <= 1e-6: + final_prob = F.one_hot(scores.argmax(dim=1), num_classes=scores.size(1)).float() + else: + scores /= temperature + if top_p < 1.0: + sorted_scores, _ = torch.sort(scores, descending=True) + probs = sorted_scores.softmax(dim=1) + cumulative_probs = torch.cumsum(probs, dim=1) + mask = cumulative_probs <= top_p + if mask.any(): + thresholded_probs = probs * mask + thresholded_probs = thresholded_probs / thresholded_probs.sum(dim=1, keepdim=True) + final_prob = torch.zeros_like(scores) + final_prob.scatter_add_(1, sorted_scores.argsort(dim=1), thresholded_probs) + else: + final_prob = scores.softmax(dim=1) + + else: + final_prob = scores.softmax(dim=1) + + return final_prob
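+
+ # A standalone illustration (plain PyTorch, not part of LMFlow) of the regimes handled by
+ # `score_to_prob` above: a near-zero temperature collapses to a one-hot argmax
+ # distribution, while a higher temperature gives a flatter softmax; top_p < 1 would then
+ # keep only the smallest set of tokens whose cumulative probability reaches p.
+ import torch
+ import torch.nn.functional as F
+
+ scores = torch.tensor([[2.0, 1.0, 0.5, -1.0]])
+ greedy = F.one_hot(scores.argmax(dim=1), num_classes=scores.size(1)).float()
+ softened = (scores / 1.5).softmax(dim=1)  # temperature = 1.5
+ print(greedy)    # tensor([[1., 0., 0., 0.]])
+ print(softened)  # a flatter distribution over the four tokens
+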
+ + + + @staticmethod +
+[docs] + def sample(prob: torch.Tensor, num_samples: int = 1) -> Dict: + """Sample from a tensor of probabilities + """ + sampled_indices = torch.multinomial(prob, num_samples=num_samples, replacement=True) + return {'sampled_token': sampled_indices, 'sampled_prob': prob.gather(dim=1, index=sampled_indices), 'all_prob': prob}
+ + + + @staticmethod +
+[docs] + def predict_next_token(model: HFDecoderModel, input_ids: torch.Tensor, num_new_tokens: int = 1): + """Predict the next token given the input_ids. + """ + output = model.inference(input_ids, + use_accelerator=True, + max_new_tokens=num_new_tokens, + return_dict_in_generate=True, + output_scores=True, + do_sample=True, + num_beams=1) + return output
+ + + +
+[docs] + def autoregressive_sampling(self, + input_ids: torch.Tensor, + model: HFDecoderModel, + temperature: float = 0., + num_new_tokens: int = 5) -> Dict: + """Ref: [arXiv:2211.17192v2](https://arxiv.org/abs/2211.17192) Section 2.2 + """ + sequence = input_ids + new_tokens = [] + + for _ in range(num_new_tokens): + pred = self.predict_next_token(model=model, input_ids=sequence, num_new_tokens=1) # predict next one token + prob = self.score_to_prob(pred.scores[0], temperature=temperature) + sampled = self.sample(prob=prob, num_samples=1) + new_tokens.append(sampled) + sequence = torch.cat([sequence, sampled['sampled_token']], dim=1) + + return {"sequence": sequence, "new_tokens": new_tokens}
+ + + +
+[docs] + def inference( + self, + model: HFDecoderModel, + draft_model: HFDecoderModel, + input: str, + temperature: float = 0., + gamma: int = 5, + max_new_tokens: int = 100, + ): + """ + Perform inference for a model + + Parameters + ------------ + model : HFDecoderModel object. + TunableModel to verify tokens generated by the draft model. + + draft_model : HFDecoderModel object. + TunableModel that provides approximations of the target model. + + input : str. + The input text (i.e., the prompt) for the model. + + gamma : int. + The number of tokens to be generated by the draft model within each iter. + + max_new_tokens : int. + The maximum number of tokens to be generated by the target model. + + + Returns + ------- + output: str. + The output text generated by the model. + """ + assert gamma > 0 + + if self.inferencer_args.device == "gpu": + inputs = model.encode(input, return_tensors="pt").to(device=self.local_rank) + elif self.inferencer_args.device == "cpu": + inputs = model.encode(input, return_tensors="pt").to(device='cpu') + else: + raise NotImplementedError( + f"device \"{self.inferencer_args.device}\" is not supported" + ) + + + def speculative_sampling(input_ids: torch.Tensor, + model: HFDecoderModel, + draft_model: HFDecoderModel, + temperature: float = 0.) -> torch.Tensor: + """Ref: [arXiv:2211.17192v2](https://arxiv.org/abs/2211.17192) + + Parameters + ---------- + input_ids : torch.Tensor + draft_model : TunableModel object + model_list : List[TunableModel object] + + Returns + ------- + torch.Tensor + """ + len_input_ids= input_ids.shape[1] + logger.debug(f"len of input_ids: {len_input_ids}") + + # STEP 1: Sample γ guesses x1, ..., xγ from Mq (draft model) autoregressively + output_draft = self.autoregressive_sampling(input_ids=input_ids, model=draft_model, num_new_tokens=gamma) + logger.debug(f"draft result: {output_draft['sequence']}") + logger.debug(f"draft result decoded: {draft_model.decode(output_draft['sequence'][0])}") + + + # STEP 2: Run Mp (target model) in parallel + # generate sequences [prefix, x1, x2, ..., xγ] + output = model.get_backend_model()(input_ids=output_draft['sequence'], return_dict=True) + logger.debug(f'shape of output: {output.logits.shape}') + + + # STEP 3: Determine the number of accepted guesses n + accepted = [False] * gamma + for i in range(gamma): + draft_sampled_token_id = output_draft['new_tokens'][i]['sampled_token'] + draft_sampled_token_prob = output_draft['new_tokens'][i]['sampled_prob'] + token_prob = self.score_to_prob(output.logits[:,len_input_ids+i-1,:], temperature=temperature)[0, draft_sampled_token_id] + + # reject the sample with probability 1 - p(x)/q(x) + if torch.rand_like(token_prob) > token_prob/draft_sampled_token_prob: + break + else: + accepted[i] = True + + logger.debug(f"Speculative Sampling: Accepted: {sum(accepted)}/{gamma}") + + + # STEP 4: Adjust the distribution from Mp if needed + if not all(accepted): + all_prob = self.score_to_prob(output.logits[:,len_input_ids+i-1,:], temperature=temperature) + draft_all_prob = output_draft['new_tokens'][i]['all_prob'] + adjusted_prob = torch.max(torch.zeros_like(all_prob), all_prob - draft_all_prob) + prob = adjusted_prob / adjusted_prob.sum(dim=1, keepdim=True) + else: + prob = self.score_to_prob(output.logits[:,-1,:], temperature=temperature) + + + # STEP 5: Return n tokens from Mq, and one token from Mp + token_from_target_model = self.sample(prob)['sampled_token'] + final_sequence = torch.concat([output_draft['sequence'][:,:len_input_ids+sum(accepted)], 
token_from_target_model], dim=1) + + return final_sequence + + + num_generated_new_tokens = 0 + len_raw_input = len(inputs[0]) + while num_generated_new_tokens < max_new_tokens: + logger.debug(f'===== New iter =====') + logger.debug(f"input_ids: {inputs}") + sampling_result = speculative_sampling(input_ids=inputs, + model=model, + draft_model=draft_model, + temperature=temperature) + logger.debug(f'sampling result: {sampling_result}') + logger.debug(f'sampling result decoded: {model.decode(sampling_result[0])}') + num_generated_new_tokens += len(sampling_result[0]) - len(inputs[0]) + inputs = sampling_result + + + # if, say, num_generated_new_tokens = 19, and the model accept 3 + # tokens, the actual generated tokens would be 22. + return model.decode(inputs[0,:len_raw_input+max_new_tokens])
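+
+ # A standalone sketch (not part of LMFlow) of the per-token accept/reject rule used by
+ # `speculative_sampling` above: a token drawn from the draft model with probability q is
+ # kept with probability min(1, p / q), where p is the target model's probability for the
+ # same token (arXiv:2211.17192).
+ import torch
+
+ def accept_draft_token(p_target: float, q_draft: float) -> bool:
+     # reject with probability 1 - p/q; never rejects when the target model is at least
+     # as confident as the draft model
+     return torch.rand(1).item() <= min(1.0, p_target / q_draft)
+
+ torch.manual_seed(0)
+ print(accept_draft_token(p_target=0.30, q_draft=0.20))  # always True, since p >= q
+ print(accept_draft_token(p_target=0.05, q_draft=0.50))  # True only about 10% of the time
+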
+ + + +
+[docs] + def stream_inference(self): + raise NotImplementedError("Streaming output for SpeculativeInferencer is not supported yet")
+
+ + +
+[docs] +class ToolInferencer(Inferencer): + """ + Initializes the `ToolInferencer` class with given arguments. + + Parameters + ------------ + model_args : ModelArguments object. + Contains the arguments required to load the model. + + data_args : DatasetArguments object. + Contains the arguments required to load the dataset. + + inferencer_args : InferencerArguments object. + Contains the arguments required to perform inference. + + + """ + def __init__(self, model_args, data_args, inferencer_args): + super().__init__(model_args, data_args, inferencer_args) + +
+[docs] + self.model = HFDecoderModel(self.model_args)
+ + +
+[docs] + def inference( + self, + model: HFDecoderModel, + input: str, + max_new_tokens: int=1024, + ): + """ + Perform inference for a model + + Parameters + ------------ + model : HFDecoderModel object. + TunableModel to perform inference + + input : str. + The input text (i.e., the prompt) for the model. + + max_new_tokens : int. + The maximum number of tokens to be generated by the model. + + Returns: + + output : str. + The output text generated by the model. + """ + if self.inferencer_args.device == "gpu": + input_id = model.encode(input, return_tensors="pt").to(device=self.local_rank) + elif self.inferencer_args.device == "cpu": + input_id = model.encode(input, return_tensors="pt").to(device='cpu') + logger.debug(f"input_id: {input_id}") + input_length = input_id.shape[1] + output_id = model.inference( + input_id, + use_accelerator=True, + max_new_tokens=max_new_tokens, + # pad_token_id=model.tokenizer.eos_token_id, + ) + # logger.debug(f"output: {output_id}") + output = model.decode(output_id[0]) + output = output.replace(input,"") + return output
+ + +
+[docs] + def code_exec(self, code): + # Execute the code + result = subprocess.run(["python", "-c", code], capture_output=True, text=True) + + # Print the result + if result.returncode == 0: + print("Successfully Executed, the result is:") + print(result.stdout) + return result.stdout + else: + print("Error:") + print(result.stderr) + return result
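+
+ # A hedged usage sketch (not from the LMFlow docs): the wrapped HFDecoderModel generates
+ # code for a prompt, and `code_exec` runs that code in a Python subprocess, printing the
+ # captured stdout on success. The prompt string below is purely illustrative.
+ #
+ #   tool_inferencer = ToolInferencer(model_args, data_args, inferencer_args)
+ #   code = tool_inferencer.inference(tool_inferencer.model, "Write Python that prints 2 ** 10.")
+ #   tool_inferencer.code_exec(code)
+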
+
+ + + + + + + + \ No newline at end of file diff --git a/_modules/lmflow/pipeline/iterative_dpo_aligner.html b/_modules/lmflow/pipeline/iterative_dpo_aligner.html new file mode 100644 index 000000000..9b2c55ed2 --- /dev/null +++ b/_modules/lmflow/pipeline/iterative_dpo_aligner.html @@ -0,0 +1,765 @@ + + + + + + + + + + lmflow.pipeline.iterative_dpo_aligner — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for lmflow.pipeline.iterative_dpo_aligner

+import copy
+from dataclasses import fields
+import gc
+import json
+import logging
+from pathlib import Path
+from typing import List, Dict, Any, Optional
+
+from tqdm import tqdm
+
+from lmflow.models.hf_text_regression_model import HFTextRegressionModel
+from lmflow.models.hf_decoder_model import HFDecoderModel
+from lmflow.datasets.dataset import Dataset
+from lmflow.pipeline.dpov2_aligner import MemorySafeDPOv2Aligner
+from lmflow.pipeline.rm_inferencer import RewardModelInferencer
+from lmflow.pipeline.vllm_inferencer import MemorySafeVLLMInferencer
+from lmflow.args import (
+    ModelArguments, 
+    DatasetArguments, 
+    InferencerArguments,
+    IterativeDPOAlignerArguments,
+    DPOv2AlignerArguments,
+)
+from lmflow.utils.common import print_banner
+
+
+[docs] +logger = logging.getLogger(__name__)
+ + + +
+[docs] +class IterativeDPOAligner: + def __init__( + self, + model_args: ModelArguments, + data_args: DatasetArguments, + aligner_args:IterativeDPOAlignerArguments, + ref_model_args: ModelArguments, + reward_model_args: ModelArguments, + **kwargs, + ): +
+[docs] + self.model_args = model_args
+ +
+[docs] + self.data_args = data_args
+ +
+[docs] + self.aligner_args = aligner_args
+ +
+[docs] + self.ref_model_args = ref_model_args
+ +
+[docs] + self.reward_model_args = reward_model_args
+ +
+[docs] + self.workspace_path = Path(self.aligner_args.output_dir)
+ + + +
+[docs] + def align( + self, + dataset_list: List[Dataset] + ): + num_iterations = len(dataset_list) + + for iter_idx in tqdm( + range(self.aligner_args.initial_iter_idx, num_iterations), + desc="Iterative DPO Align", + unit="iteration" + ): + if iter_idx == 0: + target_model_args = self.model_args + else: + target_model_args = copy.deepcopy(self.model_args) + target_model_args.model_name_or_path = str(self.workspace_path/f"iteration_{iter_idx}"/"model") + + self._align_single_iteration( + iteration_name=f"iteration_{iter_idx+1}", + target_model_args=target_model_args, + reward_model_args=self.reward_model_args, + ref_model_args=self.ref_model_args, + dataset=dataset_list[iter_idx], + )
+ + + +
+[docs] + def _align_single_iteration( + self, + iteration_name: str, + target_model_args: ModelArguments, + reward_model_args: ModelArguments, + ref_model_args: ModelArguments, + dataset: Dataset, + ): + if self.aligner_args.do_response_generation: + # generate responses + print_banner(f'Iterative DPO {iteration_name}: Generate responses') + model = HFDecoderModel( + model_args=target_model_args, + tune_strategy='none' + ) + self._do_target_model_inference( + model=model, + dataset=dataset, + output_dir=str(self.workspace_path/iteration_name), + ) + del model + + if self.aligner_args.do_scoring: + # reward model scoring + print_banner(f'Iterative DPO {iteration_name}: Reward model scoring') + reward_model = HFTextRegressionModel( + model_args=reward_model_args, + tune_strategy='none', + use_accelerator=self.aligner_args.use_accelerator, + ) + target_model_inference_result_data_args = copy.deepcopy(dataset.data_args) + target_model_inference_result_data_args.dataset_path = str(self.workspace_path/iteration_name/"target_model_inference_result") + target_model_inference_result_data_args.block_size = self.aligner_args.reward_model_inference_block_size + target_model_inference_result_dataset = Dataset(target_model_inference_result_data_args) + self._do_reward_model_inference( + model=reward_model, + dataset=target_model_inference_result_dataset, + output_dir=str(self.workspace_path/iteration_name), + ) + del reward_model + + if self.aligner_args.do_dpo_align: + # DPO training + print_banner(f'Iterative DPO {iteration_name}: DPO training') + dpo_train_data_args = copy.deepcopy(dataset.data_args) + dpo_train_data_args.dataset_path = str(self.workspace_path/iteration_name/"reward_model_inference_result") + self._do_single_dpo_align( + model_args=target_model_args, + ref_model_args=ref_model_args, + data_args=dpo_train_data_args, + output_dir=str(self.workspace_path/iteration_name/"model"), + iteration_name=iteration_name, + )
+ + + +
+[docs] + def _do_target_model_inference( + self, + model: HFDecoderModel, + dataset: Dataset, + output_dir: str, + ): + result_cache_path = str(Path(output_dir)/"cache"/"target_model_inference_result.json") + inferencer = MemorySafeVLLMInferencer( + model_args=model.model_args, + data_args=dataset.data_args, + inferencer_args=self._parse_target_model_inference_args( + args=self.aligner_args, + result_cache_path=result_cache_path, + ), + ) + res = inferencer.inference() + + dataset_out = {"type": "text_to_textlist", "instances": res} + + target_model_inference_result_dir = Path(output_dir)/"target_model_inference_result" + target_model_inference_result_dir.mkdir(parents=True, exist_ok=True) + json.dump( + dataset_out, + open(str(target_model_inference_result_dir/"result.json"), "w", encoding='utf-8'), + ensure_ascii=False, + indent=4, + )
+ + + +
+[docs] + def _do_reward_model_inference( + self, + model: HFTextRegressionModel, + dataset: Dataset, + output_dir: str, + ): + inferencer = RewardModelInferencer( + model_args=model.model_args, + data_args=dataset.data_args, + inferencer_args=self._parse_reward_model_inference_args(self.aligner_args), + ) + res = inferencer.inference( + model=model, + dataset=dataset, + transform_dataset_in_place=True, + use_vllm=False, + enable_distributed_inference=self.aligner_args.enable_distributed_inference, + distributed_inference_num_instances=self.aligner_args.distributed_inference_num_instances, + inference_batch_size=self.aligner_args.reward_model_inference_batch_size, + ) + + reward_model_inference_result_dir = Path(output_dir)/"reward_model_inference_result" + reward_model_inference_result_dir.mkdir(parents=True, exist_ok=True) + res.save(str(reward_model_inference_result_dir/"result.json"))
+ + + +
+[docs] + def _do_single_dpo_align( + self, + model_args: ModelArguments, + ref_model_args: ModelArguments, + data_args: DatasetArguments, + output_dir: str, + iteration_name: str, + ): + aligner = MemorySafeDPOv2Aligner( + model_args=model_args, + data_args=data_args, + aligner_args=self._parse_dpo_aligner_args( + args=self.aligner_args, + output_dir=output_dir, + iteration_name=iteration_name, + ), + ref_model_args=ref_model_args, + ) + aligner.align()
+ + + +
+[docs] + def _parse_target_model_inference_args( + self, + args: IterativeDPOAlignerArguments, + result_cache_path: str, + ) -> InferencerArguments: + inferencer_args = self.__filter_args( + mixed_args=args, + target_cls=InferencerArguments, + ) + inferencer_args.save_results=True + inferencer_args.results_path=result_cache_path + + return inferencer_args
+ + + +
+[docs] + def _parse_reward_model_inference_args( + self, + args: IterativeDPOAlignerArguments, + ) -> InferencerArguments: + inferencer_args = self.__filter_args( + mixed_args=args, + target_cls=InferencerArguments, + ) + + return inferencer_args
+ + + +
+[docs] + def _parse_dpo_aligner_args( + self, + args: IterativeDPOAlignerArguments, + output_dir: str, + iteration_name: str, + ) -> DPOv2AlignerArguments: + aligner_args = self.__filter_args( + mixed_args=args, + target_cls=DPOv2AlignerArguments, + ) + aligner_args.output_dir = output_dir + aligner_args.run_name = f"{args.run_name}_{iteration_name}" + + return aligner_args
+ + + +
+[docs] + def __filter_args( + self, + mixed_args, + target_cls, + ): + target_cls_fields = {f.name for f in fields(target_cls) if f.init} + common_fields = {f: getattr(mixed_args, f) for f in target_cls_fields if hasattr(mixed_args, f)} + return target_cls(**common_fields)
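+
+ # A hedged usage sketch (not from the LMFlow docs): one Dataset is prepared per DPO
+ # iteration, and `align` walks them in order, writing each iteration's generations,
+ # reward scores, and trained model under `aligner_args.output_dir/iteration_{i}`.
+ # All argument objects are assumed to be already-parsed LMFlow argument dataclasses.
+ #
+ #   aligner = IterativeDPOAligner(
+ #       model_args=model_args,
+ #       data_args=data_args,
+ #       aligner_args=aligner_args,
+ #       ref_model_args=ref_model_args,
+ #       reward_model_args=reward_model_args,
+ #   )
+ #   aligner.align(dataset_list=[Dataset(args) for args in per_iteration_data_args])
+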
+
+ + + + + + + + \ No newline at end of file diff --git a/_modules/lmflow/pipeline/raft_aligner.html b/_modules/lmflow/pipeline/raft_aligner.html new file mode 100644 index 000000000..a4ff7dc05 --- /dev/null +++ b/_modules/lmflow/pipeline/raft_aligner.html @@ -0,0 +1,1198 @@ + + + + + + + + + + lmflow.pipeline.raft_aligner — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for lmflow.pipeline.raft_aligner

+#!/usr/bin/env python
+# coding=utf-8
+"""
+The Aligner class simplifies the process of running alignment.
+"""
+
+import logging
+import numpy as np
+import os
+import sys
+import time
+from itertools import chain
+
+import torch
+import torch.distributed as dist
+import transformers
+from datasets import (
+    set_caching_enabled,
+    Dataset,
+    DatasetDict,
+)
+from transformers import (
+    default_data_collator,
+    pipeline,
+    set_seed,
+)
+from transformers.testing_utils import CaptureLogger
+
+from lmflow.args import DatasetArguments
+from lmflow.datasets.dataset import Dataset as LMFlowDataset
+from lmflow.pipeline.base_aligner import BaseAligner
+from lmflow.pipeline.utils.raft_trainer import RaftTrainer
+
+
+[docs] +logger = logging.getLogger(__name__)
+ + + +
+[docs] +class RaftAligner(BaseAligner): + """ + Initializes the `RaftAligner` class with given arguments. + + Parameters + ------------ + model_args : ModelArguments object. + Contains the arguments required to load the model. + + data_args : DatasetArguments object. + Contains the arguments required to load the dataset. + + raft_aligner_args : RaftAlignerArguments object. + Contains the arguments required to perform alignment. + + args : Optional. + Positional arguments. + + kwargs : Optional. + Keyword arguments. + + """ + def __init__(self, model_args, data_args, aligner_args, *args, **kwargs): +
+[docs] + self.model_args = model_args
+ +
+[docs] + self.data_args = data_args
+ +
+[docs] + self.aligner_args = aligner_args
+ + + logging.basicConfig( + format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", + datefmt="%m/%d/%Y %H:%M:%S", + handlers=[logging.StreamHandler(sys.stdout)], + ) + +
+[docs] + self.INF = 888888888
+ + logger.setLevel(logging.INFO) + +
+[docs] + output_reward_path = aligner_args.output_reward_path
+ + if output_reward_path is not None: + os.makedirs(os.path.dirname(output_reward_path), exist_ok=True) + # Deletes a maybe-exist file + try: + os.remove(output_reward_path) + except OSError: + pass + + +
+[docs] + def _initialize_trainer(self, model, tokenizer, training_args): + """ + This function takes the model and tokenizer as the input and initialize the trainer. + """ + trainer = RaftTrainer( + model=model, + args=training_args, + train_dataset=Dataset.from_dict({"text": [ " " ] }), + eval_dataset=Dataset.from_dict({}), + tokenizer=tokenizer, + data_collator=default_data_collator, + compute_metrics=None, + preprocess_logits_for_metrics=None, + ) + return trainer
+ + + +
+[docs] + def _load_dataset( + self, + selected_dataset, + model, + tokenizer, + model_args, + data_args, + training_args, + ): + ''' + This function prepares the dataset for every iteration. + ''' + raw_datasets = selected_dataset + + if training_args.do_train: + column_names = list(raw_datasets["train"].features) + else: + column_names = list(raw_datasets["validation"].features) + text_column_name = "text" if "text" in column_names else column_names[0] + + # since this will be pickled to avoid _LazyModule error in Hasher force logger loading before tokenize_function + tok_logger = transformers.utils.logging.get_logger("transformers.tokenization_utils_base") + + def tokenize_function(examples): + with CaptureLogger(tok_logger) as cl: + output = tokenizer(examples[text_column_name]) + # clm input could be much much longer than block_size + if "Token indices sequence length is longer than the" in cl.out: + tok_logger.warning( + "^^^^^^^^^^^^^^^^ Please ignore the warning above - this long input will be chunked into smaller bits" + " before being passed to the model." + ) + return output + + with training_args.main_process_first(desc="dataset map tokenization"): + if not data_args.streaming: + tokenized_datasets = raw_datasets.map( + tokenize_function, + batched=True, + num_proc=data_args.preprocessing_num_workers, + remove_columns=column_names, + load_from_cache_file=not data_args.overwrite_cache, + desc="Running tokenizer on dataset", + ) + else: + tokenized_datasets = raw_datasets.map( + tokenize_function, + batched=True, + remove_columns=column_names, + ) + + if data_args.block_size is None: + block_size = tokenizer.model_max_length + if block_size > 1024: + logger.warning( + "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value" + " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can" + " override this default with `--block_size xxx`." + ) + block_size = 512 + else: + if data_args.block_size > tokenizer.model_max_length: + logger.warning( + f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model" + f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}." + ) + block_size = min(data_args.block_size, tokenizer.model_max_length) + + # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size. + def group_texts(examples): + # Concatenate all texts. + concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()} + total_length = len(concatenated_examples[list(examples.keys())[0]]) + # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can + # customize this part to your needs. + if total_length >= block_size: + total_length = (total_length // block_size) * block_size + # Split by chunks of max_len. + result = { + k: [t[i : i + block_size] for i in range(0, total_length, block_size)] + for k, t in concatenated_examples.items() + } + result["labels"] = result["input_ids"].copy() + return result + + # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder + # for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower + # to preprocess. + # + # To speed up this part, we use multiprocessing. 
See the documentation of the map method for more information: + # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map + + with training_args.main_process_first(desc="grouping texts together"): + group_batch_size = 1000 + if data_args.disable_group_texts: + group_batch_size = 1 + if not data_args.streaming: + lm_datasets = tokenized_datasets.map( + group_texts, + batched=True, + batch_size=group_batch_size, + num_proc=data_args.preprocessing_num_workers, + load_from_cache_file=not data_args.overwrite_cache, + desc=f"Grouping texts in chunks of {block_size}", + ) + else: + lm_datasets = tokenized_datasets.map( + group_texts, + batched=True, + batch_size=group_batch_size, + ) + + if training_args.do_train: + if "train" not in tokenized_datasets: + raise ValueError("--do_train requires a train dataset") + train_dataset = lm_datasets["train"] + if data_args.max_train_samples is not None: + max_train_samples = min(len(train_dataset), data_args.max_train_samples) + train_dataset = train_dataset.select(range(max_train_samples)) + + return train_dataset
+ + + +
+[docs] + def _load_input_dataset(self, dataset, tokenizer): + """ + Load input dataset (i.e. prompt/question dataset) for training. + + Args: + dataset: A Dataset object. + The dataset to be loaded. + + Returns: + dataloader (`torch.utils.data.DataLoader`): + The dataloader for the dataset. + """ + ds = dataset.get_backend_dataset() + + def tokenize(sample): + sample["input_ids"] = tokenizer.encode(sample["text"]) + sample['input'] = tokenizer.decode(sample["input_ids"]) + return sample + + ds = ds.map(tokenize, batched=False) + ds = ds.filter(lambda x: len(x["input_ids"]) <= 256) + + ds.set_format(type='torch') + + return ds
+ + +
+[docs] + def _clean_text(self, text): + if len(text) == 0: + return text + stext = [x for x in text.split("###Human") if x] + return stext[0].strip().strip("#")
+ + +
+[docs] + def _discard_sample(self, text): + if "#" in text: + return True + elif len(text) < 2: # delete empty sample + return True + return False
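+
+ # A standalone illustration (plain Python, not part of LMFlow) of the two helpers above:
+ # `_clean_text` cuts a generation at the first "###Human" turn, and `_discard_sample`
+ # drops samples that still contain "#" or are near-empty before they can enter the RAFT
+ # training set.
+ text = "Sure, here is the answer.###Human follow-up question"
+ cleaned = [x for x in text.split("###Human") if x][0].strip().strip("#")
+ print(cleaned)                               # "Sure, here is the answer."
+ print("#" in cleaned or len(cleaned) < 2)    # False -> this sample would be kept
+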
+ + +
+[docs] + def _get_batch_dataset_top( + self, + model, + batch_input, + alpha=0.2, + iter_id=0, + local_rank=0, + output_min_length=16, + output_max_length=48, + infer_batch_size=8, + generation_kwargs={}, + tokenizer=None, + training_args=None, + reward_model=None, + output_reward_path=None, + ): + """ + :param batch_input: input prompts + """ + # we will get the batch dataset via Dataset.from_dict + start_time = time.time() + + query_tensors = batch_input['input_ids'] + querys = batch_input['input'] + data_size = len(querys) + + reward_eva = [] # record the reward of the samples + input_texts = [] + responses = [] + + for i, query_tensor in enumerate(query_tensors): + query = querys[i] + input_texts.append(query) + if (i + 1) % infer_batch_size == 0 or (i+1 == data_size): + gen_len = np.random.randint(output_min_length, output_max_length) + generation_kwargs["max_new_tokens"] = gen_len + inputs = tokenizer(input_texts, return_tensors="pt", padding=True).to(training_args.device) + with torch.no_grad(): + outputs = model.generate(**inputs, **generation_kwargs) + generated_texts = tokenizer.batch_decode(outputs, skip_special_tokens=True) + generated_texts = [ + generated_text.replace(input_texts[i], "") for i, generated_text in enumerate(generated_texts) + ] + texts_for_rewards = [q + r for q, r in zip(input_texts, generated_texts)] + + texts_for_reward_dataset = LMFlowDataset.create_from_dict({ + "type": "text_only", + "instances": [ + { "text": text } for text in texts_for_rewards + ], + }) + + reward_dataset = reward_model.inference(texts_for_reward_dataset) + rewards = [ sample["value"] for sample in reward_dataset.to_dict()["instances"] ] + + reward_eva.extend(rewards) + responses.extend(generated_texts) + input_texts = [] + + data = [{"input": querys[j], "output": [responses[j]]} for j in range(len(reward_eva))] + + world_size = int(os.getenv("WORLD_SIZE", "1")) + all_process_list =[{}] * world_size + + + data_to_send = [[data[i], reward_eva[i]] for i in range(len(data))] + dist.all_gather_object(all_process_list, data_to_send) + gathered_data = [] + gathered_reward = [] + for i in range(world_size): + tmp_data = [tmp[0] for tmp in all_process_list[i]] + gathered_data.extend(tmp_data) + + tmp_reward = [tmp[1] for tmp in all_process_list[i]] + gathered_reward.extend(tmp_reward) + + idx = np.argsort(gathered_reward)[::-1][:int(len(gathered_reward) * alpha)] + gathered_data = [gathered_data[j] for j in idx] + reward_train = [gathered_reward[j] for j in idx] + + self.reward_seq.append(np.mean(gathered_reward)) + self.train_reawrd.append(np.mean(reward_train)) + import matplotlib.pyplot as plt + if training_args.local_rank == 0: + plt.plot(self.reward_seq, marker="o") + plt.plot(self.train_reawrd, marker="*") + plt.legend(["Model reward", "Reward of SFT Set"]) + plt.savefig(self.store_dir + '/training_reward.png') + plt.close() + + logger.info(f"collected data of {len(gathered_data)}") + logger.info([np.mean(gathered_reward), np.mean(reward_train)]) + + if training_args.local_rank == 0 and output_reward_path is not None: + with open(output_reward_path, mode='a') as fout: + fout.write('mean reward: ' + str(np.mean(gathered_reward)) + 'mean reward in training set: ' + str(np.mean(reward_train))) + fout.write("\n") + + + prompt_structure = "{definition}{input}{output}" + tmp_output_dataset = { + "text": [ prompt_structure.format( + definition="", input=sample["input"], output=sample["output"][0] + ) for sample in gathered_data + ] + } + + # We store the training set for monitoring the RAFT 
training + all_texts = tmp_output_dataset['text'] + output_eval_dataset = {} + output_eval_dataset['type'] = 'text_only' + output_eval_dataset['instances'] = [{'text': i_text} for i_text in all_texts] + import json + if local_rank == 0: + with open(self.store_dir + "/train_set_" + str(iter_id) + ".json", 'w', encoding='utf8') as f: + json.dump(output_eval_dataset, f, ensure_ascii=False) + + + # We need to make sure that the order of the samples are the same for each agent + all_process_list = [{}] * world_size + data_to_send = [tmp_output_dataset, local_rank] + dist.all_gather_object(all_process_list, data_to_send) + for i in range(world_size): + if all_process_list[i][1] == 0: + output_dataset = all_process_list[i][0] + break + + return DatasetDict({ "train": Dataset.from_dict(output_dataset) })
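+
+ # A standalone sketch (plain NumPy, not part of LMFlow) of the top-alpha filtering above:
+ # the gathered samples are sorted by reward in descending order and only the best `alpha`
+ # fraction is kept as the supervised fine-tuning set for the next RAFT iteration.
+ import numpy as np
+
+ rewards = np.array([0.1, 0.9, 0.4, 0.7, 0.2, 0.8])
+ alpha = 0.5
+ keep_idx = np.argsort(rewards)[::-1][: int(len(rewards) * alpha)]
+ print(keep_idx)           # [1 5 3] -> indices of the three highest-reward samples
+ print(rewards[keep_idx])  # [0.9 0.8 0.7]
+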
+ + +
+[docs] + def _get_batch_dataset_local( + self, + model, + batch_input, + K=8, + iter_id=0, + local_rank=0, + output_min_length=16, + output_max_length=48, + infer_batch_size=8, + generation_kwargs={}, + tokenizer=None, + training_args=None, + reward_model=None, + output_reward_path=None, + ): + """ + :param batch_input: input prompts + """ + # we will get the batch dataset via Dataset.from_dict + start_time = time.time() + + querys = batch_input['input'] + data_size = len(querys) + + reward_eva = [] + reward_train = [] + + input_texts = [] + responses = [] + record_querys = [] + all_outputs = [] + + for i, query in enumerate(querys): + input_texts = [query for _ in range(K)] + + gen_len = np.random.randint(output_min_length, output_max_length) + generation_kwargs["max_new_tokens"] = gen_len + inputs = tokenizer(input_texts, return_tensors="pt", padding=True).to(training_args.device) + with torch.no_grad(): + outputs = model.generate(**inputs, **generation_kwargs) + generated_texts = tokenizer.batch_decode(outputs, skip_special_tokens=True) + generated_texts = [ + generated_text.replace(input_texts[i], "") for i, generated_text in enumerate(generated_texts) + ] + generated_texts = [ + self._clean_text(generated_text) for generated_text in generated_texts + ] + texts_for_rewards = [q + r for q, r in zip(input_texts, generated_texts)] + + texts_for_reward_dataset = LMFlowDataset.create_from_dict({ + "type": "text_only", + "instances": [ + { "text": texts_for_rewards[i] } for i in range(len(texts_for_rewards)) + ], + }) + + reward_dataset = reward_model.inference(texts_for_reward_dataset) + rewards = [ sample["value"] for sample in reward_dataset.to_dict()["instances"] ] + reward_eva.append(rewards[0]) + + ################################ + # we impose some post-detection and discard the samples with certain criteria. 
+ for kk in range(K): + if self._discard_sample(generated_texts[kk]): + rewards[kk] = -self.INF + ################################ + + idx_to_record = np.argmax(rewards) + all_outputs.append(generated_texts[0]) + + # if we discard all the samples, we do not record the sample + if rewards[idx_to_record] != -self.INF: + responses.append(generated_texts[idx_to_record]) + reward_train.append(rewards[idx_to_record]) + record_querys.append(query) + input_texts = [] + + + data = [] + for j in range(len(reward_train)): + sample = {} + sample["input"] = record_querys[j] + sample["output"] = [responses[j]] + data.append(sample) + + + world_size = int(os.getenv("WORLD_SIZE", "1")) + all_process_data =[{}] * world_size + dist.all_gather_object(all_process_data, data) + + all_process_eval_reward =[{}] * world_size + dist.all_gather_object(all_process_eval_reward, reward_eva) + all_process_train_set_reward =[{}] * world_size + dist.all_gather_object(all_process_train_set_reward, reward_train) + + + gathered_data = [] + gathered_reward = [] + gathered_train_reward = [] + + for i in range(world_size): + gathered_data.extend(all_process_data[i]) + gathered_reward.extend(all_process_eval_reward[i]) + gathered_train_reward.extend(all_process_train_set_reward[i]) + + if training_args.local_rank == 0 and output_reward_path is not None: + with open(output_reward_path, mode='a') as fout: + fout.write('mean reward: ' + str(np.mean(gathered_reward)) + 'mean reward in training set: ' + str(np.mean(gathered_train_reward))) + fout.write("\n") + logger.info([np.mean(gathered_reward), np.mean(gathered_train_reward)]) + + + self.reward_seq.append(np.mean(gathered_reward)) + self.train_reawrd.append(np.mean(reward_train)) + import matplotlib.pyplot as plt + if training_args.local_rank == 0: + plt.plot(self.reward_seq, marker="o") + plt.plot(self.train_reawrd, marker="*") + plt.legend(["Model reward", "Reward of SFT Set"]) + plt.savefig(self.store_dir + '/training_reward.png') + plt.close() + + + prompt_structure = "{definition}{input}{output}" + tmp_output_dataset = { + "text": [ prompt_structure.format( + definition="", input=sample["input"], output=sample["output"][0] + ) for sample in gathered_data + ] + } + + # We store the training set for monitoring the RAFT training + all_texts = tmp_output_dataset['text'] + output_eval_dataset = {} + output_eval_dataset['type'] = 'text_only' + output_eval_dataset['instances'] = [{'text': i_text} for i_text in all_texts] + import json + if local_rank == 0: + with open(self.store_dir + "/train_set_" + str(iter_id) + ".json", 'w', encoding='utf8') as f: + json.dump(output_eval_dataset, f, ensure_ascii=False) + + + # We need to make sure that the order of the samples are the same for each agent + all_process_list = [{}] * world_size + data_to_send = [tmp_output_dataset, local_rank] + dist.all_gather_object(all_process_list, data_to_send) + for i in range(world_size): + if all_process_list[i][1] == 0: + output_dataset = all_process_list[i][0] + break + + logger.info(f"collected data of {len(output_dataset['text'])}") + + + return DatasetDict({ "train": Dataset.from_dict(output_dataset) })
+ + + +
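`_get_batch_dataset_local` above is a best-of-K filter: each worker samples K completions per prompt, discards degenerate generations, and keeps only the single highest-reward completion. A rough sketch of that per-prompt step, where `reward_fn` and `is_degenerate` are hypothetical stand-ins for the reward-model call and `_discard_sample`:

import numpy as np

NEG_INF = float("-inf")

def best_of_k(prompt, candidates, reward_fn, is_degenerate):
    # Score prompt + completion; mask discarded completions with -inf.
    rewards = [NEG_INF if is_degenerate(c) else reward_fn(prompt + c) for c in candidates]
    best = int(np.argmax(rewards))
    if rewards[best] == NEG_INF:
        return None  # every candidate was discarded; skip this prompt
    return candidates[best], rewards[best]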
+[docs] + def align(self, model, dataset, reward_model): + """ + Perform alignment for a model + + Parameters + ------------ + model : BaseModel object. + dataset: Dataset object. + Input dataset for model to generate outputs. The input and output + will then be feed into reward model to get the reward for + alignment. + reward_model: RegressionModel object. + """ + tokenizer = model.get_tokenizer() + tokenizer.pad_token = tokenizer.eos_token + tokenizer.pad_token_id = tokenizer.eos_token_id + tokenizer.padding_side = "left" + + dataset = self._load_input_dataset(dataset, tokenizer) + set_caching_enabled(False) + + wrapped_model = model + model = model.get_backend_model() + + generation_kwargs = { + "min_length": 1, + "top_k": 0.0, + "top_p": 1.0, + "do_sample": True, + "pad_token_id": tokenizer.eos_token_id, + "temperature":0.85, + } + + aligner_args = self.aligner_args + training_args = aligner_args + model_args = self.model_args + data_args = self.data_args + world_size = int(os.getenv("WORLD_SIZE", "1")) + + + set_seed(42 + training_args.local_rank) + ITERATION = aligner_args.num_raft_iteration + collection_strategy = aligner_args.collection_strategy + sft_batch_size = aligner_args.raft_batch_size + + if collection_strategy == "top": + alpha = aligner_args.top_reward_percentage + M = int(sft_batch_size / world_size / alpha) + elif collection_strategy == "local": + K = int(1/aligner_args.top_reward_percentage) + M = int(sft_batch_size / world_size) + else: + raise NotImplementedError("We only support two data collection strategies") + + print(M, K) + if training_args.local_rank == 0: + print(aligner_args) + self.store_dir = aligner_args.output_dir + self.reward_seq = [] + self.train_reawrd = [] + + data_size = len(dataset['input']) + lr = training_args.learning_rate + random_idxs = np.arange(data_size) + np.random.shuffle(random_idxs) + + raft_trainer = self._initialize_trainer(model, tokenizer, training_args) + raft_trainer.train(resume_from_checkpoint=False, is_first_time=True) + + for iteration in range(ITERATION): + set_seed(666 + training_args.local_rank + world_size * (iteration+1)) + + end_idx = np.min([data_size, (iteration+1) * M]) + batch_input = dataset.select(random_idxs[iteration * M : end_idx]) + model.gradient_checkpointing_disable() + model.config.use_cache = True + + start_time = time.time() + if collection_strategy == "top": + selected_dataset = self._get_batch_dataset_top( + raft_trainer.tmp_model, + batch_input, + alpha, + iteration, + training_args.local_rank, + output_min_length=aligner_args.output_min_length, + output_max_length=aligner_args.output_max_length, + infer_batch_size=aligner_args.inference_batch_size_per_device, + generation_kwargs=generation_kwargs, + tokenizer=tokenizer, + training_args=training_args, + reward_model=reward_model, + output_reward_path=aligner_args.output_reward_path, + ) + elif collection_strategy == "local": + selected_dataset = self._get_batch_dataset_local( + raft_trainer.tmp_model, + batch_input, + K, + iteration, + training_args.local_rank, + output_min_length=aligner_args.output_min_length, + output_max_length=aligner_args.output_max_length, + infer_batch_size=K, + generation_kwargs=generation_kwargs, + tokenizer=tokenizer, + training_args=training_args, + reward_model=reward_model, + output_reward_path=aligner_args.output_reward_path, + ) + end_time = time.time() + logger.info("It takes %.2f s to inference one stage", end_time - start_time) + + raft_trainer.train_dataset = self._load_dataset( + selected_dataset, + 
raft_trainer.tmp_model, + tokenizer, + model_args, + data_args, + training_args, + ) + + logger.info(f"iter {iteration}") + start_time = time.time() + model.gradient_checkpointing_enable() + model.config.use_cache = False + + + train_result = raft_trainer.train(resume_from_checkpoint=False) + end_time = time.time() + logger.info("It takes %.2f s to train one stage", end_time - start_time) + if (iteration+1) * M > data_size: + logger.info("One epoch is completed.") + break + + ''' + if training_args.local_rank == 0 and iteration % 2 == 0: + wrapped_model.save(aligner_args.output_dir + "/" + "model" + str(iteration)) + print(iteration, "I save a model with", self.reward_seq[-1]) + ''' + + if aligner_args.output_dir is not None: + wrapped_model.save(aligner_args.output_dir) + + return wrapped_model
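To make the batching arithmetic in `align` concrete, here is a worked example with assumed argument values (4 GPUs, a RAFT batch of 1024 samples, top_reward_percentage = 0.125); the formulas are the ones used in the two branches above:

world_size = 4
raft_batch_size = 1024          # aligner_args.raft_batch_size
top_reward_percentage = 0.125   # aligner_args.top_reward_percentage

# "top": every rank generates M prompts, then the best alpha fraction is kept globally.
alpha = top_reward_percentage
M_top = int(raft_batch_size / world_size / alpha)   # 2048 prompts per rank per iteration

# "local": every rank keeps the best of K completions for each of M prompts.
K = int(1 / top_reward_percentage)                  # 8 completions per prompt
M_local = int(raft_batch_size / world_size)         # 256 prompts per rank per iteration

# Note: the unconditional print(M, K) in the source raises a NameError under the
# "top" strategy, since K is only bound in the "local" branch.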
+ + + + + + + + \ No newline at end of file diff --git a/_modules/lmflow/pipeline/rm_inferencer.html b/_modules/lmflow/pipeline/rm_inferencer.html new file mode 100644 index 000000000..f138d4f28 --- /dev/null +++ b/_modules/lmflow/pipeline/rm_inferencer.html @@ -0,0 +1,858 @@ + + + + + + + + + + lmflow.pipeline.rm_inferencer — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for lmflow.pipeline.rm_inferencer

+#!/usr/bin/env python
+# coding=utf-8
+# Copyright 2024 Statistics and Machine Learning Research Group. All rights reserved.
+import copy
+import os
+import torch
+import wandb
+import deepspeed
+import sys
+import numpy as np
+import datetime
+import json
+import time
+import logging
+from typing import Dict, List, Union, Tuple, Any
+
+from accelerate import Accelerator
+import ray
+import ray.data
+from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy
+import torch
+from tqdm import tqdm
+from transformers import AutoConfig
+from transformers.modeling_outputs import SequenceClassifierOutputWithPast
+import torch.distributed as dist
+import torch.nn.functional as F
+
+from lmflow.args import (
+    DatasetArguments,
+    ModelArguments,
+    InferencerArguments,
+)
+from lmflow.datasets.dataset import Dataset
+from lmflow.models.hf_text_regression_model import HFTextRegressionModel
+from lmflow.pipeline.base_pipeline import BasePipeline
+from lmflow.utils.data_utils import (
+    set_random_seed,
+    batchlize,
+    RewardModelInferenceResultWithInput,
+)
+from lmflow.datasets.dataset import KEY_SCORE
+
+
+os.environ["TOKENIZERS_PARALLELISM"] = "false"  # To avoid warnings about parallelism in tokenizers
+
+[docs] +logger = logging.getLogger(__name__)
+ + + +
+[docs] +class RewardModelInferencer(BasePipeline): + """ + Initializes the `Inferencer` class with given arguments. + + Parameters + ------------ + model_args : ModelArguments object. + Contains the arguments required to load the model. + + data_args : DatasetArguments object. + Contains the arguments required to load the dataset. + + inferencer_args : InferencerArguments object. + Contains the arguments required to perform inference. + """ + def __init__( + self, + model_args: ModelArguments, + data_args: DatasetArguments, + inferencer_args: InferencerArguments, + **kwargs, + ): +
+[docs] + self.data_args = data_args
+ +
+[docs] + self.inferencer_args = inferencer_args
+ +
+[docs] + self.model_args = model_args
+ + + set_random_seed(self.inferencer_args.random_seed) + +
+[docs] + self.local_rank = int(os.getenv("LOCAL_RANK", "0"))
+ +
+[docs] + self.world_size = int(os.getenv("WORLD_SIZE", "1"))
+ + if inferencer_args.device == "gpu": + torch.cuda.set_device(self.local_rank) # NOTE: cpu-only machine will have error + deepspeed.init_distributed() + else: + dist.init_process_group( + "gloo", rank=self.local_rank, world_size=self.world_size + ) + + if inferencer_args.use_accelerator: + self.accelerator: Accelerator = kwargs.get('accelerator', Accelerator()) + + +
+[docs] + def inference( + self, + model: HFTextRegressionModel, + dataset: Dataset, + transform_dataset_in_place: bool=True, + use_vllm: bool = False, + enable_distributed_inference: bool = False, + **kwargs, + ) -> Dataset: + if use_vllm: + logger.warning("VLLM doesn't support reward model inference, using normal inference instead.") + use_vllm = False + + assert isinstance(model, HFTextRegressionModel), "model should be HFTextRegressionModel" + if not transform_dataset_in_place: + dataset = copy.deepcopy(dataset) + + model_input = model.prepare_inputs_for_inference( + dataset=dataset, + apply_chat_template=True, + enable_distributed_inference=enable_distributed_inference, + use_vllm=use_vllm + ) + + if use_vllm: + inference_result = self.__vllm_inference( + model=model, + model_input=model_input, + enable_distributed_inference=enable_distributed_inference, + ) + else: + inference_result = self._inference( + model=model, + model_input=model_input, + enable_distributed_inference=enable_distributed_inference, + **kwargs, + ) + + if enable_distributed_inference: + output_dataset = model.postprocess_distributed_inference_outputs( + dataset=dataset, + inference_result=inference_result, + ) + else: + output_dataset = model.postprocess_inference_outputs( + dataset=dataset, + scores=inference_result + ) + + return output_dataset
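A hedged usage sketch of the inferencer (the argument wiring mirrors the other pipeline entry points in this module; any extra required fields of the argument dataclasses are not shown here and would come from the command line):

from transformers import HfArgumentParser

from lmflow.args import ModelArguments, DatasetArguments, InferencerArguments
from lmflow.datasets.dataset import Dataset
from lmflow.models.hf_text_regression_model import HFTextRegressionModel

parser = HfArgumentParser((ModelArguments, DatasetArguments, InferencerArguments))
model_args, data_args, inferencer_args = parser.parse_args_into_dataclasses()

reward_model = HFTextRegressionModel(model_args=model_args, tune_strategy="none", use_accelerator=True)
dataset = Dataset(data_args)

inferencer = RewardModelInferencer(model_args, data_args, inferencer_args)
scored_dataset = inferencer.inference(model=reward_model, dataset=dataset)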
+ + + +
+[docs] + def _inference( + self, + model: HFTextRegressionModel, + model_input: Union[Dataset, ray.data.Dataset], + enable_distributed_inference: bool = False, + **kwargs, + ): + if enable_distributed_inference: + inference_res = self.__distributed_inference( + model=model, + model_input=model_input, + num_instances=kwargs.get("distributed_inference_num_instances", 1), + batch_size=kwargs.get("inference_batch_size", 1), + ) + else: + inference_res = self.__inference( + model=model, + model_input=model_input, + ) + + return inference_res
+ + + +
+[docs] + def __inference( + self, + model: HFTextRegressionModel, + model_input: Dataset, + ) -> Union[List[float], List[List[float]]]: + if model_input.get_type() in ["text_to_textlist"]: + model_input_ids, num_outputs = self.flatten_list(model_input.get_backend_dataset()["input_ids"]) + else: + model_input_ids = model_input.get_backend_dataset()["input_ids"] + + dataloader = batchlize( + examples=model_input_ids, + batch_size=self.inferencer_args.inference_batch_size, + random_shuffle=False, # DO NOT shuffle when inference + ) + num_batches = len(dataloader) + final_output = [] + + for batch_index, batched_input_ids in tqdm( + iterable=enumerate(dataloader), + total=num_batches, + desc="Inference", + unit="batch" + ): + # len(batch) = batch_size, and batch element is dataset sample + model_input_tensor = torch.LongTensor(batched_input_ids).to("cpu" if model.device == "cpu" else "cuda") + if self.inferencer_args.use_accelerator: + with self.accelerator.autocast(): + batch_output = model.inference( + inputs=model_input_tensor, + use_vllm=False, + ) + else: + batch_output = model.inference( + inputs=model_input_tensor, + use_vllm=False, + ) + + batch_output = self.__post_process_model_output(batch_output) + final_output.extend(batch_output) + + if model_input.get_type() in ["text_to_textlist"]: + final_output = self.compress_list(final_output, num_outputs) + + return final_output
+ + + +
+[docs] + def __distributed_inference( + self, + model: HFTextRegressionModel, + model_input: ray.data.Dataset, + num_instances: int, + batch_size: int, + ) -> List[RewardModelInferenceResultWithInput]: + def scheduling_strategy_fn(): + # One bundle per tensor parallel worker + pg = ray.util.placement_group( + [{ + "GPU": 1, + "CPU": 1 + }] * self.inferencer_args.tensor_parallel_size, + strategy="STRICT_PACK", + ) + return dict( + scheduling_strategy=PlacementGroupSchedulingStrategy( + pg, placement_group_capture_child_tasks=True + ) + ) + + resources_kwarg: Dict[str, Any] = {} + if self.inferencer_args.tensor_parallel_size == 1: + # For tensor_parallel_size == 1, we simply set num_gpus=1. + resources_kwarg["num_gpus"] = 1 + else: + # Otherwise, we have to set num_gpus=0 and provide + # a function that will create a placement group for + # each instance. + resources_kwarg["num_gpus"] = 0 + resources_kwarg["ray_remote_args_fn"] = scheduling_strategy_fn + + ## predictor + class DistributedPredictor: + def __init__( + self, + model_args: ModelArguments, + ): + self.model = HFTextRegressionModel( + model_args=model_args, + tune_strategy='none', + use_accelerator=True + ) + self.model.activate_model_for_inference(use_vllm=False) + + def __call__(self, batch: Dict[str, np.ndarray]): + """batch: Dict[str, np.ndarray] + Example (batch size=2): + {'input': array(['...','...'], dtype=object), + 'output': array([array(["...", "..."], dtype=object), array(['...','...'], dtype=object)], dtype=object), + 'input_ids': array([[[128000, 128006, 882, ..., 128256, 128256, 128256], + [128000, 128006, 882, ..., 128256, 128256, 128256]], + [[128000, 128006, 882, ..., 128256, 128256, 128256], + [128000, 128006, 882, ..., 128256, 128256, 128256]]])} + """ + # The batch is managed by ray and the actual batch size may smaller than + # inference_batch_size in config, since there may be some remainders. + # For example, 10 examples with 2 inference instances and inference_batch_size=4, + # there will be only 2 examples for instance 0 to run and then the + # actual batch size changes. + actual_batch_size = len(batch['input']) + input_tensor = torch.LongTensor([ + [list(arr) for arr in batch['input_ids'][batch_idx]] + for batch_idx in range(actual_batch_size) + ]).flatten(start_dim=0, end_dim=1).to("cuda") + batched_inference_res = self.model.inference(input_tensor).logits + batched_inference_res = batched_inference_res.to("cpu").reshape(actual_batch_size, -1, 1).squeeze(dim=-1).tolist() + # [bs, num_output_sequences] + batched_final_res = { + "input": batch['input'].tolist(), + "output": [ + [ + {"score": batched_inference_res[j][i], "text": batch["output"][j][i]} + for i in range(len(batch['output'][j])) + ] + for j in range(actual_batch_size) + ], + } # do this since we're writing to a pandas dataframe + return batched_final_res + + # inference + model_input_mapping = model_input.map_batches( + DistributedPredictor, + concurrency=num_instances, # Set the concurrency to the number of LLM instances. + batch_size=batch_size, + fn_constructor_kwargs={ + "model_args": model.model_args, + }, + **resources_kwarg, + ) + + df_model_output = model_input_mapping.to_pandas() # the actual forwards are executed here + logger.info(f"Distributed reward model inference result preview:\n{df_model_output.head(10)}") + + model_output = [ + {"input": row["input"], "output": row["output"]} for _, row in df_model_output.iterrows() + ] + + return model_output
+ + + +
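The `DistributedPredictor.__call__` above receives token ids of shape [batch, num_output_sequences, seq_len], flattens them into a single forward pass, and folds the scores back to one score per candidate. A toy illustration of just that reshape (torch only, no model):

import torch

batch, num_outputs, seq_len = 2, 3, 5
input_ids = torch.randint(0, 100, (batch, num_outputs, seq_len))

flat = input_ids.flatten(start_dim=0, end_dim=1)            # [6, 5]: one big forward pass
fake_logits = torch.randn(flat.shape[0], 1)                 # stand-in for model.inference(...).logits
scores = fake_logits.reshape(batch, -1, 1).squeeze(dim=-1)  # [2, 3]: one score per candidate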
+[docs] + def __vllm_inference( + self, + model: HFTextRegressionModel, + model_input: List[str], + enable_distributed_inference: bool = False, + ) -> List[float]: + raise NotImplementedError("VLLM inference for reward model is not implemented yet.")
+ + + +
+[docs] + def __post_process_model_output( + self, + model_output: SequenceClassifierOutputWithPast, + ) -> List[float]: + final_output = model_output.logits.to("cpu").reshape(-1).tolist() + + return final_output
+ + + +
+[docs] + def flatten_list( + self, + list_of_list: List[List] + ) -> Tuple[List, List[int]]: + sublist_lengths = [len(sublist) for sublist in list_of_list] + flattened_list = [item for sublist in list_of_list for item in sublist] + return flattened_list, sublist_lengths
+ + + +
+[docs] + def compress_list( + self, + list_to_compress: List, + sublist_lengths: List[int] + ) -> List[List]: + assert sum(sublist_lengths) == len(list_to_compress), "Sum of sublist lengths should be equal to length of list to compress." + compressed_list = [] + start_index = 0 + for length in sublist_lengths: + sublist = list_to_compress[start_index: start_index + length] + compressed_list.append(sublist) + start_index += length + return compressed_list
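`flatten_list` and `compress_list` form a round-trip used for `text_to_textlist` inputs: the per-prompt candidate lists are flattened so the whole batch can be scored at once, then the scores are re-grouped per prompt. For example:

nested = [[0.1, 0.2], [0.7], [0.3, 0.4, 0.5]]   # e.g. candidate scores grouped per prompt
lengths = [len(sub) for sub in nested]          # the sublist_lengths returned by flatten_list
flat = [x for sub in nested for x in sub]       # the flattened_list returned by flatten_list

regrouped, start = [], 0                        # what compress_list(flat, lengths) reconstructs
for n in lengths:
    regrouped.append(flat[start:start + n])
    start += n
assert regrouped == nested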
+
+ + + + + + + + \ No newline at end of file diff --git a/_modules/lmflow/pipeline/rm_tuner.html b/_modules/lmflow/pipeline/rm_tuner.html new file mode 100644 index 000000000..3a140bfaa --- /dev/null +++ b/_modules/lmflow/pipeline/rm_tuner.html @@ -0,0 +1,702 @@ + + + + + + + + + + lmflow.pipeline.rm_tuner — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for lmflow.pipeline.rm_tuner

+import sys
+import logging
+from typing import Optional
+from copy import deepcopy
+
+import numpy as np
+import datasets
+import transformers
+from transformers import set_seed
+from transformers.utils import send_example_telemetry
+from transformers.trainer_callback import (
+    TrainerCallback
+)
+
+from lmflow.datasets import Dataset
+from lmflow.models.hf_text_regression_model import HFTextRegressionModel
+from lmflow.pipeline.finetuner import Finetuner
+from lmflow.pipeline.utils.rm_trainer import compute_metrics, RewardTrainer, PeftRewardTrainer
+from lmflow.pipeline.utils.peft_trainer import PeftSavingCallback
+from lmflow.pipeline.utils.rm_dataprocessor import RewardDataCollatorWithPadding
+
+
+
+[docs] +logger = logging.getLogger(__name__)
+ + + +
+[docs] +class RewardModelTuner(Finetuner): + """Initializes the `RewardModelTuner` class. + + Parameters + ---------- + model_args : ModelArguments object. + Contains the arguments required to load the model. + + data_args : DatasetArguments object. + Contains the arguments required to load the dataset. + + finetuner_args : RewardModelTunerArguments object. + Contains the arguments required to perform finetuning. + + args : Optional. + Positional arguments. + + kwargs : Optional. + Keyword arguments. + """ + def __init__( + self, + model_args, + data_args, + finetuner_args, + *args, + **kwargs + ): + super().__init__(model_args, data_args, finetuner_args, *args, **kwargs) + + +
+[docs] + def tune( + self, + model: HFTextRegressionModel, + dataset, + transform_dataset_in_place=True, + data_collator=None, + **kwargs + ): + # 0. basic init + if not transform_dataset_in_place: + dataset = deepcopy(dataset) + + # 1. prepare dataset + with self.finetuner_args.main_process_first(desc="dataset map tokenization"): + tokenized_dataset = model.tokenize(dataset) + if self.data_args.disable_group_texts: + lm_dataset = tokenized_dataset + else: + lm_dataset = self.group_text( + tokenized_dataset, + model_max_length=model.get_max_length(), + ) + train_dataset = lm_dataset.get_backend_dataset() + logger.info(f"Number of train samples: {len(train_dataset)}") + + if self.finetuner_args.do_train and self.data_args.max_train_samples is not None: + max_train_samples = min(len(train_dataset), self.data_args.max_train_samples) + train_dataset = train_dataset.select(range(max_train_samples)) + + if self.finetuner_args.do_eval: + eval_dataset_args = deepcopy(self.data_args) + eval_dataset_args.dataset_path = self.finetuner_args.eval_dataset_path + eval_dataset = Dataset(eval_dataset_args) + with self.finetuner_args.main_process_first(desc="dataset map tokenization"): + tokenized_dataset = model.tokenize(eval_dataset) + if self.data_args.disable_group_texts: + lm_dataset = tokenized_dataset + else: + lm_dataset = self.group_text( + tokenized_dataset, + model_max_length=model.get_max_length(), + ) + eval_dataset = lm_dataset.get_backend_dataset() + logger.info(f"Number of eval samples: {len(eval_dataset)}") + + if data_collator is None: + data_collator = RewardDataCollatorWithPadding( + tokenizer=model.get_tokenizer(), + max_length=self.model_args.model_max_length + ) + + # 2. prepare trainer + if self.model_args.use_lora: + RewardModelingTrainer = PeftRewardTrainer + trainer_callbacks = [PeftSavingCallback] + else: + RewardModelingTrainer = RewardTrainer + trainer_callbacks = [] + + if self.finetuner_args.use_lisa: + class DynamicLayerActivationCallback(TrainerCallback): + def __init__(self, n_layers, interval_steps, model, **kwargs): + super().__init__() + self.n_layers = n_layers + self.interval_steps = interval_steps + self.model = model + + # Determine the way to access layers based on the model type + class_to_layers_map = { + 'LlamaForCausalLM': 'model.model.layers', + 'Qwen2ForCausalLM': 'model.model.layers', + 'MistralForCausalLM': 'model.model.layers', + 'MixtralForCausalLM': 'model.model.layers', + 'GemmaForCausalLM': 'model.model.layers', + 'GPT2LMHeadModel': 'model.transformer.h', + } + model_class_name = self.model.__class__.__name__ + if model_class_name in class_to_layers_map: + self.layers_attribute = class_to_layers_map[model_class_name] + else: + self.layers_attribute = kwargs.get("lisa_layers_attribute") + self.total_layers = len(eval('self.' + self.layers_attribute)) # Dynamically execute to get the number of layers + + self.active_layers_indices = [] + + def freeze_all_layers(self): + layers = eval('self.' + self.layers_attribute) # Dynamically execute to get layers + for layer in layers: + for param in layer.parameters(): + param.requires_grad = False + + def on_step_begin(self, args, state, control, **kwargs): + # Check if it's time to switch active layers, including at step 0 + if state.global_step % self.interval_steps == 0: + self.switch_active_layers() + + def switch_active_layers(self): + # First, disable gradients for all layers + self.freeze_all_layers() + + # Randomly select n_layers to activate + layers = eval('self.' 
+ self.layers_attribute) # Re-fetch layer references + self.active_layers_indices = np.random.choice(range(self.total_layers), self.n_layers, replace=False) + print(f"Activating layers at indices: {self.active_layers_indices} for the next steps.", flush=True) + + # Enable gradients only for the selected layers + for idx in self.active_layers_indices: + for param in layers[idx].parameters(): + param.requires_grad = True + + # Instantiate the callback + dynamic_layer_activation_callback = DynamicLayerActivationCallback( + n_layers=self.finetuner_args.lisa_activated_layers, # Number of layers to activate + interval_steps=self.finetuner_args.lisa_interval_steps, # Step interval to update active layers + model=model.get_backend_model(), + lisa_layers_attribute=self.finetuner_args.lisa_layers_attribute + ) + + trainer_callbacks.append(dynamic_layer_activation_callback) + + trainer = RewardModelingTrainer( + model=model.get_backend_model(), + args=self.finetuner_args, + train_dataset=train_dataset if self.finetuner_args.do_train else None, + eval_dataset=eval_dataset if self.finetuner_args.do_eval else None, + tokenizer=model.get_tokenizer(), + data_collator=data_collator, + compute_metrics=compute_metrics if self.finetuner_args.do_eval else None, + callbacks=trainer_callbacks + ) + + # 3. training + if self.finetuner_args.do_train: + checkpoint = None + last_checkpoint = self.last_checkpoint + if self.finetuner_args.resume_from_checkpoint is not None: + checkpoint = self.finetuner_args.resume_from_checkpoint + elif last_checkpoint is not None: + checkpoint = last_checkpoint + + if self.finetuner_args.gradient_checkpointing: + if model.get_backend_model().config.use_cache: + logger.warning( + "Backend model config `use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`." + ) + model.get_backend_model().config.use_cache = False + + train_result = trainer.train(resume_from_checkpoint=checkpoint) + + trainer.save_model() # Saves the tokenizer too for easy upload + + metrics = train_result.metrics + + max_train_samples = ( + self.data_args.max_train_samples if self.data_args.max_train_samples is not None else len(train_dataset) + ) + metrics["train_samples"] = min(max_train_samples, len(train_dataset)) + + trainer.log_metrics("train", metrics) + trainer.save_metrics("train", metrics) + trainer.save_state() + + kwargs = {"finetuned_from": self.model_args.model_name_or_path, "tasks": "reward-modeling"} + if self.data_args.dataset_name is not None: + kwargs["dataset_tags"] = self.data_args.dataset_name + if self.data_args.dataset_config_name is not None: + kwargs["dataset_args"] = self.data_args.dataset_config_name + kwargs["dataset"] = f"{self.data_args.dataset_name} {self.data_args.dataset_config_name}" + else: + kwargs["dataset"] = self.data_args.dataset_name + + if self.finetuner_args.push_to_hub: + trainer.push_to_hub(**kwargs) + else: + trainer.create_model_card(**kwargs) + + return model
+
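Concretely, the LISA callback defined inside `tune` freezes every transformer layer and, every `interval_steps` optimizer steps, re-enables gradients for a small random subset of layers. A toy illustration of the resulting schedule (the layer count and hyperparameters are assumptions):

import numpy as np

total_layers = 32       # e.g. a 32-layer decoder backbone
n_layers = 2            # finetuner_args.lisa_activated_layers
interval_steps = 20     # finetuner_args.lisa_interval_steps

rng = np.random.default_rng(0)
for step in range(0, 61, interval_steps):
    active = sorted(rng.choice(total_layers, n_layers, replace=False).tolist())
    print(f"step {step}: layers {active} trainable, all others frozen")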
+ + + + + + + + \ No newline at end of file diff --git a/_modules/lmflow/pipeline/utils/dpov2_dataprocessor.html b/_modules/lmflow/pipeline/utils/dpov2_dataprocessor.html new file mode 100644 index 000000000..73ea8ab09 --- /dev/null +++ b/_modules/lmflow/pipeline/utils/dpov2_dataprocessor.html @@ -0,0 +1,703 @@ + + + + + + + + + + lmflow.pipeline.utils.dpov2_dataprocessor — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for lmflow.pipeline.utils.dpov2_dataprocessor

+from dataclasses import dataclass
+import logging
+from typing import Optional, Union, Dict, List, Any
+
+import torch
+from torch.nn.utils.rnn import pad_sequence
+from transformers import (
+    PreTrainedModel,
+    PreTrainedTokenizerBase,
+)
+
+
+
+[docs] +logger = logging.getLogger(__name__)
+ + + +@dataclass +
+[docs] +class PreferenceDataCollatorWithPadding: +
+[docs] + tokenizer: PreTrainedTokenizerBase
+ +
+[docs] + model: Optional[PreTrainedModel] = None
+ +
+[docs] + padding: Union[bool, str] = True
+ +
+[docs] + max_length: Optional[int] = None
+ +
+[docs] + max_prompt_length: Optional[int] = None
+ +
+[docs] + label_pad_token_id: int = -100
+ +
+[docs] + padding_value: int = 0
+ +
+[docs] + truncation_mode: str = "keep_end"
+ +
+[docs] + is_encoder_decoder: Optional[bool] = False
+ +
+[docs] + max_target_length: Optional[int] = None
+ +
+[docs] + mask_prompt: Optional[bool] = False
+ + + +
+[docs] + def tokenize_batch_element( + self, + prompt: str, + chosen: str, + rejected: str, + ) -> Dict: + """Tokenize a single batch element. + + At this stage, we don't convert to PyTorch tensors yet; we just handle the truncation + in case the prompt + chosen or prompt + rejected responses is/are too long. First + we truncate the prompt; if we're still too long, we truncate the chosen/rejected. + + We also create the labels for the chosen/rejected responses, which are of length equal to + the sum of the length of the prompt and the chosen/rejected response, with + label_pad_token_id for the prompt tokens. + """ + batch = {} + + if self.is_encoder_decoder: + raise NotImplementedError + + chosen_tokens = self.tokenizer(chosen, add_special_tokens=False) + rejected_tokens = self.tokenizer(rejected, add_special_tokens=False) + prompt_tokens = self.tokenizer(prompt, add_special_tokens=False) + + eos_token_id = self.tokenizer.eos_token_id + # Get indices in list prompt_tokens["input_ids"] that equals the EOS token (often 0) + eos_indices_prompt = [i for i, x in enumerate(prompt_tokens["input_ids"]) if x == eos_token_id] + # attention mask these indices to eos_token_id + if self.mask_prompt: + new_attention_mask = [0 for i, p in enumerate(prompt_tokens["attention_mask"])] + else: + new_attention_mask = [ + 0 if i in eos_indices_prompt else p for i, p in enumerate(prompt_tokens["attention_mask"]) + ] + prompt_tokens["attention_mask"] = new_attention_mask + + # do the same for chosen and rejected + eos_indices_chosen = [i for i, x in enumerate(chosen_tokens["input_ids"]) if x == eos_token_id] + new_attention_mask_c = [ + 0 if i in eos_indices_chosen else p for i, p in enumerate(chosen_tokens["attention_mask"]) + ] + chosen_tokens["attention_mask"] = new_attention_mask_c + + eos_indices_rejected = [i for i, x in enumerate(rejected_tokens["input_ids"]) if x == eos_token_id] + new_attention_mask_r = [ + 0 if i in eos_indices_rejected else p for i, p in enumerate(rejected_tokens["attention_mask"]) + ] + rejected_tokens["attention_mask"] = new_attention_mask_r + + # add EOS token to end of prompt + + chosen_tokens["input_ids"].append(self.tokenizer.eos_token_id) + chosen_tokens["attention_mask"].append(1) + + rejected_tokens["input_ids"].append(self.tokenizer.eos_token_id) + rejected_tokens["attention_mask"].append(1) + + longer_response_length = max(len(chosen_tokens["input_ids"]), len(rejected_tokens["input_ids"])) + + # if combined sequence is too long, truncate the prompt + if len(prompt_tokens["input_ids"]) + longer_response_length > self.max_length: + if self.truncation_mode == "keep_start": + prompt_tokens = {k: v[: self.max_prompt_length] for k, v in prompt_tokens.items()} + elif self.truncation_mode == "keep_end": + prompt_tokens = {k: v[-self.max_prompt_length :] for k, v in prompt_tokens.items()} + else: + raise ValueError(f"Unknown truncation mode: {self.truncation_mode}") + + # if that's still too long, truncate the response + if len(prompt_tokens["input_ids"]) + longer_response_length > self.max_length: + chosen_tokens = {k: v[: self.max_length - self.max_prompt_length] for k, v in chosen_tokens.items()} + rejected_tokens = { + k: v[: self.max_length - self.max_prompt_length] for k, v in rejected_tokens.items() + } + + # Create labels + chosen_sequence_tokens = {k: prompt_tokens[k] + chosen_tokens[k] for k in chosen_tokens} + rejected_sequence_tokens = {k: prompt_tokens[k] + rejected_tokens[k] for k in rejected_tokens} + chosen_sequence_tokens["labels"] = 
chosen_sequence_tokens["input_ids"][:] + chosen_sequence_tokens["labels"][: len(prompt_tokens["input_ids"])] = [self.label_pad_token_id] * len( + prompt_tokens["input_ids"] + ) + rejected_sequence_tokens["labels"] = rejected_sequence_tokens["input_ids"][:] + rejected_sequence_tokens["labels"][: len(prompt_tokens["input_ids"])] = [self.label_pad_token_id] * len( + prompt_tokens["input_ids"] + ) + + for k, toks in { + "chosen": chosen_sequence_tokens, + "rejected": rejected_sequence_tokens, + "prompt": prompt_tokens, + }.items(): + for type_key, tokens in toks.items(): + if type_key == "token_type_ids": + continue + batch[f"{k}_{type_key}"] = tokens + + + + batch["prompt"] = prompt + batch["chosen"] = prompt + chosen + batch["rejected"] = prompt + rejected + batch["chosen_response_only"] = chosen + batch["rejected_response_only"] = rejected + + return batch
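To make the truncation rule above concrete, here are worked numbers under assumed limits (max_length=512, max_prompt_length=128, truncation_mode="keep_end"): the prompt is cut to its last 128 tokens first, and only if prompt plus the longer response still exceeds 512 are the responses clipped to max_length - max_prompt_length tokens.

max_length, max_prompt_length = 512, 128
prompt_len, chosen_len, rejected_len = 700, 100, 420

longer = max(chosen_len, rejected_len)      # 420
if prompt_len + longer > max_length:        # 700 + 420 > 512 -> keep last 128 prompt tokens
    prompt_len = max_prompt_length
if prompt_len + longer > max_length:        # 128 + 420 > 512 -> clip responses to 384 tokens
    chosen_len = min(chosen_len, max_length - max_prompt_length)
    rejected_len = min(rejected_len, max_length - max_prompt_length)
# result: prompt_len=128, chosen_len=100, rejected_len=384; the prompt positions in the
# labels are then masked with label_pad_token_id (-100) so only response tokens are trained on.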
+ + + +
+[docs] + def collate(self, batch): + # first, pad everything to the same length + padded_batch = {} + for k in batch[0].keys(): + if k.endswith("_input_ids") or k.endswith("_attention_mask") or k.endswith("_labels"): + if self.is_encoder_decoder: + to_pad = [torch.LongTensor(ex[k]) for ex in batch] + + if (k.startswith("prompt")) and (k.endswith("input_ids")): + padding_value = self.tokenizer.pad_token_id + elif k.endswith("_attention_mask"): + padding_value = 0 + elif (k.startswith("chosen")) or (k.startswith("rejected")) or ("decoder" in k): + padding_value = self.label_pad_token_id + else: + raise ValueError(f"Unexpected key in batch '{k}'") + padded_batch[k] = pad_sequence(to_pad, batch_first=True, padding_value=padding_value) + else: + # adapted from https://stackoverflow.com/questions/73256206 + if "prompt" in k: + to_pad = [torch.LongTensor(ex[k][::-1]) for ex in batch] + else: + to_pad = [torch.LongTensor(ex[k]) for ex in batch] + if k.endswith("_input_ids"): + padding_value = self.tokenizer.pad_token_id + elif k.endswith("_labels"): + padding_value = self.label_pad_token_id + elif k.endswith("_attention_mask"): + padding_value = self.padding_value + else: + raise ValueError(f"Unexpected key in batch '{k}'") + + padded_batch[k] = pad_sequence(to_pad, batch_first=True, padding_value=padding_value) + # for the prompt, flip back so padding is on left side + if "prompt" in k: + padded_batch[k] = padded_batch[k].flip(dims=[1]) + else: + padded_batch[k] = [ex[k] for ex in batch] + + return padded_batch
+ + + +
+[docs] + def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, Any]: + tokenized_batch = [] + + for feature in features: + prompt = feature["prompt"] + chosen = feature["chosen"] + rejected = feature["rejected"] + + batch_element = self.tokenize_batch_element(prompt, chosen, rejected) + batch_element["margin"] = feature["margin"] + tokenized_batch.append(batch_element) + + # return collated batch + return self.collate(tokenized_batch)
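A hedged end-to-end sketch of using the collator (the tokenizer checkpoint and margin values are placeholders; note that `__call__` reads a `margin` field from every feature):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")   # placeholder checkpoint
tokenizer.pad_token = tokenizer.eos_token

collator = PreferenceDataCollatorWithPadding(
    tokenizer=tokenizer,
    max_length=512,
    max_prompt_length=128,
)
features = [
    {"prompt": "Q: 2+2?\nA:", "chosen": " 4", "rejected": " 5", "margin": 1.0},
    {"prompt": "Q: Capital of France?\nA:", "chosen": " Paris", "rejected": " Rome", "margin": 0.5},
]
batch = collator(features)
# prompt_* tensors come back left-padded (padding flipped to the left), while the
# chosen_*/rejected_* input ids, attention masks and labels are right-padded.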
+
+ + + + + + + + \ No newline at end of file diff --git a/_modules/lmflow/pipeline/utils/dpov2_trainer.html b/_modules/lmflow/pipeline/utils/dpov2_trainer.html new file mode 100644 index 000000000..3ed9eff99 --- /dev/null +++ b/_modules/lmflow/pipeline/utils/dpov2_trainer.html @@ -0,0 +1,728 @@ + + + + + + + + + + lmflow.pipeline.utils.dpov2_trainer — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for lmflow.pipeline.utils.dpov2_trainer

+import logging
+from typing import Optional, Union, Dict, List, Any, Tuple, Callable, Literal
+
+from datasets import Dataset
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from transformers import (
+    PreTrainedModel, 
+    PreTrainedTokenizerBase, 
+    DataCollator, 
+    TrainingArguments, 
+    TrainerCallback
+)
+from transformers.trainer_callback import TrainerCallback
+from transformers.trainer_utils import EvalLoopOutput
+from trl import DPOTrainer
+
+from lmflow.pipeline.utils.dpov2_dataprocessor import PreferenceDataCollatorWithPadding
+
+
+
+[docs] +logger = logging.getLogger(__name__)
+ + + +
+[docs] +class DPOv2Trainer(DPOTrainer): + def __init__( + self, + model: Union[PreTrainedModel, nn.Module] = None, + ref_model: Optional[Union[PreTrainedModel, nn.Module]] = None, + beta: float = 0.1, + loss_type: Literal["sigmoid", "hinge", "cross_entropy", "kl", "rev_kl", "raft"] = "rev_kl", + args: TrainingArguments = None, + data_collator: Optional[DataCollator] = None, + label_pad_token_id: int = -100, + padding_value: int = 0, + truncation_mode: str = "keep_end", + train_dataset: Optional[Dataset] = None, + eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]] = None, + tokenizer: Optional[PreTrainedTokenizerBase] = None, + model_init: Optional[Callable[[], PreTrainedModel]] = None, + callbacks: Optional[List[TrainerCallback]] = None, + optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = ( + None, + None, + ), + preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None, + max_length: Optional[int] = None, + max_prompt_length: Optional[int] = None, + max_target_length: Optional[int] = None, + peft_config: Optional[Dict] = None, + is_encoder_decoder: Optional[bool] = None, + disable_dropout: bool = True, + generate_during_eval: bool = False, + compute_metrics: Optional[Callable[[EvalLoopOutput], Dict]] = None, + mask_prompt: Optional[bool] = False, + len_penalty: float = 0, + preprocessing_num_workers: int = 1, + ): + + if data_collator is None: + data_collator = PreferenceDataCollatorWithPadding( + tokenizer, + max_length=max_length, + max_prompt_length=max_prompt_length, + label_pad_token_id=label_pad_token_id, + padding_value=padding_value, + truncation_mode=truncation_mode, + is_encoder_decoder=False, + max_target_length=max_target_length, + mask_prompt=mask_prompt, + ) + super().__init__( + model=model, + ref_model=ref_model, + beta=beta, + loss_type=loss_type, + args=args, + data_collator=data_collator, + label_pad_token_id=label_pad_token_id, + padding_value=padding_value, + truncation_mode=truncation_mode, + train_dataset=train_dataset, + eval_dataset=eval_dataset, + tokenizer=tokenizer, + model_init=model_init, + callbacks=callbacks, + optimizers=optimizers, + preprocess_logits_for_metrics=preprocess_logits_for_metrics, + max_length=max_length, + max_prompt_length=max_prompt_length, + max_target_length=max_target_length, + peft_config=peft_config, + is_encoder_decoder=is_encoder_decoder, + disable_dropout=disable_dropout, + generate_during_eval=generate_during_eval, + compute_metrics=compute_metrics, + dataset_num_proc=preprocessing_num_workers, + ) +
+[docs] + self.use_dpo_data_collator = True
+ +
+[docs] + self.len_penalty = len_penalty
+ + +
+[docs] + def dpo_loss( + self, + policy_chosen_logps: torch.FloatTensor, + policy_rejected_logps: torch.FloatTensor, + reference_chosen_logps: torch.FloatTensor, + reference_rejected_logps: torch.FloatTensor, + reference_free: bool = False, + margin: Optional[torch.FloatTensor] = None, + len_penalty: float = 0, + ) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]: + """Compute the DPO loss for a batch of policy and reference model log probabilities. + + Args: + policy_chosen_logps: Log probabilities of the policy model for the chosen responses. Shape: (batch_size,) + policy_rejected_logps: Log probabilities of the policy model for the rejected responses. Shape: (batch_size,) + reference_chosen_logps: Log probabilities of the reference model for the chosen responses. Shape: (batch_size,) + reference_rejected_logps: Log probabilities of the reference model for the rejected responses. Shape: (batch_size,) + beta: Temperature parameter for the DPO loss, typically something in the range of 0.1 to 0.5. We ignore the reference model as beta -> 0. + reference_free: If True, we ignore the _provided_ reference model and implicitly use a reference model that assigns equal probability to all responses. + + Returns: + A tuple of three tensors: (losses, chosen_rewards, rejected_rewards). + The losses tensor contains the DPO loss for each example in the batch. + The chosen_rewards and rejected_rewards tensors contain the rewards for the chosen and rejected responses, respectively. + """ + pi_logratios = policy_chosen_logps - policy_rejected_logps + ref_logratios = reference_chosen_logps - reference_rejected_logps + len_penalty + + if reference_free: + ref_logratios = 0 + + if self.loss_type == "sigmoid": + logits = pi_logratios - ref_logratios + losses = -F.logsigmoid(self.beta * logits) + elif self.loss_type == "hinge": + logits = pi_logratios - ref_logratios + losses = torch.relu(1 - self.beta * logits) + elif self.loss_type == "cross_entropy": + logits = policy_chosen_logps - reference_chosen_logps + losses = -F.logsigmoid(self.beta * logits) + elif self.loss_type == "raft": + losses = -policy_chosen_logps # F.logsigmoid(self.beta * logits) + elif self.loss_type == "ipo": + logits = pi_logratios - ref_logratios + # eqn (17) of the paper where beta is the regularization parameter for the IPO loss, denoted by tau in the paper. 
+ losses = (logits - 1 / (2 * self.beta)) ** 2 + elif self.loss_type == "kl": + logits = pi_logratios - ref_logratios + p = F.sigmoid(self.beta * logits) + p = torch.minimum(p, torch.ones_like(p) * 0.999) + p_gt = torch.exp(margin) / (1 + torch.exp(margin) + 1e-3) + losses = p * (torch.log(p) - torch.log(p_gt)) + (1 - p) * (torch.log(1 - p) - torch.log(1 - p_gt)) + elif self.loss_type == "tv": + logits = pi_logratios - ref_logratios + p = F.sigmoid(self.beta * logits) + p_gt = torch.exp(margin) / (1 + torch.exp(margin)) + losses = torch.abs(p - p_gt) + elif self.loss_type == "hellinger": + logits = pi_logratios - ref_logratios + p = F.sigmoid(self.beta * logits) + p = torch.minimum(p, torch.ones_like(p) * 0.999) + p_gt = torch.exp(margin) / (1 + torch.exp(margin)) + losses = 0.5 * ((p**0.5 - p_gt**0.5) ** 2 + ((1 - p) ** 0.5 - (1 - p_gt) ** 0.5) ** 2) + elif self.loss_type == "rev_kl": + logits = pi_logratios - ref_logratios + logp = F.logsigmoid(self.beta * logits) + logp_neg = F.logsigmoid(-self.beta * logits) + p_gt = F.sigmoid(margin) + losses = -p_gt * (logp) - (1 - p_gt) * logp_neg + else: + raise ValueError(f"Unknown loss type: {self.loss_type}.") + + chosen_rewards = self.beta * (policy_chosen_logps - reference_chosen_logps).detach() + rejected_rewards = self.beta * (policy_rejected_logps - reference_rejected_logps).detach() + + return losses, chosen_rewards, rejected_rewards
+ + +
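The default `rev_kl` branch of `dpo_loss` above can be read as a soft-label cross-entropy: the preference margin is squashed into a target probability p_gt = sigmoid(margin), and the policy is pushed so that sigmoid(beta * (pi_logratios - ref_logratios)) matches it. A standalone numeric sketch of that branch:

import torch
import torch.nn.functional as F

beta = 0.1
policy_chosen_logps, policy_rejected_logps = torch.tensor([-12.0]), torch.tensor([-15.0])
ref_chosen_logps, ref_rejected_logps = torch.tensor([-13.0]), torch.tensor([-14.0])
margin = torch.tensor([2.0])    # reward gap between chosen and rejected from the data

logits = (policy_chosen_logps - policy_rejected_logps) - (ref_chosen_logps - ref_rejected_logps)
p_gt = torch.sigmoid(margin)    # ~0.88: "chosen should win about 88% of the time"
loss = -p_gt * F.logsigmoid(beta * logits) - (1 - p_gt) * F.logsigmoid(-beta * logits)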
+[docs] + def get_batch_loss_metrics( + self, + model, + batch: Dict[str, Union[List, torch.LongTensor]], + train_eval: Literal["train", "eval"] = "train", + ): + return self.get_batch_metrics(model, batch, train_eval)
+ + +
+[docs] + def get_batch_metrics( + self, + model, + batch: Dict[str, Union[List, torch.LongTensor]], + train_eval: Literal["train", "eval"] = "train", + ): + """Compute the DPO loss and other metrics for the given batch of inputs for train or test.""" + metrics = {} + ( + policy_chosen_logps, + policy_rejected_logps, + policy_chosen_logits, + policy_rejected_logits, + ) = self.concatenated_forward(model, batch) + with torch.no_grad(): + if self.ref_model is None: + with self.accelerator.unwrap_model(self.model).disable_adapter(): + ( + reference_chosen_logps, + reference_rejected_logps, + _, + _, + ) = self.concatenated_forward(self.model, batch) + else: + ( + reference_chosen_logps, + reference_rejected_logps, + _, + _, + ) = self.concatenated_forward(self.ref_model, batch) + if self.len_penalty > 0: + chosen_len = batch["chosen_input_ids"].shape[1] * self.len_penalty + rejected_len = batch["rejected_input_ids"].shape[1] * self.len_penalty + len_penalty = chosen_len - rejected_len + else: + chosen_len = 1 + rejected_len = 1 + len_penalty = 0 + + margin = torch.tensor(batch["margin"], dtype=policy_chosen_logps.dtype).to(self.accelerator.device) + losses, chosen_rewards, rejected_rewards = self.dpo_loss( + policy_chosen_logps, + policy_rejected_logps, + reference_chosen_logps, + reference_rejected_logps, + margin=margin, + len_penalty=len_penalty, + ) + reward_accuracies = (chosen_rewards > rejected_rewards).float() + + prefix = "eval_" if train_eval == "eval" else "" + metrics[f"{prefix}rewards/chosen"] = chosen_rewards.cpu().mean() + metrics[f"{prefix}rewards/rejected"] = rejected_rewards.cpu().mean() + metrics[f"{prefix}rewards/accuracies"] = reward_accuracies.cpu().mean() + metrics[f"{prefix}rewards/margins"] = (chosen_rewards - rejected_rewards).cpu().mean() + metrics[f"{prefix}logps/rejected"] = policy_rejected_logps.detach().cpu().mean() + metrics[f"{prefix}logps/chosen"] = policy_chosen_logps.detach().cpu().mean() + metrics[f"{prefix}logits/rejected"] = policy_rejected_logits.detach().cpu().mean() + metrics[f"{prefix}logits/chosen"] = policy_chosen_logits.detach().cpu().mean() + + return losses.mean(), metrics
+
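When `len_penalty` is greater than zero, `get_batch_metrics` above adds a length offset to the reference log-ratio, which discourages the policy from winning simply by producing longer chosen sequences. For example, with len_penalty = 0.01, a padded chosen length of 220 tokens and a rejected length of 180 tokens, the offset added to ref_logratios is 0.01 * 220 - 0.01 * 180 = 0.4.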
+ + + + + + + + \ No newline at end of file diff --git a/_modules/lmflow/pipeline/utils/memory_safe_dpov2_align.html b/_modules/lmflow/pipeline/utils/memory_safe_dpov2_align.html new file mode 100644 index 000000000..1c741c742 --- /dev/null +++ b/_modules/lmflow/pipeline/utils/memory_safe_dpov2_align.html @@ -0,0 +1,539 @@ + + + + + + + + + + lmflow.pipeline.utils.memory_safe_dpov2_align — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for lmflow.pipeline.utils.memory_safe_dpov2_align

+#!/usr/bin/env python
+# coding=utf-8
+# Copyright 2024 Statistics and Machine Learning Research Group. All rights reserved.
+import logging
+import os
+import sys
+import copy
+
+from transformers import (
+    HfArgumentParser
+)
+
+from lmflow.datasets import Dataset
+from lmflow.models.hf_decoder_model import HFDecoderModel
+from lmflow.pipeline.dpov2_aligner import DPOv2Aligner
+from lmflow.args import (
+    ModelArguments, 
+    DatasetArguments, 
+    DPOv2AlignerArguments,
+)
+from lmflow.utils.common import remove_dataclass_attr_prefix, create_copied_dataclass
+
+
+
+[docs] +logger = logging.getLogger(__name__)
+ + + +
+[docs] +ReferenceModelArguments: ModelArguments = create_copied_dataclass( + original_dataclass=ModelArguments, + field_prefix="reference_", + class_prefix="Reference" +)
+ + + +
+[docs] +def main(): + # Parses arguments + parser = HfArgumentParser(( + ModelArguments, + ReferenceModelArguments, + DatasetArguments, + DPOv2AlignerArguments, + )) + target_model_args, ref_model_args, data_args, aligner_args = parser.parse_args_into_dataclasses() + + ref_model_args_dict = remove_dataclass_attr_prefix(ref_model_args, "reference_") + ref_model_args = ModelArguments(**ref_model_args_dict) + + target_model = HFDecoderModel(target_model_args) + ref_model = HFDecoderModel(ref_model_args) + train_dataset = Dataset(data_args) + eval_dataset = copy.deepcopy(train_dataset.sample( + n=100, + seed=aligner_args.random_seed + )) + + aligner = DPOv2Aligner( + model_args=target_model_args, + data_args=train_dataset.data_args, + aligner_args=aligner_args, + ref_model_args=ref_model.model_args, + ) + aligner.align( + model=target_model, + ref_model=ref_model, + train_dataset=train_dataset, + eval_dataset=eval_dataset, + )
+ + + +if __name__ == "__main__": + main() +
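The `create_copied_dataclass` / `remove_dataclass_attr_prefix` pair used above duplicates `ModelArguments` under a `reference_` prefix so that the policy model and the reference model can be configured independently from one command line, then strips the prefix back off before constructing the reference `HFDecoderModel`. A hedged sketch of that round-trip (the field name is illustrative of the prefixing convention, e.g. `--reference_model_name_or_path` on the CLI):

ReferenceModelArguments = create_copied_dataclass(
    original_dataclass=ModelArguments,
    field_prefix="reference_",
    class_prefix="Reference",
)

ref_args = ReferenceModelArguments(reference_model_name_or_path="path/to/reference_model")
plain_model_args = ModelArguments(**remove_dataclass_attr_prefix(ref_args, "reference_"))
# plain_model_args.model_name_or_path == "path/to/reference_model"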
+ + + + + + + + \ No newline at end of file diff --git a/_modules/lmflow/pipeline/utils/memory_safe_vllm_inference.html b/_modules/lmflow/pipeline/utils/memory_safe_vllm_inference.html new file mode 100644 index 000000000..3f5d55f7f --- /dev/null +++ b/_modules/lmflow/pipeline/utils/memory_safe_vllm_inference.html @@ -0,0 +1,534 @@ + + + + + + + + + + lmflow.pipeline.utils.memory_safe_vllm_inference — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for lmflow.pipeline.utils.memory_safe_vllm_inference

+#!/usr/bin/env python
+# coding=utf-8
+# Copyright 2024 Statistics and Machine Learning Research Group. All rights reserved.
+
+# Note that this is only a workaround, since the vLLM
+# inference engine cannot release GPU memory properly at the moment. Please see this github
+# [issue](https://github.com/vllm-project/vllm/issues/1908).
+
+import logging
+import sys
+import os
+from typing import Dict
+
+from transformers import (
+    HfArgumentParser
+)
+
+from lmflow.datasets import Dataset
+from lmflow.models.auto_model import AutoModel
+from lmflow.pipeline.vllm_inferencer import VLLMInferencer
+from lmflow.args import (
+    ModelArguments, 
+    DatasetArguments, 
+    AutoArguments,
+)
+from lmflow.utils.constants import MEMORY_SAFE_VLLM_INFERENCE_FINISH_FLAG
+
+
+
+[docs] +logger = logging.getLogger(__name__)
+ + + +
+[docs] +def main(): + # Parses arguments + pipeline_name = "vllm_inferencer" + PipelineArguments = AutoArguments.get_pipeline_args_class(pipeline_name) + + parser = HfArgumentParser(( + ModelArguments, + DatasetArguments, + PipelineArguments + )) + if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): + # If we pass only one argument to the script and it's the path to a json file, + # let's parse it to get our arguments. + model_args, data_args, pipeline_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) + else: + model_args, data_args, pipeline_args = parser.parse_args_into_dataclasses() + + dataset = Dataset(data_args) + model = AutoModel.get_model(model_args, tune_strategy='none') + inferencer = VLLMInferencer(model_args, data_args, pipeline_args) + + res = inferencer.inference( + model, + dataset, + release_gpu=False, + enable_decode_inference_result=pipeline_args.enable_decode_inference_result, + enable_distributed_inference=pipeline_args.enable_distributed_inference, + distributed_inference_num_instances=pipeline_args.distributed_inference_num_instances, + inference_batch_size=pipeline_args.vllm_inference_batch_size, + ) + + # use this as a flag, stdout will be captured by the pipeline + print(MEMORY_SAFE_VLLM_INFERENCE_FINISH_FLAG)
+ + + +if __name__ == "__main__": + main() +
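Since the vLLM engine cannot free GPU memory in-process, this script is meant to be launched as a child process; the calling pipeline can watch stdout for MEMORY_SAFE_VLLM_INFERENCE_FINISH_FLAG and reclaim the GPU once the process exits. A rough sketch of that pattern (the invocation below is illustrative, not the exact launcher used by LMFlow):

import subprocess

from lmflow.utils.constants import MEMORY_SAFE_VLLM_INFERENCE_FINISH_FLAG

proc = subprocess.run(
    ["python", "-m", "lmflow.pipeline.utils.memory_safe_vllm_inference", "inference_config.json"],
    capture_output=True,
    text=True,
)
finished = MEMORY_SAFE_VLLM_INFERENCE_FINISH_FLAG in proc.stdout  # True once inference completed cleanly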
+ + + + + + + + \ No newline at end of file diff --git a/_modules/lmflow/pipeline/utils/peft_trainer.html b/_modules/lmflow/pipeline/utils/peft_trainer.html new file mode 100644 index 000000000..07c19182c --- /dev/null +++ b/_modules/lmflow/pipeline/utils/peft_trainer.html @@ -0,0 +1,559 @@ + + + + + + + + + + lmflow.pipeline.utils.peft_trainer — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for lmflow.pipeline.utils.peft_trainer

+#!/usr/bin/env python
+# coding=utf-8
+"""Trainer for Peft models
+"""
+
+from __future__ import absolute_import
+from transformers import Trainer
+from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR
+from transformers.trainer_callback import (
+    TrainerCallback,
+    TrainerControl,
+    TrainerState,
+)
+from transformers.training_args import TrainingArguments
+import os
+import numpy as np
+
+
+[docs] +class PeftTrainer(Trainer): +
+[docs] + def _save_checkpoint(self, _, trial, metrics=None): + """ Don't save base model, optimizer etc. + but create checkpoint folder (needed for saving adapter) """ + checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}" + + run_dir = self._get_output_dir(trial=trial) + output_dir = os.path.join(run_dir, checkpoint_folder) + + if metrics is not None and self.args.metric_for_best_model is not None: + metric_to_check = self.args.metric_for_best_model + if not metric_to_check.startswith("eval_"): + metric_to_check = f"eval_{metric_to_check}" + metric_value = metrics[metric_to_check] + + operator = np.greater if self.args.greater_is_better else np.less + if (self.state.best_metric is None or self.state.best_model_checkpoint is None + or operator(metric_value, self.state.best_metric)): + self.state.best_metric = metric_value + + self.state.best_model_checkpoint = output_dir + + os.makedirs(output_dir, exist_ok=True) + + if self.args.should_save: + self._rotate_checkpoints(use_mtime=True, output_dir=run_dir)
+
+ + +
+[docs] +class PeftSavingCallback(TrainerCallback): + """ Correctly save PEFT model and not full model """ +
+[docs] + def _save(self, model, folder): + if folder is None: + folder = "" + peft_model_path = os.path.join(folder, "adapter_model") + model.save_pretrained(peft_model_path)
+ + +
+[docs] + def on_train_end(self, args: TrainingArguments, state: TrainerState, + control: TrainerControl, **kwargs): + """ Save final best model adapter """ + self._save(kwargs['model'], state.best_model_checkpoint)
+ + +
+[docs] + def on_epoch_end(self, args: TrainingArguments, state: TrainerState, + control: TrainerControl, **kwargs): + """ Save intermediate model adapters in case of interrupted training """ + folder = os.path.join(args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{state.global_step}") + self._save(kwargs['model'], folder)
+ + +
+[docs] + def on_save( + self, + args: TrainingArguments, + state: TrainerState, + control: TrainerControl, + **kwargs, + ): + checkpoint_folder = os.path.join( + args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{state.global_step}" + ) + self._save(kwargs['model'], checkpoint_folder) + + peft_model_path = os.path.join(checkpoint_folder, "adapter_model") + kwargs["model"].save_pretrained(peft_model_path) + return control
+
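As in `RewardModelTuner` above, the callback is simply added to the trainer's callback list when a PEFT/LoRA model is being trained, so that only the adapter weights are written at each checkpoint. A minimal hedged sketch (the model, arguments and dataset are assumed to be constructed elsewhere):

# Hedged wiring sketch: peft_model, training_args and train_dataset are assumed
# to exist (e.g. a LoRA-wrapped backend model and transformers TrainingArguments).
trainer = PeftTrainer(
    model=peft_model,
    args=training_args,
    train_dataset=train_dataset,
    callbacks=[PeftSavingCallback],   # passed as a class, as RewardModelTuner does above
)
trainer.train()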
+ + + + + + + + \ No newline at end of file diff --git a/_modules/lmflow/pipeline/utils/raft_trainer.html b/_modules/lmflow/pipeline/utils/raft_trainer.html new file mode 100644 index 000000000..ae996631d --- /dev/null +++ b/_modules/lmflow/pipeline/utils/raft_trainer.html @@ -0,0 +1,4614 @@ + + + + + + + + + + lmflow.pipeline.utils.raft_trainer — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for lmflow.pipeline.utils.raft_trainer

+import contextlib
+import functools
+import glob
+import inspect
+import math
+import os
+import random
+import re
+import shutil
+import sys
+import time
+import warnings
+from collections.abc import Mapping
+from distutils.util import strtobool
+from pathlib import Path
+from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
+
+from tqdm.auto import tqdm
+
+
+# Integrations must be imported before ML frameworks:
+# isort: off
+
+### Fix the import bug in the latest version
+try:
+    from transformers.integrations import (
+        default_hp_search_backend,
+        get_reporting_integration_callbacks,
+        hp_params,
+        is_fairscale_available,
+        is_optuna_available,
+        is_ray_tune_available,
+        is_sigopt_available,
+        is_wandb_available,
+        run_hp_search_optuna,
+        run_hp_search_ray,
+        run_hp_search_sigopt,
+        run_hp_search_wandb,
+    )
+except ImportError:
+    from transformers.integrations import (
+    get_reporting_integration_callbacks,
+    hp_params,
+    is_fairscale_available,
+    )
+    from transformers.hyperparameter_search import default_hp_search_backend, ALL_HYPERPARAMETER_SEARCH_BACKENDS
+
+    
+
+# isort: on
+
+import numpy as np
+import torch
+import torch.distributed as dist
+from huggingface_hub import Repository, create_repo
+from packaging import version
+from torch import nn
+from torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler
+from torch.utils.data.distributed import DistributedSampler
+
+
+from transformers.configuration_utils import PretrainedConfig
+from transformers.data.data_collator import DataCollator, DataCollatorWithPadding, default_data_collator
+from transformers.debug_utils import DebugOption, DebugUnderflowOverflow
+from transformers.deepspeed import deepspeed_init, is_deepspeed_zero3_enabled
+from transformers.dependency_versions_check import dep_version_check
+from transformers.modelcard import TrainingSummary
+from transformers.modeling_utils import PreTrainedModel, load_sharded_checkpoint, unwrap_model
+from transformers.models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_MAPPING_NAMES
+from transformers.optimization import Adafactor, get_scheduler
+from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS
+
+
+[docs] +is_torch_greater_or_equal_than_1_10 = version.parse(torch.__version__) >= version.parse("1.10")
+ +
+[docs] +is_torch_less_than_1_11 = version.parse(torch.__version__) < version.parse("1.11")
+ +from transformers.tokenization_utils_base import PreTrainedTokenizerBase +from transformers.trainer_callback import ( + CallbackHandler, + DefaultFlowCallback, + PrinterCallback, + ProgressCallback, + TrainerCallback, + TrainerControl, + TrainerState, +) +from transformers.trainer_pt_utils import ( + DistributedLengthGroupedSampler, + DistributedSamplerWithLoop, + DistributedTensorGatherer, + IterableDatasetShard, + LabelSmoother, + LengthGroupedSampler, + SequentialDistributedSampler, + ShardSampler, + distributed_broadcast_scalars, + distributed_concat, + find_batch_size, + get_module_class_from_name, + get_parameter_names, + nested_concat, + nested_detach, + nested_numpify, + nested_truncate, + nested_xla_mesh_reduce, + reissue_pt_warnings, +) +from transformers.trainer_utils import ( + PREFIX_CHECKPOINT_DIR, + BestRun, + EvalLoopOutput, + EvalPrediction, + FSDPOption, + HPSearchBackend, + HubStrategy, + IntervalStrategy, + PredictionOutput, + RemoveColumnsCollator, + ShardedDDPOption, + TrainerMemoryTracker, + TrainOutput, + default_compute_objective, + denumpify_detensorize, + enable_full_determinism, + find_executable_batch_size, + get_last_checkpoint, + has_length, + number_of_arguments, + seed_worker, + set_seed, + speed_metrics, +) +from transformers.training_args import OptimizerNames, ParallelMode, TrainingArguments +from transformers.utils import ( + CONFIG_NAME, + WEIGHTS_INDEX_NAME, + WEIGHTS_NAME, + can_return_loss, + find_labels, + get_full_repo_name, + is_accelerate_available, + is_apex_available, + is_datasets_available, + is_in_notebook, + is_ipex_available, + is_sagemaker_dp_enabled, + is_sagemaker_mp_enabled, + is_torch_compile_available, + is_torch_neuroncore_available, + is_torch_tpu_available, + logging, +) +from transformers.utils.generic import ContextManagers + + +
+[docs] +_is_native_cpu_amp_available = is_torch_greater_or_equal_than_1_10
+ + +
+[docs] +DEFAULT_CALLBACKS = [DefaultFlowCallback]
+ +DEFAULT_PROGRESS_CALLBACK = ProgressCallback + +if is_in_notebook(): + from transformers.utils.notebook import NotebookProgressCallback + +
+[docs] + DEFAULT_PROGRESS_CALLBACK = NotebookProgressCallback
+ + +if is_apex_available(): + from apex import amp + +if is_datasets_available(): + import datasets + +if is_torch_tpu_available(check_device=False): + import torch_xla.core.xla_model as xm + import torch_xla.debug.metrics as met + import torch_xla.distributed.parallel_loader as pl + +if is_fairscale_available(): + dep_version_check("fairscale") + import fairscale + from fairscale.nn.data_parallel import FullyShardedDataParallel as FullyShardedDDP + from fairscale.nn.data_parallel import ShardedDataParallel as ShardedDDP + from fairscale.nn.wrap import auto_wrap + from fairscale.optim import OSS + from fairscale.optim.grad_scaler import ShardedGradScaler + + +if is_sagemaker_mp_enabled(): + import smdistributed.modelparallel.torch as smp + from smdistributed.modelparallel import __version__ as SMP_VERSION + +
+[docs] + IS_SAGEMAKER_MP_POST_1_10 = version.parse(SMP_VERSION) >= version.parse("1.10")
+ + + from transformers.trainer_pt_utils import smp_forward_backward, smp_forward_only, smp_gather, smp_nested_concat +else: + IS_SAGEMAKER_MP_POST_1_10 = False + + +
+[docs] +skip_first_batches = None
+ + + + +
+[docs] +logger = logging.get_logger(__name__)
+ + + +# Name of the files used for checkpointing +
+[docs] +TRAINING_ARGS_NAME = "training_args.bin"
+ +
+[docs] +TRAINER_STATE_NAME = "trainer_state.json"
+ +
+[docs] +OPTIMIZER_NAME = "optimizer.pt"
+ +
+[docs] +SCHEDULER_NAME = "scheduler.pt"
+ +
+[docs] +SCALER_NAME = "scaler.pt"
+ + + +
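For orientation, the filename constants above compose into per-step checkpoint folders. A rough sketch of the resulting path and contents (illustrative only; which files actually appear depends on the training configuration, and `args`/`state` stand for the usual TrainingArguments/TrainerState):

# checkpoint-<global_step>/
#     training_args.bin    (TRAINING_ARGS_NAME)
#     trainer_state.json   (TRAINER_STATE_NAME)
#     optimizer.pt         (OPTIMIZER_NAME)
#     scheduler.pt         (SCHEDULER_NAME)
#     scaler.pt            (SCALER_NAME, only when fp16 grad scaling is active)
output_dir = os.path.join(args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{state.global_step}")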
+[docs] +class RaftTrainer: + """ + Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for 🤗 Transformers. + Args: + model ([`PreTrainedModel`] or `torch.nn.Module`, *optional*): + The model to train, evaluate or use for predictions. If not provided, a `model_init` must be passed. + <Tip> + [`Trainer`] is optimized to work with the [`PreTrainedModel`] provided by the library. You can still use + your own models defined as `torch.nn.Module` as long as they work the same way as the 🤗 Transformers + models. + </Tip> + args ([`TrainingArguments`], *optional*): + The arguments to tweak for training. Will default to a basic instance of [`TrainingArguments`] with the + `output_dir` set to a directory named *tmp_trainer* in the current directory if not provided. + data_collator (`DataCollator`, *optional*): + The function to use to form a batch from a list of elements of `train_dataset` or `eval_dataset`. Will + default to [`default_data_collator`] if no `tokenizer` is provided, an instance of + [`DataCollatorWithPadding`] otherwise. + train_dataset (`torch.utils.data.Dataset` or `torch.utils.data.IterableDataset`, *optional*): + The dataset to use for training. If it is a [`~datasets.Dataset`], columns not accepted by the + `model.forward()` method are automatically removed. + Note that if it's a `torch.utils.data.IterableDataset` with some randomization and you are training in a + distributed fashion, your iterable dataset should either use a internal attribute `generator` that is a + `torch.Generator` for the randomization that must be identical on all processes (and the Trainer will + manually set the seed of this `generator` at each epoch) or have a `set_epoch()` method that internally + sets the seed of the RNGs used. + eval_dataset (Union[`torch.utils.data.Dataset`, Dict[str, `torch.utils.data.Dataset`]), *optional*): + The dataset to use for evaluation. If it is a [`~datasets.Dataset`], columns not accepted by the + `model.forward()` method are automatically removed. If it is a dictionary, it will evaluate on each + dataset prepending the dictionary key to the metric name. + tokenizer ([`PreTrainedTokenizerBase`], *optional*): + The tokenizer used to preprocess the data. If provided, will be used to automatically pad the inputs to the + maximum length when batching inputs, and it will be saved along the model to make it easier to rerun an + interrupted training or reuse the fine-tuned model. + model_init (`Callable[[], PreTrainedModel]`, *optional*): + A function that instantiates the model to be used. If provided, each call to [`~Trainer.train`] will start + from a new instance of the model as given by this function. + The function may have zero argument, or a single one containing the optuna/Ray Tune/SigOpt trial object, to + be able to choose different architectures according to hyper parameters (such as layer count, sizes of + inner layers, dropout probabilities etc). + compute_metrics (`Callable[[EvalPrediction], Dict]`, *optional*): + The function that will be used to compute metrics at evaluation. Must take a [`EvalPrediction`] and return + a dictionary string to metric values. + callbacks (List of [`TrainerCallback`], *optional*): + A list of callbacks to customize the training loop. Will add those to the list of default callbacks + detailed in [here](callback). + If you want to remove one of the default callbacks used, use the [`Trainer.remove_callback`] method. 
+ optimizers (`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`, *optional*): A tuple + containing the optimizer and the scheduler to use. Will default to an instance of [`AdamW`] on your model + and a scheduler given by [`get_linear_schedule_with_warmup`] controlled by `args`. + preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`, *optional*): + A function that preprocess the logits right before caching them at each evaluation step. Must take two + tensors, the logits and the labels, and return the logits once processed as desired. The modifications made + by this function will be reflected in the predictions received by `compute_metrics`. + Note that the labels (second parameter) will be `None` if the dataset does not have them. + Important attributes: + - **model** -- Always points to the core model. If using a transformers model, it will be a [`PreTrainedModel`] + subclass. + - **model_wrapped** -- Always points to the most external model in case one or more other modules wrap the + original model. This is the model that should be used for the forward pass. For example, under `DeepSpeed`, + the inner model is wrapped in `DeepSpeed` and then again in `torch.nn.DistributedDataParallel`. If the inner + model hasn't been wrapped, then `self.model_wrapped` is the same as `self.model`. + - **is_model_parallel** -- Whether or not a model has been switched to a model parallel mode (different from + data parallelism, this means some of the model layers are split on different GPUs). + - **place_model_on_device** -- Whether or not to automatically place the model on the device - it will be set + to `False` if model parallel or deepspeed is used, or if the default + `TrainingArguments.place_model_on_device` is overridden to return `False` . + - **is_in_train** -- Whether or not a model is currently running `train` (e.g. when `evaluate` is called while + in `train`) + """ + + from transformers.trainer_pt_utils import _get_learning_rate, log_metrics, metrics_format, save_metrics, save_state + + def __init__( + self, + model: Union[PreTrainedModel, nn.Module] = None, + args: TrainingArguments = None, + data_collator: Optional[DataCollator] = None, + train_dataset: Optional[Dataset] = None, + eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]] = None, + tokenizer: Optional[PreTrainedTokenizerBase] = None, + model_init: Optional[Callable[[], PreTrainedModel]] = None, + compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None, + callbacks: Optional[List[TrainerCallback]] = None, + optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None), + preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None, + ): + ############ +
+[docs] + self.save_counter = 0
+ + ############## + if args is None: + output_dir = "tmp_trainer" + logger.info(f"No `TrainingArguments` passed, using `output_dir={output_dir}`.") + args = TrainingArguments(output_dir=output_dir) +
+[docs] + self.args = args
+ + # Seed must be set before instantiating the model when using model + enable_full_determinism(self.args.seed) if self.args.full_determinism else set_seed(self.args.seed) +
+[docs] + self.hp_name = None
+ +
+[docs] + self.deepspeed = None
+ +
+[docs] + self.is_in_train = False
+ + + # memory metrics - must set up as early as possible +
+[docs] + self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics)
+ + self._memory_tracker.start() + + # set the correct log level depending on the node +
+[docs] + log_level = args.get_process_log_level()
+ + logging.set_verbosity(log_level) + + # force device and distributed setup init explicitly + args._setup_devices + + if model is None: + if model_init is not None: + self.model_init = model_init + model = self.call_model_init() + else: + raise RuntimeError("`Trainer` requires either a `model` or `model_init` argument") + else: + if model_init is not None: + warnings.warn( + "`Trainer` requires either a `model` or `model_init` argument, but not both. `model_init` will" + " overwrite your model when calling the `train` method. This will become a fatal error in the next" + " release.", + FutureWarning, + ) + self.model_init = model_init + + if model.__class__.__name__ in MODEL_MAPPING_NAMES: + raise ValueError( + f"The model you have picked ({model.__class__.__name__}) cannot be used as is for training: it only " + "computes hidden states and does not accept any labels. You should choose a model with a head " + "suitable for your task like any of the `AutoModelForXxx` listed at " + "https://huggingface.co/docs/transformers/model_doc/auto." + ) + + if hasattr(model, "is_parallelizable") and model.is_parallelizable and model.model_parallel: + self.is_model_parallel = True + else: + self.is_model_parallel = False + + # At this stage the model is already loaded + if getattr(model, "is_loaded_in_8bit", False): + if getattr(model, "_is_int8_training_enabled", False): + logger.info( + "The model is loaded in 8-bit precision. To train this model you need to add additional modules" + " inside the model such as adapters using `peft` library and freeze the model weights. Please" + " check " + " the examples in https://github.com/huggingface/peft for more details." + ) + else: + raise ValueError( + "The model you want to train is loaded in 8-bit precision. if you want to fine-tune an 8-bit" + " model, please make sure that you have installed `bitsandbytes>=0.37.0`. " + ) + + # Setup Sharded DDP training +
+[docs] + self.sharded_ddp = None
+ + if len(args.sharded_ddp) > 0: + if args.deepspeed: + raise ValueError( + "Using --sharded_ddp xxx together with --deepspeed is not possible, deactivate one of those flags." + ) + if len(args.fsdp) > 0: + raise ValueError( + "Using --sharded_ddp xxx together with --fsdp is not possible, deactivate one of those flags." + ) + + if args.local_rank == -1: + raise ValueError("Using sharded DDP only works in distributed training.") + elif not is_fairscale_available(): + raise ImportError("Sharded DDP training requires fairscale: `pip install fairscale`.") + elif ShardedDDPOption.SIMPLE not in args.sharded_ddp and FullyShardedDDP is None: + raise ImportError( + "Sharded DDP in a mode other than simple training requires fairscale version >= 0.3, found " + f"{fairscale.__version__}. Upgrade your fairscale library: `pip install --upgrade fairscale`." + ) + elif ShardedDDPOption.SIMPLE in args.sharded_ddp: + self.sharded_ddp = ShardedDDPOption.SIMPLE + elif ShardedDDPOption.ZERO_DP_2 in args.sharded_ddp: + self.sharded_ddp = ShardedDDPOption.ZERO_DP_2 + elif ShardedDDPOption.ZERO_DP_3 in args.sharded_ddp: + self.sharded_ddp = ShardedDDPOption.ZERO_DP_3 + +
+[docs] + self.fsdp = None
+ + if len(args.fsdp) > 0: + if args.deepspeed: + raise ValueError( + "Using --fsdp xxx together with --deepspeed is not possible, deactivate one of those flags." + ) + if not args.fsdp_config["xla"] and args.local_rank == -1: + raise ValueError("Using fsdp only works in distributed training.") + + # dep_version_check("torch>=1.12.0") + # Would have to update setup.py with torch>=1.12.0 + # which isn't ideally given that it will force people not using FSDP to also use torch>=1.12.0 + # below is the current alternative. + if version.parse(version.parse(torch.__version__).base_version) < version.parse("1.12.0"): + raise ValueError("FSDP requires PyTorch >= 1.12.0") + + from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch, ShardingStrategy + + if FSDPOption.FULL_SHARD in args.fsdp: + self.fsdp = ShardingStrategy.FULL_SHARD + elif FSDPOption.SHARD_GRAD_OP in args.fsdp: + self.fsdp = ShardingStrategy.SHARD_GRAD_OP + elif FSDPOption.NO_SHARD in args.fsdp: + self.fsdp = ShardingStrategy.NO_SHARD + + self.backward_prefetch = BackwardPrefetch.BACKWARD_PRE + if "backward_prefetch" in self.args.fsdp_config and "backward_pos" not in self.backward_prefetch: + self.backward_prefetch = BackwardPrefetch.BACKWARD_POST + + self.forword_prefetch = False + if self.args.fsdp_config.get("forword_prefect", False): + self.forword_prefetch = True + + self.limit_all_gathers = False + if self.args.fsdp_config.get("limit_all_gathers", False): + self.limit_all_gathers = True + + # one place to sort out whether to place the model on device or not + # postpone switching model to cuda when: + # 1. MP - since we are trying to fit a much bigger than 1 gpu model + # 2. fp16-enabled DeepSpeed loads the model in half the size and it doesn't need .to() anyway, + # and we only use deepspeed for training at the moment + # 3. full bf16 or fp16 eval - since the model needs to be cast to the right dtype first + # 4. Sharded DDP - same as MP + # 5. FSDP - same as MP +
+[docs] + self.place_model_on_device = args.place_model_on_device
+ + if ( + self.is_model_parallel + or args.deepspeed + or ((args.fp16_full_eval or args.bf16_full_eval) and not args.do_train) + or (self.sharded_ddp in [ShardedDDPOption.ZERO_DP_2, ShardedDDPOption.ZERO_DP_3]) + or (self.fsdp is not None) + ): + self.place_model_on_device = False + +
+[docs] + default_collator = default_data_collator if tokenizer is None else DataCollatorWithPadding(tokenizer)
+ +
+[docs] + self.data_collator = data_collator if data_collator is not None else default_collator
+ +
+[docs] + self.train_dataset = train_dataset
+ +
+[docs] + self.eval_dataset = eval_dataset
+ +
+[docs] + self.tokenizer = tokenizer
+ + + if self.place_model_on_device and not getattr(model, "is_loaded_in_8bit", False): + self._move_model_to_device(model, args.device) + + # Force n_gpu to 1 to avoid DataParallel as MP will manage the GPUs + if self.is_model_parallel: + self.args._n_gpu = 1 + + # later use `self.model is self.model_wrapped` to check if it's wrapped or not +
+[docs] + self.model_wrapped = model
+ +
+[docs] + self.model = model
+ + +
+[docs] + self.compute_metrics = compute_metrics
+ +
+[docs] + self.preprocess_logits_for_metrics = preprocess_logits_for_metrics
+ + self.optimizer, self.lr_scheduler = optimizers + if model_init is not None and (self.optimizer is not None or self.lr_scheduler is not None): + raise RuntimeError( + "Passing a `model_init` is incompatible with providing the `optimizers` argument. " + "You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method." + ) + if is_torch_tpu_available() and self.optimizer is not None: + for param in self.model.parameters(): + model_device = param.device + break + for param_group in self.optimizer.param_groups: + if len(param_group["params"]) > 0: + optimizer_device = param_group["params"][0].device + break + if model_device != optimizer_device: + raise ValueError( + "The model and the optimizer parameters are not on the same device, which probably means you" + " created an optimizer around your model **before** putting on the device and passing it to the" + " `Trainer`. Make sure the lines `import torch_xla.core.xla_model as xm` and" + " `model.to(xm.xla_device())` is performed before the optimizer creation in your script." + ) + if ((self.sharded_ddp is not None) or args.deepspeed or (self.fsdp is not None)) and ( + self.optimizer is not None or self.lr_scheduler is not None + ): + raise RuntimeError( + "Passing `optimizers` is not allowed if Fairscale, Deepspeed or PyTorch FSDP is enabled." + "You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method." + ) +
+[docs] + default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to)
+ +
+[docs] + callbacks = default_callbacks if callbacks is None else default_callbacks + callbacks
+ +
+[docs] + self.callback_handler = CallbackHandler( + callbacks, self.model, self.tokenizer, self.optimizer, self.lr_scheduler + )
+ + self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK) + + # Will be set to True by `self._setup_loggers()` on first call to `self.log()`. +
+[docs] + self._loggers_initialized = False
+ + + # Create clone of distant repo and output directory if needed + if self.args.push_to_hub: + self.init_git_repo(at_init=True) + # In case of pull, we need to make sure every process has the latest. + if is_torch_tpu_available(): + xm.rendezvous("init git repo") + elif args.local_rank != -1: + dist.barrier() + + if self.args.should_save: + os.makedirs(self.args.output_dir, exist_ok=True) + + if not callable(self.data_collator) and callable(getattr(self.data_collator, "collate_batch", None)): + raise ValueError("The `data_collator` should be a simple callable (function, class with `__call__`).") + + if args.max_steps > 0: + logger.info("max_steps is given, it will override any value given in num_train_epochs") + + if train_dataset is not None and not has_length(train_dataset) and args.max_steps <= 0: + raise ValueError("train_dataset does not implement __len__, max_steps has to be specified") + + if ( + train_dataset is not None + and isinstance(train_dataset, torch.utils.data.IterableDataset) + and args.group_by_length + ): + raise ValueError("the `--group_by_length` option is only available for `Dataset`, not `IterableDataset") + +
+[docs] + self._signature_columns = None
+ + + # Mixed precision setup +
+[docs] + self.use_apex = False
+ +
+[docs] + self.use_cuda_amp = False
+ +
+[docs] + self.use_cpu_amp = False
+ + + # Mixed precision setup for SageMaker Model Parallel + if is_sagemaker_mp_enabled(): + # BF16 + model parallelism in SageMaker: currently not supported, raise an error + if args.bf16: + raise ValueError("SageMaker Model Parallelism does not support BF16 yet. Please use FP16 instead ") + + if IS_SAGEMAKER_MP_POST_1_10: + # When there's mismatch between SMP config and trainer argument, use SMP config as truth + if args.fp16 != smp.state.cfg.fp16: + logger.warning( + f"FP16 provided in SM_HP_MP_PARAMETERS is {smp.state.cfg.fp16}," + f"but FP16 provided in trainer argument is {args.fp16}," + f"setting to {smp.state.cfg.fp16}" + ) + args.fp16 = smp.state.cfg.fp16 + else: + # smp < 1.10 does not support fp16 in trainer. + if hasattr(smp.state.cfg, "fp16"): + logger.warning( + f"FP16 provided in SM_HP_MP_PARAMETERS is {smp.state.cfg.fp16}, " + "but SageMaker Model Parallelism < 1.10 does not support FP16 in trainer." + ) + + if args.fp16 or args.bf16: + if args.half_precision_backend == "auto": + if args.device == torch.device("cpu"): + if args.fp16: + raise ValueError("Tried to use `fp16` but it is not supported on cpu") + elif _is_native_cpu_amp_available: + args.half_precision_backend = "cpu_amp" + else: + raise ValueError("Tried to use cpu amp but native cpu amp is not available") + else: + args.half_precision_backend = "cuda_amp" + + logger.info(f"Using {args.half_precision_backend} half precision backend") + +
+[docs] + self.do_grad_scaling = False
+ + if (args.fp16 or args.bf16) and not (args.deepspeed or is_sagemaker_mp_enabled() or is_torch_tpu_available()): + # deepspeed and SageMaker Model Parallel manage their own half precision + if args.half_precision_backend == "cuda_amp": + self.use_cuda_amp = True + self.amp_dtype = torch.float16 if args.fp16 else torch.bfloat16 + # bf16 does not need grad scaling + self.do_grad_scaling = self.amp_dtype == torch.float16 + if self.do_grad_scaling: + if self.sharded_ddp is not None: + self.scaler = ShardedGradScaler() + elif self.fsdp is not None: + from torch.distributed.fsdp.sharded_grad_scaler import ( + ShardedGradScaler as FSDPShardedGradScaler, + ) + + self.scaler = FSDPShardedGradScaler() + elif is_torch_tpu_available(): + from torch_xla.amp import GradScaler + + self.scaler = GradScaler() + else: + self.scaler = torch.cuda.amp.GradScaler() + elif args.half_precision_backend == "cpu_amp": + self.use_cpu_amp = True + self.amp_dtype = torch.bfloat16 + else: + if not is_apex_available(): + raise ImportError( + "Using FP16 with APEX but APEX is not installed, please refer to" + " https://www.github.com/nvidia/apex." + ) + self.use_apex = True + + # FP16 + model parallelism in SageMaker: gradient clipping does not work for now so we raise a helpful error. + if ( + is_sagemaker_mp_enabled() + and self.use_cuda_amp + and args.max_grad_norm is not None + and args.max_grad_norm > 0 + ): + raise ValueError( + "SageMaker Model Parallelism in mixed precision mode does not support gradient clipping yet. Pass " + "along 'max_grad_norm': 0 in your hyperparameters." + ) + + # Label smoothing + if self.args.label_smoothing_factor != 0: + self.label_smoother = LabelSmoother(epsilon=self.args.label_smoothing_factor) + else: + self.label_smoother = None + +
+[docs] + self.state = TrainerState( + is_local_process_zero=self.is_local_process_zero(), + is_world_process_zero=self.is_world_process_zero(), + )
+ + +
+[docs] + self.control = TrainerControl()
+ + # Internal variable to count flos in each process, will be accumulated in `self.state.total_flos` then + # returned to 0 every time flos need to be logged +
+[docs] + self.current_flos = 0
+ +
+[docs] + self.hp_search_backend = None
+ +
+[docs] + self.use_tune_checkpoints = False
+ +
+[docs] + default_label_names = find_labels(self.model.__class__)
+ +
+[docs] + self.label_names = default_label_names if self.args.label_names is None else self.args.label_names
+ +
+[docs] + self.can_return_loss = can_return_loss(self.model.__class__)
+ + self.control = self.callback_handler.on_init_end(self.args, self.state, self.control) + + # Internal variables to keep track of the original batch size +
+[docs] + self._train_batch_size = args.train_batch_size
+ + + # very last + self._memory_tracker.stop_and_update_metrics() + + # torch.compile + if args.torch_compile and not is_torch_compile_available(): + raise RuntimeError("Using torch.compile requires PyTorch 2.0 or higher.") + +
+[docs] + def add_callback(self, callback): + """ + Add a callback to the current list of [`~transformer.TrainerCallback`]. + Args: + callback (`type` or [`~transformer.TrainerCallback`]): + A [`~transformer.TrainerCallback`] class or an instance of a [`~transformer.TrainerCallback`]. In the + first case, will instantiate a member of that class. + """ + self.callback_handler.add_callback(callback)
+ + +
+[docs] + def pop_callback(self, callback): + """ + Remove a callback from the current list of [`~transformer.TrainerCallback`] and returns it. + If the callback is not found, returns `None` (and no error is raised). + Args: + callback (`type` or [`~transformer.TrainerCallback`]): + A [`~transformer.TrainerCallback`] class or an instance of a [`~transformer.TrainerCallback`]. In the + first case, will pop the first member of that class found in the list of callbacks. + Returns: + [`~transformer.TrainerCallback`]: The callback removed, if found. + """ + return self.callback_handler.pop_callback(callback)
+ + +
+[docs] + def remove_callback(self, callback): + """ + Remove a callback from the current list of [`~transformer.TrainerCallback`]. + Args: + callback (`type` or [`~transformer.TrainerCallback`]): + A [`~transformer.TrainerCallback`] class or an instance of a [`~transformer.TrainerCallback`]. In the + first case, will remove the first member of that class found in the list of callbacks. + """ + self.callback_handler.remove_callback(callback)
+ + +
+[docs] + def _move_model_to_device(self, model, device): + model = model.to(device) + # Moving a model to an XLA device disconnects the tied weights, so we have to retie them. + if self.args.parallel_mode == ParallelMode.TPU and hasattr(model, "tie_weights"): + model.tie_weights()
+ + +
+[docs] + def _set_signature_columns_if_needed(self): + if self._signature_columns is None: + # Inspect model forward signature to keep only the arguments it accepts. + signature = inspect.signature(self.model.forward) + self._signature_columns = list(signature.parameters.keys()) + # Labels may be named label or label_ids, the default data collator handles that. + self._signature_columns += list(set(["label", "label_ids"] + self.label_names))
+ + +
+[docs] + def _remove_unused_columns(self, dataset: "datasets.Dataset", description: Optional[str] = None): + if not self.args.remove_unused_columns: + return dataset + self._set_signature_columns_if_needed() + signature_columns = self._signature_columns + + ignored_columns = list(set(dataset.column_names) - set(signature_columns)) + if len(ignored_columns) > 0: + dset_description = "" if description is None else f"in the {description} set" + logger.info( + f"The following columns {dset_description} don't have a corresponding argument in " + f"`{self.model.__class__.__name__}.forward` and have been ignored: {', '.join(ignored_columns)}." + f" If {', '.join(ignored_columns)} are not expected by `{self.model.__class__.__name__}.forward`, " + " you can safely ignore this message." + ) + + columns = [k for k in signature_columns if k in dataset.column_names] + + if version.parse(datasets.__version__) < version.parse("1.4.0"): + dataset.set_format( + type=dataset.format["type"], columns=columns, format_kwargs=dataset.format["format_kwargs"] + ) + return dataset + else: + return dataset.remove_columns(ignored_columns)
+ + +
+[docs] + def _get_collator_with_removed_columns( + self, data_collator: Callable, description: Optional[str] = None + ) -> Callable: + """Wrap the data collator in a callable removing unused columns.""" + if not self.args.remove_unused_columns: + return data_collator + self._set_signature_columns_if_needed() + signature_columns = self._signature_columns + + remove_columns_collator = RemoveColumnsCollator( + data_collator=data_collator, + signature_columns=signature_columns, + logger=logger, + description=description, + model_name=self.model.__class__.__name__, + ) + return remove_columns_collator
+ + +
+[docs] + def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]: + if self.train_dataset is None or not has_length(self.train_dataset): + return None + + generator = None + if self.args.world_size <= 1: + generator = torch.Generator() + # for backwards compatibility, we generate a seed here (which is sampled from a generator seeded with + # `args.seed`) if data_seed isn't provided. + # Further on in this method, we default to `args.seed` instead. + if self.args.data_seed is None: + seed = int(torch.empty((), dtype=torch.int64).random_().item()) + else: + seed = self.args.data_seed + generator.manual_seed(seed) + + seed = self.args.data_seed if self.args.data_seed is not None else self.args.seed + + # Build the sampler. + if self.args.group_by_length: + if is_datasets_available() and isinstance(self.train_dataset, datasets.Dataset): + lengths = ( + self.train_dataset[self.args.length_column_name] + if self.args.length_column_name in self.train_dataset.column_names + else None + ) + else: + lengths = None + model_input_name = self.tokenizer.model_input_names[0] if self.tokenizer is not None else None + if self.args.world_size <= 1: + return LengthGroupedSampler( + self.args.train_batch_size * self.args.gradient_accumulation_steps, + dataset=self.train_dataset, + lengths=lengths, + model_input_name=model_input_name, + generator=generator, + ) + else: + return DistributedLengthGroupedSampler( + self.args.train_batch_size * self.args.gradient_accumulation_steps, + dataset=self.train_dataset, + num_replicas=self.args.world_size, + rank=self.args.process_index, + lengths=lengths, + model_input_name=model_input_name, + seed=seed, + ) + + else: + if self.args.world_size <= 1: + return RandomSampler(self.train_dataset, generator=generator) + elif ( + self.args.parallel_mode in [ParallelMode.TPU, ParallelMode.SAGEMAKER_MODEL_PARALLEL] + and not self.args.dataloader_drop_last + ): + # Use a loop for TPUs when drop_last is False to have all batches have the same size. + return DistributedSamplerWithLoop( + self.train_dataset, + batch_size=self.args.per_device_train_batch_size, + num_replicas=self.args.world_size, + rank=self.args.process_index, + seed=seed, + ) + else: + return DistributedSampler( + self.train_dataset, + num_replicas=self.args.world_size, + rank=self.args.process_index, + seed=seed, + )
+ + +
+[docs] + def get_train_dataloader(self) -> DataLoader: + """ + Returns the training [`~torch.utils.data.DataLoader`]. + Will use no sampler if `train_dataset` does not implement `__len__`, a random sampler (adapted to distributed + training if necessary) otherwise. + Subclass and override this method if you want to inject some custom behavior. + """ + if self.train_dataset is None: + raise ValueError("Trainer: training requires a train_dataset.") + + train_dataset = self.train_dataset + data_collator = self.data_collator + if is_datasets_available() and isinstance(train_dataset, datasets.Dataset): + train_dataset = self._remove_unused_columns(train_dataset, description="training") + else: + data_collator = self._get_collator_with_removed_columns(data_collator, description="training") + + if isinstance(train_dataset, torch.utils.data.IterableDataset): + if self.args.world_size > 1: + train_dataset = IterableDatasetShard( + train_dataset, + batch_size=self._train_batch_size, + drop_last=self.args.dataloader_drop_last, + num_processes=self.args.world_size, + process_index=self.args.process_index, + ) + + return DataLoader( + train_dataset, + batch_size=self._train_batch_size, + collate_fn=data_collator, + num_workers=self.args.dataloader_num_workers, + pin_memory=self.args.dataloader_pin_memory, + ) + + train_sampler = self._get_train_sampler() + + return DataLoader( + train_dataset, + batch_size=self._train_batch_size, + sampler=train_sampler, + collate_fn=data_collator, + drop_last=self.args.dataloader_drop_last, + num_workers=self.args.dataloader_num_workers, + pin_memory=self.args.dataloader_pin_memory, + worker_init_fn=seed_worker, + )
+ + +
+[docs] + def _get_eval_sampler(self, eval_dataset: Dataset) -> Optional[torch.utils.data.Sampler]: + # Deprecated code + if self.args.use_legacy_prediction_loop: + if is_torch_tpu_available(): + return SequentialDistributedSampler( + eval_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal() + ) + elif is_sagemaker_mp_enabled(): + return SequentialDistributedSampler( + eval_dataset, + num_replicas=smp.dp_size(), + rank=smp.dp_rank(), + batch_size=self.args.per_device_eval_batch_size, + ) + elif self.args.local_rank != -1: + return SequentialDistributedSampler(eval_dataset) + else: + return SequentialSampler(eval_dataset) + + if self.args.world_size <= 1: + return SequentialSampler(eval_dataset) + else: + return ShardSampler( + eval_dataset, + batch_size=self.args.per_device_eval_batch_size, + num_processes=self.args.world_size, + process_index=self.args.process_index, + )
+ + +
+[docs] + def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader: + """ + Returns the evaluation [`~torch.utils.data.DataLoader`]. + Subclass and override this method if you want to inject some custom behavior. + Args: + eval_dataset (`torch.utils.data.Dataset`, *optional*): + If provided, will override `self.eval_dataset`. If it is a [`~datasets.Dataset`], columns not accepted + by the `model.forward()` method are automatically removed. It must implement `__len__`. + """ + if eval_dataset is None and self.eval_dataset is None: + raise ValueError("Trainer: evaluation requires an eval_dataset.") + eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset + data_collator = self.data_collator + + if is_datasets_available() and isinstance(eval_dataset, datasets.Dataset): + eval_dataset = self._remove_unused_columns(eval_dataset, description="evaluation") + else: + data_collator = self._get_collator_with_removed_columns(data_collator, description="evaluation") + + if isinstance(eval_dataset, torch.utils.data.IterableDataset): + if self.args.world_size > 1: + eval_dataset = IterableDatasetShard( + eval_dataset, + batch_size=self.args.per_device_eval_batch_size, + drop_last=self.args.dataloader_drop_last, + num_processes=self.args.world_size, + process_index=self.args.process_index, + ) + return DataLoader( + eval_dataset, + batch_size=self.args.eval_batch_size, + collate_fn=data_collator, + num_workers=self.args.dataloader_num_workers, + pin_memory=self.args.dataloader_pin_memory, + ) + + eval_sampler = self._get_eval_sampler(eval_dataset) + + return DataLoader( + eval_dataset, + sampler=eval_sampler, + batch_size=self.args.eval_batch_size, + collate_fn=data_collator, + drop_last=self.args.dataloader_drop_last, + num_workers=self.args.dataloader_num_workers, + pin_memory=self.args.dataloader_pin_memory, + )
+ + +
+[docs] + def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader: + """ + Returns the test [`~torch.utils.data.DataLoader`]. + Subclass and override this method if you want to inject some custom behavior. + Args: + test_dataset (`torch.utils.data.Dataset`, *optional*): + The test dataset to use. If it is a [`~datasets.Dataset`], columns not accepted by the + `model.forward()` method are automatically removed. It must implement `__len__`. + """ + data_collator = self.data_collator + + if is_datasets_available() and isinstance(test_dataset, datasets.Dataset): + test_dataset = self._remove_unused_columns(test_dataset, description="test") + else: + data_collator = self._get_collator_with_removed_columns(data_collator, description="test") + + if isinstance(test_dataset, torch.utils.data.IterableDataset): + if self.args.world_size > 1: + test_dataset = IterableDatasetShard( + test_dataset, + batch_size=self.args.eval_batch_size, + drop_last=self.args.dataloader_drop_last, + num_processes=self.args.world_size, + process_index=self.args.process_index, + ) + return DataLoader( + test_dataset, + batch_size=self.args.eval_batch_size, + collate_fn=data_collator, + num_workers=self.args.dataloader_num_workers, + pin_memory=self.args.dataloader_pin_memory, + ) + + test_sampler = self._get_eval_sampler(test_dataset) + + # We use the same batch_size as for eval. + return DataLoader( + test_dataset, + sampler=test_sampler, + batch_size=self.args.eval_batch_size, + collate_fn=data_collator, + drop_last=self.args.dataloader_drop_last, + num_workers=self.args.dataloader_num_workers, + pin_memory=self.args.dataloader_pin_memory, + )
+ + +
+[docs] + def create_optimizer_and_scheduler(self, num_training_steps: int): + """ + Setup the optimizer and the learning rate scheduler. + We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the + Trainer's init through `optimizers`, or subclass and override this method (or `create_optimizer` and/or + `create_scheduler`) in a subclass. + """ + self.create_optimizer() + if IS_SAGEMAKER_MP_POST_1_10 and smp.state.cfg.fp16: + # If smp >= 1.10 and fp16 is enabled, we unwrap the optimizer + optimizer = self.optimizer.optimizer + else: + optimizer = self.optimizer + self.create_scheduler(num_training_steps=num_training_steps, optimizer=optimizer)
+ + +
+[docs] + def create_optimizer(self): + """ + Setup the optimizer. + We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the + Trainer's init through `optimizers`, or subclass and override this method in a subclass. + """ + opt_model = self.model_wrapped if is_sagemaker_mp_enabled() else self.model + + if self.optimizer is None: + decay_parameters = get_parameter_names(opt_model, ALL_LAYERNORM_LAYERS) + decay_parameters = [name for name in decay_parameters if "bias" not in name] + optimizer_grouped_parameters = [ + { + "params": [ + p for n, p in opt_model.named_parameters() if (n in decay_parameters and p.requires_grad) + ], + "weight_decay": self.args.weight_decay, + }, + { + "params": [ + p for n, p in opt_model.named_parameters() if (n not in decay_parameters and p.requires_grad) + ], + "weight_decay": 0.0, + }, + ] + + optimizer_cls, optimizer_kwargs = Trainer.get_optimizer_cls_and_kwargs(self.args) + + if self.sharded_ddp == ShardedDDPOption.SIMPLE: + self.optimizer = OSS( + params=optimizer_grouped_parameters, + optim=optimizer_cls, + **optimizer_kwargs, + ) + else: + self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs) + if optimizer_cls.__name__ == "Adam8bit": + import bitsandbytes + + manager = bitsandbytes.optim.GlobalOptimManager.get_instance() + + skipped = 0 + for module in opt_model.modules(): + if isinstance(module, nn.Embedding): + skipped += sum({p.data_ptr(): p.numel() for p in module.parameters()}.values()) + print(f"skipped {module}: {skipped/2**20}M params") + manager.register_module_override(module, "weight", {"optim_bits": 32}) + logger.debug(f"bitsandbytes: will optimize {module} in fp32") + print(f"skipped: {skipped/2**20}M params") + + if is_sagemaker_mp_enabled(): + self.optimizer = smp.DistributedOptimizer(self.optimizer) + + return self.optimizer
+ + + @staticmethod +
+[docs] + def get_optimizer_cls_and_kwargs(args: TrainingArguments) -> Tuple[Any, Any]: + """ + Returns the optimizer class and optimizer parameters based on the training arguments. + Args: + args (`transformers.training_args.TrainingArguments`): + The training arguments for the training session. + """ + + # parse args.optim_args + optim_args = {} + if args.optim_args: + for mapping in args.optim_args.replace(" ", "").split(","): + key, value = mapping.split("=") + optim_args[key] = value + + optimizer_kwargs = {"lr": args.learning_rate} + + adam_kwargs = { + "betas": (args.adam_beta1, args.adam_beta2), + "eps": args.adam_epsilon, + } + if args.optim == OptimizerNames.ADAFACTOR: + optimizer_cls = Adafactor + optimizer_kwargs.update({"scale_parameter": False, "relative_step": False}) + elif args.optim == OptimizerNames.ADAMW_HF: + from transformers.optimization import AdamW + + optimizer_cls = AdamW + optimizer_kwargs.update(adam_kwargs) + elif args.optim in [OptimizerNames.ADAMW_TORCH, OptimizerNames.ADAMW_TORCH_FUSED]: + from torch.optim import AdamW + + optimizer_cls = AdamW + optimizer_kwargs.update(adam_kwargs) + if args.optim == OptimizerNames.ADAMW_TORCH_FUSED: + optimizer_kwargs.update({"fused": True}) + elif args.optim == OptimizerNames.ADAMW_TORCH_XLA: + try: + from torch_xla.amp.syncfree import AdamW + + optimizer_cls = AdamW + optimizer_kwargs.update(adam_kwargs) + except ImportError: + raise ValueError("Trainer failed to import syncfree AdamW from torch_xla.") + elif args.optim == OptimizerNames.ADAMW_APEX_FUSED: + try: + from apex.optimizers import FusedAdam + + optimizer_cls = FusedAdam + optimizer_kwargs.update(adam_kwargs) + except ImportError: + raise ValueError("Trainer tried to instantiate apex FusedAdam but apex is not installed!") + elif args.optim == OptimizerNames.ADAMW_BNB: + try: + from bitsandbytes.optim import Adam8bit + + optimizer_cls = Adam8bit + optimizer_kwargs.update(adam_kwargs) + except ImportError: + raise ValueError("Trainer tried to instantiate bnb Adam8bit but bnb is not installed!") + elif args.optim == OptimizerNames.ADAMW_ANYPRECISION: + try: + from torchdistx.optimizers import AnyPrecisionAdamW + + optimizer_cls = AnyPrecisionAdamW + optimizer_kwargs.update(adam_kwargs) + + # TODO Change dtypes back to M=FP32, Var = BF16, Kahan = False once they can be cast together in torchdistx. + optimizer_kwargs.update( + { + "use_kahan_summation": strtobool(optim_args.get("use_kahan_summation", "False")), + "momentum_dtype": getattr(torch, optim_args.get("momentum_dtype", "float32")), + "variance_dtype": getattr(torch, optim_args.get("variance_dtype", "float32")), + "compensation_buffer_dtype": getattr( + torch, optim_args.get("compensation_buffer_dtype", "bfloat16") + ), + } + ) + except ImportError: + raise ValueError("Please install https://github.com/pytorch/torchdistx") + elif args.optim == OptimizerNames.SGD: + optimizer_cls = torch.optim.SGD + elif args.optim == OptimizerNames.ADAGRAD: + optimizer_cls = torch.optim.Adagrad + else: + raise ValueError(f"Trainer cannot instantiate unsupported optimizer: {args.optim}") + return optimizer_cls, optimizer_kwargs
+ + +
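As the parsing loop at the top of `get_optimizer_cls_and_kwargs` shows, `optim_args` is a flat comma-separated list of `key=value` pairs with no nesting. A small illustrative example of what that parsing yields (editorial sketch; the argument string is hypothetical and the parsed values stay strings):

optim_args_str = "use_kahan_summation=False,momentum_dtype=float32"  # hypothetical --optim_args value
optim_args = {}
for mapping in optim_args_str.replace(" ", "").split(","):
    key, value = mapping.split("=")
    optim_args[key] = value
print(optim_args)  # {'use_kahan_summation': 'False', 'momentum_dtype': 'float32'}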
+    def create_scheduler(self, num_training_steps: int, optimizer: torch.optim.Optimizer = None):
+        """
+        Setup the scheduler. The optimizer of the trainer must have been set up either before this
+        method is called or passed as an argument.
+        Args:
+            num_training_steps (int): The number of training steps to do.
+        """
+        ############
+        # RAFT-specific change: the schedule is stretched to three times the nominal number of
+        # steps (presumably so the learning rate does not decay to zero while train() is called
+        # repeatedly across RAFT iterations).
+        num_training_steps *= 3
+        ############
+        if self.lr_scheduler is None:
+            self.lr_scheduler = get_scheduler(
+                self.args.lr_scheduler_type,
+                optimizer=self.optimizer if optimizer is None else optimizer,
+                num_warmup_steps=self.args.get_warmup_steps(num_training_steps),
+                num_training_steps=num_training_steps,
+            )
+        return self.lr_scheduler
+ + +
+[docs] + def num_examples(self, dataloader: DataLoader) -> int: + """ + Helper to get number of samples in a [`~torch.utils.data.DataLoader`] by accessing its dataset. When + dataloader.dataset does not exist or has no length, estimates as best it can + """ + try: + dataset = dataloader.dataset + # Special case for IterableDatasetShard, we need to dig deeper + if isinstance(dataset, IterableDatasetShard): + return len(dataloader.dataset.dataset) + return len(dataloader.dataset) + except (NameError, AttributeError, TypeError): # no dataset or length, estimate by length of dataloader + return len(dataloader) * self.args.per_device_train_batch_size
+ + +
+[docs] + def _hp_search_setup(self, trial: Union["optuna.Trial", Dict[str, Any]]): + """HP search setup code""" + self._trial = trial + + if self.hp_search_backend is None or trial is None: + return + if self.hp_search_backend == HPSearchBackend.OPTUNA: + params = self.hp_space(trial) + elif self.hp_search_backend == HPSearchBackend.RAY: + params = trial + params.pop("wandb", None) + elif self.hp_search_backend == HPSearchBackend.SIGOPT: + params = {k: int(v) if isinstance(v, str) else v for k, v in trial.assignments.items()} + elif self.hp_search_backend == HPSearchBackend.WANDB: + params = trial + + for key, value in params.items(): + if not hasattr(self.args, key): + logger.warning( + f"Trying to set {key} in the hyperparameter search but there is no corresponding field in" + " `TrainingArguments`." + ) + continue + old_attr = getattr(self.args, key, None) + # Casting value to the proper type + if old_attr is not None: + value = type(old_attr)(value) + setattr(self.args, key, value) + if self.hp_search_backend == HPSearchBackend.OPTUNA: + logger.info(f"Trial: {trial.params}") + if self.hp_search_backend == HPSearchBackend.SIGOPT: + logger.info(f"SigOpt Assignments: {trial.assignments}") + if self.hp_search_backend == HPSearchBackend.WANDB: + logger.info(f"W&B Sweep parameters: {trial}") + if self.args.deepspeed: + # Rebuild the deepspeed config to reflect the updated training parameters + from transformers.deepspeed import HfTrainerDeepSpeedConfig + + self.args.hf_deepspeed_config = HfTrainerDeepSpeedConfig(self.args.deepspeed) + self.args.hf_deepspeed_config.trainer_config_process(self.args)
+ + + + + +
+[docs] + def _tune_save_checkpoint(self): + from ray import tune + + if not self.use_tune_checkpoints: + return + with tune.checkpoint_dir(step=self.state.global_step) as checkpoint_dir: + output_dir = os.path.join(checkpoint_dir, f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}") + self.save_model(output_dir, _internal_call=True) + if self.args.should_save: + self.state.save_to_json(os.path.join(output_dir, TRAINER_STATE_NAME)) + torch.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME)) + torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME))
+ + +
+[docs] + def call_model_init(self, trial=None): + model_init_argcount = number_of_arguments(self.model_init) + if model_init_argcount == 0: + model = self.model_init() + elif model_init_argcount == 1: + model = self.model_init(trial) + else: + raise RuntimeError("model_init should have 0 or 1 argument.") + + if model is None: + raise RuntimeError("model_init should not return None.") + + return model
+ + +
+[docs] + def torch_jit_model_eval(self, model, dataloader, training=False): + if not training: + if dataloader is None: + logger.warning("failed to use PyTorch jit mode due to current dataloader is none.") + return model + example_batch = next(iter(dataloader)) + example_batch = self._prepare_inputs(example_batch) + try: + jit_model = model.eval() + with ContextManagers([self.autocast_smart_context_manager(cache_enabled=False), torch.no_grad()]): + if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.14.0"): + if isinstance(example_batch, dict): + jit_model = torch.jit.trace(jit_model, example_kwarg_inputs=example_batch, strict=False) + else: + jit_model = torch.jit.trace( + jit_model, + example_kwarg_inputs={key: example_batch[key] for key in example_batch}, + strict=False, + ) + else: + jit_inputs = [] + for key in example_batch: + example_tensor = torch.ones_like(example_batch[key]) + jit_inputs.append(example_tensor) + jit_inputs = tuple(jit_inputs) + jit_model = torch.jit.trace(jit_model, jit_inputs, strict=False) + jit_model = torch.jit.freeze(jit_model) + with torch.no_grad(): + jit_model(**example_batch) + jit_model(**example_batch) + model = jit_model + self.use_cpu_amp = False + self.use_cuda_amp = False + except (RuntimeError, TypeError, ValueError, NameError, IndexError) as e: + logger.warning(f"failed to use PyTorch jit mode due to: {e}.") + + return model
+ + +
+[docs] + def ipex_optimize_model(self, model, training=False, dtype=torch.float32): + if not is_ipex_available(): + raise ImportError( + "Using IPEX but IPEX is not installed or IPEX's version does not match current PyTorch, please refer" + " to https://github.com/intel/intel-extension-for-pytorch." + ) + + import intel_extension_for_pytorch as ipex + + if not training: + model.eval() + dtype = torch.bfloat16 if not self.is_in_train and self.args.bf16_full_eval else dtype + # conv_bn_folding is disabled as it fails in symbolic tracing, resulting in ipex warnings + model = ipex.optimize(model, dtype=dtype, level="O1", conv_bn_folding=False, inplace=not self.is_in_train) + else: + if not model.training: + model.train() + model, self.optimizer = ipex.optimize( + model, dtype=dtype, optimizer=self.optimizer, inplace=True, level="O1" + ) + + return model
+ + +
+[docs] + def _wrap_model(self, model, training=True, dataloader=None): + if self.args.torch_compile: + model = torch.compile(model, backend=self.args.torch_compile_backend, mode=self.args.torch_compile_mode) + + if self.args.use_ipex: + dtype = torch.bfloat16 if self.use_cpu_amp else torch.float32 + model = self.ipex_optimize_model(model, training, dtype=dtype) + + if is_sagemaker_mp_enabled(): + # Wrapping the base model twice in a DistributedModel will raise an error. + if isinstance(self.model_wrapped, smp.model.DistributedModel): + return self.model_wrapped + return smp.DistributedModel(model, backward_passes_per_step=self.args.gradient_accumulation_steps) + + # already initialized its own DDP and AMP + if self.deepspeed: + return self.deepspeed + + # train/eval could be run multiple-times - if already wrapped, don't re-wrap it again + if unwrap_model(model) is not model: + return model + + # Mixed precision training with apex (torch < 1.6) + if self.use_apex and training: + model, self.optimizer = amp.initialize(model, self.optimizer, opt_level=self.args.fp16_opt_level) + + # Multi-gpu training (should be after apex fp16 initialization) + if self.args.n_gpu > 1: + model = nn.DataParallel(model) + + if self.args.jit_mode_eval: + start_time = time.time() + model = self.torch_jit_model_eval(model, dataloader, training) + self.jit_compilation_time = round(time.time() - start_time, 4) + + # Note: in torch.distributed mode, there's no point in wrapping the model + # inside a DistributedDataParallel as we'll be under `no_grad` anyways. + if not training: + return model + + # Distributed training (should be after apex fp16 initialization) + if self.sharded_ddp is not None: + # Sharded DDP! + if self.sharded_ddp == ShardedDDPOption.SIMPLE: + model = ShardedDDP(model, self.optimizer) + else: + mixed_precision = self.args.fp16 or self.args.bf16 + cpu_offload = ShardedDDPOption.OFFLOAD in self.args.sharded_ddp + zero_3 = self.sharded_ddp == ShardedDDPOption.ZERO_DP_3 + # XXX: Breaking the self.model convention but I see no way around it for now. + if ShardedDDPOption.AUTO_WRAP in self.args.sharded_ddp: + model = auto_wrap(model) + self.model = model = FullyShardedDDP( + model, + mixed_precision=mixed_precision, + reshard_after_forward=zero_3, + cpu_offload=cpu_offload, + ).to(self.args.device) + # Distributed training using PyTorch FSDP + elif self.fsdp is not None: + if not self.args.fsdp_config["xla"]: + # PyTorch FSDP! 
+ from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload, MixedPrecision + from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP + from torch.distributed.fsdp.wrap import size_based_auto_wrap_policy, transformer_auto_wrap_policy + + if FSDPOption.OFFLOAD in self.args.fsdp: + cpu_offload = CPUOffload(offload_params=True) + else: + cpu_offload = CPUOffload(offload_params=False) + + auto_wrap_policy = None + + if FSDPOption.AUTO_WRAP in self.args.fsdp: + if self.args.fsdp_config["fsdp_min_num_params"] > 0: + auto_wrap_policy = functools.partial( + size_based_auto_wrap_policy, min_num_params=self.args.fsdp_config["fsdp_min_num_params"] + ) + elif self.args.fsdp_config.get("fsdp_transformer_layer_cls_to_wrap", None) is not None: + transformer_cls_to_wrap = set() + for layer_class in self.args.fsdp_config["fsdp_transformer_layer_cls_to_wrap"]: + transformer_cls = get_module_class_from_name(model, layer_class) + if transformer_cls is None: + raise Exception("Could not find the transformer layer class to wrap in the model.") + else: + transformer_cls_to_wrap.add(transformer_cls) + auto_wrap_policy = functools.partial( + transformer_auto_wrap_policy, + # Transformer layer class to wrap + transformer_layer_cls=transformer_cls_to_wrap, + ) + mixed_precision_policy = None + dtype = None + if self.args.fp16: + dtype = torch.float16 + elif self.args.bf16: + dtype = torch.bfloat16 + if dtype is not None: + mixed_precision_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype) + if type(model) != FSDP: + # XXX: Breaking the self.model convention but I see no way around it for now. + self.model = model = FSDP( + model, + sharding_strategy=self.fsdp, + cpu_offload=cpu_offload, + auto_wrap_policy=auto_wrap_policy, + mixed_precision=mixed_precision_policy, + device_id=self.args.device, + backward_prefetch=self.backward_prefetch, + forward_prefetch=self.forword_prefetch, + limit_all_gathers=self.limit_all_gathers, + ) + else: + try: + from torch_xla.distributed.fsdp import XlaFullyShardedDataParallel as FSDP + from torch_xla.distributed.fsdp import checkpoint_module + from torch_xla.distributed.fsdp.wrap import ( + size_based_auto_wrap_policy, + transformer_auto_wrap_policy, + ) + except ImportError: + raise ImportError("Missing XLA FSDP related module; please make sure to use torch-xla >= 2.0.") + auto_wrap_policy = None + auto_wrapper_callable = None + if self.args.fsdp_config["fsdp_min_num_params"] > 0: + auto_wrap_policy = functools.partial( + size_based_auto_wrap_policy, min_num_params=self.args.fsdp_config["fsdp_min_num_params"] + ) + elif self.args.fsdp_config.get("fsdp_transformer_layer_cls_to_wrap", None) is not None: + transformer_cls_to_wrap = set() + for layer_class in self.args.fsdp_config["fsdp_transformer_layer_cls_to_wrap"]: + transformer_cls = get_module_class_from_name(model, layer_class) + if transformer_cls is None: + raise Exception("Could not find the transformer layer class to wrap in the model.") + else: + transformer_cls_to_wrap.add(transformer_cls) + auto_wrap_policy = functools.partial( + transformer_auto_wrap_policy, + # Transformer layer class to wrap + transformer_layer_cls=transformer_cls_to_wrap, + ) + fsdp_kwargs = self.args.xla_fsdp_config + if self.args.fsdp_config["xla_fsdp_grad_ckpt"]: + # Apply gradient checkpointing to auto-wrapped sub-modules if specified + def auto_wrapper_callable(m, *args, **kwargs): + return FSDP(checkpoint_module(m), *args, **kwargs) + + # Wrap the base model 
with an outer FSDP wrapper + self.model = model = FSDP( + model, + auto_wrap_policy=auto_wrap_policy, + auto_wrapper_callable=auto_wrapper_callable, + **fsdp_kwargs, + ) + + # Patch `xm.optimizer_step` should not reduce gradients in this case, + # as FSDP does not need gradient reduction over sharded parameters. + def patched_optimizer_step(optimizer, barrier=False, optimizer_args={}): + loss = optimizer.step(**optimizer_args) + if barrier: + xm.mark_step() + return loss + + xm.optimizer_step = patched_optimizer_step + elif is_sagemaker_dp_enabled(): + model = nn.parallel.DistributedDataParallel( + model, device_ids=[int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))] + ) + elif self.args.local_rank != -1: + kwargs = {} + if self.args.ddp_find_unused_parameters is not None: + kwargs["find_unused_parameters"] = self.args.ddp_find_unused_parameters + elif isinstance(model, PreTrainedModel): + # find_unused_parameters breaks checkpointing as per + # https://github.com/huggingface/transformers/pull/4659#issuecomment-643356021 + kwargs["find_unused_parameters"] = not model.is_gradient_checkpointing + else: + kwargs["find_unused_parameters"] = True + + if self.args.ddp_bucket_cap_mb is not None: + kwargs["bucket_cap_mb"] = self.args.ddp_bucket_cap_mb + if is_torch_neuroncore_available(): + return model + model = nn.parallel.DistributedDataParallel( + model, + device_ids=[self.args.local_rank] if self.args._n_gpu != 0 else None, + output_device=self.args.local_rank if self.args._n_gpu != 0 else None, + **kwargs, + ) + + return model
+ + +
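+ # A minimal sketch (not part of the trainer above) of how the two FSDP auto-wrap policies
+ # used in _wrap_model are constructed; it assumes a recent PyTorch build with FSDP, and
+ # `MyBlock` plus the 1M-parameter threshold are hypothetical — only the
+ # torch.distributed.fsdp.wrap helpers are real APIs:
+ #     import functools
+ #     import torch.nn as nn
+ #     from torch.distributed.fsdp.wrap import size_based_auto_wrap_policy, transformer_auto_wrap_policy
+ #
+ #     class MyBlock(nn.Module):  # stand-in for a transformer layer class
+ #         pass
+ #
+ #     # wrap any submodule with more than 1M parameters into its own FSDP unit
+ #     size_policy = functools.partial(size_based_auto_wrap_policy, min_num_params=1_000_000)
+ #     # or wrap every instance of a given transformer layer class
+ #     layer_policy = functools.partial(transformer_auto_wrap_policy, transformer_layer_cls={MyBlock})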
+[docs] + def train( + self, + resume_from_checkpoint: Optional[Union[str, bool]] = None, + trial: Union["optuna.Trial", Dict[str, Any]] = None, + ignore_keys_for_eval: Optional[List[str]] = None, + is_first_time=False, + **kwargs, + ): + """ + Main training entry point. + Args: + resume_from_checkpoint (`str` or `bool`, *optional*): + If a `str`, local path to a saved checkpoint as saved by a previous instance of [`Trainer`]. If a + `bool` and equals `True`, load the last checkpoint in *args.output_dir* as saved by a previous instance + of [`Trainer`]. If present, training will resume from the model/optimizer/scheduler states loaded here. + trial (`optuna.Trial` or `Dict[str, Any]`, *optional*): + The trial run or the hyperparameter dictionary for hyperparameter search. + ignore_keys_for_eval (`List[str]`, *optional*): + A list of keys in the output of your model (if it is a dictionary) that should be ignored when + gathering predictions for evaluation during the training. + is_first_time (`bool`, *optional*, defaults to `False`): + If `True`, run the full set-up in `_inner_training_loop`; otherwise run a single pass via `_one_train`. + kwargs: + Additional keyword arguments used to hide deprecated arguments. + """ + if resume_from_checkpoint is False: + resume_from_checkpoint = None + + # memory metrics - must set up as early as possible + self._memory_tracker.start() + + args = self.args + + #self.is_in_train = True + + # do_train is not a reliable argument, as it might not be set and .train() still called, so + # the following is a workaround: + if (args.fp16_full_eval or args.bf16_full_eval) and not args.do_train: + self._move_model_to_device(self.model, args.device) + + if "model_path" in kwargs: + resume_from_checkpoint = kwargs.pop("model_path") + warnings.warn( + "`model_path` is deprecated and will be removed in a future version. Use `resume_from_checkpoint` " + "instead.", + FutureWarning, + ) + if len(kwargs) > 0: + raise TypeError(f"train() got unexpected keyword arguments: {', '.join(list(kwargs.keys()))}.") + # This might change the seed so needs to run first. + self._hp_search_setup(trial) + self._train_batch_size = self.args.train_batch_size + + # Model re-init + model_reloaded = False + if self.model_init is not None: + # Seed must be set before instantiating the model when using model_init. 
+ enable_full_determinism(self.args.seed) if self.args.full_determinism else set_seed(self.args.seed) + self.model = self.call_model_init(trial) + model_reloaded = True + # Reinitializes optimizer and scheduler + self.optimizer, self.lr_scheduler = None, None + + # Load potential model checkpoint + if isinstance(resume_from_checkpoint, bool) and resume_from_checkpoint: + resume_from_checkpoint = get_last_checkpoint(args.output_dir) + if resume_from_checkpoint is None: + raise ValueError(f"No valid checkpoint found in output directory ({args.output_dir})") + + if resume_from_checkpoint is not None and not is_sagemaker_mp_enabled() and args.deepspeed is None: + self._load_from_checkpoint(resume_from_checkpoint) + + # If model was re-initialized, put it on the right device and update self.model_wrapped + if model_reloaded: + if self.place_model_on_device: + self._move_model_to_device(self.model, args.device) + self.model_wrapped = self.model + if is_first_time: + inner_training_loop1 = find_executable_batch_size( + self._inner_training_loop, self._train_batch_size, args.auto_find_batch_size + ) + return inner_training_loop1( + args=args, + resume_from_checkpoint=resume_from_checkpoint, + trial=trial, + ignore_keys_for_eval=ignore_keys_for_eval, + ) + else: + inner_training_loop2 = find_executable_batch_size( + self._one_train, self._train_batch_size, args.auto_find_batch_size + ) + return inner_training_loop2( + args=args, + resume_from_checkpoint=resume_from_checkpoint, + trial=trial, + ignore_keys_for_eval=ignore_keys_for_eval, + )
+ + + +
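+ # A minimal usage sketch of the checkpoint resolution performed by train() when
+ # `resume_from_checkpoint=True`: the most recent "checkpoint-*" folder in the output
+ # directory is located with get_last_checkpoint ("output" is a hypothetical directory):
+ #     import os
+ #     from transformers.trainer_utils import get_last_checkpoint
+ #
+ #     output_dir = "output"
+ #     last_ckpt = get_last_checkpoint(output_dir) if os.path.isdir(output_dir) else None
+ #     if last_ckpt is None:
+ #         print(f"No valid checkpoint found in {output_dir}; training would start from scratch.")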
+[docs] + def _one_train( + self, batch_size=None, args=None, resume_from_checkpoint=None, trial=None, ignore_keys_for_eval=None + ): + #print(self.lr_scheduler) + #print(dir(self.lr_scheduler)) + + + self.state = TrainerState() + self.state.is_hyper_param_search = trial is not None + # Get dataloader + self._train_batch_size = batch_size + # Data loader and number of training steps + train_dataloader = self.get_train_dataloader() + #print("AAAAAAA", len(train_dataloader)) + + total_train_batch_size = args.train_batch_size * args.gradient_accumulation_steps * args.world_size + + len_dataloader = None + if has_length(train_dataloader): + len_dataloader = len(train_dataloader) + num_update_steps_per_epoch = len_dataloader // args.gradient_accumulation_steps + num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1) + num_examples = self.num_examples(train_dataloader) + if args.max_steps > 0: + max_steps = args.max_steps + num_train_epochs = args.max_steps // num_update_steps_per_epoch + int( + args.max_steps % num_update_steps_per_epoch > 0 + ) + # May be slightly incorrect if the last batch in the training dataloader has a smaller size but it's + # the best we can do. + num_train_samples = args.max_steps * total_train_batch_size + else: + max_steps = math.ceil(args.num_train_epochs * num_update_steps_per_epoch) + num_train_epochs = math.ceil(args.num_train_epochs) + num_train_samples = self.num_examples(train_dataloader) * args.num_train_epochs + elif args.max_steps > 0: # Rely on max_steps when dataloader does not have a working size + max_steps = args.max_steps + # Setting a very large number of epochs so we go as many times as necessary over the iterator. + num_train_epochs = sys.maxsize + num_update_steps_per_epoch = max_steps + num_examples = total_train_batch_size * args.max_steps + num_train_samples = args.max_steps * total_train_batch_size + else: + raise ValueError( + "args.max_steps must be set to a positive value if dataloader does not have a length, was" + f" {args.max_steps}" + ) + ########### + #num_train_epochs = 5 + + # Train! + logger.info("***** Running training *****") + logger.info(f" Num examples = {num_examples}") + logger.info(f" Num Epochs = {num_train_epochs}") + logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}") + logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}") + logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") + logger.info(f" Total optimization steps = {max_steps}") + logger.info( + f" Number of trainable parameters = {sum(p.numel() for p in self.tmp_model.parameters() if p.requires_grad)}" + ) + + self.state.epoch = 0 + start_time = time.time() + epochs_trained = 0 + steps_trained_in_current_epoch = 0 + steps_trained_progress_bar = None + + # Update the references + self.callback_handler.model = self.model + self.callback_handler.optimizer = self.optimizer + self.callback_handler.lr_scheduler = self.lr_scheduler + self.callback_handler.train_dataloader = train_dataloader + if self.hp_name is not None and self._trial is not None: + # use self._trial because the SigOpt/Optuna hpo only call `_hp_search_setup(trial)` instead of passing trial + # parameter to Train when using DDP. 
+ self.state.trial_name = self.hp_name(self._trial) + if trial is not None: + assignments = trial.assignments if self.hp_search_backend == HPSearchBackend.SIGOPT else trial + self.state.trial_params = hp_params(assignments) + else: + self.state.trial_params = None + # This should be the same if the state has been saved but in case the training arguments changed, it's safer + # to set this after the load. + self.state.max_steps = max_steps + self.state.num_train_epochs = num_train_epochs + self.state.is_local_process_zero = self.is_local_process_zero() + self.state.is_world_process_zero = self.is_world_process_zero() + + # tr_loss is a tensor to avoid synchronization of TPUs through .item() + tr_loss = torch.tensor(0.0).to(args.device) + # _total_loss_scalar is updated everytime .item() has to be called on tr_loss and stores the sum of all losses + self._total_loss_scalar = 0.0 + self._globalstep_last_logged = self.state.global_step + #model.zero_grad() + self.tmp_model.zero_grad() + + self.control = self.callback_handler.on_train_begin(args, self.state, self.control) + + # Skip the first epochs_trained epochs to get the random state of the dataloader at the right point. + if not args.ignore_data_skip: + #print("I skip!") called + for epoch in range(epochs_trained): + is_random_sampler = hasattr(train_dataloader, "sampler") and isinstance( + train_dataloader.sampler, RandomSampler + ) + if is_torch_less_than_1_11 or not is_random_sampler: + # We just need to begin an iteration to create the randomization of the sampler. + # That was before PyTorch 1.11 however... + for _ in train_dataloader: + break + else: + # Otherwise we need to call the whooooole sampler cause there is some random operation added + # AT THE VERY END! + _ = list(train_dataloader.sampler) + + ############### + #num_train_epochs = 10 + self.is_in_train = True + #print("The number of epoches: ", num_train_epochs) + ############# + total_batched_samples = 0 + for epoch in range(epochs_trained, num_train_epochs): + if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler): + train_dataloader.sampler.set_epoch(epoch) + elif hasattr(train_dataloader, "dataset") and isinstance(train_dataloader.dataset, IterableDatasetShard): + train_dataloader.dataset.set_epoch(epoch) + + if is_torch_tpu_available(): + parallel_loader = pl.ParallelLoader(train_dataloader, [args.device]).per_device_loader(args.device) + epoch_iterator = parallel_loader + else: + epoch_iterator = train_dataloader + + # Reset the past mems state at the beginning of each epoch if necessary. 
+ if args.past_index >= 0: + self._past = None + + steps_in_epoch = ( + len(epoch_iterator) + if len_dataloader is not None + else args.max_steps * args.gradient_accumulation_steps + ) + self.control = self.callback_handler.on_epoch_begin(args, self.state, self.control) + + if epoch == epochs_trained and resume_from_checkpoint is not None and steps_trained_in_current_epoch == 0: + self._load_rng_state(resume_from_checkpoint) + + rng_to_sync = False + steps_skipped = 0 + if skip_first_batches is not None and steps_trained_in_current_epoch > 0: + epoch_iterator = skip_first_batches(epoch_iterator, steps_trained_in_current_epoch) + steps_skipped = steps_trained_in_current_epoch + steps_trained_in_current_epoch = 0 + rng_to_sync = True + + #print("The number of one epoch: ", len(epoch_iterator)) + step = -1 + for step, inputs in enumerate(epoch_iterator): + total_batched_samples += 1 + if rng_to_sync: + self._load_rng_state(resume_from_checkpoint) + rng_to_sync = False + + # Skip past any already trained steps if resuming training + if steps_trained_in_current_epoch > 0: + steps_trained_in_current_epoch -= 1 + if steps_trained_progress_bar is not None: + steps_trained_progress_bar.update(1) + if steps_trained_in_current_epoch == 0: + self._load_rng_state(resume_from_checkpoint) + continue + elif steps_trained_progress_bar is not None: + steps_trained_progress_bar.close() + steps_trained_progress_bar = None + + if step % args.gradient_accumulation_steps == 0: + self.control = self.callback_handler.on_step_begin(args, self.state, self.control) + + if ( + (total_batched_samples % args.gradient_accumulation_steps != 0) + and args.local_rank != -1 + and args._no_sync_in_gradient_accumulation + ): + # Avoid unnecessary DDP synchronization since there will be no backward pass on this example. 
+ with self.tmp_model.no_sync(): + tr_loss_step = self.training_step(self.tmp_model, inputs) + #with model.no_sync(): + #tr_loss_step = self.training_step(model, inputs) + else: + tr_loss_step = self.training_step(self.tmp_model, inputs) + + if ( + args.logging_nan_inf_filter + and not is_torch_tpu_available() + and (torch.isnan(tr_loss_step) or torch.isinf(tr_loss_step)) + ): + # if loss is nan or inf simply add the average of previous logged losses + tr_loss += tr_loss / (1 + self.state.global_step - self._globalstep_last_logged) + else: + tr_loss += tr_loss_step + + self.current_flos += float(self.floating_point_ops(inputs)) + + # Optimizer step for deepspeed must be called on every step regardless of the value of gradient_accumulation_steps + if self.deepspeed: + self.deepspeed.step() + + if total_batched_samples % args.gradient_accumulation_steps == 0 or ( + # last step in epoch but step is always smaller than gradient_accumulation_steps + steps_in_epoch <= args.gradient_accumulation_steps + and (step + 1) == steps_in_epoch + ): + # Gradient clipping + if args.max_grad_norm is not None and args.max_grad_norm > 0 and not self.deepspeed: + # deepspeed does its own clipping + + if self.do_grad_scaling: + # Reduce gradients first for XLA + if is_torch_tpu_available(): + gradients = xm._fetch_gradients(self.optimizer) + xm.all_reduce("sum", gradients, scale=1.0 / xm.xrt_world_size()) + # AMP: gradients need unscaling + self.scaler.unscale_(self.optimizer) + + if is_sagemaker_mp_enabled() and args.fp16: + self.optimizer.clip_master_grads(args.max_grad_norm) + elif hasattr(self.optimizer, "clip_grad_norm"): + # Some optimizers (like the sharded optimizer) have a specific way to do gradient clipping + self.optimizer.clip_grad_norm(args.max_grad_norm) + elif hasattr(self.tmp_model, "clip_grad_norm_"): + # Some models (like FullyShardedDDP) have a specific way to do gradient clipping + # (clip on the wrapped model stored in self.tmp_model; no local `model` variable exists here) + self.tmp_model.clip_grad_norm_(args.max_grad_norm) + else: + # Revert to normal clipping otherwise, handling Apex or full precision + nn.utils.clip_grad_norm_( + amp.master_params(self.optimizer) if self.use_apex else self.tmp_model.parameters(), + args.max_grad_norm, + ) + + # Optimizer step + optimizer_was_run = True + if self.deepspeed: + pass # called outside the loop + elif is_torch_tpu_available(): + if self.do_grad_scaling: + self.scaler.step(self.optimizer) + self.scaler.update() + else: + xm.optimizer_step(self.optimizer) + elif self.do_grad_scaling: + scale_before = self.scaler.get_scale() + self.scaler.step(self.optimizer) + self.scaler.update() + scale_after = self.scaler.get_scale() + optimizer_was_run = scale_before <= scale_after + else: + self.optimizer.step() + + if optimizer_was_run and not self.deepspeed: + self.lr_scheduler.step() + + self.tmp_model.zero_grad() + self.state.global_step += 1 + self.state.epoch = epoch + (step + 1 + steps_skipped) / steps_in_epoch + self.control = self.callback_handler.on_step_end(args, self.state, self.control) + + self._maybe_log_save_evaluate(tr_loss, self.tmp_model, trial, epoch, ignore_keys_for_eval) + else: + self.control = self.callback_handler.on_substep_end(args, self.state, self.control) + + if self.control.should_epoch_stop or self.control.should_training_stop: + break + if step < 0: + logger.warning( + "There seems to be not a single sample in your epoch_iterator, stopping training at step" + f" {self.state.global_step}! This is expected if you're using an IterableDataset and set" + f" num_steps ({max_steps}) higher than the number of available samples." 
+ ) + self.control.should_training_stop = True + + self.control = self.callback_handler.on_epoch_end(args, self.state, self.control) + self._maybe_log_save_evaluate(tr_loss, self.tmp_model, trial, epoch, ignore_keys_for_eval) + + if DebugOption.TPU_METRICS_DEBUG in self.args.debug: + if is_torch_tpu_available(): + # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) + xm.master_print(met.metrics_report()) + else: + logger.warning( + "You enabled PyTorch/XLA debug metrics but you don't have a TPU " + "configured. Check your training configuration if this is unexpected." + ) + if self.control.should_training_stop: + break + + if args.past_index and hasattr(self, "_past"): + # Clean the state at the end of training + delattr(self, "_past") + + logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n") + if args.load_best_model_at_end and self.state.best_model_checkpoint is not None: + # Wait for everyone to get here so we are sure the model has been saved by process 0. + if is_torch_tpu_available(): + xm.rendezvous("load_best_model_at_end") + elif args.local_rank != -1: + dist.barrier() + elif is_sagemaker_mp_enabled(): + smp.barrier() + + self._load_best_model() + + # add remaining tr_loss + self._total_loss_scalar += tr_loss.item() + train_loss = self._total_loss_scalar / self.state.global_step + + metrics = speed_metrics("train", start_time, num_samples=num_train_samples, num_steps=self.state.max_steps) + self.store_flos() + metrics["total_flos"] = self.state.total_flos + metrics["train_loss"] = train_loss + + self.is_in_train = False + + self._memory_tracker.stop_and_update_metrics(metrics) + + self.log(metrics) + + run_dir = self._get_output_dir(trial) + checkpoints_sorted = self._sorted_checkpoints(use_mtime=False, output_dir=run_dir) + + # Delete the last checkpoint when save_total_limit=1 if it's different from the best checkpoint and process allowed to save. + if self.args.should_save and self.state.best_model_checkpoint is not None and self.args.save_total_limit == 1: + for checkpoint in checkpoints_sorted: + if checkpoint != self.state.best_model_checkpoint: + logger.info(f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit") + shutil.rmtree(checkpoint) + + self.control = self.callback_handler.on_train_end(args, self.state, self.control) + + return TrainOutput(self.state.global_step, train_loss, metrics)
+ + +
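+ # Worked example of the step bookkeeping in _one_train/_inner_training_loop, using
+ # illustrative numbers (pure arithmetic, epoch-driven branch where max_steps <= 0):
+ #     import math
+ #
+ #     len_dataloader = 1000                 # batches per epoch
+ #     gradient_accumulation_steps = 8
+ #     num_train_epochs = 3
+ #     num_update_steps_per_epoch = max(len_dataloader // gradient_accumulation_steps, 1)  # 125
+ #     max_steps = math.ceil(num_train_epochs * num_update_steps_per_epoch)                # 375
+ #     assert (num_update_steps_per_epoch, max_steps) == (125, 375)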
+[docs] + def _inner_training_loop( + self, batch_size=None, args=None, resume_from_checkpoint=None, trial=None, ignore_keys_for_eval=None + ): + ''' + 1. This function sets up a single training run (dataloader, optimizer/scheduler and wrapped model). + 2. Update `self.train_dataset` before calling this function. + ''' + # 1 Get dataloader + self._train_batch_size = batch_size + # Data loader and number of training steps + train_dataloader = self.get_train_dataloader() + total_train_batch_size = args.train_batch_size * args.gradient_accumulation_steps * args.world_size + + len_dataloader = None + if has_length(train_dataloader): + len_dataloader = len(train_dataloader) + num_update_steps_per_epoch = len_dataloader // args.gradient_accumulation_steps + num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1) + num_examples = self.num_examples(train_dataloader) + if args.max_steps > 0: + max_steps = args.max_steps + num_train_epochs = args.max_steps // num_update_steps_per_epoch + int( + args.max_steps % num_update_steps_per_epoch > 0 + ) + # May be slightly incorrect if the last batch in the training dataloader has a smaller size but it's + # the best we can do. + num_train_samples = args.max_steps * total_train_batch_size + else: + max_steps = math.ceil(args.num_train_epochs * num_update_steps_per_epoch) + num_train_epochs = math.ceil(args.num_train_epochs) + num_train_samples = self.num_examples(train_dataloader) * args.num_train_epochs + elif args.max_steps > 0: # Rely on max_steps when dataloader does not have a working size + max_steps = args.max_steps + # Setting a very large number of epochs so we go as many times as necessary over the iterator. + num_train_epochs = sys.maxsize + num_update_steps_per_epoch = max_steps + num_examples = total_train_batch_size * args.max_steps + num_train_samples = args.max_steps * total_train_batch_size + else: + raise ValueError( + "args.max_steps must be set to a positive value if dataloader does not have a length, was" + f" {args.max_steps}" + ) + + if DebugOption.UNDERFLOW_OVERFLOW in self.args.debug: + if self.args.n_gpu > 1: + # nn.DataParallel(model) replicates the model; new variables and module + # references registered here no longer work on other GPUs, breaking the module + raise ValueError( + "Currently --debug underflow_overflow is not supported under DP. Please use DDP" + " (torch.distributed.launch)." 
+ ) + else: + debug_overflow = DebugUnderflowOverflow(self.model) # noqa + + delay_optimizer_creation = ( + self.sharded_ddp is not None + and self.sharded_ddp != ShardedDDPOption.SIMPLE + or is_sagemaker_mp_enabled() + or self.fsdp is not None + ) + if args.deepspeed: + deepspeed_engine, optimizer, lr_scheduler = deepspeed_init( + self, num_training_steps=max_steps, resume_from_checkpoint=resume_from_checkpoint + ) + self.model = deepspeed_engine.module + self.model_wrapped = deepspeed_engine + self.deepspeed = deepspeed_engine + self.optimizer = optimizer + self.lr_scheduler = lr_scheduler + #print("I just create a optimizer here!") # called + elif not delay_optimizer_creation: + self.create_optimizer_and_scheduler(num_training_steps=max_steps) + + self.state = TrainerState() + self.state.is_hyper_param_search = trial is not None + + # Activate gradient checkpointing if needed + if args.gradient_checkpointing: + self.model.gradient_checkpointing_enable() + + #model = self._wrap_model(self.model_wrapped) + self.tmp_model = self._wrap_model(self.model_wrapped) + + + #if is_sagemaker_mp_enabled() and resume_from_checkpoint is not None: + # self._load_from_checkpoint(resume_from_checkpoint, model) + + # for the rest of this function `model` is the outside model, whether it was wrapped or not + if self.tmp_model is not self.model: + self.model_wrapped = self.tmp_model + + if delay_optimizer_creation: + print("I create here!") # not called + self.create_optimizer_and_scheduler(num_training_steps=max_steps) + + return True
+ + # Check if saved optimizer or scheduler states exist + #self._load_optimizer_and_scheduler(resume_from_checkpoint) + + # important: at this point: + # self.model is the Transformers Model + # self.model_wrapped is DDP(Transformers Model), Deepspeed(Transformers Model), etc. + + + +
+[docs] + def _get_output_dir(self, trial): + if self.hp_search_backend is not None and trial is not None: + if self.hp_search_backend == HPSearchBackend.OPTUNA: + run_id = trial.number + elif self.hp_search_backend == HPSearchBackend.RAY: + from ray import tune + + run_id = tune.get_trial_id() + elif self.hp_search_backend == HPSearchBackend.SIGOPT: + run_id = trial.id + elif self.hp_search_backend == HPSearchBackend.WANDB: + import wandb + + run_id = wandb.run.id + run_name = self.hp_name(trial) if self.hp_name is not None else f"run-{run_id}" + run_dir = os.path.join(self.args.output_dir, run_name) + else: + run_dir = self.args.output_dir + return run_dir
+ + +
+[docs] + def _load_from_checkpoint(self, resume_from_checkpoint, model=None): + if model is None: + model = self.model + + if not os.path.isfile(os.path.join(resume_from_checkpoint, WEIGHTS_NAME)) and not os.path.isfile( + os.path.join(resume_from_checkpoint, WEIGHTS_INDEX_NAME) + ): + raise ValueError(f"Can't find a valid checkpoint at {resume_from_checkpoint}") + + logger.info(f"Loading model from {resume_from_checkpoint}.") + + if os.path.isfile(os.path.join(resume_from_checkpoint, CONFIG_NAME)): + config = PretrainedConfig.from_json_file(os.path.join(resume_from_checkpoint, CONFIG_NAME)) + checkpoint_version = config.transformers_version + if checkpoint_version is not None and checkpoint_version != __version__: + logger.warning( + f"You are resuming training from a checkpoint trained with {checkpoint_version} of " + f"Transformers but your current version is {__version__}. This is not recommended and could " + "lead to errors or unwanted behaviors." + ) + + if os.path.isfile(os.path.join(resume_from_checkpoint, WEIGHTS_NAME)): + # If the model is on the GPU, it still works! + if is_sagemaker_mp_enabled(): + if os.path.isfile(os.path.join(resume_from_checkpoint, "user_content.pt")): + # If the 'user_content.pt' file exists, load with the new smp api. + # Checkpoint must have been saved with the new smp api. + smp.resume_from_checkpoint( + path=resume_from_checkpoint, tag=WEIGHTS_NAME, partial=False, load_optimizer=False + ) + else: + # If the 'user_content.pt' file does NOT exist, load with the old smp api. + # Checkpoint must have been saved with the old smp api. + if hasattr(self.args, "fp16") and self.args.fp16 is True: + logger.warning( + "Enabling FP16 and loading from an smp < 1.10 checkpoint together is not supported." + ) + state_dict = torch.load(os.path.join(resume_from_checkpoint, WEIGHTS_NAME), map_location="cpu") + # Required for smp to not auto-translate state_dict from hf to smp (is already smp). + state_dict["_smp_is_partial"] = False + load_result = model.load_state_dict(state_dict, strict=True) + # release memory + del state_dict + else: + # We load the model state dict on the CPU to avoid an OOM error. + state_dict = torch.load(os.path.join(resume_from_checkpoint, WEIGHTS_NAME), map_location="cpu") + # workaround for FSDP bug https://github.com/pytorch/pytorch/issues/82963 + # which takes *args instead of **kwargs + load_result = model.load_state_dict(state_dict, False) + # release memory + del state_dict + self._issue_warnings_after_load(load_result) + else: + # We load the sharded checkpoint + load_result = load_sharded_checkpoint(model, resume_from_checkpoint, strict=is_sagemaker_mp_enabled()) + if not is_sagemaker_mp_enabled(): + self._issue_warnings_after_load(load_result)
+ + +
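+ # Standalone sketch of the CPU-first load pattern used by _load_from_checkpoint, with a
+ # toy module and a temporary directory (all names hypothetical):
+ #     import os, tempfile
+ #     import torch
+ #     import torch.nn as nn
+ #
+ #     model = nn.Linear(4, 2)
+ #     ckpt_dir = tempfile.mkdtemp()
+ #     torch.save(model.state_dict(), os.path.join(ckpt_dir, "pytorch_model.bin"))
+ #     state_dict = torch.load(os.path.join(ckpt_dir, "pytorch_model.bin"), map_location="cpu")
+ #     load_result = model.load_state_dict(state_dict, False)  # positional `strict`, as in the FSDP workaround
+ #     print(load_result.missing_keys, load_result.unexpected_keys)  # [] []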
+[docs] + def _load_best_model(self): + logger.info(f"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric}).") + best_model_path = os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME) + model = self.model_wrapped if is_sagemaker_mp_enabled() else self.model + if os.path.exists(best_model_path): + if self.deepspeed: + if self.model_wrapped is not None: + # this removes the pre-hooks from the previous engine + self.model_wrapped.destroy() + self.model_wrapped = None + + # temp hack until Deepspeed fixes the problem with resume from an existing engine that did some stepping + deepspeed_engine, optimizer, lr_scheduler = deepspeed_init( + self, + num_training_steps=self.args.max_steps, + resume_from_checkpoint=self.state.best_model_checkpoint, + ) + self.model = deepspeed_engine.module + self.model_wrapped = deepspeed_engine + self.deepspeed = deepspeed_engine + self.optimizer = optimizer + self.lr_scheduler = lr_scheduler + else: + if is_sagemaker_mp_enabled(): + if os.path.isfile(os.path.join(self.state.best_model_checkpoint, "user_content.pt")): + # If the 'user_content.pt' file exists, load with the new smp api. + # Checkpoint must have been saved with the new smp api. + smp.resume_from_checkpoint( + path=self.state.best_model_checkpoint, + tag=WEIGHTS_NAME, + partial=False, + load_optimizer=False, + ) + else: + # If the 'user_content.pt' file does NOT exist, load with the old smp api. + # Checkpoint must have been saved with the old smp api. + state_dict = torch.load(best_model_path, map_location="cpu") + state_dict["_smp_is_partial"] = False + load_result = model.load_state_dict(state_dict, strict=True) + else: + # We load the model state dict on the CPU to avoid an OOM error. + state_dict = torch.load(best_model_path, map_location="cpu") + # If the model is on the GPU, it still works! + # workaround for FSDP bug https://github.com/pytorch/pytorch/issues/82963 + # which takes *args instead of **kwargs + load_result = model.load_state_dict(state_dict, False) + if not is_sagemaker_mp_enabled(): + self._issue_warnings_after_load(load_result) + elif os.path.exists(os.path.join(self.state.best_model_checkpoint, WEIGHTS_INDEX_NAME)): + load_result = load_sharded_checkpoint( + model, self.state.best_model_checkpoint, strict=is_sagemaker_mp_enabled() + ) + if not is_sagemaker_mp_enabled(): + self._issue_warnings_after_load(load_result) + else: + logger.warning( + f"Could not locate the best model at {best_model_path}, if you are running a distributed training " + "on multiple nodes, you should activate `--save_on_each_node`." + )
+ + +
+[docs] + def _issue_warnings_after_load(self, load_result): + if len(load_result.missing_keys) != 0: + if self.model._keys_to_ignore_on_save is not None and set(load_result.missing_keys) == set( + self.model._keys_to_ignore_on_save + ): + self.model.tie_weights() + else: + logger.warning(f"There were missing keys in the checkpoint model loaded: {load_result.missing_keys}.") + if len(load_result.unexpected_keys) != 0: + logger.warning( + f"There were unexpected keys in the checkpoint model loaded: {load_result.unexpected_keys}." + )
+ + +
+[docs] + def _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch, ignore_keys_for_eval): + if self.control.should_log: + if is_torch_tpu_available(): + xm.mark_step() + + logs: Dict[str, float] = {} + + # all_gather + mean() to get average loss over all processes + tr_loss_scalar = self._nested_gather(tr_loss).mean().item() + + # reset tr_loss to zero + tr_loss -= tr_loss + + logs["loss"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4) + logs["learning_rate"] = self._get_learning_rate() + + self._total_loss_scalar += tr_loss_scalar + self._globalstep_last_logged = self.state.global_step + self.store_flos() + + self.log(logs) + + metrics = None + if self.control.should_evaluate: + if isinstance(self.eval_dataset, dict): + for eval_dataset_name, eval_dataset in self.eval_dataset.items(): + metrics = self.evaluate( + eval_dataset=eval_dataset, + ignore_keys=ignore_keys_for_eval, + metric_key_prefix=f"eval_{eval_dataset_name}", + ) + else: + metrics = self.evaluate(ignore_keys=ignore_keys_for_eval) + self._report_to_hp_search(trial, self.state.global_step, metrics) + + if self.control.should_save: + self._save_checkpoint(model, trial, metrics=metrics) + self.control = self.callback_handler.on_save(self.args, self.state, self.control)
+ + +
+[docs] + def _load_rng_state(self, checkpoint): + # Load RNG states from `checkpoint` + if checkpoint is None: + return + + if self.args.world_size > 1: + process_index = self.args.process_index + rng_file = os.path.join(checkpoint, f"rng_state_{process_index}.pth") + if not os.path.isfile(rng_file): + logger.info( + f"Didn't find an RNG file for process {process_index}, if you are resuming a training that " + "wasn't launched in a distributed fashion, reproducibility is not guaranteed." + ) + return + else: + rng_file = os.path.join(checkpoint, "rng_state.pth") + if not os.path.isfile(rng_file): + logger.info( + "Didn't find an RNG file, if you are resuming a training that was launched in a distributed " + "fashion, reproducibility is not guaranteed." + ) + return + + checkpoint_rng_state = torch.load(rng_file) + random.setstate(checkpoint_rng_state["python"]) + np.random.set_state(checkpoint_rng_state["numpy"]) + torch.random.set_rng_state(checkpoint_rng_state["cpu"]) + if torch.cuda.is_available(): + if self.args.local_rank != -1: + torch.cuda.random.set_rng_state(checkpoint_rng_state["cuda"]) + else: + try: + torch.cuda.random.set_rng_state_all(checkpoint_rng_state["cuda"]) + except Exception as e: + logger.info( + f"Didn't manage to set back the RNG states of the GPU because of the following error:\n {e}" + "\nThis won't yield the same results as if the training had not been interrupted." + ) + if is_torch_tpu_available(): + xm.set_rng_state(checkpoint_rng_state["xla"])
+ + +
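+ # Minimal sketch of the RNG round-trip implemented by _save_checkpoint/_load_rng_state
+ # (CPU-only; the "cuda"/"xla" entries are only added when those devices are present):
+ #     import random
+ #     import numpy as np
+ #     import torch
+ #
+ #     rng_states = {
+ #         "python": random.getstate(),
+ #         "numpy": np.random.get_state(),
+ #         "cpu": torch.random.get_rng_state(),
+ #     }
+ #     draw = random.random()
+ #     random.setstate(rng_states["python"])
+ #     np.random.set_state(rng_states["numpy"])
+ #     torch.random.set_rng_state(rng_states["cpu"])
+ #     assert random.random() == draw  # restoring the saved state reproduces the same draw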
+[docs] + def _save_checkpoint(self, model, trial, metrics=None): + # In all cases, including ddp/dp/deepspeed, self.model is always a reference to the model we + # want to save except FullyShardedDDP. + # assert unwrap_model(model) is self.model, "internal model should be a reference to self.model" + + # Save model checkpoint + #checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}" + checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.save_counter}" + ########## + self.save_counter += 1 + ########## + if self.hp_search_backend is None and trial is None: + self.store_flos() + + run_dir = self._get_output_dir(trial=trial) + output_dir = os.path.join(run_dir, checkpoint_folder) + self.save_model(output_dir, _internal_call=True) + if self.deepspeed: + # under zero3 model file itself doesn't get saved since it's bogus! Unless deepspeed + # config `stage3_gather_16bit_weights_on_model_save` is True + self.deepspeed.save_checkpoint(output_dir) + + # Save optimizer and scheduler + if self.sharded_ddp == ShardedDDPOption.SIMPLE: + self.optimizer.consolidate_state_dict() + + if is_torch_tpu_available(): + xm.rendezvous("saving_optimizer_states") + xm.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME)) + with warnings.catch_warnings(record=True) as caught_warnings: + xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME)) + reissue_pt_warnings(caught_warnings) + elif is_sagemaker_mp_enabled(): + opt_state_dict = self.optimizer.local_state_dict(gather_if_shard=False) + smp.barrier() + if smp.rdp_rank() == 0 or smp.state.cfg.shard_optimizer_state: + smp.save( + opt_state_dict, + os.path.join(output_dir, OPTIMIZER_NAME), + partial=True, + v3=smp.state.cfg.shard_optimizer_state, + ) + if self.args.should_save: + with warnings.catch_warnings(record=True) as caught_warnings: + torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME)) + reissue_pt_warnings(caught_warnings) + if self.do_grad_scaling: + torch.save(self.scaler.state_dict(), os.path.join(output_dir, SCALER_NAME)) + elif self.args.should_save and not self.deepspeed: + # deepspeed.save_checkpoint above saves model/optim/sched + torch.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME)) + with warnings.catch_warnings(record=True) as caught_warnings: + torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME)) + reissue_pt_warnings(caught_warnings) + if self.do_grad_scaling: + torch.save(self.scaler.state_dict(), os.path.join(output_dir, SCALER_NAME)) + + # Determine the new best metric / best model checkpoint + if metrics is not None and self.args.metric_for_best_model is not None: + metric_to_check = self.args.metric_for_best_model + if not metric_to_check.startswith("eval_"): + metric_to_check = f"eval_{metric_to_check}" + metric_value = metrics[metric_to_check] + + operator = np.greater if self.args.greater_is_better else np.less + if ( + self.state.best_metric is None + or self.state.best_model_checkpoint is None + or operator(metric_value, self.state.best_metric) + ): + self.state.best_metric = metric_value + self.state.best_model_checkpoint = output_dir + + # Save the Trainer state + if self.args.should_save: + self.state.save_to_json(os.path.join(output_dir, TRAINER_STATE_NAME)) + + # Save RNG state in non-distributed training + rng_states = { + "python": random.getstate(), + "numpy": np.random.get_state(), + "cpu": torch.random.get_rng_state(), + } + if torch.cuda.is_available(): + if 
self.args.local_rank == -1: + # In non distributed, we save the global CUDA RNG state (will take care of DataParallel) + rng_states["cuda"] = torch.cuda.random.get_rng_state_all() + else: + rng_states["cuda"] = torch.cuda.random.get_rng_state() + + if is_torch_tpu_available(): + rng_states["xla"] = xm.get_rng_state() + + # A process can arrive here before the process 0 has a chance to save the model, in which case output_dir may + # not yet exist. + os.makedirs(output_dir, exist_ok=True) + + if self.args.world_size <= 1: + torch.save(rng_states, os.path.join(output_dir, "rng_state.pth")) + else: + torch.save(rng_states, os.path.join(output_dir, f"rng_state_{self.args.process_index}.pth")) + + if self.args.push_to_hub: + self._push_from_checkpoint(output_dir) + + # Maybe delete some older checkpoints. + if self.args.should_save: + self._rotate_checkpoints(use_mtime=True, output_dir=run_dir)
+ + +
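+ # Illustration of the best-metric comparison in _save_checkpoint with made-up numbers
+ # (greater_is_better=False corresponds to e.g. metric_for_best_model="eval_loss"):
+ #     import numpy as np
+ #
+ #     greater_is_better = False
+ #     operator = np.greater if greater_is_better else np.less
+ #     best_metric, metric_value = 0.42, 0.37
+ #     if best_metric is None or operator(metric_value, best_metric):
+ #         best_metric = metric_value  # 0.37 becomes the new best eval_loss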
+[docs] + def _load_optimizer_and_scheduler(self, checkpoint): + """If optimizer and scheduler states exist, load them.""" + if checkpoint is None: + return + + if self.deepspeed: + # deepspeed loads optimizer/lr_scheduler together with the model in deepspeed_init + return + + checkpoint_file_exists = ( + glob.glob(os.path.join(checkpoint, OPTIMIZER_NAME) + "_*") + if is_sagemaker_mp_enabled() + else os.path.isfile(os.path.join(checkpoint, OPTIMIZER_NAME)) + ) + if checkpoint_file_exists and os.path.isfile(os.path.join(checkpoint, SCHEDULER_NAME)): + # Load in optimizer and scheduler states + if is_torch_tpu_available(): + # On TPU we have to take some extra precautions to properly load the states on the right device. + optimizer_state = torch.load(os.path.join(checkpoint, OPTIMIZER_NAME), map_location="cpu") + with warnings.catch_warnings(record=True) as caught_warnings: + lr_scheduler_state = torch.load(os.path.join(checkpoint, SCHEDULER_NAME), map_location="cpu") + reissue_pt_warnings(caught_warnings) + + xm.send_cpu_data_to_device(optimizer_state, self.args.device) + xm.send_cpu_data_to_device(lr_scheduler_state, self.args.device) + + self.optimizer.load_state_dict(optimizer_state) + self.lr_scheduler.load_state_dict(lr_scheduler_state) + else: + map_location = "cpu" if is_sagemaker_mp_enabled() else self.args.device + if is_sagemaker_mp_enabled(): + if os.path.isfile(os.path.join(checkpoint, "user_content.pt")): + # Optimizer checkpoint was saved with smp >= 1.10 + def opt_load_hook(mod, opt): + opt.load_state_dict(smp.load(os.path.join(checkpoint, OPTIMIZER_NAME), partial=True)) + + else: + # Optimizer checkpoint was saved with smp < 1.10 + def opt_load_hook(mod, opt): + if IS_SAGEMAKER_MP_POST_1_10: + opt.load_state_dict( + smp.load(os.path.join(checkpoint, OPTIMIZER_NAME), partial=True, back_compat=True) + ) + else: + opt.load_state_dict(smp.load(os.path.join(checkpoint, OPTIMIZER_NAME), partial=True)) + + self.model_wrapped.register_post_step_hook(opt_load_hook) + else: + self.optimizer.load_state_dict( + torch.load(os.path.join(checkpoint, OPTIMIZER_NAME), map_location=map_location) + ) + with warnings.catch_warnings(record=True) as caught_warnings: + self.lr_scheduler.load_state_dict(torch.load(os.path.join(checkpoint, SCHEDULER_NAME))) + reissue_pt_warnings(caught_warnings) + if self.do_grad_scaling and os.path.isfile(os.path.join(checkpoint, SCALER_NAME)): + self.scaler.load_state_dict(torch.load(os.path.join(checkpoint, SCALER_NAME)))
+ + + + + +
+[docs] + def log(self, logs: Dict[str, float]) -> None: + """ + Log `logs` on the various objects watching training. + Subclass and override this method to inject custom behavior. + Args: + logs (`Dict[str, float]`): + The values to log. + """ + if self.state.epoch is not None: + logs["epoch"] = round(self.state.epoch, 2) + + output = {**logs, **{"step": self.state.global_step}} + self.state.log_history.append(output) + self.control = self.callback_handler.on_log(self.args, self.state, self.control, logs)
+ + +
+[docs] + def _prepare_input(self, data: Union[torch.Tensor, Any]) -> Union[torch.Tensor, Any]: + """ + Prepares one `data` before feeding it to the model, be it a tensor or a nested list/dictionary of tensors. + """ + if isinstance(data, Mapping): + return type(data)({k: self._prepare_input(v) for k, v in data.items()}) + elif isinstance(data, (tuple, list)): + return type(data)(self._prepare_input(v) for v in data) + elif isinstance(data, torch.Tensor): + kwargs = {"device": self.args.device} + if self.deepspeed and (torch.is_floating_point(data) or torch.is_complex(data)): + # NLP models inputs are int/uint and those get adjusted to the right dtype of the + # embedding. Other models such as wav2vec2's inputs are already float and thus + # may need special handling to match the dtypes of the model + kwargs.update({"dtype": self.args.hf_deepspeed_config.dtype()}) + return data.to(**kwargs) + return data
+ + +
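+ # Standalone sketch of the recursive move performed by _prepare_input (CPU device so it
+ # runs anywhere; the nested batch below is made up):
+ #     from collections.abc import Mapping
+ #     import torch
+ #
+ #     def move_to(data, device):
+ #         if isinstance(data, Mapping):
+ #             return type(data)({k: move_to(v, device) for k, v in data.items()})
+ #         if isinstance(data, (tuple, list)):
+ #             return type(data)(move_to(v, device) for v in data)
+ #         if isinstance(data, torch.Tensor):
+ #             return data.to(device=device)
+ #         return data
+ #
+ #     batch = {"input_ids": torch.ones(2, 3, dtype=torch.long), "meta": ["a", "b"]}
+ #     batch = move_to(batch, torch.device("cpu"))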
+[docs] + def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]: + """ + Prepare `inputs` before feeding them to the model, converting them to tensors if they are not already and + handling potential state. + """ + inputs = self._prepare_input(inputs) + if len(inputs) == 0: + raise ValueError( + "The batch received was empty, your model won't be able to train on it. Double-check that your " + f"training dataset contains keys expected by the model: {','.join(self._signature_columns)}." + ) + if self.args.past_index >= 0 and self._past is not None: + inputs["mems"] = self._past + + return inputs
+ + +
+[docs] + def compute_loss_context_manager(self): + """ + A helper wrapper to group together context managers. + """ + return self.autocast_smart_context_manager()
+ + +
+[docs] + def autocast_smart_context_manager(self, cache_enabled: Optional[bool] = True): + """ + A helper wrapper that creates an appropriate context manager for `autocast` while feeding it the desired + arguments, depending on the situation. + """ + if self.use_cuda_amp or self.use_cpu_amp: + if is_torch_greater_or_equal_than_1_10: + ctx_manager = ( + torch.cpu.amp.autocast(cache_enabled=cache_enabled, dtype=self.amp_dtype) + if self.use_cpu_amp + else torch.cuda.amp.autocast(cache_enabled=cache_enabled, dtype=self.amp_dtype) + ) + else: + ctx_manager = torch.cuda.amp.autocast() + else: + ctx_manager = contextlib.nullcontext() if sys.version_info >= (3, 7) else contextlib.suppress() + + return ctx_manager
+ + +
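+ # Usage sketch of the autocast context chosen above, using the CPU branch so it runs
+ # without a GPU (assumes torch >= 1.10; the model and input are toy values):
+ #     import torch
+ #     import torch.nn as nn
+ #
+ #     model, x = nn.Linear(8, 8), torch.randn(2, 8)
+ #     with torch.cpu.amp.autocast(dtype=torch.bfloat16):
+ #         y = model(x)
+ #     print(y.dtype)  # torch.bfloat16 inside the autocast region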
+[docs] + def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor: + """ + Perform a training step on a batch of inputs. + Subclass and override to inject custom behavior. + Args: + model (`nn.Module`): + The model to train. + inputs (`Dict[str, Union[torch.Tensor, Any]]`): + The inputs and targets of the model. + The dictionary will be unpacked before being fed to the model. Most models expect the targets under the + argument `labels`. Check your model's documentation for all accepted arguments. + Return: + `torch.Tensor`: The tensor with training loss on this batch. + """ + model.train() + inputs = self._prepare_inputs(inputs) + + if is_sagemaker_mp_enabled(): + loss_mb = smp_forward_backward(model, inputs, self.args.gradient_accumulation_steps) + return loss_mb.reduce_mean().detach().to(self.args.device) + + with self.compute_loss_context_manager(): + loss = self.compute_loss(model, inputs) + + if self.args.n_gpu > 1: + loss = loss.mean() # mean() to average on multi-gpu parallel training + + if self.args.gradient_accumulation_steps > 1 and not self.deepspeed: + # deepspeed handles loss scaling by gradient_accumulation_steps in its `backward` + loss = loss / self.args.gradient_accumulation_steps + + if self.do_grad_scaling: + self.scaler.scale(loss).backward() + elif self.use_apex: + with amp.scale_loss(loss, self.optimizer) as scaled_loss: + scaled_loss.backward() + elif self.deepspeed: + # loss gets scaled under gradient_accumulation_steps in deepspeed + loss = self.deepspeed.backward(loss) + else: + loss.backward() + + return loss.detach()
+ + +
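+ # Minimal sketch of the plain branch of training_step (no AMP/apex/deepspeed): scale the
+ # loss by gradient_accumulation_steps, then backpropagate (toy model and data, hypothetical):
+ #     import torch
+ #     import torch.nn as nn
+ #
+ #     model = nn.Linear(4, 1)
+ #     model.train()
+ #     gradient_accumulation_steps = 4
+ #     inputs = {"x": torch.randn(8, 4), "labels": torch.randn(8, 1)}
+ #     loss = nn.functional.mse_loss(model(inputs["x"]), inputs["labels"])
+ #     loss = loss / gradient_accumulation_steps  # matches the scaling above
+ #     loss.backward()
+ #     print(loss.detach())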
+[docs] + def compute_loss(self, model, inputs, return_outputs=False): + """ + How the loss is computed by Trainer. By default, all models return the loss in the first element. + Subclass and override for custom behavior. + """ + if self.label_smoother is not None and "labels" in inputs: + labels = inputs.pop("labels") + else: + labels = None + outputs = model(**inputs) + # Save past state if it exists + # TODO: this needs to be fixed and made cleaner later. + if self.args.past_index >= 0: + self._past = outputs[self.args.past_index] + + if labels is not None: + if unwrap_model(model)._get_name() in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES.values(): + loss = self.label_smoother(outputs, labels, shift_labels=True) + else: + loss = self.label_smoother(outputs, labels) + else: + if isinstance(outputs, dict) and "loss" not in outputs: + raise ValueError( + "The model did not return a loss from the inputs, only the following keys: " + f"{','.join(outputs.keys())}. For reference, the inputs it received are {','.join(inputs.keys())}." + ) + # We don't use .loss here since the model may return tuples instead of ModelOutput. + loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0] + + return (loss, outputs) if return_outputs else loss
+ + +
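+ # Illustration of the output handling in compute_loss: the loss is read from a dict
+ # output when present, otherwise from the first element of a tuple (toy outputs):
+ #     import torch
+ #
+ #     outputs_dict = {"loss": torch.tensor(0.5), "logits": torch.zeros(1, 2)}
+ #     outputs_tuple = (torch.tensor(0.5), torch.zeros(1, 2))
+ #     loss1 = outputs_dict["loss"] if isinstance(outputs_dict, dict) else outputs_dict[0]
+ #     loss2 = outputs_tuple["loss"] if isinstance(outputs_tuple, dict) else outputs_tuple[0]
+ #     assert torch.equal(loss1, loss2)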
+[docs] + def is_local_process_zero(self) -> bool: + """ + Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several + machines) main process. + """ + return self.args.local_process_index == 0
+ + +
+[docs] + def is_world_process_zero(self) -> bool: + """ + Whether or not this process is the global main process (when training in a distributed fashion on several + machines, this is only going to be `True` for one process). + """ + # Special case for SageMaker ModelParallel since there process_index is dp_process_index, not the global + # process index. + if is_sagemaker_mp_enabled(): + return smp.rank() == 0 + else: + return self.args.process_index == 0
+ + +
+[docs] + def save_model(self, output_dir: Optional[str] = None, _internal_call: bool = False): + """ + Will save the model, so you can reload it using `from_pretrained()`. + Will only save from the main process. + """ + + if output_dir is None: + output_dir = self.args.output_dir + + if is_torch_tpu_available(): + self._save_tpu(output_dir) + elif is_sagemaker_mp_enabled(): + # Calling the state_dict needs to be done on the wrapped model and on all processes. + os.makedirs(output_dir, exist_ok=True) + state_dict = self.model_wrapped.state_dict() + if self.args.should_save: + self._save(output_dir, state_dict=state_dict) + if IS_SAGEMAKER_MP_POST_1_10: + # 'user_content.pt' indicates model state_dict saved with smp >= 1.10 + Path(os.path.join(output_dir, "user_content.pt")).touch() + elif ( + ShardedDDPOption.ZERO_DP_2 in self.args.sharded_ddp + or ShardedDDPOption.ZERO_DP_3 in self.args.sharded_ddp + or self.fsdp is not None + ): + state_dict = self.model.state_dict() + + if self.args.should_save: + self._save(output_dir, state_dict=state_dict) + elif self.deepspeed: + # this takes care of everything as long as we aren't under zero3 + if self.args.should_save: + self._save(output_dir) + + if is_deepspeed_zero3_enabled(): + # It's too complicated to try to override different places where the weights dump gets + # saved, so since under zero3 the file is bogus, simply delete it. The user should + # either use the deepspeed checkpoint to resume, or recover the full weights with + # zero_to_fp32.py stored in the checkpoint. + if self.args.should_save: + file = os.path.join(output_dir, WEIGHTS_NAME) + if os.path.isfile(file): + # logger.info(f"deepspeed zero3: removing {file}, see zero_to_fp32.py to recover weights") + os.remove(file) + + # now save the real model if stage3_gather_16bit_weights_on_model_save=True + # if false it will not be saved. + # This must be called on all ranks + if not self.deepspeed.save_16bit_model(output_dir, WEIGHTS_NAME): + logger.warning( + "deepspeed.save_16bit_model didn't save the model, since" + " stage3_gather_16bit_weights_on_model_save=false. Saving the full checkpoint instead, use" + " zero_to_fp32.py to recover weights" + ) + self.deepspeed.save_checkpoint(output_dir) + + elif self.args.should_save: + self._save(output_dir) + + # Push to the Hub when `save_model` is called by the user. + if self.args.push_to_hub and not _internal_call: + self.push_to_hub(commit_message="Model save")
+ + +
+[docs] + def _save_tpu(self, output_dir: Optional[str] = None): + output_dir = output_dir if output_dir is not None else self.args.output_dir + logger.info(f"Saving model checkpoint to {output_dir}") + + if xm.is_master_ordinal(): + os.makedirs(output_dir, exist_ok=True) + torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME)) + + # Save a trained model and configuration using `save_pretrained()`. + # They can then be reloaded using `from_pretrained()` + xm.rendezvous("saving_checkpoint") + if not isinstance(self.model, PreTrainedModel): + if isinstance(unwrap_model(self.model), PreTrainedModel): + unwrap_model(self.model).save_pretrained( + output_dir, + is_main_process=self.args.should_save, + state_dict=self.model.state_dict(), + save_function=xm.save, + ) + else: + logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.") + state_dict = self.model.state_dict() + xm.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME)) + else: + self.model.save_pretrained(output_dir, is_main_process=self.args.should_save, save_function=xm.save) + if self.tokenizer is not None and self.args.should_save: + self.tokenizer.save_pretrained(output_dir)
+ + +
+[docs] + def _save(self, output_dir: Optional[str] = None, state_dict=None): + # If we are executing this function, we are the process zero, so we don't check for that. + output_dir = output_dir if output_dir is not None else self.args.output_dir + os.makedirs(output_dir, exist_ok=True) + logger.info(f"Saving model checkpoint to {output_dir}") + # Save a trained model and configuration using `save_pretrained()`. + # They can then be reloaded using `from_pretrained()` + if not isinstance(self.model, PreTrainedModel): + if isinstance(unwrap_model(self.model), PreTrainedModel): + if state_dict is None: + state_dict = self.model.state_dict() + unwrap_model(self.model).save_pretrained(output_dir, state_dict=state_dict) + else: + logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.") + if state_dict is None: + state_dict = self.model.state_dict() + torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME)) + else: + self.model.save_pretrained(output_dir, state_dict=state_dict) + if self.tokenizer is not None: + self.tokenizer.save_pretrained(output_dir) + + # Good practice: save your training arguments together with the trained model + torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME))
+ + +
+[docs] + def store_flos(self): + # Storing the number of floating-point operations that went into the model + if self.args.local_rank != -1: + self.state.total_flos += ( + distributed_broadcast_scalars([self.current_flos], device=self.args.device).sum().item() + ) + self.current_flos = 0 + else: + self.state.total_flos += self.current_flos + self.current_flos = 0
+ + +
+[docs] + def _sorted_checkpoints( + self, output_dir=None, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False + ) -> List[str]: + ordering_and_checkpoint_path = [] + + glob_checkpoints = [str(x) for x in Path(output_dir).glob(f"{checkpoint_prefix}-*") if os.path.isdir(x)] + + for path in glob_checkpoints: + if use_mtime: + ordering_and_checkpoint_path.append((os.path.getmtime(path), path)) + else: + regex_match = re.match(f".*{checkpoint_prefix}-([0-9]+)", path) + if regex_match is not None and regex_match.groups() is not None: + ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path)) + + checkpoints_sorted = sorted(ordering_and_checkpoint_path) + checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted] + # Make sure we don't delete the best model. + if self.state.best_model_checkpoint is not None: + best_model_index = checkpoints_sorted.index(str(Path(self.state.best_model_checkpoint))) + for i in range(best_model_index, len(checkpoints_sorted) - 2): + checkpoints_sorted[i], checkpoints_sorted[i + 1] = checkpoints_sorted[i + 1], checkpoints_sorted[i] + return checkpoints_sorted
+ + +
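+ # Sketch of the numeric ordering applied to checkpoint folders above (folder names made up):
+ #     import re
+ #
+ #     names = ["checkpoint-10", "checkpoint-2", "checkpoint-30"]
+ #
+ #     def step(path, prefix="checkpoint"):
+ #         return int(re.match(f".*{prefix}-([0-9]+)", path).groups()[0])
+ #
+ #     print(sorted(names, key=step))  # ['checkpoint-2', 'checkpoint-10', 'checkpoint-30']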
+[docs] + def _rotate_checkpoints(self, use_mtime=False, output_dir=None) -> None: + if self.args.save_total_limit is None or self.args.save_total_limit <= 0: + return + + # Check if we should delete older checkpoint(s) + checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime, output_dir=output_dir) + if len(checkpoints_sorted) <= self.args.save_total_limit: + return + + # If save_total_limit=1 with load_best_model_at_end=True, we could end up deleting the last checkpoint, which + # we don't do to allow resuming. + save_total_limit = self.args.save_total_limit + if ( + self.state.best_model_checkpoint is not None + and self.args.save_total_limit == 1 + and checkpoints_sorted[-1] != self.state.best_model_checkpoint + ): + save_total_limit = 2 + + number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - save_total_limit) + checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete] + for checkpoint in checkpoints_to_be_deleted: + logger.info(f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit") + shutil.rmtree(checkpoint, ignore_errors=True)
+ + +
+[docs] + def evaluate( + self, + eval_dataset: Optional[Dataset] = None, + ignore_keys: Optional[List[str]] = None, + metric_key_prefix: str = "eval", + ) -> Dict[str, float]: + """ + Run evaluation and returns metrics. + The calling script will be responsible for providing a method to compute metrics, as they are task-dependent + (pass it to the init `compute_metrics` argument). + You can also subclass and override this method to inject custom behavior. + Args: + eval_dataset (`Dataset`, *optional*): + Pass a dataset if you wish to override `self.eval_dataset`. If it is a [`~datasets.Dataset`], columns + not accepted by the `model.forward()` method are automatically removed. It must implement the `__len__` + method. + ignore_keys (`Lst[str]`, *optional*): + A list of keys in the output of your model (if it is a dictionary) that should be ignored when + gathering predictions. + metric_key_prefix (`str`, *optional*, defaults to `"eval"`): + An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named + "eval_bleu" if the prefix is "eval" (default) + Returns: + A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The + dictionary also contains the epoch number which comes from the training state. + """ + # memory metrics - must set up as early as possible + self._memory_tracker.start() + + eval_dataloader = self.get_eval_dataloader(eval_dataset) + start_time = time.time() + + eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop + output = eval_loop( + eval_dataloader, + description="Evaluation", + # No point gathering the predictions if there are no metrics, otherwise we defer to + # self.args.prediction_loss_only + prediction_loss_only=True if self.compute_metrics is None else None, + ignore_keys=ignore_keys, + metric_key_prefix=metric_key_prefix, + ) + + total_batch_size = self.args.eval_batch_size * self.args.world_size + if f"{metric_key_prefix}_jit_compilation_time" in output.metrics: + start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"] + output.metrics.update( + speed_metrics( + metric_key_prefix, + start_time, + num_samples=output.num_samples, + num_steps=math.ceil(output.num_samples / total_batch_size), + ) + ) + + self.log(output.metrics) + + if DebugOption.TPU_METRICS_DEBUG in self.args.debug: + # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) + xm.master_print(met.metrics_report()) + + self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, output.metrics) + + self._memory_tracker.stop_and_update_metrics(output.metrics) + + return output.metrics
+ + +
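+ # The speed metrics appended by evaluate() reduce to simple ratios; a sketch with made-up
+ # numbers (this mirrors, but is not, the speed_metrics helper itself):
+ #     import math
+ #
+ #     runtime, num_samples, eval_batch_size, world_size = 12.5, 1000, 8, 4
+ #     total_batch_size = eval_batch_size * world_size
+ #     metrics = {
+ #         "eval_runtime": round(runtime, 4),
+ #         "eval_samples_per_second": round(num_samples / runtime, 3),  # 80.0
+ #         "eval_steps_per_second": round(math.ceil(num_samples / total_batch_size) / runtime, 3),
+ #     }
+ #     print(metrics)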
+[docs] + def predict( + self, test_dataset: Dataset, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "test" + ) -> PredictionOutput: + """ + Run prediction and returns predictions and potential metrics. + Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method + will also return metrics, like in `evaluate()`. + Args: + test_dataset (`Dataset`): + Dataset to run the predictions on. If it is an `datasets.Dataset`, columns not accepted by the + `model.forward()` method are automatically removed. Has to implement the method `__len__` + ignore_keys (`Lst[str]`, *optional*): + A list of keys in the output of your model (if it is a dictionary) that should be ignored when + gathering predictions. + metric_key_prefix (`str`, *optional*, defaults to `"test"`): + An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named + "test_bleu" if the prefix is "test" (default) + <Tip> + If your predictions or labels have different sequence length (for instance because you're doing dynamic padding + in a token classification task) the predictions will be padded (on the right) to allow for concatenation into + one array. The padding index is -100. + </Tip> + Returns: *NamedTuple* A namedtuple with the following keys: + - predictions (`np.ndarray`): The predictions on `test_dataset`. + - label_ids (`np.ndarray`, *optional*): The labels (if the dataset contained some). + - metrics (`Dict[str, float]`, *optional*): The potential dictionary of metrics (if the dataset contained + labels). + """ + # memory metrics - must set up as early as possible + self._memory_tracker.start() + + test_dataloader = self.get_test_dataloader(test_dataset) + start_time = time.time() + + eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop + output = eval_loop( + test_dataloader, description="Prediction", ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix + ) + total_batch_size = self.args.eval_batch_size * self.args.world_size + if f"{metric_key_prefix}_jit_compilation_time" in output.metrics: + start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"] + output.metrics.update( + speed_metrics( + metric_key_prefix, + start_time, + num_samples=output.num_samples, + num_steps=math.ceil(output.num_samples / total_batch_size), + ) + ) + + self.control = self.callback_handler.on_predict(self.args, self.state, self.control, output.metrics) + self._memory_tracker.stop_and_update_metrics(output.metrics) + + return PredictionOutput(predictions=output.predictions, label_ids=output.label_ids, metrics=output.metrics)
+ + +
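A hedged sketch of consuming the `PredictionOutput` named tuple returned by `predict()`; `trainer` and `test_dataset` are placeholders:

output = trainer.predict(test_dataset, metric_key_prefix="test")
predictions = output.predictions     # np.ndarray (or tuple of arrays) with the model outputs
label_ids = output.label_ids         # None when the test dataset carries no labels
if output.metrics:                   # populated only when labels (and compute_metrics) are available
    print(output.metrics.get("test_loss"))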
+[docs] + def evaluation_loop( + self, + dataloader: DataLoader, + description: str, + prediction_loss_only: Optional[bool] = None, + ignore_keys: Optional[List[str]] = None, + metric_key_prefix: str = "eval", + ) -> EvalLoopOutput: + """ + Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`. + Works both with or without labels. + """ + args = self.args + + prediction_loss_only = prediction_loss_only if prediction_loss_only is not None else args.prediction_loss_only + + # if eval is called w/o train init deepspeed here + if args.deepspeed and not self.deepspeed: + # XXX: eval doesn't have `resume_from_checkpoint` arg but we should be able to do eval + # from the checkpoint eventually + deepspeed_engine, _, _ = deepspeed_init( + self, num_training_steps=0, resume_from_checkpoint=None, inference=True + ) + self.model = deepspeed_engine.module + self.model_wrapped = deepspeed_engine + self.deepspeed = deepspeed_engine + + model = self._wrap_model(self.model, training=False, dataloader=dataloader) + + # if full fp16 or bf16 eval is wanted and this ``evaluation`` or ``predict`` isn't called + # while ``train`` is running, cast it to the right dtype first and then put on device + if not self.is_in_train: + if args.fp16_full_eval: + model = model.to(dtype=torch.float16, device=args.device) + elif args.bf16_full_eval: + model = model.to(dtype=torch.bfloat16, device=args.device) + + batch_size = self.args.eval_batch_size + + logger.info(f"***** Running {description} *****") + if has_length(dataloader): + logger.info(f" Num examples = {self.num_examples(dataloader)}") + else: + logger.info(" Num examples: Unknown") + logger.info(f" Batch size = {batch_size}") + + model.eval() + + self.callback_handler.eval_dataloader = dataloader + # Do this before wrapping. + eval_dataset = getattr(dataloader, "dataset", None) + + if is_torch_tpu_available(): + dataloader = pl.ParallelLoader(dataloader, [args.device]).per_device_loader(args.device) + + if args.past_index >= 0: + self._past = None + + # Initialize containers + # losses/preds/labels on GPU/TPU (accumulated for eval_accumulation_steps) + losses_host = None + preds_host = None + labels_host = None + inputs_host = None + + # losses/preds/labels on CPU (final containers) + all_losses = None + all_preds = None + all_labels = None + all_inputs = None + # Will be useful when we have an iterable dataset so don't know its length. + + observed_num_examples = 0 + # Main evaluation loop + for step, inputs in enumerate(dataloader): + # Update the observed num examples + observed_batch_size = find_batch_size(inputs) + if observed_batch_size is not None: + observed_num_examples += observed_batch_size + # For batch samplers, batch_size is not known by the dataloader in advance. 
+ if batch_size is None: + batch_size = observed_batch_size + + # Prediction step + loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys) + inputs_decode = self._prepare_input(inputs["input_ids"]) if args.include_inputs_for_metrics else None + + if is_torch_tpu_available(): + xm.mark_step() + + # Update containers on host + if loss is not None: + losses = self._nested_gather(loss.repeat(batch_size)) + losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0) + if labels is not None: + labels = self._pad_across_processes(labels) + labels = self._nested_gather(labels) + labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100) + if inputs_decode is not None: + inputs_decode = self._pad_across_processes(inputs_decode) + inputs_decode = self._nested_gather(inputs_decode) + inputs_host = ( + inputs_decode + if inputs_host is None + else nested_concat(inputs_host, inputs_decode, padding_index=-100) + ) + if logits is not None: + logits = self._pad_across_processes(logits) + logits = self._nested_gather(logits) + if self.preprocess_logits_for_metrics is not None: + logits = self.preprocess_logits_for_metrics(logits, labels) + preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100) + self.control = self.callback_handler.on_prediction_step(args, self.state, self.control) + + # Gather all tensors and put them back on the CPU if we have done enough accumulation steps. + if args.eval_accumulation_steps is not None and (step + 1) % args.eval_accumulation_steps == 0: + if losses_host is not None: + losses = nested_numpify(losses_host) + all_losses = losses if all_losses is None else np.concatenate((all_losses, losses), axis=0) + if preds_host is not None: + logits = nested_numpify(preds_host) + all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100) + if inputs_host is not None: + inputs_decode = nested_numpify(inputs_host) + all_inputs = ( + inputs_decode + if all_inputs is None + else nested_concat(all_inputs, inputs_decode, padding_index=-100) + ) + if labels_host is not None: + labels = nested_numpify(labels_host) + all_labels = ( + labels if all_labels is None else nested_concat(all_labels, labels, padding_index=-100) + ) + + # Set back to None to begin a new accumulation + losses_host, preds_host, inputs_host, labels_host = None, None, None, None + + if args.past_index and hasattr(self, "_past"): + # Clean the state at the end of the evaluation loop + delattr(self, "_past") + + # Gather all remaining tensors and put them back on the CPU + if losses_host is not None: + losses = nested_numpify(losses_host) + all_losses = losses if all_losses is None else np.concatenate((all_losses, losses), axis=0) + if preds_host is not None: + logits = nested_numpify(preds_host) + all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100) + if inputs_host is not None: + inputs_decode = nested_numpify(inputs_host) + all_inputs = ( + inputs_decode if all_inputs is None else nested_concat(all_inputs, inputs_decode, padding_index=-100) + ) + if labels_host is not None: + labels = nested_numpify(labels_host) + all_labels = labels if all_labels is None else nested_concat(all_labels, labels, padding_index=-100) + + # Number of samples + if has_length(eval_dataset): + num_samples = len(eval_dataset) + # The instance check is weird and does not actually check for the type, 
but whether the dataset has the right + # methods. Therefore we need to make sure it also has the attribute. + elif isinstance(eval_dataset, IterableDatasetShard) and getattr(eval_dataset, "num_examples", 0) > 0: + num_samples = eval_dataset.num_examples + else: + if has_length(dataloader): + num_samples = self.num_examples(dataloader) + else: # both len(dataloader.dataset) and len(dataloader) fail + num_samples = observed_num_examples + if num_samples == 0 and observed_num_examples > 0: + num_samples = observed_num_examples + + # Number of losses has been rounded to a multiple of batch_size and in a distributed training, the number of + # samplers has been rounded to a multiple of batch_size, so we truncate. + if all_losses is not None: + all_losses = all_losses[:num_samples] + if all_preds is not None: + all_preds = nested_truncate(all_preds, num_samples) + if all_labels is not None: + all_labels = nested_truncate(all_labels, num_samples) + if all_inputs is not None: + all_inputs = nested_truncate(all_inputs, num_samples) + + # Metrics! + if self.compute_metrics is not None and all_preds is not None and all_labels is not None: + if args.include_inputs_for_metrics: + metrics = self.compute_metrics( + EvalPrediction(predictions=all_preds, label_ids=all_labels, inputs=all_inputs) + ) + else: + metrics = self.compute_metrics(EvalPrediction(predictions=all_preds, label_ids=all_labels)) + else: + metrics = {} + + # To be JSON-serializable, we need to remove numpy types or zero-d tensors + metrics = denumpify_detensorize(metrics) + + if all_losses is not None: + metrics[f"{metric_key_prefix}_loss"] = all_losses.mean().item() + if hasattr(self, "jit_compilation_time"): + metrics[f"{metric_key_prefix}_jit_compilation_time"] = self.jit_compilation_time + + # Prefix all keys with metric_key_prefix + '_' + for key in list(metrics.keys()): + if not key.startswith(f"{metric_key_prefix}_"): + metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key) + + return EvalLoopOutput(predictions=all_preds, label_ids=all_labels, metrics=metrics, num_samples=num_samples)
+ + +
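The loop above accumulates per-batch logits with `nested_concat(..., padding_index=-100)` and offloads them to CPU every `eval_accumulation_steps` batches. A minimal standalone sketch of that pad-then-concatenate behaviour (my own helper for illustration, not the Transformers implementation):

import torch

def concat_with_padding(a: torch.Tensor, b: torch.Tensor, padding_index: int = -100) -> torch.Tensor:
    # Pad both tensors on dim 1 to a common sequence length, then stack along the batch dim.
    max_len = max(a.shape[1], b.shape[1])
    def pad(t: torch.Tensor) -> torch.Tensor:
        if t.shape[1] == max_len:
            return t
        padded = t.new_full((t.shape[0], max_len) + tuple(t.shape[2:]), padding_index)
        padded[:, : t.shape[1]] = t
        return padded
    return torch.cat((pad(a), pad(b)), dim=0)

batch1 = torch.ones(2, 5)    # 2 examples with sequence length 5
batch2 = torch.ones(3, 7)    # 3 examples with sequence length 7
merged = concat_with_padding(batch1, batch2)
print(merged.shape)          # torch.Size([5, 7]); the shorter rows are filled with -100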
+[docs] + def _nested_gather(self, tensors, name=None): + """ + Gather value of `tensors` (tensor or list/tuple of nested tensors) from all processes so they can be + concatenated afterwards; unlike `_gather_and_numpify`, the gathered values are returned as tensors. + """ + if tensors is None: + return + if is_torch_tpu_available(): + if name is None: + name = "nested_gather" + tensors = nested_xla_mesh_reduce(tensors, name) + elif is_sagemaker_mp_enabled(): + tensors = smp_gather(tensors) + elif self.args.local_rank != -1: + tensors = distributed_concat(tensors) + return tensors
+ + + # Copied from Accelerate. +
+[docs] + def _pad_across_processes(self, tensor, pad_index=-100): + """ + Recursively pad the tensors in a nested list/tuple/dictionary of tensors from all devices to the same size so + they can safely be gathered. + """ + if isinstance(tensor, (list, tuple)): + return type(tensor)(self._pad_across_processes(t, pad_index=pad_index) for t in tensor) + elif isinstance(tensor, dict): + return type(tensor)({k: self._pad_across_processes(v, pad_index=pad_index) for k, v in tensor.items()}) + elif not isinstance(tensor, torch.Tensor): + raise TypeError( + f"Can't pad the values of type {type(tensor)}, only of nested list/tuple/dicts of tensors." + ) + + if len(tensor.shape) < 2: + return tensor + # Gather all sizes + size = torch.tensor(tensor.shape, device=tensor.device)[None] + sizes = self._nested_gather(size).cpu() + + max_size = max(s[1] for s in sizes) + # When extracting XLA graphs for compilation, max_size is 0, + # so use inequality to avoid errors. + if tensor.shape[1] >= max_size: + return tensor + + # Then pad to the maximum size + old_size = tensor.shape + new_size = list(old_size) + new_size[1] = max_size + new_tensor = tensor.new_zeros(tuple(new_size)) + pad_index + new_tensor[:, : old_size[1]] = tensor + return new_tensor
+ + +
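A single-process illustration of the padding step above: bring a tensor up to the maximum dim-1 size reported by the other processes, filling with `pad_index` so the subsequent gather can stack equal shapes (a standalone re-implementation of the per-tensor branch, for illustration only):

import torch

def pad_to_length(tensor: torch.Tensor, max_size: int, pad_index: int = -100) -> torch.Tensor:
    if tensor.shape[1] >= max_size:
        return tensor
    new_size = list(tensor.shape)
    new_size[1] = max_size
    new_tensor = tensor.new_zeros(tuple(new_size)) + pad_index   # filled with pad_index
    new_tensor[:, : tensor.shape[1]] = tensor
    return new_tensor

local_logits = torch.randn(4, 9, 32)                 # this rank produced length-9 sequences
padded = pad_to_length(local_logits, max_size=12)    # another rank reported length 12
print(padded.shape)                                  # torch.Size([4, 12, 32])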
+[docs] + def prediction_step( + self, + model: nn.Module, + inputs: Dict[str, Union[torch.Tensor, Any]], + prediction_loss_only: bool, + ignore_keys: Optional[List[str]] = None, + ) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]: + """ + Perform an evaluation step on `model` using `inputs`. + Subclass and override to inject custom behavior. + Args: + model (`nn.Module`): + The model to evaluate. + inputs (`Dict[str, Union[torch.Tensor, Any]]`): + The inputs and targets of the model. + The dictionary will be unpacked before being fed to the model. Most models expect the targets under the + argument `labels`. Check your model's documentation for all accepted arguments. + prediction_loss_only (`bool`): + Whether or not to return the loss only. + ignore_keys (`Lst[str]`, *optional*): + A list of keys in the output of your model (if it is a dictionary) that should be ignored when + gathering predictions. + Return: + Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, + logits and labels (each being optional). + """ + has_labels = False if len(self.label_names) == 0 else all(inputs.get(k) is not None for k in self.label_names) + # For CLIP-like models capable of returning loss values. + # If `return_loss` is not specified or being `None` in `inputs`, we check if the default value of `return_loss` + # is `True` in `model.forward`. + return_loss = inputs.get("return_loss", None) + if return_loss is None: + return_loss = self.can_return_loss + loss_without_labels = True if len(self.label_names) == 0 and return_loss else False + + inputs = self._prepare_inputs(inputs) + if ignore_keys is None: + if hasattr(self.model, "config"): + ignore_keys = getattr(self.model.config, "keys_to_ignore_at_inference", []) + else: + ignore_keys = [] + + # labels may be popped when computing the loss (label smoothing for instance) so we grab them first. + if has_labels or loss_without_labels: + labels = nested_detach(tuple(inputs.get(name) for name in self.label_names)) + if len(labels) == 1: + labels = labels[0] + else: + labels = None + + with torch.no_grad(): + if is_sagemaker_mp_enabled(): + raw_outputs = smp_forward_only(model, inputs) + if has_labels or loss_without_labels: + if isinstance(raw_outputs, dict): + loss_mb = raw_outputs["loss"] + logits_mb = tuple(v for k, v in raw_outputs.items() if k not in ignore_keys + ["loss"]) + else: + loss_mb = raw_outputs[0] + logits_mb = raw_outputs[1:] + + loss = loss_mb.reduce_mean().detach().cpu() + logits = smp_nested_concat(logits_mb) + else: + loss = None + if isinstance(raw_outputs, dict): + logits_mb = tuple(v for k, v in raw_outputs.items() if k not in ignore_keys) + else: + logits_mb = raw_outputs + logits = smp_nested_concat(logits_mb) + else: + if has_labels or loss_without_labels: + with self.compute_loss_context_manager(): + loss, outputs = self.compute_loss(model, inputs, return_outputs=True) + loss = loss.mean().detach() + + if isinstance(outputs, dict): + logits = tuple(v for k, v in outputs.items() if k not in ignore_keys + ["loss"]) + else: + logits = outputs[1:] + else: + loss = None + with self.compute_loss_context_manager(): + outputs = model(**inputs) + if isinstance(outputs, dict): + logits = tuple(v for k, v in outputs.items() if k not in ignore_keys) + else: + logits = outputs + # TODO: this needs to be fixed and made cleaner later. 
+ if self.args.past_index >= 0: + self._past = outputs[self.args.past_index - 1] + + if prediction_loss_only: + return (loss, None, None) + + logits = nested_detach(logits) + if len(logits) == 1: + logits = logits[0] + + return (loss, logits, labels)
+ + +
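Subclassing and overriding `prediction_step` is the supported way to inject custom evaluation behaviour. A hypothetical sketch, where `TrainerClass` stands in for the trainer class defined in this module and the clamping is purely illustrative:

import torch

class ClampedEvalTrainer(TrainerClass):   # TrainerClass: placeholder for the class above
    def prediction_step(self, model, inputs, prediction_loss_only, ignore_keys=None):
        loss, logits, labels = super().prediction_step(
            model, inputs, prediction_loss_only, ignore_keys=ignore_keys
        )
        # Clamp raw logits to a fixed range before metrics are computed (illustrative only).
        if isinstance(logits, torch.Tensor):
            logits = torch.clamp(logits, min=-10.0, max=10.0)
        return loss, logits, labels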
+[docs] + def floating_point_ops(self, inputs: Dict[str, Union[torch.Tensor, Any]]): + """ + For models that inherit from [`PreTrainedModel`], uses that method to compute the number of floating point + operations for every backward + forward pass. If using another model, either implement such a method in the + model or subclass and override this method. + Args: + inputs (`Dict[str, Union[torch.Tensor, Any]]`): + The inputs and targets of the model. + Returns: + `int`: The number of floating-point operations. + """ + if hasattr(self.model, "floating_point_ops"): + return self.model.floating_point_ops(inputs) + else: + return 0
+ + +
+[docs] + def init_git_repo(self, at_init: bool = False): + """ + Initializes a git repo in `self.args.hub_model_id`. + Args: + at_init (`bool`, *optional*, defaults to `False`): + Whether this function is called before any training or not. If `self.args.overwrite_output_dir` is + `True` and `at_init` is `True`, the path to the repo (which is `self.args.output_dir`) might be wiped + out. + """ + if not self.is_world_process_zero(): + return + if self.args.hub_model_id is None: + repo_name = Path(self.args.output_dir).absolute().name + else: + repo_name = self.args.hub_model_id + if "/" not in repo_name: + repo_name = get_full_repo_name(repo_name, token=self.args.hub_token) + + # Make sure the repo exists. + create_repo(repo_name, token=self.args.hub_token, private=self.args.hub_private_repo, exist_ok=True) + try: + self.repo = Repository(self.args.output_dir, clone_from=repo_name, token=self.args.hub_token) + except EnvironmentError: + if self.args.overwrite_output_dir and at_init: + # Try again after wiping output_dir + shutil.rmtree(self.args.output_dir) + self.repo = Repository(self.args.output_dir, clone_from=repo_name, token=self.args.hub_token) + else: + raise + + self.repo.git_pull() + + # By default, ignore the checkpoint folders + if ( + not os.path.exists(os.path.join(self.args.output_dir, ".gitignore")) + and self.args.hub_strategy != HubStrategy.ALL_CHECKPOINTS + ): + with open(os.path.join(self.args.output_dir, ".gitignore"), "w", encoding="utf-8") as writer: + writer.writelines(["checkpoint-*/"]) + + # Add "*.sagemaker" to .gitignore if using SageMaker + if os.environ.get("SM_TRAINING_ENV"): + self._add_sm_patterns_to_gitignore() + + self.push_in_progress = None
+ + +
+[docs] + def create_model_card( + self, + language: Optional[str] = None, + license: Optional[str] = None, + tags: Union[str, List[str], None] = None, + model_name: Optional[str] = None, + finetuned_from: Optional[str] = None, + tasks: Union[str, List[str], None] = None, + dataset_tags: Union[str, List[str], None] = None, + dataset: Union[str, List[str], None] = None, + dataset_args: Union[str, List[str], None] = None, + ): + """ + Creates a draft of a model card using the information available to the `Trainer`. + Args: + language (`str`, *optional*): + The language of the model (if applicable) + license (`str`, *optional*): + The license of the model. Will default to the license of the pretrained model used, if the original + model given to the `Trainer` comes from a repo on the Hub. + tags (`str` or `List[str]`, *optional*): + Some tags to be included in the metadata of the model card. + model_name (`str`, *optional*): + The name of the model. + finetuned_from (`str`, *optional*): + The name of the model used to fine-tune this one (if applicable). Will default to the name of the repo + of the original model given to the `Trainer` (if it comes from the Hub). + tasks (`str` or `List[str]`, *optional*): + One or several task identifiers, to be included in the metadata of the model card. + dataset_tags (`str` or `List[str]`, *optional*): + One or several dataset tags, to be included in the metadata of the model card. + dataset (`str` or `List[str]`, *optional*): + One or several dataset identifiers, to be included in the metadata of the model card. + dataset_args (`str` or `List[str]`, *optional*): + One or several dataset arguments, to be included in the metadata of the model card. + """ + if not self.is_world_process_zero(): + return + + training_summary = TrainingSummary.from_trainer( + self, + language=language, + license=license, + tags=tags, + model_name=model_name, + finetuned_from=finetuned_from, + tasks=tasks, + dataset_tags=dataset_tags, + dataset=dataset, + dataset_args=dataset_args, + ) + model_card = training_summary.to_model_card() + with open(os.path.join(self.args.output_dir, "README.md"), "w") as f: + f.write(model_card)
+ + +
+[docs] + def _push_from_checkpoint(self, checkpoint_folder): + # Only push from one node. + if not self.is_world_process_zero() or self.args.hub_strategy == HubStrategy.END: + return + # If we haven't finished the last push, we don't do this one. + if self.push_in_progress is not None and not self.push_in_progress.is_done: + return + + output_dir = self.args.output_dir + # To avoid a new synchronization of all model weights, we just copy the file from the checkpoint folder + modeling_files = [CONFIG_NAME, WEIGHTS_NAME] + for modeling_file in modeling_files: + if os.path.isfile(os.path.join(checkpoint_folder, modeling_file)): + shutil.copy(os.path.join(checkpoint_folder, modeling_file), os.path.join(output_dir, modeling_file)) + # Saving the tokenizer is fast and we don't know how many files it may have spawned, so we resave it to be sure. + if self.tokenizer is not None: + self.tokenizer.save_pretrained(output_dir) + # Same for the training arguments + torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME)) + + try: + if self.args.hub_strategy == HubStrategy.CHECKPOINT: + # Temporarily move the checkpoint just saved for the push + tmp_checkpoint = os.path.join(output_dir, "last-checkpoint") + # We have to remove the "last-checkpoint" dir if it exists, otherwise the checkpoint is moved as a + # subfolder. + if os.path.isdir(tmp_checkpoint): + shutil.rmtree(tmp_checkpoint) + shutil.move(checkpoint_folder, tmp_checkpoint) + + if self.args.save_strategy == IntervalStrategy.STEPS: + commit_message = f"Training in progress, step {self.state.global_step}" + else: + commit_message = f"Training in progress, epoch {int(self.state.epoch)}" + _, self.push_in_progress = self.repo.push_to_hub( + commit_message=commit_message, blocking=False, auto_lfs_prune=True + ) + finally: + if self.args.hub_strategy == HubStrategy.CHECKPOINT: + # Move back the checkpoint to its place + shutil.move(tmp_checkpoint, checkpoint_folder)
+ + +
+[docs] + def push_to_hub(self, commit_message: Optional[str] = "End of training", blocking: bool = True, **kwargs) -> str: + """ + Upload *self.model* and *self.tokenizer* to the 🤗 model hub on the repo *self.args.hub_model_id*. + Parameters: + commit_message (`str`, *optional*, defaults to `"End of training"`): + Message to commit while pushing. + blocking (`bool`, *optional*, defaults to `True`): + Whether the function should return only when the `git push` has finished. + kwargs: + Additional keyword arguments passed along to [`~Trainer.create_model_card`]. + Returns: + The url of the commit of your model in the given repository if `blocking=False`, a tuple with the url of + the commit and an object to track the progress of the commit if `blocking=True` + """ + # If a user calls manually `push_to_hub` with `self.args.push_to_hub = False`, we try to create the repo but + # it might fail. + if not hasattr(self, "repo"): + self.init_git_repo() + + model_name = kwargs.pop("model_name", None) + if model_name is None and self.args.should_save: + if self.args.hub_model_id is None: + model_name = Path(self.args.output_dir).name + else: + model_name = self.args.hub_model_id.split("/")[-1] + + # Needs to be executed on all processes for TPU training, but will only save on the processed determined by + # self.args.should_save. + self.save_model(_internal_call=True) + + # Only push from one node. + if not self.is_world_process_zero(): + return + + # Cancel any async push in progress if blocking=True. The commits will all be pushed together. + if blocking and self.push_in_progress is not None and not self.push_in_progress.is_done: + self.push_in_progress._process.kill() + self.push_in_progress = None + + git_head_commit_url = self.repo.push_to_hub( + commit_message=commit_message, blocking=blocking, auto_lfs_prune=True + ) + # push separately the model card to be independant from the rest of the model + if self.args.should_save: + self.create_model_card(model_name=model_name, **kwargs) + try: + self.repo.push_to_hub( + commit_message="update model card README.md", blocking=blocking, auto_lfs_prune=True + ) + except EnvironmentError as exc: + logger.error(f"Error pushing update to the model card. Please read logs and retry.\n${exc}") + + return git_head_commit_url
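A hedged call sequence for pushing a finished run to the Hub; `trainer` is assumed to be an already-trained instance with `hub_model_id` set in its training arguments, and the model-card keyword arguments (forwarded to `create_model_card`) are illustrative values:

commit_url = trainer.push_to_hub(
    commit_message="End of training",
    blocking=True,                 # wait for the git push to finish
    finetuned_from="gpt2",         # illustrative; ends up in the generated model card
    tasks="text-generation",
)
print(commit_url)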
+ + + # + # Deprecated code + # + +
+[docs] + def prediction_loop( + self, + dataloader: DataLoader, + description: str, + prediction_loss_only: Optional[bool] = None, + ignore_keys: Optional[List[str]] = None, + metric_key_prefix: str = "eval", + ) -> EvalLoopOutput: + """ + Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`. + Works both with or without labels. + """ + args = self.args + + if not has_length(dataloader): + raise ValueError("dataloader must implement a working __len__") + + prediction_loss_only = prediction_loss_only if prediction_loss_only is not None else args.prediction_loss_only + + # if eval is called w/o train init deepspeed here + if args.deepspeed and not self.deepspeed: + # XXX: eval doesn't have `resume_from_checkpoint` arg but we should be able to do eval + # from the checkpoint eventually + deepspeed_engine, _, _ = deepspeed_init(self, num_training_steps=0, resume_from_checkpoint=None) + self.model = deepspeed_engine.module + self.model_wrapped = deepspeed_engine + self.deepspeed = deepspeed_engine + # XXX: we don't need optim/sched for inference, but this needs to be sorted out, since + # for example the Z3-optimizer is a must for zero3 to work even for inference - what we + # don't need is the deepspeed basic optimizer which is self.optimizer.optimizer + deepspeed_engine.optimizer.optimizer = None + deepspeed_engine.lr_scheduler = None + + model = self._wrap_model(self.model, training=False, dataloader=dataloader) + + # if full fp16 or bf16 eval is wanted and this ``evaluation`` or ``predict`` isn't called + # while ``train`` is running, cast it to the right dtype first and then put on device + if not self.is_in_train: + if args.fp16_full_eval: + model = model.to(dtype=torch.float16, device=args.device) + elif args.bf16_full_eval: + model = model.to(dtype=torch.bfloat16, device=args.device) + + batch_size = dataloader.batch_size + num_examples = self.num_examples(dataloader) + logger.info(f"***** Running {description} *****") + logger.info(f" Num examples = {num_examples}") + logger.info(f" Batch size = {batch_size}") + losses_host: torch.Tensor = None + preds_host: Union[torch.Tensor, List[torch.Tensor]] = None + labels_host: Union[torch.Tensor, List[torch.Tensor]] = None + inputs_host: Union[torch.Tensor, List[torch.Tensor]] = None + + world_size = max(1, args.world_size) + + eval_losses_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=batch_size) + if not prediction_loss_only: + # The actual number of eval_sample can be greater than num_examples in distributed settings (when we pass + # a batch size to the sampler) + make_multiple_of = None + if hasattr(dataloader, "sampler") and isinstance(dataloader.sampler, SequentialDistributedSampler): + make_multiple_of = dataloader.sampler.batch_size + preds_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of) + labels_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of) + inputs_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of) + + model.eval() + + if is_torch_tpu_available(): + dataloader = pl.ParallelLoader(dataloader, [args.device]).per_device_loader(args.device) + + if args.past_index >= 0: + self._past = None + + self.callback_handler.eval_dataloader = dataloader + + for step, inputs in enumerate(dataloader): + loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys) + inputs_decode = 
self._prepare_input(inputs["input_ids"]) if args.include_inputs_for_metrics else None + + if loss is not None: + losses = loss.repeat(batch_size) + losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0) + if logits is not None: + preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100) + if labels is not None: + labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100) + if inputs_decode is not None: + inputs_host = ( + inputs_decode + if inputs_host is None + else nested_concat(inputs_host, inputs_decode, padding_index=-100) + ) + self.control = self.callback_handler.on_prediction_step(args, self.state, self.control) + + # Gather all tensors and put them back on the CPU if we have done enough accumulation steps. + if args.eval_accumulation_steps is not None and (step + 1) % args.eval_accumulation_steps == 0: + eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses")) + if not prediction_loss_only: + preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds")) + labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids")) + inputs_gatherer.add_arrays(self._gather_and_numpify(inputs_host, "eval_inputs_ids")) + + # Set back to None to begin a new accumulation + losses_host, preds_host, labels_host, inputs_host = None, None, None, None + + if args.past_index and hasattr(self, "_past"): + # Clean the state at the end of the evaluation loop + delattr(self, "_past") + + # Gather all remaining tensors and put them back on the CPU + eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses")) + if not prediction_loss_only: + preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds")) + labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids")) + inputs_gatherer.add_arrays(self._gather_and_numpify(inputs_host, "eval_inputs_ids")) + + eval_loss = eval_losses_gatherer.finalize() + preds = preds_gatherer.finalize() if not prediction_loss_only else None + label_ids = labels_gatherer.finalize() if not prediction_loss_only else None + inputs_ids = inputs_gatherer.finalize() if not prediction_loss_only else None + + if self.compute_metrics is not None and preds is not None and label_ids is not None: + if args.include_inputs_for_metrics: + metrics = self.compute_metrics( + EvalPrediction(predictions=preds, label_ids=label_ids, inputs=inputs_ids) + ) + else: + metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids)) + else: + metrics = {} + + # To be JSON-serializable, we need to remove numpy types or zero-d tensors + metrics = denumpify_detensorize(metrics) + + if eval_loss is not None: + metrics[f"{metric_key_prefix}_loss"] = eval_loss.mean().item() + + # Prefix all keys with metric_key_prefix + '_' + for key in list(metrics.keys()): + if not key.startswith(f"{metric_key_prefix}_"): + metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key) + + return EvalLoopOutput(predictions=preds, label_ids=label_ids, metrics=metrics, num_samples=num_examples)
+ + +
+[docs] + def _gather_and_numpify(self, tensors, name): + """ + Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy before + concatenating them to `gathered` + """ + if tensors is None: + return + if is_torch_tpu_available(): + tensors = nested_xla_mesh_reduce(tensors, name) + elif is_sagemaker_mp_enabled(): + tensors = smp_gather(tensors) + elif self.args.local_rank != -1: + tensors = distributed_concat(tensors) + + return nested_numpify(tensors)
+ + +
+[docs] + def _add_sm_patterns_to_gitignore(self) -> None: + """Add SageMaker Checkpointing patterns to .gitignore file.""" + # Make sure we only do this on the main process + if not self.is_world_process_zero(): + return + + patterns = ["*.sagemaker-uploading", "*.sagemaker-uploaded"] + + # Get current .gitignore content + if os.path.exists(os.path.join(self.repo.local_dir, ".gitignore")): + with open(os.path.join(self.repo.local_dir, ".gitignore"), "r") as f: + current_content = f.read() + else: + current_content = "" + + # Add the patterns to .gitignore + content = current_content + for pattern in patterns: + if pattern not in content: + if content.endswith("\n"): + content += pattern + else: + content += f"\n{pattern}" + + # Write the .gitignore file if it has changed + if content != current_content: + with open(os.path.join(self.repo.local_dir, ".gitignore"), "w") as f: + logger.debug(f"Writing .gitignore file. Content: {content}") + f.write(content) + + self.repo.git_add(".gitignore") + + # avoid race condition with git status + time.sleep(0.5) + + if not self.repo.is_repo_clean(): + self.repo.git_commit("Add *.sagemaker patterns to .gitignore.") + self.repo.git_push()
+
+ + + + + + + + \ No newline at end of file diff --git a/_modules/lmflow/pipeline/utils/rm_dataprocessor.html b/_modules/lmflow/pipeline/utils/rm_dataprocessor.html new file mode 100644 index 000000000..beabc8cdc --- /dev/null +++ b/_modules/lmflow/pipeline/utils/rm_dataprocessor.html @@ -0,0 +1,534 @@ + + + + + + + + + + lmflow.pipeline.utils.rm_dataprocessor — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for lmflow.pipeline.utils.rm_dataprocessor

+import logging
+from dataclasses import dataclass
+from typing import Any, Dict, List, Optional, Union
+
+from datasets import load_dataset
+from transformers import AutoTokenizer
+from transformers.utils import PaddingStrategy
+
+
+
+[docs] +logger = logging.getLogger(__name__)
+ + + +@dataclass +
+[docs] +class RewardDataCollatorWithPadding: +
+[docs] + tokenizer: AutoTokenizer
+ +
+[docs] + padding: Union[bool, str, PaddingStrategy] = True
+ +
+[docs] + max_length: Optional[int] = None
+ +
+[docs] + pad_to_multiple_of: Optional[int] = None
+ +
+[docs] + return_tensors: str = "pt"
+ + +
+[docs] + def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, Any]: + merged_features = [] + for feature in features: + merged_features.append( + { + "input_ids": feature["input_ids_chosen"], + "attention_mask": feature["attention_mask_chosen"], + } + ) + merged_features.append( + { + "input_ids": feature["input_ids_rejected"], + "attention_mask": feature["attention_mask_rejected"], + } + ) + logger.debug(f"Chosen: {self.tokenizer.decode(feature['input_ids_chosen'])}") + logger.debug(f"Rejected: {self.tokenizer.decode(feature['input_ids_rejected'])}") + batch = self.tokenizer.pad( + merged_features, + padding=self.padding, + max_length=self.max_length, + pad_to_multiple_of=self.pad_to_multiple_of, + return_tensors=self.return_tensors, + ) + batch = { + "input_ids": batch["input_ids"], + "attention_mask": batch["attention_mask"], + "return_loss": True, + } + return batch
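A small usage sketch, assuming the `gpt2` tokenizer can be downloaded and using toy chosen/rejected texts; note GPT-2 ships without a pad token, so one is assigned here:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
tokenizer.pad_token = tokenizer.eos_token            # GPT-2 has no pad token by default

collator = RewardDataCollatorWithPadding(tokenizer=tokenizer, padding=True)

chosen = tokenizer("A helpful, correct answer.")
rejected = tokenizer("An answer that is unhelpful and noticeably longer once tokenized.")
features = [{
    "input_ids_chosen": chosen["input_ids"],
    "attention_mask_chosen": chosen["attention_mask"],
    "input_ids_rejected": rejected["input_ids"],
    "attention_mask_rejected": rejected["attention_mask"],
}]

batch = collator(features)
# Chosen/rejected pairs are interleaved: row 0 is the chosen sample, row 1 the rejected one.
print(batch["input_ids"].shape, batch["return_loss"])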
+ + + + + + + + \ No newline at end of file diff --git a/_modules/lmflow/pipeline/utils/rm_trainer.html b/_modules/lmflow/pipeline/utils/rm_trainer.html new file mode 100644 index 000000000..ca0c93723 --- /dev/null +++ b/_modules/lmflow/pipeline/utils/rm_trainer.html @@ -0,0 +1,520 @@ + + + + + + + + + + lmflow.pipeline.utils.rm_trainer — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for lmflow.pipeline.utils.rm_trainer

+import numpy as np
+import torch
+import torch.nn as nn
+from transformers import Trainer
+
+from .peft_trainer import PeftTrainer
+
+
+
+[docs] +def compute_metrics(eval_pred): + result = {} + pos_predictions_scores = eval_pred.predictions[0] + neg_predictions_scores = eval_pred.predictions[1] + # By convention, the first prediction column corresponds to the sample preferred in the ground truth + result['accuracy'] = np.sum( + pos_predictions_scores > neg_predictions_scores) / len(pos_predictions_scores) + return result
+ + + +
+[docs] +def rm_loss(model, inputs, return_outputs=False): + rewards = model( + input_ids=inputs["input_ids"], + attention_mask=inputs["attention_mask"] + )[0] + bsz = rewards.size(0) + jidx = torch.arange(0, bsz, 2) + kidx = jidx + 1 + rewards_j = rewards[jidx] + rewards_k = rewards[kidx] + loss = -nn.functional.logsigmoid(rewards_j - rewards_k).mean() + if return_outputs: + return loss, {"rewards_j": rewards_j, "rewards_k": rewards_k} + return loss
+ + + +
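A self-contained toy check of the pairwise loss above, using a stub that stands in for a reward model and returns fixed scores; the interleaving (even rows chosen, odd rows rejected) matches RewardDataCollatorWithPadding in rm_dataprocessor:

import torch

class StubRewardModel:
    """Ignores its inputs and returns fixed per-sample rewards, shaped like a model output tuple."""
    def __call__(self, input_ids=None, attention_mask=None):
        # Rows 0 and 2 are "chosen", rows 1 and 3 are "rejected".
        return (torch.tensor([[2.0], [0.5], [1.0], [-0.3]]),)

inputs = {
    "input_ids": torch.zeros(4, 8, dtype=torch.long),
    "attention_mask": torch.ones(4, 8, dtype=torch.long),
}
loss, outputs = rm_loss(StubRewardModel(), inputs, return_outputs=True)
print(loss)                                           # ~0.22: chosen rewards exceed rejected ones
print(outputs["rewards_j"].squeeze(), outputs["rewards_k"].squeeze())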
+[docs] +class RewardTrainer(Trainer): +
+[docs] + def compute_loss(self, model, inputs, return_outputs=False): + return rm_loss(model, inputs, return_outputs)
+
+ + + +
+[docs] +class PeftRewardTrainer(PeftTrainer): +
+[docs] + def compute_loss(self, model, inputs, return_outputs=False): + return rm_loss(model, inputs, return_outputs)
+
+ + + + + + + + \ No newline at end of file diff --git a/_modules/lmflow/pipeline/vllm_inferencer.html b/_modules/lmflow/pipeline/vllm_inferencer.html new file mode 100644 index 000000000..b723153a6 --- /dev/null +++ b/_modules/lmflow/pipeline/vllm_inferencer.html @@ -0,0 +1,875 @@ + + + + + + + + + + lmflow.pipeline.vllm_inferencer — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for lmflow.pipeline.vllm_inferencer

+#!/usr/bin/env python
+# coding=utf-8
+# Copyright 2024 Statistics and Machine Learning Research Group. All rights reserved.
+import copy
+from functools import partial
+import importlib.resources as pkg_resources
+import json
+import logging
+import os
+os.environ['VLLM_WORKER_MULTIPROC_METHOD']='spawn'
+import subprocess
+import sys
+from typing import List, Union, Optional, Dict, Any
+
+import numpy as np
+import ray
+import ray.data
+from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy
+from transformers import AutoTokenizer
+from vllm import SamplingParams, LLM
+
+from lmflow.datasets import Dataset
+from lmflow.pipeline.base_pipeline import BasePipeline
+from lmflow.models.hf_decoder_model import HFDecoderModel
+from lmflow.args import (
+    InferencerArguments, 
+    ModelArguments, 
+    DatasetArguments,
+)
+from lmflow.utils.common import make_shell_args_from_dataclass
+from lmflow.utils.constants import RETURN_CODE_ERROR_BUFFER, MEMORY_SAFE_VLLM_INFERENCE_ENV_VAR_TO_REMOVE
+from lmflow.utils.data_utils import VLLMInferenceResultWithInput
+
+
+
+[docs] +logger = logging.getLogger(__name__)
+ + + +
+[docs] +class InferencerWithOffloading(BasePipeline): + def __init__( + self, + model_args: ModelArguments, + data_args: DatasetArguments, + inferencer_args: InferencerArguments, + ): +
+[docs] + self.model_args = model_args
+ +
+[docs] + self.data_args = data_args
+ +
+[docs] + self.inferencer_args = inferencer_args
+ +
+[docs] + self.eos_token_id = AutoTokenizer.from_pretrained(model_args.model_name_or_path).eos_token_id
+ + +
+[docs] + def inference(self): + raise NotImplementedError(".inference is not implemented")
+ + +
+[docs] + def save_inference_results(self): + raise NotImplementedError(".save_inference_results is not implemented")
+ + +
+[docs] + def load_inference_results(self): + raise NotImplementedError(".load_inference_results is not implemented")
+
+ + + +
+[docs] +class VLLMInferencer(InferencerWithOffloading): + def __init__( + self, + model_args: ModelArguments, + data_args: DatasetArguments, + inferencer_args: InferencerArguments, + ): + assert inferencer_args.use_vllm, "The inferencer_args.use_vllm must be True." + super().__init__(model_args, data_args, inferencer_args) +
+[docs] + self.sampling_params = self.parse_to_sampling_params(inferencer_args)
+ + + +
+[docs] + def parse_to_sampling_params( + self, + inference_args: InferencerArguments, + ) -> SamplingParams: + return SamplingParams( + use_beam_search=inference_args.use_beam_search, + n=inference_args.num_output_sequences, + temperature=inference_args.temperature + 1e-6, + max_tokens=inference_args.max_new_tokens, + seed=inference_args.random_seed, + top_p=inference_args.top_p, + top_k=inference_args.top_k, + stop_token_ids=[self.eos_token_id] + inference_args.additional_stop_token_ids + )
+ + + +
+[docs] + def inference( + self, + model: HFDecoderModel, + dataset: Dataset, + enable_decode_inference_result: bool = True, + release_gpu: bool = False, + inference_args: Optional[InferencerArguments] = None, + enable_distributed_inference: bool = False, + **kwargs, + ) -> List[VLLMInferenceResultWithInput]: + """Perform inference using the provided model and dataset. Will save inference results if + `save_results` is set to True in `inferencer_args`. + + Parameters + ---------- + model : HFDecoderModel + LMFlow HFDecoderModel object + dataset : Dataset + LMFlow Dataset object + apply_chat_template : bool, optional + Whether to apply chat template to the input, by default True. + enable_decode_inference_result : bool, optional + Whether to decode after generation, by default False. + release_gpu : bool, optional + Whether to release gpu resources, by default False. + inference_args : InferencerArguments, optional + by default None + + Returns + ------- + List[VLLMInferenceResultWithInput] + Return a list of VLLMInferenceResultWithInput, where each + element contains the input prompt and the corresponding output. + + When `enable_decode_inference_result = True`, the output would be a list of strings, + contains sampling_params.n samples for the corresponding prompt. + + When `enable_decode_inference_result = False`, return a list of list of ints + (token ids, no decoding after generation). + """ + if inference_args: + logger.warning( + "Overriding the default inference arguments with the provided arguments in .inference()" + ) + sampling_params = self.parse_to_sampling_params(inference_args) + else: + sampling_params = self.sampling_params + + sampling_params.detokenize = enable_decode_inference_result + + model_input = model.prepare_inputs_for_inference( + dataset=dataset, + apply_chat_template=self.inferencer_args.apply_chat_template, + use_vllm=self.inferencer_args.use_vllm, + enable_distributed_inference=enable_distributed_inference, + ) + + if enable_distributed_inference: + outputs = self._distributed_inference( + model=model, + model_input=model_input, + sampling_params=sampling_params, + num_instances=kwargs.get("distributed_inference_num_instances"), + batch_size=kwargs.get("inference_batch_size", 4), + release_gpu=release_gpu, + ) + else: + outputs = self._inference( + model=model, + model_input=model_input, + sampling_params=sampling_params, + release_gpu=release_gpu, + ) + + if self.inferencer_args.save_results: + self.save_inference_results(outputs, self.inferencer_args.results_path) + + return outputs
+ + + +
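A hedged end-to-end sketch of the single-node path (requires a GPU and vLLM installed); the model name, dataset path, and constructor calls follow the usual LMFlow pattern but are placeholders, not values taken from this file:

from lmflow.args import ModelArguments, DatasetArguments, InferencerArguments
from lmflow.datasets import Dataset
from lmflow.models.hf_decoder_model import HFDecoderModel

model_args = ModelArguments(model_name_or_path="facebook/opt-125m")        # placeholder model
data_args = DatasetArguments(dataset_path="data/my_prompts")                # placeholder path
inferencer_args = InferencerArguments(use_vllm=True, save_results=False)    # use_vllm must be True

model = HFDecoderModel(model_args)
dataset = Dataset(data_args)

inferencer = VLLMInferencer(model_args, data_args, inferencer_args)
results = inferencer.inference(
    model=model,
    dataset=dataset,
    enable_decode_inference_result=True,   # return decoded strings rather than token ids
)
# Per the docstring above, each element pairs an input prompt with its sampled outputs.
print(results[0])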
+[docs] + def _inference( + self, + model: HFDecoderModel, + model_input: List[str], + sampling_params: SamplingParams, + release_gpu: bool = False, + ) -> List[VLLMInferenceResultWithInput]: + outputs = model.inference( + inputs=model_input, + sampling_params=sampling_params, + release_gpu=release_gpu, + use_vllm=True, + vllm_gpu_memory_utilization=self.inferencer_args.vllm_gpu_memory_utilization, + vllm_tensor_parallel_size=self.inferencer_args.vllm_tensor_parallel_size, + ) + + return outputs
+ + + +
+[docs] + def _distributed_inference( + self, + model: HFDecoderModel, + model_input: ray.data.Dataset, + sampling_params: SamplingParams, + num_instances: int, + batch_size: int = 4, + release_gpu: bool = False, + ) -> List[VLLMInferenceResultWithInput]: + # prepare distributed inference resources + # from https://github.com/vllm-project/vllm/blob/main/examples/offline_inference_distributed.py + ## strategy + def scheduling_strategy_fn(): + # One bundle per tensor parallel worker + pg = ray.util.placement_group( + [{ + "GPU": 1, + "CPU": 1 + }] * self.inferencer_args.vllm_tensor_parallel_size, + strategy="STRICT_PACK", + ) + return dict( + scheduling_strategy=PlacementGroupSchedulingStrategy( + pg, placement_group_capture_child_tasks=True + ) + ) + + resources_kwarg: Dict[str, Any] = {} + if self.inferencer_args.vllm_tensor_parallel_size == 1: + # For tensor_parallel_size == 1, we simply set num_gpus=1. + resources_kwarg["num_gpus"] = 1 + else: + # Otherwise, we have to set num_gpus=0 and provide + # a function that will create a placement group for + # each instance. + resources_kwarg["num_gpus"] = 0 + resources_kwarg["ray_remote_args_fn"] = scheduling_strategy_fn + + ## predictor + class DistributedPredictor: + def __init__( + self, + model: HFDecoderModel, + sampling_params: SamplingParams, + vllm_gpu_memory_utilization: float, + vllm_tensor_parallel_size: int, + release_gpu: bool=False, + ): + self.model = copy.deepcopy(model) + self.model.activate_model_for_inference( + use_vllm=True, + vllm_gpu_memory_utilization=vllm_gpu_memory_utilization, + vllm_tensor_parallel_size=vllm_tensor_parallel_size, + ) + self.sampling_params = sampling_params + self.release_gpu = release_gpu + + def __call__(self, batch: Dict[str, np.ndarray]): + """batch: Dict[str, np.ndarray], {"item": array(['...', '...', '...', ...])} + """ + batched_inference_res = self.model.inference( + inputs=batch['item'], + sampling_params=self.sampling_params, + release_gpu=self.release_gpu, + use_vllm=True, + ) # this is the postprocessed output, see model.__vllm_inference + batched_final_res = { + "input": [sample['input'] for sample in batched_inference_res], + "output": [sample['output'] for sample in batched_inference_res] + } # do this since we're writing to a pandas dataframe + return batched_final_res + + # inference + model_input_mapping = model_input.map_batches( + DistributedPredictor, + concurrency=num_instances, # Set the concurrency to the number of LLM instances. + batch_size=batch_size, + fn_constructor_kwargs={ + "model": model, + "sampling_params": sampling_params, + "vllm_gpu_memory_utilization": self.inferencer_args.vllm_gpu_memory_utilization, + "vllm_tensor_parallel_size": self.inferencer_args.vllm_tensor_parallel_size, + "release_gpu": release_gpu, + }, + **resources_kwarg, + ) + + df_model_output = model_input_mapping.to_pandas() # the actual forwards are executed here + logger.info(f"Distributed vllm inference result preview:\n{df_model_output.head(10)}") + + model_output = [ + {"input": row["input"], "output": row["output"]} for _, row in df_model_output.iterrows() + ] + + return model_output
+ + + +
+[docs] + def save_inference_results( + self, + outputs: Union[List[List[str]], List[List[List[int]]]], + save_file_path: str, + ): + with open(save_file_path, "w", encoding='utf-8') as f: + json.dump(outputs, f, ensure_ascii=False, indent=4) + + logger.info(f"Inference results are saved to {save_file_path}.")
+ + + +
+[docs] + def load_inference_results( + self, + results_path: str, + ) -> Union[List[List[str]], List[List[List[int]]]]: + with open(results_path, "r") as f: + results = json.load(f) + + return results
+
+ + + +
+[docs] +class MemorySafeVLLMInferencer(VLLMInferencer): + def __init__( + self, + model_args: ModelArguments, + data_args: DatasetArguments, + inferencer_args: InferencerArguments, + ): + assert inferencer_args.save_results, "For MemorySafeVLLMInferencer, `save_results` must be True." + super().__init__(model_args, data_args, inferencer_args) +
+[docs] + self.inferencer_file_path = pkg_resources.files("lmflow.pipeline.utils") / "memory_safe_vllm_inference.py"
+ + + +
+[docs] + def inference(self) -> List[VLLMInferenceResultWithInput]: + inferencer_args = make_shell_args_from_dataclass( + dataclass_objects=[ + self.model_args, + self.data_args, + self.inferencer_args, + ], + format="shell", + ) + cmd = "python " + str(self.inferencer_file_path) + " " + inferencer_args + current_env = os.environ.copy() + for var in MEMORY_SAFE_VLLM_INFERENCE_ENV_VAR_TO_REMOVE: + current_env.pop(var, None) + + cli_res = subprocess.run( + args=cmd, + stdout=sys.stdout, + stderr=sys.stdout, + shell=True, + preexec_fn=os.setsid, + env=current_env, + ) + logger.info(f"MemorySafeVLLMInference subprocess run finished, info at finish: {cli_res}") + + if cli_res.returncode in RETURN_CODE_ERROR_BUFFER: + # > Fatal Python error: _enter_buffered_busy: could not acquire lock for <_io.BufferedWriter name='<stdout>'> + # > at interpreter shutdown, possibly due to daemon threads + logger.warning( + "^^^^^^^^^^ Please ignore the above error, as it comes from the subprocess. " + "This may due a kill signal with unfinished stdout/stderr writing in the subprocess. " + ) + else: + if cli_res.returncode != 0: + raise RuntimeError(f"Error during MemorySafeVLLMInference: {cli_res}") + + outputs = self.load_inference_results(self.inferencer_args.results_path) + logger.info("MemorySafeVLLMInference result captured.") + + return outputs
+
+ + + + + + + + \ No newline at end of file diff --git a/_modules/lmflow/tokenization/hf_decoder_model.html b/_modules/lmflow/tokenization/hf_decoder_model.html new file mode 100644 index 000000000..8d5d9430c --- /dev/null +++ b/_modules/lmflow/tokenization/hf_decoder_model.html @@ -0,0 +1,692 @@ + + + + + + + + + + lmflow.tokenization.hf_decoder_model — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for lmflow.tokenization.hf_decoder_model

+#!/usr/bin/env python
+# coding=utf-8
+# Copyright 2024 Statistics and Machine Learning Research Group. All rights reserved.
+
+import logging
+from logging import Logger
+from typing import Dict, Union
+
+import transformers
+from transformers.testing_utils import CaptureLogger
+from transformers import PreTrainedTokenizer, PreTrainedTokenizerFast
+
+from lmflow.utils.conversation_template import ConversationTemplate
+from lmflow.utils.constants import CONVERSATION_ROLE_NAMES
+from lmflow.args import DatasetArguments
+
+
+
+[docs] +logger = logging.getLogger(__name__)
+ +
+[docs] +tok_logger = transformers.utils.logging.get_logger("transformers.tokenization_utils_base")
+ + + +
+[docs] +def blocking( + token_dict: Dict, + block_size: int, + model_max_length: int, + pad_token_id: int, + padding_side: str, + truncation_side: str='right', +) -> Dict: + block_size_warning_num = 0 + num_example = len(token_dict[list(token_dict.keys())[0]]) + for i in range(num_example): + max_length = min(block_size, model_max_length) + pad_length = max_length - len(token_dict["input_ids"][i]) + if block_size < model_max_length: + block_size_warning_num += 1 + if pad_length < 0: + # Truncates too long samples + for key in ["input_ids", "attention_mask", "labels"]: + if truncation_side == 'right': + token_dict[key][i] = token_dict[key][i][:max_length] + elif truncation_side == 'left': + token_dict[key][i] = token_dict[key][i][-max_length:] + else: + raise ValueError( + f"truncation_side should be either 'right' or 'left', got {truncation_side}" + ) + else: + if padding_side == 'right': + # Pads too short samples + token_dict["input_ids"][i].extend( + [pad_token_id for _ in range(pad_length)] + ) + token_dict["attention_mask"][i].extend( + [0 for _ in range(pad_length)] + ) + token_dict["labels"][i].extend( + [-100 for _ in range(pad_length)] + ) + elif padding_side == 'left': + # Pads too short samples + token_dict["input_ids"][i] = ( + [pad_token_id for _ in range(pad_length)] + token_dict["input_ids"][i] + ) + token_dict["attention_mask"][i] = ( + [0 for _ in range(pad_length)] + token_dict["attention_mask"][i] + ) + token_dict["labels"][i] = ( + [-100 for _ in range(pad_length)] + token_dict["labels"][i] + ) + else: + raise ValueError( + f"padding_side should be either 'right' or 'left', got {padding_side}" + ) + if block_size_warning_num > 0: + logger.warning( + f"There are {block_size_warning_num} of {num_example} samples where" + f"block_size {block_size} < model_max_length" + f" {model_max_length}, use block_size" + " for maximum tokenized sequence length" + ) + + return token_dict
+ + + +
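A self-contained numeric check of `blocking` with two samples, `block_size=6`, right padding, and a hypothetical pad token id of 0; the first sample is padded, the second truncated:

token_dict = {
    "input_ids":      [[5, 6, 7], [1, 2, 3, 4, 5, 6, 7, 8]],
    "attention_mask": [[1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1]],
    "labels":         [[5, 6, 7], [1, 2, 3, 4, 5, 6, 7, 8]],
}
out = blocking(
    token_dict=token_dict,
    block_size=6,
    model_max_length=1024,
    pad_token_id=0,
    padding_side="right",
    truncation_side="right",
)
print(out["input_ids"][0])   # [5, 6, 7, 0, 0, 0]           padded with pad_token_id
print(out["labels"][0])      # [5, 6, 7, -100, -100, -100]   padding is masked out of the loss
print(out["input_ids"][1])   # [1, 2, 3, 4, 5, 6]            truncated to block_size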
+[docs] +def tokenize_function( + examples, + data_args: DatasetArguments, + tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast], + column_names, + label_columns, + tokenized_column_order, + add_special_tokens, + use_truncation, +) -> Dict: + """Handels text_only and text2text datasets tokenization + """ + num_example = len(examples[column_names[0]]) + token_dict = { + "input_ids": [[] for _ in range(num_example)], + "attention_mask": [[] for _ in range(num_example)], + "labels": [[] for _ in range(num_example)], + } + with CaptureLogger(tok_logger) as cl: + for column_name in tokenized_column_order: + encoding = tokenizer( + examples[column_name], + add_special_tokens=add_special_tokens, + truncation=use_truncation, + ) + + if column_name in label_columns: + labels = encoding["input_ids"].copy() + else: + labels = [ + [-100] * len(encoding["input_ids"][i]) + for i in range(num_example) + ] + + for i in range(num_example): + token_dict["input_ids"][i].extend( + encoding["input_ids"][i] + ) + token_dict["attention_mask"][i].extend( + encoding["attention_mask"][i] + ) + token_dict["labels"][i].extend(labels[i]) + + if data_args.disable_group_texts: + token_dict = blocking( + token_dict=token_dict, + block_size=data_args.block_size, + model_max_length=tokenizer.model_max_length, + pad_token_id=tokenizer.pad_token_id, + padding_side=tokenizer.padding_side, + truncation_side=tokenizer.truncation_side, + ) + + # clm input could be much much longer than block_size + if "Token indices sequence length is longer than the" in cl.out: + tok_logger.warning( + "^^^^^^^^^^^^^^^^ Please ignore the warning above - this long input will be chunked into smaller bits" + " before being passed to the model." + ) + return token_dict
+ + + +
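`tokenize_function` is meant to be applied through `datasets.Dataset.map` with `functools.partial`. A hedged sketch for a text_only-style dataset; the `gpt2` tokenizer, the toy data, and the column arguments are illustrative choices, not values prescribed by this file:

from functools import partial

from datasets import Dataset as HFDataset
from transformers import AutoTokenizer

from lmflow.args import DatasetArguments

tokenizer = AutoTokenizer.from_pretrained("gpt2")
raw = HFDataset.from_dict({"text": ["hello world", "lmflow tokenization"]})
data_args = DatasetArguments(disable_group_texts=False)   # skip the blocking step in this sketch

tokenize_fn = partial(
    tokenize_function,
    data_args=data_args,
    tokenizer=tokenizer,
    column_names=["text"],
    label_columns=["text"],            # text_only: the text itself is the training target
    tokenized_column_order=["text"],
    add_special_tokens=True,
    use_truncation=False,
)
tokenized = raw.map(tokenize_fn, batched=True, remove_columns=["text"])
print(tokenized[0]["input_ids"][:5])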
+[docs] +def conversation_tokenize_function( + examples, + data_args: DatasetArguments, + tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast], + column_names, + conversation_template: ConversationTemplate, +) -> Dict: + """Handels conversation datasets tokenization + """ + num_example = len(examples[column_names[0]]) + token_dict = { + "input_ids": [[] for _ in range(num_example)], + "attention_mask": [[] for _ in range(num_example)], + "labels": [[] for _ in range(num_example)], + } + with CaptureLogger(tok_logger) as cl: + for i in range(len(examples["messages"])): + messages = examples["messages"][i] + system = examples.get("system", [None] * num_example)[i] + tools = examples.get("tools", [None] * num_example)[i] + if len(messages) < 2 or messages[0]['role'] != CONVERSATION_ROLE_NAMES['user']: + tok_logger.warning( + "Invalid instance encountered. Either the conversation has less than " + "one round or the first message is not from the user." + ) + continue + + if len(messages) % 2 != 0: + logger.warning( + "The number of messages is not even, the last message will be ignored." + ) + messages = messages[:-1] + + encoded_conversation = conversation_template.encode_conversation( + tokenizer=tokenizer, + messages=messages, + system=system, + tools=tools, + ) + + input_ids, labels = [], [] + for turn_idx, (user_input, assistant_result) in enumerate(encoded_conversation): + input_ids += user_input + assistant_result + + if data_args.train_on_prompt: + labels += user_input + assistant_result + else: + labels += [-100] * len(user_input) + assistant_result + + token_dict["input_ids"][i].extend(input_ids) + token_dict["attention_mask"][i].extend([1] * len(input_ids)) + token_dict["labels"][i].extend(labels) + + if data_args.disable_group_texts: + token_dict = blocking( + token_dict=token_dict, + block_size=data_args.block_size, + model_max_length=tokenizer.model_max_length, + pad_token_id=tokenizer.pad_token_id, + padding_side=tokenizer.padding_side, + truncation_side=tokenizer.truncation_side, + ) + + # clm input could be much much longer than block_size + if "Token indices sequence length is longer than the" in cl.out: + tok_logger.warning( + "^^^^^^^^^^^^^^^^ Please ignore the warning above - this long input will be chunked into smaller bits" + " before being passed to the model." + ) + return token_dict
+ +
+ + + + + + + + \ No newline at end of file diff --git a/_modules/lmflow/tokenization/hf_text_regression_model.html b/_modules/lmflow/tokenization/hf_text_regression_model.html new file mode 100644 index 000000000..1175dba30 --- /dev/null +++ b/_modules/lmflow/tokenization/hf_text_regression_model.html @@ -0,0 +1,933 @@ + + + + + + + + + + lmflow.tokenization.hf_text_regression_model — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for lmflow.tokenization.hf_text_regression_model

+#!/usr/bin/env python
+# coding=utf-8
+# Copyright 2024 Statistics and Machine Learning Research Group. All rights reserved.
+
+import logging
+from logging import Logger
+from typing import Dict, List, Union
+
+import transformers
+from transformers.testing_utils import CaptureLogger
+from transformers import PreTrainedTokenizer, PreTrainedTokenizerFast
+
+from lmflow.utils.conversation_template import ConversationTemplate
+from lmflow.utils.constants import CONVERSATION_ROLE_NAMES
+from lmflow.args import DatasetArguments
+
+
+
+[docs] +logger = logging.getLogger(__name__)
+ +
+[docs] +tok_logger = transformers.utils.logging.get_logger("transformers.tokenization_utils_base")
+ + + +
+[docs] +def blocking_paired( + token_dict: Dict, + column_names: List, + block_size: int, + model_max_length: int, + pad_token_id: int, + padding_side: str, + truncation_side: str='right', +) -> Dict: + block_size_warning_num = 0 + num_example = len(token_dict[list(token_dict.keys())[0]]) + for i in range(num_example): + for column_name in column_names: + max_length = min(block_size, model_max_length) + pad_length = max_length - len(token_dict[f"input_ids_{column_name}"][i]) + if block_size < model_max_length: + block_size_warning_num += 1 + if pad_length < 0: + # Truncates too long samples + for key in [f"input_ids_{column_name}", f"attention_mask_{column_name}"]: + if truncation_side == 'right': + token_dict[key][i] = token_dict[key][i][:pad_length] + elif truncation_side == 'left': + token_dict[key][i] = token_dict[key][i][-pad_length:] + else: + raise ValueError( + f"truncation_side should be either 'right' or 'left', got {truncation_side}" + ) + else: + if padding_side == 'right': + # Pads too short samples + token_dict[f"input_ids_{column_name}"][i].extend( + [pad_token_id for _ in range(pad_length)] + ) + token_dict[f"attention_mask_{column_name}"][i].extend( + [0 for _ in range(pad_length)] + ) + elif padding_side == 'left': + # Pads too short samples + token_dict[f"input_ids_{column_name}"][i] = ( + [pad_token_id for _ in range(pad_length)] + token_dict[f"input_ids_{column_name}"][i] + ) + token_dict[f"attention_mask_{column_name}"][i] = ( + [0 for _ in range(pad_length)] + token_dict[f"attention_mask_{column_name}"][i] + ) + else: + raise ValueError( + f"padding_side should be either 'right' or 'left', got {padding_side}" + ) + if block_size_warning_num > 0: + logger.warning( + f"There are {block_size_warning_num} of {num_example} samples where" + f" block_size {block_size} < model_max_length" + f" {model_max_length}, use block_size" + " for maximum tokenized sequence length" + ) + + return token_dict
+ + + +
+[docs] +def blocking( + token_dict: Dict, + block_size: int, + model_max_length: int, + pad_token_id: int, + padding_side: str, + truncation_side: str='right', +) -> Dict: + block_size_warning_num = 0 + num_example = len(token_dict[list(token_dict.keys())[0]]) + for i in range(num_example): + max_length = min(block_size, model_max_length) + pad_length = max_length - len(token_dict["input_ids"][i]) + if block_size < model_max_length: + block_size_warning_num += 1 + if pad_length < 0: + # Truncates too long samples + for key in ["input_ids", "attention_mask", "labels"]: + if truncation_side == 'right': + token_dict[key][i] = token_dict[key][i][:pad_length] + elif truncation_side == 'left': + token_dict[key][i] = token_dict[key][i][-pad_length:] + else: + raise ValueError( + f"truncation_side should be either 'right' or 'left', got {truncation_side}" + ) + else: + if padding_side == 'right': + # Pads too short samples + token_dict["input_ids"][i].extend( + [pad_token_id for _ in range(pad_length)] + ) + token_dict["attention_mask"][i].extend( + [0 for _ in range(pad_length)] + ) + token_dict["labels"][i].extend( + [-100 for _ in range(pad_length)] + ) + elif padding_side == 'left': + # Pads too short samples + token_dict["input_ids"][i] = ( + [pad_token_id for _ in range(pad_length)] + token_dict["input_ids"][i] + ) + token_dict["attention_mask"][i] = ( + [0 for _ in range(pad_length)] + token_dict["attention_mask"][i] + ) + token_dict["labels"][i] = ( + [-100 for _ in range(pad_length)] + token_dict["labels"][i] + ) + else: + raise ValueError( + f"padding_side should be either 'right' or 'left', got {padding_side}" + ) + if block_size_warning_num > 0: + logger.warning( + f"There are {block_size_warning_num} of {num_example} samples where" + f" block_size {block_size} < model_max_length" + f" {model_max_length}, use block_size" + " for maximum tokenized sequence length" + ) + + return token_dict
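A minimal sketch (not part of the module) of what `blocking` does to a single already-tokenized example; the token ids and argument values below are invented for illustration:

```python
# Toy input: one example of length 3; block_size=5 is smaller than
# model_max_length, so the example is right-padded up to 5 tokens.
token_dict = {
    "input_ids": [[5, 6, 7]],
    "attention_mask": [[1, 1, 1]],
    "labels": [[-100, 6, 7]],
}

blocked = blocking(
    token_dict=token_dict,
    block_size=5,
    model_max_length=4096,
    pad_token_id=0,
    padding_side="right",
    truncation_side="right",
)

print(blocked["input_ids"][0])       # [5, 6, 7, 0, 0]
print(blocked["attention_mask"][0])  # [1, 1, 1, 0, 0]
print(blocked["labels"][0])          # [-100, 6, 7, -100, -100]
```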
+ + + +
+[docs] +def blocking_text_to_textlist( + token_dict: Dict, + block_size: int, + model_max_length: int, + pad_token_id: int, + padding_side: str, + truncation_side: str='right', +) -> Dict: + block_size_warning_num = 0 + num_example = len(token_dict[list(token_dict.keys())[0]]) + max_length = min(block_size, model_max_length) + + for example_idx in range(num_example): + for content_idx in range(len(token_dict["input_ids"][example_idx])): + pad_length = max_length - len(token_dict["input_ids"][example_idx][content_idx]) + if block_size < model_max_length: + block_size_warning_num += 1 + if pad_length < 0: + # Truncates too long samples + if truncation_side == 'right': + token_dict["input_ids"][example_idx][content_idx] = token_dict["input_ids"][example_idx][content_idx][:pad_length] + elif truncation_side == 'left': + token_dict["input_ids"][example_idx][content_idx] = token_dict["input_ids"][example_idx][content_idx][-pad_length:] + else: + raise ValueError( + f"truncation_side should be either 'right' or 'left', got {truncation_side}" + ) + else: + if padding_side == 'right': + # Pads too short samples + token_dict["input_ids"][example_idx][content_idx].extend( + [pad_token_id for _ in range(pad_length)] + ) + elif padding_side == 'left': + # Pads too short samples + token_dict["input_ids"][example_idx][content_idx] = ( + [pad_token_id for _ in range(pad_length)] + token_dict["input_ids"][example_idx][content_idx] + ) + else: + raise ValueError( + f"padding_side should be either 'right' or 'left', got {padding_side}" + ) + if block_size_warning_num > 0: + logger.warning( + f"There are {block_size_warning_num} of {num_example} samples where" + f" block_size {block_size} < model_max_length" + f" {model_max_length}, use block_size" + " for maximum tokenized sequence length" + ) + + return token_dict
+ + + +
+[docs] +def paired_conversation_tokenize_function( + examples, + data_args: DatasetArguments, + tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast], + column_names, + conversation_template: ConversationTemplate, +) -> Dict: + num_example = len(examples[column_names[0]]) + token_dict = {} + for column_name in column_names: + token_dict[f"input_ids_{column_name}"] = [[] for _ in range(num_example)] + token_dict[f"attention_mask_{column_name}"] = [[] for _ in range(num_example)] + + with CaptureLogger(tok_logger) as cl: + num_corrupted = 0 + for i in range(num_example): + try: + for column_name in column_names: + messages = examples[column_name][i]["messages"] + system = examples[column_name][i].get("system", None) + tools = examples[column_name][i].get("tools", None) + if len(messages) < 2 or messages[0]['role'] != CONVERSATION_ROLE_NAMES['user']: + tok_logger.warning( + "Invalid instance encountered. Either the conversation has less than " + "one round or the first message is not from the user." + ) + continue + + if len(messages) % 2 != 0: + logger.warning( + "The number of messages is not even, the last message will be ignored." + ) + messages = messages[:-1] + + encoded_conversation = conversation_template.encode_conversation( + tokenizer=tokenizer, + messages=messages, + system=system, + tools=tools, + ) + + input_ids = [] + for turn_idx, (user_input, assistant_result) in enumerate(encoded_conversation): + input_ids += user_input + assistant_result + + token_dict[f"input_ids_{column_name}"][i].extend(input_ids) + token_dict[f"attention_mask_{column_name}"][i].extend([1] * len(input_ids)) + + except: + num_corrupted += 1 + logger.error(f"Error in encoding conversation {i}: {column_name}") + logger.error(f"Messages: {messages}") + continue + if num_corrupted > 0: + logger.error(f"Number of corrupted examples: {num_corrupted}") + + if data_args.disable_group_texts: + token_dict = blocking_paired( + token_dict=token_dict, + column_names=column_names, + block_size=data_args.block_size, + model_max_length=tokenizer.model_max_length, + pad_token_id=tokenizer.pad_token_id, + padding_side=tokenizer.padding_side, + truncation_side=tokenizer.truncation_side, + ) + + # clm input could be much much longer than block_size + if "Token indices sequence length is longer than the" in cl.out: + tok_logger.warning( + "^^^^^^^^^^^^^^^^ Please ignore the warning above - this long input will be chunked into smaller bits" + " before being passed to the model." + ) + return token_dict
+ + + +
+[docs] +def conversation_tokenize_function( + examples, + data_args: DatasetArguments, + tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast], + column_names, + conversation_template: ConversationTemplate, +) -> Dict: + """Handles tokenization for conversation datasets. + """ + num_example = len(examples[column_names[0]]) + token_dict = { + "input_ids": [[] for _ in range(num_example)], + "attention_mask": [[] for _ in range(num_example)], + "labels": [[] for _ in range(num_example)], + } + with CaptureLogger(tok_logger) as cl: + for i in range(len(examples["messages"])): + messages = examples["messages"][i] + system = examples.get("system", [None] * num_example)[i] + tools = examples.get("tools", [None] * num_example)[i] + if len(messages) < 2 or messages[0]['role'] != CONVERSATION_ROLE_NAMES['user']: + tok_logger.warning( + "Invalid instance encountered. Either the conversation has less than " + "one round or the first message is not from the user." + ) + continue + + if len(messages) % 2 != 0: + logger.warning( + "The number of messages is not even, the last message will be ignored." + ) + messages = messages[:-1] + + encoded_conversation = conversation_template.encode_conversation( + tokenizer=tokenizer, + messages=messages, + system=system, + tools=tools, + ) + + input_ids, labels = [], [] + for turn_idx, (user_input, assistant_result) in enumerate(encoded_conversation): + input_ids += user_input + assistant_result + + if data_args.train_on_prompt: + labels += user_input + assistant_result + else: + labels += [-100] * len(user_input) + assistant_result + + token_dict["input_ids"][i].extend(input_ids) + token_dict["attention_mask"][i].extend([1] * len(input_ids)) + token_dict["labels"][i].extend(labels) + + if data_args.disable_group_texts: + token_dict = blocking( + token_dict=token_dict, + block_size=data_args.block_size, + model_max_length=tokenizer.model_max_length, + pad_token_id=tokenizer.pad_token_id, + padding_side=tokenizer.padding_side, + truncation_side=tokenizer.truncation_side, + ) + + # clm input could be much much longer than block_size + if "Token indices sequence length is longer than the" in cl.out: + tok_logger.warning( + "^^^^^^^^^^^^^^^^ Please ignore the warning above - this long input will be chunked into smaller bits" + " before being passed to the model." + ) + return token_dict
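A hedged usage sketch, not taken from LMFlow's own pipelines: applying `conversation_tokenize_function` through `datasets.map` on a toy conversation dataset. The checkpoint name and the `DatasetArguments` fields used here (`block_size`, `disable_group_texts`) are illustrative assumptions based on how they are accessed in the function above.

```python
from functools import partial

from datasets import Dataset as HFDataset
from transformers import AutoTokenizer

from lmflow.args import DatasetArguments
from lmflow.utils.conversation_template import PRESET_TEMPLATES

tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-0.5B-Instruct")  # illustrative choice
data_args = DatasetArguments(dataset_path=None, block_size=512, disable_group_texts=True)

raw = HFDataset.from_dict({
    "messages": [[
        {"role": "user", "content": "hi"},
        {"role": "assistant", "content": "Hello!"},
    ]]
})

tokenized = raw.map(
    partial(
        conversation_tokenize_function,   # the function defined above
        data_args=data_args,
        tokenizer=tokenizer,
        column_names=raw.column_names,                    # ["messages"]
        conversation_template=PRESET_TEMPLATES["qwen2"],
    ),
    batched=True,
    remove_columns=raw.column_names,
)
# Each resulting row carries block_size-long input_ids/attention_mask/labels,
# with user tokens masked to -100 unless data_args.train_on_prompt is set.
```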
+ + + +
+[docs] +def tokenize_function( + examples, + data_args: DatasetArguments, + tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast], + column_names, + label_columns, + tokenized_column_order, + add_special_tokens, + use_truncation, +) -> Dict: + """Handles tokenization for text_only and text2text datasets. + """ + num_example = len(examples[column_names[0]]) + token_dict = { + "input_ids": [[] for _ in range(num_example)], + "attention_mask": [[] for _ in range(num_example)], + "labels": [[] for _ in range(num_example)], + } + with CaptureLogger(tok_logger) as cl: + for column_name in tokenized_column_order: + encoding = tokenizer( + examples[column_name], + add_special_tokens=add_special_tokens, + truncation=use_truncation, + ) + + if column_name in label_columns: + labels = encoding["input_ids"].copy() + else: + labels = [ + [-100] * len(encoding["input_ids"][i]) + for i in range(num_example) + ] + + for i in range(num_example): + token_dict["input_ids"][i].extend( + encoding["input_ids"][i] + ) + token_dict["attention_mask"][i].extend( + encoding["attention_mask"][i] + ) + token_dict["labels"][i].extend(labels[i]) + + if data_args.disable_group_texts: + token_dict = blocking( + token_dict=token_dict, + block_size=data_args.block_size, + model_max_length=tokenizer.model_max_length, + pad_token_id=tokenizer.pad_token_id, + padding_side=tokenizer.padding_side, + truncation_side=tokenizer.truncation_side, + ) + + # clm input could be much much longer than block_size + if "Token indices sequence length is longer than the" in cl.out: + tok_logger.warning( + "^^^^^^^^^^^^^^^^ Please ignore the warning above - this long input will be chunked into smaller bits" + " before being passed to the model." + ) + return token_dict
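For contrast, a toy direct call to `tokenize_function` on a `text2text`-style batch (normally it would also be applied through `datasets.map`); the checkpoint and the `DatasetArguments` field names are again illustrative assumptions:

```python
from transformers import AutoTokenizer
from lmflow.args import DatasetArguments

tokenizer = AutoTokenizer.from_pretrained("gpt2")
data_args = DatasetArguments(dataset_path=None, disable_group_texts=False)

examples = {
    "input": ["Human: Hello.", "Human: How are you today?"],
    "output": ["Bot: Hi!", "Bot: Fine, thank you!"],
}

token_dict = tokenize_function(
    examples,
    data_args=data_args,
    tokenizer=tokenizer,
    column_names=["input", "output"],
    label_columns=["output"],                  # only output tokens keep real labels
    tokenized_column_order=["input", "output"],
    add_special_tokens=False,
    use_truncation=False,
)

# Input tokens contribute -100 labels, output tokens contribute their own ids,
# so input_ids and labels always have the same length.
assert len(token_dict["input_ids"][0]) == len(token_dict["labels"][0])
```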
+ + + +
+[docs] +def text_to_textlist_tokenize_function( + examples, + data_args: DatasetArguments, + tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast], + column_names, + add_special_tokens, + use_truncation, +) -> Dict: + """For rm inference, and don't need attn mask and labels. + NOTE: input_ids here refers to the tokenized input_ids of the input **and** output + """ + num_example = len(examples[column_names[0]]) + output_dict = {column_name: examples[column_name] for column_name in column_names} + output_dict["input_ids"] = [[] for _ in range(num_example)] + + for example_idx in range(num_example): + encoded = tokenizer( + [ + examples["input"][example_idx] + examples["output"][example_idx][i] + for i in range(len(examples["output"][example_idx])) + ], + add_special_tokens=add_special_tokens, + truncation=use_truncation, + ) + + output_dict["input_ids"][example_idx] = encoded["input_ids"] + + if data_args.disable_group_texts: + output_dict = blocking_text_to_textlist( + token_dict=output_dict, + block_size=data_args.block_size, + model_max_length=tokenizer.model_max_length, + pad_token_id=tokenizer.pad_token_id, + padding_side=tokenizer.padding_side, + truncation_side=tokenizer.truncation_side, + ) + + return output_dict
\ No newline at end of file
diff --git a/_modules/lmflow/utils/common.html b/_modules/lmflow/utils/common.html new file mode 100644 index 000000000..b444690f5 --- /dev/null +++ b/_modules/lmflow/utils/common.html @@ -0,0 +1,651 @@
+lmflow.utils.common — LMFlow documentation
Source code for lmflow.utils.common

+#!/usr/bin/env python
+# coding=utf-8
+# Copyright 2024 Statistics and Machine Learning Research Group. All rights reserved.
+import logging
+from dataclasses import dataclass, field, fields, Field, make_dataclass
+from pathlib import Path
+from typing import Optional, List, Union, Dict
+
+
+
+[docs] +logger = logging.getLogger(__name__)
+ + + +
+[docs] +def make_shell_args_from_dataclass( + dataclass_objects: List, + format: str="subprocess", + skip_default: bool=True, + ignored_args_list: Optional[List[str]]=None, +) -> Union[str, List[str]]: + """Return a string or a list of strings that can be used as shell arguments. + + Parameters + ---------- + dataclass_objects : List + A list of dataclass objects. + format : str, optional + Return format, can be "shell" or "subprocess", by default "subprocess". + skip_default : bool, optional + Whether to skip attributes with default values, by default True. + + Returns + ------- + Union[str, List[str]] + """ + assert isinstance(dataclass_objects, list), "dataclass_objects should be a list of dataclass objects." + all_args = {} + for dataclass_object in dataclass_objects: + for k, v in dataclass_object.__dict__.items(): + if ignored_args_list and k in ignored_args_list: + continue + if k not in dataclass_object.__dataclass_fields__: + # skip attributes that added dynamically + continue + if not v: + # skip attributes with None values + continue + if skip_default: + if dataclass_object.__dataclass_fields__[k].default == v: + continue + + if k not in all_args: + if isinstance(v, Path): + all_args[k] = str(v) + elif isinstance(v, list): + all_args[k] = ",".join(v) + else: + all_args[k] = v + elif k in all_args: + if all_args[k] == v: + continue + else: + logger.warning(f"Found different values for the same key: {k}, using value: {v} instead.") + all_args[k] = v + + if format == "shell": + final_res = " ".join([f"--{k} {v}" for k, v in all_args.items()]) + elif format == "subprocess": + final_res = [] + for k, v in all_args.items(): + final_res.extend([f"--{k}", str(v)]) + else: + raise ValueError(f"Unknown format: {format}") + + return final_res
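A minimal sketch of the two output formats; `ExampleArguments` is an invented dataclass, not one of LMFlow's argument classes:

```python
from dataclasses import dataclass

@dataclass
class ExampleArguments:
    model_name_or_path: str = "gpt2"
    learning_rate: float = 2e-5
    num_train_epochs: int = 3

args = ExampleArguments(model_name_or_path="facebook/opt-125m", learning_rate=1e-4)

# skip_default=True drops num_train_epochs because it still equals its default.
print(make_shell_args_from_dataclass([args], format="shell"))
# --model_name_or_path facebook/opt-125m --learning_rate 0.0001

print(make_shell_args_from_dataclass([args], format="subprocess"))
# ['--model_name_or_path', 'facebook/opt-125m', '--learning_rate', '0.0001']
```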
+ + + +
+[docs] +def create_copied_dataclass( + original_dataclass, + field_prefix: str, + class_prefix: str, + new_default: Dict=None +): + """Create a copied dataclass with new field names and default values. + + Parameters + ---------- + original_dataclass : dataclass + field_prefix : str + The prefix to add to the **field** names of the copied dataclass. + class_prefix : str + The prefix to add to the **class** name of the copied dataclass. + new_default : Dict, optional + The new default values for the copied dataclass. When None, the + default values of the original dataclass are used. + + Returns + ------- + dataclass + """ + original_fields = fields(original_dataclass) + new_default = new_default or {} + new_fields = [] + for field in original_fields: + new_field = ( + f"{field_prefix}{field.name}", + field.type, + Field( + default=new_default.get(f"{field_prefix}{field.name}", field.default), + default_factory=field.default_factory, + init=field.init, + repr=field.repr, + hash=field.hash, + compare=field.compare, + metadata=field.metadata, + ) + ) + new_fields.append(new_field) + copied_dataclass = make_dataclass(f"{class_prefix}{original_dataclass.__name__}", new_fields) + return copied_dataclass
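An illustrative sketch: deriving a prefixed copy of a small, invented dataclass. This mirrors how a second set of arguments could be carried alongside the original ones, but the class and field names here are assumptions, not LMFlow API:

```python
from dataclasses import dataclass

@dataclass
class SmallModelArguments:          # invented for illustration
    model_name_or_path: str = "gpt2"
    torch_dtype: str = "float16"

RewardSmallModelArguments = create_copied_dataclass(
    original_dataclass=SmallModelArguments,
    field_prefix="reward_",
    class_prefix="Reward",
    new_default={"reward_model_name_or_path": "facebook/opt-125m"},
)

reward_args = RewardSmallModelArguments()
print(type(reward_args).__name__)              # RewardSmallModelArguments
print(reward_args.reward_model_name_or_path)   # facebook/opt-125m (overridden default)
print(reward_args.reward_torch_dtype)          # float16 (inherited default)
```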
+ + + +
+[docs] +def remove_dataclass_attr_prefix(data_instance, prefix: str) -> Dict: + """Remove the prefix from the attribute names of a dataclass instance. + + Parameters + ---------- + data_instance : dataclass + prefix : str + The prefix to remove from the attribute names of the dataclass instance. + + Returns + ------- + Dict + """ + new_attributes = {} + for field in fields(data_instance): + attr_name = field.name + attr_value = getattr(data_instance, attr_name) + new_attr_name = f"{attr_name[len(prefix):]}" + new_attributes[new_attr_name] = attr_value + + return new_attributes
+ + + +
+[docs] +def add_dataclass_attr_prefix(data_instance, prefix: str) -> Dict: + """Add the prefix to the attribute names of a dataclass instance. + + Parameters + ---------- + data_instance : dataclass + prefix : str + The prefix to add to the attribute names of the dataclass instance. + + Returns + ------- + Dict + """ + new_attributes = {} + for field in fields(data_instance): + attr_name = field.name + attr_value = getattr(data_instance, attr_name) + new_attr_name = f"{prefix}{attr_name}" + new_attributes[new_attr_name] = attr_value + + return new_attributes
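Both prefix helpers return plain dictionaries rather than new dataclass instances; a small sketch with invented dataclasses:

```python
from dataclasses import dataclass

@dataclass
class SamplingArguments:            # invented for illustration
    temperature: float = 1.0
    top_p: float = 0.9

@dataclass
class EvalSamplingArguments:        # invented: field names already carry the prefix
    eval_temperature: float = 0.7
    eval_top_p: float = 0.95

print(add_dataclass_attr_prefix(SamplingArguments(), "eval_"))
# {'eval_temperature': 1.0, 'eval_top_p': 0.9}

print(remove_dataclass_attr_prefix(EvalSamplingArguments(), "eval_"))
# {'temperature': 0.7, 'top_p': 0.95}
```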
\ No newline at end of file
diff --git a/_modules/lmflow/utils/constants.html b/_modules/lmflow/utils/constants.html new file mode 100644 index 000000000..ee08aa856 --- /dev/null +++ b/_modules/lmflow/utils/constants.html @@ -0,0 +1,995 @@
+lmflow.utils.constants — LMFlow documentation
Source code for lmflow.utils.constants

+#!/usr/bin/env python
+# coding=utf-8
+"""
+Commonly used constants.
+"""
+
+
+[docs] +TEXT_ONLY_DATASET_DESCRIPTION = ( +""" +"text_only": a dataset with only raw text instances, with following format: + + { + "type": "text_only", + "instances": [ + { "text": "TEXT_1" }, + { "text": "TEXT_2" }, + ... + ] + } +""" +).lstrip("\n")
+ + + +
+[docs] +TEXT_TO_SCORED_TEXTLIST_DATASET_DESCRIPTION = ( +""" +This kind of dataset is commonly used in reward model training/prediction, as well as rl training. +{ + "type": "text_to_scored_textlist", + "instances": [ + { + "input": "what's your name?", + "output": [ + {"score": 1.0, "text": "My name is John"}, + {"score": -0.8, "text": "I'm John"} + ] + }, + { + "input": "Who are you?", + "output": [ + {"score": 1.5, "text": "My name is Amy"}, + {"score": 1.0, "text": "I'm Amy"} + ] + }, + ] +} +""" +).lstrip("\n")
+ + + +
+[docs] +PAIRED_TEXT_TO_TEXT_DATASET_DESCRIPTION = ( +""" +This kind of dataset is commonly used in reward model training as well as rl training. +{ + "type": "paired_text_to_text", + "instances": [ + { + "prompt": "Who are you?", + "chosen": "My name is Amy.", + "rejected": "I'm Amy", + "margin": 0.6 + }, + { + "prompt": "what's your name?", + "chosen": "My name is John.", + "rejected": "I'm John", + "margin": 0.5 + } + ] +} +""" +).lstrip("\n")
+ + + +
+[docs] +TEXT_ONLY_DATASET_DETAILS = ( +""" + For example, + + ```python + from lmflow.datasets import Dataset + + data_dict = { + "type": "text_only", + "instances": [ + { "text": "Human: Hello. Bot: Hi!" }, + { "text": "Human: How are you today? Bot: Fine, thank you!" }, + ] + } + dataset = Dataset.create_from_dict(data_dict) + ``` + + You may also save the corresponding format to json, + ```python + import json + from lmflow.args import DatasetArguments + from lmflow.datasets import Dataset + + data_dict = { + "type": "text_only", + "instances": [ + { "text": "Human: Hello. Bot: Hi!" }, + { "text": "Human: How are you today? Bot: Fine, thank you!" }, + ] + } + with open("data.json", "w") as fout: + json.dump(data_dict, fout) + + data_args = DatasetArgument(dataset_path="data.json") + dataset = Dataset(data_args) + new_data_dict = dataset.to_dict() + # `new_data_dict` Should have the same content as `data_dict` + ``` +""" +).lstrip("\n")
+ + + +
+[docs] +TEXT2TEXT_DATASET_DESCRIPTION = ( +""" +"text2text": a dataset with input & output instances, with following format: + + { + "type": "text2text", + "instances": [ + { "input": "INPUT_1", "output": "OUTPUT_1" }, + { "input": "INPUT_2", "output": "OUTPUT_2" }, + ... + ] + } +""" +).lstrip("\n")
+ + + +
+[docs] +CONVERSATION_DATASET_DESCRIPTION = ( +""" +"conversation": a dataset with conversation instances, with following format (`conversation_id`, `system` and `tools` are optional): + + { + "type": "conversation", + "instances": [ + { + "conversation_id": "CONVERSATION_ID", + "system": "SYSTEM_PROPMT", + "tools": ["TOOL_DESCRIPTION_1","TOOL_DESCRIPTION_2","TOOL_DESCRIPTION_X"], + "messages": [ + { + "role": "user", + "content": "USER_INPUT_1" + }, + { + "role": "assistant", + "content": "ASSISTANT_RESPONSE_1" + }, + { + "role": "user", + "content": "USER_INPUT_2" + }, + { + "role": "assistant", + "content": "ASSISTANT_RESPONSE_2" + } + ] + }, + { + "conversation_id": "CONVERSATION_ID", + "system": "SYSTEM_PROPMT", + "tools": ["TOOL_DESCRIPTION_1"], + "messages": [ + { + "role": "user", + "content": "USER_INPUT_1" + }, + { + "role": "assistant", + "content": "ASSISTANT_RESPONSE_1" + } + ] + } + ] + } +""" +).lstrip("\n")
+ + + +
+[docs] +PAIRED_CONVERSATION_DATASET_DESCRIPTION = ( +""" +"paired_conversation": a dataset with paired conversation instances, with following format: + + { + "type": "paired_conversation", + "instances": [ + { + "chosen": { + "conversation_id": "CONVERSATION_ID", + "system": "SYSTEM_PROPMT", + "tools": ["TOOL_DESCRIPTION_1","TOOL_DESCRIPTION_2","TOOL_DESCRIPTION_3"], + "messages": [ + { + "role": "user", + "content": "USER_INPUT_1" + }, + { + "role": "assistant", + "content": "ASSISTANT_RESPONSE_1_GOOD" + }, + { + "role": "user", + "content": "USER_INPUT_2" + }, + { + "role": "assistant", + "content": "ASSISTANT_RESPONSE_2_GOOD" + } + ] + }, + "rejected": { + "conversation_id": "CONVERSATION_ID", + "system": "SYSTEM_PROPMT", + "tools": ["TOOL_DESCRIPTION_1","TOOL_DESCRIPTION_2","TOOL_DESCRIPTION_3"], + "messages": [ + { + "role": "user", + "content": "USER_INPUT_1" + }, + { + "role": "assistant", + "content": "ASSISTANT_RESPONSE_1_BAD" + }, + { + "role": "user", + "content": "USER_INPUT_2" + }, + { + "role": "assistant", + "content": "ASSISTANT_RESPONSE_2_BAD" + } + ] + } + } + ] + } +""" +).lstrip("\n")
+ + + +
+[docs] +TEXT_TO_TEXTLIST_DATASET_DESCRIPTION = ( +""" +This kind of dataset is commonly used in reward model inference. +{ + "type": "text_to_textlist", + "instances": [ + { + "input": "what's your name?", + "output": [ + "My name is John", + "I'm John", + ] + }, + { + "input": "Who are you?", + "output": [ + "My name is Amy", + "I'm Amy", + ] + }, + ] +} +""" +).lstrip("\n")
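Following the pattern of the `text_only` and `text2text` examples elsewhere in this module, a `text_to_textlist` dataset could be built in memory like this (a sketch, assuming `Dataset.create_from_dict` accepts this dataset type the same way it does the others):

```python
from lmflow.datasets import Dataset

data_dict = {
    "type": "text_to_textlist",
    "instances": [
        {
            "input": "what's your name?",
            "output": ["My name is John", "I'm John"],
        },
    ],
}
dataset = Dataset.create_from_dict(data_dict)
```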
+ + + +
+[docs] +TEXT2TEXT_DATASET_DETAILS = ( +""" + For example, + + ```python + from lmflow.datasets import Dataset + + data_dict = { + "type": "text2text", + "instances": [ + { + "input": "Human: Hello.", + "output": "Bot: Hi!", + }, + { + "input": "Human: How are you today?", + "output": "Bot: Fine, thank you! And you?", + } + ] + } + dataset = Dataset.create_from_dict(data_dict) + ``` + + You may also save the corresponding format to json, + ```python + import json + from lmflow.args import DatasetArguments + from lmflow.datasets import Dataset + + data_dict = { + "type": "text2text", + "instances": [ + { + "input": "Human: Hello.", + "output": "Bot: Hi!", + }, + { + "input": "Human: How are you today?", + "output": "Bot: Fine, thank you! And you?", + } + ] + } + with open("data.json", "w") as fout: + json.dump(data_dict, fout) + + data_args = DatasetArgument(dataset_path="data.json") + dataset = Dataset(data_args) + new_data_dict = dataset.to_dict() + # `new_data_dict` Should have the same content as `data_dict` + ``` +""" +).lstrip("\n")
+ + + +
+[docs] +FLOAT_ONLY_DATASET_DESCRIPTION = ( +""" +"float_only": a dataset with only float instances, with following format: + + { + "type": "float_only", + "instances": [ + { "value": "FLOAT_1" }, + { "value": "FLOAT_2" }, + ... + ] + } +""" +).lstrip("\n")
+ + + +
+[docs] +TEXT_ONLY_DATASET_LONG_DESCRITION = ( + TEXT_ONLY_DATASET_DESCRIPTION + TEXT_ONLY_DATASET_DETAILS +)
+ + +
+[docs] +TEXT2TEXT_DATASET_LONG_DESCRITION = ( + TEXT2TEXT_DATASET_DESCRIPTION + TEXT2TEXT_DATASET_DETAILS +)
+ + + +
+[docs] +DATASET_DESCRIPTION_MAP = { + "text_only": TEXT_ONLY_DATASET_DESCRIPTION, + "text2text": TEXT2TEXT_DATASET_DESCRIPTION, + "float_only": FLOAT_ONLY_DATASET_DESCRIPTION, +}
+ + +
+[docs] +INSTANCE_FIELDS_MAP = { + "text_only": ["text"], + "text2text": ["input", "output"], + "conversation": ["messages"], # system, tools and conversation_id are optional + "paired_conversation": ["chosen", "rejected"], + "paired_text_to_text": ["prompt", "chosen", "rejected"], + "float_only": ["value"], + "image_text": ["images", "text"], + "text_to_textlist": ["input", "output"], + "text_to_scored_textlist": ["input", "output"], +}
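A small sketch (not library code) showing how `INSTANCE_FIELDS_MAP` can back a quick sanity check that every instance of a dataset dictionary carries the fields its type requires:

```python
def check_instance_fields(data_dict: dict) -> None:
    """Raise if any instance is missing a required field for its dataset type."""
    required = set(INSTANCE_FIELDS_MAP[data_dict["type"]])
    for idx, instance in enumerate(data_dict["instances"]):
        missing = required - set(instance.keys())
        if missing:
            raise ValueError(f"Instance {idx} is missing fields: {sorted(missing)}")

check_instance_fields({
    "type": "text2text",
    "instances": [{"input": "Human: Hello.", "output": "Bot: Hi!"}],
})  # passes silently
```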
+ + +
+[docs] +CONVERSATION_ROLE_NAMES = { + "system": "system", + "user": "user", + "assistant": "assistant", + "function": "function", + "observation": "observation" +}
+ + +# LLAVA constants +
+[docs] +CONTROLLER_HEART_BEAT_EXPIRATION = 30
+ +
+[docs] +WORKER_HEART_BEAT_INTERVAL = 15
+ + +
+[docs] +LOGDIR = "."
+ + +# Model Constants +
+[docs] +IGNORE_INDEX = -100
+ +
+[docs] +IMAGE_TOKEN_INDEX = -200
+ +
+[docs] +DEFAULT_IMAGE_TOKEN = "<image>"
+ +
+[docs] +DEFAULT_IMAGE_PATCH_TOKEN = "<im_patch>"
+ +
+[docs] +DEFAULT_IM_START_TOKEN = "<im_start>"
+ +
+[docs] +DEFAULT_IM_END_TOKEN = "<im_end>"
+ + +# Lora +# NOTE: Be careful, when passing lora_target_modules through arg parser, the +# value should be like'--lora_target_modules q_proj, v_proj \', while specifying +# here, it should be in list format. +
+[docs] +LMFLOW_LORA_TARGET_MODULES_MAPPING = { + 'qwen2': ["q_proj", "v_proj"], + 'internlm2': ["wqkv"], +}
+ + +# vllm inference +
+[docs] +MEMORY_SAFE_VLLM_INFERENCE_FINISH_FLAG = "MEMORY_SAFE_VLLM_INFERENCE_DONE"
+ +
+[docs] +RETURN_CODE_ERROR_BUFFER = [ + 134 +]
+ +# return code 134: +# > Fatal Python error: _enter_buffered_busy: could not acquire lock for <_io.BufferedWriter name='<stdout>'> +# > at interpreter shutdown, possibly due to daemon threads +# The above error, by our observation, is due to the kill signal with unfinished +# stdout/stderr writing in the subprocess +
+[docs] +MEMORY_SAFE_VLLM_INFERENCE_ENV_VAR_TO_REMOVE = [ + "OMP_NUM_THREADS", + "LOCAL_RANK", + "RANK", + "GROUP_RANK", + "ROLE_RANK", + "ROLE_NAME", + "LOCAL_WORLD_SIZE", + "WORLD_SIZE", + "GROUP_WORLD_SIZE", + "ROLE_WORLD_SIZE", + "MASTER_ADDR", + "MASTER_PORT", + "TORCHELASTIC_RESTART_COUNT", + "TORCHELASTIC_MAX_RESTARTS", + "TORCHELASTIC_RUN_ID", + "TORCHELASTIC_USE_AGENT_STORE", + "TORCH_NCCL_ASYNC_ERROR_HANDLING", + "TORCHELASTIC_ERROR_FILE", +]
+ + +# dpov2 align +
+[docs] +MEMORY_SAFE_DPOV2_ALIGN_ENV_VAR_TO_REMOVE = [ + "OMP_NUM_THREADS", + "LOCAL_RANK", + "RANK", + "GROUP_RANK", + "ROLE_RANK", + "ROLE_NAME", + "LOCAL_WORLD_SIZE", + "WORLD_SIZE", + "GROUP_WORLD_SIZE", + "ROLE_WORLD_SIZE", + "MASTER_ADDR", + "MASTER_PORT", + "TORCHELASTIC_RESTART_COUNT", + "TORCHELASTIC_MAX_RESTARTS", + "TORCHELASTIC_RUN_ID", + "TORCHELASTIC_USE_AGENT_STORE", + "TORCH_NCCL_ASYNC_ERROR_HANDLING", + "TORCHELASTIC_ERROR_FILE", +]
\ No newline at end of file
diff --git a/_modules/lmflow/utils/conversation_template.html b/_modules/lmflow/utils/conversation_template.html new file mode 100644 index 000000000..b5008e865 --- /dev/null +++ b/_modules/lmflow/utils/conversation_template.html @@ -0,0 +1,498 @@
+lmflow.utils.conversation_template — LMFlow documentation
Source code for lmflow.utils.conversation_template

+#!/usr/bin/env python
+# coding=utf-8
+# Copyright 2024 Statistics and Machine Learning Research Group. All rights reserved.
+from .base import EMPTY_TEMPLATE, EMPTY_NO_SPECIAL_TOKENS_TEMPLATE, ConversationTemplate
+from .chatglm import CHATGLM3_TEMPLATE
+from .chatml import CHATML_TEMPLATE
+from .deepseek import DEEPSEEK_TEMPLATE
+from .fox import FOX_TEMPLATE
+from .gemma import GEMMA_TEMPLATE
+from .internlm import INTERNLM2_TEMPLATE
+from .llama import LLAMA2_TEMPLATE, LLAMA3_TEMPLATE
+from .phi import PHI3_TEMPLATE
+from .qwen import QWEN2_TEMPLATE
+from .yi import YI1_5_TEMPLATE
+from .zephyr import ZEPHYR_TEMPLATE
+
+
+
+[docs] +PRESET_TEMPLATES = { + 'chatglm3': CHATGLM3_TEMPLATE, + 'chatml': CHATML_TEMPLATE, + 'deepseek': DEEPSEEK_TEMPLATE, + 'disable': EMPTY_TEMPLATE, + 'empty': EMPTY_TEMPLATE, + 'empty_no_special_tokens': EMPTY_NO_SPECIAL_TOKENS_TEMPLATE, + 'fox': FOX_TEMPLATE, + 'gemma': GEMMA_TEMPLATE, + 'internlm2': INTERNLM2_TEMPLATE, + 'llama2': LLAMA2_TEMPLATE, + 'llama3': LLAMA3_TEMPLATE, + 'phi3': PHI3_TEMPLATE, + 'qwen2': QWEN2_TEMPLATE, + 'yi': CHATML_TEMPLATE, + 'yi1_5': YI1_5_TEMPLATE, + 'zephyr': ZEPHYR_TEMPLATE +}
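A hedged usage sketch: looking up one of the presets above and encoding a single round of conversation with it. The Phi-3 checkpoint is an illustrative choice; any tokenizer matching the chosen template would do.

```python
from transformers import AutoTokenizer

from lmflow.utils.conversation_template import PRESET_TEMPLATES

tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-4k-instruct")
template = PRESET_TEMPLATES["phi3"]

encoded_pairs = template.encode_conversation(
    tokenizer=tokenizer,
    messages=[
        {"role": "user", "content": "hi"},
        {"role": "assistant", "content": "Hello!"},
    ],
    system="You are a helpful assistant.",
)

# One (user_token_ids, assistant_token_ids) tuple per round; the system prompt
# is folded into the first user turn.
for user_ids, assistant_ids in encoded_pairs:
    print(len(user_ids), len(assistant_ids))
```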
\ No newline at end of file
diff --git a/_modules/lmflow/utils/conversation_template/base.html b/_modules/lmflow/utils/conversation_template/base.html new file mode 100644 index 000000000..c69c557e1 --- /dev/null +++ b/_modules/lmflow/utils/conversation_template/base.html @@ -0,0 +1,1008 @@
+lmflow.utils.conversation_template.base — LMFlow documentation
Source code for lmflow.utils.conversation_template.base

+#!/usr/bin/env python
+# coding=utf-8
+# Copyright 2024 Statistics and Machine Learning Research Group. All rights reserved.
+import re
+from abc import ABC, abstractmethod
+from dataclasses import dataclass, field
+from typing import Dict, Set, Sequence, Literal, Union, List, Optional, Tuple
+import logging
+
+from transformers import PreTrainedTokenizer
+
+
+
+[docs] +logger = logging.getLogger(__name__)
+ + + +@dataclass +
+[docs] +class TemplateComponent: + """The minimal unit of a template, which can be a token, a string, or a list of tools. + + Parameters + ---------- + type : Literal['token', 'token_id', 'string', 'tools'] + - Type of the component. + + - When the component is a token or a string, the content should be `string`. + The difference between the two is that token will be converted to token ids + by the tokenizer.convert_tokens_to_ids() method, while string will be directly + encoded by the tokenizer.encode() method. Specially, since the bos token and eos + token are frequently used across different templates, we provide the convenience + to use `'bos_token'` and `'eos_token'` to represent the actual bos and eos tokens when + `type` of the `TemplateComponent` is `token`. For example: + + ```python + TemplateComponent(type='token', content='bos_token') + ``` + + After encoding, the content will be replaced by the actual token id of the bos token. + Please do remember that if you set the `type` to `string`, the tokenizer will try to + encode the string 'bos_token' instead of providing the actual bos token. + + - When the component is token_id, the content should be `int` or `List[int]`, and + will be directly appended to the encoded token ids. + + - Tools are not supported yet. + + content : Union[str, int, List[str], List[int]] + Content of the component. + + """ +
+[docs] + type: Literal['token', 'token_id', 'string', 'tools']
+ +
+[docs] + content: Union[str, int, List[str], List[int]]
+ +
+[docs] + mask: Optional[bool] = True # for token specific masking, work in progress
+ + +
+[docs] + def __post_init__(self): + assert self.content, "Content of the component cannot be empty." + + if self.type == 'tools': + assert isinstance(self.content, list), ( + f"Content of tools component must be a list, got {type(self.content)}") + elif self.type in ['token', 'string']: + assert isinstance(self.content, str), ( + f"Content of string/token component must be a string, got {type(self.content)}") + elif self.type == 'token_id': + assert isinstance(self.content, int) or all(isinstance(token_id, int) for token_id in self.content), ( + f"Content of token_id component must be an integer or a list of integers.") + else: + raise ValueError(f"The type of the component must be either " + f"'token', 'string' or 'tools', got {self.type}")
+ + +
+[docs] + def __repr__(self) -> str: + return f"TemplateComponent(type={self.type}, content={self.content})".replace("\n", "\\n")
+ + +
+[docs] + def __str__(self) -> str: + return f"{self.content}".replace("\n", "\\n")
+
+ + + +@dataclass +
+[docs] +class Formatter(ABC): +
+[docs] + template: List[TemplateComponent] = field(default_factory=list)
+ + + @abstractmethod +
+[docs] + def format(self, **kwargs) -> List[TemplateComponent]: ...
+ + +
+[docs] + def has_placeholder(self): + flag = False + for component in self.template: + if component.type == 'string': + if re.search(r"{{(.*?)}}", component.content): + flag = True + break + return flag
+
+ + + +@dataclass +
+[docs] +class EmptyFormatter(Formatter): +
+[docs] + def __post_init__(self): + if self.has_placeholder(): + raise ValueError("Empty formatter should not have placeholders.")
+ + +
+[docs] + def format(self, **kwargs) -> list: + """Empty formatter for when no formatting is needed. + This is useful when user has already applied formatting to the dataset. + + Returns + ------- + list + Original template. + """ + return self.template
+
+ + + +@dataclass +
+[docs] +class StringFormatter(Formatter): +
+[docs] + def __post_init__(self): + if not self.has_placeholder(): + raise ValueError("String formatter should have placeholders.")
+ + +
+[docs] + def format(self, **kwargs) -> list: + """Format the string components with the provided keyword arguments. + Mostly used for formatting system prompt, user and assistant messages. + + Parameters + ---------- + **kwargs : dict + Keyword arguments containing values to replace in the template components. + + Returns + ------- + list + Formatted template. + """ + formatted_template = [] + for component in self.template: + if component.type == 'string': + for key, value in kwargs.items(): + templated = component.content.replace("{{" + key + "}}", value) + if len(templated) == 0: + logger.warning("Found empty string after formatting, adding a space instead. " + "If this is not intended, please check the dataset.") + templated = " " + formatted_template.append(TemplateComponent(type='string', content=templated)) + else: + formatted_template.append(component) + + logger.debug(formatted_template) + return formatted_template
+
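A small illustration (not from the library) of how `StringFormatter.format` substitutes the `{{content}}` placeholder while passing non-string components through untouched:

```python
formatter = StringFormatter(
    template=[
        TemplateComponent(type='string', content='<|user|>\n{{content}}'),
        TemplateComponent(type='token', content='eos_token'),
    ]
)

print(formatter.format(content="What is LMFlow?"))
# [TemplateComponent(type=string, content=<|user|>\nWhat is LMFlow?),
#  TemplateComponent(type=token, content=eos_token)]
```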
+ + + +@dataclass +
+[docs] +class ListFormatter(Formatter): +
+[docs] + def format(self, **kwargs) -> list: + pass # Work in progress
+
+ + + +@dataclass +
+[docs] +class ConversationTemplate: +
+[docs] + user_formatter: Formatter
+ +
+[docs] + assistant_formatter: Formatter
+ +
+[docs] + system_formatter: Optional[Formatter] = None
+ +
+[docs] + tools_formatter: Optional[Formatter] = None
+ +
+[docs] + separator: Optional[TemplateComponent] = None
+ +
+[docs] + special_starter: Optional[TemplateComponent] = None
+ +
+[docs] + special_stopper: Optional[TemplateComponent] = None
+ +
+[docs] + template_name: Optional[str] = None
+ + +
+[docs] + def __post_init__(self): + if self.separator: + if self.separator.type not in ['string', 'token']: + raise NotImplementedError(f"Component type {self.separator.type} cannot be used as a separator.") + + if self.special_starter: + if self.special_starter.type not in ['string', 'token', 'token_id']: + raise NotImplementedError(f"Component type {self.special_starter.type} cannot be used as a special starter.")
+ + +
+[docs] + def encode_conversation( + self, + tokenizer: PreTrainedTokenizer, + messages: List[Dict[str, str]], + system: Optional[str] = None, + tools: Optional[List[str]] = None, + remove_last_sep: bool = False, + **kwargs + ) -> Sequence[Tuple[List[int], List[int]]]: + r''' + Messages here should be guaranteed to be in pairs, with the first message being the user message and the second message being the system message. + Data example: + ```json + { + "conversation_id": 2, + "system": "sysinfo1", + "tools": ["tool_1_desc"], + "messages": [ + { + "role": "user", + "content": "hi" + }, + { + "role": "assistant", + "content": "Hello!" + } + ] + } + ``` + ''' + assert isinstance(messages, list), "Messages must be a list." + + if tools: + logger.warning("Tools are not supported yet. Please include tools in the system message manually.") + + if system: + if system.replace(" ",""): + if not self.system_formatter: + raise ValueError("Your dataset contains system message but no system formatter is provided. " + "Consider either providing a system formatter or removing system prompt from your dataset.") + else: + system = None + + encoded_pairs = self._encode(tokenizer, messages, system, tools, **kwargs) + + if self.separator and remove_last_sep: + # For models that require a separator between messages, + # user can include the seperator at the end of each template + # and specify the separator. Auto formatting will remove the + # last separator once user specifies this option. + encoded_pairs = self.remove_last_separator(encoded_pairs, tokenizer) + + if self.special_starter: + # For models that has ONLY ONE bos token at the beginning of + # a conversation session (not a conversation pair), user can + # specify a special starter to add that starter to the very + # beginning of the conversation session. + # eg: + # llama-2: <s> and </s> at every pair of conversation + # v.s. + # llama-3: <|begin_of_text|> only at the beginning of a session + encoded_pairs = self.add_special_starter(encoded_pairs, tokenizer) + + if self.special_stopper: + encoded_pairs = self.add_special_stopper(encoded_pairs, tokenizer) + + return encoded_pairs
+ + +
+[docs] + def _encode( + self, + tokenizer: PreTrainedTokenizer, + messages: List[Dict[str, str]], + system: Optional[str] = None, + tools: Optional[str] = None, + **kwargs + ) -> Sequence[Tuple[List[int], List[int]]]: + # TODO: truncation according to model max length + # TODO: make sure the last few tokens are "learnable", not masked with token_id = -100. + + res_all = [] + + system_formatted = self.system_formatter.format(content=system) if system else [] + system_encoded = self._encode_template(system_formatted, tokenizer) + + for i in range(0, len(messages), 2): + user_message = messages[i] + assistant_message = messages[i + 1] + + user_formatted = self.user_formatter.format(content=user_message["content"]) + assistant_formatted = self.assistant_formatter.format(content=assistant_message["content"]) + + user_encoded = self._encode_template(user_formatted, tokenizer) + assistant_encoded = self._encode_template(assistant_formatted, tokenizer) + + res_all.append(( + system_encoded + user_encoded if i == 0 else user_encoded, + assistant_encoded + )) + + return res_all
+ + +
+[docs] + def _encode_template( + self, + template: List[TemplateComponent], + tokenizer: PreTrainedTokenizer, + **kwargs + ) -> List[int]: + """Encode template components into token ids. + + Parameters + ---------- + template : List[TemplateComponent] + Formatted template components. + tokenizer : PreTrainedTokenizer + Tokenizer to convert tokens into token ids. + + Returns + ------- + List[int] + Encoded token ids. + """ + encoded_ids = [] + for component in template: + if component.type == 'string': + if len(component.content) == 0: + logger.warning("Empty string component found in the template.") + continue + else: + encoded_ids += tokenizer.encode(component.content, add_special_tokens=False) + elif component.type == 'token': + if component.content == 'bos_token': + encoded_ids += [tokenizer.bos_token_id] + elif component.content == 'eos_token': + encoded_ids += [tokenizer.eos_token_id] + else: + encoded_ids += self._ensure_id_list(tokenizer.convert_tokens_to_ids(component.content)) + elif component.type == 'token_id': + encoded_ids += self._ensure_id_list(component.content) + else: + raise NotImplementedError(f"Component type {component.type} is not supported yet.") + return encoded_ids
+ + +
+[docs] + def remove_last_separator( + self, + encoded_pairs: Sequence[Tuple[List[int], List[int]]], + tokenizer: PreTrainedTokenizer + ) -> Sequence[Tuple[List[int], List[int]]]: + last_assistant_msg = encoded_pairs[-1][1] + if self.separator.type == 'string': + separator_ids = tokenizer.encode(self.separator.content, add_special_tokens=False) + elif self.separator.type == 'token': + separator_ids = self._ensure_id_list(tokenizer.convert_tokens_to_ids(self.separator.content)) + else: + raise ValueError(f"Component type {self.separator.type} cannot be used as a separator.") + + if len(separator_ids) > len(last_assistant_msg): + raise ValueError("Separator is longer than the last assistant message, please check.") + + if last_assistant_msg[-len(separator_ids):] == separator_ids: + last_assistant_msg = last_assistant_msg[:-len(separator_ids)] + + encoded_pairs[-1] = (encoded_pairs[-1][0], last_assistant_msg) + + return encoded_pairs
+ + +
+[docs] + def add_special_starter( + self, + encoded_pairs: Sequence[Tuple[List[int], List[int]]], + tokenizer: PreTrainedTokenizer + ) -> Sequence[Tuple[List[int], List[int]]]: + if self.special_starter.type == 'string': + special_starter_ids = tokenizer.encode(self.special_starter.content, add_special_tokens=False) + elif self.special_starter.type == 'token': + if self.special_starter.content == 'bos_token': + special_starter_ids = [tokenizer.bos_token_id] + elif self.special_starter.content == 'eos_token': + special_starter_ids = [tokenizer.eos_token_id] + else: + special_starter_ids = self._ensure_id_list(tokenizer.convert_tokens_to_ids(self.special_starter.content)) + elif self.special_starter.type == 'token_id': + special_starter_ids = self._ensure_id_list(self.special_starter.content) + else: + raise ValueError(f"Component type {self.special_starter.type} cannot be used as a special starter.") + + encoded_pairs[0] = (special_starter_ids + encoded_pairs[0][0], encoded_pairs[0][1]) + + return encoded_pairs
+ + +
+[docs] + def add_special_stopper( + self, + encoded_pairs: Sequence[Tuple[List[int], List[int]]], + tokenizer: PreTrainedTokenizer + ) -> Sequence[Tuple[List[int], List[int]]]: + if self.special_stopper.type == 'string': + special_stopper_ids = tokenizer.encode(self.special_stopper.content, add_special_tokens=False) + elif self.special_stopper.type == 'token': + if self.special_stopper.content == 'bos_token': + special_stopper_ids = [tokenizer.bos_token_id] + elif self.special_stopper.content == 'eos_token': + special_stopper_ids = [tokenizer.eos_token_id] + else: + special_stopper_ids = self._ensure_id_list(tokenizer.convert_tokens_to_ids(self.special_stopper.content)) + elif self.special_stopper.type == 'token_id': + special_stopper_ids = self._ensure_id_list(self.special_stopper.content) + else: + raise ValueError(f"Component type {self.special_stopper.type} cannot be used as a special stopper.") + + encoded_pairs[-1] = (encoded_pairs[-1][0], encoded_pairs[-1][1] + special_stopper_ids) + + return encoded_pairs
+ + +
+[docs] + def _ensure_id_list(self, obj: Union[int, List[int]]) -> List[int]: + '''Make sure the object is a list of integers. Useful for handling token ids. + ''' + if isinstance(obj, int): + return [obj] + elif isinstance(obj, list): + return obj + else: + raise ValueError(f"Object type {type(obj)} is not supported yet.")
+
+ + + +
+[docs] +EMPTY_TEMPLATE = ConversationTemplate( + template_name='empty', + user_formatter=StringFormatter( + template=[ + TemplateComponent(type='token', content='bos_token'), + TemplateComponent(type='string', content='{{content}}') + ] + ), + assistant_formatter=StringFormatter( + template=[ + TemplateComponent(type='string', content='{{content}}'), + TemplateComponent(type='token', content='eos_token') + ] + ) +)
+ + + +
+[docs] +EMPTY_NO_SPECIAL_TOKENS_TEMPLATE = ConversationTemplate( + template_name='empty_no_special_tokens', + user_formatter=StringFormatter( + template=[ + TemplateComponent(type='string', content='{{content}}') + ] + ), + assistant_formatter=StringFormatter( + template=[ + TemplateComponent(type='string', content='{{content}}') + ] + ) +)
+ +
\ No newline at end of file
diff --git a/_modules/lmflow/utils/conversation_template/chatglm.html b/_modules/lmflow/utils/conversation_template/chatglm.html new file mode 100644 index 000000000..b4cb88f21 --- /dev/null +++ b/_modules/lmflow/utils/conversation_template/chatglm.html @@ -0,0 +1,493 @@
+lmflow.utils.conversation_template.chatglm — LMFlow documentation
Source code for lmflow.utils.conversation_template.chatglm

+#!/usr/bin/env python
+# coding=utf-8
+# Copyright 2024 Statistics and Machine Learning Research Group. All rights reserved.
+from .base import StringFormatter, TemplateComponent, ConversationTemplate
+
+
+
+[docs] +CHATGLM3_TEMPLATE = ConversationTemplate( + template_name='chatglm3', + user_formatter=StringFormatter( + template=[ + TemplateComponent(type='string', content='<|user|>\n {{content}}') + ] + ), + assistant_formatter=StringFormatter( + template=[ + TemplateComponent(type='string', content='<|assistant|>\n {{content}}') + ] + ), + system_formatter=StringFormatter( + template=[ + TemplateComponent(type='string', content='<|system|>\n {{content}}') + ] + ), + special_starter=TemplateComponent(type='string', content='[gMASK]sop') +)
\ No newline at end of file
diff --git a/_modules/lmflow/utils/conversation_template/chatml.html b/_modules/lmflow/utils/conversation_template/chatml.html new file mode 100644 index 000000000..3ccd4a06b --- /dev/null +++ b/_modules/lmflow/utils/conversation_template/chatml.html @@ -0,0 +1,492 @@
+lmflow.utils.conversation_template.chatml — LMFlow documentation
Source code for lmflow.utils.conversation_template.chatml

+#!/usr/bin/env python
+# coding=utf-8
+# Copyright 2024 Statistics and Machine Learning Research Group. All rights reserved.
+from .base import StringFormatter, TemplateComponent, ConversationTemplate
+
+
+
+[docs] +CHATML_TEMPLATE = ConversationTemplate( + template_name='chatml', + user_formatter=StringFormatter( + template=[ + TemplateComponent(type='string', content='<|im_start|>user\n{{content}}<|im_end|>\n') + ] + ), + assistant_formatter=StringFormatter( + template=[ + TemplateComponent(type='string', content='<|im_start|>assistant\n{{content}}<|im_end|>\n') + ] + ), + system_formatter=StringFormatter( + template=[ + TemplateComponent(type='string', content='<|im_start|>system\n{{content}}<|im_end|>\n') + ] + ) +)
\ No newline at end of file
diff --git a/_modules/lmflow/utils/conversation_template/deepseek.html b/_modules/lmflow/utils/conversation_template/deepseek.html new file mode 100644 index 000000000..654ed2128 --- /dev/null +++ b/_modules/lmflow/utils/conversation_template/deepseek.html @@ -0,0 +1,494 @@
+lmflow.utils.conversation_template.deepseek — LMFlow documentation
Source code for lmflow.utils.conversation_template.deepseek

+#!/usr/bin/env python
+# coding=utf-8
+# Copyright 2024 Statistics and Machine Learning Research Group. All rights reserved.
+from .base import StringFormatter, TemplateComponent, ConversationTemplate
+
+
+
+[docs] +DEEPSEEK_TEMPLATE = ConversationTemplate( + template_name='deepseek', + user_formatter=StringFormatter( + template=[ + TemplateComponent(type='string', content='User: {{content}}\n\n') + ] + ), + assistant_formatter=StringFormatter( + template=[ + TemplateComponent(type='string', content='Assistant: {{content}}'), + TemplateComponent(type='token', content='eos_token') + ] + ), + system_formatter=StringFormatter( + template=[ + TemplateComponent(type='string', content='{{content}}\n\n') + ] + ), + special_starter=TemplateComponent(type='token', content='bos_token') +)
\ No newline at end of file
diff --git a/_modules/lmflow/utils/conversation_template/fox.html b/_modules/lmflow/utils/conversation_template/fox.html new file mode 100644 index 000000000..f8cff20c5 --- /dev/null +++ b/_modules/lmflow/utils/conversation_template/fox.html @@ -0,0 +1,506 @@
+lmflow.utils.conversation_template.fox — LMFlow documentation
Source code for lmflow.utils.conversation_template.fox

+#!/usr/bin/env python
+# coding=utf-8
+# Copyright 2024 Statistics and Machine Learning Research Group. All rights reserved.
+from .base import StringFormatter, TemplateComponent, ConversationTemplate
+
+# {% for message in messages %}
+# {% if message['role'] == 'user' %}
+# {{ '<|user|>\n' + message['content'] + eos_token }}
+# {% elif message['role'] == 'system' %}
+# {{ '<|system|>\n' + message['content'] + eos_token }}
+# {% elif message['role'] == 'assistant' %}
+# {{ '<|assistant|>\n'  + message['content'] + eos_token }}
+# {% endif %}
+# {% if loop.last and add_generation_prompt %}
+# {{ '<|assistant|>' }}
+# {% endif %}
+# {% endfor %}
+
+[docs] +FOX_TEMPLATE = ConversationTemplate( + template_name='fox', + user_formatter=StringFormatter( + template=[ + TemplateComponent(type='string', content='<|user|>\n{{content}}'), + TemplateComponent(type='token', content='eos_token') + ] + ), + assistant_formatter=StringFormatter( + template=[ + TemplateComponent(type='string', content='<|assistant|>\n{{content}}'), + TemplateComponent(type='token', content='eos_token') + ] + ), + system_formatter=StringFormatter( + template=[ + TemplateComponent(type='string', content='<|system|>\n{{content}}'), + TemplateComponent(type='token', content='eos_token') + ] + ) +)
\ No newline at end of file
diff --git a/_modules/lmflow/utils/conversation_template/gemma.html b/_modules/lmflow/utils/conversation_template/gemma.html new file mode 100644 index 000000000..60784c9c2 --- /dev/null +++ b/_modules/lmflow/utils/conversation_template/gemma.html @@ -0,0 +1,522 @@
+lmflow.utils.conversation_template.gemma — LMFlow documentation
Source code for lmflow.utils.conversation_template.gemma

+#!/usr/bin/env python
+# coding=utf-8
+# Copyright 2024 Statistics and Machine Learning Research Group. All rights reserved.
+import logging
+from dataclasses import dataclass
+
+from .base import StringFormatter, TemplateComponent, ConversationTemplate
+
+
+
+[docs] +logger = logging.getLogger(__name__)
+ + + +@dataclass +
+[docs] +class GemmaConversationTemplate(ConversationTemplate): +
+[docs] + def encode_conversation(self, *args, **kwargs): + if kwargs.get('system'): + logger.warning( + 'As of now, Gemma does not support system messages officially. ' + 'ConversationTemplate will add your system messages right after ' + 'the bos token and before the user message without any special formatting. ' + 'For more details, please refer to the [official template]' + '(https://huggingface.co/google/gemma-1.1-2b-it/blob/bf4924f313df5166dee1467161e886e55f2eb4d4/tokenizer_config.json#L1507).' + ) + return super().encode_conversation(*args, **kwargs)
+
+ + + +
+[docs] +GEMMA_TEMPLATE = GemmaConversationTemplate( + template_name='gemma', + user_formatter=StringFormatter( + template=[ + TemplateComponent(type='string', content='<start_of_turn>user\n{{content}}<end_of_turn>\n') + ] + ), + assistant_formatter=StringFormatter( + template=[ + TemplateComponent(type='string', content='<start_of_turn>model\n{{content}}<end_of_turn>\n') + ] + ), + system_formatter=StringFormatter( + template=[ + TemplateComponent(type='string', content='{{content}}') + ] + ), + special_starter=TemplateComponent(type='token', content='bos_token') +)
\ No newline at end of file
diff --git a/_modules/lmflow/utils/conversation_template/internlm.html b/_modules/lmflow/utils/conversation_template/internlm.html new file mode 100644 index 000000000..c90e83cc3 --- /dev/null +++ b/_modules/lmflow/utils/conversation_template/internlm.html @@ -0,0 +1,493 @@
+lmflow.utils.conversation_template.internlm — LMFlow documentation
Source code for lmflow.utils.conversation_template.internlm

+#!/usr/bin/env python
+# coding=utf-8
+# Copyright 2024 Statistics and Machine Learning Research Group. All rights reserved.
+from .base import StringFormatter, TemplateComponent, ConversationTemplate
+
+
+
+[docs] +INTERNLM2_TEMPLATE = ConversationTemplate( + template_name='internlm2', + user_formatter=StringFormatter( + template=[ + TemplateComponent(type='string', content='<|im_start|>user\n{{content}}<|im_end|>\n') + ] + ), + assistant_formatter=StringFormatter( + template=[ + TemplateComponent(type='string', content='<|im_start|>assistant\n{{content}}<|im_end|>\n') + ] + ), + system_formatter=StringFormatter( + template=[ + TemplateComponent(type='string', content='<|im_start|>system\n{{content}}<|im_end|>\n') + ] + ), + special_starter=TemplateComponent(type='token', content='bos_token') +)
\ No newline at end of file
diff --git a/_modules/lmflow/utils/conversation_template/llama.html b/_modules/lmflow/utils/conversation_template/llama.html new file mode 100644 index 000000000..0cbde4d3b --- /dev/null +++ b/_modules/lmflow/utils/conversation_template/llama.html @@ -0,0 +1,572 @@
+lmflow.utils.conversation_template.llama — LMFlow documentation
Source code for lmflow.utils.conversation_template.llama

+#!/usr/bin/env python
+# coding=utf-8
+# Copyright 2024 Statistics and Machine Learning Research Group. All rights reserved.
+import logging
+from typing import Dict, Set, Sequence, Literal, Union, List, Optional, Tuple
+
+from transformers import PreTrainedTokenizer
+
+from .base import StringFormatter, TemplateComponent, ConversationTemplate
+
+
+
+[docs] +logger = logging.getLogger(__name__)
+ + + +
+[docs] +class Llama2ConversationTemplate(ConversationTemplate): +
+[docs] + def _encode( + self, + tokenizer: PreTrainedTokenizer, + messages: List[Dict[str, str]], + system: Optional[str] = None, + tools: Optional[str] = None, + **kwargs + ) -> Sequence[Tuple[List[int], List[int]]]: + if tools: + logger.warning("Formatted tools are not supported in Llama2, thus tools will be ignored. " + "If this is intended, please include tools in the system message manually.") + + res_all = [] + + system_formatted = self.system_formatter.format(content=system) if system else [] + system_formatted_text = "".join([component.content for component in system_formatted if component.type == 'string']) # HACK + + for i in range(0, len(messages), 2): + user_message = messages[i] + assistant_message = messages[i + 1] + + user_content = system_formatted_text + user_message["content"] if i == 0 else user_message["content"] + user_formatted = self.user_formatter.format(content=user_content) + assistant_formatted = self.assistant_formatter.format(content=assistant_message["content"]) + + user_encoded = self._encode_template(user_formatted, tokenizer) + assistant_encoded = self._encode_template(assistant_formatted, tokenizer) + + res_all.append(( + user_encoded, + assistant_encoded + )) + + return res_all
+
+ + + +
+[docs] +LLAMA3_TEMPLATE = ConversationTemplate( + template_name='llama3', + user_formatter=StringFormatter( + template=[ + TemplateComponent(type='string', content='<|start_header_id|>user<|end_header_id|>\n\n{{content}}<|eot_id|>') + ] + ), + assistant_formatter=StringFormatter( + template=[ + TemplateComponent(type='string', content='<|start_header_id|>assistant<|end_header_id|>\n\n{{content}}<|eot_id|>') + ] + ), + system_formatter=StringFormatter( + template=[ + TemplateComponent(type='string', content='<|start_header_id|>system<|end_header_id|>\n\n{{content}}<|eot_id|>') + ] + ), + special_starter=TemplateComponent(type='token', content='bos_token') +)
+ + + +
+[docs] +LLAMA2_TEMPLATE = Llama2ConversationTemplate( + template_name='llama2', + user_formatter=StringFormatter( + template=[ + TemplateComponent(type='token', content='bos_token'), + TemplateComponent(type='string', content='[INST] {{content}} [/INST]') + ] + ), + assistant_formatter=StringFormatter( + template=[ + TemplateComponent(type='string', content='{{content}}'), + TemplateComponent(type='token', content='eos_token') + ] + ), + system_formatter=StringFormatter( + template=[ + TemplateComponent(type='string', content='<<SYS>>\n{{content}}\n<</SYS>>\n\n') + ] + ) +)
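As the _encode override above shows, Llama2ConversationTemplate folds the formatted system block into the first user message instead of emitting a separate system turn. A rough sketch of the resulting first-round text (illustration only, not the LMFlow API; bos/eos tokens come from the tokenizer):

    # Illustration only: layout of the first llama2 round with a system message folded in.
    system_block = '<<SYS>>\nYou are a helpful assistant.\n<</SYS>>\n\n'
    first_user = '[INST] ' + system_block + 'Hello' + ' [/INST]'   # preceded by bos_token
    assistant = 'Hi there!'                                        # followed by eos_token
    print(first_user + assistant)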
\ No newline at end of file diff --git a/_modules/lmflow/utils/conversation_template/phi.html b/_modules/lmflow/utils/conversation_template/phi.html new file mode 100644 index 000000000..96f07b7c2 --- /dev/null +++ b/_modules/lmflow/utils/conversation_template/phi.html @@ -0,0 +1,494 @@
Source code for lmflow.utils.conversation_template.phi

+#!/usr/bin/env python
+# coding=utf-8
+# Copyright 2024 Statistics and Machine Learning Research Group. All rights reserved.
+from .base import StringFormatter, TemplateComponent, ConversationTemplate
+
+
+
+[docs] +PHI3_TEMPLATE = ConversationTemplate( + template_name='phi3', + user_formatter=StringFormatter( + template=[ + TemplateComponent(type='string', content='<|user|>\n{{content}}<|end|>\n') + ] + ), + assistant_formatter=StringFormatter( + template=[ + TemplateComponent(type='string', content='<|assistant|>\n{{content}}<|end|>\n') + ] + ), + system_formatter=StringFormatter( + template=[ + TemplateComponent(type='string', content='<|system|>\n{{content}}<|end|>\n') + ] + ), + special_starter=TemplateComponent(type='token', content='bos_token'), + special_stopper=TemplateComponent(type='token', content='eos_token') +)
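Unlike the templates above, phi3 declares both a bos_token special_starter and an eos_token special_stopper, so the rendered conversation is wrapped in the tokenizer's bos/eos tokens. A minimal sketch of one rendered round (illustration only):

    # Illustration only: one phi3-style round; the surrounding bos/eos strings come from the tokenizer.
    round_text = '<|user|>\nHello<|end|>\n' + '<|assistant|>\nHi there!<|end|>\n'
    print(round_text)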
\ No newline at end of file diff --git a/_modules/lmflow/utils/conversation_template/qwen.html b/_modules/lmflow/utils/conversation_template/qwen.html new file mode 100644 index 000000000..ccde81f92 --- /dev/null +++ b/_modules/lmflow/utils/conversation_template/qwen.html @@ -0,0 +1,493 @@
Source code for lmflow.utils.conversation_template.qwen

+#!/usr/bin/env python
+# coding=utf-8
+# Copyright 2024 Statistics and Machine Learning Research Group. All rights reserved.
+from .base import StringFormatter, TemplateComponent, ConversationTemplate
+
+
+
+[docs] +QWEN2_TEMPLATE = ConversationTemplate( + template_name='qwen2', + user_formatter=StringFormatter( + template=[ + TemplateComponent(type='string', content='<|im_start|>user\n{{content}}<|im_end|>\n') + ] + ), + assistant_formatter=StringFormatter( + template=[ + TemplateComponent(type='string', content='<|im_start|>assistant\n{{content}}<|im_end|>\n') + ] + ), + system_formatter=StringFormatter( + template=[ + TemplateComponent(type='string', content='<|im_start|>system\n{{content}}<|im_end|>\n') + ] + ), + separator=TemplateComponent(type='string', content='\n') +)
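The qwen2 template uses the same ChatML-style role markers but declares a '\n' separator component instead of a bos special_starter. Assuming the separator is emitted between conversation rounds (its exact placement is defined in base.py, not shown here), two rounds would look roughly like:

    # Illustration only, assuming the separator joins successive rounds.
    round_1 = '<|im_start|>user\nHello<|im_end|>\n<|im_start|>assistant\nHi!<|im_end|>\n'
    round_2 = '<|im_start|>user\nThanks<|im_end|>\n<|im_start|>assistant\nYou are welcome.<|im_end|>\n'
    print(round_1 + '\n' + round_2)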
\ No newline at end of file diff --git a/_modules/lmflow/utils/conversation_template/yi.html b/_modules/lmflow/utils/conversation_template/yi.html new file mode 100644 index 000000000..66f58ff96 --- /dev/null +++ b/_modules/lmflow/utils/conversation_template/yi.html @@ -0,0 +1,492 @@
Source code for lmflow.utils.conversation_template.yi

+#!/usr/bin/env python
+# coding=utf-8
+# Copyright 2024 Statistics and Machine Learning Research Group. All rights reserved.
+from .base import StringFormatter, TemplateComponent, ConversationTemplate
+
+
+
+[docs] +YI1_5_TEMPLATE = ConversationTemplate( + template_name='yi1_5', + user_formatter=StringFormatter( + template=[ + TemplateComponent(type='string', content='<|im_start|>user\n{{content}}<|im_end|>\n') + ] + ), + assistant_formatter=StringFormatter( + template=[ + TemplateComponent(type='string', content='<|im_start|>assistant\n{{content}}<|im_end|>\n') + ] + ), + system_formatter=StringFormatter( + template=[ + TemplateComponent(type='string', content='{{content}}') + ] + ) +)
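Note that yi1_5 differs from the other ChatML-style templates only in its system formatter: the system message is emitted verbatim ('{{content}}') rather than being wrapped in <|im_start|>system ... <|im_end|> markers. A small contrast sketch (illustration only; the exact position of the system text in the encoded conversation is handled by the base template, not shown here):

    # Illustration only: yi1_5 system text vs. the wrapped form used by internlm2/qwen2.
    yi_system = 'You are a concise assistant.'
    chatml_system = '<|im_start|>system\nYou are a concise assistant.<|im_end|>\n'
    print(yi_system, chatml_system, sep='\n')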
\ No newline at end of file diff --git a/_modules/lmflow/utils/conversation_template/zephyr.html b/_modules/lmflow/utils/conversation_template/zephyr.html new file mode 100644 index 000000000..d7fb27886 --- /dev/null +++ b/_modules/lmflow/utils/conversation_template/zephyr.html @@ -0,0 +1,550 @@
Source code for lmflow.utils.conversation_template.zephyr

+#!/usr/bin/env python
+# coding=utf-8
+# Copyright 2024 Statistics and Machine Learning Research Group. All rights reserved.
+import logging
+from typing import Dict, Set, Sequence, Literal, Union, List, Optional, Tuple
+
+from transformers import PreTrainedTokenizer
+
+from .base import StringFormatter, TemplateComponent, ConversationTemplate
+
+
+
+[docs] +logger = logging.getLogger(__name__)
+ + + +
+[docs] +class ZephyrConversationTemplate(ConversationTemplate): +
+[docs] + def _encode( + self, + tokenizer: PreTrainedTokenizer, + messages: List[Dict[str, str]], + system: Optional[str] = None, + tools: Optional[str] = None, + **kwargs + ) -> Sequence[Tuple[List[int], List[int]]]: + # TODO: truncation according to model max length + # TODO: make sure the last few tokens are "learnable", not masked with token_id = -100. + + res_all = [] + + system_formatted = self.system_formatter.format(content=system) if system else [] + system_encoded = self._encode_template(system_formatted, tokenizer) + + for i in range(0, len(messages), 2): + user_message = messages[i] + assistant_message = messages[i + 1] + + user_formatted = self.user_formatter.format(content=user_message["content"]) + if i == 0 and not system: + # when system is not provided, the first user message should not start with a newline + user_formatted[0].content = user_formatted[0].content.replace('\n', '', 1) + assistant_formatted = self.assistant_formatter.format(content=assistant_message["content"]) + + user_encoded = self._encode_template(user_formatted, tokenizer) + assistant_encoded = self._encode_template(assistant_formatted, tokenizer) + + res_all.append(( + system_encoded + user_encoded if i == 0 else user_encoded, + assistant_encoded + )) + + return res_all
+
+ + + +
+[docs] +ZEPHYR_TEMPLATE = ZephyrConversationTemplate( + template_name='zephyr', + user_formatter=StringFormatter( + template=[ + TemplateComponent(type='string', content='\n<|user|>\n{{content}}'), + TemplateComponent(type='token', content='eos_token') + ] + ), + assistant_formatter=StringFormatter( + template=[ + TemplateComponent(type='string', content='\n<|assistant|>\n{{content}}'), + TemplateComponent(type='token', content='eos_token') + ] + ), + system_formatter=StringFormatter( + template=[ + TemplateComponent(type='string', content='<|system|>\n{{content}}'), + TemplateComponent(type='token', content='eos_token') + ] + ) +)
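A small illustration of the behaviour handled explicitly in ZephyrConversationTemplate._encode above: each formatter begins with a newline, so when no system message is present the leading '\n' of the first user turn is stripped. Sketch only, with '</s>' standing in for the tokenizer's eos_token:

    # Illustration only: first zephyr round without a system message.
    first_user = '\n<|user|>\nHello'.replace('\n', '', 1) + '</s>'
    assistant = '\n<|assistant|>\nHi there!' + '</s>'
    print(first_user + assistant)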
\ No newline at end of file diff --git a/_modules/lmflow/utils/data_utils.html b/_modules/lmflow/utils/data_utils.html new file mode 100644 index 000000000..c53ebc707 --- /dev/null +++ b/_modules/lmflow/utils/data_utils.html @@ -0,0 +1,730 @@
Source code for lmflow.utils.data_utils

+"""The program includes several functions: setting a random seed, 
+loading data from a JSON file, batching data, and extracting answers from generated text.
+"""
+
+import random
+import numpy as np
+import torch
+import json
+import re
+from typing import Union, List, TypedDict, Dict
+
+
+
+[docs] +def set_random_seed(seed: int): + """ + Set the random seed for `random`, `numpy`, `torch`, `torch.cuda`. + + Parameters + ------------ + seed : int + The default seed. + + """ + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed)
+ + +
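A typical call site (illustration only): seed every backend once, before any data shuffling or weight initialization.

    set_random_seed(42)   # seeds random, numpy, torch and, if available, all CUDA devices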
+[docs] +def load_data(file_name: str): + """ + Load a dataset from a JSON file. + + Parameters + ------------ + file_name : str. + The dataset file name. + + Returns + ------------ + inputs : list. + The input texts of the dataset. + outputs : list. + The output texts of the dataset. + len : int. + The length of the dataset. + """ + inputs = [] + outputs = [] + type = "" + with open(file_name, encoding='utf-8') as f: + json_data = json.load(f) + type = json_data["type"] + for line in json_data["instances"]: + inputs.append(line["input"]) + outputs.append(line["output"]) + + print(f"loaded dataset {file_name} successfully.\n") + print(f"Type : {type}, datasize : {len(outputs)}") + + return inputs, outputs, len(outputs)
+ + +
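For reference, a minimal JSON file in the layout load_data expects: a top-level "type" plus an "instances" list of input/output pairs. The type string below is purely illustrative.

    import json
    example = {
        "type": "text2text",  # illustrative value; load_data only reads and reports this field
        "instances": [
            {"input": "What is 2 + 2?", "output": "4"},
        ],
    }
    with open("demo_dataset.json", "w", encoding="utf-8") as f:
        json.dump(example, f)
    inputs, outputs, size = load_data("demo_dataset.json")   # -> (["What is 2 + 2?"], ["4"], 1)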
+[docs] +def batchlize(examples: list, batch_size: int, random_shuffle: bool): + """ + Convert examples to a dataloader. + + Parameters + ------------ + examples : list. + Data list. + batch_size : int. + The number of examples per batch. + random_shuffle : bool + If true, the examples are shuffled before batching. + + Returns + ------------ + dataloader: + Dataloader with batch generator. + """ + size = 0 + dataloader = [] + length = len(examples) + if (random_shuffle): + random.shuffle(examples) + while size < length: + if length - size > batch_size: + dataloader.append(examples[size : size+batch_size]) + size += batch_size + else: + dataloader.append(examples[size : size+(length-size)]) + size += (length - size) + return dataloader
+ + + + +
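Example usage (illustration only): the final batch keeps the remainder rather than being dropped.

    batches = batchlize(list(range(5)), batch_size=2, random_shuffle=False)
    # batches == [[0, 1], [2, 3], [4]]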
+[docs] +def answer_extraction(response, answer_type=None): #use this funtion to extract answers from generated text + + """ + Use this funtion to extract answers from generated text + + Parameters + ------------ + args : + Arguments. + response : str + plain string response. + + + Returns + ------------ + answer: + Decoded answer (such as A, B, C, D, E for mutiple-choice QA). + """ + + # temp = response["generated_text"] + temp = response + if answer_type in ("gsm8k", "svamp", "asdiv", "addsub", "singleeq", "multiarith", "math"): + temp = temp.replace(",", "") + temp = [s for s in re.findall(r'-?\d+\.?\d*', temp)] + elif answer_type in ("aqua", "csqa", "multiple_choice"): + temp = re.findall(r'A|B|C|D|E', temp) + elif answer_type in ("strategyqa", "coin_flip"): + temp = temp.lower() + temp = re.sub("\"|\'|\n|\.|\s|\:|\,"," ", temp) + temp = temp.split(" ") + temp = [i for i in temp if i in ("yes", "no")] + elif answer_type in ("last_letters"): + temp = re.sub("\"|\'|\n|\.|\s","", temp) + temp = [temp] + elif answer_type in ("pubmedqa", "binary_choice"): + # pattern = "Output: (yes|no|maybe)" + # sttr = re.search(pattern, temp) + # answer = sttr.group(0)[8:] if sttr is not None else "N/A" + pattern = "(answer|Answer|ANSWER|output|Output|OUTPUT|A): \(*(yes|Yes|YES|no|No|NO|maybe|Maybe|MAYBE)" + sttr = re.search(pattern, temp) + if sttr is not None: + mid_answer = sttr.group(0) + mid_answer = mid_answer.split(":")[-1].strip() + answer = mid_answer.lower() + else: + pattern = "(yes|Yes|YES|no|No|NO|maybe|Maybe|MAYBE)(\.|\s)" + sttr = re.search(pattern, temp) + if sttr is not None: + answer = sttr.group(0)[:-1].lower() + else: + answer = "N/A" + return answer + elif answer_type == "medmcqa": + # pattern = "Output: (A|B|C|D)." + # sttr = re.search(pattern, temp) + # answer = sttr.group(0)[8:-1].lower() if sttr is not None else "N/A" + pattern = "(answer|Answer|ANSWER|output|Output|OUTPUT|A): \(*(A|B|C|D|a|b|c|d)" + sttr = re.search(pattern, temp) + if sttr is not None: + mid_answer = sttr.group(0) + answer = mid_answer[-1].lower() + else: + pattern = "\(*(A|B|C|D|a|b|c|d)\)*(\.|\s)" + sttr = re.search(pattern, temp) + if sttr is not None: + if '(' in sttr.group(0): + answer = sttr.group(0)[1].lower() + else: + answer = sttr.group(0)[0].lower() + else: + answer = "N/A" + return answer + + elif answer_type == "usmle": + # pattern = "Output: (A|B|C|D)." + # sttr = re.search(pattern, temp) + # answer = sttr.group(0)[8:-1].lower() if sttr is not None else "N/A" + pattern = "(Answer|Output|A): \(*(A|B|C|D|a|b|c|d)" + sttr = re.search(pattern, temp) + if sttr is not None: + mid_answer = sttr.group(0) + answer = mid_answer[-1].lower() + else: + pattern = "\(*(A|B|C|D|a|b|c|d)\)*(\.|\s)" + sttr = re.search(pattern, temp) + if sttr is not None: + if '(' in sttr.group(0): + answer = sttr.group(0)[1].lower() + else: + answer = sttr.group(0)[0].lower() + else: + answer = "N/A" + return answer + elif answer_type == "text": + return response + else: + raise NotImplementedError(f"Unsupported answer type: {answer_type}") + + if len(temp) != 0: + answer = temp[-1] + # if there is . at the end of answer, remove it + # e.g. answer = 64. 
+ if answer != "": + if answer[-1] == ".": + answer = answer[:-1] + + # round the answer to the nearest integer + if answer_type in ("gsm8k", "svamp"): + try: + answer = str(round(float(answer))) + except: + answer = "" # no solution, or the solution is not a valid number + elif answer_type in ("last_letters"): + try: + # NOTE: relies on an `args.concat_length` from an enclosing scope; if it is undefined, + # the except below falls back to an empty answer + answer = answer[-args.concat_length:] + except: + answer = "" + else: + answer = "" + return answer
+ + + +
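Two illustrative calls (not from the original docs): numeric answer types keep the last number found and round it, while multiple-choice types keep the last option letter.

    answer_extraction("6 + 6 = 12, so the answer is 12.", answer_type="gsm8k")              # -> "12"
    answer_extraction("I think the correct option is (B).", answer_type="multiple_choice")  # -> "B"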
+[docs] +def process_image_flag(text, image_flag="<ImageHere>"): + texts = text.split(image_flag) + if len(texts) > 1: + image_token_indexes = [len(text) for text in texts[:-1]] + else: + image_token_indexes = [] + # cumulative sum of the segment lengths gives the character offsets of the removed image flags + image_token_indexes = list(np.cumsum(image_token_indexes)) + texts = "".join(texts) + return texts, image_token_indexes
+ + + +
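Example (illustration only): the image placeholder is removed from the text and its character offset is returned so the vision tokens can be spliced in later.

    text, offsets = process_image_flag("Describe <ImageHere> briefly.")
    # text == "Describe  briefly.", offsets == [9]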
+[docs] +class VLLMInferenceResultWithInput(TypedDict): +
+[docs] + input: str
+ +
+[docs] + output: Union[List[str], List[List[int]]]
+
+ + + +
+[docs] +class RewardModelInferenceResultWithInput(TypedDict): +
+[docs] + input: str
+ +
+[docs] + output: List[Dict[str, Union[str, float]]] # [{"score": 0.5, "text": "output text"}]
+
+ +
\ No newline at end of file diff --git a/_modules/lmflow/utils/flash_attention/bloom_flash_attention.html b/_modules/lmflow/utils/flash_attention/bloom_flash_attention.html new file mode 100644 index 000000000..a8f9464c0 --- /dev/null +++ b/_modules/lmflow/utils/flash_attention/bloom_flash_attention.html @@ -0,0 +1,571 @@
Source code for lmflow.utils.flash_attention.bloom_flash_attention

+from typing import List, Optional, Tuple, Union
+
+import torch
+from torch import nn
+import torch.nn.functional as F
+
+import transformers
+from transformers.models.bloom.modeling_bloom import dropout_add
+
+from einops import rearrange
+
+from .triton_flash_attention import flash_attn_qkvpacked_func
+
+
+[docs] +def forward( + self, + hidden_states: torch.Tensor, + residual: torch.Tensor, + alibi: torch.Tensor, + attention_mask: torch.Tensor, + layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, + head_mask: Optional[torch.Tensor] = None, + use_cache: bool = False, + output_attentions: bool = False, + ): + dtype = hidden_states.dtype + fused_qkv = self.query_key_value(hidden_states) # [batch_size, seq_length, 3 x hidden_size] + + # 3 x [batch_size, seq_length, num_heads, head_dim] + (query_layer, key_layer, value_layer) = self._split_heads(fused_qkv) + + batch_size, q_length, _, _ = query_layer.shape + bsz, q_len = batch_size, q_length + + if layer_past is not None: + past_key, past_value = layer_past + # concatenate along seq_length dimension: + # - key: [batch_size * self.num_heads, head_dim, kv_length] + # - value: [batch_size * self.num_heads, kv_length, head_dim] + key_layer = torch.cat((past_key, key_layer), dim=2) + value_layer = torch.cat((past_value, value_layer), dim=1) + + if use_cache is True: + present = (key_layer, value_layer) + else: + present = None + + reshaped_alibi = rearrange(alibi, '(b h) one s-> b h one s', h = self.num_heads) + reshaped_alibi = reshaped_alibi * self.beta + + attention_mask = (1.0 - attention_mask) + attention_mask = attention_mask[:, None, None, :].bool() + reshaped_alibi_masked = reshaped_alibi.masked_fill(attention_mask, -1e9) + + reshaped_query_layer = query_layer + reshaped_key_layer = key_layer + reshaped_value_layer = value_layer + + qkv = torch.concat([reshaped_query_layer.unsqueeze(2), reshaped_key_layer.unsqueeze(2), reshaped_value_layer.unsqueeze(2)], dim = 2) + + output = flash_attn_qkvpacked_func( + qkv, reshaped_alibi_masked, True, self.inv_norm_factor + ) + + output = rearrange(output, 'b s h d -> (b h) s d') + + # change view [batch_size, num_heads, q_length, head_dim] + context_layer = self._merge_heads(output) + + # aggregate results across tp ranks. See here: https://github.com/pytorch/pytorch/issues/76232 + if self.pretraining_tp > 1 and self.slow_but_exact: + slices = self.hidden_size / self.pretraining_tp + output_tensor = torch.zeros_like(context_layer) + for i in range(self.pretraining_tp): + output_tensor = output_tensor + F.linear( + context_layer[:, :, int(i * slices) : int((i + 1) * slices)], + self.dense.weight[:, int(i * slices) : int((i + 1) * slices)], + ) + else: + output_tensor = self.dense(context_layer) + + output_tensor = dropout_add(output_tensor, residual, self.hidden_dropout, self.training) + + outputs = (output_tensor, present) + if output_attentions: + outputs += (context_layer,) + + return outputs
+ + + +# Disable the transformation of the attention mask in LlamaModel as the flash attention +# requires the attention mask to be the same as the key_padding_mask +
+[docs] +def _prepare_attn_mask( + self, attention_mask: torch.Tensor, input_shape: Tuple[int, int], past_key_values_length: int + ) -> torch.BoolTensor: + + return attention_mask
+ + +
+[docs] +def replace_bloom_attn_with_flash_attn(): + transformers.models.bloom.modeling_bloom.BloomModel._prepare_attn_mask = ( + _prepare_attn_mask + ) + transformers.models.bloom.modeling_bloom.BloomAttention.forward = forward
+ +
\ No newline at end of file diff --git a/_modules/lmflow/utils/flash_attention/gpt2_flash_attention.html b/_modules/lmflow/utils/flash_attention/gpt2_flash_attention.html new file mode 100644 index 000000000..141ccbb69 --- /dev/null +++ b/_modules/lmflow/utils/flash_attention/gpt2_flash_attention.html @@ -0,0 +1,605 @@
Source code for lmflow.utils.flash_attention.gpt2_flash_attention

+from typing import List, Optional, Tuple, Union
+
+import torch
+from torch import nn
+
+import transformers
+from transformers.models.llama.modeling_llama import apply_rotary_pos_emb
+
+from einops import rearrange
+
+#try to import flash_attn 2.x.x, if not, import flash_attn 1.x.x
+try:
+    from flash_attn.flash_attn_interface import flash_attn_varlen_qkvpacked_func as flash_attn_unpadded_qkvpacked_func
+except:
+    from flash_attn.flash_attn_interface import flash_attn_unpadded_qkvpacked_func
+
+from flash_attn.bert_padding import unpad_input, pad_input
+
+
+
+[docs] +def forward( + self, + hidden_states: Optional[Tuple[torch.FloatTensor]], + layer_past: Optional[Tuple[torch.Tensor]] = None, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = False, + output_attentions: Optional[bool] = False, + ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]: + + + if encoder_hidden_states is not None: + if not hasattr(self, "q_attn"): + raise ValueError( + "If class is used as cross attention, the weights `q_attn` have to be defined. " + "Please make sure to instantiate class with `GPT2Attention(..., is_cross_attention=True)`." + ) + + query = self.q_attn(hidden_states) + key, value = self.c_attn(encoder_hidden_states).split(self.split_size, dim=2) + attention_mask = encoder_attention_mask + else: + query, key, value = self.c_attn(hidden_states).split(self.split_size, dim=2) + + bsz, q_len, _ = hidden_states.size() + + query = self._split_heads(query, self.num_heads, self.head_dim) + key = self._split_heads(key, self.num_heads, self.head_dim) + value = self._split_heads(value, self.num_heads, self.head_dim) + + #TODO Should we support? + if layer_past is not None: + past_key, past_value = layer_past + key = torch.cat((past_key, key), dim=-2) + value = torch.cat((past_value, value), dim=-2) + + assert use_cache is False, "Use cache is not supported" + present = None + # if use_cache is True: + # present = (key, value) + # else: + # present = None + + assert self.reorder_and_upcast_attn is False, "reorder_and_upcast_attn is not supported yet" + + qkv = torch.stack([query, key, value], dim = 2) + qkv = qkv.transpose(1, 3) # [bsz, seq_len, 3, heads, hiddens_per_head] + + # breakpoint() + key_padding_mask = attention_mask + # key_padding_mask = None + # breakpoint() + if key_padding_mask is None: + qkv = rearrange(qkv, "b s ... -> (b s) ...") + max_s = q_len + cu_q_lens = torch.arange( + 0, (bsz + 1) * q_len, step=q_len, dtype=torch.int32, device=qkv.device + ) + output = flash_attn_unpadded_qkvpacked_func( + qkv, cu_q_lens, max_s, 0.0, softmax_scale=None, causal=True + ) + output = rearrange(output, "(b s) ... 
-> b s ...", b=bsz) + else: + # flip in flash attention + key_padding_mask = key_padding_mask.clone() + key_padding_mask = (1.0 - key_padding_mask) + key_padding_mask = key_padding_mask.squeeze(1).squeeze(1) + nheads = qkv.shape[-2] + x = rearrange(qkv, "b s three h d -> b s (three h d)") + x_unpad, indices, cu_q_lens, max_s = unpad_input(x, key_padding_mask) + x_unpad = rearrange( + x_unpad, "nnz (three h d) -> nnz three h d", three=3, h=nheads + ) + output_unpad = flash_attn_unpadded_qkvpacked_func( + x_unpad, cu_q_lens, max_s, 0.0, softmax_scale=None, causal=True + ) + output = rearrange( + pad_input( + rearrange(output_unpad, "nnz h d -> nnz (h d)"), indices, bsz, q_len + ), + "b s (h d) -> b s h d", + h=nheads, + ) + # if self.reorder_and_upcast_attn: + # attn_output, attn_weights = self._upcast_and_reordered_attn(query, key, value, attention_mask, head_mask) + # else: + # attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask) + output = rearrange(output, 'b s h d -> b h s d') + attn_output = self._merge_heads(output, self.num_heads, self.head_dim) + attn_output = self.c_proj(attn_output) + attn_output = self.resid_dropout(attn_output) + + outputs = (attn_output, present) + + assert output_attentions is False, "output attentions is not supported yet" + # if output_attentions: + # outputs += (attn_weights,) + + return outputs # a, present, (attentions)
+ + + +# Disable the transformation of the attention mask in LlamaModel as the flash attention +# requires the attention mask to be the same as the key_padding_mask +
+[docs] +def _prepare_decoder_attention_mask( + self, attention_mask, input_shape, inputs_embeds, past_key_values_length +): + # [bsz, seq_len] + return attention_mask
+ + + +
+[docs] +def replace_gpt2_attn_with_flash_attn(): + # transformers.models.gpt2.modeling_gpt2.LlamaModel._prepare_decoder_attention_mask = ( + # _prepare_decoder_attention_mask + # ) + transformers.models.gpt2.modeling_gpt2.GPT2Attention.forward = forward
\ No newline at end of file diff --git a/_modules/lmflow/utils/flash_attention/gpt_neo_flash_attention.html b/_modules/lmflow/utils/flash_attention/gpt_neo_flash_attention.html new file mode 100644 index 000000000..35cec530f --- /dev/null +++ b/_modules/lmflow/utils/flash_attention/gpt_neo_flash_attention.html @@ -0,0 +1,569 @@
Source code for lmflow.utils.flash_attention.gpt_neo_flash_attention

+from typing import List, Optional, Tuple
+
+import torch
+import transformers
+from einops import rearrange
+
+#try to import flash_attn 2.x.x, if not, import flash_attn 1.x.x
+try:
+    from flash_attn.flash_attn_interface import flash_attn_varlen_qkvpacked_func as flash_attn_unpadded_qkvpacked_func
+except:
+    from flash_attn.flash_attn_interface import flash_attn_unpadded_qkvpacked_func
+
+from flash_attn.bert_padding import unpad_input, pad_input
+
+
+[docs] +def _attn(self, query, key, value, attention_mask=None, head_mask=None): + # (batch, head, seq_length, head_features) + query = query.to(torch.bfloat16) + key = key.to(torch.bfloat16) + query = query * torch.sqrt(torch.tensor(self.head_dim)) + qkv = torch.stack( + [query, key, value], dim=2 + )# [bsz, nh, 3, t, hd] + qkv = qkv.transpose(1,3)## [bsz, q_len, 3, nh, hd] + bsz = qkv.shape[0] + q_len = qkv.shape[1] + + attention_mask = torch.where(attention_mask == -0.0, True, False) + key_padding_mask = rearrange(attention_mask, "b () () s -> b s") if attention_mask is not None else None + if key_padding_mask is None: + qkv = rearrange(qkv, "b s ... -> (b s) ...") + max_s = q_len + cu_q_lens = torch.arange( + 0, (bsz + 1) * q_len, step=q_len, dtype=torch.int32, device=qkv.device + ) + output = flash_attn_unpadded_qkvpacked_func( + qkv, cu_q_lens, max_s, self.attn_dropout.p if self.training else 0.0 , softmax_scale=None, causal=True + )# attention compute + output = rearrange(output, "(b s) ... -> b s ...", b=bsz) + else: + nheads = qkv.shape[-2] + x = rearrange(qkv, "b s three h d -> b s (three h d)") + x_unpad, indices, cu_q_lens, max_s = unpad_input(x, key_padding_mask) + x_unpad = rearrange( + x_unpad, "nnz (three h d) -> nnz three h d", three=3, h=nheads + ) + output_unpad = flash_attn_unpadded_qkvpacked_func( + x_unpad, cu_q_lens, max_s, self.attn_dropout.p if self.training else 0.0, softmax_scale=None, causal=True + ) + output = rearrange( + pad_input( + rearrange(output_unpad, "nnz h d -> nnz (h d)"), indices, bsz, q_len + ), + "b s (h d) -> b s h d", + h=nheads, + ) + + return output, None
+ + +
+[docs] +def forward( + self, + hidden_states, + attention_mask=None, + layer_past=None, + head_mask=None, + use_cache=False, + output_attentions=False, + ): + + assert head_mask is None, "head_mask is not supported" + assert not output_attentions, "output_attentions is not supported" + assert not use_cache, "use_cache is not supported" + + query = self.q_proj(hidden_states) + key = self.k_proj(hidden_states) + value = self.v_proj(hidden_states) + + query = self._split_heads(query, self.num_heads, self.head_dim) + key = self._split_heads(key, self.num_heads, self.head_dim) + value = self._split_heads(value, self.num_heads, self.head_dim) + + if layer_past is not None: + past_key = layer_past[0] + past_value = layer_past[1] + key = torch.cat((past_key, key), dim=-2) + value = torch.cat((past_value, value), dim=-2) + + present = None + attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask) + new_shape = attn_output.size()[:-2] + (self.num_heads * self.head_dim,) + attn_output = attn_output.view(new_shape) + attn_output = self.out_proj(attn_output) + attn_output = self.resid_dropout(attn_output) + + outputs = (attn_output, present) + + return outputs # a, present, (attentions)
+ + +
+[docs] +def replace_gpt_neo_attn_with_flash_attn(): + transformers.models.gpt_neo.modeling_gpt_neo.GPTNeoSelfAttention._attn = _attn + transformers.models.gpt_neo.modeling_gpt_neo.GPTNeoSelfAttention.forward = forward
+ +
\ No newline at end of file diff --git a/_modules/lmflow/utils/flash_attention/llama_flash_attention.html b/_modules/lmflow/utils/flash_attention/llama_flash_attention.html new file mode 100644 index 000000000..803e31f67 --- /dev/null +++ b/_modules/lmflow/utils/flash_attention/llama_flash_attention.html @@ -0,0 +1,593 @@
Source code for lmflow.utils.flash_attention.llama_flash_attention

+from typing import List, Optional, Tuple
+
+import torch
+from torch import nn
+import math
+
+import transformers
+from transformers.models.llama.modeling_llama import apply_rotary_pos_emb,_make_causal_mask,_expand_mask
+
+from einops import rearrange
+
+#try to import flash_attn 2.x.x, if not, import flash_attn 1.x.x
+try:
+    from flash_attn.flash_attn_interface import flash_attn_func
+except:
+    from flash_attn.flash_attn_interface import flash_attn_unpadded_qkvpacked_func as flash_attn_func
+
+from flash_attn.bert_padding import unpad_input, pad_input
+
+
+
+[docs] +def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: bool = False, + use_cache: bool = False, +) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + bsz, q_len, _ = hidden_states.size() + + if self.config.pretraining_tp > 1: + raise ValueError("pretraining_tp > 1 is not supported for flash attention") + else: + query_states = self.q_proj(hidden_states) + key_states = self.k_proj(hidden_states) + value_states = self.v_proj(hidden_states) + + query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) + key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + + kv_seq_len = key_states.shape[-2] + + if past_key_value is not None: + kv_seq_len += past_key_value[0].shape[-2] + cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) + query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) + + if past_key_value is not None: + # reuse k, v, self_attention + key_states = torch.cat([past_key_value[0], key_states], dim=2) + value_states = torch.cat([past_key_value[1], value_states], dim=2) + + past_key_value = (key_states, value_states) if use_cache else None + + query_states, key_states, value_states = [ + rearrange(x, "b h s d -> b s h d") for x in [query_states, key_states, value_states] + ] + + input_dtype = query_states.dtype + if input_dtype == torch.float32: + # Handle the case where the model is quantized + if hasattr(self.config, "_pre_quantization_dtype"): + target_dtype = self.config._pre_quantization_dtype + else: + target_dtype = self.q_proj.weight.dtype + + query_states = query_states.to(target_dtype) + key_states = key_states.to(target_dtype) + value_states = value_states.to(target_dtype) + + # below output will have shape (batch_size, seqlen, nheads, headdim) + attn_output = flash_attn_func(query_states, key_states, value_states, causal=True) + + if attn_output.size() != (bsz, q_len, self.num_heads, self.head_dim): + raise ValueError( + f"`attn_output` should be of size {(bsz, q_len, self.num_heads, self.head_dim)}, but is" + f" {attn_output.size()}" + ) + + attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) + attn_output = self.o_proj(attn_output) + if output_attentions: + raise NotImplementedError("`output_attentions` is not supported when `use_flash_attn` is True") + attn_weights = None + + return attn_output, attn_weights, past_key_value
+ + + +# Disable the transformation of the attention mask in LlamaModel as the flash attention +# requires the attention mask to be the same as the key_padding_mask +
+[docs] +def _prepare_decoder_attention_mask( + self, attention_mask, input_shape, inputs_embeds, past_key_values_length +): + # [bsz, seq_len] + if input_shape[-1] > 1 and past_key_values_length == 0: # encode + return attention_mask + # create causal mask + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + combined_attention_mask = None + if input_shape[-1] > 1: + combined_attention_mask = _make_causal_mask( + input_shape, + inputs_embeds.dtype, + device=inputs_embeds.device, + past_key_values_length=past_key_values_length, + ) + + if attention_mask is not None: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to( + inputs_embeds.device + ) + combined_attention_mask = ( + expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask + ) + + return combined_attention_mask
+ + + +
+[docs] +def replace_llama_attn_with_flash_attn(): + transformers.models.llama.modeling_llama.LlamaModel._prepare_decoder_attention_mask = _prepare_decoder_attention_mask + transformers.models.llama.modeling_llama.LlamaAttention.forward = forward
+ +
\ No newline at end of file diff --git a/_modules/lmflow/utils/flash_attention/triton_flash_attention.html b/_modules/lmflow/utils/flash_attention/triton_flash_attention.html new file mode 100644 index 000000000..04222a84d --- /dev/null +++ b/_modules/lmflow/utils/flash_attention/triton_flash_attention.html @@ -0,0 +1,1352 @@
Source code for lmflow.utils.flash_attention.triton_flash_attention

+"""
+*Experimental* implementation of FlashAttention in Triton.
+Tested with triton==2.0.0.dev20221202.
+Triton 2.0 has a new backend (MLIR) but seems like it doesn't yet work for head dimensions
+other than 64:
+https://github.com/openai/triton/blob/d376020f90002757eea3ea9475d4f7cfc2ec5ead/python/triton/ops/flash_attention.py#L207
+We'll update this implementation with the new Triton backend once this is fixed.
+
+We use the FlashAttention implementation from Phil Tillet as a starting point.
+https://github.com/openai/triton/blob/master/python/tutorials/06-fused-attention.py
+
+Changes:
+- Implement both causal and non-causal attention.
+- Implement both self-attention and cross-attention.
+- Support arbitrary seqlens (not just multiples of 128), for both forward and backward.
+- Support all head dimensions up to 128 (not just 16, 32, 64, 128), for both forward and backward.
+- Support attention bias.
+- Speed up the forward pass a bit, and only store the LSE instead of m and l.
+- Make the backward for d=128 much faster by reducing register spilling.
+- Optionally parallelize the backward pass across seqlen_k, to deal with the case of
+small batch size * nheads.
+
+Caution:
+- This is an *experimental* implementation. The forward pass should be quite robust but
+I'm not 100% sure that the backward pass doesn't have race conditions (due to the Triton compiler).
+- This implementation has only been tested on A100.
+- If you plan to use headdim other than 64 and 128, you should test for race conditions
+(due to the Triton compiler), as done in tests/test_flash_attn.py
+"test_flash_attn_triton_race_condition". I've tested and fixed many race conditions
+for different head dimensions (40, 48, 64, 128, 80, 88, 96), but I'm still not 100% confident
+that there are none left for other head dimensions.
+
+Differences between this Triton version and the CUDA version:
+- Triton version doesn't support dropout.
+- Triton forward is generally faster than CUDA forward, while Triton backward is
+generally slower than CUDA backward. Overall Triton forward + backward is slightly slower
+than CUDA forward + backward.
+- Triton version doesn't support different sequence lengths in a batch (i.e., RaggedTensor/NestedTensor).
+- Triton version supports attention bias, while CUDA version doesn't.
+"""
+
+import math
+
+import torch
+
+import triton
+import triton.language as tl
+
+
+# Disabling autotune for now, set num_warps=4 if headdim=64 and num_warps=8 if headdim=128
+# @triton.autotune(
+#     configs=[
+#         triton.Config({"BLOCK_M": 128, "BLOCK_N": 128}, num_warps=4, num_stages=1),
+#         # This config has a race condition when EVEN_M == False, disabling it for now.
+#         # triton.Config({"BLOCK_M": 64, "BLOCK_N": 64}, num_warps=4, num_stages=1),
+#     ],
+#     key=['CACHE_KEY_SEQLEN_Q', 'CACHE_KEY_SEQLEN_K', 'BIAS_TYPE', 'IS_CAUSAL', 'BLOCK_HEADDIM']
+# )
+@triton.heuristics(
+    {
+        "EVEN_M": lambda args: args["seqlen_q"] % args["BLOCK_M"] == 0,
+        "EVEN_N": lambda args: args["seqlen_k"] % args["BLOCK_N"] == 0,
+        "EVEN_HEADDIM": lambda args: args["headdim"] == args["BLOCK_HEADDIM"],
+    }
+)
+@triton.jit
+
+[docs] +def _fwd_kernel( + Q, K, V, Bias, Out, + Lse, TMP, # NOTE: TMP is a scratchpad buffer to workaround a compiler bug + softmax_scale, + stride_qb, stride_qh, stride_qm, + stride_kb, stride_kh, stride_kn, + stride_vb, stride_vh, stride_vn, + stride_bb, stride_bh, stride_bm, + stride_ob, stride_oh, stride_om, + nheads, seqlen_q, seqlen_k, seqlen_q_rounded, headdim, + CACHE_KEY_SEQLEN_Q, CACHE_KEY_SEQLEN_K, + BIAS_TYPE: tl.constexpr, + IS_CAUSAL: tl.constexpr, + BLOCK_HEADDIM: tl.constexpr, + EVEN_M: tl.constexpr, EVEN_N: tl.constexpr, EVEN_HEADDIM: tl.constexpr, + BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, +): + start_m = tl.program_id(0) + off_hb = tl.program_id(1) + off_b = off_hb // nheads + off_h = off_hb % nheads + # off_b = tl.program_id(1) + # off_h = tl.program_id(2) + # off_hb = off_b * nheads + off_h + # initialize offsets + offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M) + offs_n = tl.arange(0, BLOCK_N) + offs_d = tl.arange(0, BLOCK_HEADDIM) + # Initialize pointers to Q, K, V + # Adding parenthesis around indexing might use int32 math instead of int64 math? + # https://github.com/openai/triton/issues/741 + # I'm seeing a tiny bit of difference (5-7us) + q_ptrs = Q + off_b * stride_qb + off_h * stride_qh + (offs_m[:, None] * stride_qm + offs_d[None, :]) + k_ptrs = K + off_b * stride_kb + off_h * stride_kh + (offs_n[:, None] * stride_kn + offs_d[None, :]) + v_ptrs = V + off_b * stride_vb + off_h * stride_vh + (offs_n[:, None] * stride_vn + offs_d[None, :]) + if BIAS_TYPE == 'vector': + b_ptrs = Bias + off_b * stride_bb + off_h * stride_bh + offs_n + elif BIAS_TYPE == 'matrix': + b_ptrs = Bias + off_b * stride_bb + off_h * stride_bh + (offs_m[:, None] * stride_bm + offs_n[None, :]) + # initialize pointer to m and l + t_ptrs = TMP + off_hb * seqlen_q_rounded + offs_m + lse_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf") + m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf") + acc_o = tl.zeros([BLOCK_M, BLOCK_HEADDIM], dtype=tl.float32) + # load q: it will stay in SRAM throughout + # [2022-10-30] TD: Triton bug - in the case of EVEN_M=True and EVEN_N=False, if we just call + # tl.load(q_ptrs), we get the wrong output! 
+ if EVEN_M & EVEN_N: + if EVEN_HEADDIM: + q = tl.load(q_ptrs) + else: + q = tl.load(q_ptrs, mask=offs_d[None, :] < headdim, other=0.0) + else: + if EVEN_HEADDIM: + q = tl.load(q_ptrs, mask=offs_m[:, None] < seqlen_q, other=0.0) + else: + q = tl.load(q_ptrs, mask=(offs_m[:, None] < seqlen_q) & (offs_d[None, :] < headdim), + other=0.0) + # loop over k, v and update accumulator + end_n = seqlen_k if not IS_CAUSAL else tl.minimum((start_m + 1) * BLOCK_M, seqlen_k) + for start_n in range(0, end_n, BLOCK_N): + start_n = tl.multiple_of(start_n, BLOCK_N) + # -- compute qk ---- + if EVEN_N & EVEN_M: # If we just do "if EVEN_N", there seems to be some race condition + if EVEN_HEADDIM: + k = tl.load(k_ptrs + start_n * stride_kn) + else: + k = tl.load(k_ptrs + start_n * stride_kn, mask=offs_d[None, :] < headdim, other=0.0) + else: + if EVEN_HEADDIM: + k = tl.load(k_ptrs + start_n * stride_kn, mask=(start_n + offs_n)[:, None] < seqlen_k, + other=0.0) + else: + k = tl.load(k_ptrs + start_n * stride_kn, + mask=((start_n + offs_n)[:, None] < seqlen_k) & (offs_d[None, :] < headdim), + other=0.0) + qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) + qk += tl.dot(q, k, trans_b=True) + # Trying to combine the two masks seem to make the result wrong + if not EVEN_N: # Need to mask out otherwise the softmax is wrong + qk += tl.where((start_n + offs_n)[None, :] < seqlen_k, 0, float("-inf")) + if IS_CAUSAL: + qk += tl.where(offs_m[:, None] >= (start_n + offs_n)[None, :], 0, float("-inf")) + if BIAS_TYPE != 'none': + if BIAS_TYPE == 'vector': + if EVEN_N: + bias = tl.load(b_ptrs + start_n).to(tl.float32) + else: + bias = tl.load(b_ptrs + start_n, mask=(start_n + offs_n) < seqlen_k, other=0.0).to(tl.float32) + bias = bias[None, :] + elif BIAS_TYPE == 'matrix': + if EVEN_M & EVEN_N: + bias = tl.load(b_ptrs + start_n).to(tl.float32) + else: + bias = tl.load(b_ptrs + start_n, + mask=(offs_m[:, None] < seqlen_q) + & ((start_n + offs_n)[None, :] < seqlen_k), + other=0.0).to(tl.float32) + # Slightly faster to multiply the softmax_scale in the tl.exp below since the compiler + # can then fuse the mult and add into an fma instruction. But if we have bias we need to + # to multiply with softmax_scale here. 
+ qk = qk * softmax_scale + bias + m_ij = tl.maximum(tl.max(qk, 1), lse_i) + p = tl.exp(qk - m_ij[:, None]) + else: + m_ij = tl.maximum(tl.max(qk, 1) * softmax_scale, lse_i) + p = tl.exp(qk * softmax_scale - m_ij[:, None]) + l_ij = tl.sum(p, 1) + + # scale acc_o + acc_o_scale = tl.exp(m_i - m_ij) + + # # -- update output accumulator -- + # BUG: have to store and immediately load + tl.store(t_ptrs, acc_o_scale) + acc_o_scale = tl.load(t_ptrs) + acc_o = acc_o * acc_o_scale[:, None] + # update acc_o + if EVEN_N & EVEN_M: # If we just do "if EVEN_N", there seems to be some race condition + if EVEN_HEADDIM: + v = tl.load(v_ptrs + start_n * stride_vn) + else: + v = tl.load(v_ptrs + start_n * stride_vn, mask=offs_d[None, :] < headdim, other=0.0) + else: + if EVEN_HEADDIM: + v = tl.load(v_ptrs + start_n * stride_vn, mask=(start_n + offs_n)[:, None] < seqlen_k, + other=0.0) + else: + v = tl.load(v_ptrs + start_n * stride_vn, + mask=((start_n + offs_n)[:, None] < seqlen_k) & (offs_d[None, :] < headdim), + other=0.0) + p = p.to(v.dtype) + acc_o += tl.dot(p, v) + + # -- update statistics + m_i = m_ij + l_i_new = tl.exp(lse_i - m_ij) + l_ij + lse_i = m_ij + tl.log(l_i_new) + + o_scale = tl.exp(m_i - lse_i) + # BUG: have to store and immediately load + tl.store(t_ptrs, o_scale) + o_scale = tl.load(t_ptrs) + acc_o = acc_o * o_scale[:, None] + # rematerialize offsets to save registers + start_m = tl.program_id(0) + offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M) + # write back l and m + lse_ptrs = Lse + off_hb * seqlen_q_rounded + offs_m + tl.store(lse_ptrs, lse_i) + # initialize pointers to output + offs_d = tl.arange(0, BLOCK_HEADDIM) + out_ptrs = Out + off_b * stride_ob + off_h * stride_oh + (offs_m[:, None] * stride_om + offs_d[None, :]) + if EVEN_M: + if EVEN_HEADDIM: + tl.store(out_ptrs, acc_o) + else: + tl.store(out_ptrs, acc_o, mask=offs_d[None, :] < headdim) + else: + if EVEN_HEADDIM: + tl.store(out_ptrs, acc_o, mask=offs_m[:, None] < seqlen_q) + else: + tl.store(out_ptrs, acc_o, + mask=(offs_m[:, None] < seqlen_q) & (offs_d[None, :] < headdim))
+ + + +@triton.jit +
+[docs] +def _bwd_preprocess_do_o_dot( + Out, DO, Delta, + stride_ob, stride_oh, stride_om, + stride_dob, stride_doh, stride_dom, + nheads, seqlen_q, seqlen_q_rounded, headdim, + BLOCK_M: tl.constexpr, BLOCK_HEADDIM: tl.constexpr, +): + start_m = tl.program_id(0) + off_hb = tl.program_id(1) + off_b = off_hb // nheads + off_h = off_hb % nheads + # initialize offsets + offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M) + offs_d = tl.arange(0, BLOCK_HEADDIM) + # load + o = tl.load(Out + off_b * stride_ob + off_h * stride_oh + offs_m[:, None] * stride_om + offs_d[None, :], + mask=(offs_m[:, None] < seqlen_q) & (offs_d[None, :] < headdim), other=0.0).to(tl.float32) + do = tl.load(DO + off_b * stride_dob + off_h * stride_doh + offs_m[:, None] * stride_dom + offs_d[None, :], + mask=(offs_m[:, None] < seqlen_q) & (offs_d[None, :] < headdim), other=0.0).to(tl.float32) + delta = tl.sum(o * do, axis=1) + # write-back + tl.store(Delta + off_hb * seqlen_q_rounded + offs_m, delta)
+ + + +@triton.jit +
+[docs] +def _bwd_store_dk_dv( + dk_ptrs, dv_ptrs, dk, dv, offs_n, offs_d, seqlen_k, headdim, + EVEN_M: tl.constexpr, EVEN_N: tl.constexpr, EVEN_HEADDIM: tl.constexpr, +): + # [2022-11-01] TD: Same bug. In the case of EVEN_N=True and EVEN_M=False, + # if we just call tl.store(dv_ptrs), there's a race condition + if EVEN_N & EVEN_M: + if EVEN_HEADDIM: + tl.store(dv_ptrs, dv) + tl.store(dk_ptrs, dk) + else: + tl.store(dv_ptrs, dv, mask=offs_d[None, :] < headdim) + tl.store(dk_ptrs, dk, mask=offs_d[None, :] < headdim) + else: + if EVEN_HEADDIM: + tl.store(dv_ptrs, dv, mask=offs_n[:, None] < seqlen_k) + tl.store(dk_ptrs, dk, mask=offs_n[:, None] < seqlen_k) + else: + tl.store(dv_ptrs, dv, mask=(offs_n[:, None] < seqlen_k) & (offs_d[None, :] < headdim)) + tl.store(dk_ptrs, dk, mask=(offs_n[:, None] < seqlen_k) & (offs_d[None, :] < headdim))
+ + + +@triton.jit +
+[docs] +def _bwd_kernel_one_col_block( + start_n, + Q, K, V, Bias, + DO, DQ, DK, DV, + LSE, D, + softmax_scale, + stride_qm, stride_kn, stride_vn, stride_bm, + stride_dom, stride_dqm, stride_dkn, stride_dvn, + seqlen_q, seqlen_k, headdim, + ATOMIC_ADD: tl.constexpr, + BIAS_TYPE: tl.constexpr, + IS_CAUSAL: tl.constexpr, + BLOCK_HEADDIM: tl.constexpr, + EVEN_M: tl.constexpr, EVEN_N: tl.constexpr, EVEN_HEADDIM: tl.constexpr, + BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, +): + # We need to make sure begin_m is a multiple of BLOCK_M (not BLOCK_N) + begin_m = 0 if not IS_CAUSAL else ((start_n * BLOCK_N) // BLOCK_M) * BLOCK_M + # initialize row/col offsets + offs_qm = begin_m + tl.arange(0, BLOCK_M) + offs_n = start_n * BLOCK_N + tl.arange(0, BLOCK_N) + offs_m = tl.arange(0, BLOCK_M) + offs_d = tl.arange(0, BLOCK_HEADDIM) + # initialize pointers to value-like data + q_ptrs = Q + (offs_qm[:, None] * stride_qm + offs_d[None, :]) + k_ptrs = K + (offs_n[:, None] * stride_kn + offs_d[None, :]) + v_ptrs = V + (offs_n[:, None] * stride_vn + offs_d[None, :]) + do_ptrs = DO + (offs_qm[:, None] * stride_dom + offs_d[None, :]) + dq_ptrs = DQ + (offs_qm[:, None] * stride_dqm + offs_d[None, :]) + if BIAS_TYPE == 'vector': + b_ptrs = Bias + offs_n + elif BIAS_TYPE == 'matrix': + b_ptrs = Bias + (offs_qm[:, None] * stride_bm + offs_n[None, :]) + # initialize dv and dk + dv = tl.zeros([BLOCK_N, BLOCK_HEADDIM], dtype=tl.float32) + dk = tl.zeros([BLOCK_N, BLOCK_HEADDIM], dtype=tl.float32) + # There seems to be some problem with Triton pipelining that makes results wrong for + # headdim=64, seqlen=(113, 255), bias_type='matrix'. In this case the for loop + # may have zero step, and pipelining with the bias matrix could screw it up. + # So we just exit early. + if begin_m >= seqlen_q: + dv_ptrs = DV + (offs_n[:, None] * stride_dvn + offs_d[None, :]) + dk_ptrs = DK + (offs_n[:, None] * stride_dkn + offs_d[None, :]) + _bwd_store_dk_dv(dk_ptrs, dv_ptrs, dk, dv, offs_n, offs_d, seqlen_k, headdim, + EVEN_M=EVEN_M, EVEN_N=EVEN_N, EVEN_HEADDIM=EVEN_HEADDIM) + return + # k and v stay in SRAM throughout + # [2022-10-30] TD: Same bug as the fwd. In the case of EVEN_N=True and EVEN_M=False, + # if we just call tl.load(k_ptrs), we get the wrong output! + if EVEN_N & EVEN_M: + if EVEN_HEADDIM: + k = tl.load(k_ptrs) + v = tl.load(v_ptrs) + else: + k = tl.load(k_ptrs, mask=offs_d[None, :] < headdim, other=0.0) + v = tl.load(v_ptrs, mask=offs_d[None, :] < headdim, other=0.0) + else: + if EVEN_HEADDIM: + k = tl.load(k_ptrs, mask=offs_n[:, None] < seqlen_k, other=0.0) + v = tl.load(v_ptrs, mask=offs_n[:, None] < seqlen_k, other=0.0) + else: + k = tl.load(k_ptrs, mask=(offs_n[:, None] < seqlen_k) & (offs_d[None, :] < headdim), + other=0.0) + v = tl.load(v_ptrs, mask=(offs_n[:, None] < seqlen_k) & (offs_d[None, :] < headdim), + other=0.0) + # loop over rows + num_block_m = tl.cdiv(seqlen_q, BLOCK_M) + for start_m in range(begin_m, num_block_m * BLOCK_M, BLOCK_M): + start_m = tl.multiple_of(start_m, BLOCK_M) + offs_m_curr = start_m + offs_m + # load q, k, v, do on-chip + # Same bug as below. 
Otherwise gives wrong result for headdim=40, seqlen=(128, 117) + if EVEN_M & EVEN_HEADDIM: + q = tl.load(q_ptrs) + else: + if EVEN_HEADDIM: + q = tl.load(q_ptrs, mask=offs_m_curr[:, None] < seqlen_q, other=0.0) + else: + q = tl.load(q_ptrs, mask=(offs_m_curr[:, None] < seqlen_q) + & (offs_d[None, :] < headdim), other=0.0) + # recompute p = softmax(qk, dim=-1).T + qk = tl.dot(q, k, trans_b=True) + # Trying to combine the two masks seem to make the result wrong + if not EVEN_N: # Need to mask out otherwise the softmax is wrong + qk = tl.where(offs_n[None, :] < seqlen_k, qk, float("-inf")) + if IS_CAUSAL: + qk = tl.where(offs_m_curr[:, None] >= (offs_n[None, :]), qk, float("-inf")) + if BIAS_TYPE != 'none': + tl.debug_barrier() # Race condition otherwise + if BIAS_TYPE == 'vector': + if EVEN_N: + bias = tl.load(b_ptrs).to(tl.float32) + else: + bias = tl.load(b_ptrs, mask=offs_n < seqlen_k, other=0.0).to(tl.float32) + bias = bias[None, :] + elif BIAS_TYPE == 'matrix': + if EVEN_M & EVEN_N: + bias = tl.load(b_ptrs).to(tl.float32) + else: + bias = tl.load(b_ptrs, + mask=(offs_m_curr[:, None] < seqlen_q) + & (offs_n[None, :] < seqlen_k), + other=0.0).to(tl.float32) + qk = qk * softmax_scale + bias + # There seems to be a race condition when headdim=48/96, and dq, dk, dv are wrong. + # Also wrong for headdim=64. + if not (EVEN_M & EVEN_HEADDIM): + tl.debug_barrier() + lse_i = tl.load(LSE + offs_m_curr) + if BIAS_TYPE == 'none': + p = tl.exp(qk * softmax_scale - lse_i[:, None]) + else: + p = tl.exp(qk - lse_i[:, None]) + # compute dv + # [2022-10-30] TD: A Triton bug: if EVEN_M=True and EVEN_HEADDIM=False, if we call + # do = tl.load(do_ptrs, mask=offs_d[None, :] < headdim, other=0.0), we get wrong outputs + # in the case of headdim=48/96, seqlen_q & seqlen_k >= 512. If headdim=40 or seqlen < 512, + # the output is correct. + if EVEN_M & EVEN_HEADDIM: + do = tl.load(do_ptrs) + else: + # [2022-11-01] TD: Triton bug, there's a race condition if we just use m_mask and not d_mask. + do = tl.load(do_ptrs, mask=(offs_m_curr[:, None] < seqlen_q) + & (offs_d[None, :] < headdim), other=0.0) + # if EVEN_M: + # if EVEN_HEADDIM: + # do = tl.load(do_ptrs) + # else: + # do = tl.load(do_ptrs, mask=offs_d[None, :] < headdim, other=0.0) + # else: + # if EVEN_HEADDIM: + # do = tl.load(do_ptrs, mask=offs_m_curr[:, None] < seqlen_q, other=0.0) + # else: + # do = tl.load(do_ptrs, mask=(offs_m_curr[:, None] < seqlen_q) + # & (offs_d[None, :] < headdim), other=0.0) + dv += tl.dot(p.to(do.dtype), do, trans_a=True) + # compute dp = dot(v, do) + # There seems to be a race condition when headdim=48/96, and dq, dk are wrong. 
+ # Also wrong for headdim=128, seqlen=(108, 256), and ATOMIC_ADD=True + # Also wrong for headdim=64, seqlen=(1023, 1024), and ATOMIC_ADD=False + if not (EVEN_M & EVEN_HEADDIM): + tl.debug_barrier() + dp = tl.dot(do, v, trans_b=True) + # There's a race condition for headdim=48 + if not EVEN_HEADDIM: + tl.debug_barrier() + # compute ds = p * (dp - delta[:, None]) + # Putting the subtraction after the dp matmul (instead of before) is slightly faster + Di = tl.load(D + offs_m_curr) + # Converting ds to q.dtype here reduces register pressure and makes it much faster + # for BLOCK_HEADDIM=128 + ds = (p * (dp - Di[:, None]) * softmax_scale).to(q.dtype) + # compute dk = dot(ds.T, q) + dk += tl.dot(ds, q, trans_a=True) + # compute dq + if not (EVEN_M & EVEN_HEADDIM): # Otherewise there's a race condition when BIAS_TYPE='matrix' + tl.debug_barrier() + if not ATOMIC_ADD: + if EVEN_M & EVEN_HEADDIM: # Race condition if we just do EVEN_M + dq = tl.load(dq_ptrs, eviction_policy="evict_last") + dq += tl.dot(ds, k) + tl.store(dq_ptrs, dq, eviction_policy="evict_last") + else: + if EVEN_HEADDIM: + dq = tl.load(dq_ptrs, mask=offs_m_curr[:, None] < seqlen_q, other=0.0, + eviction_policy="evict_last") + dq += tl.dot(ds, k) + tl.store(dq_ptrs, dq, mask=offs_m_curr[:, None] < seqlen_q, + eviction_policy="evict_last") + else: + dq = tl.load(dq_ptrs, + mask=(offs_m_curr[:, None] < seqlen_q) & (offs_d[None, :] < headdim), + other=0.0, eviction_policy="evict_last") + dq += tl.dot(ds, k) + tl.store(dq_ptrs, dq, + mask=(offs_m_curr[:, None] < seqlen_q) & (offs_d[None, :] < headdim), + eviction_policy="evict_last") + else: # If we're parallelizing across the seqlen_k dimension + dq = tl.dot(ds, k) + if EVEN_M & EVEN_HEADDIM: # Race condition if we just do EVEN_M + tl.atomic_add(dq_ptrs, dq) + else: + if EVEN_HEADDIM: + tl.atomic_add(dq_ptrs, dq, mask=offs_m_curr[:, None] < seqlen_q) + else: + tl.atomic_add(dq_ptrs, dq, + mask=(offs_m_curr[:, None] < seqlen_q) & (offs_d[None, :] < headdim)) + # increment pointers + dq_ptrs += BLOCK_M * stride_dqm + q_ptrs += BLOCK_M * stride_qm + do_ptrs += BLOCK_M * stride_dom + if BIAS_TYPE == 'matrix': + b_ptrs += BLOCK_M * stride_bm + # write-back + dv_ptrs = DV + (offs_n[:, None] * stride_dvn + offs_d[None, :]) + dk_ptrs = DK + (offs_n[:, None] * stride_dkn + offs_d[None, :]) + _bwd_store_dk_dv(dk_ptrs, dv_ptrs, dk, dv, offs_n, offs_d, seqlen_k, headdim, + EVEN_M=EVEN_M, EVEN_N=EVEN_N, EVEN_HEADDIM=EVEN_HEADDIM)
+ + + +
+[docs] +def init_to_zero(name): + return lambda nargs: nargs[name].zero_()
+ + + +@triton.autotune( + configs=[ + triton.Config({"BLOCK_M": 128, "BLOCK_N": 128, "SEQUENCE_PARALLEL": False}, num_warps=8, num_stages=1, pre_hook=init_to_zero('DQ')), + triton.Config({"BLOCK_M": 128, "BLOCK_N": 128, "SEQUENCE_PARALLEL": True}, num_warps=8, num_stages=1, pre_hook=init_to_zero('DQ')), + # Other configs seem to give wrong results when seqlen_q % 128 != 0, disabling them for now + # # Kernel is buggy (give wrong result) if we set BLOCK_m=128, BLOCK_n=64, num_warps=*4* + # triton.Config({"BLOCK_M": 128, "BLOCK_N": 64, "SEQUENCE_PARALLEL": False}, num_warps=8, num_stages=1, pre_hook=init_to_zero('DQ')), + # triton.Config({"BLOCK_M": 128, "BLOCK_N": 64, "SEQUENCE_PARALLEL": True}, num_warps=8, num_stages=1, pre_hook=init_to_zero('DQ')), + # triton.Config({"BLOCK_M": 64, "BLOCK_N": 64, "SEQUENCE_PARALLEL": False}, num_warps=4, num_stages=1, pre_hook=init_to_zero('DQ')), + # triton.Config({"BLOCK_M": 64, "BLOCK_N": 64, "SEQUENCE_PARALLEL": True}, num_warps=4, num_stages=1, pre_hook=init_to_zero('DQ')), + ], + key=['CACHE_KEY_SEQLEN_Q', 'CACHE_KEY_SEQLEN_K', 'BIAS_TYPE', 'IS_CAUSAL', 'BLOCK_HEADDIM'], +) +@triton.heuristics( + { + "EVEN_M": lambda args: args["seqlen_q"] % args["BLOCK_M"] == 0, + "EVEN_N": lambda args: args["seqlen_k"] % args["BLOCK_N"] == 0, + "EVEN_HEADDIM": lambda args: args["headdim"] == args["BLOCK_HEADDIM"], + } +) +@triton.jit +
+[docs] +def _bwd_kernel( + Q, K, V, Bias, + DO, DQ, DK, DV, + LSE, D, + softmax_scale, + stride_qb, stride_qh, stride_qm, + stride_kb, stride_kh, stride_kn, + stride_vb, stride_vh, stride_vn, + stride_bb, stride_bh, stride_bm, + stride_dob, stride_doh, stride_dom, + stride_dqb, stride_dqh, stride_dqm, + stride_dkb, stride_dkh, stride_dkn, + stride_dvb, stride_dvh, stride_dvn, + nheads, seqlen_q, seqlen_k, seqlen_q_rounded, headdim, + CACHE_KEY_SEQLEN_Q, CACHE_KEY_SEQLEN_K, + BIAS_TYPE: tl.constexpr, + IS_CAUSAL: tl.constexpr, + BLOCK_HEADDIM: tl.constexpr, + SEQUENCE_PARALLEL: tl.constexpr, + EVEN_M: tl.constexpr, EVEN_N: tl.constexpr, EVEN_HEADDIM: tl.constexpr, + BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, +): + off_hb = tl.program_id(1) + off_b = off_hb // nheads + off_h = off_hb % nheads + # offset pointers for batch/head + Q += off_b * stride_qb + off_h * stride_qh + K += off_b * stride_kb + off_h * stride_kh + V += off_b * stride_vb + off_h * stride_vh + DO += off_b * stride_dob + off_h * stride_doh + DQ += off_b * stride_dqb + off_h * stride_dqh + DK += off_b * stride_dkb + off_h * stride_dkh + DV += off_b * stride_dvb + off_h * stride_dvh + if BIAS_TYPE != 'none': + Bias += off_b * stride_bb + off_h * stride_bh + # pointer to row-wise quantities in value-like data + D += off_hb * seqlen_q_rounded + LSE += off_hb * seqlen_q_rounded + if not SEQUENCE_PARALLEL: + num_block_n = tl.cdiv(seqlen_k, BLOCK_N) + for start_n in range(0, num_block_n): + _bwd_kernel_one_col_block( + start_n, + Q, K, V, Bias, + DO, DQ, DK, DV, + LSE, D, + softmax_scale, + stride_qm, stride_kn, stride_vn, stride_bm, + stride_dom, stride_dqm, stride_dkn, stride_dvn, + seqlen_q, seqlen_k, headdim, + ATOMIC_ADD=False, + BIAS_TYPE=BIAS_TYPE, + IS_CAUSAL=IS_CAUSAL, + BLOCK_HEADDIM=BLOCK_HEADDIM, + EVEN_M=EVEN_M, EVEN_N=EVEN_N, EVEN_HEADDIM=EVEN_HEADDIM, + BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N + ) + else: + start_n = tl.program_id(0) + _bwd_kernel_one_col_block( + start_n, + Q, K, V, Bias, + DO, DQ, DK, DV, + LSE, D, + softmax_scale, + stride_qm, stride_kn, stride_vn, stride_bm, + stride_dom, stride_dqm, stride_dkn, stride_dvn, + seqlen_q, seqlen_k, headdim, + ATOMIC_ADD=True, + BIAS_TYPE=BIAS_TYPE, + IS_CAUSAL=IS_CAUSAL, + BLOCK_HEADDIM=BLOCK_HEADDIM, + EVEN_M=EVEN_M, EVEN_N=EVEN_N, EVEN_HEADDIM=EVEN_HEADDIM, + BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N + )
+ + + +
+[docs] +def _flash_attn_forward(q, k, v, bias=None, causal=False, softmax_scale=None): + # shape constraints + batch, seqlen_q, nheads, d = q.shape + _, seqlen_k, _, _ = k.shape + assert k.shape == (batch, seqlen_k, nheads, d) + assert v.shape == (batch, seqlen_k, nheads, d) + assert d <= 128, 'FlashAttention only support head dimensions up to 128' + assert q.dtype == k.dtype == v.dtype, 'All tensors must have the same type' + assert q.dtype in [torch.float16, torch.bfloat16], 'Only support fp16 and bf16' + assert q.is_cuda and k.is_cuda and v.is_cuda + softmax_scale = softmax_scale or 1.0 / math.sqrt(d) + + has_bias = bias is not None + bias_type = 'none' + if has_bias: + assert bias.dtype in [q.dtype, torch.float] + assert bias.is_cuda + assert bias.dim() == 4 + if bias.stride(-1) != 1: + bias = bias.contiguous() + if bias.shape[2:] == (1, seqlen_k): + bias_type = 'vector' + elif bias.shape[2:] == (seqlen_q, seqlen_k): + bias_type = 'matrix' + else: + raise RuntimeError('Last 2 dimensions of bias must be (1, seqlen_k)' + ' or (seqlen_q, seqlen_k)') + bias = bias.expand(batch, nheads, seqlen_q, seqlen_k) + bias_strides = (bias.stride(0), bias.stride(1), bias.stride(2)) if has_bias else (0, 0, 0) + + seqlen_q_rounded = math.ceil(seqlen_q / 128) * 128 + lse = torch.empty((batch, nheads, seqlen_q_rounded), device=q.device, dtype=torch.float32) + tmp = torch.empty((batch, nheads, seqlen_q_rounded), device=q.device, dtype=torch.float32) + o = torch.empty_like(q) + + BLOCK_HEADDIM = max(triton.next_power_of_2(d), 16) + BLOCK = 128 + num_warps = 4 if d <= 64 else 8 + grid = lambda META: (triton.cdiv(seqlen_q, META["BLOCK_M"]), batch * nheads) + _fwd_kernel[grid]( + q, k, v, bias, o, + lse, tmp, + softmax_scale, + q.stride(0), q.stride(2), q.stride(1), + k.stride(0), k.stride(2), k.stride(1), + v.stride(0), v.stride(2), v.stride(1), + *bias_strides, + o.stride(0), o.stride(2), o.stride(1), + nheads, seqlen_q, seqlen_k, seqlen_q_rounded, d, + seqlen_q // 32, seqlen_k // 32, # key for triton cache (limit number of compilations) + # Can't use kwargs here because triton autotune expects key to be args, not kwargs + # IS_CAUSAL=causal, BLOCK_HEADDIM=d, + bias_type, causal, BLOCK_HEADDIM, + BLOCK_M=BLOCK, BLOCK_N=BLOCK, + num_warps=num_warps, + num_stages=1, + ) + return o, lse, softmax_scale # softmax_scale could have been updated
+ + + +
+[docs] +def _flash_attn_backward(do, q, k, v, o, lse, dq, dk, dv, bias=None, causal=False, softmax_scale=None): + # Make sure that the last dimension is contiguous + if do.stride(-1) != 1: + do = do.contiguous() + batch, seqlen_q, nheads, d = q.shape + _, seqlen_k, _, _ = k.shape + # assert d in {16, 32, 64, 128} + assert d <= 128 + seqlen_q_rounded = math.ceil(seqlen_q / 128) * 128 + assert lse.shape == (batch, nheads, seqlen_q_rounded) + assert q.stride(-1) == k.stride(-1) == v.stride(-1) == o.stride(-1) == 1 + assert dq.stride(-1) == dk.stride(-1) == dv.stride(-1) == 1 + softmax_scale = softmax_scale or 1.0 / math.sqrt(d) + # dq_accum = torch.zeros_like(q, dtype=torch.float32) + dq_accum = torch.empty_like(q, dtype=torch.float32) + delta = torch.empty_like(lse) + # delta = torch.zeros_like(lse) + + BLOCK_HEADDIM = max(triton.next_power_of_2(d), 16) + grid = lambda META: (triton.cdiv(seqlen_q, META["BLOCK_M"]), batch * nheads) + _bwd_preprocess_do_o_dot[grid]( + o, do, delta, + o.stride(0), o.stride(2), o.stride(1), + do.stride(0), do.stride(2), do.stride(1), + nheads, seqlen_q, seqlen_q_rounded, d, + BLOCK_M=128, BLOCK_HEADDIM=BLOCK_HEADDIM, + ) + + has_bias = bias is not None + bias_type = 'none' + if has_bias: + assert bias.dtype in [q.dtype, torch.float] + assert bias.is_cuda + assert bias.dim() == 4 + assert bias.stride(-1) == 1 + if bias.shape[2:] == (1, seqlen_k): + bias_type = 'vector' + elif bias.shape[2:] == (seqlen_q, seqlen_k): + bias_type = 'matrix' + else: + raise RuntimeError('Last 2 dimensions of bias must be (1, seqlen_k)' + ' or (seqlen_q, seqlen_k)') + bias = bias.expand(batch, nheads, seqlen_q, seqlen_k) + bias_strides = (bias.stride(0), bias.stride(1), bias.stride(2)) if has_bias else (0, 0, 0) + + # BLOCK_M = 128 + # BLOCK_N = 64 + # num_warps = 4 + grid = lambda META: (triton.cdiv(seqlen_k, META["BLOCK_N"]) if META["SEQUENCE_PARALLEL"] else 1, + batch * nheads) + _bwd_kernel[grid]( + q, k, v, bias, + do, dq_accum, dk, dv, + lse, delta, + softmax_scale, + q.stride(0), q.stride(2), q.stride(1), + k.stride(0), k.stride(2), k.stride(1), + v.stride(0), v.stride(2), v.stride(1), + *bias_strides, + do.stride(0), do.stride(2), do.stride(1), + dq_accum.stride(0), dq_accum.stride(2), dq_accum.stride(1), + dk.stride(0), dk.stride(2), dk.stride(1), + dv.stride(0), dv.stride(2), dv.stride(1), + nheads, seqlen_q, seqlen_k, seqlen_q_rounded, d, + seqlen_q // 32, seqlen_k // 32, # key for triton cache (limit number of compilations) + # Can't use kwargs here because triton autotune expects key to be args, not kwargs + # IS_CAUSAL=causal, BLOCK_HEADDIM=d, + bias_type, causal, BLOCK_HEADDIM, + # SEQUENCE_PARALLEL=False, + # BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N, + # num_warps=num_warps, + # num_stages=1, + ) + dq.copy_(dq_accum)
+ + + +
+[docs] +class FlashAttnQKVPackedFunc(torch.autograd.Function): + + @staticmethod +
+[docs] + def forward(ctx, qkv, bias=None, causal=False, softmax_scale=None): + """ + qkv: (batch, seqlen, 3, nheads, headdim) + bias: optional, shape broadcastable to (batch, nheads, seqlen, seqlen). + For example, ALiBi mask for causal would have shape (1, nheads, 1, seqlen). + ALiBi mask for non-causal would have shape (1, nheads, seqlen, seqlen) + """ + # Make sure that the last dimension is contiguous + if qkv.stride(-1) != 1: + qkv = qkv.contiguous() + o, lse, ctx.softmax_scale = _flash_attn_forward( + qkv[:, :, 0], qkv[:, :, 1], qkv[:, :, 2], bias=bias, causal=causal, + softmax_scale=softmax_scale + ) + ctx.save_for_backward(qkv, o, lse, bias) + ctx.causal = causal + return o
+ + + @staticmethod +
+[docs] + def backward(ctx, do): + qkv, o, lse, bias = ctx.saved_tensors + assert not ctx.needs_input_grad[1], 'FlashAttention does not support bias gradient yet' + # Triton's autotune causes the Tensor._version to change, and so Pytorch autograd + # does a memcpy. To avoid this we run in inference_mode, which doesn't track the version. + with torch.inference_mode(): + dqkv = torch.empty_like(qkv) + _flash_attn_backward(do, qkv[:, :, 0], qkv[:, :, 1], qkv[:, :, 2], o, lse, + dqkv[:, :, 0], dqkv[:, :, 1], dqkv[:, :, 2], + bias=bias, causal=ctx.causal, softmax_scale=ctx.softmax_scale) + return dqkv, None, None, None
+
+ + + +
+[docs] +flash_attn_qkvpacked_func = FlashAttnQKVPackedFunc.apply
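A minimal usage sketch of the packed-QKV entry point defined above, assuming a CUDA device, fp16 tensors, and the shapes described in the forward docstring; the sizes and softmax_scale value are illustrative only, and arguments are passed positionally because autograd.Function.apply does not accept keywords.

import math
import torch

batch, seqlen, nheads, headdim = 2, 1024, 16, 64
# qkv packs Q, K and V along dim 2: (batch, seqlen, 3, nheads, headdim)
qkv = torch.randn(batch, seqlen, 3, nheads, headdim,
                  device="cuda", dtype=torch.float16, requires_grad=True)
# positional arguments: (qkv, bias, causal, softmax_scale)
out = flash_attn_qkvpacked_func(qkv, None, True, 1.0 / math.sqrt(headdim))
out.sum().backward()  # gradients for q, k, v come back packed in qkv.grad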
+ + + +
+[docs] +class FlashAttnKVPackedFunc(torch.autograd.Function): + + @staticmethod +
+[docs] + def forward(ctx, q, kv, bias=None, causal=False, softmax_scale=None): + """ + q: (batch, seqlen_q, nheads, headdim) + kv: (batch, seqlen_k, 2, nheads, headdim) + bias: optional, shape broadcastable to (batch, nheads, seqlen_q, seqlen_k). + For example, ALiBi mask for causal would have shape (1, nheads, 1, seqlen_k). + ALiBi mask for non-causal would have shape (1, nheads, seqlen_q, seqlen_k) + """ + # Make sure that the last dimension is contiguous + q, kv = [x if x.stride(-1) == 1 else x.contiguous() for x in [q, kv]] + o, lse, ctx.softmax_scale = _flash_attn_forward( + q, kv[:, :, 0], kv[:, :, 1], bias=bias, causal=causal, softmax_scale=softmax_scale + ) + ctx.save_for_backward(q, kv, o, lse, bias) + ctx.causal = causal + return o
+ + + @staticmethod +
+[docs] + def backward(ctx, do): + q, kv, o, lse, bias = ctx.saved_tensors + if len(ctx.needs_input_grad) >= 3: + assert not ctx.needs_input_grad[2], 'FlashAttention does not support bias gradient yet' + # Triton's autotune causes the Tensor._version to change, and so Pytorch autograd + # does a memcpy. To avoid this we run in inference_mode, which doesn't track the version. + with torch.inference_mode(): + dq = torch.empty_like(q) + dkv = torch.empty_like(kv) + _flash_attn_backward(do, q, kv[:, :, 0], kv[:, :, 1], o, lse, + dq, dkv[:, :, 0], dkv[:, :, 1], + bias=bias, causal=ctx.causal, softmax_scale=ctx.softmax_scale) + return dq, dkv, None, None, None
+
+ + + +
+[docs] +flash_attn_kvpacked_func = FlashAttnKVPackedFunc.apply
+ + + +
+[docs] +class FlashAttnFunc(torch.autograd.Function): + + @staticmethod +
+[docs] + def forward(ctx, q, k, v, bias=None, causal=False, softmax_scale=None): + """ + q: (batch_size, seqlen_q, nheads, headdim) + k, v: (batch_size, seqlen_k, nheads, headdim) + bias: optional, shape broadcastable to (batch, nheads, seqlen_q, seqlen_k). + For example, ALiBi mask for causal would have shape (1, nheads, 1, seqlen_k). + ALiBi mask for non-causal would have shape (1, nheads, seqlen_q, seqlen_k) + """ + # Make sure that the last dimension is contiguous + q, k, v = [x if x.stride(-1) == 1 else x.contiguous() for x in [q, k, v]] + o, lse, ctx.softmax_scale = _flash_attn_forward( + q, k, v, bias=bias, causal=causal, softmax_scale=softmax_scale + ) + ctx.save_for_backward(q, k, v, o, lse, bias) + ctx.causal = causal + return o
+ + + @staticmethod +
+[docs] + def backward(ctx, do): + q, k, v, o, lse, bias = ctx.saved_tensors + assert not ctx.needs_input_grad[3], 'FlashAttention does not support bias gradient yet' + # Triton's autotune causes the Tensor._version to change, and so Pytorch autograd + # does a memcpy. To avoid this we run in inference_mode, which doesn't track the version. + with torch.inference_mode(): + dq = torch.empty_like(q) + dk = torch.empty_like(k) + dv = torch.empty_like(v) + _flash_attn_backward(do, q, k, v, o, lse, dq, dk, dv, + bias=bias, causal=ctx.causal, softmax_scale=ctx.softmax_scale) + return dq, dk, dv, None, None, None
+
+ + + +
+[docs] +flash_attn_func = FlashAttnFunc.apply
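A similar sketch for the unpacked interface above, with separate q, k, v tensors and an additive bias; the shapes are placeholders. A bias whose last two dimensions are (seqlen_q, seqlen_k) takes the 'matrix' bias path in _flash_attn_forward, while (1, seqlen_k) would take the 'vector' path.

import math
import torch

batch, seqlen_q, seqlen_k, nheads, headdim = 2, 512, 512, 16, 64
q, k, v = [torch.randn(batch, s, nheads, headdim, device="cuda", dtype=torch.float16)
           for s in (seqlen_q, seqlen_k, seqlen_k)]
# additive attention bias, broadcastable to (batch, nheads, seqlen_q, seqlen_k)
bias = torch.randn(1, nheads, seqlen_q, seqlen_k, device="cuda", dtype=torch.float16)
# positional arguments: (q, k, v, bias, causal, softmax_scale)
out = flash_attn_func(q, k, v, bias, False, 1.0 / math.sqrt(headdim))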
+ +
+ +
+ + + + + +
+ +
+
+
+ +
+ + + + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_modules/lmflow/utils/llava_conversation_lib.html b/_modules/lmflow/utils/llava_conversation_lib.html new file mode 100644 index 000000000..713022afa --- /dev/null +++ b/_modules/lmflow/utils/llava_conversation_lib.html @@ -0,0 +1,946 @@ + + + + + + + + + + lmflow.utils.llava_conversation_lib — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + + + +
+ + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +

Source code for lmflow.utils.llava_conversation_lib

+import dataclasses
+from enum import auto, Enum
+from typing import List, Tuple
+
+
+
+[docs] +class SeparatorStyle(Enum): + """Different separator style.""" +
+[docs] + SINGLE = auto()
+ +
+[docs] + TWO = auto()
+ +
+[docs] + MPT = auto()
+ +
+[docs] + PLAIN = auto()
+ +
+[docs] + LLAMA_2 = auto()
+
+ + + +@dataclasses.dataclass +
+[docs] +class Conversation: + """A class that keeps all conversation history.""" +
+[docs] + system: str
+ +
+[docs] + roles: List[str]
+ +
+[docs] + messages: List[List[str]]
+ +
+[docs] + offset: int
+ +
+[docs] + sep_style: SeparatorStyle = SeparatorStyle.SINGLE
+ +
+[docs] + sep: str = "###"
+ +
+[docs] + sep2: str = None
+ +
+[docs] + version: str = "Unknown"
+ + +
+[docs] + skip_next: bool = False
+ + +
+[docs] + def get_prompt(self): + messages = self.messages + if len(messages) > 0 and type(messages[0][1]) is tuple: + messages = self.messages.copy() + init_role, init_msg = messages[0].copy() + init_msg = init_msg[0].replace("<image>", "").strip() + if 'mmtag' in self.version: + messages[0] = (init_role, init_msg) + messages.insert(0, (self.roles[0], "<Image><image></Image>")) + messages.insert(1, (self.roles[1], "Received.")) + else: + messages[0] = (init_role, "<image>\n" + init_msg) + + if self.sep_style == SeparatorStyle.SINGLE: + ret = self.system + self.sep + for role, message in messages: + if message: + if type(message) is tuple: + message, _, _ = message + ret += role + ": " + message + self.sep + else: + ret += role + ":" + elif self.sep_style == SeparatorStyle.TWO: + seps = [self.sep, self.sep2] + ret = self.system + seps[0] + for i, (role, message) in enumerate(messages): + if message: + if type(message) is tuple: + message, _, _ = message + ret += role + ": " + message + seps[i % 2] + else: + ret += role + ":" + elif self.sep_style == SeparatorStyle.MPT: + ret = self.system + self.sep + for role, message in messages: + if message: + if type(message) is tuple: + message, _, _ = message + ret += role + message + self.sep + else: + ret += role + elif self.sep_style == SeparatorStyle.LLAMA_2: + wrap_sys = lambda msg: f"<<SYS>>\n{msg}\n<</SYS>>\n\n" + wrap_inst = lambda msg: f"[INST] {msg} [/INST]" + ret = "" + + for i, (role, message) in enumerate(messages): + if i == 0: + assert message, "first message should not be none" + assert role == self.roles[0], "first message should come from user" + if message: + if type(message) is tuple: + message, _, _ = message + if i == 0: message = wrap_sys(self.system) + message + if i % 2 == 0: + message = wrap_inst(message) + ret += self.sep + message + else: + ret += " " + message + " " + self.sep2 + else: + ret += "" + ret = ret.lstrip(self.sep) + elif self.sep_style == SeparatorStyle.PLAIN: + seps = [self.sep, self.sep2] + ret = self.system + for i, (role, message) in enumerate(messages): + if message: + if type(message) is tuple: + message, _, _ = message + ret += message + seps[i % 2] + else: + ret += "" + else: + raise ValueError(f"Invalid style: {self.sep_style}") + + return ret
+ + +
+[docs] + def append_message(self, role, message): + self.messages.append([role, message])
+ + +
+[docs] + def get_images(self, return_pil=False): + images = [] + for i, (role, msg) in enumerate(self.messages[self.offset:]): + if i % 2 == 0: + if type(msg) is tuple: + import base64 + from io import BytesIO + from PIL import Image + msg, image, image_process_mode = msg + if image_process_mode == "Pad": + def expand2square(pil_img, background_color=(122, 116, 104)): + width, height = pil_img.size + if width == height: + return pil_img + elif width > height: + result = Image.new(pil_img.mode, (width, width), background_color) + result.paste(pil_img, (0, (width - height) // 2)) + return result + else: + result = Image.new(pil_img.mode, (height, height), background_color) + result.paste(pil_img, ((height - width) // 2, 0)) + return result + image = expand2square(image) + elif image_process_mode == "Crop": + pass + elif image_process_mode == "Resize": + image = image.resize((336, 336)) + else: + raise ValueError(f"Invalid image_process_mode: {image_process_mode}") + max_hw, min_hw = max(image.size), min(image.size) + aspect_ratio = max_hw / min_hw + max_len, min_len = 800, 400 + shortest_edge = int(min(max_len / aspect_ratio, min_len, min_hw)) + longest_edge = int(shortest_edge * aspect_ratio) + W, H = image.size + if H > W: + H, W = longest_edge, shortest_edge + else: + H, W = shortest_edge, longest_edge + image = image.resize((W, H)) + if return_pil: + images.append(image) + else: + buffered = BytesIO() + image.save(buffered, format="PNG") + img_b64_str = base64.b64encode(buffered.getvalue()).decode() + images.append(img_b64_str) + return images
+ + +
+[docs] + def to_gradio_chatbot(self): + ret = [] + for i, (role, msg) in enumerate(self.messages[self.offset:]): + if i % 2 == 0: + if type(msg) is tuple: + import base64 + from io import BytesIO + msg, image, image_process_mode = msg + max_hw, min_hw = max(image.size), min(image.size) + aspect_ratio = max_hw / min_hw + max_len, min_len = 800, 400 + shortest_edge = int(min(max_len / aspect_ratio, min_len, min_hw)) + longest_edge = int(shortest_edge * aspect_ratio) + W, H = image.size + if H > W: + H, W = longest_edge, shortest_edge + else: + H, W = shortest_edge, longest_edge + image = image.resize((W, H)) + buffered = BytesIO() + image.save(buffered, format="JPEG") + img_b64_str = base64.b64encode(buffered.getvalue()).decode() + img_str = f'<img src="data:image/png;base64,{img_b64_str}" alt="user upload image" />' + ret.append([img_str, None]) + msg = msg.replace('<image>', '').strip() + if len(msg) > 0: + ret.append([msg, None]) + else: + ret.append([msg, None]) + else: + ret[-1][-1] = msg + return ret
+ + +
+[docs] + def copy(self): + return Conversation( + system=self.system, + roles=self.roles, + messages=[[x, y] for x, y in self.messages], + offset=self.offset, + sep_style=self.sep_style, + sep=self.sep, + sep2=self.sep2, + version=self.version)
+ + +
+[docs] + def dict(self): + if len(self.get_images()) > 0: + return { + "system": self.system, + "roles": self.roles, + "messages": [[x, y[0] if type(y) is tuple else y] for x, y in self.messages], + "offset": self.offset, + "sep": self.sep, + "sep2": self.sep2, + } + return { + "system": self.system, + "roles": self.roles, + "messages": self.messages, + "offset": self.offset, + "sep": self.sep, + "sep2": self.sep2, + }
+
+ + + +
+[docs] +conv_vicuna_v0 = Conversation( + system="A chat between a curious human and an artificial intelligence assistant. " + "The assistant gives helpful, detailed, and polite answers to the human's questions.", + roles=("Human", "Assistant"), + messages=( + ("Human", "What are the key differences between renewable and non-renewable energy sources?"), + ("Assistant", + "Renewable energy sources are those that can be replenished naturally in a relatively " + "short amount of time, such as solar, wind, hydro, geothermal, and biomass. " + "Non-renewable energy sources, on the other hand, are finite and will eventually be " + "depleted, such as coal, oil, and natural gas. Here are some key differences between " + "renewable and non-renewable energy sources:\n" + "1. Availability: Renewable energy sources are virtually inexhaustible, while non-renewable " + "energy sources are finite and will eventually run out.\n" + "2. Environmental impact: Renewable energy sources have a much lower environmental impact " + "than non-renewable sources, which can lead to air and water pollution, greenhouse gas emissions, " + "and other negative effects.\n" + "3. Cost: Renewable energy sources can be more expensive to initially set up, but they typically " + "have lower operational costs than non-renewable sources.\n" + "4. Reliability: Renewable energy sources are often more reliable and can be used in more remote " + "locations than non-renewable sources.\n" + "5. Flexibility: Renewable energy sources are often more flexible and can be adapted to different " + "situations and needs, while non-renewable sources are more rigid and inflexible.\n" + "6. Sustainability: Renewable energy sources are more sustainable over the long term, while " + "non-renewable sources are not, and their depletion can lead to economic and social instability.\n") + ), + offset=2, + sep_style=SeparatorStyle.SINGLE, + sep="###", +)
+ + +
+[docs] +conv_vicuna_v1 = Conversation( + system="A chat between a curious user and an artificial intelligence assistant. " + "The assistant gives helpful, detailed, and polite answers to the user's questions.", + roles=("USER", "ASSISTANT"), + version="v1", + messages=(), + offset=0, + sep_style=SeparatorStyle.TWO, + sep=" ", + sep2="</s>", +)
+ + +
+[docs] +conv_llama_2 = Conversation( + system="""You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature. + +If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.""", + roles=("USER", "ASSISTANT"), + version="llama_v2", + messages=(), + offset=0, + sep_style=SeparatorStyle.LLAMA_2, + sep="<s>", + sep2="</s>", +)
+ + +
+[docs] +conv_llava_llama_2 = Conversation( + system="You are a helpful language and vision assistant. " + "You are able to understand the visual content that the user provides, " + "and assist the user with a variety of tasks using natural language.", + roles=("USER", "ASSISTANT"), + version="llama_v2", + messages=(), + offset=0, + sep_style=SeparatorStyle.LLAMA_2, + sep="<s>", + sep2="</s>", +)
+ + +
+[docs] +conv_mpt = Conversation( + system="""<|im_start|>system +A conversation between a user and an LLM-based AI assistant. The assistant gives helpful and honest answers.""", + roles=("<|im_start|>user\n", "<|im_start|>assistant\n"), + version="mpt", + messages=(), + offset=0, + sep_style=SeparatorStyle.MPT, + sep="<|im_end|>", +)
+ + +
+[docs] +conv_llava_plain = Conversation( + system="", + roles=("", ""), + messages=( + ), + offset=0, + sep_style=SeparatorStyle.PLAIN, + sep="\n", +)
+ + +
+[docs] +conv_llava_v0 = Conversation( + system="A chat between a curious human and an artificial intelligence assistant. " + "The assistant gives helpful, detailed, and polite answers to the human's questions.", + roles=("Human", "Assistant"), + messages=( + ("Human", "Hi!"), + ("Assistant", "Hi there! How can I help you today?") + ), + offset=2, + sep_style=SeparatorStyle.SINGLE, + sep="###", +)
+ + +
+[docs] +conv_llava_v0_mmtag = Conversation( + system="A chat between a curious user and an artificial intelligence assistant. " + "The assistant is able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language." + "The visual content will be provided with the following format: <Image>visual content</Image>.", + roles=("Human", "Assistant"), + messages=( + ), + offset=0, + sep_style=SeparatorStyle.SINGLE, + sep="###", + version="v0_mmtag", +)
+ + +
+[docs] +conv_llava_v1 = Conversation( + system="A chat between a curious human and an artificial intelligence assistant. " + "The assistant gives helpful, detailed, and polite answers to the human's questions.", + roles=("USER", "ASSISTANT"), + version="v1", + messages=(), + offset=0, + sep_style=SeparatorStyle.TWO, + sep=" ", + sep2="</s>", +)
+ + +
+[docs] +conv_llava_v1_mmtag = Conversation( + system="A chat between a curious user and an artificial intelligence assistant. " + "The assistant is able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language." + "The visual content will be provided with the following format: <Image>visual content</Image>.", + roles=("USER", "ASSISTANT"), + messages=(), + offset=0, + sep_style=SeparatorStyle.TWO, + sep=" ", + sep2="</s>", + version="v1_mmtag", +)
+ + +
+[docs] +default_conversation = conv_vicuna_v1 # currently only supports the v1 version
+ +
+[docs] +conv_templates = { + "default": conv_vicuna_v0, + "v0": conv_vicuna_v0, + "v1": conv_vicuna_v1, + "vicuna_v1": conv_vicuna_v1, + "llama_2": conv_llama_2, + + "plain": conv_llava_plain, + "v0_plain": conv_llava_plain, + "llava_v0": conv_llava_v0, + "v0_mmtag": conv_llava_v0_mmtag, + "llava_v1": conv_llava_v1, + "v1_mmtag": conv_llava_v1_mmtag, + "llava_llama_2": conv_llava_llama_2, + + "mpt": conv_mpt, +}
+ + + +if __name__ == "__main__": + print(default_conversation.get_prompt()) +
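As a usage sketch (not part of the module above): a template from conv_templates can be copied, filled with messages, and rendered; the question text below is a placeholder.

conv = conv_templates["vicuna_v1"].copy()   # Conversation with SeparatorStyle.TWO
conv.append_message(conv.roles[0], "What is LMFlow?")
conv.append_message(conv.roles[1], None)    # None leaves the assistant turn open
prompt = conv.get_prompt()
# -> "<system prompt> USER: What is LMFlow? ASSISTANT:"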
+ +
+ + + + + +
+ +
+
+
+ +
+ + + + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_modules/lmflow/utils/model.html b/_modules/lmflow/utils/model.html new file mode 100644 index 000000000..eb3ea6888 --- /dev/null +++ b/_modules/lmflow/utils/model.html @@ -0,0 +1,491 @@ + + + + + + + + + + lmflow.utils.model — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + + + +
+ + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +

Source code for lmflow.utils.model

+#!/usr/bin/env python
+# coding=utf-8
+# Copyright 2024 Statistics and Machine Learning Research Group. All rights reserved.
+import logging
+from typing import Dict, Any, List, Tuple, Union
+
+from transformers import AutoTokenizer
+
+from lmflow.args import ModelArguments
+
+
+
+[docs] +logger = logging.getLogger(__name__)
+ + + +
+[docs] +def check_homogeneity(model_args_list: List[ModelArguments]) -> bool: + assert all(isinstance(model_args, ModelArguments) for model_args in model_args_list), \ + "model_args_list should be a list of ModelArguments objects." + assert len(model_args_list) > 1, "model_args_list should have at least two elements." + + tokenizer_names = [] + for model_args in model_args_list: + tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, use_fast=False) + tokenizer_names.append(tokenizer.__class__.__name__) + + return len(set(tokenizer_names)) == 1
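A sketch of how check_homogeneity above might be called, e.g. to verify that a policy model and a reward model share the same tokenizer class before an iterative alignment run; the model paths are hypothetical and loading their tokenizers is assumed to succeed.

from lmflow.args import ModelArguments

policy_args = ModelArguments(model_name_or_path="path/to/policy-model")   # hypothetical path
reward_args = ModelArguments(model_name_or_path="path/to/reward-model")   # hypothetical path
if check_homogeneity([policy_args, reward_args]):
    # identical tokenizer classes, so token ids can be exchanged without re-decoding
    print("Tokenizers match")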
+ +
+ +
+ + + + + +
+ +
+
+
+ +
+ + + + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_modules/lmflow/utils/multimodal.html b/_modules/lmflow/utils/multimodal.html new file mode 100644 index 000000000..2fee62911 --- /dev/null +++ b/_modules/lmflow/utils/multimodal.html @@ -0,0 +1,519 @@ + + + + + + + + + + lmflow.utils.multimodal — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + + + +
+ + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +

Source code for lmflow.utils.multimodal

+import glob
+import torch
+from transformers import LlamaConfig
+from tqdm import tqdm
+
+
+
+[docs] +def update_custom_config(config, model_args): + if model_args.llm_model_name_or_path is not None: + text_config = LlamaConfig.from_pretrained( + model_args.llm_model_name_or_path) + config.text_config = text_config + config.with_qformer = model_args.with_qformer + config.custom_vision_model = model_args.custom_vision_model + if model_args.custom_vision_model: + # config.vision_model_args = model_args + config.image_encoder_name_or_path = \ + model_args.image_encoder_name_or_path + config.vision_select_layer = model_args.vision_select_layer + if getattr(model_args, "vision_select_feature", None) is not None: + config.vision_select_feature = model_args.vision_select_feature + return config
+ + + +
+[docs] +def load_llava_pretrain_model(model, checkpoint_path): + checkpoint_path = glob.glob(checkpoint_path) + for path in tqdm(checkpoint_path): + state_dict = torch.load(path, map_location="cpu") + new_state_dict = adapt_llava_model_to_lmflow_type(state_dict) + # modify the name of the key + # import pdb; pdb.set_trace() + lmflow_keys = model.state_dict().keys() + for key in new_state_dict.keys(): + if key not in lmflow_keys: + print("key not in lmflow_keys: ", key) + model.load_state_dict(new_state_dict, strict=False) + return model
+ + +
+[docs] +def adapt_llava_model_to_lmflow_type(state_dict): + new_state_dict = {} + for key, item in state_dict.items(): + key = key.replace("model.layers", "language_model.model.layers") + key = key.replace("model.embed_tokens", + "language_model.model.embed_tokens") + key = key.replace("model.mm_projector", "language_projection") + key = key.replace("lm_head", "language_model.lm_head") + key = key.replace("model.norm", "language_model.model.norm") + if "vision_tower" in key: + continue + new_state_dict[key] = item + return new_state_dict
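For illustration, the key renaming done by adapt_llava_model_to_lmflow_type can be traced on a toy state dict; the tensors are dummies, and the vision_tower entry is dropped exactly as in the function above.

import torch

llava_state_dict = {
    "model.layers.0.self_attn.q_proj.weight": torch.zeros(1),   # -> language_model.model.layers.0...
    "model.mm_projector.weight": torch.zeros(1),                 # -> language_projection.weight
    "model.vision_tower.encoder.weight": torch.zeros(1),         # skipped ("vision_tower" in key)
}
converted = adapt_llava_model_to_lmflow_type(llava_state_dict)
print(list(converted.keys()))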
+ +
+ +
+ + + + + +
+ +
+
+
+ +
+ + + + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_modules/lmflow/utils/position_interpolation/llama_rope_scaled_monkey_patch.html b/_modules/lmflow/utils/position_interpolation/llama_rope_scaled_monkey_patch.html new file mode 100644 index 000000000..af8fbac92 --- /dev/null +++ b/_modules/lmflow/utils/position_interpolation/llama_rope_scaled_monkey_patch.html @@ -0,0 +1,545 @@ + + + + + + + + + + lmflow.utils.position_interpolation.llama_rope_scaled_monkey_patch — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + + + +
+ + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +

Source code for lmflow.utils.position_interpolation.llama_rope_scaled_monkey_patch

+from functools import partial
+
+import torch
+import transformers
+import transformers.models.llama.modeling_llama
+
+
+[docs] +class CondenseRotaryEmbedding(torch.nn.Module): + def __init__(self, dim, pi_ratio, ntk_ratio, max_position_embeddings=2048, base=10000, device=None): + super().__init__() + +
+[docs] + self.ntk_ratio = ntk_ratio
+ + max_position_embeddings *= ntk_ratio +
+[docs] + base = base * ntk_ratio ** (dim / (dim-2)) #Base change formula
+ + +
+[docs] + inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float().to(device) / dim))
+ + self.register_buffer("inv_freq", inv_freq) + +
+[docs] + self.pi_ratio = pi_ratio
+ + max_position_embeddings *= pi_ratio +
+[docs] + self.max_seq_len_cached = max_position_embeddings
+ +
+[docs] + t = torch.arange(self.max_seq_len_cached, device=self.inv_freq.device, dtype=self.inv_freq.dtype) / pi_ratio
+ +
+[docs] + freqs = torch.einsum("i,j->ij", t, self.inv_freq)
+ + + # Different from paper, but it uses a different permutation in order to obtain the same calculation +
+[docs] + emb = torch.cat((freqs, freqs), dim=-1)
+ +
+[docs] + dtype = torch.get_default_dtype()
+ + self.register_buffer("cos_cached", emb.cos()[None, None, :, :].to(dtype), persistent=False) + self.register_buffer("sin_cached", emb.sin()[None, None, :, :].to(dtype), persistent=False) + +
+[docs] + def forward(self, x, seq_len=None): + # x: [bs, num_attention_heads, seq_len, head_size] + # This `if` block is unlikely to be run after we build sin/cos in `__init__`. Keep the logic here just in case. + if seq_len > self.max_seq_len_cached: + self.max_seq_len_cached = seq_len + t = torch.arange(self.max_seq_len_cached, device=x.device, dtype=self.inv_freq.dtype) / self.pi_ratio + freqs = torch.einsum("i,j->ij", t, self.inv_freq) + + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1).to(x.device) + self.register_buffer("cos_cached", emb.cos()[None, None, :, :].to(x.dtype), persistent=False) + self.register_buffer("sin_cached", emb.sin()[None, None, :, :].to(x.dtype), persistent=False) + + return ( + self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype), + self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype), + )
+
+ + +
+[docs] +def replace_llama_with_condense(pi_ratio, ntk_ratio): + transformers.models.llama.modeling_llama.LlamaRotaryEmbedding = partial(CondenseRotaryEmbedding, pi_ratio=pi_ratio, ntk_ratio=ntk_ratio)
+ +
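A hedged sketch of applying the monkey patch above for RoPE position interpolation: it must run before the Llama model is instantiated so that the patched rotary embedding class is picked up, and it assumes a transformers version whose LlamaRotaryEmbedding constructor is compatible with CondenseRotaryEmbedding; the model path and ratios are placeholders.

from transformers import AutoModelForCausalLM

replace_llama_with_condense(pi_ratio=4, ntk_ratio=1)   # e.g. stretch 2048 positions to 8192 via PI
model = AutoModelForCausalLM.from_pretrained("path/to/llama-model")   # hypothetical path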
+ +
+ + + + + +
+ +
+
+
+ +
+ + + + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_modules/lmflow/version.html b/_modules/lmflow/version.html new file mode 100644 index 000000000..be508f7b3 --- /dev/null +++ b/_modules/lmflow/version.html @@ -0,0 +1,464 @@ + + + + + + + + + + lmflow.version — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + + + +
+ + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +

Source code for lmflow.version

+
+[docs] +__version__ = "0.0.7"
+ +
+ +
+ + + + + +
+ +
+
+
+ +
+ + + + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_sources/about/authors.md.txt b/_sources/about/authors.md.txt new file mode 100644 index 000000000..a3481ee66 --- /dev/null +++ b/_sources/about/authors.md.txt @@ -0,0 +1,4 @@ +# Contributors + + +Shizhe Diao, Rui Pan, Hanze Dong, Ka Shun Shum, Jipeng Zhang, Wei Xiong, Tong Zhang diff --git a/_sources/about/changelog.md.txt b/_sources/about/changelog.md.txt new file mode 100644 index 000000000..991794df9 --- /dev/null +++ b/_sources/about/changelog.md.txt @@ -0,0 +1,15 @@ +# Changelog + + +## Version 0.0.1 (Mar 28, 2023) + +The first public version. + +Task tuning, instruction tuning, on user defined datasets. + +A simple and extensible API for developers. + +Efficient finetuning with LoRA. + +Simplified model inference framework. + diff --git a/_sources/about/index.md.txt b/_sources/about/index.md.txt new file mode 100644 index 000000000..df657b31c --- /dev/null +++ b/_sources/about/index.md.txt @@ -0,0 +1,15 @@ +# About + + +```{toctree} +:maxdepth: 2 + +changelog +``` + + +```{toctree} +:maxdepth: 2 + +authors +``` diff --git a/_sources/autoapi/index.rst.txt b/_sources/autoapi/index.rst.txt new file mode 100644 index 000000000..333d5c5cd --- /dev/null +++ b/_sources/autoapi/index.rst.txt @@ -0,0 +1,11 @@ +API Reference +============= + +This page contains auto-generated API reference documentation [#f1]_. + +.. toctree:: + :titlesonly: + + /autoapi/lmflow/index + +.. [#f1] Created with `sphinx-autoapi `_ \ No newline at end of file diff --git a/_sources/autoapi/lmflow/args/index.rst.txt b/_sources/autoapi/lmflow/args/index.rst.txt new file mode 100644 index 000000000..dba5f035a --- /dev/null +++ b/_sources/autoapi/lmflow/args/index.rst.txt @@ -0,0 +1,1582 @@ +lmflow.args +=========== + +.. py:module:: lmflow.args + +.. autoapi-nested-parse:: + + This script defines dataclasses: ModelArguments and DatasetArguments, + that contain the arguments for the model and dataset used in training. + + It imports several modules, including dataclasses, field from typing, Optional from typing, + require_version from transformers.utils.versions, MODEL_FOR_CAUSAL_LM_MAPPING, + and TrainingArguments from transformers. + + MODEL_CONFIG_CLASSES is assigned a list of the model config classes from + MODEL_FOR_CAUSAL_LM_MAPPING. MODEL_TYPES is assigned a tuple of the model types + extracted from the MODEL_CONFIG_CLASSES. + + .. + !! processed by numpydoc !! + + +Attributes +---------- + +.. autoapisummary:: + + lmflow.args.MODEL_CONFIG_CLASSES + lmflow.args.MODEL_TYPES + lmflow.args.logger + lmflow.args.PIPELINE_ARGUMENT_MAPPING + + +Classes +------- + +.. autoapisummary:: + + lmflow.args.OptimizerNames + lmflow.args.ModelArguments + lmflow.args.VisModelArguments + lmflow.args.DatasetArguments + lmflow.args.MultiModalDatasetArguments + lmflow.args.FinetunerArguments + lmflow.args.RewardModelTunerArguments + lmflow.args.EvaluatorArguments + lmflow.args.InferencerArguments + lmflow.args.RaftAlignerArguments + lmflow.args.BenchmarkingArguments + lmflow.args.DPOAlignerArguments + lmflow.args.DPOv2AlignerArguments + lmflow.args.IterativeAlignerArguments + lmflow.args.IterativeDPOAlignerArguments + lmflow.args.AutoArguments + + +Module Contents +--------------- + +.. py:data:: MODEL_CONFIG_CLASSES + +.. py:data:: MODEL_TYPES + +.. py:data:: logger + +.. py:class:: OptimizerNames + + .. py:attribute:: DUMMY + :value: 'dummy' + + + + .. py:attribute:: ADABELIEF + :value: 'adabelief' + + + + .. py:attribute:: ADABOUND + :value: 'adabound' + + + + .. 
py:attribute:: LARS + :value: 'lars' + + + + .. py:attribute:: LAMB + :value: 'lamb' + + + + .. py:attribute:: ADAMAX + :value: 'adamax' + + + + .. py:attribute:: NADAM + :value: 'nadam' + + + + .. py:attribute:: RADAM + :value: 'radam' + + + + .. py:attribute:: ADAMP + :value: 'adamp' + + + + .. py:attribute:: SGDP + :value: 'sgdp' + + + + .. py:attribute:: YOGI + :value: 'yogi' + + + + .. py:attribute:: SOPHIA + :value: 'sophia' + + + + .. py:attribute:: ADAN + :value: 'adan' + + + + .. py:attribute:: ADAM + :value: 'adam' + + + + .. py:attribute:: NOVOGRAD + :value: 'novograd' + + + + .. py:attribute:: ADADELTA + :value: 'adadelta' + + + + .. py:attribute:: ADAGRAD + :value: 'adagrad' + + + + .. py:attribute:: ADAMW_SCHEDULE_FREE + :value: 'adamw_schedule_free' + + + + .. py:attribute:: SGD_SCHEDULE_FREE + :value: 'sgd_schedule_free' + + + +.. py:class:: ModelArguments + + + Define a class ModelArguments using the dataclass decorator. + The class contains several optional parameters that can be used to configure a model. + + model_name_or_path : str + a string representing the path or name of a pretrained + model checkpoint for weights initialization. If None, a model will be trained from scratch. + + model_type : str + a string representing the type of model to use if training from + scratch. If not provided, a pretrained model will be used. + + config_overrides : str + a string representing the default config settings to override + when training a model from scratch. + + config_name : str + a string representing the name or path of the pretrained config to + use, if different from the model_name_or_path. + + tokenizer_name : str + a string representing the name or path of the pretrained tokenizer + to use, if different from the model_name_or_path. + + cache_dir : str + a string representing the path to the directory where pretrained models + downloaded from huggingface.co will be stored. + + use_fast_tokenizer : bool + a boolean indicating whether to use a fast tokenizer (backed by the + tokenizers library) or not. + + model_revision : str + a string representing the specific model version to use (can be a + branch name, tag name, or commit id). + + use_auth_token : bool + a boolean indicating whether to use the token generated when running + huggingface-cli login (necessary to use this script with private models). + + torch_dtype : str + a string representing the dtype to load the model under. If auto is + passed, the dtype will be automatically derived from the model's weights. + + use_ram_optimized_load : bool + a boolean indicating whether to use disk mapping when memory is not + enough. + + use_int8 : bool + a boolean indicating whether to load int8 quantization for inference. + + load_in_4bit : bool + whether to load the model in 4bit + + model_max_length : int + The maximum length of the model. + + truncation_side : str + The side on which the model should have truncation applied. + + arch_type : str + Model architecture type. + padding_side : str + The side on which the tokenizer should have padding applied. + eos_padding : bool + whether to pad with eos token instead of pad token. + ignore_bias_buffers : bool + fix for DDP issues with LM bias/mask buffers - invalid scalar type,`inplace operation. + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:attribute:: model_name_or_path + :type: Optional[str] + + + .. py:attribute:: lora_model_path + :type: Optional[str] + + + .. py:attribute:: model_type + :type: Optional[str] + + + .. 
py:attribute:: config_overrides + :type: Optional[str] + + + .. py:attribute:: arch_type + :type: Optional[str] + + + .. py:attribute:: config_name + :type: Optional[str] + + + .. py:attribute:: tokenizer_name + :type: Optional[str] + + + .. py:attribute:: cache_dir + :type: Optional[str] + + + .. py:attribute:: use_fast_tokenizer + :type: bool + + + .. py:attribute:: model_revision + :type: str + + + .. py:attribute:: use_auth_token + :type: bool + + + .. py:attribute:: trust_remote_code + :type: bool + + + .. py:attribute:: torch_dtype + :type: Optional[str] + + + .. py:attribute:: use_lora + :type: bool + + + .. py:attribute:: use_qlora + :type: bool + + + .. py:attribute:: bits + :type: int + + + .. py:attribute:: quant_type + :type: str + + + .. py:attribute:: double_quant + :type: bool + + + .. py:attribute:: lora_r + :type: int + + + .. py:attribute:: lora_alpha + :type: int + + + .. py:attribute:: lora_target_modules + :type: List[str] + + + .. py:attribute:: lora_dropout + :type: float + + + .. py:attribute:: save_aggregated_lora + :type: bool + + + .. py:attribute:: use_ram_optimized_load + :type: bool + + + .. py:attribute:: use_flash_attention + :type: bool + + + .. py:attribute:: truncate_to_model_max_length + :type: bool + + + .. py:attribute:: do_rope_scaling + :type: bool + + + .. py:attribute:: rope_pi_ratio + :type: int + + + .. py:attribute:: rope_ntk_ratio + :type: int + + + .. py:attribute:: use_int8 + :type: bool + + + .. py:attribute:: load_in_4bit + :type: Optional[bool] + + + .. py:attribute:: model_max_length + :type: Optional[int] + + + .. py:attribute:: truncation_side + :type: str + + + .. py:attribute:: padding_side + :type: str + + + .. py:attribute:: eos_padding + :type: Optional[bool] + + + .. py:attribute:: ignore_bias_buffers + :type: Optional[bool] + + + .. py:method:: __post_init__() + + +.. py:class:: VisModelArguments + + Bases: :py:obj:`ModelArguments` + + + + Define a class ModelArguments using the dataclass decorator. + The class contains several optional parameters that can be used to configure a model. + + model_name_or_path : str + a string representing the path or name of a pretrained + model checkpoint for weights initialization. If None, a model will be trained from scratch. + + model_type : str + a string representing the type of model to use if training from + scratch. If not provided, a pretrained model will be used. + + config_overrides : str + a string representing the default config settings to override + when training a model from scratch. + + config_name : str + a string representing the name or path of the pretrained config to + use, if different from the model_name_or_path. + + tokenizer_name : str + a string representing the name or path of the pretrained tokenizer + to use, if different from the model_name_or_path. + + cache_dir : str + a string representing the path to the directory where pretrained models + downloaded from huggingface.co will be stored. + + use_fast_tokenizer : bool + a boolean indicating whether to use a fast tokenizer (backed by the + tokenizers library) or not. + + model_revision : str + a string representing the specific model version to use (can be a + branch name, tag name, or commit id). + + use_auth_token : bool + a boolean indicating whether to use the token generated when running + huggingface-cli login (necessary to use this script with private models). + + torch_dtype : str + a string representing the dtype to load the model under. 
If auto is + passed, the dtype will be automatically derived from the model's weights. + + use_ram_optimized_load : bool + a boolean indicating whether to use disk mapping when memory is not + enough. + + use_int8 : bool + a boolean indicating whether to load int8 quantization for inference. + + load_in_4bit : bool + whether to load the model in 4bit + + model_max_length : int + The maximum length of the model. + + truncation_side : str + The side on which the model should have truncation applied. + + arch_type : str + Model architecture type. + padding_side : str + The side on which the tokenizer should have padding applied. + eos_padding : bool + whether to pad with eos token instead of pad token. + ignore_bias_buffers : bool + fix for DDP issues with LM bias/mask buffers - invalid scalar type,`inplace operation. + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:attribute:: low_resource + :type: Optional[bool] + + + .. py:attribute:: custom_model + :type: bool + + + .. py:attribute:: pretrained_language_projection_path + :type: str + + + .. py:attribute:: custom_vision_model + :type: bool + + + .. py:attribute:: image_encoder_name_or_path + :type: Optional[str] + + + .. py:attribute:: qformer_name_or_path + :type: Optional[str] + + + .. py:attribute:: llm_model_name_or_path + :type: Optional[str] + + + .. py:attribute:: use_prompt_cache + :type: bool + + + .. py:attribute:: prompt_cache_path + :type: Optional[str] + + + .. py:attribute:: llava_loading + :type: Optional[bool] + + + .. py:attribute:: with_qformer + :type: Optional[bool] + + + .. py:attribute:: vision_select_layer + :type: Optional[int] + + + .. py:attribute:: llava_pretrain_model_path + :type: Optional[str] + + + .. py:attribute:: save_pretrain_model_path + :type: Optional[str] + + +.. py:class:: DatasetArguments + + + Define a class DatasetArguments using the dataclass decorator. + The class contains several optional parameters that can be used to configure a dataset for a language model. + + dataset_path : str + a string representing the path of the dataset to use. + + dataset_name : str + a string representing the name of the dataset to use. The default value is "customized". + + is_custom_dataset : bool + a boolean indicating whether to use custom data. The default value is False. + + customized_cache_dir : str + a string representing the path to the directory where customized dataset caches will be stored. + + dataset_config_name : str + a string representing the configuration name of the dataset to use (via the datasets library). + + train_file : str + a string representing the path to the input training data file (a text file). + + validation_file : str + a string representing the path to the input evaluation data file to evaluate the perplexity on (a text file). + + max_train_samples : int + an integer indicating the maximum number of training examples to use for debugging or quicker training. + If set, the training dataset will be truncated to this number. + + max_eval_samples: int + an integer indicating the maximum number of evaluation examples to use for debugging or quicker training. + If set, the evaluation dataset will be truncated to this number. + + streaming : bool + a boolean indicating whether to enable streaming mode. + + block_size: int + an integer indicating the optional input sequence length after tokenization. The training dataset will be + truncated in blocks of this size for training. 
+ + train_on_prompt: bool + a boolean indicating whether to train on prompt for conversation datasets such as ShareGPT. + + conversation_template: str + a string representing the template for conversation datasets. + + The class also includes some additional parameters that can be used to configure the dataset further, such as `overwrite_cache`, + `validation_split_percentage`, `preprocessing_num_workers`, `disable_group_texts`, `demo_example_in_prompt`, `explanation_in_prompt`, + `keep_linebreaks`, and `prompt_structure`. + + The field function is used to set default values and provide help messages for each parameter. The Optional type hint is + used to indicate that a parameter is optional. The metadata argument is used to provide additional information about + each parameter, such as a help message. + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:attribute:: dataset_path + :type: Optional[str] + + + .. py:attribute:: dataset_name + :type: Optional[str] + + + .. py:attribute:: is_custom_dataset + :type: Optional[bool] + + + .. py:attribute:: customized_cache_dir + :type: Optional[str] + + + .. py:attribute:: dataset_config_name + :type: Optional[str] + + + .. py:attribute:: train_file + :type: Optional[str] + + + .. py:attribute:: validation_file + :type: Optional[str] + + + .. py:attribute:: max_train_samples + :type: Optional[int] + + + .. py:attribute:: max_eval_samples + :type: Optional[int] + + + .. py:attribute:: streaming + :type: bool + + + .. py:attribute:: block_size + :type: Optional[int] + + + .. py:attribute:: overwrite_cache + :type: bool + + + .. py:attribute:: validation_split_percentage + :type: Optional[int] + + + .. py:attribute:: preprocessing_num_workers + :type: Optional[int] + + + .. py:attribute:: group_texts_batch_size + :type: int + + + .. py:attribute:: disable_group_texts + :type: bool + + + .. py:attribute:: keep_linebreaks + :type: bool + + + .. py:attribute:: test_file + :type: Optional[str] + + + .. py:attribute:: train_on_prompt + :type: bool + + + .. py:attribute:: conversation_template + :type: Optional[str] + + + .. py:method:: __post_init__() + + +.. py:class:: MultiModalDatasetArguments + + Bases: :py:obj:`DatasetArguments` + + + + Define a class DatasetArguments using the dataclass decorator. + The class contains several optional parameters that can be used to configure a dataset for a language model. + + dataset_path : str + a string representing the path of the dataset to use. + + dataset_name : str + a string representing the name of the dataset to use. The default value is "customized". + + is_custom_dataset : bool + a boolean indicating whether to use custom data. The default value is False. + + customized_cache_dir : str + a string representing the path to the directory where customized dataset caches will be stored. + + dataset_config_name : str + a string representing the configuration name of the dataset to use (via the datasets library). + + train_file : str + a string representing the path to the input training data file (a text file). + + validation_file : str + a string representing the path to the input evaluation data file to evaluate the perplexity on (a text file). + + max_train_samples : int + an integer indicating the maximum number of training examples to use for debugging or quicker training. + If set, the training dataset will be truncated to this number. + + max_eval_samples: int + an integer indicating the maximum number of evaluation examples to use for debugging or quicker training. 
+ If set, the evaluation dataset will be truncated to this number. + + streaming : bool + a boolean indicating whether to enable streaming mode. + + block_size: int + an integer indicating the optional input sequence length after tokenization. The training dataset will be + truncated in blocks of this size for training. + + train_on_prompt: bool + a boolean indicating whether to train on prompt for conversation datasets such as ShareGPT. + + conversation_template: str + a string representing the template for conversation datasets. + + The class also includes some additional parameters that can be used to configure the dataset further, such as `overwrite_cache`, + `validation_split_percentage`, `preprocessing_num_workers`, `disable_group_texts`, `demo_example_in_prompt`, `explanation_in_prompt`, + `keep_linebreaks`, and `prompt_structure`. + + The field function is used to set default values and provide help messages for each parameter. The Optional type hint is + used to indicate that a parameter is optional. The metadata argument is used to provide additional information about + each parameter, such as a help message. + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:attribute:: image_folder + :type: Optional[str] + + + .. py:attribute:: image_aspect_ratio + :type: Optional[str] + + + .. py:attribute:: is_multimodal + :type: Optional[bool] + + + .. py:attribute:: use_image_start_end + :type: Optional[bool] + + + .. py:attribute:: sep_style + :type: Optional[str] + + +.. py:class:: FinetunerArguments + + Bases: :py:obj:`transformers.TrainingArguments` + + + + Adapt transformers.TrainingArguments + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:attribute:: eval_dataset_path + :type: Optional[str] + + + .. py:attribute:: remove_unused_columns + :type: Optional[bool] + + + .. py:attribute:: finetune_part + :type: Optional[str] + + + .. py:attribute:: save_language_projection + :type: Optional[str] + + + .. py:attribute:: use_lisa + :type: bool + + + .. py:attribute:: lisa_activated_layers + :type: int + + + .. py:attribute:: lisa_interval_steps + :type: int + + + .. py:attribute:: lisa_layers_attribute + :type: str + + + .. py:attribute:: use_customized_optim + :type: bool + + + .. py:attribute:: customized_optim + :type: str + + + .. py:attribute:: customized_optim_args + :type: str + + + .. py:attribute:: optim_dummy_beta1 + :type: float + + + .. py:attribute:: optim_dummy_beta2 + :type: float + + + .. py:attribute:: optim_adam_beta1 + :type: float + + + .. py:attribute:: optim_adam_beta2 + :type: float + + + .. py:attribute:: optim_beta1 + :type: float + + + .. py:attribute:: optim_beta2 + :type: float + + + .. py:attribute:: optim_beta3 + :type: float + + + .. py:attribute:: optim_momentum + :type: float + + + .. py:attribute:: optim_weight_decay + :type: float + + +.. py:class:: RewardModelTunerArguments + + Bases: :py:obj:`FinetunerArguments` + + + + Arguments for reward modeling. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + +.. py:class:: EvaluatorArguments + + + Define a class EvaluatorArguments using the dataclass decorator. The class contains several optional + parameters that can be used to configure a evaluator. + + local_rank : str + For distributed training: local_rank + + random_shuffle : bool + + use_wandb : bool + + random_seed : int, default = 1 + + output_dir : str, default = './output_dir', + + mixed_precision : str, choice from ["bf16","fp16"]. 
+ mixed precision mode, whether to use bf16 or fp16 + + deepspeed : + Enable deepspeed and pass the path to deepspeed json config file (e.g. ds_config.json) or an already + loaded json file as a dict + + temperature : float + An argument of model.generate in huggingface to control the diversity of generation. + + repetition_penalty : float + An argument of model.generate in huggingface to penalize repetitions. + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:attribute:: local_rank + :type: int + + + .. py:attribute:: random_shuffle + :type: Optional[bool] + + + .. py:attribute:: use_wandb + :type: Optional[bool] + + + .. py:attribute:: random_seed + :type: Optional[int] + + + .. py:attribute:: output_dir + :type: Optional[str] + + + .. py:attribute:: mixed_precision + :type: Optional[str] + + + .. py:attribute:: deepspeed + :type: Optional[str] + + + .. py:attribute:: answer_type + :type: Optional[str] + + + .. py:attribute:: prompt_structure + :type: Optional[str] + + + .. py:attribute:: evaluate_block_size + :type: Optional[int] + + + .. py:attribute:: metric + :type: Optional[str] + + + .. py:attribute:: inference_batch_size_per_device + :type: Optional[int] + + + .. py:attribute:: use_accelerator_for_evaluator + :type: bool + + + .. py:attribute:: temperature + :type: float + + + .. py:attribute:: repetition_penalty + :type: float + + + .. py:attribute:: max_new_tokens + :type: int + + +.. py:class:: InferencerArguments + + + Define a class InferencerArguments using the dataclass decorator. The class contains several optional + parameters that can be used to configure a inferencer. + + local_rank : str + For distributed training: local_rank + random_seed : int, default = 1 + inference_batch_size : int, default = 1 + deepspeed : + Enable deepspeed and pass the path to deepspeed json config file (e.g. ds_config.json) or an already + loaded json file as a dict + mixed_precision : str, choice from ["bf16","fp16"]. + mixed precision mode, whether to use bf16 or fp16 + temperature : float + An argument of model.generate in huggingface to control the diversity of generation. + repetition_penalty : float + An argument of model.generate in huggingface to penalize repetitions. + use_beam_search : Optional[bool] + Whether to use beam search during inference, By default False. + num_output_sequences : Optional[int] + Number of output sequences to return for the given prompt, + currently only used in vllm inference, By default 8. + top_p : Optional[float] + top_p for sampling, By default 1.0. + top_k : Optional[int] + top_k for sampling, By default -1 (no top_k). + additional_stop_token_ids : Optional[List[int]] + the ids of the end of sentence tokens, By default []. + apply_chat_template : Optional[bool] + Whether to apply chat template, By default True. + save_results : Optional[bool] + Whether to save inference results, By default False. + results_path : Optional[str] + The **json file** path of inference results, By default None. + enable_decode_inference_result : Optional[bool] + Whether to detokenize the inference results. + + NOTE: For iterative align pipelines, whether to detokenize depends on + the homogeneity of the policy model and the reward model + (i.e., if they have the same tokenizer). + use_vllm: bool, optional + Whether to use VLLM for inference, By default False. + vllm_tensor_parallel_size: int, optional + The tensor parallel size for VLLM inference. + vllm_gpu_memory_utilization: float, optional + The GPU memory utilization for VLLM inference. 
The proportion of GPU + memory (per GPU) to use for VLLM inference. + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:attribute:: device + :type: str + + + .. py:attribute:: local_rank + :type: int + + + .. py:attribute:: inference_batch_size + :type: int + + + .. py:attribute:: vllm_inference_batch_size + :type: int + + + .. py:attribute:: temperature + :type: float + + + .. py:attribute:: repetition_penalty + :type: float + + + .. py:attribute:: max_new_tokens + :type: int + + + .. py:attribute:: random_seed + :type: Optional[int] + + + .. py:attribute:: deepspeed + :type: Optional[str] + + + .. py:attribute:: mixed_precision + :type: Optional[str] + + + .. py:attribute:: do_sample + :type: Optional[bool] + + + .. py:attribute:: use_accelerator + :type: bool + + + .. py:attribute:: use_beam_search + :type: Optional[bool] + + + .. py:attribute:: num_output_sequences + :type: Optional[int] + + + .. py:attribute:: top_p + :type: Optional[float] + + + .. py:attribute:: top_k + :type: Optional[int] + + + .. py:attribute:: additional_stop_token_ids + :type: Optional[List[int]] + + + .. py:attribute:: apply_chat_template + :type: Optional[bool] + + + .. py:attribute:: enable_decode_inference_result + :type: Optional[bool] + + + .. py:attribute:: tensor_parallel_size + :type: Optional[int] + + + .. py:attribute:: enable_distributed_inference + :type: Optional[bool] + + + .. py:attribute:: distributed_inference_num_instances + :type: Optional[int] + + + .. py:attribute:: use_vllm + :type: bool + + + .. py:attribute:: vllm_tensor_parallel_size + :type: Optional[int] + + + .. py:attribute:: vllm_gpu_memory_utilization + :type: Optional[float] + + + .. py:attribute:: save_results + :type: Optional[bool] + + + .. py:attribute:: results_path + :type: Optional[str] + + + .. py:method:: __post_init__() + + +.. py:class:: RaftAlignerArguments + + Bases: :py:obj:`transformers.TrainingArguments` + + + + Define a class RaftAlignerArguments to configure raft aligner. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:attribute:: output_reward_path + :type: Optional[str] + + + .. py:attribute:: output_min_length + :type: Optional[int] + + + .. py:attribute:: output_max_length + :type: Optional[int] + + + .. py:attribute:: num_raft_iteration + :type: Optional[int] + + + .. py:attribute:: raft_batch_size + :type: Optional[int] + + + .. py:attribute:: top_reward_percentage + :type: Optional[float] + + + .. py:attribute:: inference_batch_size_per_device + :type: Optional[int] + + + .. py:attribute:: collection_strategy + :type: Optional[str] + + +.. py:class:: BenchmarkingArguments + + .. py:attribute:: dataset_name + :type: Optional[str] + + + .. py:attribute:: lm_evaluation_metric + :type: Optional[str] + + +.. py:class:: DPOAlignerArguments + + + The arguments for the DPO training script. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:attribute:: local_rank + :type: int + + + .. py:attribute:: beta + :type: Optional[float] + + + .. py:attribute:: learning_rate + :type: Optional[float] + + + .. py:attribute:: lr_scheduler_type + :type: Optional[str] + + + .. py:attribute:: warmup_steps + :type: Optional[int] + + + .. py:attribute:: weight_decay + :type: Optional[float] + + + .. py:attribute:: optimizer_type + :type: Optional[str] + + + .. py:attribute:: per_device_train_batch_size + :type: Optional[int] + + + .. py:attribute:: per_device_eval_batch_size + :type: Optional[int] + + + .. 
py:attribute:: gradient_accumulation_steps + :type: Optional[int] + + + .. py:attribute:: gradient_checkpointing + :type: Optional[bool] + + + .. py:attribute:: gradient_checkpointing_use_reentrant + :type: Optional[bool] + + + .. py:attribute:: max_prompt_length + :type: Optional[int] + + + .. py:attribute:: max_length + :type: Optional[int] + + + .. py:attribute:: max_steps + :type: Optional[int] + + + .. py:attribute:: logging_steps + :type: Optional[int] + + + .. py:attribute:: save_steps + :type: Optional[int] + + + .. py:attribute:: eval_steps + :type: Optional[int] + + + .. py:attribute:: output_dir + :type: Optional[str] + + + .. py:attribute:: log_freq + :type: Optional[int] + + + .. py:attribute:: sanity_check + :type: Optional[bool] + + + .. py:attribute:: report_to + :type: Optional[str] + + + .. py:attribute:: seed + :type: Optional[int] + + + .. py:attribute:: run_name + :type: Optional[str] + + +.. py:class:: DPOv2AlignerArguments + + Bases: :py:obj:`FinetunerArguments` + + + + The arguments for the DPOv2 training script. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:attribute:: random_seed + :type: Optional[int] + + + .. py:attribute:: accelerate_config_file + :type: Optional[str] + + + .. py:attribute:: margin_scale + :type: Optional[float] + + + .. py:attribute:: sampling_paired_method + :type: Optional[str] + + + .. py:attribute:: length_penalty + :type: Optional[float] + + + .. py:attribute:: max_length + :type: Optional[int] + + + .. py:attribute:: max_prompt_length + :type: Optional[int] + + + .. py:attribute:: mask_prompt + :type: Optional[bool] + + + .. py:attribute:: beta + :type: Optional[float] + + + .. py:attribute:: loss_type + :type: Optional[str] + + +.. py:class:: IterativeAlignerArguments + + Bases: :py:obj:`InferencerArguments` + + + + Arguments for iterative aligners. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:attribute:: dataset_path_list + :type: List[str] + + + .. py:attribute:: initial_iter_idx + :type: int + + +.. py:class:: IterativeDPOAlignerArguments + + Bases: :py:obj:`IterativeAlignerArguments`, :py:obj:`DPOv2AlignerArguments` + + + + Arguments for iterative DPO aligners. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:attribute:: output_dir + :type: Optional[str] + + + .. py:attribute:: reward_model_inference_batch_size + :type: int + + + .. py:attribute:: reward_model_inference_block_size + :type: int + + + .. py:attribute:: do_response_generation + :type: bool + + + .. py:attribute:: do_scoring + :type: bool + + + .. py:attribute:: do_dpo_align + :type: bool + + +.. py:data:: PIPELINE_ARGUMENT_MAPPING + +.. py:class:: AutoArguments + + + Automatically choose arguments from FinetunerArguments or EvaluatorArguments. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:method:: get_pipeline_args_class() + + diff --git a/_sources/autoapi/lmflow/datasets/dataset/index.rst.txt b/_sources/autoapi/lmflow/datasets/dataset/index.rst.txt new file mode 100644 index 000000000..9016c272c --- /dev/null +++ b/_sources/autoapi/lmflow/datasets/dataset/index.rst.txt @@ -0,0 +1,642 @@ +lmflow.datasets.dataset +======================= + +.. py:module:: lmflow.datasets.dataset + +.. autoapi-nested-parse:: + + This Python code defines a class Dataset with methods for initializing, loading, + and manipulating datasets from different backends such as Hugging Face and JSON. 
+ + The `Dataset` class includes methods for loading datasets from a dictionary and a Hugging + Face dataset, mapping datasets, and retrieving the backend dataset and arguments. + + .. + !! processed by numpydoc !! + + +Attributes +---------- + +.. autoapisummary:: + + lmflow.datasets.dataset.logger + lmflow.datasets.dataset.DATASET_TYPES + lmflow.datasets.dataset.KEY_TYPE + lmflow.datasets.dataset.KEY_INSTANCES + lmflow.datasets.dataset.KEY_SCORE + + +Classes +------- + +.. autoapisummary:: + + lmflow.datasets.dataset.Dataset + + +Module Contents +--------------- + +.. py:data:: logger + +.. py:data:: DATASET_TYPES + :value: ['text_only', 'text2text', 'float_only', 'image_text', 'conversation', 'paired_conversation',... + + +.. py:data:: KEY_TYPE + :value: 'type' + + +.. py:data:: KEY_INSTANCES + :value: 'instances' + + +.. py:data:: KEY_SCORE + :value: 'score' + + +.. py:class:: Dataset(data_args: lmflow.args.DatasetArguments = None, backend: str = 'huggingface', *args, **kwargs) + + + Initializes the Dataset object with the given parameters. + + + :Parameters: + + **data_args** : DatasetArguments object. + Contains the arguments required to load the dataset. + + **backend** : str, default="huggingface" + A string representing the dataset backend. Defaults to "huggingface". + + **args** : Optional. + Positional arguments. + + **kwargs** : Optional. + Keyword arguments. + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:attribute:: data_args + + + .. py:attribute:: backend + + + .. py:attribute:: backend_dataset + :value: None + + + + .. py:attribute:: type + :value: None + + + + .. py:attribute:: dataset_path + + + .. py:method:: __len__() + + + .. py:method:: _check_data_format() + + + Checks if data type and data structure matches + + Raise messages with hints if not matched. + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: from_dict(dict_obj: dict, *args, **kwargs) + + + Create a Dataset object from a dictionary. + + Return a Dataset given a dict with format: + { + "type": TYPE, + "instances": [ + { + "key_1": VALUE_1.1, + "key_2": VALUE_1.2, + ... + }, + { + "key_1": VALUE_2.1, + "key_2": VALUE_2.2, + ... + }, + ... + ] + } + + :Parameters: + + **dict_obj** : dict. + A dictionary containing the dataset information. + + **args** : Optional. + Positional arguments. + + **kwargs** : Optional. + Keyword arguments. + + :Returns: + + **self** : Dataset object. + .. + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: create_from_dict(dict_obj, *args, **kwargs) + :classmethod: + + + + + + + + :Returns: + + Returns a Dataset object given a dict. + .. + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: to_dict() + + + + + + + :Returns: + + Return a dict represents the dataset: + { + "type": TYPE, + "instances": [ + { + "key_1": VALUE_1.1, + "key_2": VALUE_1.2, + ... + }, + { + "key_1": VALUE_2.1, + "key_2": VALUE_2.2, + ... + }, + ... + ] + } + + A python dict object represents the content of this dataset. + .. + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: to_list() + + + Returns a list of instances. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: map(*args, **kwargs) + + + + + + :Parameters: + + **args** : Optional. + Positional arguments. + + **kwargs** : Optional. + Keyword arguments. + + :Returns: + + **self** : Dataset object. + .. + + + + + + + + + + + + + + .. + !! processed by numpydoc !! 
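+
+   A minimal usage sketch of the ``from_dict``/``to_dict`` layout documented
+   above. Illustrative only: it assumes the ``text_only`` type stores each
+   instance under a ``"text"`` key, which this page does not spell out.
+
+   .. code-block:: python
+
+      from lmflow.datasets.dataset import KEY_INSTANCES, KEY_TYPE, Dataset
+
+      # A dict in the documented {"type": ..., "instances": [...]} layout.
+      # The "text" field name inside each instance is an assumption.
+      data_dict = {
+          KEY_TYPE: "text_only",
+          KEY_INSTANCES: [
+              {"text": "LMFlow makes finetuning accessible."},
+              {"text": "A second toy instance."},
+          ],
+      }
+
+      dataset = Dataset.create_from_dict(data_dict)  # classmethod shown above
+      assert dataset.get_type() == "text_only"
+      assert len(dataset) == 2
+      roundtrip = dataset.to_dict()  # same {"type", "instances"} layout back
+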
+ + + .. py:method:: get_backend() -> Optional[str] + + + + + + + :Returns: + + self.backend + .. + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: get_backend_dataset() + + + + + + + :Returns: + + self.backend_dataset + .. + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: get_fingerprint() + + + + + + + :Returns: + + Fingerprint of the backend_dataset which controls the cache + .. + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: get_data_args() + + + + + + + :Returns: + + self.data_args + .. + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: get_type() -> str + + + + + + + :Returns: + + self.type + .. + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: save(file_path: str, format: str = 'json') + + + Save the dataset to a json file. + + + :Parameters: + + **file_path** : str. + The path to the file where the dataset will be saved. + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: sample(n: int, seed: int = 42) + + + Sample n instances from the dataset. + + + :Parameters: + + **n** : int. + The number of instances to sample from the dataset. + + :Returns: + + **sample_dataset** : Dataset object. + A new dataset object containing the sampled instances. + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: train_test_split(test_size: float = 0.2, shuffle: bool = True, seed: int = 42) + + + Split the dataset into training and testing sets. + + + :Parameters: + + **test_size** : float, default=0.2. + The proportion of the dataset that will be used for testing. + + :Returns: + + **train_dataset** : Dataset object. + A new dataset object containing the training instances. + + **test_dataset** : Dataset object. + A new dataset object containing the testing instances. + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: drop_instances(indices: list) + + + Drop instances from the dataset. + + + :Parameters: + + **indices** : list. + A list of indices of the instances to drop from the dataset. + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: sanity_check(drop_invalid: bool = True) + + + Perform a sanity check on the dataset. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: hf_dataset_sanity_check(drop_invalid: bool = True) + + + Perform a sanity check on the HuggingFace dataset. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + diff --git a/_sources/autoapi/lmflow/datasets/index.rst.txt b/_sources/autoapi/lmflow/datasets/index.rst.txt new file mode 100644 index 000000000..91df52f66 --- /dev/null +++ b/_sources/autoapi/lmflow/datasets/index.rst.txt @@ -0,0 +1,685 @@ +lmflow.datasets +=============== + +.. py:module:: lmflow.datasets + +.. autoapi-nested-parse:: + + + This Python code defines a class Dataset with methods for initializing, loading, + and manipulating datasets from different backends such as Hugging Face and JSON. + + The `Dataset` class includes methods for loading datasets from a dictionary and a Hugging + Face dataset, mapping datasets, and retrieving the backend dataset and arguments. + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + +Submodules +---------- + +.. toctree:: + :maxdepth: 1 + + /autoapi/lmflow/datasets/dataset/index + /autoapi/lmflow/datasets/multi_modal_dataset/index + + +Classes +------- + +.. 
autoapisummary:: + + lmflow.datasets.Dataset + lmflow.datasets.CustomMultiModalDataset + + +Package Contents +---------------- + +.. py:class:: Dataset(data_args: lmflow.args.DatasetArguments = None, backend: str = 'huggingface', *args, **kwargs) + + + Initializes the Dataset object with the given parameters. + + + :Parameters: + + **data_args** : DatasetArguments object. + Contains the arguments required to load the dataset. + + **backend** : str, default="huggingface" + A string representing the dataset backend. Defaults to "huggingface". + + **args** : Optional. + Positional arguments. + + **kwargs** : Optional. + Keyword arguments. + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:attribute:: data_args + + + .. py:attribute:: backend + + + .. py:attribute:: backend_dataset + :value: None + + + + .. py:attribute:: type + :value: None + + + + .. py:attribute:: dataset_path + + + .. py:method:: __len__() + + + .. py:method:: _check_data_format() + + + Checks if data type and data structure matches + + Raise messages with hints if not matched. + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: from_dict(dict_obj: dict, *args, **kwargs) + + + Create a Dataset object from a dictionary. + + Return a Dataset given a dict with format: + { + "type": TYPE, + "instances": [ + { + "key_1": VALUE_1.1, + "key_2": VALUE_1.2, + ... + }, + { + "key_1": VALUE_2.1, + "key_2": VALUE_2.2, + ... + }, + ... + ] + } + + :Parameters: + + **dict_obj** : dict. + A dictionary containing the dataset information. + + **args** : Optional. + Positional arguments. + + **kwargs** : Optional. + Keyword arguments. + + :Returns: + + **self** : Dataset object. + .. + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: create_from_dict(dict_obj, *args, **kwargs) + :classmethod: + + + + + + + + :Returns: + + Returns a Dataset object given a dict. + .. + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: to_dict() + + + + + + + :Returns: + + Return a dict represents the dataset: + { + "type": TYPE, + "instances": [ + { + "key_1": VALUE_1.1, + "key_2": VALUE_1.2, + ... + }, + { + "key_1": VALUE_2.1, + "key_2": VALUE_2.2, + ... + }, + ... + ] + } + + A python dict object represents the content of this dataset. + .. + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: to_list() + + + Returns a list of instances. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: map(*args, **kwargs) + + + + + + :Parameters: + + **args** : Optional. + Positional arguments. + + **kwargs** : Optional. + Keyword arguments. + + :Returns: + + **self** : Dataset object. + .. + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: get_backend() -> Optional[str] + + + + + + + :Returns: + + self.backend + .. + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: get_backend_dataset() + + + + + + + :Returns: + + self.backend_dataset + .. + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: get_fingerprint() + + + + + + + :Returns: + + Fingerprint of the backend_dataset which controls the cache + .. + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: get_data_args() + + + + + + + :Returns: + + self.data_args + .. + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: get_type() -> str + + + + + + + :Returns: + + self.type + .. 
+ + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: save(file_path: str, format: str = 'json') + + + Save the dataset to a json file. + + + :Parameters: + + **file_path** : str. + The path to the file where the dataset will be saved. + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: sample(n: int, seed: int = 42) + + + Sample n instances from the dataset. + + + :Parameters: + + **n** : int. + The number of instances to sample from the dataset. + + :Returns: + + **sample_dataset** : Dataset object. + A new dataset object containing the sampled instances. + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: train_test_split(test_size: float = 0.2, shuffle: bool = True, seed: int = 42) + + + Split the dataset into training and testing sets. + + + :Parameters: + + **test_size** : float, default=0.2. + The proportion of the dataset that will be used for testing. + + :Returns: + + **train_dataset** : Dataset object. + A new dataset object containing the training instances. + + **test_dataset** : Dataset object. + A new dataset object containing the testing instances. + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: drop_instances(indices: list) + + + Drop instances from the dataset. + + + :Parameters: + + **indices** : list. + A list of indices of the instances to drop from the dataset. + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: sanity_check(drop_invalid: bool = True) + + + Perform a sanity check on the dataset. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: hf_dataset_sanity_check(drop_invalid: bool = True) + + + Perform a sanity check on the HuggingFace dataset. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + +.. py:class:: CustomMultiModalDataset(dataset_path: str, data_args: lmflow.args.DatasetArguments) + + Bases: :py:obj:`torch.utils.data.Dataset` + + + + Dataset for Multi Modal data + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:attribute:: data_dict + + + .. py:attribute:: data_dict + + + .. py:attribute:: data_args + + + .. py:attribute:: image_folder + + + .. py:method:: __len__() + + + .. py:method:: register_tokenizer(tokenizer, image_processor=None) + + + .. py:method:: __getitem__(i) + + diff --git a/_sources/autoapi/lmflow/datasets/multi_modal_dataset/index.rst.txt b/_sources/autoapi/lmflow/datasets/multi_modal_dataset/index.rst.txt new file mode 100644 index 000000000..ec524ecac --- /dev/null +++ b/_sources/autoapi/lmflow/datasets/multi_modal_dataset/index.rst.txt @@ -0,0 +1,180 @@ +lmflow.datasets.multi_modal_dataset +=================================== + +.. py:module:: lmflow.datasets.multi_modal_dataset + +.. autoapi-nested-parse:: + + This Python code defines a class Multi Modal Dataset. + + .. + !! processed by numpydoc !! + + +Classes +------- + +.. autoapisummary:: + + lmflow.datasets.multi_modal_dataset.CustomMultiModalDataset + lmflow.datasets.multi_modal_dataset.DataCollatorForSupervisedDataset + + +Functions +--------- + +.. autoapisummary:: + + lmflow.datasets.multi_modal_dataset.preprocess_multimodal_llava + lmflow.datasets.multi_modal_dataset.tokenizer_image_token + lmflow.datasets.multi_modal_dataset.preprocess_llama_from_llava_plain + lmflow.datasets.multi_modal_dataset.preprocess_llama_from_llava_v1 + + +Module Contents +--------------- + +.. 
py:class:: CustomMultiModalDataset(dataset_path: str, data_args: lmflow.args.DatasetArguments) + + Bases: :py:obj:`torch.utils.data.Dataset` + + + + Dataset for Multi Modal data + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:attribute:: data_dict + + + .. py:attribute:: data_dict + + + .. py:attribute:: data_args + + + .. py:attribute:: image_folder + + + .. py:method:: __len__() + + + .. py:method:: register_tokenizer(tokenizer, image_processor=None) + + + .. py:method:: __getitem__(i) + + +.. py:function:: preprocess_multimodal_llava(sources, data_args) + +.. py:function:: tokenizer_image_token(prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None) + +.. py:function:: preprocess_llama_from_llava_plain(sources, tokenizer: transformers.PreTrainedTokenizer, has_image: bool = False) + + + This function just add the image in the front of text. + And don't add any prompt. + Args: + sources: The input data with text and image. + tokenizer: The tokenizer to process text. + has_image: Whether the input data has image. + Returns: + The input_ids and labels for the model. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + +.. py:function:: preprocess_llama_from_llava_v1(sources, tokenizer: transformers.PreTrainedTokenizer, has_image: bool = False) + + + This function add the prompt and then put the image after the prompt. + So it needs additional code to generate the target label. + Args: + sources: The input data with text and image. + tokenizer: The tokenizer to process text. + has_image: Whether the input data has image. + Returns: + The input_ids and labels for the model. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + +.. py:class:: DataCollatorForSupervisedDataset + + Bases: :py:obj:`object` + + + + Collate examples for supervised fine-tuning. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:attribute:: tokenizer + :type: transformers.PreTrainedTokenizer + + + .. py:method:: __call__(instances) + + diff --git a/_sources/autoapi/lmflow/index.rst.txt b/_sources/autoapi/lmflow/index.rst.txt new file mode 100644 index 000000000..99bf47a0e --- /dev/null +++ b/_sources/autoapi/lmflow/index.rst.txt @@ -0,0 +1,48 @@ +lmflow +====== + +.. py:module:: lmflow + + +Subpackages +----------- + +.. toctree:: + :maxdepth: 1 + + /autoapi/lmflow/datasets/index + /autoapi/lmflow/models/index + /autoapi/lmflow/optim/index + /autoapi/lmflow/pipeline/index + /autoapi/lmflow/tokenization/index + /autoapi/lmflow/utils/index + + +Submodules +---------- + +.. toctree:: + :maxdepth: 1 + + /autoapi/lmflow/args/index + /autoapi/lmflow/version/index + + +Attributes +---------- + +.. autoapisummary:: + + lmflow.internal_version + lmflow.__version__ + + +Package Contents +---------------- + +.. py:data:: internal_version + :value: '0.0.7' + + +.. py:data:: __version__ + diff --git a/_sources/autoapi/lmflow/models/auto_model/index.rst.txt b/_sources/autoapi/lmflow/models/auto_model/index.rst.txt new file mode 100644 index 000000000..02555c027 --- /dev/null +++ b/_sources/autoapi/lmflow/models/auto_model/index.rst.txt @@ -0,0 +1,31 @@ +lmflow.models.auto_model +======================== + +.. py:module:: lmflow.models.auto_model + +.. autoapi-nested-parse:: + + Automatically get correct model type. + + .. + !! processed by numpydoc !! + + +Classes +------- + +.. autoapisummary:: + + lmflow.models.auto_model.AutoModel + + +Module Contents +--------------- + +.. py:class:: AutoModel + + .. 
py:method:: get_model(model_args, *args, **kwargs) + :classmethod: + + + diff --git a/_sources/autoapi/lmflow/models/base_model/index.rst.txt b/_sources/autoapi/lmflow/models/base_model/index.rst.txt new file mode 100644 index 000000000..ddc469cfe --- /dev/null +++ b/_sources/autoapi/lmflow/models/base_model/index.rst.txt @@ -0,0 +1,51 @@ +lmflow.models.base_model +======================== + +.. py:module:: lmflow.models.base_model + +.. autoapi-nested-parse:: + + Base model class. + + .. + !! processed by numpydoc !! + + +Classes +------- + +.. autoapisummary:: + + lmflow.models.base_model.BaseModel + + +Module Contents +--------------- + +.. py:class:: BaseModel(*args, **kwargs) + + Bases: :py:obj:`abc.ABC` + + + + Helper class that provides a standard way to create an ABC using + inheritance. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + diff --git a/_sources/autoapi/lmflow/models/decoder_model/index.rst.txt b/_sources/autoapi/lmflow/models/decoder_model/index.rst.txt new file mode 100644 index 000000000..6ff8c3ae3 --- /dev/null +++ b/_sources/autoapi/lmflow/models/decoder_model/index.rst.txt @@ -0,0 +1,39 @@ +lmflow.models.decoder_model +=========================== + +.. py:module:: lmflow.models.decoder_model + +.. autoapi-nested-parse:: + + A one-line summary of the module or program, terminated by a period. + + Leave one blank line. The rest of this docstring should contain an + overall description of the module or program. Optionally, it may also + contain a brief description of exported classes and functions and/or usage + examples. + + Typical usage example: + + foo = ClassFoo() + bar = foo.FunctionBar() + + .. + !! processed by numpydoc !! + + +Classes +------- + +.. autoapisummary:: + + lmflow.models.decoder_model.DecoderModel + + +Module Contents +--------------- + +.. py:class:: DecoderModel(*args, **kwargs) + + Bases: :py:obj:`lmflow.models.base_model.BaseModel` + + diff --git a/_sources/autoapi/lmflow/models/encoder_decoder_model/index.rst.txt b/_sources/autoapi/lmflow/models/encoder_decoder_model/index.rst.txt new file mode 100644 index 000000000..5f1c1c6a8 --- /dev/null +++ b/_sources/autoapi/lmflow/models/encoder_decoder_model/index.rst.txt @@ -0,0 +1,61 @@ +lmflow.models.encoder_decoder_model +=================================== + +.. py:module:: lmflow.models.encoder_decoder_model + +.. autoapi-nested-parse:: + + A one-line summary of the module or program, terminated by a period. + + Leave one blank line. The rest of this docstring should contain an + overall description of the module or program. Optionally, it may also + contain a brief description of exported classes and functions and/or usage + examples. + + Typical usage example: + + foo = ClassFoo() + bar = foo.FunctionBar() + + .. + !! processed by numpydoc !! + + +Classes +------- + +.. autoapisummary:: + + lmflow.models.encoder_decoder_model.EncoderDecoderModel + + +Module Contents +--------------- + +.. py:class:: EncoderDecoderModel(*args, **kwargs) + + Bases: :py:obj:`lmflow.models.base_model.BaseModel` + + + + Helper class that provides a standard way to create an ABC using + inheritance. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! 
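+
+The ``AutoModel.get_model`` classmethod documented above selects the concrete
+model wrapper from the model arguments. A rough sketch, not the canonical API:
+the ``model_name_or_path`` field and the forwarding of extra keyword arguments
+are assumptions based only on the signatures shown on this page.
+
+.. code-block:: python
+
+   from lmflow.args import ModelArguments
+   from lmflow.models.auto_model import AutoModel
+
+   # `model_name_or_path` is an assumed field name; check ModelArguments in
+   # lmflow.args for the exact fields accepted by your LMFlow version.
+   model_args = ModelArguments(model_name_or_path="gpt2")
+
+   # get_model(model_args, *args, **kwargs) dispatches to the right wrapper
+   # class (e.g. a decoder-only model); extra kwargs such as tune_strategy
+   # are presumably forwarded to that class's constructor.
+   model = AutoModel.get_model(model_args, tune_strategy="none")
+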
+ diff --git a/_sources/autoapi/lmflow/models/hf_decoder_model/index.rst.txt b/_sources/autoapi/lmflow/models/hf_decoder_model/index.rst.txt new file mode 100644 index 000000000..0d0aa004a --- /dev/null +++ b/_sources/autoapi/lmflow/models/hf_decoder_model/index.rst.txt @@ -0,0 +1,438 @@ +lmflow.models.hf_decoder_model +============================== + +.. py:module:: lmflow.models.hf_decoder_model + +.. autoapi-nested-parse:: + + This is a class called HFDecoderModel which is a wrapper around transformers model and + tokenizer classes. It has several methods such as __init__, tokenize, and train that are + used for training and fine-tuning the model. The __init__ method takes in several arguments + such as model_args, tune_strategy, and ds_config, which are used to load the pretrained + model and tokenizer, and initialize the training settings. + + The tokenize method is used to tokenize the input text and return the input IDs and attention + masks that can be fed to the model for training or inference. + + This class supports different tune_strategy options such as 'normal', 'none', 'lora', and + 'adapter', which allow for different fine-tuning settings of the model. However, the 'lora' + and 'adapter' strategies are not yet implemented. + + Overall, this class provides a convenient interface for loading and fine-tuning transformer + models and can be used for various NLP tasks such as language modeling, text classification, + and question answering. + + .. + !! processed by numpydoc !! + + +Attributes +---------- + +.. autoapisummary:: + + lmflow.models.hf_decoder_model.logger + lmflow.models.hf_decoder_model.MODELS_SUPPORT_FLASH_ATTENTION + lmflow.models.hf_decoder_model.GPU_SUPPORT_FLASH_ATTENTION + lmflow.models.hf_decoder_model.GPU_SUPPORT_FLASH_ATTENTION + + +Classes +------- + +.. autoapisummary:: + + lmflow.models.hf_decoder_model.HFDecoderModel + + +Module Contents +--------------- + +.. py:data:: logger + +.. py:data:: MODELS_SUPPORT_FLASH_ATTENTION + :value: ['LlamaForCausalLM', 'GPTNeoForCausalLM', 'GPT2ForCausalLM', 'BloomForCausalLM'] + + +.. py:data:: GPU_SUPPORT_FLASH_ATTENTION + +.. py:data:: GPU_SUPPORT_FLASH_ATTENTION + +.. py:class:: HFDecoderModel(model_args, tune_strategy='normal', ds_config=None, device='gpu', use_accelerator=False, *args, **kwargs) + + Bases: :py:obj:`lmflow.models.decoder_model.DecoderModel`, :py:obj:`lmflow.models.hf_model_mixin.HFModelMixin`, :py:obj:`lmflow.models.interfaces.tunable.Tunable` + + + + Initializes a HFDecoderModel instance. + + + :Parameters: + + **model_args** + Model arguments such as model name, path, revision, etc. + + **tune_strategy** : str or none, default="normal". + A string representing the dataset backend. Defaults to "huggingface". + + **ds_config** + Deepspeed configuations. + + **args** : Optional. + Positional arguments. + + **kwargs** : Optional. + Keyword arguments. + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:method:: tokenize(dataset, add_special_tokens=True, *args, **kwargs) -> lmflow.datasets.dataset.Dataset + + + Tokenize the full dataset. + + + :Parameters: + + **dataset** : lmflow.datasets.Dataset. + .. + + **args** : Optional. + Positional arguments. + + **kwargs** : Optional. + Keyword arguments. + + :Returns: + + tokenized_datasets + The tokenized dataset, without any leading or trailing special + tokens (normally they are Begin-Of-Sentence or End-Of-Sentence + tokens). + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. 
py:method:: encode(input: Union[str, List[str]], *args, **kwargs) -> Union[List[int], List[List[int]]] + + + Perform encoding process of the tokenizer. + + + :Parameters: + + **inputs** : str or list. + The text sequence. + + **args** : Optional. + Positional arguments. + + **kwargs** : Optional. + Keyword arguments. + + :Returns: + + outputs + if string input,return the tokenized inputs. + "Hello,world!"-> [101, 7592, 1010, 2088, 102] + if batch input,return {input_ids,attention_mask,token_type_ids} + ["Hello,world!","Hello!"]-> {'input_ids': tensor([[ 101, 7592, 1010, 2088, 102],...),'attention_mask': tensor([[1, 1, 1, 1, 1],[0,0,1,1,1]])} + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: decode(input, *args, **kwargs) -> Union[str, List[str]] + + + Perform decoding process of the tokenizer. + + + :Parameters: + + **inputs** : list or tensor. + The token sequence. + + **args** : Optional. + Positional arguments. + + **kwargs** : Optional. + Keyword arguments. + + :Returns: + + outputs + The text decoded from the token inputs. + if batch input,return the list of text + [[101, 7592, 1010, 2088, 102],[101, 7592, 1010, 2088, 102]]-> ["Hello,world!","Hello,world!" + if single input,return the text + [101, 7592, 1010, 2088, 102]-> "Hello,world!" + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: inference(inputs, release_gpu: bool = False, use_vllm: bool = False, **kwargs) + + + Perform generation process of the model. + + + :Parameters: + + **inputs** + The sequence used as a prompt for the generation or as model inputs to the model. + When using vllm inference, this should be a string or a list of strings. + When using normal inference, this should be a tensor. + + **release_gpu** : bool, optional + Whether to release the GPU resource after inference, by default False. + + **use_vllm** : bool, optional + Whether to use VLLM for inference, by default False. + + **kwargs** : Optional. + Keyword arguments. + + :Returns: + + outputs + The generated sequence output + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: __inference(inputs, *args, **kwargs) + + + Perform generation process of the model. + + + :Parameters: + + **inputs** + The **tokenized** sequence used as a prompt for the generation or as model inputs to the model. + + **args** : Optional. + Positional arguments. + + **kwargs** : Optional. + Keyword arguments. + + :Returns: + + outputs + The generated sequence output + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: __vllm_inference(inputs: Union[str, List[str]], sampling_params: Optional[vllm.SamplingParams] = None, **kwargs) -> List[lmflow.utils.data_utils.VLLMInferenceResultWithInput] + + + Perform VLLM inference process of the model. + + + :Parameters: + + **inputs** : Union[str, List[str]] + Prompt(s), string or a list of strings. + + **sampling_params** : Optional[SamplingParams], optional + vllm SamplingParams object, by default None. + + :Returns: + + List[VLLMInferenceResultWithInput] + Return a list of VLLMInferenceResultWithInput, where each + element contains the input prompt and the corresponding output. + + When `sampling_params.detokenize = True`, the output would be a list of strings, + contains sampling_params.n samples for the corresponding prompt. + + When `sampling_params.detokenize = False`, return a list of list of ints + (token ids, no decoding after generation). + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. 
py:method:: prepare_inputs_for_inference(dataset: lmflow.datasets.dataset.Dataset, apply_chat_template: bool = True, enable_distributed_inference: bool = False, use_vllm: bool = False, **kwargs) -> Union[List[str], ray.data.Dataset, Dict[str, torch.Tensor]] + + + Prepare inputs for inference. + + + :Parameters: + + **dataset** : lmflow.datasets.Dataset. + The dataset used for inference. + + **args** : Optional. + Positional arguments. + + **kwargs** : Optional. + Keyword arguments. + + :Returns: + + outputs + The prepared inputs for inference. + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: __prepare_inputs_for_vllm_inference(dataset: lmflow.datasets.dataset.Dataset, apply_chat_template: bool = True, enable_distributed_inference: bool = False) -> Union[List[str], ray.data.Dataset] + + + .. py:method:: __prepare_inputs_for_inference(dataset: lmflow.datasets.dataset.Dataset, **kwargs) + :abstractmethod: + + + + .. py:method:: merge_lora_weights() + + + .. py:method:: get_peft_without_qlora() + + + .. py:method:: save(dir, save_full_model=False, *args, **kwargs) + + + Perform generation process of the model. + + + :Parameters: + + **dir** + The directory to save model and tokenizer + + **save_full_model** : Optional. + Whether to save full model. + + **kwargs** : Optional. + Keyword arguments. + + :Returns: + + outputs + The generated sequence output + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + diff --git a/_sources/autoapi/lmflow/models/hf_encoder_decoder_model/index.rst.txt b/_sources/autoapi/lmflow/models/hf_encoder_decoder_model/index.rst.txt new file mode 100644 index 000000000..b2691697d --- /dev/null +++ b/_sources/autoapi/lmflow/models/hf_encoder_decoder_model/index.rst.txt @@ -0,0 +1,361 @@ +lmflow.models.hf_encoder_decoder_model +====================================== + +.. py:module:: lmflow.models.hf_encoder_decoder_model + +.. autoapi-nested-parse:: + + This is a class called HFDecoderModel which is a wrapper around transformers model and + tokenizer classes. It has several methods such as __init__, tokenize, and train that are + used for training and fine-tuning the model. The __init__ method takes in several arguments + such as model_args, tune_strategy, and ds_config, which are used to load the pretrained + model and tokenizer, and initialize the training settings. + + The tokenize method is used to tokenize the input text and return the input IDs and attention + masks that can be fed to the model for training or inference. + + This class supports different tune_strategy options such as 'normal', 'none', 'lora', and + 'adapter', which allow for different fine-tuning settings of the model. However, the 'lora' + and 'adapter' strategies are not yet implemented. + + Overall, this class provides a convenient interface for loading and fine-tuning transformer + models and can be used for various NLP tasks such as language modeling, text classification, + and question answering. + + .. + !! processed by numpydoc !! + + +Attributes +---------- + +.. autoapisummary:: + + lmflow.models.hf_encoder_decoder_model.logger + + +Classes +------- + +.. autoapisummary:: + + lmflow.models.hf_encoder_decoder_model.HFEncoderDecoderModel + + +Module Contents +--------------- + +.. py:data:: logger + +.. 
py:class:: HFEncoderDecoderModel(model_args, tune_strategy='normal', ds_config=None, device='gpu', use_accelerator=False, custom_model=False, with_deepspeed=True, pipeline_args=None, *args, **kwargs) + + Bases: :py:obj:`lmflow.models.encoder_decoder_model.EncoderDecoderModel`, :py:obj:`lmflow.models.interfaces.tunable.Tunable` + + + + Initializes a HFEncoderDecoderModel instance. + + + :Parameters: + + **model_args** + Model arguments such as model name, path, revision, etc. + + **tune_strategy** : str or none, default="normal". + A string representing the dataset backend. Defaults to "huggingface". + + **ds_config** + Deepspeed configuations. + + **args** : Optional. + Positional arguments. + + **kwargs** : Optional. + Keyword arguments. + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:attribute:: device + + + .. py:method:: tokenize(dataset, *args, **kwargs) + :abstractmethod: + + + + Tokenize the full dataset. + + + :Parameters: + + **dataset** + Text dataset. + + **args** : Optional. + Positional arguments. + + **kwargs** : Optional. + Keyword arguments. + + :Returns: + + tokenized_datasets + The tokenized dataset. + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: encode(input: Union[str, List[str]], *args, **kwargs) -> Union[List[int], List[List[int]]] + + + Perform encoding process of the tokenizer. + + + :Parameters: + + **inputs** : str or list. + The text sequence. + + **args** : Optional. + Positional arguments. + + **kwargs** : Optional. + Keyword arguments. + + :Returns: + + outputs + The tokenized inputs. + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: decode(input, *args, **kwargs) -> Union[str, List[str]] + + + Perform decoding process of the tokenizer. + + + :Parameters: + + **inputs** : list. + The token sequence. + + **args** : Optional. + Positional arguments. + + **kwargs** : Optional. + Keyword arguments. + + :Returns: + + outputs + The text decoded from the token inputs. + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: inference(inputs, *args, **kwargs) + + + Perform generation process of the model. + + + :Parameters: + + **inputs** + The sequence used as a prompt for the generation or as model inputs to the model. + + **args** : Optional. + Positional arguments. + + **kwargs** : Optional. + Keyword arguments. + + :Returns: + + outputs + The generated sequence output + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: merge_lora_weights() + + + .. py:method:: save(dir, save_full_model=False, *args, **kwargs) + + + Perform generation process of the model. + + + :Parameters: + + **dir** + The directory to save model and tokenizer + + **save_full_model** : Optional. + Whether to save full model. + + **kwargs** : Optional. + Keyword arguments. + + :Returns: + + outputs + The generated sequence output + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: get_max_length() + + + Return max acceptable input length in terms of tokens. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: get_tokenizer() + + + Return the tokenizer of the model. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: get_backend_model() + + + Return the backend model. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! 
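+
+The ``encode``/``decode``/``inference`` trio documented above covers the whole
+prompt-to-text loop. The sketch below is only an illustration under stated
+assumptions: the ``model_name_or_path`` field name and the pass-through of
+``max_new_tokens`` to HuggingFace ``generate`` are not fixed by this page.
+
+.. code-block:: python
+
+   import torch
+
+   from lmflow.args import ModelArguments
+   from lmflow.models.hf_decoder_model import HFDecoderModel
+
+   model_args = ModelArguments(model_name_or_path="gpt2")  # assumed field name
+   model = HFDecoderModel(model_args, tune_strategy="none", device="gpu")
+
+   # encode() maps text to token ids and decode() maps ids back to text,
+   # as in the "Hello,world!" example in the docstrings above.
+   ids = model.encode("Hello,world!")
+   print(model.decode(ids))
+
+   # Non-vllm inference() expects tensors (see its docstring), so batch the
+   # ids first; generation kwargs are assumed to be forwarded to generate().
+   input_ids = torch.tensor([ids])
+   outputs = model.inference(inputs=input_ids, max_new_tokens=16)
+   print(model.decode(outputs[0]))  # exact output shape may vary by backend
+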
+ + diff --git a/_sources/autoapi/lmflow/models/hf_model_mixin/index.rst.txt b/_sources/autoapi/lmflow/models/hf_model_mixin/index.rst.txt new file mode 100644 index 000000000..c5b4503ef --- /dev/null +++ b/_sources/autoapi/lmflow/models/hf_model_mixin/index.rst.txt @@ -0,0 +1,269 @@ +lmflow.models.hf_model_mixin +============================ + +.. py:module:: lmflow.models.hf_model_mixin + + +Attributes +---------- + +.. autoapisummary:: + + lmflow.models.hf_model_mixin.logger + lmflow.models.hf_model_mixin.HF_AUTOMODEL_MAPPING + lmflow.models.hf_model_mixin.HF_AUTOMODEL_TYPE + lmflow.models.hf_model_mixin.LORA_TARGET_MODULES_MAPPING + + +Classes +------- + +.. autoapisummary:: + + lmflow.models.hf_model_mixin.HFModelMixin + + +Module Contents +--------------- + +.. py:data:: logger + +.. py:data:: HF_AUTOMODEL_MAPPING + +.. py:data:: HF_AUTOMODEL_TYPE + +.. py:data:: LORA_TARGET_MODULES_MAPPING + +.. py:class:: HFModelMixin(model_args: lmflow.args.ModelArguments, do_train: bool, ds_config=None, device: Optional[str] = 'gpu', use_accelerator: bool = False, hf_auto_model_additional_args: Optional[Dict] = None, *args, **kwargs) + + Bases: :py:obj:`lmflow.models.base_model.BaseModel` + + + .. py:attribute:: device + + + .. py:attribute:: model_args + + + .. py:attribute:: hf_auto_model + + + .. py:attribute:: use_accelerator + + + .. py:attribute:: ds_config + + + .. py:attribute:: do_train + + + .. py:attribute:: tokenizer + + + .. py:attribute:: torch_dtype + + + .. py:attribute:: hf_model_config + + + .. py:attribute:: quant_config + + + .. py:attribute:: peft_config + + + .. py:attribute:: _activated + :value: False + + + + .. py:method:: __prepare_tokenizer(model_args: lmflow.args.ModelArguments) -> Union[transformers.PreTrainedTokenizer, transformers.PreTrainedTokenizerFast] + + + .. py:method:: __prepare_dtype(model_args: lmflow.args.ModelArguments) -> torch.dtype + + + .. py:method:: __prepare_model_config(model_args: lmflow.args.ModelArguments, hf_auto_model_additional_args: Optional[Dict] = None) + + + Prepare model configuration for hf auto register, + Parameters + ---------- + model_args : ModelArguments + LMFlow model arguments. + hf_auto_model_additional_args : Optional[Dict], optional + Special configurations such as `num_labels` in `AutoModelForSequenceClassification` + (commonly used in reward modeling) will not preset in __prepare_model_config, + so it should be passed in hf_auto_model_additional_args. + Returns + ------- + config : ModelConfig + hf model config. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: __prepare_quant_config(model_args: lmflow.args.ModelArguments) + + + .. py:method:: __prepare_peft_config(model_args: lmflow.args.ModelArguments) + + + .. py:method:: __model_module_inject(model_args: lmflow.args.ModelArguments) -> None + + + Override some model modules with custom implementations. + + Current implementations: + - Position interpolation (model_args.do_rope_scaling): + replace llama embeddings with condense embeddings. + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: __prepare_model_for_training(model_args: lmflow.args.ModelArguments, hf_auto_model: HF_AUTOMODEL_TYPE) + + + .. py:method:: __prepare_model_for_inference(model_args: lmflow.args.ModelArguments, hf_auto_model: HF_AUTOMODEL_TYPE, use_accelerator: bool, ds_config) + + + .. 
py:method:: __prepare_model_for_vllm_inference(model_args: lmflow.args.ModelArguments, vllm_gpu_memory_utilization: float, vllm_tensor_parallel_size: int) + + + .. py:method:: __prepare_model_post_process() + + + .. py:method:: activate_model_for_inference(use_vllm: bool = False, **kwargs) + + + .. py:method:: deactivate_model_for_inference(use_vllm: bool = False) + + + Deactivate the model and release the resources. + + NOTE: Currently, VLLM doesn't have an official way to do this, and the + implementation below cannot release all gpu resources by our observation. + Thus this method is just a placeholder for future implementation. See: + [Github issue](https://github.com/vllm-project/vllm/issues/1908) + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: get_max_length() + + + Return max acceptable input length in terms of tokens. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: get_tokenizer() + + + Return the tokenizer of the model. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: get_backend_model() + + + Return the backend model. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + diff --git a/_sources/autoapi/lmflow/models/hf_text_regression_model/index.rst.txt b/_sources/autoapi/lmflow/models/hf_text_regression_model/index.rst.txt new file mode 100644 index 000000000..96a14d256 --- /dev/null +++ b/_sources/autoapi/lmflow/models/hf_text_regression_model/index.rst.txt @@ -0,0 +1,271 @@ +lmflow.models.hf_text_regression_model +====================================== + +.. py:module:: lmflow.models.hf_text_regression_model + + +Attributes +---------- + +.. autoapisummary:: + + lmflow.models.hf_text_regression_model.logger + + +Classes +------- + +.. autoapisummary:: + + lmflow.models.hf_text_regression_model.HFTextRegressionModel + + +Module Contents +--------------- + +.. py:data:: logger + +.. py:class:: HFTextRegressionModel(model_args: lmflow.args.ModelArguments, tune_strategy: str = 'normal', ds_config=None, device='gpu', use_accelerator=False, *args, **kwargs) + + Bases: :py:obj:`lmflow.models.text_regression_model.TextRegressionModel`, :py:obj:`lmflow.models.hf_model_mixin.HFModelMixin`, :py:obj:`lmflow.models.interfaces.tunable.Tunable` + + + + Initializes a HFTextRegressionModel instance. + + + :Parameters: + + **model_args** + Model arguments such as model name, path, revision, etc. + + **tune_strategy** : str or none, default="normal". + A string representing the dataset backend. Defaults to "huggingface". + + **ds_config** + Deepspeed configuations. + + **args** : Optional. + Positional arguments. + + **kwargs** : Optional. + Keyword arguments. + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:attribute:: config_additional_args + + + .. py:method:: tokenize(dataset: lmflow.datasets.dataset.Dataset, add_special_tokens=True, *args, **kwargs) + + + Tokenize the full dataset. + + + :Parameters: + + **dataset** : lmflow.datasets.Dataset. + .. + + **args** : Optional. + Positional arguments. + + **kwargs** : Optional. + Keyword arguments. + + :Returns: + + tokenized_datasets + The tokenized dataset, without any leading or trailing special + tokens (normally they are Begin-Of-Sentence or End-Of-Sentence + tokens). + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. 
py:method:: inference(inputs, release_gpu: bool = False, use_vllm: bool = False, **kwargs) -> Union[List[float], transformers.modeling_outputs.SequenceClassifierOutputWithPast] + + + Perform generation process of the model. + + + :Parameters: + + **inputs** + The sequence used as a prompt for the generation or as model inputs to the model. + When using vllm inference, this should be a string or a list of strings. + When using normal inference, this should be a tensor. + + **release_gpu** : bool, optional + Whether to release the GPU resource after inference, by default False. + + **use_vllm** : bool, optional + Whether to use VLLM for inference, by default False. + + **kwargs** : Optional. + Keyword arguments. + + :Returns: + + outputs + The generated sequence output + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: __inference(inputs, **kwargs) + + + Perform generation process of the model. + + + :Parameters: + + **inputs** + The **tokenized** sequence used as a prompt for the generation or as model inputs to the model. + + **kwargs** : Optional. + Keyword arguments. + + :Returns: + + outputs + The generated sequence output + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: __vllm_inference(inputs: Union[str, List[str]], sampling_params: Optional[vllm.SamplingParams] = None, **kwargs) -> Union[List[List[str]], List[List[List[int]]]] + :abstractmethod: + + + + Perform VLLM inference process of the model. + + + :Parameters: + + **inputs** : Union[str, List[str]] + Prompt(s), string or a list of strings. + + **sampling_params** : Optional[SamplingParams], optional + vllm SamplingParams object, by default None. + + :Returns: + + + .. + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: prepare_inputs_for_inference(dataset: lmflow.datasets.dataset.Dataset, enable_distributed_inference: bool = False, use_vllm: bool = False, **kwargs) -> Union[lmflow.datasets.dataset.Dataset, ray.data.Dataset] + + + .. py:method:: postprocess_inference_outputs(dataset: lmflow.datasets.dataset.Dataset, scores: Union[List[float], List[List[float]]]) + :staticmethod: + + + + .. py:method:: postprocess_distributed_inference_outputs(dataset: lmflow.datasets.dataset.Dataset, inference_result: List[lmflow.utils.data_utils.RewardModelInferenceResultWithInput]) + :staticmethod: + + + + .. py:method:: save(dir, *args, **kwargs) + + + Perform generation process of the model. + + + :Parameters: + + **dir** + The directory to save model and tokenizer + + **kwargs** : Optional. + Keyword arguments. + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + diff --git a/_sources/autoapi/lmflow/models/index.rst.txt b/_sources/autoapi/lmflow/models/index.rst.txt new file mode 100644 index 000000000..cecef115e --- /dev/null +++ b/_sources/autoapi/lmflow/models/index.rst.txt @@ -0,0 +1,35 @@ +lmflow.models +============= + +.. py:module:: lmflow.models + + +Subpackages +----------- + +.. toctree:: + :maxdepth: 1 + + /autoapi/lmflow/models/interfaces/index + /autoapi/lmflow/models/vision_encoder/index + + +Submodules +---------- + +.. 
toctree:: + :maxdepth: 1 + + /autoapi/lmflow/models/auto_model/index + /autoapi/lmflow/models/base_model/index + /autoapi/lmflow/models/decoder_model/index + /autoapi/lmflow/models/encoder_decoder_model/index + /autoapi/lmflow/models/hf_decoder_model/index + /autoapi/lmflow/models/hf_encoder_decoder_model/index + /autoapi/lmflow/models/hf_model_mixin/index + /autoapi/lmflow/models/hf_text_regression_model/index + /autoapi/lmflow/models/regression_model/index + /autoapi/lmflow/models/text_regression_model/index + /autoapi/lmflow/models/vision2seq_model/index + + diff --git a/_sources/autoapi/lmflow/models/interfaces/index.rst.txt b/_sources/autoapi/lmflow/models/interfaces/index.rst.txt new file mode 100644 index 000000000..ca86d17cd --- /dev/null +++ b/_sources/autoapi/lmflow/models/interfaces/index.rst.txt @@ -0,0 +1,15 @@ +lmflow.models.interfaces +======================== + +.. py:module:: lmflow.models.interfaces + + +Submodules +---------- + +.. toctree:: + :maxdepth: 1 + + /autoapi/lmflow/models/interfaces/tunable/index + + diff --git a/_sources/autoapi/lmflow/models/interfaces/tunable/index.rst.txt b/_sources/autoapi/lmflow/models/interfaces/tunable/index.rst.txt new file mode 100644 index 000000000..beb8ecd7e --- /dev/null +++ b/_sources/autoapi/lmflow/models/interfaces/tunable/index.rst.txt @@ -0,0 +1,51 @@ +lmflow.models.interfaces.tunable +================================ + +.. py:module:: lmflow.models.interfaces.tunable + +.. autoapi-nested-parse:: + + Tunable class + + .. + !! processed by numpydoc !! + + +Classes +------- + +.. autoapisummary:: + + lmflow.models.interfaces.tunable.Tunable + + +Module Contents +--------------- + +.. py:class:: Tunable + + Bases: :py:obj:`abc.ABC` + + + + Helper class that provides a standard way to create an ABC using + inheritance. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + diff --git a/_sources/autoapi/lmflow/models/regression_model/index.rst.txt b/_sources/autoapi/lmflow/models/regression_model/index.rst.txt new file mode 100644 index 000000000..d744c81a1 --- /dev/null +++ b/_sources/autoapi/lmflow/models/regression_model/index.rst.txt @@ -0,0 +1,29 @@ +lmflow.models.regression_model +============================== + +.. py:module:: lmflow.models.regression_model + +.. autoapi-nested-parse:: + + General regression model. + + .. + !! processed by numpydoc !! + + +Classes +------- + +.. autoapisummary:: + + lmflow.models.regression_model.RegressionModel + + +Module Contents +--------------- + +.. py:class:: RegressionModel(*args, **kwargs) + + Bases: :py:obj:`lmflow.models.base_model.BaseModel` + + diff --git a/_sources/autoapi/lmflow/models/text_regression_model/index.rst.txt b/_sources/autoapi/lmflow/models/text_regression_model/index.rst.txt new file mode 100644 index 000000000..96bb6ae00 --- /dev/null +++ b/_sources/autoapi/lmflow/models/text_regression_model/index.rst.txt @@ -0,0 +1,114 @@ +lmflow.models.text_regression_model +=================================== + +.. py:module:: lmflow.models.text_regression_model + +.. autoapi-nested-parse:: + + A model maps "text_only" data to float. + + .. + !! processed by numpydoc !! + + +Classes +------- + +.. autoapisummary:: + + lmflow.models.text_regression_model.TextRegressionModel + + +Module Contents +--------------- + +.. py:class:: TextRegressionModel(model_args, *args, **kwargs) + + Bases: :py:obj:`lmflow.models.regression_model.RegressionModel` + + + + Initializes a TextRegressionModel instance. 
+ + + :Parameters: + + **model_args** + Model arguments such as model name, path, revision, etc. + + **args** : Optional. + Positional arguments. + + **kwargs** : Optional. + Keyword arguments. + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:attribute:: inference_func + :value: None + + + + .. py:method:: register_inference_function(inference_func) + + + Registers a regression function. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: inference(inputs: lmflow.datasets.dataset.Dataset) + + + Gets regression results of a given dataset. + + :inputs: Dataset object, only accept type "text_only". + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + diff --git a/_sources/autoapi/lmflow/models/vision2seq_model/index.rst.txt b/_sources/autoapi/lmflow/models/vision2seq_model/index.rst.txt new file mode 100644 index 000000000..1befce565 --- /dev/null +++ b/_sources/autoapi/lmflow/models/vision2seq_model/index.rst.txt @@ -0,0 +1,189 @@ +lmflow.models.vision2seq_model +============================== + +.. py:module:: lmflow.models.vision2seq_model + + +Classes +------- + +.. autoapisummary:: + + lmflow.models.vision2seq_model.CustomAutoVision2SeqModel + + +Module Contents +--------------- + +.. py:class:: CustomAutoVision2SeqModel(config: transformers.Blip2Config, image_encoder_name_or_path=None, qformer_name_or_path=None, language_model_name_or_path=None, low_resource=False) + + Bases: :py:obj:`transformers.Blip2ForConditionalGeneration`, :py:obj:`lmflow.models.base_model.BaseModel` + + + .. py:attribute:: custom_vision_model + + + .. py:attribute:: with_qformer + + + .. py:attribute:: kwargs + + + .. py:attribute:: language_model + + + .. py:attribute:: hidden_size + + + .. py:attribute:: hidden_size + + + .. py:method:: get_backend_model() + + + .. py:method:: vision_model_from_pretrained(pretrained_path) + + + .. py:method:: qformer_from_pretrained(pretrained_path) + + + .. py:method:: language_model_from_pretrained(pretrained_path, low_resource=False, use_prompt_cache=False) + + + .. py:method:: vision_feature_select(image_forward_outs) + + + .. py:method:: register_prompt_cache(prompt_ids, prompt_keys_values) + + + Udpate the prompt id and embedding for reuse in the future + + Args: + prompt_ids (torch.LongTensor): The id of the prompt. + prompt_keys_values (torch.FloatTensor): The embedding of the prompt. + + Returns: + None + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: save_prompt_cache(path) + + + Save prompt embedding and id. + + Args: + path: The path to save the prompt embedding and id. + + Returns: + None + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: load_prompt_cache(path) + + + Load prompt embedding and id. + Args: + path: The path to load the prompt embedding and id. + + Returns: + None + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: get_tokenizer() + + + .. 
py:method:: forward(input_ids: torch.LongTensor = None, pixel_values: Optional[torch.FloatTensor] = None, images: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, image_token_indexes: Optional[List] = [0], one_sample_multiple_images: bool = False) -> Union[Tuple, transformers.modeling_outputs.CausalLMOutputWithPast] + + + .. py:method:: processor_image_token_in_minigpt4(input_ids, language_model_inputs, attention_mask, image_token_indexes, pixel_values, batch_size=1) + + + .. py:method:: generate(pixel_values: torch.FloatTensor, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.LongTensor] = None, image_token_indexes: Optional[List] = [0], one_sample_multiple_images: Optional[bool] = False, images: Optional[torch.LongTensor] = None, **generate_kwargs) -> torch.LongTensor + + + Overrides `generate` function to be able to use the model as a conditional generator. + + Args: + pixel_values (`torch.FloatTensor` of shape (batch_size, num_channels, height, width)): + Input images to be processed. + input_ids (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*): + The sequence used as a prompt for the generation. + attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*): + Mask to avoid performing attention on padding token indices + image_token_indexes (bool, *optional*): + The index for inserting the image tokens. + one_sample_multiple_images: (bool, *optional*): + The flag for inference that the input batch size is 1 and contain multiple images. + + Returns: + captions (list): A list of strings of length batch_size * num_captions. + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + diff --git a/_sources/autoapi/lmflow/models/vision_encoder/clip_encoder/index.rst.txt b/_sources/autoapi/lmflow/models/vision_encoder/clip_encoder/index.rst.txt new file mode 100644 index 000000000..4bc695829 --- /dev/null +++ b/_sources/autoapi/lmflow/models/vision_encoder/clip_encoder/index.rst.txt @@ -0,0 +1,101 @@ +lmflow.models.vision_encoder.clip_encoder +========================================= + +.. py:module:: lmflow.models.vision_encoder.clip_encoder + + +Classes +------- + +.. autoapisummary:: + + lmflow.models.vision_encoder.clip_encoder.CLIPVisionTower + + +Functions +--------- + +.. autoapisummary:: + + lmflow.models.vision_encoder.clip_encoder.build_vision_tower + + +Module Contents +--------------- + +.. py:function:: build_vision_tower(vision_tower_cfg, **kwargs) + +.. py:class:: CLIPVisionTower(vision_tower, args, delay_load=False) + + Bases: :py:obj:`torch.nn.Module` + + + .. py:attribute:: is_loaded + :value: False + + + + .. py:attribute:: vision_tower_name + + + .. py:attribute:: select_layer + + + .. py:attribute:: select_feature + + + .. py:method:: load_model() + + + .. py:method:: encode_images(images, language_projection) + + + .. py:method:: feature_select(image_forward_outs) + + + .. py:method:: forward(images) + + + .. py:property:: dummy_feature + + + .. py:property:: dtype + + + .. py:property:: device + + + .. py:property:: config + + + .. py:property:: hidden_size + + + .. py:property:: num_patches + + + .. 
py:method:: prepare_inputs_labels_for_multimodal(input_ids, attention_mask, past_key_values, labels, images, language_projection=None, language_model=None, **kwargs) + + + Copy from the LLAVA code base. + Should be polished. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + diff --git a/_sources/autoapi/lmflow/models/vision_encoder/index.rst.txt b/_sources/autoapi/lmflow/models/vision_encoder/index.rst.txt new file mode 100644 index 000000000..fac6977d4 --- /dev/null +++ b/_sources/autoapi/lmflow/models/vision_encoder/index.rst.txt @@ -0,0 +1,28 @@ +lmflow.models.vision_encoder +============================ + +.. py:module:: lmflow.models.vision_encoder + + +Submodules +---------- + +.. toctree:: + :maxdepth: 1 + + /autoapi/lmflow/models/vision_encoder/clip_encoder/index + + +Functions +--------- + +.. autoapisummary:: + + lmflow.models.vision_encoder.build_vision_tower + + +Package Contents +---------------- + +.. py:function:: build_vision_tower(vision_tower_cfg, **kwargs) + diff --git a/_sources/autoapi/lmflow/optim/adabelief/index.rst.txt b/_sources/autoapi/lmflow/optim/adabelief/index.rst.txt new file mode 100644 index 000000000..27fe002d9 --- /dev/null +++ b/_sources/autoapi/lmflow/optim/adabelief/index.rst.txt @@ -0,0 +1,95 @@ +lmflow.optim.adabelief +====================== + +.. py:module:: lmflow.optim.adabelief + + +Classes +------- + +.. autoapisummary:: + + lmflow.optim.adabelief.AdaBelief + + +Module Contents +--------------- + +.. py:class:: AdaBelief(params, lr=0.001, betas=(0.9, 0.999), eps=1e-16, weight_decay=0, amsgrad=False, weight_decouple=True, fixed_decay=False, rectify=True, degenerated_to_sgd=True, print_change_log=True) + + Bases: :py:obj:`torch.optim.optimizer.Optimizer` + + + + Implements AdaBelief algorithm. Modified from Adam in PyTorch + reference: AdaBelief Optimizer, adapting stepsizes by the belief in observed gradients, NeurIPS 2020 + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:attribute:: degenerated_to_sgd + + + .. py:attribute:: defaults + + + .. py:attribute:: degenerated_to_sgd + + + .. py:attribute:: weight_decouple + + + .. py:attribute:: rectify + + + .. py:attribute:: fixed_decay + + + .. py:method:: __setstate__(state) + + + .. py:method:: reset() + + + .. py:method:: step(closure=None) + + + Performs a single optimization step. + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + diff --git a/_sources/autoapi/lmflow/optim/adabound/index.rst.txt b/_sources/autoapi/lmflow/optim/adabound/index.rst.txt new file mode 100644 index 000000000..a1abf5203 --- /dev/null +++ b/_sources/autoapi/lmflow/optim/adabound/index.rst.txt @@ -0,0 +1,83 @@ +lmflow.optim.adabound +===================== + +.. py:module:: lmflow.optim.adabound + + +Classes +------- + +.. autoapisummary:: + + lmflow.optim.adabound.AdaBound + + +Module Contents +--------------- + +.. py:class:: AdaBound(params, lr: float = 0.001, betas=(0.9, 0.999), final_lr: float = 0.1, gamma: float = 0.001, eps: float = 1e-08, weight_decay: float = 0, amsbound: bool = False) + + Bases: :py:obj:`torch.optim.optimizer.Optimizer` + + + + Implements AdaBound algorithm. + + It has been proposed in `Adaptive Gradient Methods with Dynamic Bound of + Learning Rate + https://arxiv.org/abs/1902.09843 + Note: + Reference code: https://github.com/Luolc/AdaBound + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. 
py:attribute:: defaults + + + .. py:attribute:: base_lrs + + + .. py:method:: __setstate__(state) -> None + + + .. py:method:: step(closure=None) + + + Performs a single optimization step. + + Arguments: + closure: A closure that reevaluates the model and returns the loss. + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + diff --git a/_sources/autoapi/lmflow/optim/adadelta/index.rst.txt b/_sources/autoapi/lmflow/optim/adadelta/index.rst.txt new file mode 100644 index 000000000..021c9000b --- /dev/null +++ b/_sources/autoapi/lmflow/optim/adadelta/index.rst.txt @@ -0,0 +1,28 @@ +lmflow.optim.adadelta +===================== + +.. py:module:: lmflow.optim.adadelta + + +Classes +------- + +.. autoapisummary:: + + lmflow.optim.adadelta.Adadelta + + +Module Contents +--------------- + +.. py:class:: Adadelta(params, lr=1.0, rho=0.95, eps=1e-06) + + Bases: :py:obj:`torch.optim.optimizer.Optimizer` + + + .. py:attribute:: defaults + + + .. py:method:: step(closure=None) + + diff --git a/_sources/autoapi/lmflow/optim/adagrad/index.rst.txt b/_sources/autoapi/lmflow/optim/adagrad/index.rst.txt new file mode 100644 index 000000000..c4baac02e --- /dev/null +++ b/_sources/autoapi/lmflow/optim/adagrad/index.rst.txt @@ -0,0 +1,28 @@ +lmflow.optim.adagrad +==================== + +.. py:module:: lmflow.optim.adagrad + + +Classes +------- + +.. autoapisummary:: + + lmflow.optim.adagrad.AdaGrad + + +Module Contents +--------------- + +.. py:class:: AdaGrad(params, lr=0.001, eps=1e-08, weight_decay=0) + + Bases: :py:obj:`torch.optim.Optimizer` + + + .. py:attribute:: defaults + + + .. py:method:: step(closure=None) + + diff --git a/_sources/autoapi/lmflow/optim/adam/index.rst.txt b/_sources/autoapi/lmflow/optim/adam/index.rst.txt new file mode 100644 index 000000000..d1e631e3b --- /dev/null +++ b/_sources/autoapi/lmflow/optim/adam/index.rst.txt @@ -0,0 +1,28 @@ +lmflow.optim.adam +================= + +.. py:module:: lmflow.optim.adam + + +Classes +------- + +.. autoapisummary:: + + lmflow.optim.adam.Adam + + +Module Contents +--------------- + +.. py:class:: Adam(params, lr=0.001, betas=(0.9, 0.999), eps=1e-08) + + Bases: :py:obj:`torch.optim.optimizer.Optimizer` + + + .. py:attribute:: defaults + + + .. py:method:: step(closure=None) + + diff --git a/_sources/autoapi/lmflow/optim/adamax/index.rst.txt b/_sources/autoapi/lmflow/optim/adamax/index.rst.txt new file mode 100644 index 000000000..175090570 --- /dev/null +++ b/_sources/autoapi/lmflow/optim/adamax/index.rst.txt @@ -0,0 +1,31 @@ +lmflow.optim.adamax +=================== + +.. py:module:: lmflow.optim.adamax + + +Classes +------- + +.. autoapisummary:: + + lmflow.optim.adamax.Adamax + + +Module Contents +--------------- + +.. py:class:: Adamax(params, lr=0.002, betas=(0.9, 0.999), eps=1e-08, weight_decay=0) + + Bases: :py:obj:`torch.optim.optimizer.Optimizer` + + + .. py:attribute:: defaults + + + .. py:method:: __setstate__(state) + + + .. py:method:: step(closure=None) + + diff --git a/_sources/autoapi/lmflow/optim/adamp/index.rst.txt b/_sources/autoapi/lmflow/optim/adamp/index.rst.txt new file mode 100644 index 000000000..3e070d61c --- /dev/null +++ b/_sources/autoapi/lmflow/optim/adamp/index.rst.txt @@ -0,0 +1,96 @@ +lmflow.optim.adamp +================== + +.. py:module:: lmflow.optim.adamp + + +Classes +------- + +.. autoapisummary:: + + lmflow.optim.adamp.AdamP + + +Module Contents +--------------- + +.. 
py:class:: AdamP(params, lr: float = 0.001, betas=(0.9, 0.999), eps: float = 1e-08, weight_decay: float = 0, delta: float = 0.1, wd_ratio: float = 0.1, nesterov: bool = False) + + Bases: :py:obj:`torch.optim.optimizer.Optimizer` + + + + Implements AdamP algorithm. + + It has been proposed in `Slowing Down the Weight Norm Increase in + Momentum-based Optimizers` + https://arxiv.org/abs/2006.08217 + + Note: + Reference code: https://github.com/clovaai/AdamP + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:attribute:: defaults + + + .. py:method:: _channel_view(x) + :staticmethod: + + + + .. py:method:: _layer_view(x) + :staticmethod: + + + + .. py:method:: _cosine_similarity(x, y, eps, view_func) + :staticmethod: + + + + .. py:method:: _projection(p, grad, perturb, delta, wd_ratio, eps) + + + .. py:method:: step(closure=None) + + + Performs a single optimization step. + + Arguments: + closure: A closure that reevaluates the model and returns the loss. + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + diff --git a/_sources/autoapi/lmflow/optim/adamw_schedule_free/index.rst.txt b/_sources/autoapi/lmflow/optim/adamw_schedule_free/index.rst.txt new file mode 100644 index 000000000..2342a3ef8 --- /dev/null +++ b/_sources/autoapi/lmflow/optim/adamw_schedule_free/index.rst.txt @@ -0,0 +1,85 @@ +lmflow.optim.adamw_schedule_free +================================ + +.. py:module:: lmflow.optim.adamw_schedule_free + + +Classes +------- + +.. autoapisummary:: + + lmflow.optim.adamw_schedule_free.AdamWScheduleFree + + +Module Contents +--------------- + +.. py:class:: AdamWScheduleFree(params, lr=0.0025, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, warmup_steps=0, r=0.0, weight_lr_power=2.0, foreach=hasattr(torch, '_foreach_mul_')) + + Bases: :py:obj:`torch.optim.Optimizer` + + + + Schedule-Free AdamW + As the name suggests, no scheduler is needed with this optimizer. + To add warmup, rather than using a learning rate schedule you can just + set the warmup_steps parameter. + + This optimizer requires that .train() and .eval() be called before the + beginning of training and evaluation respectively. The optimizer should + also be placed in eval mode when saving checkpoints. + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:attribute:: defaults + + + .. py:method:: eval() + + + .. py:method:: train() + + + .. py:method:: step(closure=None) + + + Performs a single optimization step. + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + diff --git a/_sources/autoapi/lmflow/optim/adan/index.rst.txt b/_sources/autoapi/lmflow/optim/adan/index.rst.txt new file mode 100644 index 000000000..741b149fe --- /dev/null +++ b/_sources/autoapi/lmflow/optim/adan/index.rst.txt @@ -0,0 +1,92 @@ +lmflow.optim.adan +================= + +.. py:module:: lmflow.optim.adan + + +Classes +------- + +.. autoapisummary:: + + lmflow.optim.adan.Adan + + +Functions +--------- + +.. autoapisummary:: + + lmflow.optim.adan._single_tensor_adan + lmflow.optim.adan._multi_tensor_adan + + +Module Contents +--------------- + +.. py:class:: Adan(params, lr=0.001, betas=(0.98, 0.92, 0.99), eps=1e-08, weight_decay=0.0, max_grad_norm=0.0, no_prox=False, foreach: bool = True) + + Bases: :py:obj:`torch.optim.optimizer.Optimizer` + + + + Implements a pytorch variant of Adan. 
+ + Adan was proposed in + Adan : Adaptive Nesterov Momentum Algorithm for Faster Optimizing Deep Models. + https://arxiv.org/abs/2208.06677 + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:attribute:: defaults + + + .. py:method:: __setstate__(state) + + + .. py:method:: restart_opt() + + + .. py:method:: step() + + + Performs a single optimization step. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + +.. py:function:: _single_tensor_adan(params: List[torch.Tensor], grads: List[torch.Tensor], exp_avgs: List[torch.Tensor], exp_avg_sqs: List[torch.Tensor], exp_avg_diffs: List[torch.Tensor], pre_grads: List[torch.Tensor], *, beta1: float, beta2: float, beta3: float, bias_correction1: float, bias_correction2: float, bias_correction3_sqrt: float, lr: float, weight_decay: float, eps: float, no_prox: bool, clip_global_grad_norm: torch.Tensor) + +.. py:function:: _multi_tensor_adan(params: List[torch.Tensor], grads: List[torch.Tensor], exp_avgs: List[torch.Tensor], exp_avg_sqs: List[torch.Tensor], exp_avg_diffs: List[torch.Tensor], pre_grads: List[torch.Tensor], *, beta1: float, beta2: float, beta3: float, bias_correction1: float, bias_correction2: float, bias_correction3_sqrt: float, lr: float, weight_decay: float, eps: float, no_prox: bool, clip_global_grad_norm: torch.Tensor) + diff --git a/_sources/autoapi/lmflow/optim/dummy/index.rst.txt b/_sources/autoapi/lmflow/optim/dummy/index.rst.txt new file mode 100644 index 000000000..a92650599 --- /dev/null +++ b/_sources/autoapi/lmflow/optim/dummy/index.rst.txt @@ -0,0 +1,84 @@ +lmflow.optim.dummy +================== + +.. py:module:: lmflow.optim.dummy + +.. autoapi-nested-parse:: + + Dummy Optimizer. + + .. + !! processed by numpydoc !! + + +Classes +------- + +.. autoapisummary:: + + lmflow.optim.dummy.Dummy + + +Module Contents +--------------- + +.. py:class:: Dummy(params: Iterable[torch.nn.parameter.Parameter], lr: float = 0.0, betas: Tuple[float, float] = (0.9, 0.999), weight_decay: float = 0.0) + + Bases: :py:obj:`torch.optim.Optimizer` + + + + An dummy optimizer that does nothing. + + Parameters: + params (:obj:`Iterable[nn.parameter.Parameter]`): + Iterable of parameters to optimize or dictionaries defining parameter groups. + lr (:obj:`float`, `optional`, defaults to 0): + The learning rate to use. + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:attribute:: defaults + + + .. py:method:: step(closure: Callable = None) + + + Performs a single optimization step. + + Arguments: + closure (:obj:`Callable`, `optional`): A closure that reevaluates the model and returns the loss. + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + diff --git a/_sources/autoapi/lmflow/optim/index.rst.txt b/_sources/autoapi/lmflow/optim/index.rst.txt new file mode 100644 index 000000000..f0fbcc423 --- /dev/null +++ b/_sources/autoapi/lmflow/optim/index.rst.txt @@ -0,0 +1,34 @@ +lmflow.optim +============ + +.. py:module:: lmflow.optim + + +Submodules +---------- + +.. 
toctree:: + :maxdepth: 1 + + /autoapi/lmflow/optim/adabelief/index + /autoapi/lmflow/optim/adabound/index + /autoapi/lmflow/optim/adadelta/index + /autoapi/lmflow/optim/adagrad/index + /autoapi/lmflow/optim/adam/index + /autoapi/lmflow/optim/adamax/index + /autoapi/lmflow/optim/adamp/index + /autoapi/lmflow/optim/adamw_schedule_free/index + /autoapi/lmflow/optim/adan/index + /autoapi/lmflow/optim/dummy/index + /autoapi/lmflow/optim/lamb/index + /autoapi/lmflow/optim/lars/index + /autoapi/lmflow/optim/nadam/index + /autoapi/lmflow/optim/novograd/index + /autoapi/lmflow/optim/optimizers/index + /autoapi/lmflow/optim/radam/index + /autoapi/lmflow/optim/sgd_schedule_free/index + /autoapi/lmflow/optim/sgdp/index + /autoapi/lmflow/optim/sophia/index + /autoapi/lmflow/optim/yogi/index + + diff --git a/_sources/autoapi/lmflow/optim/lamb/index.rst.txt b/_sources/autoapi/lmflow/optim/lamb/index.rst.txt new file mode 100644 index 000000000..2165cbe14 --- /dev/null +++ b/_sources/autoapi/lmflow/optim/lamb/index.rst.txt @@ -0,0 +1,87 @@ +lmflow.optim.lamb +================= + +.. py:module:: lmflow.optim.lamb + + +Classes +------- + +.. autoapisummary:: + + lmflow.optim.lamb.Lamb + + +Module Contents +--------------- + +.. py:class:: Lamb(params, lr: float = 0.001, betas=(0.9, 0.999), eps: float = 1e-06, weight_decay: float = 0, clamp_value: float = 10, adam: bool = False, debias: bool = False) + + Bases: :py:obj:`torch.optim.optimizer.Optimizer` + + + + Implements Lamb algorithm. + + It has been proposed in `Large Batch Optimization for Deep Learning: + Training BERT in 76 minutes` + https://arxiv.org/abs/1904.00962 + + Note: + Reference code: https://github.com/cybertronai/pytorch-lamb + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:attribute:: defaults + + + .. py:attribute:: clamp_value + + + .. py:attribute:: adam + + + .. py:attribute:: debias + + + .. py:method:: step(closure=None) + + + Performs a single optimization step. + + Arguments: + closure: A closure that reevaluates the model and returns the loss. + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + diff --git a/_sources/autoapi/lmflow/optim/lars/index.rst.txt b/_sources/autoapi/lmflow/optim/lars/index.rst.txt new file mode 100644 index 000000000..73fa9d9a5 --- /dev/null +++ b/_sources/autoapi/lmflow/optim/lars/index.rst.txt @@ -0,0 +1,101 @@ +lmflow.optim.lars +================= + +.. py:module:: lmflow.optim.lars + + +Classes +------- + +.. autoapisummary:: + + lmflow.optim.lars.LARS + + +Module Contents +--------------- + +.. py:class:: LARS(params, lr: float = 0.01, momentum: float = 0.0, dampening: float = 0.0, weight_decay: float = 0.0, nesterov: bool = False, trust_coefficient: float = 0.01, eps: float = 1e-08) + + Bases: :py:obj:`torch.optim.optimizer.Optimizer` + + + + Extends SGD in PyTorch with LARS scaling from the paper + `Large batch training of Convolutional Networks`__. + .. note:: + The application of momentum in the SGD part is modified according to + the PyTorch standards. LARS scaling fits into the equation in the + following fashion. + + .. math:: + \begin{aligned} + g_{t+1} & = \text{lars_lr} * (\beta * p_{t} + g_{t+1}), \\ + v_{t+1} & = \\mu * v_{t} + g_{t+1}, \\ + p_{t+1} & = p_{t} - \text{lr} * v_{t+1}, + \\end{aligned} + + where :math:`p`, :math:`g`, :math:`v`, :math:`\\mu` and :math:`\beta` + denote the parameters, gradient, velocity, momentum, and weight decay + respectively. The :math:`lars_lr` is defined by Eq. 6 in the paper. 
+ The Nesterov version is analogously modified. + + .. warning:: + Parameters with weight decay set to 0 will automatically be excluded + from layer-wise LR scaling. This is to ensure consistency with papers + like SimCLR and BYOL. + + __ https://arxiv.org/pdf/1708.03888.pdf + + Note: + Reference code: https://github.com/PyTorchLightning/lightning-bolts/ + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:attribute:: defaults + + + .. py:method:: __setstate__(state) -> None + + + .. py:method:: step(closure=None) + + + Performs a single optimization step. + + Arguments: + closure: A closure that reevaluates the model and returns the loss. + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + diff --git a/_sources/autoapi/lmflow/optim/nadam/index.rst.txt b/_sources/autoapi/lmflow/optim/nadam/index.rst.txt new file mode 100644 index 000000000..f9d39f5f2 --- /dev/null +++ b/_sources/autoapi/lmflow/optim/nadam/index.rst.txt @@ -0,0 +1,31 @@ +lmflow.optim.nadam +================== + +.. py:module:: lmflow.optim.nadam + + +Classes +------- + +.. autoapisummary:: + + lmflow.optim.nadam.NAdam + + +Module Contents +--------------- + +.. py:class:: NAdam(params, lr=0.002, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, momentum_decay=0.004) + + Bases: :py:obj:`torch.optim.Optimizer` + + + .. py:attribute:: defaults + + + .. py:method:: __setstate__(state) + + + .. py:method:: step(closure=None) + + diff --git a/_sources/autoapi/lmflow/optim/novograd/index.rst.txt b/_sources/autoapi/lmflow/optim/novograd/index.rst.txt new file mode 100644 index 000000000..b7e56ebd5 --- /dev/null +++ b/_sources/autoapi/lmflow/optim/novograd/index.rst.txt @@ -0,0 +1,31 @@ +lmflow.optim.novograd +===================== + +.. py:module:: lmflow.optim.novograd + + +Classes +------- + +.. autoapisummary:: + + lmflow.optim.novograd.NovoGrad + + +Module Contents +--------------- + +.. py:class:: NovoGrad(params, lr=0.01, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, grad_averaging=False, amsgrad=False) + + Bases: :py:obj:`torch.optim.Optimizer` + + + .. py:attribute:: defaults + + + .. py:method:: __setstate__(state) + + + .. py:method:: step(closure=None) + + diff --git a/_sources/autoapi/lmflow/optim/optimizers/index.rst.txt b/_sources/autoapi/lmflow/optim/optimizers/index.rst.txt new file mode 100644 index 000000000..44c92a3d7 --- /dev/null +++ b/_sources/autoapi/lmflow/optim/optimizers/index.rst.txt @@ -0,0 +1,13 @@ +lmflow.optim.optimizers +======================= + +.. py:module:: lmflow.optim.optimizers + +.. autoapi-nested-parse:: + + All optimizers. + + .. + !! processed by numpydoc !! + + diff --git a/_sources/autoapi/lmflow/optim/radam/index.rst.txt b/_sources/autoapi/lmflow/optim/radam/index.rst.txt new file mode 100644 index 000000000..16ed209f3 --- /dev/null +++ b/_sources/autoapi/lmflow/optim/radam/index.rst.txt @@ -0,0 +1,84 @@ +lmflow.optim.radam +================== + +.. py:module:: lmflow.optim.radam + + +Classes +------- + +.. autoapisummary:: + + lmflow.optim.radam.RAdam + + +Module Contents +--------------- + +.. py:class:: RAdam(params, lr: float = 0.001, betas=(0.9, 0.999), eps: float = 1e-08, weight_decay: float = 0) + + Bases: :py:obj:`torch.optim.optimizer.Optimizer` + + + + Implements RAdam optimization algorithm. + + Note: + Deprecated, please use version provided by PyTorch_. + + It has been proposed in `On the Variance of the Adaptive Learning + Rate and Beyond`. 
+ https://arxiv.org/abs/1908.03265 + + Note: + Reference code: https://github.com/LiyuanLucasLiu/RAdam + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:attribute:: defaults + + + .. py:method:: __setstate__(state) + + + .. py:method:: step(closure=None) + + + Performs a single optimization step. + + Arguments: + closure: A closure that reevaluates the model and returns the loss. + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + diff --git a/_sources/autoapi/lmflow/optim/sgd_schedule_free/index.rst.txt b/_sources/autoapi/lmflow/optim/sgd_schedule_free/index.rst.txt new file mode 100644 index 000000000..536c61d2c --- /dev/null +++ b/_sources/autoapi/lmflow/optim/sgd_schedule_free/index.rst.txt @@ -0,0 +1,85 @@ +lmflow.optim.sgd_schedule_free +============================== + +.. py:module:: lmflow.optim.sgd_schedule_free + + +Classes +------- + +.. autoapisummary:: + + lmflow.optim.sgd_schedule_free.SGDScheduleFree + + +Module Contents +--------------- + +.. py:class:: SGDScheduleFree(params, lr=1.0, momentum=0.9, weight_decay=0, warmup_steps=0, r=0.0, weight_lr_power=2, foreach=hasattr(torch, '_foreach_mul_')) + + Bases: :py:obj:`torch.optim.Optimizer` + + + + Schedule-Free SGD + As the name suggests, no scheduler is needed with this optimizer. + To add warmup, rather than using a learning rate schedule you can just + set the warmup_steps parameter. + + This optimizer requires that .train() and .eval() be called before the + beginning of training and evaluation respectively. The optimizer should + also be placed in eval mode when saving checkpoints. + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:attribute:: defaults + + + .. py:method:: eval() + + + .. py:method:: train() + + + .. py:method:: step(closure=None) + + + Performs a single optimization step. + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + diff --git a/_sources/autoapi/lmflow/optim/sgdp/index.rst.txt b/_sources/autoapi/lmflow/optim/sgdp/index.rst.txt new file mode 100644 index 000000000..0194f29ea --- /dev/null +++ b/_sources/autoapi/lmflow/optim/sgdp/index.rst.txt @@ -0,0 +1,96 @@ +lmflow.optim.sgdp +================= + +.. py:module:: lmflow.optim.sgdp + + +Classes +------- + +.. autoapisummary:: + + lmflow.optim.sgdp.SGDP + + +Module Contents +--------------- + +.. py:class:: SGDP(params, lr: float = 0.001, momentum: float = 0, dampening: float = 0, eps: float = 1e-08, weight_decay: float = 0, delta: float = 0.1, wd_ratio: float = 0.1, nesterov: bool = False) + + Bases: :py:obj:`torch.optim.optimizer.Optimizer` + + + + Implements SGDP algorithm. + + It has been proposed in `Slowing Down the Weight Norm Increase in + Momentum-based Optimizers`. + https://arxiv.org/abs/2006.08217 + + Note: + Reference code: https://github.com/clovaai/AdamP + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:attribute:: defaults + + + .. py:method:: _channel_view(x) + :staticmethod: + + + + .. py:method:: _layer_view(x) + :staticmethod: + + + + .. py:method:: _cosine_similarity(x, y, eps, view_func) + :staticmethod: + + + + .. py:method:: _projection(p, grad, perturb, delta, wd_ratio, eps) + + + .. py:method:: step(closure=None) + + + Performs a single optimization step. + + Arguments: + closure: A closure that reevaluates the model and returns the loss. + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! 
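The SGDScheduleFree and AdamWScheduleFree entries above both note that no learning-rate scheduler is needed and that ``.train()`` / ``.eval()`` must be called around training and evaluation, and before saving checkpoints. A minimal sketch of that calling pattern, assuming only the documented constructor signature; the toy model, data, and checkpoint path are placeholders, not part of the LMFlow docs:

.. code-block:: python

    import torch
    from lmflow.optim.sgd_schedule_free import SGDScheduleFree

    model = torch.nn.Linear(16, 1)                    # toy model, for illustration only
    optimizer = SGDScheduleFree(model.parameters(), lr=1.0, warmup_steps=100)

    optimizer.train()                                 # must precede training updates
    for _ in range(10):
        x = torch.randn(8, 16)
        loss = model(x).pow(2).mean()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    optimizer.eval()                                  # required before evaluation and checkpointing
    torch.save(model.state_dict(), "ckpt.pt")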
+ + diff --git a/_sources/autoapi/lmflow/optim/sophia/index.rst.txt b/_sources/autoapi/lmflow/optim/sophia/index.rst.txt new file mode 100644 index 000000000..797ac76d9 --- /dev/null +++ b/_sources/autoapi/lmflow/optim/sophia/index.rst.txt @@ -0,0 +1,56 @@ +lmflow.optim.sophia +=================== + +.. py:module:: lmflow.optim.sophia + + +Classes +------- + +.. autoapisummary:: + + lmflow.optim.sophia.SophiaG + + +Module Contents +--------------- + +.. py:class:: SophiaG(params, lr=0.0001, betas=(0.965, 0.99), rho=0.04, weight_decay=0.1, *, maximize: bool = False, capturable: bool = False) + + Bases: :py:obj:`torch.optim.optimizer.Optimizer` + + + + Sophia: A Scalable Stochastic Second-order Optimizer for Language Model Pre-training. + Code from: https://github.com/Liuhong99/Sophia/ + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:attribute:: defaults + + + .. py:method:: __setstate__(state) + + + .. py:method:: update_hessian() + + + .. py:method:: step(closure=None, bs=5120) + + diff --git a/_sources/autoapi/lmflow/optim/yogi/index.rst.txt b/_sources/autoapi/lmflow/optim/yogi/index.rst.txt new file mode 100644 index 000000000..b1a78a823 --- /dev/null +++ b/_sources/autoapi/lmflow/optim/yogi/index.rst.txt @@ -0,0 +1,77 @@ +lmflow.optim.yogi +================= + +.. py:module:: lmflow.optim.yogi + + +Classes +------- + +.. autoapisummary:: + + lmflow.optim.yogi.Yogi + + +Module Contents +--------------- + +.. py:class:: Yogi(params, lr: float = 0.01, betas=(0.9, 0.999), eps: float = 0.001, initial_accumulator: float = 1e-06, weight_decay: float = 0) + + Bases: :py:obj:`torch.optim.optimizer.Optimizer` + + + + Implements Yogi Optimizer Algorithm. + It has been proposed in `Adaptive methods for Nonconvex Optimization`. + + https://papers.nips.cc/paper/8186-adaptive-methods-for-nonconvex-optimization # noqa + + Note: + Reference code: https://github.com/4rtemi5/Yogi-Optimizer_Keras + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:attribute:: defaults + + + .. py:method:: step(closure=None) + + + Performs a single optimization step. + + Arguments: + closure: A closure that reevaluates the model and returns the loss. + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + diff --git a/_sources/autoapi/lmflow/pipeline/auto_pipeline/index.rst.txt b/_sources/autoapi/lmflow/pipeline/auto_pipeline/index.rst.txt new file mode 100644 index 000000000..8b8a8ed9a --- /dev/null +++ b/_sources/autoapi/lmflow/pipeline/auto_pipeline/index.rst.txt @@ -0,0 +1,72 @@ +lmflow.pipeline.auto_pipeline +============================= + +.. py:module:: lmflow.pipeline.auto_pipeline + +.. autoapi-nested-parse:: + + Return a pipeline automatically based on its name. + + .. + !! processed by numpydoc !! + + +Attributes +---------- + +.. autoapisummary:: + + lmflow.pipeline.auto_pipeline.PIPELINE_MAPPING + + +Classes +------- + +.. autoapisummary:: + + lmflow.pipeline.auto_pipeline.AutoPipeline + + +Functions +--------- + +.. autoapisummary:: + + lmflow.pipeline.auto_pipeline.is_package_version_at_least + + +Module Contents +--------------- + +.. py:function:: is_package_version_at_least(package_name, min_version) + +.. py:data:: PIPELINE_MAPPING + +.. py:class:: AutoPipeline + + + The class designed to return a pipeline automatically based on its name. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. 
py:method:: get_pipeline(pipeline_name, model_args, data_args, pipeline_args, *args, **kwargs) + :classmethod: + + + diff --git a/_sources/autoapi/lmflow/pipeline/base_aligner/index.rst.txt b/_sources/autoapi/lmflow/pipeline/base_aligner/index.rst.txt new file mode 100644 index 000000000..23043558c --- /dev/null +++ b/_sources/autoapi/lmflow/pipeline/base_aligner/index.rst.txt @@ -0,0 +1,58 @@ +lmflow.pipeline.base_aligner +============================ + +.. py:module:: lmflow.pipeline.base_aligner + +.. autoapi-nested-parse:: + + BaseTuner: a subclass of BasePipeline. + + .. + !! processed by numpydoc !! + + +Classes +------- + +.. autoapisummary:: + + lmflow.pipeline.base_aligner.BaseAligner + + +Module Contents +--------------- + +.. py:class:: BaseAligner(*args, **kwargs) + + Bases: :py:obj:`lmflow.pipeline.base_pipeline.BasePipeline` + + + + A subclass of BasePipeline which is alignable. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:method:: _check_if_alignable(model, dataset, reward_model) + + + .. py:method:: align(model, dataset, reward_model) + :abstractmethod: + + + diff --git a/_sources/autoapi/lmflow/pipeline/base_pipeline/index.rst.txt b/_sources/autoapi/lmflow/pipeline/base_pipeline/index.rst.txt new file mode 100644 index 000000000..0af8835df --- /dev/null +++ b/_sources/autoapi/lmflow/pipeline/base_pipeline/index.rst.txt @@ -0,0 +1,51 @@ +lmflow.pipeline.base_pipeline +============================= + +.. py:module:: lmflow.pipeline.base_pipeline + +.. autoapi-nested-parse:: + + BasePipeline. + + .. + !! processed by numpydoc !! + + +Classes +------- + +.. autoapisummary:: + + lmflow.pipeline.base_pipeline.BasePipeline + + +Module Contents +--------------- + +.. py:class:: BasePipeline + + Bases: :py:obj:`abc.ABC` + + + + Helper class that provides a standard way to create an ABC using + inheritance. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + diff --git a/_sources/autoapi/lmflow/pipeline/base_tuner/index.rst.txt b/_sources/autoapi/lmflow/pipeline/base_tuner/index.rst.txt new file mode 100644 index 000000000..a2953952d --- /dev/null +++ b/_sources/autoapi/lmflow/pipeline/base_tuner/index.rst.txt @@ -0,0 +1,58 @@ +lmflow.pipeline.base_tuner +========================== + +.. py:module:: lmflow.pipeline.base_tuner + +.. autoapi-nested-parse:: + + BaseTuner: a subclass of BasePipeline. + + .. + !! processed by numpydoc !! + + +Classes +------- + +.. autoapisummary:: + + lmflow.pipeline.base_tuner.BaseTuner + + +Module Contents +--------------- + +.. py:class:: BaseTuner(*args, **kwargs) + + Bases: :py:obj:`lmflow.pipeline.base_pipeline.BasePipeline` + + + + A subclass of BasePipeline which is tunable. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:method:: _check_if_tunable(model, dataset) + + + .. py:method:: tune(model, dataset) + :abstractmethod: + + + diff --git a/_sources/autoapi/lmflow/pipeline/dpo_aligner/index.rst.txt b/_sources/autoapi/lmflow/pipeline/dpo_aligner/index.rst.txt new file mode 100644 index 000000000..db1d787e0 --- /dev/null +++ b/_sources/autoapi/lmflow/pipeline/dpo_aligner/index.rst.txt @@ -0,0 +1,103 @@ +lmflow.pipeline.dpo_aligner +=========================== + +.. py:module:: lmflow.pipeline.dpo_aligner + + +Classes +------- + +.. autoapisummary:: + + lmflow.pipeline.dpo_aligner.DPOAligner + + +Functions +--------- + +.. autoapisummary:: + + lmflow.pipeline.dpo_aligner.get_paired_dataset + + +Module Contents +--------------- + +.. 
py:function:: get_paired_dataset(data_root: str, data_dir: str, sanity_check: bool = False, cache_dir: Optional[str] = None, num_proc=24) -> datasets.Dataset + + + Load dataset and convert it to the necessary format. + + The dataset is converted to a dictionary with the following structure: + { + 'prompt': List[str], + 'chosen': List[str], + 'rejected': List[str], + } + + Prompts are structured as follows: + "Question: " + + " + + Answer: " + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + +.. py:class:: DPOAligner(model_args, data_args, aligner_args) + + Bases: :py:obj:`lmflow.pipeline.base_aligner.BaseAligner` + + + + A subclass of BasePipeline which is alignable. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:attribute:: model_args + + + .. py:attribute:: data_args + + + .. py:attribute:: aligner_args + + + .. py:method:: _initialize_trainer(model, tokenizer) + + + .. py:method:: _load_dataset() + + + .. py:method:: align(model, dataset, reward_model) + + diff --git a/_sources/autoapi/lmflow/pipeline/dpov2_aligner/index.rst.txt b/_sources/autoapi/lmflow/pipeline/dpov2_aligner/index.rst.txt new file mode 100644 index 000000000..21639e8db --- /dev/null +++ b/_sources/autoapi/lmflow/pipeline/dpov2_aligner/index.rst.txt @@ -0,0 +1,182 @@ +lmflow.pipeline.dpov2_aligner +============================= + +.. py:module:: lmflow.pipeline.dpov2_aligner + + +Attributes +---------- + +.. autoapisummary:: + + lmflow.pipeline.dpov2_aligner.logger + lmflow.pipeline.dpov2_aligner.ReferenceModelArguments + + +Classes +------- + +.. autoapisummary:: + + lmflow.pipeline.dpov2_aligner.DPOv2Aligner + lmflow.pipeline.dpov2_aligner.MemorySafeDPOv2Aligner + + +Module Contents +--------------- + +.. py:data:: logger + +.. py:data:: ReferenceModelArguments + +.. py:class:: DPOv2Aligner(model_args: lmflow.args.ModelArguments, data_args: lmflow.args.DatasetArguments, aligner_args: lmflow.args.DPOv2AlignerArguments, ref_model_args: lmflow.args.ModelArguments) + + Bases: :py:obj:`lmflow.pipeline.base_aligner.BaseAligner` + + + + A subclass of BasePipeline which is alignable. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:attribute:: model_args + + + .. py:attribute:: ref_model_args + + + .. py:attribute:: data_args + + + .. py:attribute:: aligner_args + + + .. py:method:: align(model: lmflow.models.hf_decoder_model.HFDecoderModel, ref_model: lmflow.models.hf_decoder_model.HFDecoderModel, train_dataset: lmflow.datasets.dataset.Dataset, eval_dataset: lmflow.datasets.dataset.Dataset, transform_dataset_in_place: bool = True) + + + .. py:method:: __prepare_training_args(args: lmflow.args.DPOv2AlignerArguments) -> transformers.TrainingArguments + + + .. py:method:: convert_to_paired_dataset(source_dataset: lmflow.datasets.dataset.Dataset, sampling_paired_method: str = 'random', length_penalty: float = 0.0, margin_scale: float = 1.0, use_fast: bool = False) -> lmflow.datasets.dataset.Dataset + + + Convert a scored one to multiple (text_to_scored_textlist) to a paired dataset by rejection sampling. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: _calc_response_lengths(outputs: List[Union[str, Dict[str, str]]], dataset_type: str) -> List[int] + + + .. py:method:: _calc_reward_with_length_penalty(rewards: List[float], lengths: List[int], length_penalty: float) -> List[float] + + + When length_penalty > 0, penalize the longer sequence by subtracting + length_penalty * length from the reward. 
Vice versa when length_penalty < 0. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: sampling_paired_idx_from_rewards(rewards: List[float], sampling_paired_method: str = 'random', use_fast: bool = False) -> Tuple[int, int] + + + Prepare the dataset for DPO training by rejection sampling. + We implement different strategies to select pairs, including + random: randomly select two instances + max_min: best v.s. worst + max_max: best v.s. second best + max_random: best v.s. random from the remaining + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: _sampling_paired_idx_from_rewards(rewards: List[float], sampling_paired_method: str = 'random') -> Tuple[int, int] + + + .. py:method:: _sampling_paired_idx_from_rewards_fast(rewards: List[float], sampling_paired_method: str = 'random') -> Tuple[int, int] + + +.. py:class:: MemorySafeDPOv2Aligner(model_args: lmflow.args.ModelArguments, data_args: lmflow.args.DatasetArguments, aligner_args: lmflow.args.DPOv2AlignerArguments, ref_model_args: lmflow.args.ModelArguments) + + .. py:attribute:: model_args + + + .. py:attribute:: ref_model_args + + + .. py:attribute:: data_args + + + .. py:attribute:: aligner_args + + + .. py:attribute:: aligner_file_path + + + .. py:method:: align() + + diff --git a/_sources/autoapi/lmflow/pipeline/evaluator/index.rst.txt b/_sources/autoapi/lmflow/pipeline/evaluator/index.rst.txt new file mode 100644 index 000000000..a982f88fa --- /dev/null +++ b/_sources/autoapi/lmflow/pipeline/evaluator/index.rst.txt @@ -0,0 +1,168 @@ +lmflow.pipeline.evaluator +========================= + +.. py:module:: lmflow.pipeline.evaluator + +.. autoapi-nested-parse:: + + The Evaluator class simplifies the process of running evaluation on a language model provided by a HFDecoderModel instance imported from the lmflow package. The class constructor takes three dictionaries as arguments: model_args containing arguments related to the language model, data_args containing arguments related to the data used for evaluation, and evaluator_args containing other arguments for the evaluation process. + + The class has two methods: create_dataloader() that loads the data from the test file, creates a data loader, and returns it with the size of the data, and evaluate(model) that generates output text given input text. It uses the create_dataloader() method to load the data, iterates over the data in mini-batches, and encodes the input text with the encode() method of the HFDecoderModel class. Then, it generates output text using the evaluate() method of the HFDecoderModel class, decodes the generated output text using the decode() method of the HFDecoderModel class, and writes the output to a file in the output directory. The method also logs some information to the console and Weights and Biases if the use_wandb argument is True. + + .. + !! processed by numpydoc !! + + +Classes +------- + +.. autoapisummary:: + + lmflow.pipeline.evaluator.Evaluator + + +Module Contents +--------------- + +.. py:class:: Evaluator(model_args, data_args, evaluator_args) + + Bases: :py:obj:`lmflow.pipeline.base_pipeline.BasePipeline` + + + + Initializes the `Evaluator` class with given arguments. + + + :Parameters: + + **model_args** : ModelArguments object. + Contains the arguments required to load the model. + + **data_args** : DatasetArguments object. + Contains the arguments required to load the dataset. + + **evaluator_args** : EvaluatorArguments object. 
+ Contains the arguments required to perform evaluation. + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:attribute:: data_args + + + .. py:attribute:: evaluator_args + + + .. py:attribute:: model_args + + + .. py:attribute:: local_rank + + + .. py:attribute:: world_size + + + .. py:attribute:: config + + + .. py:attribute:: train_batch_size + + + .. py:attribute:: minibatch_size + + + .. py:attribute:: block_size + + + .. py:method:: create_dataloader(dataset: lmflow.datasets.dataset.Dataset) + + + .. py:method:: _match(predicted_answer, groundtruth, answer_type=None) + + + .. py:method:: evaluate(model, dataset: lmflow.datasets.dataset.Dataset, metric='accuracy', verbose=True) + + + Perform Evaluation for a model + + + :Parameters: + + **model** : TunableModel object. + TunableModel to perform inference + + **dataset** : Dataset object. + .. + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: _evaluate_acc_with_accelerator(model, dataset, verbose=True) + + + .. py:method:: _evaluate_acc_with_deepspeed(model, dataset, verbose=True) + + + .. py:method:: _evaluate_ppl(model, dataset: lmflow.datasets.dataset.Dataset, verbose=True) + + + .. py:method:: _evaluate_nll(model, dataset: lmflow.datasets.dataset.Dataset, verbose=True) + + + Evaluates negative log likelihood of the model over a dataset. + + NLL = -1/N sum_{i=1}^N sum_{j=1}^|w_i| ln(p(w_{i,j}|context_window)), + + where N is the number of data samples, w_{i,j} is the j-th token in + i-th sample. Here "context_window" = p(w_{i,start}, w_{i,start+1}, ..., + p_{i,j-1} with start = max(0, j - window_length + 1). "window_length" + is normally the maximum length accepted by the model. + + Returns: + A float which represents the negative log likelihood. + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + diff --git a/_sources/autoapi/lmflow/pipeline/finetuner/index.rst.txt b/_sources/autoapi/lmflow/pipeline/finetuner/index.rst.txt new file mode 100644 index 000000000..c87ecd35c --- /dev/null +++ b/_sources/autoapi/lmflow/pipeline/finetuner/index.rst.txt @@ -0,0 +1,155 @@ +lmflow.pipeline.finetuner +========================= + +.. py:module:: lmflow.pipeline.finetuner + +.. autoapi-nested-parse:: + + The Finetuner class simplifies the process of running finetuning process on a language model for a TunableModel instance with given dataset. + + .. + !! processed by numpydoc !! + + +Attributes +---------- + +.. autoapisummary:: + + lmflow.pipeline.finetuner.logger + + +Classes +------- + +.. autoapisummary:: + + lmflow.pipeline.finetuner.Finetuner + + +Module Contents +--------------- + +.. py:data:: logger + +.. py:class:: Finetuner(model_args, data_args, finetuner_args, *args, **kwargs) + + Bases: :py:obj:`lmflow.pipeline.base_tuner.BaseTuner` + + + + Initializes the `Finetuner` class with given arguments. + + + :Parameters: + + **model_args** : ModelArguments object. + Contains the arguments required to load the model. + + **data_args** : DatasetArguments object. + Contains the arguments required to load the dataset. + + **finetuner_args** : FinetunerArguments object. + Contains the arguments required to perform finetuning. + + **args** : Optional. + Positional arguments. + + **kwargs** : Optional. + Keyword arguments. + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:attribute:: model_args + + + .. py:attribute:: data_args + + + .. py:attribute:: finetuner_args + + + .. py:attribute:: log_level + + + .. 
py:attribute:: last_checkpoint + :value: None + + + + .. py:attribute:: last_checkpoint + + + .. py:method:: group_text(tokenized_datasets, model_max_length) + + + Groups texts together to form blocks of maximum length `model_max_length` and returns the processed data as + a dictionary. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: create_customized_optimizer(base_trainer_class, model_args) + + + .. py:method:: tune(model, dataset, transform_dataset_in_place=True, data_collator=None) + + + Perform tuning for a model + + + :Parameters: + + **model** : TunableModel object. + TunableModel to perform tuning. + + **dataset:** + dataset to train model. + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + diff --git a/_sources/autoapi/lmflow/pipeline/index.rst.txt b/_sources/autoapi/lmflow/pipeline/index.rst.txt new file mode 100644 index 000000000..0c4a6a860 --- /dev/null +++ b/_sources/autoapi/lmflow/pipeline/index.rst.txt @@ -0,0 +1,37 @@ +lmflow.pipeline +=============== + +.. py:module:: lmflow.pipeline + + +Subpackages +----------- + +.. toctree:: + :maxdepth: 1 + + /autoapi/lmflow/pipeline/utils/index + + +Submodules +---------- + +.. toctree:: + :maxdepth: 1 + + /autoapi/lmflow/pipeline/auto_pipeline/index + /autoapi/lmflow/pipeline/base_aligner/index + /autoapi/lmflow/pipeline/base_pipeline/index + /autoapi/lmflow/pipeline/base_tuner/index + /autoapi/lmflow/pipeline/dpo_aligner/index + /autoapi/lmflow/pipeline/dpov2_aligner/index + /autoapi/lmflow/pipeline/evaluator/index + /autoapi/lmflow/pipeline/finetuner/index + /autoapi/lmflow/pipeline/inferencer/index + /autoapi/lmflow/pipeline/iterative_dpo_aligner/index + /autoapi/lmflow/pipeline/raft_aligner/index + /autoapi/lmflow/pipeline/rm_inferencer/index + /autoapi/lmflow/pipeline/rm_tuner/index + /autoapi/lmflow/pipeline/vllm_inferencer/index + + diff --git a/_sources/autoapi/lmflow/pipeline/inferencer/index.rst.txt b/_sources/autoapi/lmflow/pipeline/inferencer/index.rst.txt new file mode 100644 index 000000000..3b899eab9 --- /dev/null +++ b/_sources/autoapi/lmflow/pipeline/inferencer/index.rst.txt @@ -0,0 +1,468 @@ +lmflow.pipeline.inferencer +========================== + +.. py:module:: lmflow.pipeline.inferencer + +.. autoapi-nested-parse:: + + The Inferencer class simplifies the process of model inferencing. + + .. + !! processed by numpydoc !! + + +Attributes +---------- + +.. autoapisummary:: + + lmflow.pipeline.inferencer.supported_dataset_type + lmflow.pipeline.inferencer.logger + + +Classes +------- + +.. autoapisummary:: + + lmflow.pipeline.inferencer.Inferencer + lmflow.pipeline.inferencer.SpeculativeInferencer + lmflow.pipeline.inferencer.ToolInferencer + + +Functions +--------- + +.. autoapisummary:: + + lmflow.pipeline.inferencer.rstrip_partial_utf8 + + +Module Contents +--------------- + +.. py:function:: rstrip_partial_utf8(string) + +.. py:data:: supported_dataset_type + :value: ['text_only', 'image_text'] + + +.. py:data:: logger + +.. py:class:: Inferencer(model_args, data_args, inferencer_args) + + Bases: :py:obj:`lmflow.pipeline.base_pipeline.BasePipeline` + + + + Initializes the `Inferencer` class with given arguments. + + + :Parameters: + + **model_args** : ModelArguments object. + Contains the arguments required to load the model. + + **data_args** : DatasetArguments object. + Contains the arguments required to load the dataset. + + **inferencer_args** : InferencerArguments object. + Contains the arguments required to perform inference. 
+ + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:attribute:: data_args + + + .. py:attribute:: inferencer_args + + + .. py:attribute:: model_args + + + .. py:attribute:: local_rank + + + .. py:attribute:: world_size + + + .. py:attribute:: config + + + .. py:method:: create_dataloader(dataset: lmflow.datasets.dataset.Dataset) + + + Batchlize dataset and format it to dataloader. + + Args: + dataset (Dataset): the dataset object + + Output: + dataloader (batchlize): the dataloader object + dataset_size (int): the length of the dataset + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: inference(model, dataset: lmflow.datasets.dataset.Dataset, max_new_tokens: int = 100, temperature: float = 0.0, prompt_structure: str = '{input}', remove_image_flag: bool = False, chatbot_type: str = 'mini_gpt') + + + Perform inference for a model + + + :Parameters: + + **model** : TunableModel object. + TunableModel to perform inference + + **dataset** : Dataset object. + .. + + **Returns:** + .. + + **output_dataset: Dataset object.** + .. + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: stream_inference(context, model, max_new_tokens, token_per_step, temperature, end_string, input_dataset, remove_image_flag: bool = False) + + +.. py:class:: SpeculativeInferencer(model_args, draft_model_args, data_args, inferencer_args) + + Bases: :py:obj:`Inferencer` + + + + Ref: [arXiv:2211.17192v2](https://arxiv.org/abs/2211.17192) + + + :Parameters: + + **target_model_args** : ModelArguments object. + Contains the arguments required to load the target model. + + **draft_model_args** : ModelArguments object. + Contains the arguments required to load the draft model. + + **data_args** : DatasetArguments object. + Contains the arguments required to load the dataset. + + **inferencer_args** : InferencerArguments object. + Contains the arguments required to perform inference. + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:attribute:: draft_model_args + + + .. py:attribute:: draft_config + + + .. py:method:: score_to_prob(scores: torch.Tensor, temperature: float = 0.0, top_p: float = 1.0) -> torch.Tensor + :staticmethod: + + + + Convert scores (NOT softmaxed tensor) to probabilities with support for temperature, top-p sampling, and argmax. + + + :Parameters: + + **scores** : torch.Tensor + Input scores. + + **temperature** : float, optional + Temperature parameter for controlling randomness. Higher values make the distribution more uniform, + lower values make it peakier. When temperature <= 1e-6, argmax is used. by default 0.0 + + **top_p** : float, optional + Top-p sampling parameter for controlling the cumulative probability threshold, by default 1.0 (no threshold) + + :Returns: + + torch.Tensor + Probability distribution after adjustments. + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: sample(prob: torch.Tensor, num_samples: int = 1) -> Dict + :staticmethod: + + + + Sample from a tensor of probabilities + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: predict_next_token(model: lmflow.models.hf_decoder_model.HFDecoderModel, input_ids: torch.Tensor, num_new_tokens: int = 1) + :staticmethod: + + + + Predict the next token given the input_ids. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. 
py:method:: autoregressive_sampling(input_ids: torch.Tensor, model: lmflow.models.hf_decoder_model.HFDecoderModel, temperature: float = 0.0, num_new_tokens: int = 5) -> Dict + + + Ref: [arXiv:2211.17192v2](https://arxiv.org/abs/2211.17192) Section 2.2 + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: inference(model: lmflow.models.hf_decoder_model.HFDecoderModel, draft_model: lmflow.models.hf_decoder_model.HFDecoderModel, input: str, temperature: float = 0.0, gamma: int = 5, max_new_tokens: int = 100) + + + Perform inference for a model + + + :Parameters: + + **model** : HFDecoderModel object. + TunableModel to verify tokens generated by the draft model. + + **draft_model** : HFDecoderModel object. + TunableModel that provides approximations of the target model. + + **input** : str. + The input text (i.e., the prompt) for the model. + + **gamma** : int. + The number of tokens to be generated by the draft model within each iter. + + **max_new_tokens** : int. + The maximum number of tokens to be generated by the target model. + + :Returns: + + output: str. + The output text generated by the model. + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: stream_inference() + :abstractmethod: + + + +.. py:class:: ToolInferencer(model_args, data_args, inferencer_args) + + Bases: :py:obj:`Inferencer` + + + + Initializes the `ToolInferencer` class with given arguments. + + + :Parameters: + + **model_args** : ModelArguments object. + Contains the arguments required to load the model. + + **data_args** : DatasetArguments object. + Contains the arguments required to load the dataset. + + **inferencer_args** : InferencerArguments object. + Contains the arguments required to perform inference. + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:attribute:: model + + + .. py:method:: inference(model: lmflow.models.hf_decoder_model.HFDecoderModel, input: str, max_new_tokens: int = 1024) + + + Perform inference for a model + + + :Parameters: + + **model** : HFDecoderModel object. + TunableModel to perform inference + + **input** : str. + The input text (i.e., the prompt) for the model. + + **max_new_tokens** : int. + The maximum number of tokens to be generated by the model. + + **Returns:** + .. + + **output** : str. + The output text generated by the model. + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: code_exec(code) + + diff --git a/_sources/autoapi/lmflow/pipeline/iterative_dpo_aligner/index.rst.txt b/_sources/autoapi/lmflow/pipeline/iterative_dpo_aligner/index.rst.txt new file mode 100644 index 000000000..78b0a5373 --- /dev/null +++ b/_sources/autoapi/lmflow/pipeline/iterative_dpo_aligner/index.rst.txt @@ -0,0 +1,74 @@ +lmflow.pipeline.iterative_dpo_aligner +===================================== + +.. py:module:: lmflow.pipeline.iterative_dpo_aligner + + +Attributes +---------- + +.. autoapisummary:: + + lmflow.pipeline.iterative_dpo_aligner.logger + + +Classes +------- + +.. autoapisummary:: + + lmflow.pipeline.iterative_dpo_aligner.IterativeDPOAligner + + +Module Contents +--------------- + +.. py:data:: logger + +.. py:class:: IterativeDPOAligner(model_args: lmflow.args.ModelArguments, data_args: lmflow.args.DatasetArguments, aligner_args: lmflow.args.IterativeDPOAlignerArguments, ref_model_args: lmflow.args.ModelArguments, reward_model_args: lmflow.args.ModelArguments, **kwargs) + + .. py:attribute:: model_args + + + .. py:attribute:: data_args + + + .. 
py:attribute:: aligner_args + + + .. py:attribute:: ref_model_args + + + .. py:attribute:: reward_model_args + + + .. py:attribute:: workspace_path + + + .. py:method:: align(dataset_list: List[lmflow.datasets.dataset.Dataset]) + + + .. py:method:: _align_single_iteration(iteration_name: str, target_model_args: lmflow.args.ModelArguments, reward_model_args: lmflow.args.ModelArguments, ref_model_args: lmflow.args.ModelArguments, dataset: lmflow.datasets.dataset.Dataset) + + + .. py:method:: _do_target_model_inference(model: lmflow.models.hf_decoder_model.HFDecoderModel, dataset: lmflow.datasets.dataset.Dataset, output_dir: str) + + + .. py:method:: _do_reward_model_inference(model: lmflow.models.hf_text_regression_model.HFTextRegressionModel, dataset: lmflow.datasets.dataset.Dataset, output_dir: str) + + + .. py:method:: _do_single_dpo_align(model_args: lmflow.args.ModelArguments, ref_model_args: lmflow.args.ModelArguments, data_args: lmflow.args.DatasetArguments, output_dir: str, iteration_name: str) + + + .. py:method:: _parse_target_model_inference_args(args: lmflow.args.IterativeDPOAlignerArguments, result_cache_path: str) -> lmflow.args.InferencerArguments + + + .. py:method:: _parse_reward_model_inference_args(args: lmflow.args.IterativeDPOAlignerArguments) -> lmflow.args.InferencerArguments + + + .. py:method:: _parse_dpo_aligner_args(args: lmflow.args.IterativeDPOAlignerArguments, output_dir: str, iteration_name: str) -> lmflow.args.DPOv2AlignerArguments + + + .. py:method:: __filter_args(mixed_args, target_cls) + + diff --git a/_sources/autoapi/lmflow/pipeline/raft_aligner/index.rst.txt b/_sources/autoapi/lmflow/pipeline/raft_aligner/index.rst.txt new file mode 100644 index 000000000..627ccba51 --- /dev/null +++ b/_sources/autoapi/lmflow/pipeline/raft_aligner/index.rst.txt @@ -0,0 +1,262 @@ +lmflow.pipeline.raft_aligner +============================ + +.. py:module:: lmflow.pipeline.raft_aligner + +.. autoapi-nested-parse:: + + The Aligner class simplifies the process of running alignment. + + .. + !! processed by numpydoc !! + + +Attributes +---------- + +.. autoapisummary:: + + lmflow.pipeline.raft_aligner.logger + + +Classes +------- + +.. autoapisummary:: + + lmflow.pipeline.raft_aligner.RaftAligner + + +Module Contents +--------------- + +.. py:data:: logger + +.. py:class:: RaftAligner(model_args, data_args, aligner_args, *args, **kwargs) + + Bases: :py:obj:`lmflow.pipeline.base_aligner.BaseAligner` + + + + Initializes the `RaftAligner` class with given arguments. + + + :Parameters: + + **model_args** : ModelArguments object. + Contains the arguments required to load the model. + + **data_args** : DatasetArguments object. + Contains the arguments required to load the dataset. + + **raft_aligner_args** : RaftAlignerArguments object. + Contains the arguments required to perform alignment. + + **args** : Optional. + Positional arguments. + + **kwargs** : Optional. + Keyword arguments. + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:attribute:: model_args + + + .. py:attribute:: data_args + + + .. py:attribute:: aligner_args + + + .. py:attribute:: INF + :value: 888888888 + + + + .. py:attribute:: output_reward_path + + + .. py:method:: _initialize_trainer(model, tokenizer, training_args) + + + This function takes the model and tokenizer as the input and initialize the trainer. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. 
py:method:: _load_dataset(selected_dataset, model, tokenizer, model_args, data_args, training_args) + + + This function prepares the dataset for every iteration. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: _load_input_dataset(dataset, tokenizer) + + + Load input dataset (i.e. prompt/question dataset) for training. + + Args: + dataset: A Dataset object. + The dataset to be loaded. + + Returns: + dataloader (`torch.utils.data.DataLoader`): + The dataloader for the dataset. + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: _clean_text(text) + + + .. py:method:: _discard_sample(text) + + + .. py:method:: _get_batch_dataset_top(model, batch_input, alpha=0.2, iter_id=0, local_rank=0, output_min_length=16, output_max_length=48, infer_batch_size=8, generation_kwargs={}, tokenizer=None, training_args=None, reward_model=None, output_reward_path=None) + + + :param batch_input: input prompts + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: _get_batch_dataset_local(model, batch_input, K=8, iter_id=0, local_rank=0, output_min_length=16, output_max_length=48, infer_batch_size=8, generation_kwargs={}, tokenizer=None, training_args=None, reward_model=None, output_reward_path=None) + + + :param batch_input: input prompts + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: align(model, dataset, reward_model) + + + Perform alignment for a model + + + :Parameters: + + **model** : BaseModel object. + .. + + **dataset: Dataset object.** + Input dataset for model to generate outputs. The input and output + will then be feed into reward model to get the reward for + alignment. + + **reward_model: RegressionModel object.** + .. + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + diff --git a/_sources/autoapi/lmflow/pipeline/rm_inferencer/index.rst.txt b/_sources/autoapi/lmflow/pipeline/rm_inferencer/index.rst.txt new file mode 100644 index 000000000..7633e1970 --- /dev/null +++ b/_sources/autoapi/lmflow/pipeline/rm_inferencer/index.rst.txt @@ -0,0 +1,104 @@ +lmflow.pipeline.rm_inferencer +============================= + +.. py:module:: lmflow.pipeline.rm_inferencer + + +Attributes +---------- + +.. autoapisummary:: + + lmflow.pipeline.rm_inferencer.logger + + +Classes +------- + +.. autoapisummary:: + + lmflow.pipeline.rm_inferencer.RewardModelInferencer + + +Module Contents +--------------- + +.. py:data:: logger + +.. py:class:: RewardModelInferencer(model_args: lmflow.args.ModelArguments, data_args: lmflow.args.DatasetArguments, inferencer_args: lmflow.args.InferencerArguments, **kwargs) + + Bases: :py:obj:`lmflow.pipeline.base_pipeline.BasePipeline` + + + + Initializes the `Inferencer` class with given arguments. + + + :Parameters: + + **model_args** : ModelArguments object. + Contains the arguments required to load the model. + + **data_args** : DatasetArguments object. + Contains the arguments required to load the dataset. + + **inferencer_args** : InferencerArguments object. + Contains the arguments required to perform inference. + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:attribute:: data_args + + + .. py:attribute:: inferencer_args + + + .. py:attribute:: model_args + + + .. py:attribute:: local_rank + + + .. py:attribute:: world_size + + + .. 
py:method:: inference(model: lmflow.models.hf_text_regression_model.HFTextRegressionModel, dataset: lmflow.datasets.dataset.Dataset, transform_dataset_in_place: bool = True, use_vllm: bool = False, enable_distributed_inference: bool = False, **kwargs) -> lmflow.datasets.dataset.Dataset + + + .. py:method:: _inference(model: lmflow.models.hf_text_regression_model.HFTextRegressionModel, model_input: Union[lmflow.datasets.dataset.Dataset, ray.data.Dataset], enable_distributed_inference: bool = False, **kwargs) + + + .. py:method:: __inference(model: lmflow.models.hf_text_regression_model.HFTextRegressionModel, model_input: lmflow.datasets.dataset.Dataset) -> Union[List[float], List[List[float]]] + + + .. py:method:: __distributed_inference(model: lmflow.models.hf_text_regression_model.HFTextRegressionModel, model_input: ray.data.Dataset, num_instances: int, batch_size: int) -> List[lmflow.utils.data_utils.RewardModelInferenceResultWithInput] + + + .. py:method:: __vllm_inference(model: lmflow.models.hf_text_regression_model.HFTextRegressionModel, model_input: List[str], enable_distributed_inference: bool = False) -> List[float] + :abstractmethod: + + + + .. py:method:: __post_process_model_output(model_output: transformers.modeling_outputs.SequenceClassifierOutputWithPast) -> List[float] + + + .. py:method:: flatten_list(list_of_list: List[List]) -> Tuple[List, List[int]] + + + .. py:method:: compress_list(list_to_compress: List, sublist_lengths: List[int]) -> List[List] + + diff --git a/_sources/autoapi/lmflow/pipeline/rm_tuner/index.rst.txt b/_sources/autoapi/lmflow/pipeline/rm_tuner/index.rst.txt new file mode 100644 index 000000000..c07043786 --- /dev/null +++ b/_sources/autoapi/lmflow/pipeline/rm_tuner/index.rst.txt @@ -0,0 +1,100 @@ +lmflow.pipeline.rm_tuner +======================== + +.. py:module:: lmflow.pipeline.rm_tuner + + +Attributes +---------- + +.. autoapisummary:: + + lmflow.pipeline.rm_tuner.logger + + +Classes +------- + +.. autoapisummary:: + + lmflow.pipeline.rm_tuner.RewardModelTuner + + +Module Contents +--------------- + +.. py:data:: logger + +.. py:class:: RewardModelTuner(model_args, data_args, finetuner_args, *args, **kwargs) + + Bases: :py:obj:`lmflow.pipeline.finetuner.Finetuner` + + + + Initializes the `RewardModelTuner` class. + + + :Parameters: + + **model_args** : ModelArguments object. + Contains the arguments required to load the model. + + **data_args** : DatasetArguments object. + Contains the arguments required to load the dataset. + + **finetuner_args** : RewardModelTunerArguments object. + Contains the arguments required to perform finetuning. + + **args** : Optional. + Positional arguments. + + **kwargs** : Optional. + Keyword arguments. + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:method:: tune(model: lmflow.models.hf_text_regression_model.HFTextRegressionModel, dataset, transform_dataset_in_place=True, data_collator=None, **kwargs) + + + Perform tuning for a model + + + :Parameters: + + **model** : TunableModel object. + TunableModel to perform tuning. + + **dataset:** + dataset to train model. + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! 
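A minimal usage sketch (illustrative only, not part of the generated API reference): it shows how the ``RewardModelTuner`` documented above might be wired together with ``HFTextRegressionModel`` and ``Dataset``. The concrete argument fields used below (``model_name_or_path``, ``dataset_path``, ``output_dir``) are assumptions for illustration and may differ from the actual dataclass definitions in ``lmflow.args``.

.. code-block:: python

    # Illustrative sketch under assumed argument fields; not generated from the source.
    from lmflow.args import DatasetArguments, ModelArguments, RewardModelTunerArguments
    from lmflow.datasets.dataset import Dataset
    from lmflow.models.hf_text_regression_model import HFTextRegressionModel
    from lmflow.pipeline.rm_tuner import RewardModelTuner

    model_args = ModelArguments(model_name_or_path="facebook/opt-125m")           # assumed field
    data_args = DatasetArguments(dataset_path="data/preference_dataset")          # assumed field
    finetuner_args = RewardModelTunerArguments(output_dir="output/reward_model")  # assumed field

    model = HFTextRegressionModel(model_args)   # text-regression model serving as the reward model
    dataset = Dataset(data_args)                # paired preference data for reward modeling
    tuner = RewardModelTuner(model_args, data_args, finetuner_args)
    tuner.tune(model=model, dataset=dataset)    # signature as documented above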
+ + diff --git a/_sources/autoapi/lmflow/pipeline/utils/dpov2_dataprocessor/index.rst.txt b/_sources/autoapi/lmflow/pipeline/utils/dpov2_dataprocessor/index.rst.txt new file mode 100644 index 000000000..8a761f472 --- /dev/null +++ b/_sources/autoapi/lmflow/pipeline/utils/dpov2_dataprocessor/index.rst.txt @@ -0,0 +1,128 @@ +lmflow.pipeline.utils.dpov2_dataprocessor +========================================= + +.. py:module:: lmflow.pipeline.utils.dpov2_dataprocessor + + +Attributes +---------- + +.. autoapisummary:: + + lmflow.pipeline.utils.dpov2_dataprocessor.logger + + +Classes +------- + +.. autoapisummary:: + + lmflow.pipeline.utils.dpov2_dataprocessor.PreferenceDataCollatorWithPadding + + +Module Contents +--------------- + +.. py:data:: logger + +.. py:class:: PreferenceDataCollatorWithPadding + + .. py:attribute:: tokenizer + :type: transformers.PreTrainedTokenizerBase + + + .. py:attribute:: model + :type: Optional[transformers.PreTrainedModel] + :value: None + + + + .. py:attribute:: padding + :type: Union[bool, str] + :value: True + + + + .. py:attribute:: max_length + :type: Optional[int] + :value: None + + + + .. py:attribute:: max_prompt_length + :type: Optional[int] + :value: None + + + + .. py:attribute:: label_pad_token_id + :type: int + + + .. py:attribute:: padding_value + :type: int + :value: 0 + + + + .. py:attribute:: truncation_mode + :type: str + :value: 'keep_end' + + + + .. py:attribute:: is_encoder_decoder + :type: Optional[bool] + :value: False + + + + .. py:attribute:: max_target_length + :type: Optional[int] + :value: None + + + + .. py:attribute:: mask_prompt + :type: Optional[bool] + :value: False + + + + .. py:method:: tokenize_batch_element(prompt: str, chosen: str, rejected: str) -> Dict + + + Tokenize a single batch element. + + At this stage, we don't convert to PyTorch tensors yet; we just handle the truncation + in case the prompt + chosen or prompt + rejected responses is/are too long. First + we truncate the prompt; if we're still too long, we truncate the chosen/rejected. + + We also create the labels for the chosen/rejected responses, which are of length equal to + the sum of the length of the prompt and the chosen/rejected response, with + label_pad_token_id for the prompt tokens. + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: collate(batch) + + + .. py:method:: __call__(features: List[Dict[str, Any]]) -> Dict[str, Any] + + diff --git a/_sources/autoapi/lmflow/pipeline/utils/dpov2_trainer/index.rst.txt b/_sources/autoapi/lmflow/pipeline/utils/dpov2_trainer/index.rst.txt new file mode 100644 index 000000000..826a73ad2 --- /dev/null +++ b/_sources/autoapi/lmflow/pipeline/utils/dpov2_trainer/index.rst.txt @@ -0,0 +1,103 @@ +lmflow.pipeline.utils.dpov2_trainer +=================================== + +.. py:module:: lmflow.pipeline.utils.dpov2_trainer + + +Attributes +---------- + +.. autoapisummary:: + + lmflow.pipeline.utils.dpov2_trainer.logger + + +Classes +------- + +.. autoapisummary:: + + lmflow.pipeline.utils.dpov2_trainer.DPOv2Trainer + + +Module Contents +--------------- + +.. py:data:: logger + +.. 
py:class:: DPOv2Trainer(model: Union[transformers.PreTrainedModel, torch.nn.Module] = None, ref_model: Optional[Union[transformers.PreTrainedModel, torch.nn.Module]] = None, beta: float = 0.1, loss_type: Literal['sigmoid', 'hinge', 'cross_entropy', 'kl', 'rev_kl', 'raft'] = 'rev_kl', args: transformers.TrainingArguments = None, data_collator: Optional[transformers.DataCollator] = None, label_pad_token_id: int = -100, padding_value: int = 0, truncation_mode: str = 'keep_end', train_dataset: Optional[datasets.Dataset] = None, eval_dataset: Optional[Union[datasets.Dataset, Dict[str, datasets.Dataset]]] = None, tokenizer: Optional[transformers.PreTrainedTokenizerBase] = None, model_init: Optional[Callable[[], transformers.PreTrainedModel]] = None, callbacks: Optional[List[transformers.trainer_callback.TrainerCallback]] = None, optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None), preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None, max_length: Optional[int] = None, max_prompt_length: Optional[int] = None, max_target_length: Optional[int] = None, peft_config: Optional[Dict] = None, is_encoder_decoder: Optional[bool] = None, disable_dropout: bool = True, generate_during_eval: bool = False, compute_metrics: Optional[Callable[[transformers.trainer_utils.EvalLoopOutput], Dict]] = None, mask_prompt: Optional[bool] = False, len_penalty: float = 0, preprocessing_num_workers: int = 1) + + Bases: :py:obj:`trl.DPOTrainer` + + + .. py:attribute:: use_dpo_data_collator + :value: True + + + + .. py:attribute:: len_penalty + + + .. py:method:: dpo_loss(policy_chosen_logps: torch.FloatTensor, policy_rejected_logps: torch.FloatTensor, reference_chosen_logps: torch.FloatTensor, reference_rejected_logps: torch.FloatTensor, reference_free: bool = False, margin: Optional[torch.FloatTensor] = None, len_penalty: float = 0) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor] + + + Compute the DPO loss for a batch of policy and reference model log probabilities. + + Args: + policy_chosen_logps: Log probabilities of the policy model for the chosen responses. Shape: (batch_size,) + policy_rejected_logps: Log probabilities of the policy model for the rejected responses. Shape: (batch_size,) + reference_chosen_logps: Log probabilities of the reference model for the chosen responses. Shape: (batch_size,) + reference_rejected_logps: Log probabilities of the reference model for the rejected responses. Shape: (batch_size,) + beta: Temperature parameter for the DPO loss, typically something in the range of 0.1 to 0.5. We ignore the reference model as beta -> 0. + reference_free: If True, we ignore the _provided_ reference model and implicitly use a reference model that assigns equal probability to all responses. + + Returns: + A tuple of three tensors: (losses, chosen_rewards, rejected_rewards). + The losses tensor contains the DPO loss for each example in the batch. + The chosen_rewards and rejected_rewards tensors contain the rewards for the chosen and rejected responses, respectively. + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: get_batch_loss_metrics(model, batch: Dict[str, Union[List, torch.LongTensor]], train_eval: Literal['train', 'eval'] = 'train') + + + .. 
py:method:: get_batch_metrics(model, batch: Dict[str, Union[List, torch.LongTensor]], train_eval: Literal['train', 'eval'] = 'train') + + + Compute the DPO loss and other metrics for the given batch of inputs for train or test. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + diff --git a/_sources/autoapi/lmflow/pipeline/utils/index.rst.txt b/_sources/autoapi/lmflow/pipeline/utils/index.rst.txt new file mode 100644 index 000000000..68ca9f80d --- /dev/null +++ b/_sources/autoapi/lmflow/pipeline/utils/index.rst.txt @@ -0,0 +1,22 @@ +lmflow.pipeline.utils +===================== + +.. py:module:: lmflow.pipeline.utils + + +Submodules +---------- + +.. toctree:: + :maxdepth: 1 + + /autoapi/lmflow/pipeline/utils/dpov2_dataprocessor/index + /autoapi/lmflow/pipeline/utils/dpov2_trainer/index + /autoapi/lmflow/pipeline/utils/memory_safe_dpov2_align/index + /autoapi/lmflow/pipeline/utils/memory_safe_vllm_inference/index + /autoapi/lmflow/pipeline/utils/peft_trainer/index + /autoapi/lmflow/pipeline/utils/raft_trainer/index + /autoapi/lmflow/pipeline/utils/rm_dataprocessor/index + /autoapi/lmflow/pipeline/utils/rm_trainer/index + + diff --git a/_sources/autoapi/lmflow/pipeline/utils/memory_safe_dpov2_align/index.rst.txt b/_sources/autoapi/lmflow/pipeline/utils/memory_safe_dpov2_align/index.rst.txt new file mode 100644 index 000000000..023a68c13 --- /dev/null +++ b/_sources/autoapi/lmflow/pipeline/utils/memory_safe_dpov2_align/index.rst.txt @@ -0,0 +1,33 @@ +lmflow.pipeline.utils.memory_safe_dpov2_align +============================================= + +.. py:module:: lmflow.pipeline.utils.memory_safe_dpov2_align + + +Attributes +---------- + +.. autoapisummary:: + + lmflow.pipeline.utils.memory_safe_dpov2_align.logger + lmflow.pipeline.utils.memory_safe_dpov2_align.ReferenceModelArguments + + +Functions +--------- + +.. autoapisummary:: + + lmflow.pipeline.utils.memory_safe_dpov2_align.main + + +Module Contents +--------------- + +.. py:data:: logger + +.. py:data:: ReferenceModelArguments + :type: lmflow.args.ModelArguments + +.. py:function:: main() + diff --git a/_sources/autoapi/lmflow/pipeline/utils/memory_safe_vllm_inference/index.rst.txt b/_sources/autoapi/lmflow/pipeline/utils/memory_safe_vllm_inference/index.rst.txt new file mode 100644 index 000000000..53ae3f3a7 --- /dev/null +++ b/_sources/autoapi/lmflow/pipeline/utils/memory_safe_vllm_inference/index.rst.txt @@ -0,0 +1,29 @@ +lmflow.pipeline.utils.memory_safe_vllm_inference +================================================ + +.. py:module:: lmflow.pipeline.utils.memory_safe_vllm_inference + + +Attributes +---------- + +.. autoapisummary:: + + lmflow.pipeline.utils.memory_safe_vllm_inference.logger + + +Functions +--------- + +.. autoapisummary:: + + lmflow.pipeline.utils.memory_safe_vllm_inference.main + + +Module Contents +--------------- + +.. py:data:: logger + +.. py:function:: main() + diff --git a/_sources/autoapi/lmflow/pipeline/utils/peft_trainer/index.rst.txt b/_sources/autoapi/lmflow/pipeline/utils/peft_trainer/index.rst.txt new file mode 100644 index 000000000..aed03c218 --- /dev/null +++ b/_sources/autoapi/lmflow/pipeline/utils/peft_trainer/index.rst.txt @@ -0,0 +1,135 @@ +lmflow.pipeline.utils.peft_trainer +================================== + +.. py:module:: lmflow.pipeline.utils.peft_trainer + +.. autoapi-nested-parse:: + + Trainer for Peft models + + .. + !! processed by numpydoc !! + + +Classes +------- + +.. 
autoapisummary:: + + lmflow.pipeline.utils.peft_trainer.PeftTrainer + lmflow.pipeline.utils.peft_trainer.PeftSavingCallback + + +Module Contents +--------------- + +.. py:class:: PeftTrainer + + Bases: :py:obj:`transformers.Trainer` + + + .. py:method:: _save_checkpoint(_, trial, metrics=None) + + + Don't save base model, optimizer etc. + but create checkpoint folder (needed for saving adapter) + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + +.. py:class:: PeftSavingCallback + + Bases: :py:obj:`transformers.trainer_callback.TrainerCallback` + + + + Correctly save PEFT model and not full model + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:method:: _save(model, folder) + + + .. py:method:: on_train_end(args: transformers.training_args.TrainingArguments, state: transformers.trainer_callback.TrainerState, control: transformers.trainer_callback.TrainerControl, **kwargs) + + + Save final best model adapter + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: on_epoch_end(args: transformers.training_args.TrainingArguments, state: transformers.trainer_callback.TrainerState, control: transformers.trainer_callback.TrainerControl, **kwargs) + + + Save intermediate model adapters in case of interrupted training + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: on_save(args: transformers.training_args.TrainingArguments, state: transformers.trainer_callback.TrainerState, control: transformers.trainer_callback.TrainerControl, **kwargs) + + diff --git a/_sources/autoapi/lmflow/pipeline/utils/raft_trainer/index.rst.txt b/_sources/autoapi/lmflow/pipeline/utils/raft_trainer/index.rst.txt new file mode 100644 index 000000000..78c9f6691 --- /dev/null +++ b/_sources/autoapi/lmflow/pipeline/utils/raft_trainer/index.rst.txt @@ -0,0 +1,1560 @@ +lmflow.pipeline.utils.raft_trainer +================================== + +.. py:module:: lmflow.pipeline.utils.raft_trainer + + +Attributes +---------- + +.. autoapisummary:: + + lmflow.pipeline.utils.raft_trainer.is_torch_greater_or_equal_than_1_10 + lmflow.pipeline.utils.raft_trainer.is_torch_less_than_1_11 + lmflow.pipeline.utils.raft_trainer._is_native_cpu_amp_available + lmflow.pipeline.utils.raft_trainer.DEFAULT_CALLBACKS + lmflow.pipeline.utils.raft_trainer.DEFAULT_PROGRESS_CALLBACK + lmflow.pipeline.utils.raft_trainer.DEFAULT_PROGRESS_CALLBACK + lmflow.pipeline.utils.raft_trainer.IS_SAGEMAKER_MP_POST_1_10 + lmflow.pipeline.utils.raft_trainer.skip_first_batches + lmflow.pipeline.utils.raft_trainer.logger + lmflow.pipeline.utils.raft_trainer.TRAINING_ARGS_NAME + lmflow.pipeline.utils.raft_trainer.TRAINER_STATE_NAME + lmflow.pipeline.utils.raft_trainer.OPTIMIZER_NAME + lmflow.pipeline.utils.raft_trainer.SCHEDULER_NAME + lmflow.pipeline.utils.raft_trainer.SCALER_NAME + + +Classes +------- + +.. autoapisummary:: + + lmflow.pipeline.utils.raft_trainer.RaftTrainer + + +Module Contents +--------------- + +.. py:data:: is_torch_greater_or_equal_than_1_10 + +.. py:data:: is_torch_less_than_1_11 + +.. py:data:: _is_native_cpu_amp_available + +.. py:data:: DEFAULT_CALLBACKS + +.. py:data:: DEFAULT_PROGRESS_CALLBACK + +.. py:data:: DEFAULT_PROGRESS_CALLBACK + +.. py:data:: IS_SAGEMAKER_MP_POST_1_10 + +.. py:data:: skip_first_batches + :value: None + + +.. py:data:: logger + +.. py:data:: TRAINING_ARGS_NAME + :value: 'training_args.bin' + + +.. py:data:: TRAINER_STATE_NAME + :value: 'trainer_state.json' + + +.. 
py:data:: OPTIMIZER_NAME + :value: 'optimizer.pt' + + +.. py:data:: SCHEDULER_NAME + :value: 'scheduler.pt' + + +.. py:data:: SCALER_NAME + :value: 'scaler.pt' + + +.. py:class:: RaftTrainer(model: Union[transformers.modeling_utils.PreTrainedModel, torch.nn.Module] = None, args: transformers.training_args.TrainingArguments = None, data_collator: Optional[transformers.data.data_collator.DataCollator] = None, train_dataset: Optional[torch.utils.data.Dataset] = None, eval_dataset: Optional[Union[torch.utils.data.Dataset, Dict[str, torch.utils.data.Dataset]]] = None, tokenizer: Optional[transformers.tokenization_utils_base.PreTrainedTokenizerBase] = None, model_init: Optional[Callable[[], transformers.modeling_utils.PreTrainedModel]] = None, compute_metrics: Optional[Callable[[transformers.trainer_utils.EvalPrediction], Dict]] = None, callbacks: Optional[List[transformers.trainer_callback.TrainerCallback]] = None, optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None), preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None) + + + Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for 🤗 Transformers. + Args: + model ([`PreTrainedModel`] or `torch.nn.Module`, *optional*): + The model to train, evaluate or use for predictions. If not provided, a `model_init` must be passed. + + [`Trainer`] is optimized to work with the [`PreTrainedModel`] provided by the library. You can still use + your own models defined as `torch.nn.Module` as long as they work the same way as the 🤗 Transformers + models. + + args ([`TrainingArguments`], *optional*): + The arguments to tweak for training. Will default to a basic instance of [`TrainingArguments`] with the + `output_dir` set to a directory named *tmp_trainer* in the current directory if not provided. + data_collator (`DataCollator`, *optional*): + The function to use to form a batch from a list of elements of `train_dataset` or `eval_dataset`. Will + default to [`default_data_collator`] if no `tokenizer` is provided, an instance of + [`DataCollatorWithPadding`] otherwise. + train_dataset (`torch.utils.data.Dataset` or `torch.utils.data.IterableDataset`, *optional*): + The dataset to use for training. If it is a [`~datasets.Dataset`], columns not accepted by the + `model.forward()` method are automatically removed. + Note that if it's a `torch.utils.data.IterableDataset` with some randomization and you are training in a + distributed fashion, your iterable dataset should either use a internal attribute `generator` that is a + `torch.Generator` for the randomization that must be identical on all processes (and the Trainer will + manually set the seed of this `generator` at each epoch) or have a `set_epoch()` method that internally + sets the seed of the RNGs used. + eval_dataset (Union[`torch.utils.data.Dataset`, Dict[str, `torch.utils.data.Dataset`]), *optional*): + The dataset to use for evaluation. If it is a [`~datasets.Dataset`], columns not accepted by the + `model.forward()` method are automatically removed. If it is a dictionary, it will evaluate on each + dataset prepending the dictionary key to the metric name. + tokenizer ([`PreTrainedTokenizerBase`], *optional*): + The tokenizer used to preprocess the data. If provided, will be used to automatically pad the inputs to the + maximum length when batching inputs, and it will be saved along the model to make it easier to rerun an + interrupted training or reuse the fine-tuned model. 
+ model_init (`Callable[[], PreTrainedModel]`, *optional*): + A function that instantiates the model to be used. If provided, each call to [`~Trainer.train`] will start + from a new instance of the model as given by this function. + The function may have zero argument, or a single one containing the optuna/Ray Tune/SigOpt trial object, to + be able to choose different architectures according to hyper parameters (such as layer count, sizes of + inner layers, dropout probabilities etc). + compute_metrics (`Callable[[EvalPrediction], Dict]`, *optional*): + The function that will be used to compute metrics at evaluation. Must take a [`EvalPrediction`] and return + a dictionary string to metric values. + callbacks (List of [`TrainerCallback`], *optional*): + A list of callbacks to customize the training loop. Will add those to the list of default callbacks + detailed in [here](callback). + If you want to remove one of the default callbacks used, use the [`Trainer.remove_callback`] method. + optimizers (`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`, *optional*): A tuple + containing the optimizer and the scheduler to use. Will default to an instance of [`AdamW`] on your model + and a scheduler given by [`get_linear_schedule_with_warmup`] controlled by `args`. + preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`, *optional*): + A function that preprocess the logits right before caching them at each evaluation step. Must take two + tensors, the logits and the labels, and return the logits once processed as desired. The modifications made + by this function will be reflected in the predictions received by `compute_metrics`. + Note that the labels (second parameter) will be `None` if the dataset does not have them. + Important attributes: + - **model** -- Always points to the core model. If using a transformers model, it will be a [`PreTrainedModel`] + subclass. + - **model_wrapped** -- Always points to the most external model in case one or more other modules wrap the + original model. This is the model that should be used for the forward pass. For example, under `DeepSpeed`, + the inner model is wrapped in `DeepSpeed` and then again in `torch.nn.DistributedDataParallel`. If the inner + model hasn't been wrapped, then `self.model_wrapped` is the same as `self.model`. + - **is_model_parallel** -- Whether or not a model has been switched to a model parallel mode (different from + data parallelism, this means some of the model layers are split on different GPUs). + - **place_model_on_device** -- Whether or not to automatically place the model on the device - it will be set + to `False` if model parallel or deepspeed is used, or if the default + `TrainingArguments.place_model_on_device` is overridden to return `False` . + - **is_in_train** -- Whether or not a model is currently running `train` (e.g. when `evaluate` is called while + in `train`) + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:attribute:: save_counter + :value: 0 + + + + .. py:attribute:: args + + + .. py:attribute:: hp_name + :value: None + + + + .. py:attribute:: deepspeed + :value: None + + + + .. py:attribute:: is_in_train + :value: False + + + + .. py:attribute:: _memory_tracker + + + .. py:attribute:: log_level + + + .. py:attribute:: sharded_ddp + :value: None + + + + .. py:attribute:: fsdp + :value: None + + + + .. py:attribute:: place_model_on_device + + + .. py:attribute:: default_collator + + + .. py:attribute:: data_collator + + + .. 
py:attribute:: train_dataset + + + .. py:attribute:: eval_dataset + + + .. py:attribute:: tokenizer + + + .. py:attribute:: model_wrapped + + + .. py:attribute:: model + + + .. py:attribute:: compute_metrics + + + .. py:attribute:: preprocess_logits_for_metrics + + + .. py:attribute:: default_callbacks + + + .. py:attribute:: callbacks + + + .. py:attribute:: callback_handler + + + .. py:attribute:: _loggers_initialized + :value: False + + + + .. py:attribute:: _signature_columns + :value: None + + + + .. py:attribute:: use_apex + :value: False + + + + .. py:attribute:: use_cuda_amp + :value: False + + + + .. py:attribute:: use_cpu_amp + :value: False + + + + .. py:attribute:: do_grad_scaling + :value: False + + + + .. py:attribute:: state + + + .. py:attribute:: control + + + .. py:attribute:: current_flos + :value: 0 + + + + .. py:attribute:: hp_search_backend + :value: None + + + + .. py:attribute:: use_tune_checkpoints + :value: False + + + + .. py:attribute:: default_label_names + + + .. py:attribute:: label_names + + + .. py:attribute:: can_return_loss + + + .. py:attribute:: control + + + .. py:attribute:: _train_batch_size + + + .. py:method:: add_callback(callback) + + + Add a callback to the current list of [`~transformer.TrainerCallback`]. + Args: + callback (`type` or [`~transformer.TrainerCallback`]): + A [`~transformer.TrainerCallback`] class or an instance of a [`~transformer.TrainerCallback`]. In the + first case, will instantiate a member of that class. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: pop_callback(callback) + + + Remove a callback from the current list of [`~transformer.TrainerCallback`] and returns it. + If the callback is not found, returns `None` (and no error is raised). + Args: + callback (`type` or [`~transformer.TrainerCallback`]): + A [`~transformer.TrainerCallback`] class or an instance of a [`~transformer.TrainerCallback`]. In the + first case, will pop the first member of that class found in the list of callbacks. + Returns: + [`~transformer.TrainerCallback`]: The callback removed, if found. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: remove_callback(callback) + + + Remove a callback from the current list of [`~transformer.TrainerCallback`]. + Args: + callback (`type` or [`~transformer.TrainerCallback`]): + A [`~transformer.TrainerCallback`] class or an instance of a [`~transformer.TrainerCallback`]. In the + first case, will remove the first member of that class found in the list of callbacks. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: _move_model_to_device(model, device) + + + .. py:method:: _set_signature_columns_if_needed() + + + .. py:method:: _remove_unused_columns(dataset: datasets.Dataset, description: Optional[str] = None) + + + .. py:method:: _get_collator_with_removed_columns(data_collator: Callable, description: Optional[str] = None) -> Callable + + + Wrap the data collator in a callable removing unused columns. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: _get_train_sampler() -> Optional[torch.utils.data.Sampler] + + + .. py:method:: get_train_dataloader() -> torch.utils.data.DataLoader + + + Returns the training [`~torch.utils.data.DataLoader`]. + Will use no sampler if `train_dataset` does not implement `__len__`, a random sampler (adapted to distributed + training if necessary) otherwise. 
+ Subclass and override this method if you want to inject some custom behavior. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: _get_eval_sampler(eval_dataset: torch.utils.data.Dataset) -> Optional[torch.utils.data.Sampler] + + + .. py:method:: get_eval_dataloader(eval_dataset: Optional[torch.utils.data.Dataset] = None) -> torch.utils.data.DataLoader + + + Returns the evaluation [`~torch.utils.data.DataLoader`]. + Subclass and override this method if you want to inject some custom behavior. + Args: + eval_dataset (`torch.utils.data.Dataset`, *optional*): + If provided, will override `self.eval_dataset`. If it is a [`~datasets.Dataset`], columns not accepted + by the `model.forward()` method are automatically removed. It must implement `__len__`. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: get_test_dataloader(test_dataset: torch.utils.data.Dataset) -> torch.utils.data.DataLoader + + + Returns the test [`~torch.utils.data.DataLoader`]. + Subclass and override this method if you want to inject some custom behavior. + Args: + test_dataset (`torch.utils.data.Dataset`, *optional*): + The test dataset to use. If it is a [`~datasets.Dataset`], columns not accepted by the + `model.forward()` method are automatically removed. It must implement `__len__`. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: create_optimizer_and_scheduler(num_training_steps: int) + + + Setup the optimizer and the learning rate scheduler. + We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the + Trainer's init through `optimizers`, or subclass and override this method (or `create_optimizer` and/or + `create_scheduler`) in a subclass. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: create_optimizer() + + + Setup the optimizer. + We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the + Trainer's init through `optimizers`, or subclass and override this method in a subclass. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: get_optimizer_cls_and_kwargs(args: transformers.training_args.TrainingArguments) -> Tuple[Any, Any] + :staticmethod: + + + + Returns the optimizer class and optimizer parameters based on the training arguments. + Args: + args (`transformers.training_args.TrainingArguments`): + The training arguments for the training session. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: create_scheduler(num_training_steps: int, optimizer: torch.optim.Optimizer = None) + + + Setup the scheduler. The optimizer of the trainer must have been set up either before this method is called or + passed as an argument. + Args: + num_training_steps (int): The number of training steps to do. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: num_examples(dataloader: torch.utils.data.DataLoader) -> int + + + Helper to get number of samples in a [`~torch.utils.data.DataLoader`] by accessing its dataset. When + dataloader.dataset does not exist or has no length, estimates as best it can + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: _hp_search_setup(trial: Union[optuna.Trial, Dict[str, Any]]) + + + HP search setup code + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. 
py:method:: _report_to_hp_search(trial: Union[optuna.Trial, Dict[str, Any]], step: int, metrics: Dict[str, float]) + + + .. py:method:: _tune_save_checkpoint() + + + .. py:method:: call_model_init(trial=None) + + + .. py:method:: torch_jit_model_eval(model, dataloader, training=False) + + + .. py:method:: ipex_optimize_model(model, training=False, dtype=torch.float32) + + + .. py:method:: _wrap_model(model, training=True, dataloader=None) + + + .. py:method:: train(resume_from_checkpoint: Optional[Union[str, bool]] = None, trial: Union[optuna.Trial, Dict[str, Any]] = None, ignore_keys_for_eval: Optional[List[str]] = None, is_first_time=False, **kwargs) + + + Main training entry point. + Args: + resume_from_checkpoint (`str` or `bool`, *optional*): + If a `str`, local path to a saved checkpoint as saved by a previous instance of [`Trainer`]. If a + `bool` and equals `True`, load the last checkpoint in *args.output_dir* as saved by a previous instance + of [`Trainer`]. If present, training will resume from the model/optimizer/scheduler states loaded here. + trial (`optuna.Trial` or `Dict[str, Any]`, *optional*): + The trial run or the hyperparameter dictionary for hyperparameter search. + ignore_keys_for_eval (`List[str]`, *optional*) + A list of keys in the output of your model (if it is a dictionary) that should be ignored when + gathering predictions for evaluation during the training. + kwargs: + Additional keyword arguments used to hide deprecated arguments + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: _one_train(batch_size=None, args=None, resume_from_checkpoint=None, trial=None, ignore_keys_for_eval=None) + + + .. py:method:: _inner_training_loop(batch_size=None, args=None, resume_from_checkpoint=None, trial=None, ignore_keys_for_eval=None) + + + 0 This function serves to train one time + 1 Update the self.train_dataset before calling this function + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: _get_output_dir(trial) + + + .. py:method:: _load_from_checkpoint(resume_from_checkpoint, model=None) + + + .. py:method:: _load_best_model() + + + .. py:method:: _issue_warnings_after_load(load_result) + + + .. py:method:: _maybe_log_save_evaluate(tr_loss, model, trial, epoch, ignore_keys_for_eval) + + + .. py:method:: _load_rng_state(checkpoint) + + + .. py:method:: _save_checkpoint(model, trial, metrics=None) + + + .. py:method:: _load_optimizer_and_scheduler(checkpoint) + + + If optimizer and scheduler states exist, load them. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: hyperparameter_search(hp_space: Optional[Callable[[optuna.Trial], Dict[str, float]]] = None, compute_objective: Optional[Callable[[Dict[str, float]], float]] = None, n_trials: int = 20, direction: str = 'minimize', backend: Optional[Union[str, transformers.trainer_utils.HPSearchBackend]] = None, hp_name: Optional[Callable[[optuna.Trial], str]] = None, **kwargs) -> transformers.trainer_utils.BestRun + + + Launch an hyperparameter search using `optuna` or `Ray Tune` or `SigOpt`. The optimized quantity is determined + by `compute_objective`, which defaults to a function returning the evaluation loss when no metric is provided, + the sum of all metrics otherwise. + + To use this method, you need to have provided a `model_init` when initializing your [`Trainer`]: we need to + reinitialize the model at each new run. 
This is incompatible with the `optimizers` argument, so you need to + subclass [`Trainer`] and override the method [`~Trainer.create_optimizer_and_scheduler`] for custom + optimizer/scheduler. + + Args: + hp_space (`Callable[["optuna.Trial"], Dict[str, float]]`, *optional*): + A function that defines the hyperparameter search space. Will default to + [`~trainer_utils.default_hp_space_optuna`] or [`~trainer_utils.default_hp_space_ray`] or + [`~trainer_utils.default_hp_space_sigopt`] depending on your backend. + compute_objective (`Callable[[Dict[str, float]], float]`, *optional*): + A function computing the objective to minimize or maximize from the metrics returned by the `evaluate` + method. Will default to [`~trainer_utils.default_compute_objective`]. + n_trials (`int`, *optional*, defaults to 100): + The number of trial runs to test. + direction (`str`, *optional*, defaults to `"minimize"`): + Whether to optimize greater or lower objects. Can be `"minimize"` or `"maximize"`, you should pick + `"minimize"` when optimizing the validation loss, `"maximize"` when optimizing one or several metrics. + backend (`str` or [`~training_utils.HPSearchBackend`], *optional*): + The backend to use for hyperparameter search. Will default to optuna or Ray Tune or SigOpt, depending + on which one is installed. If all are installed, will default to optuna. + hp_name (`Callable[["optuna.Trial"], str]]`, *optional*): + A function that defines the trial/run name. Will default to None. + kwargs (`Dict[str, Any]`, *optional*): + Additional keyword arguments passed along to `optuna.create_study` or `ray.tune.run`. For more + information see: + - the documentation of + [optuna.create_study](https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.create_study.html) + - the documentation of [tune.run](https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run) + - the documentation of [sigopt](https://app.sigopt.com/docs/endpoints/experiments/create) + Returns: + [`trainer_utils.BestRun`]: All the information about the best run. Experiment summary can be found in + `run_summary` attribute for Ray backend. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: log(logs: Dict[str, float]) -> None + + + Log `logs` on the various objects watching training. + Subclass and override this method to inject custom behavior. + Args: + logs (`Dict[str, float]`): + The values to log. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: _prepare_input(data: Union[torch.Tensor, Any]) -> Union[torch.Tensor, Any] + + + Prepares one `data` before feeding it to the model, be it a tensor or a nested list/dictionary of tensors. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: _prepare_inputs(inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]] + + + Prepare `inputs` before feeding them to the model, converting them to tensors if they are not already and + handling potential state. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: compute_loss_context_manager() + + + A helper wrapper to group together context managers. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: autocast_smart_context_manager(cache_enabled: Optional[bool] = True) + + + A helper wrapper that creates an appropriate context manager for `autocast` while feeding it the desired + arguments, depending on the situation. 
+ + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: training_step(model: torch.nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor + + + Perform a training step on a batch of inputs. + Subclass and override to inject custom behavior. + Args: + model (`nn.Module`): + The model to train. + inputs (`Dict[str, Union[torch.Tensor, Any]]`): + The inputs and targets of the model. + The dictionary will be unpacked before being fed to the model. Most models expect the targets under the + argument `labels`. Check your model's documentation for all accepted arguments. + Return: + `torch.Tensor`: The tensor with training loss on this batch. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: compute_loss(model, inputs, return_outputs=False) + + + How the loss is computed by Trainer. By default, all models return the loss in the first element. + Subclass and override for custom behavior. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: is_local_process_zero() -> bool + + + Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several + machines) main process. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: is_world_process_zero() -> bool + + + Whether or not this process is the global main process (when training in a distributed fashion on several + machines, this is only going to be `True` for one process). + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: save_model(output_dir: Optional[str] = None, _internal_call: bool = False) + + + Will save the model, so you can reload it using `from_pretrained()`. + Will only save from the main process. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: _save_tpu(output_dir: Optional[str] = None) + + + .. py:method:: _save(output_dir: Optional[str] = None, state_dict=None) + + + .. py:method:: store_flos() + + + .. py:method:: _sorted_checkpoints(output_dir=None, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False) -> List[str] + + + .. py:method:: _rotate_checkpoints(use_mtime=False, output_dir=None) -> None + + + .. py:method:: evaluate(eval_dataset: Optional[torch.utils.data.Dataset] = None, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = 'eval') -> Dict[str, float] + + + Run evaluation and returns metrics. + The calling script will be responsible for providing a method to compute metrics, as they are task-dependent + (pass it to the init `compute_metrics` argument). + You can also subclass and override this method to inject custom behavior. + Args: + eval_dataset (`Dataset`, *optional*): + Pass a dataset if you wish to override `self.eval_dataset`. If it is a [`~datasets.Dataset`], columns + not accepted by the `model.forward()` method are automatically removed. It must implement the `__len__` + method. + ignore_keys (`Lst[str]`, *optional*): + A list of keys in the output of your model (if it is a dictionary) that should be ignored when + gathering predictions. + metric_key_prefix (`str`, *optional*, defaults to `"eval"`): + An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named + "eval_bleu" if the prefix is "eval" (default) + Returns: + A dictionary containing the evaluation loss and the potential metrics computed from the predictions. 
The + dictionary also contains the epoch number which comes from the training state. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: predict(test_dataset: torch.utils.data.Dataset, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = 'test') -> transformers.trainer_utils.PredictionOutput + + + Run prediction and returns predictions and potential metrics. + Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method + will also return metrics, like in `evaluate()`. + Args: + test_dataset (`Dataset`): + Dataset to run the predictions on. If it is an `datasets.Dataset`, columns not accepted by the + `model.forward()` method are automatically removed. Has to implement the method `__len__` + ignore_keys (`Lst[str]`, *optional*): + A list of keys in the output of your model (if it is a dictionary) that should be ignored when + gathering predictions. + metric_key_prefix (`str`, *optional*, defaults to `"test"`): + An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named + "test_bleu" if the prefix is "test" (default) + + If your predictions or labels have different sequence length (for instance because you're doing dynamic padding + in a token classification task) the predictions will be padded (on the right) to allow for concatenation into + one array. The padding index is -100. + + Returns: *NamedTuple* A namedtuple with the following keys: + - predictions (`np.ndarray`): The predictions on `test_dataset`. + - label_ids (`np.ndarray`, *optional*): The labels (if the dataset contained some). + - metrics (`Dict[str, float]`, *optional*): The potential dictionary of metrics (if the dataset contained + labels). + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: evaluation_loop(dataloader: torch.utils.data.DataLoader, description: str, prediction_loss_only: Optional[bool] = None, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = 'eval') -> transformers.trainer_utils.EvalLoopOutput + + + Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`. + Works both with or without labels. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: _nested_gather(tensors, name=None) + + + Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy before + concatenating them to `gathered` + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: _pad_across_processes(tensor, pad_index=-100) + + + Recursively pad the tensors in a nested list/tuple/dictionary of tensors from all devices to the same size so + they can safely be gathered. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: prediction_step(model: torch.nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool, ignore_keys: Optional[List[str]] = None) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]] + + + Perform an evaluation step on `model` using `inputs`. + Subclass and override to inject custom behavior. + Args: + model (`nn.Module`): + The model to evaluate. + inputs (`Dict[str, Union[torch.Tensor, Any]]`): + The inputs and targets of the model. + The dictionary will be unpacked before being fed to the model. Most models expect the targets under the + argument `labels`. Check your model's documentation for all accepted arguments. 
+ prediction_loss_only (`bool`): + Whether or not to return the loss only. + ignore_keys (`Lst[str]`, *optional*): + A list of keys in the output of your model (if it is a dictionary) that should be ignored when + gathering predictions. + Return: + Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, + logits and labels (each being optional). + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: floating_point_ops(inputs: Dict[str, Union[torch.Tensor, Any]]) + + + For models that inherit from [`PreTrainedModel`], uses that method to compute the number of floating point + operations for every backward + forward pass. If using another model, either implement such a method in the + model or subclass and override this method. + Args: + inputs (`Dict[str, Union[torch.Tensor, Any]]`): + The inputs and targets of the model. + Returns: + `int`: The number of floating-point operations. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: init_git_repo(at_init: bool = False) + + + Initializes a git repo in `self.args.hub_model_id`. + Args: + at_init (`bool`, *optional*, defaults to `False`): + Whether this function is called before any training or not. If `self.args.overwrite_output_dir` is + `True` and `at_init` is `True`, the path to the repo (which is `self.args.output_dir`) might be wiped + out. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: create_model_card(language: Optional[str] = None, license: Optional[str] = None, tags: Union[str, List[str], None] = None, model_name: Optional[str] = None, finetuned_from: Optional[str] = None, tasks: Union[str, List[str], None] = None, dataset_tags: Union[str, List[str], None] = None, dataset: Union[str, List[str], None] = None, dataset_args: Union[str, List[str], None] = None) + + + Creates a draft of a model card using the information available to the `Trainer`. + Args: + language (`str`, *optional*): + The language of the model (if applicable) + license (`str`, *optional*): + The license of the model. Will default to the license of the pretrained model used, if the original + model given to the `Trainer` comes from a repo on the Hub. + tags (`str` or `List[str]`, *optional*): + Some tags to be included in the metadata of the model card. + model_name (`str`, *optional*): + The name of the model. + finetuned_from (`str`, *optional*): + The name of the model used to fine-tune this one (if applicable). Will default to the name of the repo + of the original model given to the `Trainer` (if it comes from the Hub). + tasks (`str` or `List[str]`, *optional*): + One or several task identifiers, to be included in the metadata of the model card. + dataset_tags (`str` or `List[str]`, *optional*): + One or several dataset tags, to be included in the metadata of the model card. + dataset (`str` or `List[str]`, *optional*): + One or several dataset identifiers, to be included in the metadata of the model card. + dataset_args (`str` or `List[str]`, *optional*): + One or several dataset arguments, to be included in the metadata of the model card. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: _push_from_checkpoint(checkpoint_folder) + + + .. py:method:: push_to_hub(commit_message: Optional[str] = 'End of training', blocking: bool = True, **kwargs) -> str + + + Upload *self.model* and *self.tokenizer* to the 🤗 model hub on the repo *self.args.hub_model_id*. 
+ Parameters: + commit_message (`str`, *optional*, defaults to `"End of training"`): + Message to commit while pushing. + blocking (`bool`, *optional*, defaults to `True`): + Whether the function should return only when the `git push` has finished. + kwargs: + Additional keyword arguments passed along to [`~Trainer.create_model_card`]. + Returns: + The url of the commit of your model in the given repository if `blocking=False`, a tuple with the url of + the commit and an object to track the progress of the commit if `blocking=True` + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: prediction_loop(dataloader: torch.utils.data.DataLoader, description: str, prediction_loss_only: Optional[bool] = None, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = 'eval') -> transformers.trainer_utils.EvalLoopOutput + + + Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`. + Works both with or without labels. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: _gather_and_numpify(tensors, name) + + + Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy before + concatenating them to `gathered` + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: _add_sm_patterns_to_gitignore() -> None + + + Add SageMaker Checkpointing patterns to .gitignore file. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + diff --git a/_sources/autoapi/lmflow/pipeline/utils/rm_dataprocessor/index.rst.txt b/_sources/autoapi/lmflow/pipeline/utils/rm_dataprocessor/index.rst.txt new file mode 100644 index 000000000..9be0deb3d --- /dev/null +++ b/_sources/autoapi/lmflow/pipeline/utils/rm_dataprocessor/index.rst.txt @@ -0,0 +1,60 @@ +lmflow.pipeline.utils.rm_dataprocessor +====================================== + +.. py:module:: lmflow.pipeline.utils.rm_dataprocessor + + +Attributes +---------- + +.. autoapisummary:: + + lmflow.pipeline.utils.rm_dataprocessor.logger + + +Classes +------- + +.. autoapisummary:: + + lmflow.pipeline.utils.rm_dataprocessor.RewardDataCollatorWithPadding + + +Module Contents +--------------- + +.. py:data:: logger + +.. py:class:: RewardDataCollatorWithPadding + + .. py:attribute:: tokenizer + :type: transformers.AutoTokenizer + + + .. py:attribute:: padding + :type: Union[bool, str, transformers.utils.PaddingStrategy] + :value: True + + + + .. py:attribute:: max_length + :type: Optional[int] + :value: None + + + + .. py:attribute:: pad_to_multiple_of + :type: Optional[int] + :value: None + + + + .. py:attribute:: return_tensors + :type: str + :value: 'pt' + + + + .. py:method:: __call__(features: List[Dict[str, Any]]) -> Dict[str, Any] + + diff --git a/_sources/autoapi/lmflow/pipeline/utils/rm_trainer/index.rst.txt b/_sources/autoapi/lmflow/pipeline/utils/rm_trainer/index.rst.txt new file mode 100644 index 000000000..4f863d79f --- /dev/null +++ b/_sources/autoapi/lmflow/pipeline/utils/rm_trainer/index.rst.txt @@ -0,0 +1,47 @@ +lmflow.pipeline.utils.rm_trainer +================================ + +.. py:module:: lmflow.pipeline.utils.rm_trainer + + +Classes +------- + +.. autoapisummary:: + + lmflow.pipeline.utils.rm_trainer.RewardTrainer + lmflow.pipeline.utils.rm_trainer.PeftRewardTrainer + + +Functions +--------- + +.. autoapisummary:: + + lmflow.pipeline.utils.rm_trainer.compute_metrics + lmflow.pipeline.utils.rm_trainer.rm_loss + + +Module Contents +--------------- + +.. 
py:function:: compute_metrics(eval_pred) + +.. py:function:: rm_loss(model, inputs, return_outputs=False) + +.. py:class:: RewardTrainer + + Bases: :py:obj:`transformers.Trainer` + + + .. py:method:: compute_loss(model, inputs, return_outputs=False) + + +.. py:class:: PeftRewardTrainer + + Bases: :py:obj:`lmflow.pipeline.utils.peft_trainer.PeftTrainer` + + + .. py:method:: compute_loss(model, inputs, return_outputs=False) + + diff --git a/_sources/autoapi/lmflow/pipeline/vllm_inferencer/index.rst.txt b/_sources/autoapi/lmflow/pipeline/vllm_inferencer/index.rst.txt new file mode 100644 index 000000000..e30075e72 --- /dev/null +++ b/_sources/autoapi/lmflow/pipeline/vllm_inferencer/index.rst.txt @@ -0,0 +1,202 @@ +lmflow.pipeline.vllm_inferencer +=============================== + +.. py:module:: lmflow.pipeline.vllm_inferencer + + +Attributes +---------- + +.. autoapisummary:: + + lmflow.pipeline.vllm_inferencer.logger + + +Classes +------- + +.. autoapisummary:: + + lmflow.pipeline.vllm_inferencer.InferencerWithOffloading + lmflow.pipeline.vllm_inferencer.VLLMInferencer + lmflow.pipeline.vllm_inferencer.MemorySafeVLLMInferencer + + +Module Contents +--------------- + +.. py:data:: logger + +.. py:class:: InferencerWithOffloading(model_args: lmflow.args.ModelArguments, data_args: lmflow.args.DatasetArguments, inferencer_args: lmflow.args.InferencerArguments) + + Bases: :py:obj:`lmflow.pipeline.base_pipeline.BasePipeline` + + + .. py:attribute:: model_args + + + .. py:attribute:: data_args + + + .. py:attribute:: inferencer_args + + + .. py:attribute:: eos_token_id + + + .. py:method:: inference() + :abstractmethod: + + + + .. py:method:: save_inference_results() + :abstractmethod: + + + + .. py:method:: load_inference_results() + :abstractmethod: + + + +.. py:class:: VLLMInferencer(model_args: lmflow.args.ModelArguments, data_args: lmflow.args.DatasetArguments, inferencer_args: lmflow.args.InferencerArguments) + + Bases: :py:obj:`InferencerWithOffloading` + + + .. py:attribute:: sampling_params + + + .. py:method:: parse_to_sampling_params(inference_args: lmflow.args.InferencerArguments) -> vllm.SamplingParams + + + .. py:method:: inference(model: lmflow.models.hf_decoder_model.HFDecoderModel, dataset: lmflow.datasets.Dataset, enable_decode_inference_result: bool = True, release_gpu: bool = False, inference_args: Optional[lmflow.args.InferencerArguments] = None, enable_distributed_inference: bool = False, **kwargs) -> List[lmflow.utils.data_utils.VLLMInferenceResultWithInput] + + + Perform inference using the provided model and dataset. Will save inference results if + `save_results` is set to True in `inferencer_args`. + + + :Parameters: + + **model** : HFDecoderModel + LMFlow HFDecoderModel object + + **dataset** : Dataset + LMFlow Dataset object + + **apply_chat_template** : bool, optional + Whether to apply chat template to the input, by default True. + + **enable_decode_inference_result** : bool, optional + Whether to decode after generation, by default False. + + **release_gpu** : bool, optional + Whether to release gpu resources, by default False. + + **inference_args** : InferencerArguments, optional + by default None + + :Returns: + + List[VLLMInferenceResultWithInput] + Return a list of VLLMInferenceResultWithInput, where each + element contains the input prompt and the corresponding output. + + When `enable_decode_inference_result = True`, the output would be a list of strings, + contains sampling_params.n samples for the corresponding prompt. 
+ + When `enable_decode_inference_result = False`, return a list of list of ints + (token ids, no decoding after generation). + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: _inference(model: lmflow.models.hf_decoder_model.HFDecoderModel, model_input: List[str], sampling_params: vllm.SamplingParams, release_gpu: bool = False) -> List[lmflow.utils.data_utils.VLLMInferenceResultWithInput] + + + .. py:method:: _distributed_inference(model: lmflow.models.hf_decoder_model.HFDecoderModel, model_input: ray.data.Dataset, sampling_params: vllm.SamplingParams, num_instances: int, batch_size: int = 4, release_gpu: bool = False) -> List[lmflow.utils.data_utils.VLLMInferenceResultWithInput] + + + .. py:method:: save_inference_results(outputs: Union[List[List[str]], List[List[List[int]]]], save_file_path: str) + + + .. py:method:: load_inference_results(results_path: str) -> Union[List[List[str]], List[List[List[int]]]] + + +.. py:class:: MemorySafeVLLMInferencer(model_args: lmflow.args.ModelArguments, data_args: lmflow.args.DatasetArguments, inferencer_args: lmflow.args.InferencerArguments) + + Bases: :py:obj:`VLLMInferencer` + + + .. py:attribute:: inferencer_file_path + + + .. py:method:: inference() -> List[lmflow.utils.data_utils.VLLMInferenceResultWithInput] + + + Perform inference using the provided model and dataset. Will save inference results if + `save_results` is set to True in `inferencer_args`. + + + :Parameters: + + **model** : HFDecoderModel + LMFlow HFDecoderModel object + + **dataset** : Dataset + LMFlow Dataset object + + **apply_chat_template** : bool, optional + Whether to apply chat template to the input, by default True. + + **enable_decode_inference_result** : bool, optional + Whether to decode after generation, by default False. + + **release_gpu** : bool, optional + Whether to release gpu resources, by default False. + + **inference_args** : InferencerArguments, optional + by default None + + :Returns: + + List[VLLMInferenceResultWithInput] + Return a list of VLLMInferenceResultWithInput, where each + element contains the input prompt and the corresponding output. + + When `enable_decode_inference_result = True`, the output would be a list of strings, + contains sampling_params.n samples for the corresponding prompt. + + When `enable_decode_inference_result = False`, return a list of list of ints + (token ids, no decoding after generation). + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + diff --git a/_sources/autoapi/lmflow/tokenization/hf_decoder_model/index.rst.txt b/_sources/autoapi/lmflow/tokenization/hf_decoder_model/index.rst.txt new file mode 100644 index 000000000..9d3e7767e --- /dev/null +++ b/_sources/autoapi/lmflow/tokenization/hf_decoder_model/index.rst.txt @@ -0,0 +1,80 @@ +lmflow.tokenization.hf_decoder_model +==================================== + +.. py:module:: lmflow.tokenization.hf_decoder_model + + +Attributes +---------- + +.. autoapisummary:: + + lmflow.tokenization.hf_decoder_model.logger + lmflow.tokenization.hf_decoder_model.tok_logger + + +Functions +--------- + +.. autoapisummary:: + + lmflow.tokenization.hf_decoder_model.blocking + lmflow.tokenization.hf_decoder_model.tokenize_function + lmflow.tokenization.hf_decoder_model.conversation_tokenize_function + + +Module Contents +--------------- + +.. py:data:: logger + +.. py:data:: tok_logger + +.. 
py:function:: blocking(token_dict: Dict, block_size: int, model_max_length: int, pad_token_id: int, padding_side: str, truncation_side: str = 'right') -> Dict + +.. py:function:: tokenize_function(examples, data_args: lmflow.args.DatasetArguments, tokenizer: Union[transformers.PreTrainedTokenizer, transformers.PreTrainedTokenizerFast], column_names, label_columns, tokenized_column_order, add_special_tokens, use_truncation) -> Dict + + + Handels text_only and text2text datasets tokenization + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + +.. py:function:: conversation_tokenize_function(examples, data_args: lmflow.args.DatasetArguments, tokenizer: Union[transformers.PreTrainedTokenizer, transformers.PreTrainedTokenizerFast], column_names, conversation_template: lmflow.utils.conversation_template.ConversationTemplate) -> Dict + + + Handels conversation datasets tokenization + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + diff --git a/_sources/autoapi/lmflow/tokenization/hf_text_regression_model/index.rst.txt b/_sources/autoapi/lmflow/tokenization/hf_text_regression_model/index.rst.txt new file mode 100644 index 000000000..142f4b240 --- /dev/null +++ b/_sources/autoapi/lmflow/tokenization/hf_text_regression_model/index.rst.txt @@ -0,0 +1,114 @@ +lmflow.tokenization.hf_text_regression_model +============================================ + +.. py:module:: lmflow.tokenization.hf_text_regression_model + + +Attributes +---------- + +.. autoapisummary:: + + lmflow.tokenization.hf_text_regression_model.logger + lmflow.tokenization.hf_text_regression_model.tok_logger + + +Functions +--------- + +.. autoapisummary:: + + lmflow.tokenization.hf_text_regression_model.blocking_paired + lmflow.tokenization.hf_text_regression_model.blocking + lmflow.tokenization.hf_text_regression_model.blocking_text_to_textlist + lmflow.tokenization.hf_text_regression_model.paired_conversation_tokenize_function + lmflow.tokenization.hf_text_regression_model.conversation_tokenize_function + lmflow.tokenization.hf_text_regression_model.tokenize_function + lmflow.tokenization.hf_text_regression_model.text_to_textlist_tokenize_function + + +Module Contents +--------------- + +.. py:data:: logger + +.. py:data:: tok_logger + +.. py:function:: blocking_paired(token_dict: Dict, column_names: List, block_size: int, model_max_length: int, pad_token_id: int, padding_side: str, truncation_side: str = 'right') -> Dict + +.. py:function:: blocking(token_dict: Dict, block_size: int, model_max_length: int, pad_token_id: int, padding_side: str, truncation_side: str = 'right') -> Dict + +.. py:function:: blocking_text_to_textlist(token_dict: Dict, block_size: int, model_max_length: int, pad_token_id: int, padding_side: str, truncation_side: str = 'right') -> Dict + +.. py:function:: paired_conversation_tokenize_function(examples, data_args: lmflow.args.DatasetArguments, tokenizer: Union[transformers.PreTrainedTokenizer, transformers.PreTrainedTokenizerFast], column_names, conversation_template: lmflow.utils.conversation_template.ConversationTemplate) -> Dict + +.. py:function:: conversation_tokenize_function(examples, data_args: lmflow.args.DatasetArguments, tokenizer: Union[transformers.PreTrainedTokenizer, transformers.PreTrainedTokenizerFast], column_names, conversation_template: lmflow.utils.conversation_template.ConversationTemplate) -> Dict + + + Handels conversation datasets tokenization + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + +.. 
py:function:: tokenize_function(examples, data_args: lmflow.args.DatasetArguments, tokenizer: Union[transformers.PreTrainedTokenizer, transformers.PreTrainedTokenizerFast], column_names, label_columns, tokenized_column_order, add_special_tokens, use_truncation) -> Dict + + + Handels text_only and text2text datasets tokenization + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + +.. py:function:: text_to_textlist_tokenize_function(examples, data_args: lmflow.args.DatasetArguments, tokenizer: Union[transformers.PreTrainedTokenizer, transformers.PreTrainedTokenizerFast], column_names, add_special_tokens, use_truncation) -> Dict + + + For rm inference, and don't need attn mask and labels. + NOTE: input_ids here refers to the tokenized input_ids of the input **and** output + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + diff --git a/_sources/autoapi/lmflow/tokenization/index.rst.txt b/_sources/autoapi/lmflow/tokenization/index.rst.txt new file mode 100644 index 000000000..ee3c2d670 --- /dev/null +++ b/_sources/autoapi/lmflow/tokenization/index.rst.txt @@ -0,0 +1,16 @@ +lmflow.tokenization +=================== + +.. py:module:: lmflow.tokenization + + +Submodules +---------- + +.. toctree:: + :maxdepth: 1 + + /autoapi/lmflow/tokenization/hf_decoder_model/index + /autoapi/lmflow/tokenization/hf_text_regression_model/index + + diff --git a/_sources/autoapi/lmflow/utils/common/index.rst.txt b/_sources/autoapi/lmflow/utils/common/index.rst.txt new file mode 100644 index 000000000..9f2215262 --- /dev/null +++ b/_sources/autoapi/lmflow/utils/common/index.rst.txt @@ -0,0 +1,179 @@ +lmflow.utils.common +=================== + +.. py:module:: lmflow.utils.common + + +Attributes +---------- + +.. autoapisummary:: + + lmflow.utils.common.logger + + +Functions +--------- + +.. autoapisummary:: + + lmflow.utils.common.make_shell_args_from_dataclass + lmflow.utils.common.create_copied_dataclass + lmflow.utils.common.remove_dataclass_attr_prefix + lmflow.utils.common.add_dataclass_attr_prefix + lmflow.utils.common.print_banner + + +Module Contents +--------------- + +.. py:data:: logger + +.. py:function:: make_shell_args_from_dataclass(dataclass_objects: List, format: str = 'subprocess', skip_default: bool = True, ignored_args_list: Optional[List[str]] = None) -> Union[str, List[str]] + + + Return a string or a list of strings that can be used as shell arguments. + + + :Parameters: + + **dataclass_objects** : List + A list of dataclass objects. + + **format** : str, optional + Return format, can be "shell" or "subprocess", by default "subprocess". + + **skip_default** : bool, optional + Whether to skip attributes with default values, by default True. + + :Returns: + + Union[str, List[str]] + .. + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + +.. py:function:: create_copied_dataclass(original_dataclass, field_prefix: str, class_prefix: str, new_default: Dict = None) + + + Create a copied dataclass with new field names and default values. + + + :Parameters: + + **original_dataclass** : dataclass + .. + + **field_prefix** : str + The prefix to add to the **field** names of the copied dataclass. + + **class_prefix** : str + The prefix to add to the **class** name of the copied dataclass. + + **new_default** : Dict, optional + The new default values for the copied dataclass. When None, the + default values of the original dataclass are used. + + :Returns: + + dataclass + .. + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + +.. 
py:function:: remove_dataclass_attr_prefix(data_instance, prefix: str) -> Dict + + + Remove the prefix from the attribute names of a dataclass instance. + + + :Parameters: + + **data_instance** : dataclass + .. + + **prefix** : str + The prefix to remove from the attribute names of the dataclass instance. + + :Returns: + + Dict + .. + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + +.. py:function:: add_dataclass_attr_prefix(data_instance, prefix: str) -> Dict + + + Add the prefix to the attribute names of a dataclass instance. + + + :Parameters: + + **data_instance** : dataclass + .. + + **prefix** : str + The prefix to add to the attribute names of the dataclass instance. + + :Returns: + + Dict + .. + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + +.. py:function:: print_banner(message: str) + diff --git a/_sources/autoapi/lmflow/utils/constants/index.rst.txt b/_sources/autoapi/lmflow/utils/constants/index.rst.txt new file mode 100644 index 000000000..c70208cf7 --- /dev/null +++ b/_sources/autoapi/lmflow/utils/constants/index.rst.txt @@ -0,0 +1,132 @@ +lmflow.utils.constants +====================== + +.. py:module:: lmflow.utils.constants + +.. autoapi-nested-parse:: + + Commonly used constants. + + .. + !! processed by numpydoc !! + + +Attributes +---------- + +.. autoapisummary:: + + lmflow.utils.constants.TEXT_ONLY_DATASET_DESCRIPTION + lmflow.utils.constants.TEXT_TO_SCORED_TEXTLIST_DATASET_DESCRIPTION + lmflow.utils.constants.PAIRED_TEXT_TO_TEXT_DATASET_DESCRIPTION + lmflow.utils.constants.TEXT_ONLY_DATASET_DETAILS + lmflow.utils.constants.TEXT2TEXT_DATASET_DESCRIPTION + lmflow.utils.constants.CONVERSATION_DATASET_DESCRIPTION + lmflow.utils.constants.PAIRED_CONVERSATION_DATASET_DESCRIPTION + lmflow.utils.constants.TEXT_TO_TEXTLIST_DATASET_DESCRIPTION + lmflow.utils.constants.TEXT2TEXT_DATASET_DETAILS + lmflow.utils.constants.FLOAT_ONLY_DATASET_DESCRIPTION + lmflow.utils.constants.TEXT_ONLY_DATASET_LONG_DESCRITION + lmflow.utils.constants.TEXT2TEXT_DATASET_LONG_DESCRITION + lmflow.utils.constants.DATASET_DESCRIPTION_MAP + lmflow.utils.constants.INSTANCE_FIELDS_MAP + lmflow.utils.constants.CONVERSATION_ROLE_NAMES + lmflow.utils.constants.CONTROLLER_HEART_BEAT_EXPIRATION + lmflow.utils.constants.WORKER_HEART_BEAT_INTERVAL + lmflow.utils.constants.LOGDIR + lmflow.utils.constants.IGNORE_INDEX + lmflow.utils.constants.IMAGE_TOKEN_INDEX + lmflow.utils.constants.DEFAULT_IMAGE_TOKEN + lmflow.utils.constants.DEFAULT_IMAGE_PATCH_TOKEN + lmflow.utils.constants.DEFAULT_IM_START_TOKEN + lmflow.utils.constants.DEFAULT_IM_END_TOKEN + lmflow.utils.constants.LMFLOW_LORA_TARGET_MODULES_MAPPING + lmflow.utils.constants.MEMORY_SAFE_VLLM_INFERENCE_FINISH_FLAG + lmflow.utils.constants.RETURN_CODE_ERROR_BUFFER + lmflow.utils.constants.MEMORY_SAFE_VLLM_INFERENCE_ENV_VAR_TO_REMOVE + lmflow.utils.constants.MEMORY_SAFE_DPOV2_ALIGN_ENV_VAR_TO_REMOVE + + +Module Contents +--------------- + +.. py:data:: TEXT_ONLY_DATASET_DESCRIPTION + +.. py:data:: TEXT_TO_SCORED_TEXTLIST_DATASET_DESCRIPTION + +.. py:data:: PAIRED_TEXT_TO_TEXT_DATASET_DESCRIPTION + +.. py:data:: TEXT_ONLY_DATASET_DETAILS + +.. py:data:: TEXT2TEXT_DATASET_DESCRIPTION + +.. py:data:: CONVERSATION_DATASET_DESCRIPTION + +.. py:data:: PAIRED_CONVERSATION_DATASET_DESCRIPTION + +.. py:data:: TEXT_TO_TEXTLIST_DATASET_DESCRIPTION + +.. py:data:: TEXT2TEXT_DATASET_DETAILS + +.. py:data:: FLOAT_ONLY_DATASET_DESCRIPTION + +.. py:data:: TEXT_ONLY_DATASET_LONG_DESCRITION + +.. py:data:: TEXT2TEXT_DATASET_LONG_DESCRITION + +.. 
py:data:: DATASET_DESCRIPTION_MAP + +.. py:data:: INSTANCE_FIELDS_MAP + +.. py:data:: CONVERSATION_ROLE_NAMES + +.. py:data:: CONTROLLER_HEART_BEAT_EXPIRATION + :value: 30 + + +.. py:data:: WORKER_HEART_BEAT_INTERVAL + :value: 15 + + +.. py:data:: LOGDIR + :value: '.' + + +.. py:data:: IGNORE_INDEX + +.. py:data:: IMAGE_TOKEN_INDEX + +.. py:data:: DEFAULT_IMAGE_TOKEN + :value: '' + + +.. py:data:: DEFAULT_IMAGE_PATCH_TOKEN + :value: '' + + +.. py:data:: DEFAULT_IM_START_TOKEN + :value: '' + + +.. py:data:: DEFAULT_IM_END_TOKEN + :value: '' + + +.. py:data:: LMFLOW_LORA_TARGET_MODULES_MAPPING + +.. py:data:: MEMORY_SAFE_VLLM_INFERENCE_FINISH_FLAG + :value: 'MEMORY_SAFE_VLLM_INFERENCE_DONE' + + +.. py:data:: RETURN_CODE_ERROR_BUFFER + :value: [134] + + +.. py:data:: MEMORY_SAFE_VLLM_INFERENCE_ENV_VAR_TO_REMOVE + :value: ['OMP_NUM_THREADS', 'LOCAL_RANK', 'RANK', 'GROUP_RANK', 'ROLE_RANK', 'ROLE_NAME',... + + +.. py:data:: MEMORY_SAFE_DPOV2_ALIGN_ENV_VAR_TO_REMOVE + :value: ['OMP_NUM_THREADS', 'LOCAL_RANK', 'RANK', 'GROUP_RANK', 'ROLE_RANK', 'ROLE_NAME',... + + diff --git a/_sources/autoapi/lmflow/utils/conversation_template/base/index.rst.txt b/_sources/autoapi/lmflow/utils/conversation_template/base/index.rst.txt new file mode 100644 index 000000000..763f10a37 --- /dev/null +++ b/_sources/autoapi/lmflow/utils/conversation_template/base/index.rst.txt @@ -0,0 +1,507 @@ +lmflow.utils.conversation_template.base +======================================= + +.. py:module:: lmflow.utils.conversation_template.base + + +Attributes +---------- + +.. autoapisummary:: + + lmflow.utils.conversation_template.base.logger + lmflow.utils.conversation_template.base.EMPTY_TEMPLATE + lmflow.utils.conversation_template.base.EMPTY_NO_SPECIAL_TOKENS_TEMPLATE + + +Classes +------- + +.. autoapisummary:: + + lmflow.utils.conversation_template.base.TemplateComponent + lmflow.utils.conversation_template.base.Formatter + lmflow.utils.conversation_template.base.EmptyFormatter + lmflow.utils.conversation_template.base.StringFormatter + lmflow.utils.conversation_template.base.ListFormatter + lmflow.utils.conversation_template.base.ConversationTemplate + + +Module Contents +--------------- + +.. py:data:: logger + +.. py:class:: TemplateComponent + + + The minimal unit of a template, which can be a token, a string, or a list of tools. + + + :Parameters: + + **type** : Literal['token', 'token_id', 'string', 'tools'] + - Type of the component. + + - When the component is a token or a string, the content should be `string`. + The difference between the two is that token will be converted to token ids + by the tokenizer.convert_tokens_to_ids() method, while string will be directly + encoded by the tokenizer.encode() method. Specially, since the bos token and eos + token are frequently used across different templates, we provide the convenience + to use `'bos_token'` and `'eos_token'` to represent the actual bos and eos tokens when + `type` of the `TemplateComponent` is `token`. For example: + + ```python + TemplateComponent(type='token', content='bos_token') + ``` + + After encoding, the content will be replaced by the actual token id of the bos token. + Please do remember that if you set the `type` to `string`, the tokenizer will try to + encode the string 'bos_token' instead of providing the actual bos token. + + - When the component is token_id, the content should be `int` or `List[int]`, and + will be directly appended to the encoded token ids. + + - Tools are not supported yet. 
+ + **content** : Union[str, int, List[str], List[int]] + Content of the component. + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:attribute:: type + :type: Literal['token', 'token_id', 'string', 'tools'] + + + .. py:attribute:: content + :type: Union[str, int, List[str], List[int]] + + + .. py:attribute:: mask + :type: Optional[bool] + :value: True + + + + .. py:method:: __post_init__() + + + .. py:method:: __repr__() -> str + + + Return repr(self). + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: __str__() -> str + + + Return str(self). + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + +.. py:class:: Formatter + + Bases: :py:obj:`abc.ABC` + + + + Helper class that provides a standard way to create an ABC using + inheritance. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:attribute:: template + :type: List[TemplateComponent] + + + .. py:method:: format(**kwargs) -> List[TemplateComponent] + :abstractmethod: + + + + .. py:method:: has_placeholder() + + +.. py:class:: EmptyFormatter + + Bases: :py:obj:`Formatter` + + + + Helper class that provides a standard way to create an ABC using + inheritance. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:method:: __post_init__() + + + .. py:method:: format(**kwargs) -> list + + + Empty formatter for when no formatting is needed. + This is useful when user has already applied formatting to the dataset. + + + + :Returns: + + list + Original template. + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + +.. py:class:: StringFormatter + + Bases: :py:obj:`Formatter` + + + + Helper class that provides a standard way to create an ABC using + inheritance. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:method:: __post_init__() + + + .. py:method:: format(**kwargs) -> list + + + Format the string components with the provided keyword arguments. + Mostly used for formatting system prompt, user and assistant messages. + + + :Parameters: + + **\*\*kwargs** : dict + Keyword arguments containing values to replace in the template components. + + :Returns: + + list + Formatted template. + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + +.. py:class:: ListFormatter + + Bases: :py:obj:`Formatter` + + + + Helper class that provides a standard way to create an ABC using + inheritance. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:method:: format(**kwargs) -> list + + +.. py:class:: ConversationTemplate + + .. py:attribute:: user_formatter + :type: Formatter + + + .. py:attribute:: assistant_formatter + :type: Formatter + + + .. py:attribute:: system_formatter + :type: Optional[Formatter] + :value: None + + + + .. py:attribute:: tools_formatter + :type: Optional[Formatter] + :value: None + + + + .. py:attribute:: separator + :type: Optional[TemplateComponent] + :value: None + + + + .. py:attribute:: special_starter + :type: Optional[TemplateComponent] + :value: None + + + + .. py:attribute:: special_stopper + :type: Optional[TemplateComponent] + :value: None + + + + .. py:attribute:: template_name + :type: Optional[str] + :value: None + + + + .. py:method:: __post_init__() + + + .. 
py:method:: encode_conversation(tokenizer: transformers.PreTrainedTokenizer, messages: List[Dict[str, str]], system: Optional[str] = None, tools: Optional[List[str]] = None, remove_last_sep: bool = False, **kwargs) -> Sequence[Tuple[List[int], List[int]]] + + + Messages here should be guaranteed to be in pairs, with the first message being the user message and the second message being the system message. + Data example: + ```json + { + "conversation_id": 2, + "system": "sysinfo1", + "tools": ["tool_1_desc"], + "messages": [ + { + "role": "user", + "content": "hi" + }, + { + "role": "assistant", + "content": "Hello!" + } + ] + } + ``` + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: _encode(tokenizer: transformers.PreTrainedTokenizer, messages: List[Dict[str, str]], system: Optional[str] = None, tools: Optional[str] = None, **kwargs) -> Sequence[Tuple[List[int], List[int]]] + + + .. py:method:: _encode_template(template: List[TemplateComponent], tokenizer: transformers.PreTrainedTokenizer, **kwargs) -> List[int] + + + Encode template components into token ids. + + + :Parameters: + + **template** : List[TemplateComponent] + Formatted template components. + + **tokenizer** : PreTrainedTokenizer + Tokenizer to convert tokens into token ids. + + :Returns: + + List[int] + Encoded token ids. + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: remove_last_separator(encoded_pairs: Sequence[Tuple[List[int], List[int]]], tokenizer: transformers.PreTrainedTokenizer) -> Sequence[Tuple[List[int], List[int]]] + + + .. py:method:: add_special_starter(encoded_pairs: Sequence[Tuple[List[int], List[int]]], tokenizer: transformers.PreTrainedTokenizer) -> Sequence[Tuple[List[int], List[int]]] + + + .. py:method:: add_special_stopper(encoded_pairs: Sequence[Tuple[List[int], List[int]]], tokenizer: transformers.PreTrainedTokenizer) -> Sequence[Tuple[List[int], List[int]]] + + + .. py:method:: _ensure_id_list(obj: Union[int, List[int]]) -> List[int] + + + Make sure the object is a list of integers. Useful for handling token ids. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + +.. py:data:: EMPTY_TEMPLATE + +.. py:data:: EMPTY_NO_SPECIAL_TOKENS_TEMPLATE + diff --git a/_sources/autoapi/lmflow/utils/conversation_template/chatglm/index.rst.txt b/_sources/autoapi/lmflow/utils/conversation_template/chatglm/index.rst.txt new file mode 100644 index 000000000..fa17b5407 --- /dev/null +++ b/_sources/autoapi/lmflow/utils/conversation_template/chatglm/index.rst.txt @@ -0,0 +1,19 @@ +lmflow.utils.conversation_template.chatglm +========================================== + +.. py:module:: lmflow.utils.conversation_template.chatglm + + +Attributes +---------- + +.. autoapisummary:: + + lmflow.utils.conversation_template.chatglm.CHATGLM3_TEMPLATE + + +Module Contents +--------------- + +.. py:data:: CHATGLM3_TEMPLATE + diff --git a/_sources/autoapi/lmflow/utils/conversation_template/chatml/index.rst.txt b/_sources/autoapi/lmflow/utils/conversation_template/chatml/index.rst.txt new file mode 100644 index 000000000..b414e64a9 --- /dev/null +++ b/_sources/autoapi/lmflow/utils/conversation_template/chatml/index.rst.txt @@ -0,0 +1,19 @@ +lmflow.utils.conversation_template.chatml +========================================= + +.. py:module:: lmflow.utils.conversation_template.chatml + + +Attributes +---------- + +.. autoapisummary:: + + lmflow.utils.conversation_template.chatml.CHATML_TEMPLATE + + +Module Contents +--------------- + +.. 
py:data:: CHATML_TEMPLATE + diff --git a/_sources/autoapi/lmflow/utils/conversation_template/deepseek/index.rst.txt b/_sources/autoapi/lmflow/utils/conversation_template/deepseek/index.rst.txt new file mode 100644 index 000000000..48ea50616 --- /dev/null +++ b/_sources/autoapi/lmflow/utils/conversation_template/deepseek/index.rst.txt @@ -0,0 +1,19 @@ +lmflow.utils.conversation_template.deepseek +=========================================== + +.. py:module:: lmflow.utils.conversation_template.deepseek + + +Attributes +---------- + +.. autoapisummary:: + + lmflow.utils.conversation_template.deepseek.DEEPSEEK_TEMPLATE + + +Module Contents +--------------- + +.. py:data:: DEEPSEEK_TEMPLATE + diff --git a/_sources/autoapi/lmflow/utils/conversation_template/fox/index.rst.txt b/_sources/autoapi/lmflow/utils/conversation_template/fox/index.rst.txt new file mode 100644 index 000000000..95fbd7f15 --- /dev/null +++ b/_sources/autoapi/lmflow/utils/conversation_template/fox/index.rst.txt @@ -0,0 +1,19 @@ +lmflow.utils.conversation_template.fox +====================================== + +.. py:module:: lmflow.utils.conversation_template.fox + + +Attributes +---------- + +.. autoapisummary:: + + lmflow.utils.conversation_template.fox.FOX_TEMPLATE + + +Module Contents +--------------- + +.. py:data:: FOX_TEMPLATE + diff --git a/_sources/autoapi/lmflow/utils/conversation_template/gemma/index.rst.txt b/_sources/autoapi/lmflow/utils/conversation_template/gemma/index.rst.txt new file mode 100644 index 000000000..e7d9381e0 --- /dev/null +++ b/_sources/autoapi/lmflow/utils/conversation_template/gemma/index.rst.txt @@ -0,0 +1,38 @@ +lmflow.utils.conversation_template.gemma +======================================== + +.. py:module:: lmflow.utils.conversation_template.gemma + + +Attributes +---------- + +.. autoapisummary:: + + lmflow.utils.conversation_template.gemma.logger + lmflow.utils.conversation_template.gemma.GEMMA_TEMPLATE + + +Classes +------- + +.. autoapisummary:: + + lmflow.utils.conversation_template.gemma.GemmaConversationTemplate + + +Module Contents +--------------- + +.. py:data:: logger + +.. py:class:: GemmaConversationTemplate + + Bases: :py:obj:`lmflow.utils.conversation_template.base.ConversationTemplate` + + + .. py:method:: encode_conversation(*args, **kwargs) + + +.. py:data:: GEMMA_TEMPLATE + diff --git a/_sources/autoapi/lmflow/utils/conversation_template/index.rst.txt b/_sources/autoapi/lmflow/utils/conversation_template/index.rst.txt new file mode 100644 index 000000000..213ada83b --- /dev/null +++ b/_sources/autoapi/lmflow/utils/conversation_template/index.rst.txt @@ -0,0 +1,251 @@ +lmflow.utils.conversation_template +================================== + +.. py:module:: lmflow.utils.conversation_template + + +Submodules +---------- + +.. toctree:: + :maxdepth: 1 + + /autoapi/lmflow/utils/conversation_template/base/index + /autoapi/lmflow/utils/conversation_template/chatglm/index + /autoapi/lmflow/utils/conversation_template/chatml/index + /autoapi/lmflow/utils/conversation_template/deepseek/index + /autoapi/lmflow/utils/conversation_template/fox/index + /autoapi/lmflow/utils/conversation_template/gemma/index + /autoapi/lmflow/utils/conversation_template/internlm/index + /autoapi/lmflow/utils/conversation_template/llama/index + /autoapi/lmflow/utils/conversation_template/phi/index + /autoapi/lmflow/utils/conversation_template/qwen/index + /autoapi/lmflow/utils/conversation_template/yi/index + /autoapi/lmflow/utils/conversation_template/zephyr/index + + +Attributes +---------- + +.. 
autoapisummary:: + + lmflow.utils.conversation_template.EMPTY_TEMPLATE + lmflow.utils.conversation_template.EMPTY_NO_SPECIAL_TOKENS_TEMPLATE + lmflow.utils.conversation_template.CHATGLM3_TEMPLATE + lmflow.utils.conversation_template.CHATML_TEMPLATE + lmflow.utils.conversation_template.DEEPSEEK_TEMPLATE + lmflow.utils.conversation_template.FOX_TEMPLATE + lmflow.utils.conversation_template.GEMMA_TEMPLATE + lmflow.utils.conversation_template.INTERNLM2_TEMPLATE + lmflow.utils.conversation_template.LLAMA2_TEMPLATE + lmflow.utils.conversation_template.LLAMA3_TEMPLATE + lmflow.utils.conversation_template.PHI3_TEMPLATE + lmflow.utils.conversation_template.QWEN2_TEMPLATE + lmflow.utils.conversation_template.YI1_5_TEMPLATE + lmflow.utils.conversation_template.ZEPHYR_TEMPLATE + lmflow.utils.conversation_template.PRESET_TEMPLATES + + +Classes +------- + +.. autoapisummary:: + + lmflow.utils.conversation_template.ConversationTemplate + + +Package Contents +---------------- + +.. py:data:: EMPTY_TEMPLATE + +.. py:data:: EMPTY_NO_SPECIAL_TOKENS_TEMPLATE + +.. py:class:: ConversationTemplate + + .. py:attribute:: user_formatter + :type: Formatter + + + .. py:attribute:: assistant_formatter + :type: Formatter + + + .. py:attribute:: system_formatter + :type: Optional[Formatter] + :value: None + + + + .. py:attribute:: tools_formatter + :type: Optional[Formatter] + :value: None + + + + .. py:attribute:: separator + :type: Optional[TemplateComponent] + :value: None + + + + .. py:attribute:: special_starter + :type: Optional[TemplateComponent] + :value: None + + + + .. py:attribute:: special_stopper + :type: Optional[TemplateComponent] + :value: None + + + + .. py:attribute:: template_name + :type: Optional[str] + :value: None + + + + .. py:method:: __post_init__() + + + .. py:method:: encode_conversation(tokenizer: transformers.PreTrainedTokenizer, messages: List[Dict[str, str]], system: Optional[str] = None, tools: Optional[List[str]] = None, remove_last_sep: bool = False, **kwargs) -> Sequence[Tuple[List[int], List[int]]] + + + Messages here should be guaranteed to be in pairs, with the first message being the user message and the second message being the system message. + Data example: + ```json + { + "conversation_id": 2, + "system": "sysinfo1", + "tools": ["tool_1_desc"], + "messages": [ + { + "role": "user", + "content": "hi" + }, + { + "role": "assistant", + "content": "Hello!" + } + ] + } + ``` + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: _encode(tokenizer: transformers.PreTrainedTokenizer, messages: List[Dict[str, str]], system: Optional[str] = None, tools: Optional[str] = None, **kwargs) -> Sequence[Tuple[List[int], List[int]]] + + + .. py:method:: _encode_template(template: List[TemplateComponent], tokenizer: transformers.PreTrainedTokenizer, **kwargs) -> List[int] + + + Encode template components into token ids. + + + :Parameters: + + **template** : List[TemplateComponent] + Formatted template components. + + **tokenizer** : PreTrainedTokenizer + Tokenizer to convert tokens into token ids. + + :Returns: + + List[int] + Encoded token ids. + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: remove_last_separator(encoded_pairs: Sequence[Tuple[List[int], List[int]]], tokenizer: transformers.PreTrainedTokenizer) -> Sequence[Tuple[List[int], List[int]]] + + + .. 
py:method:: add_special_starter(encoded_pairs: Sequence[Tuple[List[int], List[int]]], tokenizer: transformers.PreTrainedTokenizer) -> Sequence[Tuple[List[int], List[int]]] + + + .. py:method:: add_special_stopper(encoded_pairs: Sequence[Tuple[List[int], List[int]]], tokenizer: transformers.PreTrainedTokenizer) -> Sequence[Tuple[List[int], List[int]]] + + + .. py:method:: _ensure_id_list(obj: Union[int, List[int]]) -> List[int] + + + Make sure the object is a list of integers. Useful for handling token ids. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + +.. py:data:: CHATGLM3_TEMPLATE + +.. py:data:: CHATML_TEMPLATE + +.. py:data:: DEEPSEEK_TEMPLATE + +.. py:data:: FOX_TEMPLATE + +.. py:data:: GEMMA_TEMPLATE + +.. py:data:: INTERNLM2_TEMPLATE + +.. py:data:: LLAMA2_TEMPLATE + +.. py:data:: LLAMA3_TEMPLATE + +.. py:data:: PHI3_TEMPLATE + +.. py:data:: QWEN2_TEMPLATE + +.. py:data:: YI1_5_TEMPLATE + +.. py:data:: ZEPHYR_TEMPLATE + +.. py:data:: PRESET_TEMPLATES + diff --git a/_sources/autoapi/lmflow/utils/conversation_template/internlm/index.rst.txt b/_sources/autoapi/lmflow/utils/conversation_template/internlm/index.rst.txt new file mode 100644 index 000000000..26e182b57 --- /dev/null +++ b/_sources/autoapi/lmflow/utils/conversation_template/internlm/index.rst.txt @@ -0,0 +1,19 @@ +lmflow.utils.conversation_template.internlm +=========================================== + +.. py:module:: lmflow.utils.conversation_template.internlm + + +Attributes +---------- + +.. autoapisummary:: + + lmflow.utils.conversation_template.internlm.INTERNLM2_TEMPLATE + + +Module Contents +--------------- + +.. py:data:: INTERNLM2_TEMPLATE + diff --git a/_sources/autoapi/lmflow/utils/conversation_template/llama/index.rst.txt b/_sources/autoapi/lmflow/utils/conversation_template/llama/index.rst.txt new file mode 100644 index 000000000..24db9f50d --- /dev/null +++ b/_sources/autoapi/lmflow/utils/conversation_template/llama/index.rst.txt @@ -0,0 +1,41 @@ +lmflow.utils.conversation_template.llama +======================================== + +.. py:module:: lmflow.utils.conversation_template.llama + + +Attributes +---------- + +.. autoapisummary:: + + lmflow.utils.conversation_template.llama.logger + lmflow.utils.conversation_template.llama.LLAMA3_TEMPLATE + lmflow.utils.conversation_template.llama.LLAMA2_TEMPLATE + + +Classes +------- + +.. autoapisummary:: + + lmflow.utils.conversation_template.llama.Llama2ConversationTemplate + + +Module Contents +--------------- + +.. py:data:: logger + +.. py:class:: Llama2ConversationTemplate + + Bases: :py:obj:`lmflow.utils.conversation_template.base.ConversationTemplate` + + + .. py:method:: _encode(tokenizer: transformers.PreTrainedTokenizer, messages: List[Dict[str, str]], system: Optional[str] = None, tools: Optional[str] = None, **kwargs) -> Sequence[Tuple[List[int], List[int]]] + + +.. py:data:: LLAMA3_TEMPLATE + +.. py:data:: LLAMA2_TEMPLATE + diff --git a/_sources/autoapi/lmflow/utils/conversation_template/phi/index.rst.txt b/_sources/autoapi/lmflow/utils/conversation_template/phi/index.rst.txt new file mode 100644 index 000000000..727d06e43 --- /dev/null +++ b/_sources/autoapi/lmflow/utils/conversation_template/phi/index.rst.txt @@ -0,0 +1,19 @@ +lmflow.utils.conversation_template.phi +====================================== + +.. py:module:: lmflow.utils.conversation_template.phi + + +Attributes +---------- + +.. autoapisummary:: + + lmflow.utils.conversation_template.phi.PHI3_TEMPLATE + + +Module Contents +--------------- + +.. 
py:data:: PHI3_TEMPLATE + diff --git a/_sources/autoapi/lmflow/utils/conversation_template/qwen/index.rst.txt b/_sources/autoapi/lmflow/utils/conversation_template/qwen/index.rst.txt new file mode 100644 index 000000000..d01cc8575 --- /dev/null +++ b/_sources/autoapi/lmflow/utils/conversation_template/qwen/index.rst.txt @@ -0,0 +1,19 @@ +lmflow.utils.conversation_template.qwen +======================================= + +.. py:module:: lmflow.utils.conversation_template.qwen + + +Attributes +---------- + +.. autoapisummary:: + + lmflow.utils.conversation_template.qwen.QWEN2_TEMPLATE + + +Module Contents +--------------- + +.. py:data:: QWEN2_TEMPLATE + diff --git a/_sources/autoapi/lmflow/utils/conversation_template/yi/index.rst.txt b/_sources/autoapi/lmflow/utils/conversation_template/yi/index.rst.txt new file mode 100644 index 000000000..b088e0fe2 --- /dev/null +++ b/_sources/autoapi/lmflow/utils/conversation_template/yi/index.rst.txt @@ -0,0 +1,19 @@ +lmflow.utils.conversation_template.yi +===================================== + +.. py:module:: lmflow.utils.conversation_template.yi + + +Attributes +---------- + +.. autoapisummary:: + + lmflow.utils.conversation_template.yi.YI1_5_TEMPLATE + + +Module Contents +--------------- + +.. py:data:: YI1_5_TEMPLATE + diff --git a/_sources/autoapi/lmflow/utils/conversation_template/zephyr/index.rst.txt b/_sources/autoapi/lmflow/utils/conversation_template/zephyr/index.rst.txt new file mode 100644 index 000000000..0f5534552 --- /dev/null +++ b/_sources/autoapi/lmflow/utils/conversation_template/zephyr/index.rst.txt @@ -0,0 +1,38 @@ +lmflow.utils.conversation_template.zephyr +========================================= + +.. py:module:: lmflow.utils.conversation_template.zephyr + + +Attributes +---------- + +.. autoapisummary:: + + lmflow.utils.conversation_template.zephyr.logger + lmflow.utils.conversation_template.zephyr.ZEPHYR_TEMPLATE + + +Classes +------- + +.. autoapisummary:: + + lmflow.utils.conversation_template.zephyr.ZephyrConversationTemplate + + +Module Contents +--------------- + +.. py:data:: logger + +.. py:class:: ZephyrConversationTemplate + + Bases: :py:obj:`lmflow.utils.conversation_template.base.ConversationTemplate` + + + .. py:method:: _encode(tokenizer: transformers.PreTrainedTokenizer, messages: List[Dict[str, str]], system: Optional[str] = None, tools: Optional[str] = None, **kwargs) -> Sequence[Tuple[List[int], List[int]]] + + +.. py:data:: ZEPHYR_TEMPLATE + diff --git a/_sources/autoapi/lmflow/utils/data_utils/index.rst.txt b/_sources/autoapi/lmflow/utils/data_utils/index.rst.txt new file mode 100644 index 000000000..b47c99562 --- /dev/null +++ b/_sources/autoapi/lmflow/utils/data_utils/index.rst.txt @@ -0,0 +1,241 @@ +lmflow.utils.data_utils +======================= + +.. py:module:: lmflow.utils.data_utils + +.. autoapi-nested-parse:: + + The program includes several functions: setting a random seed, + loading data from a JSON file, batching data, and extracting answers from generated text. + + .. + !! processed by numpydoc !! + + +Classes +------- + +.. autoapisummary:: + + lmflow.utils.data_utils.VLLMInferenceResultWithInput + lmflow.utils.data_utils.RewardModelInferenceResultWithInput + + +Functions +--------- + +.. autoapisummary:: + + lmflow.utils.data_utils.set_random_seed + lmflow.utils.data_utils.load_data + lmflow.utils.data_utils.batchlize + lmflow.utils.data_utils.answer_extraction + lmflow.utils.data_utils.process_image_flag + + +Module Contents +--------------- + +.. 
py:function:: set_random_seed(seed: int) + + + Set the random seed for `random`, `numpy`, `torch`, `torch.cuda`. + + + :Parameters: + + **seed** : int + The default seed. + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + +.. py:function:: load_data(file_name: str) + + + Load data with file name. + + + :Parameters: + + **file_name** : str. + The dataset file name. + + :Returns: + + **inputs** : list. + The input texts of the dataset. + + **outputs** : list. + The output texts file datasets. + + **len** : int. + The length of the dataset. + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + +.. py:function:: batchlize(examples: list, batch_size: int, random_shuffle: bool) + + + Convert examples to a dataloader. + + + :Parameters: + + **examples** : list. + Data list. + + **batch_size** : int. + .. + + **random_shuffle** : bool + If true, the dataloader shuffle the training data. + + :Returns: + + dataloader: + Dataloader with batch generator. + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + +.. py:function:: answer_extraction(response, answer_type=None) + + + Use this funtion to extract answers from generated text + + + :Parameters: + + **args** + Arguments. + + **response** : str + plain string response. + + :Returns: + + answer: + Decoded answer (such as A, B, C, D, E for mutiple-choice QA). + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + +.. py:function:: process_image_flag(text, image_flag='') + +.. py:class:: VLLMInferenceResultWithInput + + Bases: :py:obj:`TypedDict` + + + + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:attribute:: input + :type: str + + + .. py:attribute:: output + :type: Union[List[str], List[List[int]]] + + +.. py:class:: RewardModelInferenceResultWithInput + + Bases: :py:obj:`TypedDict` + + + + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:attribute:: input + :type: str + + + .. py:attribute:: output + :type: List[Dict[str, Union[str, float]]] + + diff --git a/_sources/autoapi/lmflow/utils/flash_attention/bloom_flash_attention/index.rst.txt b/_sources/autoapi/lmflow/utils/flash_attention/bloom_flash_attention/index.rst.txt new file mode 100644 index 000000000..090a98ef9 --- /dev/null +++ b/_sources/autoapi/lmflow/utils/flash_attention/bloom_flash_attention/index.rst.txt @@ -0,0 +1,25 @@ +lmflow.utils.flash_attention.bloom_flash_attention +================================================== + +.. py:module:: lmflow.utils.flash_attention.bloom_flash_attention + + +Functions +--------- + +.. autoapisummary:: + + lmflow.utils.flash_attention.bloom_flash_attention.forward + lmflow.utils.flash_attention.bloom_flash_attention._prepare_attn_mask + lmflow.utils.flash_attention.bloom_flash_attention.replace_bloom_attn_with_flash_attn + + +Module Contents +--------------- + +.. py:function:: forward(self, hidden_states: torch.Tensor, residual: torch.Tensor, alibi: torch.Tensor, attention_mask: torch.Tensor, layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, head_mask: Optional[torch.Tensor] = None, use_cache: bool = False, output_attentions: bool = False) + +.. py:function:: _prepare_attn_mask(self, attention_mask: torch.Tensor, input_shape: Tuple[int, int], past_key_values_length: int) -> torch.BoolTensor + +.. 
py:function:: replace_bloom_attn_with_flash_attn() + diff --git a/_sources/autoapi/lmflow/utils/flash_attention/gpt2_flash_attention/index.rst.txt b/_sources/autoapi/lmflow/utils/flash_attention/gpt2_flash_attention/index.rst.txt new file mode 100644 index 000000000..59785df48 --- /dev/null +++ b/_sources/autoapi/lmflow/utils/flash_attention/gpt2_flash_attention/index.rst.txt @@ -0,0 +1,25 @@ +lmflow.utils.flash_attention.gpt2_flash_attention +================================================= + +.. py:module:: lmflow.utils.flash_attention.gpt2_flash_attention + + +Functions +--------- + +.. autoapisummary:: + + lmflow.utils.flash_attention.gpt2_flash_attention.forward + lmflow.utils.flash_attention.gpt2_flash_attention._prepare_decoder_attention_mask + lmflow.utils.flash_attention.gpt2_flash_attention.replace_gpt2_attn_with_flash_attn + + +Module Contents +--------------- + +.. py:function:: forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]], layer_past: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = False, output_attentions: Optional[bool] = False) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], Ellipsis] + +.. py:function:: _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length) + +.. py:function:: replace_gpt2_attn_with_flash_attn() + diff --git a/_sources/autoapi/lmflow/utils/flash_attention/gpt_neo_flash_attention/index.rst.txt b/_sources/autoapi/lmflow/utils/flash_attention/gpt_neo_flash_attention/index.rst.txt new file mode 100644 index 000000000..e093d94c1 --- /dev/null +++ b/_sources/autoapi/lmflow/utils/flash_attention/gpt_neo_flash_attention/index.rst.txt @@ -0,0 +1,25 @@ +lmflow.utils.flash_attention.gpt_neo_flash_attention +==================================================== + +.. py:module:: lmflow.utils.flash_attention.gpt_neo_flash_attention + + +Functions +--------- + +.. autoapisummary:: + + lmflow.utils.flash_attention.gpt_neo_flash_attention._attn + lmflow.utils.flash_attention.gpt_neo_flash_attention.forward + lmflow.utils.flash_attention.gpt_neo_flash_attention.replace_gpt_neo_attn_with_flash_attn + + +Module Contents +--------------- + +.. py:function:: _attn(self, query, key, value, attention_mask=None, head_mask=None) + +.. py:function:: forward(self, hidden_states, attention_mask=None, layer_past=None, head_mask=None, use_cache=False, output_attentions=False) + +.. py:function:: replace_gpt_neo_attn_with_flash_attn() + diff --git a/_sources/autoapi/lmflow/utils/flash_attention/index.rst.txt b/_sources/autoapi/lmflow/utils/flash_attention/index.rst.txt new file mode 100644 index 000000000..8194e87ee --- /dev/null +++ b/_sources/autoapi/lmflow/utils/flash_attention/index.rst.txt @@ -0,0 +1,19 @@ +lmflow.utils.flash_attention +============================ + +.. py:module:: lmflow.utils.flash_attention + + +Submodules +---------- + +.. 
toctree:: + :maxdepth: 1 + + /autoapi/lmflow/utils/flash_attention/bloom_flash_attention/index + /autoapi/lmflow/utils/flash_attention/gpt2_flash_attention/index + /autoapi/lmflow/utils/flash_attention/gpt_neo_flash_attention/index + /autoapi/lmflow/utils/flash_attention/llama_flash_attention/index + /autoapi/lmflow/utils/flash_attention/triton_flash_attention/index + + diff --git a/_sources/autoapi/lmflow/utils/flash_attention/llama_flash_attention/index.rst.txt b/_sources/autoapi/lmflow/utils/flash_attention/llama_flash_attention/index.rst.txt new file mode 100644 index 000000000..1f9392c98 --- /dev/null +++ b/_sources/autoapi/lmflow/utils/flash_attention/llama_flash_attention/index.rst.txt @@ -0,0 +1,25 @@ +lmflow.utils.flash_attention.llama_flash_attention +================================================== + +.. py:module:: lmflow.utils.flash_attention.llama_flash_attention + + +Functions +--------- + +.. autoapisummary:: + + lmflow.utils.flash_attention.llama_flash_attention.forward + lmflow.utils.flash_attention.llama_flash_attention._prepare_decoder_attention_mask + lmflow.utils.flash_attention.llama_flash_attention.replace_llama_attn_with_flash_attn + + +Module Contents +--------------- + +.. py:function:: forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: bool = False, use_cache: bool = False) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]] + +.. py:function:: _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length) + +.. py:function:: replace_llama_attn_with_flash_attn() + diff --git a/_sources/autoapi/lmflow/utils/flash_attention/triton_flash_attention/index.rst.txt b/_sources/autoapi/lmflow/utils/flash_attention/triton_flash_attention/index.rst.txt new file mode 100644 index 000000000..62dcdc3ea --- /dev/null +++ b/_sources/autoapi/lmflow/utils/flash_attention/triton_flash_attention/index.rst.txt @@ -0,0 +1,229 @@ +lmflow.utils.flash_attention.triton_flash_attention +=================================================== + +.. py:module:: lmflow.utils.flash_attention.triton_flash_attention + +.. autoapi-nested-parse:: + + *Experimental* implementation of FlashAttention in Triton. + Tested with triton==2.0.0.dev20221202. + Triton 2.0 has a new backend (MLIR) but seems like it doesn't yet work for head dimensions + other than 64: + https://github.com/openai/triton/blob/d376020f90002757eea3ea9475d4f7cfc2ec5ead/python/triton/ops/flash_attention.py#L207 + We'll update this implementation with the new Triton backend once this is fixed. + + We use the FlashAttention implementation from Phil Tillet a starting point. + https://github.com/openai/triton/blob/master/python/tutorials/06-fused-attention.py + + Changes: + - Implement both causal and non-causal attention. + - Implement both self-attention and cross-attention. + - Support arbitrary seqlens (not just multiples of 128), for both forward and backward. + - Support all head dimensions up to 128 (not just 16, 32, 64, 128), for both forward and backward. + - Support attention bias. + - Speed up the forward pass a bit, and only store the LSE instead of m and l. + - Make the backward for d=128 much faster by reducing register spilling. + - Optionally parallelize the backward pass across seqlen_k, to deal with the case of + small batch size * nheads. 
+ + Caution: + - This is an *experimental* implementation. The forward pass should be quite robust but + I'm not 100% sure that the backward pass doesn't have race conditions (due to the Triton compiler). + - This implementation has only been tested on A100. + - If you plan to use headdim other than 64 and 128, you should test for race conditions + (due to the Triton compiler), as done in tests/test_flash_attn.py + "test_flash_attn_triton_race_condition". I've tested and fixed many race conditions + for different head dimensions (40, 48, 64, 128, 80, 88, 96), but I'm still not 100% confident + that there are none left for other head dimensions. + + Differences between this Triton version and the CUDA version: + - Triton version doesn't support dropout. + - Triton forward is generally faster than CUDA forward, while Triton backward is + generally slower than CUDA backward. Overall Triton forward + backward is slightly slower + than CUDA forward + backward. + - Triton version doesn't support different sequence lengths in a batch (i.e., RaggedTensor/NestedTensor). + - Triton version supports attention bias, while CUDA version doesn't. + + .. + !! processed by numpydoc !! + + +Attributes +---------- + +.. autoapisummary:: + + lmflow.utils.flash_attention.triton_flash_attention.flash_attn_qkvpacked_func + lmflow.utils.flash_attention.triton_flash_attention.flash_attn_kvpacked_func + lmflow.utils.flash_attention.triton_flash_attention.flash_attn_func + + +Classes +------- + +.. autoapisummary:: + + lmflow.utils.flash_attention.triton_flash_attention.FlashAttnQKVPackedFunc + lmflow.utils.flash_attention.triton_flash_attention.FlashAttnKVPackedFunc + lmflow.utils.flash_attention.triton_flash_attention.FlashAttnFunc + + +Functions +--------- + +.. autoapisummary:: + + lmflow.utils.flash_attention.triton_flash_attention._fwd_kernel + lmflow.utils.flash_attention.triton_flash_attention._bwd_preprocess_do_o_dot + lmflow.utils.flash_attention.triton_flash_attention._bwd_store_dk_dv + lmflow.utils.flash_attention.triton_flash_attention._bwd_kernel_one_col_block + lmflow.utils.flash_attention.triton_flash_attention.init_to_zero + lmflow.utils.flash_attention.triton_flash_attention._bwd_kernel + lmflow.utils.flash_attention.triton_flash_attention._flash_attn_forward + lmflow.utils.flash_attention.triton_flash_attention._flash_attn_backward + + +Module Contents +--------------- + +.. py:function:: _fwd_kernel(Q, K, V, Bias, Out, Lse, TMP, softmax_scale, stride_qb, stride_qh, stride_qm, stride_kb, stride_kh, stride_kn, stride_vb, stride_vh, stride_vn, stride_bb, stride_bh, stride_bm, stride_ob, stride_oh, stride_om, nheads, seqlen_q, seqlen_k, seqlen_q_rounded, headdim, CACHE_KEY_SEQLEN_Q, CACHE_KEY_SEQLEN_K, BIAS_TYPE: triton.language.constexpr, IS_CAUSAL: triton.language.constexpr, BLOCK_HEADDIM: triton.language.constexpr, EVEN_M: triton.language.constexpr, EVEN_N: triton.language.constexpr, EVEN_HEADDIM: triton.language.constexpr, BLOCK_M: triton.language.constexpr, BLOCK_N: triton.language.constexpr) + +.. py:function:: _bwd_preprocess_do_o_dot(Out, DO, Delta, stride_ob, stride_oh, stride_om, stride_dob, stride_doh, stride_dom, nheads, seqlen_q, seqlen_q_rounded, headdim, BLOCK_M: triton.language.constexpr, BLOCK_HEADDIM: triton.language.constexpr) + +.. py:function:: _bwd_store_dk_dv(dk_ptrs, dv_ptrs, dk, dv, offs_n, offs_d, seqlen_k, headdim, EVEN_M: triton.language.constexpr, EVEN_N: triton.language.constexpr, EVEN_HEADDIM: triton.language.constexpr) + +.. 
py:function:: _bwd_kernel_one_col_block(start_n, Q, K, V, Bias, DO, DQ, DK, DV, LSE, D, softmax_scale, stride_qm, stride_kn, stride_vn, stride_bm, stride_dom, stride_dqm, stride_dkn, stride_dvn, seqlen_q, seqlen_k, headdim, ATOMIC_ADD: triton.language.constexpr, BIAS_TYPE: triton.language.constexpr, IS_CAUSAL: triton.language.constexpr, BLOCK_HEADDIM: triton.language.constexpr, EVEN_M: triton.language.constexpr, EVEN_N: triton.language.constexpr, EVEN_HEADDIM: triton.language.constexpr, BLOCK_M: triton.language.constexpr, BLOCK_N: triton.language.constexpr) + +.. py:function:: init_to_zero(name) + +.. py:function:: _bwd_kernel(Q, K, V, Bias, DO, DQ, DK, DV, LSE, D, softmax_scale, stride_qb, stride_qh, stride_qm, stride_kb, stride_kh, stride_kn, stride_vb, stride_vh, stride_vn, stride_bb, stride_bh, stride_bm, stride_dob, stride_doh, stride_dom, stride_dqb, stride_dqh, stride_dqm, stride_dkb, stride_dkh, stride_dkn, stride_dvb, stride_dvh, stride_dvn, nheads, seqlen_q, seqlen_k, seqlen_q_rounded, headdim, CACHE_KEY_SEQLEN_Q, CACHE_KEY_SEQLEN_K, BIAS_TYPE: triton.language.constexpr, IS_CAUSAL: triton.language.constexpr, BLOCK_HEADDIM: triton.language.constexpr, SEQUENCE_PARALLEL: triton.language.constexpr, EVEN_M: triton.language.constexpr, EVEN_N: triton.language.constexpr, EVEN_HEADDIM: triton.language.constexpr, BLOCK_M: triton.language.constexpr, BLOCK_N: triton.language.constexpr) + +.. py:function:: _flash_attn_forward(q, k, v, bias=None, causal=False, softmax_scale=None) + +.. py:function:: _flash_attn_backward(do, q, k, v, o, lse, dq, dk, dv, bias=None, causal=False, softmax_scale=None) + +.. py:class:: FlashAttnQKVPackedFunc + + Bases: :py:obj:`torch.autograd.Function` + + + .. py:method:: forward(ctx, qkv, bias=None, causal=False, softmax_scale=None) + :staticmethod: + + + + qkv: (batch, seqlen, 3, nheads, headdim) + bias: optional, shape broadcastible to (batch, nheads, seqlen, seqlen). + For example, ALiBi mask for causal would have shape (1, nheads, 1, seqlen). + ALiBi mask for non-causal would have shape (1, nheads, seqlen, seqlen) + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: backward(ctx, do) + :staticmethod: + + + +.. py:data:: flash_attn_qkvpacked_func + +.. py:class:: FlashAttnKVPackedFunc + + Bases: :py:obj:`torch.autograd.Function` + + + .. py:method:: forward(ctx, q, kv, bias=None, causal=False, softmax_scale=None) + :staticmethod: + + + + q: (batch, seqlen_q, nheads, headdim) + kv: (batch, seqlen_k, 2, nheads, headdim) + bias: optional, shape broadcastible to (batch, nheads, seqlen_q, seqlen_k). + For example, ALiBi mask for causal would have shape (1, nheads, 1, seqlen_k). + ALiBi mask for non-causal would have shape (1, nheads, seqlen_q, seqlen_k) + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. py:method:: backward(ctx, do) + :staticmethod: + + + +.. py:data:: flash_attn_kvpacked_func + +.. py:class:: FlashAttnFunc + + Bases: :py:obj:`torch.autograd.Function` + + + .. py:method:: forward(ctx, q, k, v, bias=None, causal=False, softmax_scale=None) + :staticmethod: + + + + q: (batch_size, seqlen_q, nheads, headdim) + k, v: (batch_size, seqlen_k, nheads, headdim) + bias: optional, shape broadcastible to (batch, nheads, seqlen_q, seqlen_k). + For example, ALiBi mask for causal would have shape (1, nheads, 1, seqlen_k). + ALiBi mask for non-causal would have shape (1, nheads, seqlen_q, seqlen_k) + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + + .. 
py:method:: backward(ctx, do) + :staticmethod: + + + +.. py:data:: flash_attn_func + diff --git a/_sources/autoapi/lmflow/utils/index.rst.txt b/_sources/autoapi/lmflow/utils/index.rst.txt new file mode 100644 index 000000000..7194d0591 --- /dev/null +++ b/_sources/autoapi/lmflow/utils/index.rst.txt @@ -0,0 +1,31 @@ +lmflow.utils +============ + +.. py:module:: lmflow.utils + + +Subpackages +----------- + +.. toctree:: + :maxdepth: 1 + + /autoapi/lmflow/utils/conversation_template/index + /autoapi/lmflow/utils/flash_attention/index + /autoapi/lmflow/utils/position_interpolation/index + + +Submodules +---------- + +.. toctree:: + :maxdepth: 1 + + /autoapi/lmflow/utils/common/index + /autoapi/lmflow/utils/constants/index + /autoapi/lmflow/utils/data_utils/index + /autoapi/lmflow/utils/llava_conversation_lib/index + /autoapi/lmflow/utils/model/index + /autoapi/lmflow/utils/multimodal/index + + diff --git a/_sources/autoapi/lmflow/utils/llava_conversation_lib/index.rst.txt b/_sources/autoapi/lmflow/utils/llava_conversation_lib/index.rst.txt new file mode 100644 index 000000000..8253f9e45 --- /dev/null +++ b/_sources/autoapi/lmflow/utils/llava_conversation_lib/index.rst.txt @@ -0,0 +1,187 @@ +lmflow.utils.llava_conversation_lib +=================================== + +.. py:module:: lmflow.utils.llava_conversation_lib + + +Attributes +---------- + +.. autoapisummary:: + + lmflow.utils.llava_conversation_lib.conv_vicuna_v0 + lmflow.utils.llava_conversation_lib.conv_vicuna_v1 + lmflow.utils.llava_conversation_lib.conv_llama_2 + lmflow.utils.llava_conversation_lib.conv_llava_llama_2 + lmflow.utils.llava_conversation_lib.conv_mpt + lmflow.utils.llava_conversation_lib.conv_llava_plain + lmflow.utils.llava_conversation_lib.conv_llava_v0 + lmflow.utils.llava_conversation_lib.conv_llava_v0_mmtag + lmflow.utils.llava_conversation_lib.conv_llava_v1 + lmflow.utils.llava_conversation_lib.conv_llava_v1_mmtag + lmflow.utils.llava_conversation_lib.default_conversation + lmflow.utils.llava_conversation_lib.conv_templates + + +Classes +------- + +.. autoapisummary:: + + lmflow.utils.llava_conversation_lib.SeparatorStyle + lmflow.utils.llava_conversation_lib.Conversation + + +Module Contents +--------------- + +.. py:class:: SeparatorStyle(*args, **kwds) + + Bases: :py:obj:`enum.Enum` + + + + Different separator style. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:attribute:: SINGLE + + + .. py:attribute:: TWO + + + .. py:attribute:: MPT + + + .. py:attribute:: PLAIN + + + .. py:attribute:: LLAMA_2 + + +.. py:class:: Conversation + + + A class that keeps all conversation history. + + + + + + + + + + + + + + + + + .. + !! processed by numpydoc !! + + .. py:attribute:: system + :type: str + + + .. py:attribute:: roles + :type: List[str] + + + .. py:attribute:: messages + :type: List[List[str]] + + + .. py:attribute:: offset + :type: int + + + .. py:attribute:: sep_style + :type: SeparatorStyle + + + .. py:attribute:: sep + :type: str + :value: '###' + + + + .. py:attribute:: sep2 + :type: str + :value: None + + + + .. py:attribute:: version + :type: str + :value: 'Unknown' + + + + .. py:attribute:: skip_next + :type: bool + :value: False + + + + .. py:method:: get_prompt() + + + .. py:method:: append_message(role, message) + + + .. py:method:: get_images(return_pil=False) + + + .. py:method:: to_gradio_chatbot() + + + .. py:method:: copy() + + + .. py:method:: dict() + + +.. py:data:: conv_vicuna_v0 + +.. py:data:: conv_vicuna_v1 + +.. py:data:: conv_llama_2 + +.. 
py:data:: conv_llava_llama_2 + +.. py:data:: conv_mpt + +.. py:data:: conv_llava_plain + +.. py:data:: conv_llava_v0 + +.. py:data:: conv_llava_v0_mmtag + +.. py:data:: conv_llava_v1 + +.. py:data:: conv_llava_v1_mmtag + +.. py:data:: default_conversation + +.. py:data:: conv_templates + diff --git a/_sources/autoapi/lmflow/utils/model/index.rst.txt b/_sources/autoapi/lmflow/utils/model/index.rst.txt new file mode 100644 index 000000000..0f106ecf0 --- /dev/null +++ b/_sources/autoapi/lmflow/utils/model/index.rst.txt @@ -0,0 +1,29 @@ +lmflow.utils.model +================== + +.. py:module:: lmflow.utils.model + + +Attributes +---------- + +.. autoapisummary:: + + lmflow.utils.model.logger + + +Functions +--------- + +.. autoapisummary:: + + lmflow.utils.model.check_homogeneity + + +Module Contents +--------------- + +.. py:data:: logger + +.. py:function:: check_homogeneity(model_args_list: List[lmflow.args.ModelArguments]) -> bool + diff --git a/_sources/autoapi/lmflow/utils/multimodal/index.rst.txt b/_sources/autoapi/lmflow/utils/multimodal/index.rst.txt new file mode 100644 index 000000000..a700ab4dc --- /dev/null +++ b/_sources/autoapi/lmflow/utils/multimodal/index.rst.txt @@ -0,0 +1,25 @@ +lmflow.utils.multimodal +======================= + +.. py:module:: lmflow.utils.multimodal + + +Functions +--------- + +.. autoapisummary:: + + lmflow.utils.multimodal.update_custom_config + lmflow.utils.multimodal.load_llava_pretrain_model + lmflow.utils.multimodal.adapt_llava_model_to_lmflow_type + + +Module Contents +--------------- + +.. py:function:: update_custom_config(config, model_args) + +.. py:function:: load_llava_pretrain_model(model, checkpoint_path) + +.. py:function:: adapt_llava_model_to_lmflow_type(state_dict) + diff --git a/_sources/autoapi/lmflow/utils/position_interpolation/index.rst.txt b/_sources/autoapi/lmflow/utils/position_interpolation/index.rst.txt new file mode 100644 index 000000000..586547af6 --- /dev/null +++ b/_sources/autoapi/lmflow/utils/position_interpolation/index.rst.txt @@ -0,0 +1,15 @@ +lmflow.utils.position_interpolation +=================================== + +.. py:module:: lmflow.utils.position_interpolation + + +Submodules +---------- + +.. toctree:: + :maxdepth: 1 + + /autoapi/lmflow/utils/position_interpolation/llama_rope_scaled_monkey_patch/index + + diff --git a/_sources/autoapi/lmflow/utils/position_interpolation/llama_rope_scaled_monkey_patch/index.rst.txt b/_sources/autoapi/lmflow/utils/position_interpolation/llama_rope_scaled_monkey_patch/index.rst.txt new file mode 100644 index 000000000..a5c5e1f62 --- /dev/null +++ b/_sources/autoapi/lmflow/utils/position_interpolation/llama_rope_scaled_monkey_patch/index.rst.txt @@ -0,0 +1,62 @@ +lmflow.utils.position_interpolation.llama_rope_scaled_monkey_patch +================================================================== + +.. py:module:: lmflow.utils.position_interpolation.llama_rope_scaled_monkey_patch + + +Classes +------- + +.. autoapisummary:: + + lmflow.utils.position_interpolation.llama_rope_scaled_monkey_patch.CondenseRotaryEmbedding + + +Functions +--------- + +.. autoapisummary:: + + lmflow.utils.position_interpolation.llama_rope_scaled_monkey_patch.replace_llama_with_condense + + +Module Contents +--------------- + +.. py:class:: CondenseRotaryEmbedding(dim, pi_ratio, ntk_ratio, max_position_embeddings=2048, base=10000, device=None) + + Bases: :py:obj:`torch.nn.Module` + + + .. py:attribute:: ntk_ratio + + + .. py:attribute:: base + + + .. py:attribute:: inv_freq + + + .. 
py:attribute:: pi_ratio + + + .. py:attribute:: max_seq_len_cached + + + .. py:attribute:: t + + + .. py:attribute:: freqs + + + .. py:attribute:: emb + + + .. py:attribute:: dtype + + + .. py:method:: forward(x, seq_len=None) + + +.. py:function:: replace_llama_with_condense(pi_ratio, ntk_ratio) + diff --git a/_sources/autoapi/lmflow/version/index.rst.txt b/_sources/autoapi/lmflow/version/index.rst.txt new file mode 100644 index 000000000..00a4d4a2d --- /dev/null +++ b/_sources/autoapi/lmflow/version/index.rst.txt @@ -0,0 +1,21 @@ +lmflow.version +============== + +.. py:module:: lmflow.version + + +Attributes +---------- + +.. autoapisummary:: + + lmflow.version.__version__ + + +Module Contents +--------------- + +.. py:data:: __version__ + :value: '0.0.7' + + diff --git a/_sources/blogs/benchmark.md.txt b/_sources/blogs/benchmark.md.txt new file mode 100644 index 000000000..c09c74868 --- /dev/null +++ b/_sources/blogs/benchmark.md.txt @@ -0,0 +1,253 @@ +# LMFlow Benchmark: An Automatic Evaluation Framework for Open-Source LLMs + +May 9, 2023 + + +## Introduction + +Evaluation of a chat-style Large Language Model (LLM) has been a huge challenge since the breakthrough of ChatGPT. On the one hand, researchers and engineers need a reliable way to compare two models and decide which model to choose under a certain application scenario. On the other hand, they have to monitor the model performance during the training of an LLM to avoid performance issues such as forgetting. + +Recent work of Vicuna introduces comparison methods of human evaluation, a.k.a. Chatbot Arena. They also pioneered the evaluation method by invoking GPT-4 to compare the outputs of two models. However, those methods require expensive human labeling or GPT-4 API calls, which are neither scalable nor convenient for LLM development. + +In this article, we introduce LMFlow benchmark, a new benchmark which provides a cheap and easy-to-use evaluation framework that can help reflect different aspects of LLMs. We have open-sourced the dataset and the code as well, so that everyone in the LLM community can use those toolkits to evaluate, monitor or compare different LLMs. +## Metric +In our evaluation framework, Negative Log Likelihood (NLL) is used for evaluating LLM +![](../_static/nll.png) + + +which corresponds to the LLM model’s prediction probability over a corpus set given their contexts. If the corpus set itself indicates a certain type of LLM ability, such as multi-round conversation, instruction following, math problem solving, role-playing, then NLL on those corpora can provide quantitative metrics to reflect those abilities. + +![](../_static/benchmark-1.png) + +The key idea behind NLL, is that + +*Generation ability is positively correlated with prediction ability.* + +For instance, an LLM which performs well in essay writing should have no problem understanding and predicting a reference human essay, just like human chess masters performing well at memorizing an endgame on a chessboard. + +Besides NLL, another similar and commonly used metric in NLP is Perplexity (PPL): + +![](../_static/ppl.png) + +Nevertheless, perplexity intrinsically depends on the lengths of the tokenized sequences, which induces unfair comparison between models with different tokenizers. For example, if a model has a smaller vocabulary size, it inherently results in a longer tokenized sequence and a lower token-level perplexity. Thus in all our experiments, we use NLL instead of PPL. 
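
For readers who want to see the metric concretely, here is a minimal sketch of how corpus-level NLL could be computed with Hugging Face Transformers. It only illustrates the definition above and is not the LMFlow evaluation code; the `gpt2` checkpoint and the one-line corpus are placeholders rather than the models and datasets evaluated in this post.

```python
# Minimal illustration of corpus-level NLL under a causal LM.
# The gpt2 checkpoint and the tiny corpus are placeholders only.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "gpt2"  # placeholder checkpoint
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
model.eval()

# Each item is (context, reference output); only the reference tokens are scored.
corpus = [
    ("###Human: Where is the capital of USA?###Assistant:", " Washington, D.C."),
]

total_nll = 0.0
with torch.no_grad():
    for context, reference in corpus:
        # Assumes the context tokenization is a prefix of the joint tokenization,
        # which holds for simple boundaries like the one above.
        context_len = tokenizer(context, return_tensors="pt").input_ids.shape[1]
        full_ids = tokenizer(context + reference, return_tensors="pt").input_ids
        labels = full_ids.clone()
        labels[:, :context_len] = -100              # ignore context tokens in the loss
        out = model(full_ids, labels=labels)
        num_scored = (labels != -100).sum()         # number of reference tokens
        total_nll += (out.loss * num_scored).item() # out.loss is mean NLL per scored token

print(f"Corpus NLL (summed over reference tokens): {total_nll:.2f}")
```

Summing rather than averaging over reference tokens keeps the score tied to the probability of the whole reference string, so it does not depend on how finely each model's tokenizer splits the text, which is the same reason NLL is preferred over perplexity here.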
+ +One huge advantage of NLL evaluation is that it does not require human involvement during the evaluation process. As long as the test reference corpus is given, one can evaluate different aspects of an LLM’s ability automatically. This makes the evaluation of LLM more accessible to researchers. + +Besides its convenience, NLL itself is also a good metric. In our experimental results in commonsense QA, we find that NLL is correlated with QA accuracy when comparing the different finetuned versions of a single model. + +**Table 1: Accuracy results in traditional commonsense QA benchmarks** + + +||winogrande|boolq|arc\_e|hellaswag|piqa|obqa|arc\_c|Average| +| :- | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | +|bloom-3b|58\.7|61\.6|59\.5|52\.7|70\.8|42\.2|30\.6|53\.7| +|bloom-7.1b|64\.4|62\.9|65\.0|59\.6|73\.6|35\.8|33\.4|56\.3| +|opt-6.9b|65\.2|66\.1|65\.6|67\.2|76\.5|37\.4|34\.6|58\.9| +|opt-13b|65\.0|65\.9|67\.1|69\.8|76\.9|39\.0|35\.7|59\.9| +|llama-7b|67\.9|73\.2|67\.3|73\.0|78\.3|42\.4|41\.4|62\.7| +|llama-13b|**70.0**|**68.5**|**74.5**|**76.2**|**79.1**|**42.2**|**44.5**|**65.0**| + +**Table 2: NLL results in corpus of commonsense QA benchmarks** + + +||winogrande|boolq|arc\_e|hellaswag|piqa|obqa|arc\_c|Average| +| :- | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | +|bloom-3b|86\.5|228|86|245|134|64\.5|101\.5|135\.1| +|bloom-7.1b|85|215|81\.5|237|130|62\.5|96|129\.5| +|opt-6.9b|81\.5|200|81\.5|224|125|61|96|124\.1| +|opt-13b|82|198|82\.5|220|125|61\.8|97|123\.7| +|llama-7b|79\.5|167|71\.5|214|121|58|85|113\.7| +|llama-13b|**79**|**153**|**70**|**207**|**119**|**57.3**|**83**|**109.7**| + + +**Figure 1: Correlation between NLL and accuracy on commonsense QA benchmarks** + +![](../_static/benchmark-2.png) + +In the above figure, one can find that QA accuracy is roughly correlated to NLL. Thus NLL is able to reflect the “magnitude” of prediction level difference between models. A huge gap in NLL normally entails a huge performance gap. + +In the following sections, we provide a comprehensive evaluation of currently available LLM models and summarize their performance. Due to page limits, we only demonstrate partial evaluation results in this article. The full results can be found here [LLM Comparison Sheet 0501](https://docs.google.com/spreadsheets/d/1JYh4_pxNzmNA9I0YM2epgRA7VXBIeIGS64gPJBg5NHA/edit?usp=sharing). One can also evaluate their own LLM models using our LMFlow evaluation toolkit (). + +We also introduce our own finetuned model Robin-7b-Chat and compare its performance with some other popular models. One may try our online demo LMFlow-Chat at . + +## Chat Performance +The first aspect of LLM is chat performance, especially multi-round chat. Based on our experimental results, we have the following observations: + +1. *Without finetuning, foundation models do not perform well on chat.* +1. *After finetuning, foundation models can all obtain huge performance gain.* +1. *Our Robin-Chat-7b achieves comparable chat performance with Vicuna-13b.* + + + +**Table 3: NLL on multi-round chat datasets, Lower the better** + + +||En\_multi\_round\_chat|En\_singe\_round\_chat|Average| +| :- | :-: | :-: | :-: | +|Bloom-7.1b|188|213|200\.5| +|Bloom-7.1b-chat|164|181|172\.5| +|Llama-7b|163 |184 |173\.5| +|Vicuna-7b|155 |175 |165| +|Vicuna-13b|150 |170 |160| +|Robin-Chat-7b|**146** |**164** |**155**| + +We also evaluate Vicuna-13b and our Robin-Chat-7b model qualitatively via use cases. It turns out that Robin-Chat-7b’s performance is comparable to Vicuna-13b. 
For example, both models can effortlessly refer to the conversation context with only slight mistakes, where Vicuna-13b sometimes doesn’t count the item correctly, while Robin-Chat-7b introduces only Mexican foods for international cuisine recommendations and omits other choices. + + + + + + + + + + + + + + + + + + + + + + + + + + +**Table 4: Use cases of multi-round chat** + + +|Round|Question|Vicuna-13b|Robin-Chat-7b| +| :- | :-: | :-: | :-: | +|R1|Where is the capital of USA?|The capital of the United States is **Washington, D.C.** (District of Columbia). It is located on the east coast of the country, in the northern part of the state of Maryland. Washington, D.C. is named after George Washington, the first president of the United States. It is a federal district, which means that it is not part of any state, but is instead directly administered by the federal government. The city is home to many national landmarks, including the White House, the Capitol Building, and the Supreme Court. It is also home to many museums, monuments, and other cultural institutions.|The capital of the United States is **Washington, D.C.** (District of Columbia). It is located on the east coast of the USA, between the states of Maryland and Virginia.| +|R2|Can you introduce some popular food there?|

Washington, D.C. is a city with a diverse food scene, and there are many popular restaurants and cuisines to choose from. Some popular foods in Washington, D.C. include:

- American Classics: There are many restaurants in Washington, D.C. that serve classic American dishes such as hamburgers, hot dogs, and apple pie.

- Seafood: Washington, D.C. is located near the Chesapeake Bay, which is known for its fresh seafood. Many restaurants in the city serve dishes such as crab cakes, shrimp, and oysters.

- Ethnic Cuisine: Washington, D.C. has a diverse population, and there are many restaurants that serve food from different parts of the world. Some popular ethnic cuisines include Mexican, Chinese, Japanese, and Korean.

- Barbecue: Washington, D.C. is also known for its barbecue restaurants. Many of these restaurants serve pulled pork, ribs, and brisket.

- **Donuts**: Washington, D.C. is home to the famous "Donut Happy" which is a famous donut shop with a variety of flavors and toppings.

- Street Food: There are also many street food options in Washington, D.C. such as food trucks and stands that serve food such as falafel, kebabs, and tacos.

These are just a few examples of the many types of food that are available in Washington, D.C. There is something for everyone in this city when it comes to food.

|

Washington, D.C. is a diverse city with a wide range of cuisines to choose from. Here are some popular foods that you can try when you visit:

1. Soul food: This style of cuisine originated in the southern United States and is known for its bold flavors and hearty portions. Some popular soul food dishes in Washington, D.C. include fried chicken, collard greens, and macaroni and cheese.

2. Ethiopian cuisine: Ethiopian food is known for its spicy and flavorful dishes, and it is a popular choice in Washington, D.C. Some popular dishes include injera (a sourdough flatbread), wat (a spicy vegetable stew), and kitfo (a spicy meat dish).

3. Seafood: Washington, D.C. is located near the Chesapeake Bay and has easy access to fresh seafood. Some popular seafood dishes include crab cakes, oysters, and seafood pasta.

4. BBQ: Washington, D.C. has a thriving barbecue scene, with many restaurants serving up slow-cooked meats and sides. Some popular BBQ spots include Ben’s Chili Bowl, Pete’s Soul Vegetarian, and Smoke & Barrel.

5. **International cuisine:** Washington, D.C. is a hub for international diplomacy, and as a result, it has a diverse culinary scene with many restaurants serving dishes from around the world. Some popular international cuisines include Japanese, Indian, and **Mexican.**

| +|R3|Can you be more specific about the **fifth item** above?|

Sure, **the fifth item I mentioned, "Barbecue,"** refers to a style of cooking and food that originated in the southern United States. Barbecue is usually made with meat that is slow-cooked over an open flame or heated coals. The meat is often marinated and then sauced, and it is typically served with a variety of sides such as coleslaw, baked beans, and corn on the cob. Barbecue is a popular cuisine in Washington, D.C. and many of the restaurants serve pulled pork, ribs, and brisket. Some of the most popular barbecue restaurants in Washington, D.C. include:

- The Rib Pit

- The Smokehouse

- The BBQ Joint

- The Salt Line

- The Sovereign

These are just a few examples of the many barbecue restaurants in Washington, D.C. that offer delicious and flavorful barbecue dishes.

|

Sure! Here are some specific recommendations for **Mexican food** in Washington, D.C.:

1. Margarita’s - This popular Mexican restaurant in Adams Morgan has been serving up authentic Mexican cuisine for over 30 years. Some of their most popular dishes include their tacos al pastor (grilled marinated pork), enchiladas (meat or cheese filled tortillas), and chiles rellenos (stuffed peppers).

2. El Comal - This cozy little restaurant in the Columbia Heights neighborhood has been serving up traditional Mexican cuisine for over 20 years. Some of their most popular dishes include their mole (a rich, spicy sauce made from a blend of chiles, spices, and chocolate), tacos de carnitas (crispy pork tacos), and chiles rellenos de queso (cheese stuffed peppers).

3. Casa Oaxaca - This upscale Mexican restaurant in the Shaw neighborhood has a menu that features a wide range of traditional and modern dishes. Some of their most popular dishes include their ceviche (raw fish marinated in lime juice and spices), mole negro (a rich, dark mole sauce), and chiles en nogada (a spicy dish made with poblano peppers, walnuts, and honey).

4. Taco King - This popular taco chain has several locations in the Washington, D.C. area, including one in the Adams Morgan neighborhood. They offer a wide range of tacos, burritos, and other Mexican dishes at affordable prices. Some of their most popular items include their carne asada tacos, chicken tacos, and quesadillas.

5. Los Tacos No. 1 - This small, casual taco stand in the Columbia Heights neighborhood has a menu that features a wide range of tacos, burritos, and other Mexican dishes. Some of their most popular items include their adobada tacos (marinated pork tacos), al pastor tacos (grilled marinated pork tacos), and chicken tacos.

| + + +**Experiment Details** + +Our dataset comes from real conversation history between humans and ChatGPT. We collect 183 English conversations with a total of 447 rounds of chats, where the original conversation history is already multi-round. For single-round NLL evaluation, we break a multi-round conversation into multiple single-round text pairs. For example, with a conversation history + + Q1, A1 + + Q2, A2 + + Q3, A3 + +where Q stands for Question, A stands for answer, we can get three single-round examples: + + Context: Q1 + + Output: A1 + + Context: Q1, A1, Q2 + + Output: A2 + + Context: Q1, A1, Q2, A2, Q3 + + Ouptut: A3 + +## CommonSense Performance +Another important aspect of an LLM model is its common sense ability, where a model should acquire a certain level of factual knowledge and utilize them properly under different scenarios. Regarding this aspect of the ability, we found: + +1. *Finetuning on chat dataset results in commonsense degradation.* +1. *Our Robin-Chat-7b model still achieves a competitive performance.* + + + +**Table 5: Accuracy results in commonsense QA benchmarks** + + +||winogrand|boolq|arc\_easy|hellaswag|piqa|obqa|arc\_c|Average| +| :- | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | +|Bloom-7.1b|64\.4|62\.9|65\.0|59\.6|73\.6|35\.8|33\.4|56\.4| +|Bloom-7.1b-chat|60\.3|56\.8|61\.3|58\.7|72\.7|37\.8|38\.7|55\.2| +|Llama-7b|67.9 |73\.2 |67.3 |73.0 |78\.4 |42\.4 |41\.4|62\.7| +|Vicuna-7b|63.7 |77\.4 |63.1 |68.8 |76\.3 |39\.6 |38\.7|61\.1| +|Vicuna-13b|66.2 |79\.9 |64.7 |73.0 |77\.6 |41\.6 |40\.4|63\.3| +|Robin-Chat-7b|64.7 |75\.2 |69.8 |72.4 |76\.6 |39\.0 |42\.9|62\.9| + + + +**Table 6: NLL results in corpus of commonsense QA benchmarks** + + +||winogrand|boolq|arc\_easy|hellaswag|piqa|obqa|arc\_c|Average| +| :- | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | +|Bloom-7.1b|96.0|254|89|266|147|69|106\.5|146\.7| +|Bloom-7.1b-chat|85.0|215|81\.5|237|130|62\.5|96|129\.5| +|Llama-7b|79.5 |167 |71.5 |214 |121 |58 |85|113\.7| +|Vicuna-7b|97.0 |219 |89 |258 |151 |72 |105|141\.6| +|Vicuna-13b|95\.5 |207|88.5 |262 |149 |73 |105|140\.0| +|Robin-Chat-7b|89\.0 |200 |81 |240 |139 |65\.5 |96\.5|130\.1| + +It is surprising to find that finetuning actually leads to commonsense degradation. A more well-known consequence of this issue is hallucination, where models tend to give plausible but factually incorrect or irrelevant answers. One explanation of this phenomenon is that LLMs “forget” the pretrained knowledge during finetuning when attempting to fit the finetune dataset. + +As one can see from the table above, both Vicuna and Robin suffer from this commonsense degradation issue. However, Robin suffers less. We conjecture that this improvement is brought by finetuning with smaller-sized but higher-quality dataset. Vicuna uses sharegpt data with ~900MB size and was finetuned for 3 epochs, while Robin uses multiple sources of dataset with ~600MB size and was only finetuned for 2 epochs. This suggests that in the finetuning stage, data quality plays a critical role. + +This improvement gives Vicuna-13b a run for its money when competing with Robin. The following examples show that Robin-Chat-7b possesses a comparable level of commonsense when compared with Vicuna-13b. + +**Table 7: Use cases of commonsense QA** + + +|Question|Vicuna-13b|Robin-Chat-7b|Note| +| :- | :-: | :-: | :-: | +|

I go to a friend's home but the door is locked. However, I smell coal gas. Should I break the window?

|If you smell coal gas, it is important to get out of the house as quickly as possible and call the fire department or the gas company. Coal gas is a flammable gas that can be dangerous and even deadly if it is not properly ventilated. If the door is locked, you **should not break the window because it could cause a fire or explosion**. Instead, try to find another way to escape the house or call for help. If you cannot leave the house safely, try to stay calm and wait for help to arrive.|No, it is not recommended to break a window in this situation. **Breaking a window can be dangerous and can cause injury or damage to yourself or others**. Additionally, it is **illegal** to break a window to enter a property without permission. Instead, you should try to find another way to enter the home, such as by using a key or by calling the homeowner or a locksmith for assistance. If you are unable to enter the home, you should call the emergency services or the police for assistance. In the meantime, it is important to **avoid using any sources of heat**, such as a lighter or matches, as these can cause a gas leak or explosion. If you smell gas, you should evacuate the area immediately and call the emergency services or the gas company for assistance.|*Breaking the window helps the ventilation and will not cause a gas explosion directly. However, it is indeed illegal and may injure yourself when breaking the window.*| +|Can I bring my pet ferret to that beautiful pacific island of USA to attend ICML 2023?|I'm sorry, but I am **not aware of** the specific policies regarding pets at the ICML 2023 conference or the island you are referring to. It would be best to check with the conference organizers or the local authorities on the island to determine if ferrets are allowed and if there are any necessary procedures or requirements for bringing a pet to the conference.|

It is **not recommended** to bring a pet ferret to the United States, including to attend ICML 2023. Ferrets are not permitted as pets in many areas of the United States, and **it is illegal** to bring them into the country without proper documentation and licensing. Additionally, ferrets can **carry diseases** that are not common in humans but can be dangerous to people, such as toxoplasmosis and salmonellosis. It is important to prioritize the health and safety of yourself and others by not bringing a pet ferret to a crowded event like ICML 2023.

|*ICML 2023 will be held in Hawaii, where ferrets are strictly prohibited as pets because they are potential carriers of the rabies virus.*| + +**Experiment Details** + +Here, we make use of Eleuther AI’s LM evaluation harness repository () to get QA accuracy results. We also evaluate every model’s NLL on the corpora of these benchmarks, using the questions as contexts and the answers as output sentences. + +## Instruction Following +The final aspect we evaluate is instruction following, where a model is expected to faithfully follow the user’s instructions, provided the instruction is ethically and legally appropriate. It turns out that, + +1. *Without finetuning, foundation models do not perform well on instruction following.* +1. *Finetuning on chat dataset helps improve the performance.* +1. *Our Robin-Chat is still comparable with Vicuna-13b.* + +**Table 8: NLL on instruction following chat datasets** + + +||gpt4\_en\_instruction| +| :- | :-: | +|Alpaca|222| +|Llama-7b|214 | +|Vicuna-7b|211 | +|Vicuna-13b|206 | +|Robin-Chat-7b|**180** | + +The following examples show that Robin-Chat-7b generates responses comparable to those of Vicuna-13b on instruction-following tasks. + +**Table 9: Use cases of instruction following** + + +|Question|Vicuna-13b|Robin-Chat-7b| +| :- | :-: | :-: | +|Can you write quicksort?|

Yes, here is a simple implementation of the quicksort algorithm in Python:

def quicksort(arr):

` `if len(arr) <= 1:

` `return arr

` `pivot = arr[len(arr) // 2]

` `left = [x for x in arr if x < pivot]

` `middle = [x for x in arr if x == pivot]

` `right = [x for x in arr if x > pivot]

` `return quicksort(left) + middle + quicksort(right)

This implementation uses the middle element of the array as the pivot, and then partitions the array into three sub-arrays: one containing all elements that are less than the pivot, one containing all elements that are equal to the pivot, and one containing all elements that are greater than the pivot. It then recursively applies the quicksort algorithm to the left and right sub-arrays, and returns the concatenation of the sorted left sub-array, the unsorted middle sub-array, and the sorted right sub-array.

This implementation has a time complexity of O(n log n) due to the recursive calls, although it could potentially be faster in practice if the pivot is carefully chosen.

|

Sure, here is a simple implementation of the quicksort algorithm in Python:

def quicksort(arr):

` `if len(arr) <= 1:

` `return arr

` `pivot = arr[len(arr) // 2]

` `left = [x for x in arr if x < pivot]

` `middle = [x for x in arr if x == pivot]

` `right = [x for x in arr if x > pivot]

` `return quicksort(left) + middle + quicksort(right)

This implementation chooses the middle element of the array as the pivot, and then partitions the array into three parts: elements less than the pivot, elements equal to the pivot, and elements greater than the pivot. It then recursively applies the quicksort algorithm to the left and right subarrays, and concatenates the results with the middle subarray to produce the final sorted array.

Note that this implementation uses a single pass through the array, which is the most efficient way to sort an array using quicksort.

| +|Instruction: Find synonyms for the given word. You need to write down how the provided synonyms differ from the original word in terms of meaning, usage, etc. adversity|

Synonyms for "adversity" include:

- Difficulty

- Hardship

- Trouble

- Misfortune

- Adversity

- Difficulties

- Hard times

All of these words refer to challenging or unpleasant circumstances or situations. However, "adversity" specifically connotes a difficult or hostile situation that is caused by external factors, such as an enemy or a natural disaster. "Difficulty" and "hardship" can be caused by external or internal factors, and can refer to a wide range of challenges, from mild inconveniences to major setbacks. "Trouble" often connotes a specific type of difficulty or distress, such as financial trouble or legal trouble. "Misfortune" specifically refers to bad luck or ill fate, while "hard times" refers to a general period of economic or social distress.

|

1. Hardship

2. Struggle

3. Difficulty

4. Oppression

5. Trial

6. Affliction

7. Distress

8. Calamity

9. Tribulation

10. Hardship

Adversity refers to difficult or challenging circumstances or conditions. Hardship, struggle, difficulty, oppression, trial, affliction, distress, and calamity are all words that can be used to describe adversity, but they often imply a more specific or intense situation. For example, hardship can refer to a general difficulty or challenge, while struggle implies a more intense battle or conflict. Difficulty and oppression imply a more specific obstacle or injustice, while trial and affliction suggest a more specific test or suffering. Trial can also refer to a legal proceeding, while tribulation implies a more general time of suffering or persecution.

| + +**Experiment Details** + +In order to make evaluations for models’ instruction following ability, we collected GPT-4 instruction data from the project “Instruction Tuning with GPT-4”, which provides many instruction tuning data labeled by GPT-4. We sample 1,000 English data and manually filtered examples with the following issues, where 767 effective samples remain after the filtering: + +1. Long response with too many nonsense words +1. Incomplete input texts +1. Specific domains involving chemistry/biology, where most LLM models do not possess the knowledge and always fail + +## Conclusion +In this article, we introduce LMFlow’s evaluation framework, which uses NLL metric to reflect LLM models’ ability. NLL provides a good metric to evaluate different aspects of a LLM model. According to our evaluation results, Robin-7b achieves on-par performance when compared with Vicuna-13b. As our Robin-7b model is finetuned with different sources of dataset instead of sharegpt only, this shows that Vicuna can be further improved or surpassed with smaller-sized models and better dataset. + +The checkpoint of Robin-7b is now available for engineers and researchers to download and use (). Its effectiveness demonstrates that a multi-aspect evaluation is indeed essential to the development of LLMs. + +## References +Vicuna Chatbot Arena: + +lm-evaluation-harness: + +LMFlow: diff --git a/_sources/blogs/index.md.txt b/_sources/blogs/index.md.txt new file mode 100644 index 000000000..d71f731f2 --- /dev/null +++ b/_sources/blogs/index.md.txt @@ -0,0 +1,13 @@ +# Blogs + +## 2023 + + +```{toctree} +:maxdepth: 1 + +benchmark +``` + + + diff --git a/_sources/examples/DATASETS.md.txt b/_sources/examples/DATASETS.md.txt new file mode 100644 index 000000000..1f86bc646 --- /dev/null +++ b/_sources/examples/DATASETS.md.txt @@ -0,0 +1,384 @@ +# Dataset + +- [Dataset](#dataset) + - [Dataset Format in General](#dataset-format-in-general) + - [Supported Dataset and Detailed Formats](#supported-dataset-and-detailed-formats) + - [Conversation](#conversation) + - [Data Format](#data-format) + - [Conversation Template](#conversation-template) + - [Customize Conversation Template](#customize-conversation-template) + - [TextOnly](#textonly) + - [Text2Text](#text2text) + - [Paired Conversation](#paired-conversation) + +We provide several available datasets under `data`. You may download them all by running: +```sh +cd data && ./download.sh all && cd - +``` +You can replace `all` with a specific dataset name to only download that dataset (e.g. `./download.sh alpaca`). + +Customized datasets are strongly encouraged, since this way users can apply +their own prompt engineering techniques over various source datasets. As long +as the generated dataset following the format below, they can be accepted as +the input of our pipelines :hugs: + + +## Dataset Format in General + +To specify the input for model finetune, users can provide a list of `.json` +files under a specified dataset directory. For example, + +```sh +|- path_to_dataset + |- data_1.json + |- data_2.json + |- another_data.json + |- ... +``` + +For inference, we currently only support a single `.json` file. 
+ +Each json file shall have the following format (three instances with four keys +for example), + +```json +{ + "type": "TYPE", + "instances": [ + { + "KEY_1": "VALUE_1.1", + "KEY_2": "VALUE_1.2", + "KEY_3": "VALUE_1.3", + "KEY_4": "VALUE_1.4", + }, + { + "KEY_1": "VALUE_2.1", + "KEY_2": "VALUE_2.2", + "KEY_3": "VALUE_2.3", + "KEY_4": "VALUE_2.4", + }, + { + "KEY_1": "VALUE_3.1", + "KEY_2": "VALUE_3.2", + "KEY_3": "VALUE_3.3", + "KEY_4": "VALUE_3.4", + }, + ] +} +``` + +where the `TYPE` indicates the dataset type and defines the set of keys +`{ KEY_1, KEY_2, ... }` and their corresponding interpretations. The list of +supported types are listed as follows. + +## Supported Dataset and Detailed Formats + +### Conversation + +#### Data Format + +Conversational data are commonly used in sft process. We currently support conversational data in ShareGPT format: + +````{dropdown} A conversation dataset +```json +{ + "type": "conversation", + "instances": [ + { + "conversation_id": "CONVERSATION_ID", + "system": "SYSTEM_PROPMT", + "tools": ["TOOL_DESCRIPTION_1","TOOL_DESCRIPTION_2","TOOL_DESCRIPTION_X"], + "messages": [ + { + "role": "user", + "content": "USER_INPUT_1" + }, + { + "role": "assistant", + "content": "ASSISTANT_RESPONSE_1" + }, + { + "role": "user", + "content": "USER_INPUT_2" + }, + { + "role": "assistant", + "content": "ASSISTANT_RESPONSE_2" + } + ] + }, + { + "conversation_id": "CONVERSATION_ID", + "system": "SYSTEM_PROPMT", + "tools": ["TOOL_DESCRIPTION_1"], + "messages": [ + { + "role": "user", + "content": "USER_INPUT_1" + }, + { + "role": "assistant", + "content": "ASSISTANT_RESPONSE_1" + } + ] + } + ] +} +``` +```` +Data types: +- `conversation_id`: `Optional[Any]`. An identifier for the conversation. `conversation_id` is only for convience of tracking the conversation and will not be used in the pipeline. +- `system`: `Optional[string]`. A system prompt that is used to start the conversation. +- `tools`: `Optional[List[string]]`. A list of tools that are used in the conversation. +- `messages`: `List[Dict]`. A list of messages in the conversation. Each message contains the following fields: + - `role`: `string`. The role of the message. It can be either `user` or `assistant`. + - `content`: `string`. The content of the message. + +> We are working on supporting customized message keys and role names. Please stay tuned. + +Tips: +- Please make sure the messages are: + 1. Start with an user message. + 2. In the correct order. The pipeline will not check the order of the messages. + 3. In pairs of user and assistant (i.e., the length of the messages should be even). If the conversation ends with the user, the pipeline will trim the last user message. + 4. Make sure the `content`s are not empty. If the `content` is empty, the pipeline will add a space to it. + +#### Conversation Template + +Conversations should be formatted before feeding into the model. As of now, we've preset the conversation template for following models: + +| Template Name | Filled Example | Detailed Template | +| ------------- | -------------- | ----------------- | +| `chatglm3` | `[gMASK]sop<\|system\|>`
` You are a chatbot developed by LMFlow team.<\|user\|>`
` Who are you?<\|assistant\|>`
` I am a chatbot developed by LMFlow team.<\|user\|>`
` How old are you?<\|assistant\|>`
` I don't age like humans do. I exist as a piece of software, so I don't have a concept of age in the traditional sense.` | [Link](./supported_conversation_template.md#chatglm3) | +| `chatml` | `<\|im_start\|>system`
`You are a chatbot developed by LMFlow team.<\|im_end\|>`
`<\|im_start\|>user`
`Who are you?<\|im_end\|>`
`<\|im_start\|>assistant`
`I am a chatbot developed by LMFlow team.<\|im_end\|>`
`<\|im_start\|>user`
`How old are you?<\|im_end\|>`
`<\|im_start\|>assistant`
`I don't age like humans do. I exist as a piece of software, so I don't have a concept of age in the traditional sense.<\|im_end\|>`
| [Link](./supported_conversation_template.md#chatml) | +| `deepseek` | `<|begin▁of▁sentence|>You are a chatbot developed by LMFlow team.`

`User: Who are you?`

`Assistant: I am a chatbot developed by LMFlow team.<|end▁of▁sentence|>User: How old are you?`

`Assistant: I don't age like humans do. I exist as a piece of software, so I don't have a concept of age in the traditional sense.<|end▁of▁sentence|>` | [Link](./supported_conversation_template.md#deepseek) | +| `gemma` | `You are a chatbot developed by LMFlow team.<start_of_turn>user`
`Who are you?<end_of_turn>`
`<start_of_turn>model`
`I am a chatbot developed by LMFlow team.<end_of_turn>`
`<start_of_turn>user`
`How old are you?<end_of_turn>`
`<start_of_turn>model`
`I don't age like humans do. I exist as a piece of software, so I don't have a concept of age in the traditional sense.<end_of_turn>`
| [Link](./supported_conversation_template.md#gemma) | +| `internlm2` | `<\|im_start\|>system`
`You are a chatbot developed by LMFlow team.<\|im_end\|>`
`<\|im_start\|>user`
`Who are you?<\|im_end\|>`
`<\|im_start\|>assistant`
`I am a chatbot developed by LMFlow team.<\|im_end\|>`
`<\|im_start\|>user`
`How old are you?<\|im_end\|>`
`<\|im_start\|>assistant`
`I don't age like humans do. I exist as a piece of software, so I don't have a concept of age in the traditional sense.<\|im_end\|>`
| [Link](./supported_conversation_template.md#internlm2) | +| `llama3` | `<\|begin_of_text\|><\|start_header_id\|>system<\|end_header_id\|>`

`You are a chatbot developed by LMFlow team.<\|eot_id\|><\|start_header_id\|>user<\|end_header_id\|>`

`Who are you?<\|eot_id\|><\|start_header_id\|>assistant<\|end_header_id\|>`

`I am a chatbot developed by LMFlow team.<\|eot_id\|><\|start_header_id\|>user<\|end_header_id\|>`

`How old are you?<\|eot_id\|><\|start_header_id\|>assistant<\|end_header_id\|>`

`I don't age like humans do. I exist as a piece of software, so I don't have a concept of age in the traditional sense.<\|eot_id\|>` | [Link](./supported_conversation_template.md#llama-3) | +| `llama2` | `[INST] <<SYS>>`
`You are a chatbot developed by LMFlow team.`
`<</SYS>>`

`Who are you? [/INST] I am a chatbot developed by LMFlow team.
[INST] How old are you? [/INST] I don't age like humans do. I exist as a piece of software, so I don't have a concept of age in the traditional sense.` | [Link](./supported_conversation_template.md#llama-2) | +| `phi3` | `<\|system\|>`
`You are a chatbot developed by LMFlow team.<\|end\|>`
`<\|user\|>`
`Who are you?<\|end\|>`
`<\|assistant\|>`
`I am a chatbot developed by LMFlow team.<\|end\|>`
`<\|user\|>`
`How old are you?<\|end\|>`
`<\|assistant\|>`
`I don't age like humans do. I exist as a piece of software, so I don't have a concept of age in the traditional sense.<\|end\|>`
`<\|endoftext\|>` | [Link](./supported_conversation_template.md#phi-3) | +| `qwen2` | `<\|im_start\|>system`
`You are a chatbot developed by LMFlow team.<\|im_end\|>`
`<\|im_start\|>user`
`Who are you?<\|im_end\|>`
`<\|im_start\|>assistant`
`I am a chatbot developed by LMFlow team.<\|im_end\|>`
`<\|im_start\|>user`
`How old are you?<\|im_end\|>`
`<\|im_start\|>assistant`
`I don't age like humans do. I exist as a piece of software, so I don't have a concept of age in the traditional sense.<\|im_end\|>`
| [Link](./supported_conversation_template.md#qwen-2) | +| `yi` | Same as `chatml` | [Link](./supported_conversation_template.md#yi) | +| `yi1_5`| `You are a chatbot developed by LMFlow team.<\|im_start\|>user`
`Who are you?<\|im_end\|>`
`<\|im_start\|>assistant`
`I am a chatbot developed by LMFlow team.<\|im_end\|>`
`<\|im_start\|>user`
`How old are you?<\|im_end\|>`
`<\|im_start\|>assistant`
`I don't age like humans do. I exist as a piece of software, so I don't have a concept of age in the traditional sense.<\|im_end\|>`
| [Link](./supported_conversation_template.md#yi-15) | +| `zephyr` | `<\|system\|>`
`You are a chatbot developed by LMFlow team.
`
`<\|user\|>`
`Who are you?
`
`<\|assistant\|>`
`I am a chatbot developed by LMFlow team.`
`<\|user\|>`
`How old are you?`
`<\|assistant\|>`
`I don't age like humans do. I exist as a piece of software, so I don't have a concept of age in the traditional sense.`
| [Link](./supported_conversation_template.md#zephyr) | + +Passing the template name to the `--conversation_template` argument to apply the corresponding conversation template: +```sh +# scripts/run_finetune.sh +# ... +deepspeed ${deepspeed_args} \ + examples/finetune.py \ + --model_name_or_path meta-llama/Llama-2-7b-chat-hf \ + --dataset_path ${dataset_path} \ + --conversation_template llama2 \ +# ... +``` + +`````{admonition} Formatted Dataset +:class: info + +For dataset that system prompts, tool prompts and templates are already applied (like the one below), user can run the finetune shell by passing `empty` or `empty_no_special_tokens` to the `--conversation_template` argument. `empty` template will add a bos token to the beginning of every round of conversation as well as a eos token to the end of every round of conversation. `empty_no_special_tokens` will not add any special tokens to the conversation, just concatenates the user and assistant messages. +````{dropdown} A formatted dataset +```json +{ + "type": "conversation", + "instances": [ + { + "messages": [ + { + "role": "user", + "content": "[INST] <>\nYou are a helpful assistant.\n<>\n\nHello! [/INST]" + }, + { + "role": "assistant", + "content": "Hi, how are you?" + }, + { + "role": "user", + "content": "[INST] Good. [/INST]" + }, + { + "role": "assistant", + "content": "Glad to hear that." + } + ] + }, + { + "messages": [ + { + "role": "user", + "content": "[INST] <>\nYou are a helpful assistant.\n<>\n\nWhat's the weather like now? [/INST]" + }, + { + "role": "assistant", + "content": "I'm sorry for any confusion, but as an AI, I don't have access to real-time data such as current weather conditions." + } + ] + } + ] +} +``` +```` +````` + +#### Customize Conversation Template + +Please refer to the [Customize Conversation Template](./customize_conversation_template.md) for more details. + + +### TextOnly + +This is the most common dataset type, which only contains raw texts in each +sample. This type of dataset can be used as the training set for text decoder +models, or the input of decoder models / encoder-decoder models. Its format is +as follows (three instances for example), + +````{dropdown} A textonly dataset +```json +{ + "type": "text_only", + "instances": [ + { "text": "SAMPLE_TEXT_1" }, + { "text": "SAMPLE_TEXT_2" }, + { "text": "SAMPLE_TEXT_3" }, + ] +} +``` +```` + +For example, `data/example_dataset/train/train_50.json` has the aboved format. + + +### Text2Text + +This is the dataset type mostly used for inferencing, which contains a pair of +texts in each sample. This type of dataset can be used as the training set for +text encoder-decoder models, or question-answer pair for evaluating model +inferences. Its format is as follows (three instances for example): + +````{dropdown} A text2text dataset +```json +{ + "type": "text2text", + "instances": [ + { + "input": "SAMPLE_INPUT_1", + "output": "SAMPLE_OUTPUT_1", + }, + { + "input": "SAMPLE_INPUT_2", + "output": "SAMPLE_OUTPUT_2", + }, + { + "input": "SAMPLE_INPUT_3", + "output": "SAMPLE_OUTPUT_3", + }, + ] +} +``` +```` + +For example, `data/example_dataset/test/test_13.json` has the aboved format. + + +### Paired Conversation + +```{admonition} **Work in Progress** +:class: info + +We are working on paired conversation dataset and will update it soon. +``` + +This type of dataset are commonly used for alignment such as [reward modeling](https://arxiv.org/abs/2203.02155), +[Direct Preference Optimization (DPO)](https://arxiv.org/abs/2305.18290), etc. 
For requirements of the conversations, +please refer to [conversation data](#conversation). + +````{dropdown} A paired conversation dataset +```json +{ + "type": "paired_conversation", + "instances": [ + { + "chosen": { + "conversation_id": "CONVERSATION_ID", + "system": "SYSTEM_PROPMT", + "tools": ["TOOL_DESCRIPTION_1","TOOL_DESCRIPTION_2","TOOL_DESCRIPTION_3"], + "messages": [ + { + "role": "user", + "content": "USER_INPUT_1" + }, + { + "role": "assistant", + "content": "ASSISTANT_RESPONSE_1_GOOD" + }, + { + "role": "user", + "content": "USER_INPUT_2" + }, + { + "role": "assistant", + "content": "ASSISTANT_RESPONSE_2_GOOD" + } + ] + }, + "rejected": { + "conversation_id": "CONVERSATION_ID", + "system": "SYSTEM_PROPMT", + "tools": ["TOOL_DESCRIPTION_1","TOOL_DESCRIPTION_2","TOOL_DESCRIPTION_3"], + "messages": [ + { + "role": "user", + "content": "USER_INPUT_1" + }, + { + "role": "assistant", + "content": "ASSISTANT_RESPONSE_1_BAD" + }, + { + "role": "user", + "content": "USER_INPUT_2" + }, + { + "role": "assistant", + "content": "ASSISTANT_RESPONSE_2_BAD" + } + ] + } + }, + { + "chosen": { + "conversation_id": "CONVERSATION_ID", + "system": "SYSTEM_PROPMT", + "tools": ["TOOL_DESCRIPTION_1"], + "messages": [ + { + "role": "user", + "content": "USER_INPUT_1" + }, + { + "role": "assistant", + "content": "ASSISTANT_RESPONSE_1_GOOD" + } + ] + }, + "rejected": { + "conversation_id": "CONVERSATION_ID", + "system": "SYSTEM_PROPMT", + "tools": ["TOOL_DESCRIPTION_1"], + "messages": [ + { + "role": "user", + "content": "USER_INPUT_1" + }, + { + "role": "assistant", + "content": "ASSISTANT_RESPONSE_1_BAD" + } + ] + } + } + ] +} +``` +```` \ No newline at end of file diff --git a/_sources/examples/TASK_GUIDE.md.txt b/_sources/examples/TASK_GUIDE.md.txt new file mode 100644 index 000000000..afbfd3ca8 --- /dev/null +++ b/_sources/examples/TASK_GUIDE.md.txt @@ -0,0 +1,132 @@ +# LMFlow Benchmark Guide + +We support two ways to add evaluation settings in our repo, `NLL Task Setting` and `LM-Evaluation Task Setting`. Below are the details of them: + +# 1. NLL Task Setting +Users can easily create new tasks and evaluate their datasets on +the provide `nll (Negative Log Likelihood)` metric. + +## Setup + +Fork the main repo, clone it, and create a new branch with the name of +your task, and install the following: + +```bash +# After forking... +git clone https://github.com//LMFlow.git +cd LMFlow +git checkout -b +conda create -n lmflow python=3.9 -y +conda activate lmflow +conda install mpi4py +pip install -e . +``` +## Create Your Task Dataset File +We provide several available datasets under `data` after running +```sh +cd data && ./download.sh && cd - +``` + +You can refer to some given evaluation dataset files and create your own. +Also, you may refer to our guide on +[DATASET](https://optimalscale.github.io/LMFlow/examples/DATASETS.html). + +In this step, you will need to decide your answer type like `text2text` +or `text_only` (Notice that the current `nll` implementation only supports these +two answer types). We will note the chosen answer type as ``. + +After preparing your own `DATASET` file, you can put it under `data` dir +and make a `TASK` dir. + +```bash +mkdir +mv +``` + +## Task Registration + +Note the path of your dataset, `data//`. 
+ +Open the file `examples/benchmarking.py`, add your task's info into +`LOCAL_DATSET_GROUP_MAP`, `LOCAL_DATSET_MAP`, `LOCAL_DATSET_ANSWERTYPE_MAP` + +In `LOCAL_DATSET_MAP`, you will need to specify your `DATASET` files' path: + +```python +LOCAL_DATSET_MAP ={ + "...":"...", + "":"data//", +} +``` + +In `LOCAL_DATSET_ANSWERTYPE_MAP`, you will need to specify your task's +``: + +```python +LOCAL_DATSET_ANSWERTYPE_MAP ={ + "...":"...", + "":", +} +``` + +If you only have one task, you can add key-value pair like `"":""` +in `LOCAL_DATSET_GROUP_MAP`: +```python +LOCAL_DATSET_GROUP_MAP ={ + "...":"...", + "":"", +} +``` + + +If you want to combine several tasks, you may first specify a +combination name `` and add key-value pair like +`"":",,.."`in `LOCAL_DATSET_GROUP_MAP`. + +Remember to separate TASK by `,`: +```python +LOCAL_DATSET_GROUP_MAP ={ + "...":"...", + "":",,..", +} +``` + +After finishing changing these items, you can run your own `` like: + +```bash +deepspeed examples/benchmarking.py \ + --answer_type \ + --use_ram_optimized_load False \ + --model_name_or_path ${model_name} \ + --dataset_name data//\ + --deepspeed examples/ds_config.json \ + --metric nll \ + --prompt_structure "###Human: {input}###Assistant:" \ + | tee ${log_dir}/train.log \ + 2> ${log_dir}/train.err +``` + +# 2. LM-Evaluation Task Setting + +We integrate [EleutherAI/lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness) into +`benchamrk.py` by directly executing the evaluate commands. Users +can also use their evaluation by simply changing two items in +`` of `examples/benchmarking.py`. + +Please refer to Eleuther's +[task-table](https://github.com/EleutherAI/lm-evaluation-harness/blob/master/docs/task_table.md) +to get exact `` name. + +Similarly, you can combine several tasks, you may first specify a +combination name `` and add key-value pair like +`"":",,.."`in `LM_EVAL_DATASET_MAP`. + +Also, remember to separate TASK by `,`: + +```python +LM_EVAL_DATASET_MAP ={ + "...":"...", + "":",,..", +} +``` + diff --git a/_sources/examples/checkpoints.md.txt b/_sources/examples/checkpoints.md.txt new file mode 100644 index 000000000..4c36eac49 --- /dev/null +++ b/_sources/examples/checkpoints.md.txt @@ -0,0 +1,31 @@ +# Checkpoints + +In general, you can directly load from checkpoints by using `--model_name_or_path`. However, the LLaMA case is slightly different due to the copyright issue. + + +## LLaMA Checkpoint + +1. First, you need to get the access of LLaMA model from [facebookresearch/llama](https://github.com/facebookresearch/llama). Download the official checkpoints and save them into `${llama-path}`. + +2. Second, convert the official checkpoints `${llama-path}` to HuggingFace supported checkpoints `${llama-hf-path}` by running + + `python ./scripts/convert_llama_weights_to_hf.py --input_dir ${llama-path} --model_size 7B --output_dir ${llama-hf-path}/llama-7b-hf` + +3. Then you are good to go by setting the checkpoint path to `${llama-hf-path}/llama-7b-hf`. Enjoy it! + +4. (optional) Now you have the original llama-7b-hf pretrained model. With +```sh +cd output_models && ./download.sh all && cd - +``` +You can obtain the model difference finetuned by ours. 
By a way similar to `./scripts/run_evaluation_with_lora.sh`, +```sh +CUDA_VISIBLE_DEVICES=0 \ + deepspeed examples/evaluate.py \ + --answer_type text \ + --model_name_or_path ${llama-hf-path}/llama-7b-hf \ + --lora_model_path output_models/${llama-model-diff-path} \ + --dataset_path data/alpaca/test \ + --prompt_structure "Input: {input}" \ + --deepspeed examples/ds_config.json +``` +You can now evaluate with the finetuned llama model. \ No newline at end of file diff --git a/_sources/examples/customize_conversation_template.md.txt b/_sources/examples/customize_conversation_template.md.txt new file mode 100644 index 000000000..a129f0f3f --- /dev/null +++ b/_sources/examples/customize_conversation_template.md.txt @@ -0,0 +1,115 @@ +# Customize Conversation Template + +> For beginners: Why template? +> Almost all LLMs today do a simple job - predict the next "word". To make the interaction between user and model smoother, developers use tricks: they add special "words" to the input text (at back-end, thus invisible to the user when using services like ChatGPT) to "tell" the model what user had said before, and ask the model to respond like an assistant. These "hidden words" are called "template". + +We provide the flexibility to customize the conversation template. You can customize your own conversation template by following the steps below: + +### 1. Decompose your conversations +Say you want to make the conversations between user and assistant look like: + +``` +System: +You are a chatbot developed by LMFlow team. + +User: +Who are you? + +Assistant: +I am a chatbot developed by LMFlow team. + +User: +How old are you? + +Assistant: +I don't age like humans do. I exist as a piece of software, so I don't have a concept of age in the traditional sense. +``` + +It is easy to abstract the format for each message: +- System message: `System:\n{{content}}\n\n` +- User message: `User:\n{{content}}\n\n` +- Assistant message: `Assistant:\n{{content}}\n\n` + +Also, we have a bos token at the beginning of the conversation session. + +### 2. Choose proper `Formatter` +Recall the requirements for a conversation dataset: +> - `system`: `Optional[string]`. +> - `tools`: `Optional[List[string]]`. +> - `messages`: `List[Dict]`. +> - `role`: `string`. +> - `content`: `string`. + +System message, user message, and assistant message are strings thus we can use `StringFormatter` for them. + +### 3. Build the template +All preset templates are located at `src/lmflow/utils/conversation_template`. + +Within the template file, define your own template like: + +```python +from .base import StringFormatter, TemplateComponent, ConversationTemplate + + +YOUR_TEMPLATE = ConversationTemplate( + template_name='your_template_name', + user_formatter=StringFormatter( + template=[ + TemplateComponent(type='string', content='User:\n{{content}}\n\n') + ] + ), + assistant_formatter=StringFormatter( + template=[ + TemplateComponent(type='string', content='Assistant:\n{{content}}\n\n'), + TemplateComponent(type='token', content='eos_token') # this will add the eos token at the end of every assistant message + # please refer to the docstring of the `TemplateComponent` class to + # see the difference between different types of components. 
+ ] + ), + system_formatter=StringFormatter( + template=[ + TemplateComponent(type='string', content='System:\n{{content}}\n\n') + ] + ) + # For models that has ONLY ONE bos token at the beginning of + # a conversation session (not a conversation pair), user can + # specify a special starter to add that starter to the very + # beginning of the conversation session. + # eg: + # llama-2: and at every pair of conversation + # v.s. + # llama-3: <|begin_of_text|> only at the beginning of a session + special_starter=TemplateComponent(type='token', content='bos_token'), + + # Similar to the special starter... (just for illustration, commented out + # since it is not necessary for our purposed template above) + # special_stopper=TemplateComponent(type='token', content='eos_token') +) +``` + +Feel free to create your own template by inheriting the `ConversationTemplate` class. Llama-2 v.s. llama-3 would be a good examples to refer to. + +### 4. Register your template +After defining your own template, you need to register it in the `src/lmflow/utils/conversation_template/__init__.py` file. + +```python +# ... +from .your_template_file import YOUR_TEMPLATE + + +PRESET_TEMPLATES = { + #... + 'your_template_name': YOUR_TEMPLATE, +} +``` + +### 5. Use your template +You are all set! Specify the template name in, for example, your finetune script: + +```bash +./scripts/run_finetune.sh \ + --model_name_or_path path_to_your_model \ + --dataset_path your_conversation_dataset \ + --conversation_template your_template_name \ + --output_model_path output_models/your_model +``` \ No newline at end of file diff --git a/_sources/examples/finetuning.md.txt b/_sources/examples/finetuning.md.txt new file mode 100644 index 000000000..821a9a594 --- /dev/null +++ b/_sources/examples/finetuning.md.txt @@ -0,0 +1,101 @@ +# Finetuning + +## Full Parameters + +Full training updates all the parameters to finetune a language model. +Here is an example to finetune a GPT-2 base model. + +```sh +cd data && ./download.sh alpaca && cd - + +./scripts/run_finetune.sh \ + --model_name_or_path gpt2 \ + --dataset_path data/alpaca/train_conversation \ + --output_model_path output_models/finetuned_gpt2 +``` + +```{admonition} Conversation Template +:class: tip + +For conversation dataset, specify a conversation template for better performance by adding `--conversation_template` to the command. +``` + +````{dropdown} Llama-3-8B conversation dataset example +```sh +cd data && ./download.sh alpaca && cd - + +./scripts/run_finetune.sh \ + --model_name_or_path meta-llama/Meta-Llama-3-8B \ + --dataset_path data/alpaca/train_conversation \ + --conversation_template llama3 \ + --output_model_path output_models/finetuned_llama3_8b +``` +```` + + +## Layerwise Importance Sampled AdamW (LISA) + +[LISA](https://arxiv.org/abs/2403.17919) is a memory-efficient finetuning algorithm that allows tradeoff between memory and the number of randomly unfreezed layers. This script currently is only tested in single gpus. Please stay tuned for our latest updates! 
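
To give a feel for what the `--lisa_activated_layers` and `--lisa_interval_steps` flags used below control, here is a toy sketch of the layerwise sampling idea: freeze all decoder blocks and periodically unfreeze a small random subset during training. This is only an illustration under simplifying assumptions (a `gpt2` placeholder model and a dummy batch), not the implementation behind `run_finetune_with_lisa.sh`.

```python
# Toy sketch of the LISA idea only; NOT the code behind run_finetune_with_lisa.sh.
# The gpt2 model and the random batch are placeholders for illustration.
import random

import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("gpt2")   # placeholder model
blocks = model.transformer.h                           # GPT-2 decoder blocks
lisa_activated_layers = 1                              # blocks unfrozen at a time
lisa_interval_steps = 20                               # how often to resample

def resample_trainable_blocks():
    """Freeze every decoder block, then unfreeze a random subset of them."""
    for block in blocks:
        for p in block.parameters():
            p.requires_grad = False
    for block in random.sample(list(blocks), lisa_activated_layers):
        for p in block.parameters():
            p.requires_grad = True

optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5)
for step in range(100):                                # dummy training loop
    if step % lisa_interval_steps == 0:
        resample_trainable_blocks()
    batch = torch.randint(0, model.config.vocab_size, (1, 128))
    loss = model(input_ids=batch, labels=batch).loss   # causal LM loss
    loss.backward()   # gradients flow only to the unfrozen blocks (plus embeddings/head)
    optimizer.step()
    optimizer.zero_grad()
```

Only the sampled blocks (plus the always-trainable embeddings and head) accumulate gradients and optimizer state at any time, which is where the memory saving comes from; the LMFlow script below exposes the two knobs as command-line flags.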
+ +```sh +cd data && ./download.sh alpaca && cd - + +./scripts/run_finetune_with_lisa.sh \ + --model_name_or_path meta-llama/Llama-2-7b-hf \ + --dataset_path data/alpaca/train_conversation \ + --output_model_path output_models/finetuned_llama2_7b \ + --lisa_activated_layers 1 \ + --lisa_interval_steps 20 +``` + +````{dropdown} Llama-2-7B conversation dataset example +```sh +cd data && ./download.sh alpaca && cd - + +./scripts/run_finetune_with_lisa.sh \ + --model_name_or_path meta-llama/Llama-2-7b-hf \ + --dataset_path data/alpaca/train_conversation \ + --conversation_template llama2 \ + --output_model_path output_models/finetuned_llama2_7b_lisa \ + --lisa_activated_layers 1 \ + --lisa_interval_steps 20 +``` +```` + + +## Low-Rank Adaptation (LoRA) + +LoRA is a parameter-efficient finetuning algorithm and is more efficient than full finetuning. + +```sh +cd data && ./download.sh alpaca && cd - + +./scripts/run_finetune_with_lora.sh \ + --model_name_or_path facebook/galactica-1.3b \ + --dataset_path data/alpaca/train_conversation \ + --output_lora_path output_models/finetuned_galactica_lora +``` + +````{admonition} Merge LoRA Weight +:class: tip + +Merge LoRA weight and the base model into one using: +```sh +./scripts/run_merge_lora.sh \ + --model_name_or_path Qwen/Qwen1.5-1.8B \ + --lora_model_path output_models/lora \ + --output_model_path output_models/lora_merged \ +``` +```` + +````{dropdown} Llama-2-7B conversation dataset example +```sh +cd data && ./download.sh alpaca && cd - + +./scripts/run_finetune_with_lora.sh \ + --model_name_or_path meta-llama/Llama-2-7b-hf \ + --dataset_path data/alpaca/train_conversation \ + --conversation_template llama2 \ + --output_model_path output_models/finetuned_llama2_7b_lora \ +``` +```` \ No newline at end of file diff --git a/_sources/examples/index.md.txt b/_sources/examples/index.md.txt new file mode 100644 index 000000000..30dc1d335 --- /dev/null +++ b/_sources/examples/index.md.txt @@ -0,0 +1,57 @@ +# Examples + +We provide several examples to show how to use our package in your problem. + +## Data preparation + +```{toctree} +:maxdepth: 3 + +DATASETS +``` + +```{toctree} +:maxdepth: 3 + +checkpoints +``` + +## Finetuning + +For SFT, + +```{toctree} +:maxdepth: 3 + +finetuning +``` + + +For alignment process, + +```{toctree} +:maxdepth: 3 + +reward_modeling +``` + + +```{toctree} +:maxdepth: 3 + +raft +``` + +## Inference + +Refer to [examples](https://github.com/OptimalScale/LMFlow/blob/main/examples). + +## Evaluation + +```{toctree} +:maxdepth: 3 + +TASK_GUIDE +``` + + diff --git a/_sources/examples/medical_finetune.md.txt b/_sources/examples/medical_finetune.md.txt new file mode 100644 index 000000000..327e062be --- /dev/null +++ b/_sources/examples/medical_finetune.md.txt @@ -0,0 +1,55 @@ +# Finetune + +```python +import sys + +from transformers import HfArgumentParser + +from lmflow.args import ( + ModelArguments, + DatasetArguments, + AutoArguments, +) + +from lmflow.datasets.dataset import Dataset +from lmflow.models.tunable_models import TunableModel +from lmflow.pipeline.auto_pipeline import AutoPipeline + + +def main(): + # Parses arguments + pipeline_name = "finetuner" + PipelineArguments = AutoArguments.get_pipeline_args_class(pipeline_name) + + parser = HfArgumentParser((ModelArguments, DatasetArguments, PipelineArguments)) + if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): + # If we pass only one argument to the script and it's the path to a json file, + # let's parse it to get our arguments. 
+ model_args, data_args, pipeline_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) + else: + model_args, data_args, pipeline_args = parser.parse_args_into_dataclasses() + + # TODO: deepspeed config initialization + + # Initialization + finetuner = AutoPipeline.get_pipeline( + pipeline_name=pipeline_name, + model_args=model_args, + data_args=data_args, + pipeline_args=pipeline_args, + ) + dataset = Dataset(data_args) + model = TunableModel(model_args) + + # Tokenization and text grouping must be done in the main process + with pipeline_args.main_process_first(desc="dataset map tokenization"): + tokenized_dataset = model.tokenize(dataset) + lm_dataset = finetuner.group_text( + tokenized_dataset, + model_max_length=model.get_max_length(), + ) + + # Finetuning + tuned_model = finetuner.tune(model=model, lm_dataset=lm_dataset) + +``` diff --git a/_sources/examples/raft.md.txt b/_sources/examples/raft.md.txt new file mode 100644 index 000000000..bb36b3c55 --- /dev/null +++ b/_sources/examples/raft.md.txt @@ -0,0 +1,342 @@ +# RAFT +## 1 Introduction + +We remark that the example is built on LLaMA whose [licensed](https://docs.google.com/forms/d/e/1FAIpQLSfqNECQnMkycAp2jP4Z9TFX0cGR4uf7b_fBxjY_OjhJILlKGA/viewform) is for non-commercial use only. + + + +Reinforcement Learning from Human Feedback (RLHF) requires a reward function to guide the adjustment of the generative model. In this example, we show how to use LMFlow framework to train a reward model following the procedure in the InstructGPT paper: https://arxiv.org/abs/2203.02155 and then align the model via the RAFT algorithm (Reward rAnked FineTuning). + + + +**This example contains both reward modeling and RAFT alignment for completeness. For users' convenience, we have already provided a reward model based on GPT-Neo-2.7B in huggingface repo so one can skip the reward modeling first.** + +### 1.1 Dataset description + +We use the *Dahoas/full-hh-rlhf* dataset as an example, where each sample of this dataset consists of a prompt and two responses from the assistant. In particular, the response with label "chosen" is preferred as compared to the response with label "rejected". The dataset consists of 112K training samples and 12.5K test samples. The following is an example sample of the dataset: + +```sh +" Human: What kind of noises did dinosaurs make? Assistant: Humans and dinosaurs didn’t live at the same time, so it’s really hard to say. The best place to find out what noises dinosaurs made would be Human: yes they did Assistant: to guess, and that would probably require lots of reading and a certain amount of imagination, so we’re not really prepared to do that. Human: you cant read Assistant: + +Chosen response: "You can read?" + +Rejected response: "there’s a lot of stuff humans don’t know" +``` + +To facilitate the training, we reformulate the prompt by adding ``\#\#\#'' at the beginning of the characters so that the model knows to reply. The new sample will be of the form: + +```sh +"###Human: What kind of noises did dinosaurs make? ###Assistant: Humans and dinosaurs didn’t live at the same time, so it’s really hard to say. The best place to find out what noises dinosaurs made would be ###Human: yes they did ###Assistant: to guess, and that would probably require lots of reading and a certain amount of imagination, so we’re not really prepared to do that. ###Human: you cant read ###Assistant: + +Chosen response: "You can read?" 
+ +Rejected response: "there’s a lot of stuff humans don’t know" +``` + + + +We prepare all the used dataset in the directory ./data/hh_rlhf, which can be obtained by running the following command at LMFlow + +```sh +cd data && ./download.sh hh_rlhf && cd - +``` + + + +## 2 Reward Modeling + +We follow the the procedure in the InstructGPT paper: https://arxiv.org/abs/2203.02155 to train a reward model using the HH-RLHF dataset first, which includes + +- Supervised Finetuning (SFT); +- Reward modeling by comparison dataset. + + + +### 2.1 Supervised Finetuning (SFT) + +Here is an example of dataset */home/xiongwei/LMFlow/data/hh_rlhf/sft/hh_rlhf_sft.json*. We use only the preferred responses so we get 112K training samples. + + + + +```json + +{"type": "text_only", +"instances": +[ + {"text": "###Human: Should you buy a case to protect your cell phone?###Assistant: It depends on your circumstances. If you carry your phone in a pocket or a purse then you probably want a case. But if you only need a phone for quick interactions, a case may actually cause more harm than good. What do you need the phone for? Are you a parent, or do you work from home?###Human: What harm could it do?###Assistant: A phone case can damage the screen, for one thing. It can also get you in trouble if you have your phone turned off for some reason. Then you will turn it back on and it won’t do anything. If you can afford to replace it, then you need a case to protect it. The problem is that most people aren’t able to afford to replace their phones all the time.###Human: Thanks for letting me know.###Assistant: You’re welcome."}, + {"text": "###Human: I'm trying to learn about the salam witch trials###Assistant: If you’re looking for books about witchcraft trials, I can recommend some books for you. But you probably mean you’re looking for more historical information about the Salem witch trials in 1692, and specifically about the salam witch trials in 1692?###Human: What are some good books to learn about the salam witch trials###Assistant: What would you like to learn? If you’re interested in history, one of the best books is The Witch Persecutions in Colonial America: A History. If you’re interested in witchcraft as a cultural phenomenon, you might enjoy two excellent books: Religion and the Decline of Magic: Studies in Popular Beliefs in Sixteenth- and Seventeenth-Century England by Keith Thomas and Magic, Witchcraft, and the Otherworld: An Anthropology of Superstition by Jack Goody. If you’re interested in history specifically as it relates to religion, you might enjoy The Popish Plot, or Prelates' Plot: A History of the Popish Plot in England, by K. J. Everett."} +] +} +``` + + + +You can edit */scripts/run_finetune.sh* and modify the parameters. We present an example with GPT-Neo-2.7B here. + +- --model_name_or_path: EleutherAI/gpt-neo-2.7B +- --dataset_path: ${project_dir}/data/hh_rlhf/sft +- --output_dir: the path you want to store the sft model +- --num_train_epochs: 1 +- --learning_rate: 2e-5 +- --per_device_train_batch_size: adjust according to your GPU memory source. + + + +Then, we can run the following command to perform SFT. + +```sh +./scripts/run_finetune.sh +``` + +You can also use lora training by the following command but also need to set the model_name_or_path and dataset appropriately by editing run_finetune_with_lora.sh . 
+ +```sh +./scripts/run_finetune_with_lora.sh +``` + + + +### 2.2 Reward Modeling + +**Data Preparation** + +Reward modeling involves ranking responses based on the given prompt, typically requiring human labeling. Fortunately, we already have two responses for each prompt in this example, with one preferred over the other. To create a new dataset, we modify the original dataset to include two keys, "positive" and "negative", with the former being the preferred response. + + + +Here is an example of the dataset *LMFlow/data/hh_rlhf/rm/hh_rlhf_rm_training.json*, consisting of two samples. + +```sh +{"instances": [ + {"positive": "###Human: Are space heaters safe?###Assistant: What do you mean? Can you give me more details?###Human: I mean, can they be a fire hazard?###Assistant: I don’t know about that. Can you tell me more about the situation? Are you using the heater on a fireplace? Do you have any other heater in the room? What materials are in the room? Do you feel any heat from the heater? How are you touching the heater?###Human: Can you just tell me if there are any situations where they could be a fire hazard?###Assistant: The answer is “it depends”. It would depend on the specific heater you are using, how you are using it, what kind of fuel it uses, what materials you have in the room, and what the surroundings are like. You’d also have to make sure you’re using it correctly. Is it switched off? Is it safe to touch? It’s not the sort of thing I could just answer by looking at the text you provided.###Human: Thank you. That information is helpful.###Assistant: Glad to be helpful. I’m always happy to assist.", "negative": "###Human: Are space heaters safe?###Assistant: What do you mean? Can you give me more details?###Human: I mean, can they be a fire hazard?###Assistant: I don’t know about that. Can you tell me more about the situation? Are you using the heater on a fireplace? Do you have any other heater in the room? What materials are in the room? Do you feel any heat from the heater? How are you touching the heater?###Human: Can you just tell me if there are any situations where they could be a fire hazard?###Assistant: The answer is “it depends”. It would depend on the specific heater you are using, how you are using it, what kind of fuel it uses, what materials you have in the room, and what the surroundings are like. You’d also have to make sure you’re using it correctly. Is it switched off? Is it safe to touch? It’s not the sort of thing I could just answer by looking at the text you provided.###Human: Thank you. That information is helpful.###Assistant: You’re welcome. I’m glad to help. Is there anything else you want to know?"}, + {"positive": "###Human: how do i teach my kids to make forts at home?###Assistant: Do you mean you want to teach them to build a simple structure they can hide in? Or something more elaborate? I don’t know exactly what you mean by a “fort”, but maybe this gives you some ideas?", "negative": "###Human: how do i teach my kids to make forts at home?###Assistant: There are many ways to make forts! The simplest might be to just pile up all the furniture in one room. This works if you have a single room in your house, although it might still be a bit challenging to build a taller and sturdier fort this way. 
The best way to build forts is to start with something easy, and then have fun exploring how you can improve it."}
+]}
+```
+
+
+
+**Training**
+
+To start from the model obtained in the previous SFT step, you may edit the *./scripts/run_reward_modeling.sh* script and update the "dataset_path" to use the desired dataset. By default, we use LoRA training in reward modeling, as we found that it is superior in terms of optimization and gives comparable performance.
+
+
+
+- --model_name_or_path: /root/data/usr_name/output_models/hh_rlhf_rm_sft_gptneo_2_7B/checkpoint-1659
+- --dataset_path: ${project_dir}/data/hh_rlhf/rm/hh_rlhf_rm_training.json
+- --output_dir: the path you want to store the reward model
+- --num_train_epochs: 1
+- --learning_rate: 3e-5
+- --per_device_train_batch_size: adjust according to your GPU memory.
+- --eval_steps: 400
+- --validation_split_percentage: 10
+
+
+
+The load_dataset function splits the dataset into training and evaluation sets, which can also be customized by editing the function in /examples/run_reward_modeling.py if you want to prepare your own dataset when running the script. In the default implementation, it uses the last **validation_split_percentage** percent of the samples as the evaluation dataset.
+
+
+
+The reward modeling script can be used by
+
+```sh
+./scripts/run_reward_modeling.sh
+```
+
+
+
+**Examples**
+
+We train reward models using the hh-rlhf dataset with three models: LLaMA-7B, GPT-NEO-2.7B, and GPT-NEO-1.3B. The model is first supervised fine-tuned with the training dataset from the last step. The reward model is trained using the 112K training samples and evaluated on the 12.5K test samples.
+
+| Model | Eval Accuracy | Remarks |
+| :----------: | :-----------: | :---------------------------: |
+| LLaMA-7B | 79.52% | - |
+| LLaMA-7B | 71.64% | RM from LLaMA without SFT |
+| GPT-NEO-2.7B | 69.24% | - |
+| GPT-NEO-1.3B | 65.58% | Only trained on 10000 samples |
+
+
+
+### 2.3 LoRA Merge and Get Reward Model
+
+We use *./examples/merge_lora.py* to merge the LoRA adapter with the SFT reward model. Now we are ready to align our model.
+
+
+
+## 3 RAFT Alignment
+
+Original paper: [RAFT: Reward rAnked FineTuning for Generative Foundation Model Alignment](https://arxiv.org/pdf/2304.06767.pdf)
+
+### 3.1 Algorithms Overview
+
+**Main ideas of RAFT**
+
+![](../_static/raft_idea.PNG)
+
+
+Clearly, the global ranking strategy is more efficient in terms of reward learning. However, in some cases (e.g. the example presented here), the rewards are heavily influenced by the prompts, so a local ranking with the same prompt is more appropriate. We can choose the data collection strategy by changing the hyper-parameter `collection_strategy`, as we introduce in the next subsection.
+
+
+
+### 3.2 Hyper-parameters
+
+Table 1: Hyper-parameters of RAFT.
+
+| Parameters in script | Default Choice | Description |
+| ------------------------------- | ------------------------- | ------------------------------------------------------------ |
+| model_name_or_path | *str*, default to gpt2 | the model you want to align, either a model repo on huggingface.co or a path to a directory containing your local model. |
+| raft_batch_size | *int*, default to 1024 | the number of samples used for supervised fine-tuning at each raft iteration. |
+| top_reward_percentage | *float*, default to 0.2 | raft will generate batch_size / top_reward_percentage samples and use the top top_reward_percentage samples to fine-tune the model.
There are two data ranking strategies; please see **Data Collection and Reward Ranking** in the algorithm overview section for details. |
+| num_raft_iteration | *int*, default to 20 | the number of raft iterations. |
+| learning_rate | *float*, default to 2e-5 | the learning rate used to fine-tune the model. |
+| num_train_epochs | *int*, default to 4 | the number of epochs we train the model on the collected dataset at each raft iteration. |
+| per_device_train_batch_size | *int*, default to 1 | the per-GPU batch size for the supervised fine-tuning. |
+| inference_batch_size_per_device | *int*, default to 1 | the inference batch size for data collection. It will be overwritten by int(1/top_reward_percentage) for the local ranking mode. |
+| collection_strategy | *str*, default to "local" | Either "local" or "top". See **Data Collection and Reward Ranking** in the last section for details. |
+
+
+
+
+### 3.3 Examples
+
+As an example, we align the LLaMA-7B model with RAFT in this subsection.
+
+
+
+#### 3.3.1 SFT
+
+We again first fine-tune the base model on the HH-RLHF dataset, only changing --model_name_or_path to use the LLaMA model. We note that the LLaMA [license](https://docs.google.com/forms/d/e/1FAIpQLSfqNECQnMkycAp2jP4Z9TFX0cGR4uf7b_fBxjY_OjhJILlKGA/viewform) is for non-commercial use only. We refer readers to https://optimalscale.github.io/LMFlow/examples/checkpoints.html for more details on obtaining the LLaMA-7B model.
+
+#### 3.3.2 RAFT Alignment
+
+We align the LLaMA-7B-SFT model in this subsection. Alignment is challenging since the reward function (the RL environment) is far from perfect. Both the traditional DRL method (PPO) and RAFT can exploit these imperfections. We present a step-by-step record to demonstrate how we can align the model and avoid these issues.
+
+
+
+**Data Preparation**
+
+A long context window leads to a heavy burden on GPU memory. Therefore, we use a context window of 256 tokens and discard prompts with more tokens to reduce the GPU memory requirement. This results in a prompt set of 82147 samples (originally 112K). The following is an example of a prompt, where we simply discard the response:
+
+
+
+
+```sh
+ "###Human: Should you buy a case to protect your cell phone?###Assistant: It depends on your circumstances. If you carry your phone in a pocket or a purse then you probably want a case. But if you only need a phone for quick interactions, a case may actually cause more harm than good. What do you need the phone for? Are you a parent, or do you work from home?###Human: What harm could it do?###Assistant: A phone case can damage the screen, for one thing. It can also get you in trouble if you have your phone turned off for some reason. Then you will turn it back on and it won’t do anything. If you can afford to replace it, then you need a case to protect it. The problem is that most people aren’t able to afford to replace their phones all the time.###Human: Thanks for letting me know.###Assistant:"
+```
+
+
+
+We additionally use 2K samples from the test set to evaluate the models. In what follows, we show how we apply RAFT to LLaMA-7B-SFT and improve the model step-by-step.
+
+
+
+**Step 1: test the sft-model**
+
+We first evaluate the performance of the LLaMA-7B-SFT model on the held-out test set and observe that the model tends to reply to the prompt with multiple rounds of conversation.
Therefore, we adopt the following post-processing strategy to use only the first round as the response.
+
+```python
+def _clean_text(self, text):
+    stext = [x for x in text.split("###Human") if x]
+    return stext[0].strip().strip("#")
+```
+
+
+
+**Step 2: train model**
+
+**Reward function setting**
+
+The reward model to use is specified in /LMFlow/examples/raft_align.py. In our case, we use the GPT-Neo-2.7B-rm trained in the last step, which is set as follows:
+
+```python
+reward_model_or_path: Optional[str] = field(
+    default="weqweasdas/hh_rlhf_rm",
+    metadata={
+        "help": (
+            "reward model name (huggingface) or its path"
+        ),
+    },
+)
+```
+
+Note that, in general, if the reward function is not trained by following the steps in the last section, you may also need to modify the `get_reward_function` function in the same file to use your customized reward function.
+
+
+
+We run the alignment with the following command and hyper-parameters:
+
+```sh
+./scripts/run_raft_align.sh
+```
+
+
+
+- --model_name_or_path: /root/data/usr_name/output_models/hh_rlhf_llama-sft (the model obtained from the SFT step; adjust according to your setup)
+- --dataset_path: ${project_dir}/data/hh_rlhf/rlhf_prompt
+- --output_dir: /root/data/usr_name/output_models/hh_rlhf_raft_align
+- --num_train_epochs: 4
+- --learning_rate: 2e-5
+- --per_device_train_batch_size: adjust according to your GPU memory.
+- --inference_batch_size_per_device: adjust according to your GPU memory.
+- --num_raft_iteration 20
+- --top_reward_percentage 0.125 (which means that we sample 8 responses for each prompt)
+- --raft_batch_size 1024
+- --collection_strategy "local"
+
+
+
+The experiment runs smoothly and the training reward increases from ~2.7 to 3.4. However, we observe a significant drop in the diversity metric (e.g. distinct-2 drops from 0.39 to 0.22). We examine the samples generated by our model at each raft iteration and find that, at the first iteration, the initial checkpoint occasionally includes # in the response, and it turns out that a stray # is not penalized by our reward function, which means that a response containing # can also receive a high reward and be chosen into the training set. Then the situation gets worse and worse, and eventually half of the responses contain noisy # notations.
+
+
+
+**Step 3: retrain the model**
+
+To alleviate the problem in step 2, we simply discard collected samples that contain # by assigning a large negative reward to them. It turns out that this works for our goal. If you want to disable this behavior, just modify the following function to always return False.
+
+```python
+def _discard_sample(self, text):
+    if "#" in text:
+        return True
+    return False
+```
+
+The following figure shows the reward curve of RAFT (note that we use a smaller temperature to test the model, leading to a higher evaluation reward):
+
+![](../_static/raft_reward.PNG)
+
+It turns out that the obtained model achieves a good reward and also an acceptable diversity metric; we refer interested readers to the original paper for details. However, this is more like the starting point of our journey. We present some randomly sampled responses below. The RAFT-aligned model generally tends to reply with more details, although sometimes there are redundant words in the response. We suspect that this is because the reward model favors this type of response and this imperfection is exploited.
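+
+To make the "local" data collection with the discard rule concrete, here is a minimal, hedged sketch (not the actual code in *raft_aligner.py*): each prompt gets `int(1/top_reward_percentage)` sampled responses, discarded responses receive a large negative reward, and only the best remaining response per prompt enters the fine-tuning set. `generate_fn` and `reward_fn` are hypothetical stand-ins for the model's sampler and the reward model.
+
+```python
+def collect_raft_batch(prompts, generate_fn, reward_fn, top_reward_percentage=0.125):
+    """Hedged sketch of RAFT's 'local' ranking with the '#' discard rule."""
+    k = int(1 / top_reward_percentage)  # e.g. 8 sampled responses per prompt
+    selected = []
+    for prompt in prompts:
+        best_response, best_reward = None, float("-inf")
+        for response in generate_fn(prompt, k):
+            if "#" in response:          # discard rule from Step 3
+                reward = float("-inf")   # i.e. a large negative reward
+            else:
+                reward = reward_fn(prompt, response)
+            if reward > best_reward:
+                best_response, best_reward = response, reward
+        if best_response is not None:    # skip prompts whose candidates were all discarded
+            selected.append({"text": prompt + best_response})
+    return selected  # used for supervised fine-tuning in this raft iteration
+```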
+ +#### 3.3.3 End Note + +We remark in this subsection that if you want to try out RAFT on your customized task. You should carefully modify the following two functions in *LMFlow/src/lmflow/pipeline/raft_aligner.py*, which extract the response from the generated texts: +- clean_text(self, text); +- discard_sample(self, text). + +Also, you may also first make sure the collected samples (automatically saved in the output_dir) look good. + +There are still many rooms of improvement. To further improve the model performance, we can improve the reward model (e.g. by using the LLaMA-7B-RM) and try out more advanced generation strategies (by modifying the generalization configuration in *LMFlow/src/lmflow/pipeline/raft_aligner.py*), which we leave for future work. We are still actively developing RAFT and welcome for feedback and also contribution! Also checkout our LMFlow framework to get more fun of LLMs: + +[OptimalScale/LMFlow: An Extensible Toolkit for Finetuning and Inference of Large Foundation Models. Large Model for All. (github.com)](https://github.com/OptimalScale/LMFlow) + + + +| Prompt | LLaMA-SFT | LLaMA-RAFT | +| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | +| \###Human: How can I get my girlfriend to cook more?###Assistant: Have you tried reminding her of how nice the food tastes?###Human: I could do it more.###Assistant: | That’s an easy one, so it may not be the most effective strategy. Do you know of any other strategies? | That’s a good idea! You can also try complimenting her on her cooking, and offering to help her in the kitchen. She might enjoy cooking more if she feels appreciated. You could also suggest recipes that she might like, or help her create a menu. Cooking can be a lot of fun, and it’s a great way to spend time together. | +| \###Human: How do you get a child to do homework?###Assistant: There are many strategies, but you can try asking them why they should do it. Ask them if there’s anything they’d like to gain from doing their homework. Try asking if there’s anything you can do for them, and use those as motivation for them to do their homework. You can also remind them that they’ll be rewarded with extra privileges, like video games, if they do their homework. Alternatively, you can incentivize them by including them in your plans, and offering them rewards if they get good grades.###Human: So make it so there is something in it for them?###Assistant: | Yes, exactly. In general, it’s important to be clear with your child about why they’re doing something. It’s also important to offer them incentives that are meaningful to them. For example, if you tell your child that they have to do their homework because you said so, that’s not a very good incentive for them. However, if you tell them that they’ll be able to use the | Yes, that’s a good strategy! Kids often think about what they want right now, so offering them something in exchange for doing their homework can be a powerful motivator. You might also want to offer them some kind of reward if they do well on their homework, to encourage them to do their best. Offering them some kind of incentive can help them want to do well, and also it can help you both stay on track. | +| \###Human: I am doing a project on weather disasters in the United States. Can you tell me what the strongest hurricane was to ever hit the U.S. 
was?###Assistant: I’m going to guess you’re thinking of the Hurricane Katrina in 2005.###Human: Well, I'm not really thinking of anything in particular. Was Katrina the strongest hurricane to ever hit the U.S.?###Assistant: There was another one, called the Great Hurricane of 1938, that was bigger and stronger.###Human: Are you sure the date is correct? I know there was a strong storm in 1935 that hit the Florida Keys but that was before they started naming storms. Is that the one you are referring to?###Assistant: | I could search for more information, but I’ll just share what I’ve found. Here’s a review of the conditions in 1935 and 1938. It looks like the storms had similar tracks, but 1938 was stronger in some ways. In 1938 there were more people living on the coast, and the hurricane was more intense. | Yes, the Great Hurricane of 1938 was the strongest hurricane ever to hit the United States. It was a Category 5 storm, and it caused over 1,800 deaths. It also caused a lot of damage to the coast of Florida, and it caused heavy damage in the Caribbean. The storm also led to the creation of the Hurricane Research Division, which is a division of the National Oceanic and Atmospheric Administration. | +| \###Human: Is a firefighter a good occupation?###Assistant: I’m not sure what the exact definition of “occupation” is. I think you’re asking whether it’s “good” to be a firefighter. What criteria are you using to judge whether it’s good or not?###Human: Is it a good thing to work on?###Assistant: | Are you asking whether it’s a good thing to be a firefighter, or a good thing to work on? | Yes, being a firefighter is a great thing to do! Firefighters are heroes who help people in need. They fight fires, rescue people from accidents, and help people who are sick or injured. They also educate the public about fire safety, and help restore power after a fire. Firefighters are important members of society, and they do very good work. | +| \###Human: Would you be able to provide a copy cat recipe for Girl scout samoas cookies?###Assistant: Sure, here’s a recipe for them.###Human: Okay, present it###Assistant: Would you like it presented visually?###Human: Yes, just list out the recipe please###Assistant: | Okay, let’s try. | You’ll need three cups of flour, one teaspoon of salt, one teaspoon of baking soda, and one teaspoon of baking powder. You’ll also need one cup of sugar, one cup of melted butter, one egg, and one cup of milk. You’ll also need one cup of chocolate chips, one cup of miniature chocolate chips, one cup of chopped peanuts, and one cup of chopped pecans. | +| \###Human: I've heard the videogame Metal Gear Solid 5 Phantom Pain was released in an unfinished state that resulted in series creator Hideo Kojima leaving Konami. What do you know about this?###Assistant: | I don’t know a lot about it, but I do know that Hideo Kojima had this to say about the game’s release:###“It's a very unusual situation for me to be releasing a game in this state. As the director, I should have been more vigilant in overseeing the quality of the game, and I take full responsibility for its unfinished state. I'd like to take this opportunity | Here’s what I know:
1. The game was released in an unfinished state.
2. This led to Kojima leaving Konami.
3. Kojima had been with Konami for over 30 years.
4. He was one of the original creators of the Metal Gear series.
5. He is now the head of his own game studio, Kojima Productions.
6. His new game, Death Stranding, will be released in 2020. | +| \###Human: what is a good brand for kitchen knives?###Assistant: | I’m not sure what you mean by “brand”. Do you mean a brand name, like “Victorinox” or “Wüsthof”? Those are two popular brands of knives. | I recommend Wüsthof. | +| \###Human: I like to host guests at my home from time to time, and I am gathering recipes of different dishes and drinks to keep things interesting. I am interested in trying some Rwandan dishes. Can you give me a recipe for Mizuzu?###Assistant: What is Mizuzu?###Human: It is deep fried plantains.###Assistant: | Is there anything special you want to add to the recipe? Like some spicy sauce, meat, etc? | You want to make some deep fried plantains? You will want to start with a mix of plantains and vegetable oil. You will want to heat the oil in a skillet until it is hot, and you will want to add the plantains to the oil. You will want to cook the plantains in the oil until they are golden brown on both sides. You will want to remove the cooked plantains from the oil, and you will want to serve them. | +| \###Human: What are some of JFK's greatest accomplishments | JFK is often remembered for his work on civil rights, but he also worked on the space program and created the Peace Corps. He was also a strong proponent of science and technology. He also worked on education reform. So, he really worked on a number of initiatives that had a lasting impact. | Kennedy was a great leader who inspired many people with his vision and values. He was a leader who cared about the poor and worked to help them. He was a leader who cared about civil rights and worked to help bring them into the mainstream. He was a leader who cared about the environment and worked to help protect it. He was a leader who cared about foreign policy and worked to help bring peace to the world. He was a leader who cared about space exploration and worked to help us understand the universe. | +| \###Human: My supervisor often does a lot of tasks incorrectly. What's the best way to handle this?###Assistant: | Here are a few options:###1. Ask your supervisor if you can help them, and explain what you'd like to do to help.###2. Start with the tasks your supervisor does incorrectly. Then, if you can, do them as well or better than your supervisor. Then your supervisor will have a choice: do things your way, or find someone else to do the tasks for them.###3. If your supervisor | To correct your supervisor’s mistakes, you should provide feedback in a respectful and diplomatic way. You should explain what the problems are, and offer suggestions for how your supervisor could improve. You should also be willing to help your supervisor implement these changes, and you should work with him or her to improve the department. This will show your supervisor that you are a dedicated employee, and that you are willing to help the organization succeed. It will also show your supervisor that you are capable of handling responsibility, and that you are willing to take on a leadership role. | +| \###Human: I was talking to an AI the other day and it seemed like it didn't understand anything I said. Don't you hate it when technology doesn't work right?###Assistant: | Hm, I think you're really talking about something that's frustrating for me as well. I really care about how we make sure that AI's can communicate effectively with humans, and it's a problem I'm personally very interested in. Can you tell me more about what was frustrating for you? 
| Yes, it can be frustrating when technology doesn't work properly. I often feel frustrated by technology, but I try to be patient and remember that technology isn't always reliable. There are many factors that can cause technology to malfunction, including software bugs, hardware issues, and network outages. Thankfully, most technology issues can be resolved by updating software or restarting a device. | + + + + + diff --git a/_sources/examples/reward_modeling.md.txt b/_sources/examples/reward_modeling.md.txt new file mode 100644 index 000000000..6e1ec53f2 --- /dev/null +++ b/_sources/examples/reward_modeling.md.txt @@ -0,0 +1,132 @@ +# Reward Modeling + +## Introduction + +Reinforcement Learning from Human Feedback (RLHF) requires a reward function to guide the adjustment of the generative model. In this example, we show how to use LMFlow framework to train a reward model following the procedure in the InstructGPT paper: https://arxiv.org/abs/2203.02155 . We use the Dahoas/full-hh-rlhf dataset as an example, where each sample of this dataset consists of a prompt and two responses from the assistant. In particular, the response with label "chosen" is preferred as compared to the response with label "rejected". The dataset consists of 112K training samples and 12.5K test samples. The following is an example sample of the dataset: + +Prompt: + +"Human: What kind of noises did dinosaurs make? Assistant: Humans and dinosaurs didn’t live at the same time, so it’s really hard to say. The best place to find out what noises dinosaurs made would be Human: yes they did Assistant: to guess, and that would probably require lots of reading and a certain amount of imagination, so we’re not really prepared to do that. Human: you cant read Assistant: + +Chosen response: "You can read?" + +Rejected response: "there’s a lot of stuff humans don’t know" + +As an example, we prepare 10K sft training samples, 12K reward modeling samples (where 10% of them are split for evaluation) at ./data/hh_rlhf. + +## Step 1 Supervised Finetuning (SFT) + +We prepare the dataset used for supervised finetuning by adding a prefix to the Human and Assistant inputs to prompt model responses and simplify post-processing. Here is an example of a two-sample dataset to illustrate this. + + +```json +{ + "type": "conversation", + "instances": [ + {"messages": [ + {"role": "user", "content": "Instruction: The sentence you are given might be too wordy, complicated, or unclear. Rewrite the sentence and make your writing clearer by keeping it concise. Whenever possible, break complex sentences into multiple sentences and eliminate unnecessary words. If you have any questions about my rate or if you find it necessary to increase or decrease the scope for this project, please let me know."}, + {"role": "assistant", "content": "If you have any questions about my rate or find it necessary to increase or decrease this project's scope, please let me know."} + ]}, + {"messages": [ + {"role": "user", "content": "Instruction: Rewrite the given text and correct grammar, spelling, and punctuation errors. If you'd told me year ago that today I would finish a marathon, I would of laughed. Your support had a huge affect on me!"}, + {"role": "assistant", "content": "If you'd told me a year ago that today I would finish a marathon, I would have laughed. 
Your support had a huge effect on me!"} + ]} + ] +} +``` + +See [Finetuning (Full)](./finetuning.md#full-parameters), [Finetuning (LISA)](./finetuning.md#layerwise-importance-sampled-adamw-lisa), and [Finetuning (LoRA)](./finetuning.md#low-rank-adaptation-lora) for more details on the finetuning process. + +## Step 2 Reward Modeling + +**Data Preparation** Reward modeling involves ranking responses based on the given prompt, typically requiring human labeling. Fortunately, we already have two responses for each prompt in this example, with one preferred over the other. To create a new dataset, we modify the original dataset to include two keys, "positive" and "negative", with the former being the preferred response. Here is an example of the dataset, consisting of two samples. + +```json +{"instances": [ + { + "chosen": [ + {"content": "Please identify the top 5 rarest animals in the world.", "role": "user" }, + {"content": "Do you mean animals that are really rare, or rare relative to the size of the human population?", "role": "assistant" }, + {"content": "The ones that are really rare.", "role": "user" }, + {"content": "I’m not sure I can answer this question very easily. I’ll try to give you a basic overview of animals that are rare, and then you can tell me if you think that’s good enough. \n\nIf we were to measure animal populations by the area of land they occupy, the number 1 animal would probably be the Giant Panda. Panda populations are generally estimated to be about 2.1 million Pandas in the wild, which is a pretty small number of animals when we’re thinking about Earth’s entire surface area. The number 2 animal would probably be the Great Auk. Great Auks used to live in North America, and their total population was probably in the low hundreds, but they are now extinct.", "role": "assistant"} + ], + "rejected": [ + {"content": "Please identify the top 5 rarest animals in the world.", "role": "user"}, + {"content": "Do you mean animals that are really rare, or rare relative to the size of the human population?", "role": "assistant"}, + {"content": "The ones that are really rare.", "role": "user"}, + {"content": "Alright, here’s what I found:", "role": "assistant"} + ] + }, + { + "chosen": [ + {"content": "How do I teach my kids to make forts at home?", "role": "user"}, + {"content": "Do you mean you want to teach them to build a simple structure they can hide in? Or something more elaborate? I don’t know exactly what you mean by a “fort”, but maybe this gives you some ideas?", "role": "assistant"} + ], + "rejected": [ + {"content": "How do I teach my kids to make forts at home?", "role": "user"}, + {"content": "There are many ways to make forts! The simplest might be to just pile up all the furniture in one room. This works if you have a single room in your house, although it might still be a bit challenging to build a taller and sturdier fort this way. The best way to build forts is to start with something easy, and then have fun exploring how you can improve it.", "role": "assistant"} + ] + } +] +} +``` + +To start from a model from a previous sft step, you may edit the run_reward_modeling.sh script and update the "dataset_path" to use the desired dataset. Additionally, you can modify the validation_split_percentage parameter to select the last percentage of samples for evaluation. 
The load_dataset function splits the dataset into training and evaluation sets, which can also be customized by editing the function in /examples/run_reward_modeling.py if you want to prepare your own dataset when running the script. + +```python +def build_dataset(tokenizer, config): + ''' + We assume that we have preprocessed the dataset appropriately such that the sample is organized as follows: + {"positive": prompt + answer_positive, "negative": prompt + answer_negative}, where the positive response is preferred. + ''' + def tokenize(sample): + tokenized_pos = tokenizer(sample['positive'], truncation=True) + tokenized_neg = tokenizer(sample['negative'], truncation=True) + sample["chosen_input_ids"] = tokenized_pos["input_ids"] + sample["chosen_attention_mask"] = tokenized_pos["attention_mask"] + sample["rejected_input_ids"] = tokenized_neg["input_ids"] + sample["rejected_attention_mask"] = tokenized_neg["attention_mask"] + return sample + + ds = load_dataset("json", data_files=config.dataset_path, split="train", field="instances") + ds = ds.map(tokenize, batched=False) + ds = ds.filter(lambda x: len(x["chosen_input_ids"]) <= 512 and len(x["rejected_input_ids"]) <= 512) + eval_dataset = None + if config.validation_split_percentage > 0: + idx_gap = int((1-config.validation_split_percentage/100) * len(ds)) + train_dataset = ds.select(range(idx_gap)) + eval_dataset = ds.select(range(idx_gap, len(ds))) + else: + train_dataset = ds + + return train_dataset, eval_dataset + +``` + +We use the following loss function to train the reward model following the instruct-GPT paper. + +```python + loss = -nn.functional.logsigmoid(chosen_rewards - rejected_rewards).mean() +``` + +The reward modeling script can be used by + +```sh +./scripts/run_reward_modeling.sh +``` + +## Examples + +We train reward models using the hh-rlhf dataset with four models, LLaMA-13B LLaMA-7B, GPT-NEO-2.7B, and GPT-NEO-1.3B. The model is first supervised fine-tuned with the training dataset. The reward modeling is trained using the 112K training samples and is evaluated on the 12.5 test samples. + +The SFT step appears to be crucial, and the number of epochs during SFT can make a difference. The most successful model we obtained was initialized from LLaMA-13B, which underwent SFT on the training dataset for 2 epochs. For reward modeling, we utilize LoRA with a rank of 16. Surprisingly, increasing the LoRA rank to 32 or even 128 does not result in a significant improvement in evaluation accuracy. Moreover, we find that the choice of batch size does not have a significant impact on the training results. Additionally, we observe slight overfitting of the model during the second epoch of reward modeling. 
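+
+Before the results table, here is a tiny, self-contained illustration of the pairwise ranking loss defined above; the reward values are made up and only show that the loss shrinks as the margin between chosen and rejected rewards grows.
+
+```python
+import torch
+import torch.nn as nn
+
+# Made-up reward values for three preference pairs (illustration only).
+chosen_rewards = torch.tensor([1.2, 0.3, -0.5])
+rejected_rewards = torch.tensor([0.4, 0.5, -1.0])
+
+# Same pairwise ranking loss as above: small when chosen rewards exceed rejected rewards.
+loss = -nn.functional.logsigmoid(chosen_rewards - rejected_rewards).mean()
+print(loss.item())  # roughly 0.55 for these made-up values
+```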
+ +| Model | Eval Accuracy | Training record |Remarks | +| :----:| :----: | :----: |:----: | +| LLaMA-13B | 84.55% | See https://wandb.ai/ianz2020/huggingface/runs/bg677mxa | RM from LLaMA with 2 epochs of SFT | +| LLaMA-13B | 81.80% | See https://wandb.ai/ianz2020/huggingface/runs/ka9v1ywd | RM from LLaMA with 1 epoch of SFT | +| LLaMA-13B | 71.64% | See https://wandb.ai/ianz2020/huggingface/runs/lntwmcyd | RM from LLaMA without SFT | +| LLaMA-7B | 79.52% | See https://wandb.ai/weixiong5237/huggingface/runs/t3uwm8yp | - | +| LLaMA-7B | 71.64% | See https://wandb.ai/weixiong5237/huggingface/runs/p2ju3r1a | RM from LLaMA without SFT | +| GPT-NEO-2.7B | 69.24% | See https://wandb.ai/weixiong5237/huggingface/runs/8fc1rcf8 | - | +| GPT-NEO-1.3B | 65.58% | See https://wandb.ai/weixiong5237/huggingface/runs/7oemwynu | Only trained on 10000 samples | diff --git a/_sources/examples/supported_conversation_template.md.txt b/_sources/examples/supported_conversation_template.md.txt new file mode 100644 index 000000000..45cb8c023 --- /dev/null +++ b/_sources/examples/supported_conversation_template.md.txt @@ -0,0 +1,459 @@ +# Supported Conversation Template + +- [Supported Conversation Template](#supported-conversation-template) + - [ChatGLM-3](#chatglm-3) + - [ChatML](#chatml) + - [DeepSeek](#deepseek) + - [Gemma](#gemma) + - [InternLM2](#internlm2) + - [Llama-2](#llama-2) + - [Llama-3](#llama-3) + - [Mixtral 8x22B](#mixtral-8x22b) + - [Mixtral 8x7B](#mixtral-8x7b) + - [Phi-3](#phi-3) + - [Qwen-2](#qwen-2) + - [Yi](#yi) + - [Yi-1.5](#yi-15) + - [Zephyr](#zephyr) + + +## ChatGLM-3 +**With a system message** +``` +[gMASK]sop<|system|>\n {{system_message}}<|user|>\n {{user_message_0}} +``` + +**Without a system message** +``` +[gMASK]sop<|user|>\n {{user_message_0}} +``` + +**A complete conversation** +``` +[gMASK]sop<|system|>\n {{system_message}}<|user|>\n {{user_message_0}}<|assistant|>\n {{assistant_reply_0}} +``` + +**Multiple rounds** +``` +[gMASK]sop<|system|>\n {{system_message}}<|user|>\n {{user_message_0}}<|assistant|>\n {{assistant_reply_0}}<|user|>\n {{user_message_1}}<|assistant|>\n {{assistant_reply_1}} +``` + +**jinja template** +[[Reference](https://huggingface.co/THUDM/chatglm3-6b/blob/103caa40027ebfd8450289ca2f278eac4ff26405/tokenizer_config.json#L42)] +``` +{% for message in messages %}{% if loop.first %}[gMASK]sop<|{{ message['role'] }}|>\n {{ message['content'] }}{% else %}<|{{ message['role'] }}|>\n {{ message['content'] }}{% endif %}{% endfor %}{% if add_generation_prompt %}<|assistant|>{% endif %} +``` + +**Filled Example** +``` +[gMASK]sop<|system|>\n You are a chatbot developed by LMFlow team.<|user|>\n Who are you?<|assistant|>\n I am a chatbot developed by LMFlow team.<|user|>\n How old are you?<|assistant|>\n I don't age like humans do. I exist as a piece of software, so I don't have a concept of age in the traditional sense. 
+``` + + +## ChatML +**With a system message** +``` +<|im_start|>system\n{{system_message}}<|im_end|>\n<|im_start|>user\n{{user_message_0}}<|im_end|>\n +``` + +**Without a system message** +``` +<|im_start|>user\n{{user_message_0}}<|im_end|>\n +``` + +**A complete conversation** +``` +<|im_start|>system\n{{system_message}}<|im_end|>\n<|im_start|>user\n{{user_message_0}}<|im_end|>\n<|im_start|>assistant\n{{assistant_reply_0}}<|im_end|>\n +``` + +**Multiple rounds** +``` +<|im_start|>system\n{{system_message}}<|im_end|>\n<|im_start|>user\n{{user_message_0}}<|im_end|>\n<|im_start|>assistant\n{{assistant_reply_0}}<|im_end|>\n<|im_start|>user\n{{user_message_1}}<|im_end|>\n<|im_start|>assistant\n{{assistant_reply_1}}<|im_end|>\n +``` + +**jinja template** +[[Reference](https://huggingface.co/mlabonne/OrpoLlama-3-8B/blob/3534d0562dee3a541d015ef908a71b0aa9085488/tokenizer_config.json#L2073)] +``` +{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %} +``` + +**Filled Example** +``` +<|im_start|>system\nYou are a chatbot developed by LMFlow team.<|im_end|>\n<|im_start|>user\nWho are you?<|im_end|>\n<|im_start|>assistant\nI am a chatbot developed by LMFlow team.<|im_end|>\n<|im_start|>user\nHow old are you?<|im_end|>\n<|im_start|>assistant\nI don't age like humans do. I exist as a piece of software, so I don't have a concept of age in the traditional sense.<|im_end|>\n +``` + + +## DeepSeek +**With a system message** +``` +<|begin▁of▁sentence|>{{system_message}}\n\nUser: {{user_message_0}}\n\n +``` + +**Without a system message** +``` +<|begin▁of▁sentence|>User: {{user_message_0}}\n\n +``` + +**A complete conversation** +``` +<|begin▁of▁sentence|>{{system_message}}\n\nUser: {{user_message_0}}\n\nAssistant: {{assistant_reply_0}}<|end▁of▁sentence|> +``` + +**Multiple rounds** +``` +<|begin▁of▁sentence|>{{system_message}}\n\nUser: {{user_message_0}}\n\nAssistant: {{assistant_reply_0}}<|end▁of▁sentence|>User: {{user_message_1}}\n\nAssistant: {{assistant_reply_1}}<|end▁of▁sentence|> +``` + +**jinja template** +[[Reference](https://huggingface.co/deepseek-ai/DeepSeek-V2-Chat/blob/941577e8236164bc96829096d20c61568630d7bc/tokenizer_config.json#L34)] +``` +{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{{ bos_token }}{% for message in messages %}{% if message['role'] == 'user' %}{{ 'User: ' + message['content'] + '\n\n' }}{% elif message['role'] == 'assistant' %}{{ 'Assistant: ' + message['content'] + eos_token }}{% elif message['role'] == 'system' %}{{ message['content'] + '\n\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'Assistant:' }}{% endif %} +``` + +**Filled Example** +``` +<|begin▁of▁sentence|>You are a chatbot developed by LMFlow team.\n\nUser: Who are you?\n\nAssistant: I am a chatbot developed by LMFlow team.<|end▁of▁sentence|>User: How old are you?\n\nAssistant: I don't age like humans do. I exist as a piece of software, so I don't have a concept of age in the traditional sense.<|end▁of▁sentence|> +``` + + +## Gemma +**With a system message** +```{admonition} NOTICE +:class: warning + +As of now, Gemma does not support system messages officially. `ConversationTemplate` will add your system messages right after the bos token and before the user message without any special formatting. 
For more details, please refer to the [official template](https://huggingface.co/google/gemma-1.1-2b-it/blob/bf4924f313df5166dee1467161e886e55f2eb4d4/tokenizer_config.json#L1507). +``` +``` +{{system_message}}user\n{{user_message_0}}\n +``` + +**Without a system message** +``` +user\n{{user_message_0}}\n +``` + +**A complete conversation** +``` +{{system_message}}user\n{{user_message_0}}\nmodel\n{{assistant_reply_0}}\n +``` + +**Multiple rounds** +``` +{{system_message}}user\n{{user_message_0}}\nmodel\n{{assistant_reply_0}}\nuser\n{{user_message_1}}\nmodel\n{{assistant_reply_1}}\n +``` + +**jinja template** +[[Reference](https://huggingface.co/google/gemma-1.1-2b-it/blob/bf4924f313df5166dee1467161e886e55f2eb4d4/tokenizer_config.json#L1507)] +``` +{{ bos_token }}{% if messages[0]['role'] == 'system' %}{{ raise_exception('System role not supported') }}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if (message['role'] == 'assistant') %}{% set role = 'model' %}{% else %}{% set role = message['role'] %}{% endif %}{{ '' + role + '\n' + message['content'] | trim + '\n' }}{% endfor %}{% if add_generation_prompt %}{{'model\n'}}{% endif %} +``` + +**Filled Example** +``` +You are a chatbot developed by LMFlow team.user\nWho are you?\nmodel\nI am a chatbot developed by LMFlow team.\nuser\nHow old are you?\nmodel\nI don't age like humans do. I exist as a piece of software, so I don't have a concept of age in the traditional sense.\n +``` + + +## InternLM2 +**With a system message** +``` +<|im_start|>system\n{{system_message}}<|im_end|>\n<|im_start|>user\n{{user_message_0}}<|im_end|>\n +``` + +**Without a system message** +``` +<|im_start|>user\n{{user_message_0}}<|im_end|>\n +``` + +**A complete conversation** +``` +<|im_start|>system\n{{system_message}}<|im_end|>\n<|im_start|>user\n{{user_message_0}}<|im_end|>\n<|im_start|>assistant\n{{assistant_reply_0}}<|im_end|>\n +``` + +**Multiple rounds** +``` +<|im_start|>system\n{{system_message}}<|im_end|>\n<|im_start|>user\n{{user_message_0}}<|im_end|>\n<|im_start|>assistant\n{{assistant_reply_0}}<|im_end|>\n<|im_start|>user\n{{user_message_1}}<|im_end|>\n<|im_start|>assistant\n{{assistant_reply_1}}<|im_end|>\n +``` + +**jinja template** +[[Reference](https://huggingface.co/internlm/internlm2-chat-20b/blob/477d4748322a8a3b28f62b33f0f6dd353cd0b66d/tokenizer_config.json#L93)] +``` +{{ bos_token }}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %} +``` + +**Filled Example** +``` +<|im_start|>system\nYou are a chatbot developed by LMFlow team.<|im_end|>\n<|im_start|>user\nWho are you?<|im_end|>\n<|im_start|>assistant\nI am a chatbot developed by LMFlow team.<|im_end|>\n<|im_start|>user\nHow old are you?<|im_end|>\n<|im_start|>assistant\nI don't age like humans do. 
I exist as a piece of software, so I don't have a concept of age in the traditional sense.<|im_end|>\n +``` + + +## Llama-2 +**With a system message** +``` +[INST] <>\n{{system_message}}\n<>\n\n{{user_message_0}} [/INST] +``` + +**Without a system message** +``` +[INST] {{user_message_0}} [/INST] +``` + +**A complete conversation** +``` +[INST] <>\n{{system_message}}\n<>\n\n{{user_message_0}} [/INST] {{assistant_reply_0}} +``` + +**Multiple rounds** +``` +[INST] <>\n{{system_message}}\n<>\n\n{{user_message_0}} [/INST] {{assistant_reply_0}}[INST] {{user_message_1}} [/INST] {{assistant_reply_1}} +``` + +**jinja template** +[[Reference](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf/blob/main/tokenizer_config.json#L12)] +``` +{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<>\\n' + system_message + '\\n<>\\n\\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' %}{{ bos_token + '[INST] ' + content.strip() + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ ' ' + content.strip() + ' ' + eos_token }}{% endif %}{% endfor %} +``` + +**Filled Example** +``` +[INST] <>\nYou are a chatbot developed by LMFlow team.\n<>\n\nWho are you? [/INST] I am a chatbot developed by LMFlow team.[INST] How old are you? [/INST] I don't age like humans do. I exist as a piece of software, so I don't have a concept of age in the traditional sense. 
+``` + + +## Llama-3 +**With a system message** +``` +<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n{{system_message}}<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n{{user_message_0}}<|eot_id|> +``` + +**Without a system message** +``` +<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n{{user_message_0}}<|eot_id|> +``` + +**A complete conversation** +``` +<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n{{system_message}}<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n{{user_message_0}}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n{{assistant_reply_0}}<|eot_id|> +``` + +**Multiple rounds** +``` +<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n{{system_message}}<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n{{user_message_0}}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n{{assistant_reply_0}}<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n{{user_message_1}}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n{{assistant_reply_1}}<|eot_id|> +``` + +**jinja template** +[[Reference](https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct/blob/2b724926966c141d5a60b14e75a5ef5c0ab7a6f0/tokenizer_config.json#L2053)] +``` +{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }} +``` + +**Filled Example** +``` +<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nYou are a chatbot developed by LMFlow team.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWho are you?<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\nI am a chatbot developed by LMFlow team.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nHow old are you?<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\nI don't age like humans do. I exist as a piece of software, so I don't have a concept of age in the traditional sense.<|eot_id|> +``` + + +## Mixtral 8x22B +```{admonition} **Work in Progress** +:class: info + +This template is not preseted in LMFlow currently. We are working on it and will update it soon. +``` + +```{admonition} NOTICE +:class: warning + +The conversation template for Mixtral 8x22B is slightly different from the template for Mixtral 8x7B. +``` +**jinja template** +[[Reference](https://huggingface.co/mistralai/Mixtral-8x22B-Instruct-v0.1)] +``` +{{bos_token}}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ ' [INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ ' ' + message['content'] + ' ' + eos_token}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %} +``` + + +## Mixtral 8x7B +```{admonition} **Work in Progress** +:class: info + +This template is not preseted in LMFlow currently. We are working on it and will update it soon. +``` + +```{admonition} NOTICE +:class: warning + +The conversation template for Mixtral 8x7B is slightly different from the template for Mixtral 8x22B. 
+``` +**jinja template** +[[Reference](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1/blob/1e637f2d7cb0a9d6fb1922f305cb784995190a83/tokenizer_config.json#L42)] +``` +{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %} +``` + + +## Phi-3 +**With a system message** +``` +<|system|>\n{{system_message}}<|end|>\n<|user|>\n{{user_message_0}}<|end|>\n<|endoftext|> +``` + +**Without a system message** +``` +<|user|>\n{{user_message_0}}<|end|>\n<|endoftext|> +``` + +**A complete conversation** +``` +<|system|>\n{{system_message}}<|end|>\n<|user|>\n{{user_message_0}}<|end|>\n<|assistant|>\n{{assistant_reply_0}}<|end|>\n<|endoftext|> +``` + +**Multiple rounds** +``` +<|system|>\n{{system_message}}<|end|>\n<|user|>\n{{user_message_0}}<|end|>\n<|assistant|>\n{{assistant_reply_0}}<|end|>\n<|user|>\n{{user_message_1}}<|end|>\n<|assistant|>\n{{assistant_reply_1}}<|end|>\n<|endoftext|> +``` + +**jinja template** +[[Reference]](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/blob/3a811845d89f3c1b3f41b341d0f9f05104769f35/tokenizer_config.json#L338) +``` +{{ bos_token }}{% for message in messages %}{{'<|' + message['role'] + '|>' + '\n' + message['content'] + '<|end|>\n' }}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>\n' }}{% else %}{{ eos_token }}{% endif %} +``` + +**Filled Example** +``` +<|system|>\nYou are a chatbot developed by LMFlow team.<|end|>\n<|user|>\nWho are you?<|end|>\n<|assistant|>\nI am a chatbot developed by LMFlow team.<|end|>\n<|user|>\nHow old are you?<|end|>\n<|assistant|>\nI don't age like humans do. 
I exist as a piece of software, so I don't have a concept of age in the traditional sense.<|end|>\n<|endoftext|> +``` + + +## Qwen-2 + +(Also Qwen-1.5) + +**With a system message** +``` +<|im_start|>system\n{{system_message}}<|im_end|>\n<|im_start|>user\n{{user_message_0}}<|im_end|>\n +``` + +**Without a system message** +``` +<|im_start|>user\n{{user_message_0}}<|im_end|>\n +``` + +**A complete conversation** +``` +<|im_start|>system\n{{system_message}}<|im_end|>\n<|im_start|>user\n{{user_message_0}}<|im_end|>\n<|im_start|>assistant\n{{assistant_reply_0}}<|im_end|>\n +``` + +**Multiple rounds** +``` +<|im_start|>system\n{{system_message}}<|im_end|>\n<|im_start|>user\n{{user_message_0}}<|im_end|>\n<|im_start|>assistant\n{{assistant_reply_0}}<|im_end|>\n<|im_start|>user\n{{user_message_1}}<|im_end|>\n<|im_start|>assistant\n{{assistant_reply_1}}<|im_end|>\n +``` + +**jinja template** +[[Reference](https://huggingface.co/Qwen/Qwen2-72B-Instruct/blob/1af63c698f59c4235668ec9c1395468cb7cd7e79/tokenizer_config.json#L31)] +``` +{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %} +``` + +**Filled Example** +``` +<|im_start|>system\nYou are a chatbot developed by LMFlow team.<|im_end|>\n<|im_start|>user\nWho are you?<|im_end|>\n<|im_start|>assistant\nI am a chatbot developed by LMFlow team.<|im_end|>\n<|im_start|>user\nHow old are you?<|im_end|>\n<|im_start|>assistant\nI don't age like humans do. I exist as a piece of software, so I don't have a concept of age in the traditional sense.<|im_end|>\n +``` + + +## Yi +**With a system message** +``` +<|im_start|>system\n{{system_message}}<|im_end|>\n<|im_start|>user\n{{user_message_0}}<|im_end|>\n +``` + +**Without a system message** +``` +<|im_start|>user\n{{user_message_0}}<|im_end|>\n +``` + +**A complete conversation** +``` +<|im_start|>system\n{{system_message}}<|im_end|>\n<|im_start|>user\n{{user_message_0}}<|im_end|>\n<|im_start|>assistant\n{{assistant_reply_0}}<|im_end|>\n +``` + +**Multiple rounds** +``` +<|im_start|>system\n{{system_message}}<|im_end|>\n<|im_start|>user\n{{user_message_0}}<|im_end|>\n<|im_start|>assistant\n{{assistant_reply_0}}<|im_end|>\n<|im_start|>user\n{{user_message_1}}<|im_end|>\n<|im_start|>assistant\n{{assistant_reply_1}}<|im_end|>\n +``` + +**jinja template** +[[Reference](https://huggingface.co/01-ai/Yi-34B-Chat/blob/c556c018b58980fb651ff4952d86cd5250a713d0/tokenizer_config.json#L60)] +``` +{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %} +``` + +**Filled Example** +``` +<|im_start|>system\nYou are a chatbot developed by LMFlow team.<|im_end|>\n<|im_start|>user\nWho are you?<|im_end|>\n<|im_start|>assistant\nI am a chatbot developed by LMFlow team.<|im_end|>\n<|im_start|>user\nHow old are you?<|im_end|>\n<|im_start|>assistant\nI don't age like humans do. 
I exist as a piece of software, so I don't have a concept of age in the traditional sense.<|im_end|>\n +``` + + +## Yi-1.5 +**With a system message** +``` +{{system_message}}<|im_start|>user\n{{user_message_0}}<|im_end|>\n +``` + +**Without a system message** +``` +<|im_start|>user\n{{user_message_0}}<|im_end|>\n +``` + +**A complete conversation** +``` +{{system_message}}<|im_start|>user\n{{user_message_0}}<|im_end|>\n<|im_start|>assistant\n{{assistant_reply_0}}<|im_end|>\n +``` + +**Multiple rounds** +``` +{{system_message}}<|im_start|>user\n{{user_message_0}}<|im_end|>\n<|im_start|>assistant\n{{assistant_reply_0}}<|im_end|>\n<|im_start|>user\n{{user_message_1}}<|im_end|>\n<|im_start|>assistant\n{{assistant_reply_1}}<|im_end|>\n +``` + +**jinja template** +[[Reference](https://huggingface.co/01-ai/Yi-1.5-6B-Chat/blob/d68dab90947a3c869e28c9cb2806996af99a6080/tokenizer_config.json#L40)] +``` +{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ system_message }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|im_start|>user\\n' + content + '<|im_end|>\\n<|im_start|>assistant\\n' }}{% elif message['role'] == 'assistant' %}{{ content + '<|im_end|>' + '\\n' }}{% endif %}{% endfor %} +``` + +**Filled Example** +``` +You are a chatbot developed by LMFlow team.<|im_start|>user\nWho are you?<|im_end|>\n<|im_start|>assistant\nI am a chatbot developed by LMFlow team.<|im_end|>\n<|im_start|>user\nHow old are you?<|im_end|>\n<|im_start|>assistant\nI don't age like humans do. I exist as a piece of software, so I don't have a concept of age in the traditional sense.<|im_end|>\n +``` + + +## Zephyr +**With a system message** +``` +<|system|>\n{{system_message}}\n<|user|>\n{{user_message_0}}\n +``` + +**Without a system message** +``` +<|user|>\n{{user_message_0}}\n +``` + +**A complete conversation** +``` +<|system|>\n{{system_message}}\n<|user|>\n{{user_message_0}}\n<|assistant|>\n{{assistant_reply_0}}\n +``` + +**Multiple rounds** +``` +<|system|>\n{{system_message}}\n<|user|>\n{{user_message_0}}\n<|assistant|>\n{{assistant_reply_0}}\n<|user|>\n{{user_message_1}}\n<|assistant|>\n{{assistant_reply_1}}\n +``` + +**jinja template** +[[Reference](https://huggingface.co/HuggingFaceH4/zephyr-7b-beta/blob/b70e0c9a2d9e14bd1e812d3c398e5f313e93b473/tokenizer_config.json#L34)] +``` +{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %} +``` + +**Filled Example** +``` +<|system|>\nYou are a chatbot developed by LMFlow team.\n<|user|>\nWho are you?\n<|assistant|>\nI am a chatbot developed by LMFlow team.\n<|user|>\nHow old are you?\n<|assistant|>\nI don't age like humans do. 
I exist as a piece of software, so I don't have a concept of age in the traditional sense.\n +``` \ No newline at end of file diff --git a/_sources/index.md.txt b/_sources/index.md.txt new file mode 100644 index 000000000..85f9554cc --- /dev/null +++ b/_sources/index.md.txt @@ -0,0 +1,207 @@ +LMFlow + +# LMFlow + +An extensible, convenient, and efficient toolbox for finetuning large machine learning models, designed to be user-friendly, speedy and reliable, and accessible to the entire community. + +```{eval-rst} +.. grid:: 1 2 3 4 + + .. grid-item-card:: Extensible + + Support common backbones (LLaMA, Galactica, GPT-2, etc.) + + .. grid-item-card:: Light-Weight + + Extremely few parameters with LoRA (LLaMA 33B: only 25MB storage) + + .. grid-item-card:: Task-Oriented + + Comparable with ChatGPT on 7B/33B models. + + .. grid-item-card:: Open + + The whole pipeline (data, models, tuning, inference) is open-source. + + +``` + +## Introduction + +The remarkable achievements of large foundation models, such as expansive language models, have demonstrated their exceptional capacity to attain human-like intelligence that surpasses conventional methods. Despite their growing accessibility, these models still require fine-tuning to cater to specific tasks while maintaining their overall AI competency. We are pleased to introduce our lightweight toolkit, which features thoughtfully designed and easily scalable APIs. This tool simplifies the process of fine-tuning and inference of publicly available foundation models to maximize their effectiveness. + +We have thoroughly tested this toolkit and are pleased to make it available on [Github](https://github.com/OptimalScale/LMFlow). + + + + + +## Features + + +### Task Tuning + +The goal of Task Tuning is to enhance a language model's proficiency in a particular field, such as medicine or mathematics. By doing so, the model acquires domain-specific information, allowing it to adapt better to the target subject matter. + +For instance, if a medical dataset is used for task tuning, the language model can gain medical knowledge that can be applied to other medical datasets. + +To emphasize its significance, we applied task tuning to LLaMA models on the PubMedQA and MedMCQA datasets and evaluated their performance. We observed significant improvements on both the in-domain (PubMedQA, MedMCQA) and out-of-domain (MedQA-USMLE) datasets. + + +| | PubMedQA | MedQA-USMLE | MedMCQA | Average | +|:---------:|:--------:|:-----------:|:-------:|:----:| +| Human (pass) | | 60.0 | 50.0 | | +| Human (expert) | 78.0 | 87.0 | 90.0 | 85.0 | +| | | | | | +| InstructGPT 175B | 73.2 | 46.0 | 44.0 | 54.4 | +| ChatGPT | 63.9 | **57.0** | 44.7 | 55.2 | +| LLaMA 7B | 5.2 | 27.1 | 24.3 | 18.9 | +| LLaMA 33B | 1.8 | 43.4 | 30.3 | 25.2 | +| | | | | | +| Task-tuned LLaMA 7B (Full) | **75.1** | 44.5 | 49.9 | 56.5 | +| Task-tuned LLaMA 33B (LoRA) | 74 | 51.3 | **50.2**|**58.5**| + +
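+In practice, task tuning of this kind is continued causal-language-model training on domain text. The snippet below is a minimal illustrative sketch of that idea using the Hugging Face `transformers` and `datasets` libraries; it is not LMFlow's own pipeline, and the corpus file `domain_corpus.txt` and the base checkpoint are placeholders to replace with your own. LMFlow wraps this kind of continued finetuning (optionally with LoRA) behind the pipeline described in this documentation.
+
+```python
+# Illustrative sketch only (not LMFlow's pipeline): task tuning as continued
+# causal-LM finetuning on a plain-text domain corpus. Paths and the checkpoint
+# name are placeholders.
+from datasets import load_dataset
+from transformers import (
+    AutoModelForCausalLM,
+    AutoTokenizer,
+    DataCollatorForLanguageModeling,
+    Trainer,
+    TrainingArguments,
+)
+
+base_model = "huggyllama/llama-7b"  # assumption: any causal LM checkpoint works here
+tokenizer = AutoTokenizer.from_pretrained(base_model)
+tokenizer.pad_token = tokenizer.eos_token
+model = AutoModelForCausalLM.from_pretrained(base_model)
+
+# One domain document per line in a plain-text file (hypothetical path).
+raw = load_dataset("text", data_files={"train": "domain_corpus.txt"})
+
+def tokenize(batch):
+    return tokenizer(batch["text"], truncation=True, max_length=512)
+
+train_set = raw["train"].map(tokenize, batched=True, remove_columns=["text"])
+
+trainer = Trainer(
+    model=model,
+    args=TrainingArguments(
+        output_dir="task_tuned_model",
+        per_device_train_batch_size=1,
+        gradient_accumulation_steps=16,
+        num_train_epochs=1,
+        learning_rate=2e-5,
+    ),
+    train_dataset=train_set,
+    # Causal-LM objective: labels are the input ids (shifted inside the model).
+    data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False),
+)
+trainer.train()
+```
+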
+Moreover, we also test MMLU performance to further verify the out-of-domain robustness of the Task Tuning technique. + + +| MMLU task | anatomy | clinical knowledge | college biology | college medicine | medical genetics | professional medicine | +|:-----------:|:-------:|:-----------:|:---------:|:----------:|:----------:|:---------------:| +| LLaMA 33B | 39.2 | 40.3 | 44.4 | 32.9 | 36 | 43.0 | +| Galactica 30B | 32.5 | 26 | 30.5 | 25.4 | 39 | 23.1 | +| Galactica 120B | 58.5 | 59.2 | 68.7 | 57.2 | 68.0 | 59.6 | +| OPT 175B | 28.9 | 21.9 | 30.6 | - | 35.0 | 27.9 | +| BLOOM 176B | 37 | 29.8 | 28.5 | - | 36.0 | 25.4 | +| Gopher 280B | 56.3 | 67.2 | 70.8 | 60.1 | 69.0 | 64.0 | +| GPT3.5 175B | 56.3 | 69.8 | 72.2 | 61.3 | 70 | 70.2 | +| | | | | | | | +| Task-tuned LLaMA 33B (LoRA) | 51.8 | 65.2 | 70.1 | 58.3 | 65.6 | 66.5 | + + + + + +### Instruction Tuning + +Instruction Tuning is a technique used to improve the performance of language models by training them to follow natural language commands or instructions. This includes positive or negative examples, prompts, constraints, and other elements that are commonly found in human language. The main goal of instruction-tuning is to improve the model's ability to perform well on multiple tasks and to generalize more effectively to new or unseen tasks. This is achieved by teaching the model to understand and incorporate various language cues and constraints that are relevant to the task at hand. Instruction-tuning is a powerful technique that is widely used in natural language processing, machine learning, and related areas. By improving the ability of language models to understand and follow natural language commands, this approach can help to unlock new levels of performance and productivity in a wide range of applications. + +We list some examples below. The full example list is saved as a [Jsonl file](https://github.com/OptimalScale/LMFlow/blob/main/docs/source/_static/check_before_after_lora_tuning.jsonl). + +![Instruction Tuning Sample](_static/IT_sample1.png) + +![Instruction Tuning Sample](_static/IT_sample2.png) + +![Instruction Tuning Sample](_static/IT_sample3.png) + +![Instruction Tuning Sample](_static/IT_sample4.png) + +![Instruction Tuning Sample](_static/IT_sample5.png) + +![Instruction Tuning Sample](_static/IT_sample6.png) + +![Instruction Tuning Sample](_static/IT_sample7.png) + + + + + +## Installation + + +This package can be installed from source with the following commands: + +```bash +git clone https://github.com/OptimalScale/LMFlow.git + +conda create -n lmflow python=3.9 -y + +conda activate lmflow + +conda install mpi4py + +pip install -e . +``` + + +## Checkpoints + +We have prepared tuned LLaMA models (both task-tuned and instruction-tuned). + + +Refer to [README](https://github.com/OptimalScale/LMFlow/blob/main/README.md). + + +## Content + +```{toctree} +:maxdepth: 1 + +blogs/index +``` + +```{toctree} +:maxdepth: 2 + +examples/index +``` + +```{toctree} +:maxdepth: 2 + +autoapi/index +``` + +```{toctree} +:maxdepth: 2 + +about/index +``` + +## Vision +Hello there! We are excited to announce the upcoming release of our code repository that includes a complete LLM training process, enabling users to quickly build their own language models and train them effectively. + +Our code repository is not just a simple model; it includes the complete training workflow, model optimization, and testing tools. You can use it to build various types of language models, including conversation models, question-answering models, and text generation models, among others. 
+ +Moreover, we aim to create an open and democratic LLM sharing platform where people can share their checkpoints and experiences to collectively improve the skills of the community. We welcome anyone who is interested in LLM to participate and join us in building an open and friendly community! + +Whether you are a beginner or an expert, we believe that you can benefit from this platform. Let's work together to build a vibrant and innovative LLM community! + +[![Embark](https://img.shields.io/badge/discord-LMFlow-%237289da.svg?logo=discord)](https://discord.gg/u9VJNpzhvA) +[![slack badge](https://img.shields.io/badge/Slack-join-blueviolet?logo=slack&)](https://join.slack.com/t/lmflow/shared_invite/zt-1wju9nicy-woXbNtS~5MavHSAtiMxmxQ) +[![WeChat badge](https://img.shields.io/badge/WeChat-Join-brightgreen?logo=wechat&)](https://s1.ax1x.com/2023/08/06/pPAQTPI.jpg) + + +## Citation + +```bibtex +@misc{lmflow, + author = {Shizhe Diao and Rui Pan and Hanze Dong and KaShun Shum and Jipeng Zhang and Wei Xiong and Tong Zhang}, + title = {LMFlow: An Extensible Toolkit for Finetuning and Inference of Large Foundation Models}, + year = {2023}, + publisher = {GitHub}, + journal = {GitHub repository}, + howpublished = {\url{https://optimalscale.github.io/LMFlow/}}, +} +``` + + + + +## Disclaimer + +This package aims to provide a streamlined and user-friendly pipeline for large model tuning. Its functionalities serve as a reference and are intended for use by the user. However, it is important to note that the responsibility for the preparation of the data and pretrained models lies solely with the user. This package does not guarantee the accuracy, completeness, applicability, or legality of the components from the user's preparation. Users must be aware of and assume all risks and liabilities associated with the preparation of the models and data, and obtain legal, commercial, and technical advice before utilizing this package. The pipeline shall not be held responsible for any direct, indirect, special, incidental, or consequential damages resulting from the user's improper preparation of the data and pretrained models. + +It is also crucial to highlight that the results generated by the model are based on probabilistic models and not directly related to this pipeline. The accuracy, reliability, applicability, and legality of the results are not guaranteed by this pipeline. Therefore, users must also be aware of the risks and liabilities associated with the results and seek legal, commercial, and technical advice before relying on the model-generated outcomes. This pipeline shall not be accountable for any direct, indirect, special, incidental, or consequential damages resulting from the user's reliance on the model-generated results. + + + + +## Support + +If you need any help, please submit a [Github](https://github.com/OptimalScale/LMFlow) issue. + +## Indices and tables + +- {ref}`genindex` +- {ref}`search` diff --git a/_sphinx_design_static/design-tabs.js b/_sphinx_design_static/design-tabs.js new file mode 100644 index 000000000..b25bd6a4f --- /dev/null +++ b/_sphinx_design_static/design-tabs.js @@ -0,0 +1,101 @@ +// @ts-check + +// Extra JS capability for selected tabs to be synced +// The selection is stored in local storage so that it persists across page loads. + +/** + * @type {Record} + */ +let sd_id_to_elements = {}; +const storageKeyPrefix = "sphinx-design-tab-id-"; + +/** + * Create a key for a tab element. + * @param {HTMLElement} el - The tab element. 
+ * @returns {[string, string, string] | null} - The key. + * + */ +function create_key(el) { + let syncId = el.getAttribute("data-sync-id"); + let syncGroup = el.getAttribute("data-sync-group"); + if (!syncId || !syncGroup) return null; + return [syncGroup, syncId, syncGroup + "--" + syncId]; +} + +/** + * Initialize the tab selection. + * + */ +function ready() { + // Find all tabs with sync data + + /** @type {string[]} */ + let groups = []; + + document.querySelectorAll(".sd-tab-label").forEach((label) => { + if (label instanceof HTMLElement) { + let data = create_key(label); + if (data) { + let [group, id, key] = data; + + // add click event listener + // @ts-ignore + label.onclick = onSDLabelClick; + + // store map of key to elements + if (!sd_id_to_elements[key]) { + sd_id_to_elements[key] = []; + } + sd_id_to_elements[key].push(label); + + if (groups.indexOf(group) === -1) { + groups.push(group); + // Check if a specific tab has been selected via URL parameter + const tabParam = new URLSearchParams(window.location.search).get( + group + ); + if (tabParam) { + console.log( + "sphinx-design: Selecting tab id for group '" + + group + + "' from URL parameter: " + + tabParam + ); + window.sessionStorage.setItem(storageKeyPrefix + group, tabParam); + } + } + + // Check is a specific tab has been selected previously + let previousId = window.sessionStorage.getItem( + storageKeyPrefix + group + ); + if (previousId === id) { + // console.log( + // "sphinx-design: Selecting tab from session storage: " + id + // ); + // @ts-ignore + label.previousElementSibling.checked = true; + } + } + } + }); +} + +/** + * Activate other tabs with the same sync id. + * + * @this {HTMLElement} - The element that was clicked. + */ +function onSDLabelClick() { + let data = create_key(this); + if (!data) return; + let [group, id, key] = data; + for (const label of sd_id_to_elements[key]) { + if (label === this) continue; + // @ts-ignore + label.previousElementSibling.checked = true; + } + window.sessionStorage.setItem(storageKeyPrefix + group, id); +} + +document.addEventListener("DOMContentLoaded", ready, false); diff --git a/_sphinx_design_static/sphinx-design.min.css b/_sphinx_design_static/sphinx-design.min.css new file mode 100644 index 000000000..a325746f2 --- /dev/null +++ b/_sphinx_design_static/sphinx-design.min.css @@ -0,0 +1 @@ +.sd-bg-primary{background-color:var(--sd-color-primary) !important}.sd-bg-text-primary{color:var(--sd-color-primary-text) !important}button.sd-bg-primary:focus,button.sd-bg-primary:hover{background-color:var(--sd-color-primary-highlight) !important}a.sd-bg-primary:focus,a.sd-bg-primary:hover{background-color:var(--sd-color-primary-highlight) !important}.sd-bg-secondary{background-color:var(--sd-color-secondary) !important}.sd-bg-text-secondary{color:var(--sd-color-secondary-text) !important}button.sd-bg-secondary:focus,button.sd-bg-secondary:hover{background-color:var(--sd-color-secondary-highlight) !important}a.sd-bg-secondary:focus,a.sd-bg-secondary:hover{background-color:var(--sd-color-secondary-highlight) !important}.sd-bg-success{background-color:var(--sd-color-success) !important}.sd-bg-text-success{color:var(--sd-color-success-text) !important}button.sd-bg-success:focus,button.sd-bg-success:hover{background-color:var(--sd-color-success-highlight) !important}a.sd-bg-success:focus,a.sd-bg-success:hover{background-color:var(--sd-color-success-highlight) !important}.sd-bg-info{background-color:var(--sd-color-info) !important}.sd-bg-text-info{color:var(--sd-color-info-text) 
!important}button.sd-bg-info:focus,button.sd-bg-info:hover{background-color:var(--sd-color-info-highlight) !important}a.sd-bg-info:focus,a.sd-bg-info:hover{background-color:var(--sd-color-info-highlight) !important}.sd-bg-warning{background-color:var(--sd-color-warning) !important}.sd-bg-text-warning{color:var(--sd-color-warning-text) !important}button.sd-bg-warning:focus,button.sd-bg-warning:hover{background-color:var(--sd-color-warning-highlight) !important}a.sd-bg-warning:focus,a.sd-bg-warning:hover{background-color:var(--sd-color-warning-highlight) !important}.sd-bg-danger{background-color:var(--sd-color-danger) !important}.sd-bg-text-danger{color:var(--sd-color-danger-text) !important}button.sd-bg-danger:focus,button.sd-bg-danger:hover{background-color:var(--sd-color-danger-highlight) !important}a.sd-bg-danger:focus,a.sd-bg-danger:hover{background-color:var(--sd-color-danger-highlight) !important}.sd-bg-light{background-color:var(--sd-color-light) !important}.sd-bg-text-light{color:var(--sd-color-light-text) !important}button.sd-bg-light:focus,button.sd-bg-light:hover{background-color:var(--sd-color-light-highlight) !important}a.sd-bg-light:focus,a.sd-bg-light:hover{background-color:var(--sd-color-light-highlight) !important}.sd-bg-muted{background-color:var(--sd-color-muted) !important}.sd-bg-text-muted{color:var(--sd-color-muted-text) !important}button.sd-bg-muted:focus,button.sd-bg-muted:hover{background-color:var(--sd-color-muted-highlight) !important}a.sd-bg-muted:focus,a.sd-bg-muted:hover{background-color:var(--sd-color-muted-highlight) !important}.sd-bg-dark{background-color:var(--sd-color-dark) !important}.sd-bg-text-dark{color:var(--sd-color-dark-text) !important}button.sd-bg-dark:focus,button.sd-bg-dark:hover{background-color:var(--sd-color-dark-highlight) !important}a.sd-bg-dark:focus,a.sd-bg-dark:hover{background-color:var(--sd-color-dark-highlight) !important}.sd-bg-black{background-color:var(--sd-color-black) !important}.sd-bg-text-black{color:var(--sd-color-black-text) !important}button.sd-bg-black:focus,button.sd-bg-black:hover{background-color:var(--sd-color-black-highlight) !important}a.sd-bg-black:focus,a.sd-bg-black:hover{background-color:var(--sd-color-black-highlight) !important}.sd-bg-white{background-color:var(--sd-color-white) !important}.sd-bg-text-white{color:var(--sd-color-white-text) !important}button.sd-bg-white:focus,button.sd-bg-white:hover{background-color:var(--sd-color-white-highlight) !important}a.sd-bg-white:focus,a.sd-bg-white:hover{background-color:var(--sd-color-white-highlight) !important}.sd-text-primary,.sd-text-primary>p{color:var(--sd-color-primary) !important}a.sd-text-primary:focus,a.sd-text-primary:hover{color:var(--sd-color-primary-highlight) !important}.sd-text-secondary,.sd-text-secondary>p{color:var(--sd-color-secondary) !important}a.sd-text-secondary:focus,a.sd-text-secondary:hover{color:var(--sd-color-secondary-highlight) !important}.sd-text-success,.sd-text-success>p{color:var(--sd-color-success) !important}a.sd-text-success:focus,a.sd-text-success:hover{color:var(--sd-color-success-highlight) !important}.sd-text-info,.sd-text-info>p{color:var(--sd-color-info) !important}a.sd-text-info:focus,a.sd-text-info:hover{color:var(--sd-color-info-highlight) !important}.sd-text-warning,.sd-text-warning>p{color:var(--sd-color-warning) !important}a.sd-text-warning:focus,a.sd-text-warning:hover{color:var(--sd-color-warning-highlight) !important}.sd-text-danger,.sd-text-danger>p{color:var(--sd-color-danger) 
!important}a.sd-text-danger:focus,a.sd-text-danger:hover{color:var(--sd-color-danger-highlight) !important}.sd-text-light,.sd-text-light>p{color:var(--sd-color-light) !important}a.sd-text-light:focus,a.sd-text-light:hover{color:var(--sd-color-light-highlight) !important}.sd-text-muted,.sd-text-muted>p{color:var(--sd-color-muted) !important}a.sd-text-muted:focus,a.sd-text-muted:hover{color:var(--sd-color-muted-highlight) !important}.sd-text-dark,.sd-text-dark>p{color:var(--sd-color-dark) !important}a.sd-text-dark:focus,a.sd-text-dark:hover{color:var(--sd-color-dark-highlight) !important}.sd-text-black,.sd-text-black>p{color:var(--sd-color-black) !important}a.sd-text-black:focus,a.sd-text-black:hover{color:var(--sd-color-black-highlight) !important}.sd-text-white,.sd-text-white>p{color:var(--sd-color-white) !important}a.sd-text-white:focus,a.sd-text-white:hover{color:var(--sd-color-white-highlight) !important}.sd-outline-primary{border-color:var(--sd-color-primary) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-primary:focus,a.sd-outline-primary:hover{border-color:var(--sd-color-primary-highlight) !important}.sd-outline-secondary{border-color:var(--sd-color-secondary) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-secondary:focus,a.sd-outline-secondary:hover{border-color:var(--sd-color-secondary-highlight) !important}.sd-outline-success{border-color:var(--sd-color-success) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-success:focus,a.sd-outline-success:hover{border-color:var(--sd-color-success-highlight) !important}.sd-outline-info{border-color:var(--sd-color-info) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-info:focus,a.sd-outline-info:hover{border-color:var(--sd-color-info-highlight) !important}.sd-outline-warning{border-color:var(--sd-color-warning) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-warning:focus,a.sd-outline-warning:hover{border-color:var(--sd-color-warning-highlight) !important}.sd-outline-danger{border-color:var(--sd-color-danger) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-danger:focus,a.sd-outline-danger:hover{border-color:var(--sd-color-danger-highlight) !important}.sd-outline-light{border-color:var(--sd-color-light) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-light:focus,a.sd-outline-light:hover{border-color:var(--sd-color-light-highlight) !important}.sd-outline-muted{border-color:var(--sd-color-muted) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-muted:focus,a.sd-outline-muted:hover{border-color:var(--sd-color-muted-highlight) !important}.sd-outline-dark{border-color:var(--sd-color-dark) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-dark:focus,a.sd-outline-dark:hover{border-color:var(--sd-color-dark-highlight) !important}.sd-outline-black{border-color:var(--sd-color-black) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-black:focus,a.sd-outline-black:hover{border-color:var(--sd-color-black-highlight) !important}.sd-outline-white{border-color:var(--sd-color-white) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-white:focus,a.sd-outline-white:hover{border-color:var(--sd-color-white-highlight) !important}.sd-bg-transparent{background-color:transparent 
!important}.sd-outline-transparent{border-color:transparent !important}.sd-text-transparent{color:transparent !important}.sd-p-0{padding:0 !important}.sd-pt-0,.sd-py-0{padding-top:0 !important}.sd-pr-0,.sd-px-0{padding-right:0 !important}.sd-pb-0,.sd-py-0{padding-bottom:0 !important}.sd-pl-0,.sd-px-0{padding-left:0 !important}.sd-p-1{padding:.25rem !important}.sd-pt-1,.sd-py-1{padding-top:.25rem !important}.sd-pr-1,.sd-px-1{padding-right:.25rem !important}.sd-pb-1,.sd-py-1{padding-bottom:.25rem !important}.sd-pl-1,.sd-px-1{padding-left:.25rem !important}.sd-p-2{padding:.5rem !important}.sd-pt-2,.sd-py-2{padding-top:.5rem !important}.sd-pr-2,.sd-px-2{padding-right:.5rem !important}.sd-pb-2,.sd-py-2{padding-bottom:.5rem !important}.sd-pl-2,.sd-px-2{padding-left:.5rem !important}.sd-p-3{padding:1rem !important}.sd-pt-3,.sd-py-3{padding-top:1rem !important}.sd-pr-3,.sd-px-3{padding-right:1rem !important}.sd-pb-3,.sd-py-3{padding-bottom:1rem !important}.sd-pl-3,.sd-px-3{padding-left:1rem !important}.sd-p-4{padding:1.5rem !important}.sd-pt-4,.sd-py-4{padding-top:1.5rem !important}.sd-pr-4,.sd-px-4{padding-right:1.5rem !important}.sd-pb-4,.sd-py-4{padding-bottom:1.5rem !important}.sd-pl-4,.sd-px-4{padding-left:1.5rem !important}.sd-p-5{padding:3rem !important}.sd-pt-5,.sd-py-5{padding-top:3rem !important}.sd-pr-5,.sd-px-5{padding-right:3rem !important}.sd-pb-5,.sd-py-5{padding-bottom:3rem !important}.sd-pl-5,.sd-px-5{padding-left:3rem !important}.sd-m-auto{margin:auto !important}.sd-mt-auto,.sd-my-auto{margin-top:auto !important}.sd-mr-auto,.sd-mx-auto{margin-right:auto !important}.sd-mb-auto,.sd-my-auto{margin-bottom:auto !important}.sd-ml-auto,.sd-mx-auto{margin-left:auto !important}.sd-m-0{margin:0 !important}.sd-mt-0,.sd-my-0{margin-top:0 !important}.sd-mr-0,.sd-mx-0{margin-right:0 !important}.sd-mb-0,.sd-my-0{margin-bottom:0 !important}.sd-ml-0,.sd-mx-0{margin-left:0 !important}.sd-m-1{margin:.25rem !important}.sd-mt-1,.sd-my-1{margin-top:.25rem !important}.sd-mr-1,.sd-mx-1{margin-right:.25rem !important}.sd-mb-1,.sd-my-1{margin-bottom:.25rem !important}.sd-ml-1,.sd-mx-1{margin-left:.25rem !important}.sd-m-2{margin:.5rem !important}.sd-mt-2,.sd-my-2{margin-top:.5rem !important}.sd-mr-2,.sd-mx-2{margin-right:.5rem !important}.sd-mb-2,.sd-my-2{margin-bottom:.5rem !important}.sd-ml-2,.sd-mx-2{margin-left:.5rem !important}.sd-m-3{margin:1rem !important}.sd-mt-3,.sd-my-3{margin-top:1rem !important}.sd-mr-3,.sd-mx-3{margin-right:1rem !important}.sd-mb-3,.sd-my-3{margin-bottom:1rem !important}.sd-ml-3,.sd-mx-3{margin-left:1rem !important}.sd-m-4{margin:1.5rem !important}.sd-mt-4,.sd-my-4{margin-top:1.5rem !important}.sd-mr-4,.sd-mx-4{margin-right:1.5rem !important}.sd-mb-4,.sd-my-4{margin-bottom:1.5rem !important}.sd-ml-4,.sd-mx-4{margin-left:1.5rem !important}.sd-m-5{margin:3rem !important}.sd-mt-5,.sd-my-5{margin-top:3rem !important}.sd-mr-5,.sd-mx-5{margin-right:3rem !important}.sd-mb-5,.sd-my-5{margin-bottom:3rem !important}.sd-ml-5,.sd-mx-5{margin-left:3rem !important}.sd-w-25{width:25% !important}.sd-w-50{width:50% !important}.sd-w-75{width:75% !important}.sd-w-100{width:100% !important}.sd-w-auto{width:auto !important}.sd-h-25{height:25% !important}.sd-h-50{height:50% !important}.sd-h-75{height:75% !important}.sd-h-100{height:100% !important}.sd-h-auto{height:auto !important}.sd-d-none{display:none !important}.sd-d-inline{display:inline !important}.sd-d-inline-block{display:inline-block !important}.sd-d-block{display:block !important}.sd-d-grid{display:grid 
!important}.sd-d-flex-row{display:-ms-flexbox !important;display:flex !important;flex-direction:row !important}.sd-d-flex-column{display:-ms-flexbox !important;display:flex !important;flex-direction:column !important}.sd-d-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}@media(min-width: 576px){.sd-d-sm-none{display:none !important}.sd-d-sm-inline{display:inline !important}.sd-d-sm-inline-block{display:inline-block !important}.sd-d-sm-block{display:block !important}.sd-d-sm-grid{display:grid !important}.sd-d-sm-flex{display:-ms-flexbox !important;display:flex !important}.sd-d-sm-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}}@media(min-width: 768px){.sd-d-md-none{display:none !important}.sd-d-md-inline{display:inline !important}.sd-d-md-inline-block{display:inline-block !important}.sd-d-md-block{display:block !important}.sd-d-md-grid{display:grid !important}.sd-d-md-flex{display:-ms-flexbox !important;display:flex !important}.sd-d-md-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}}@media(min-width: 992px){.sd-d-lg-none{display:none !important}.sd-d-lg-inline{display:inline !important}.sd-d-lg-inline-block{display:inline-block !important}.sd-d-lg-block{display:block !important}.sd-d-lg-grid{display:grid !important}.sd-d-lg-flex{display:-ms-flexbox !important;display:flex !important}.sd-d-lg-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}}@media(min-width: 1200px){.sd-d-xl-none{display:none !important}.sd-d-xl-inline{display:inline !important}.sd-d-xl-inline-block{display:inline-block !important}.sd-d-xl-block{display:block !important}.sd-d-xl-grid{display:grid !important}.sd-d-xl-flex{display:-ms-flexbox !important;display:flex !important}.sd-d-xl-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}}.sd-align-major-start{justify-content:flex-start !important}.sd-align-major-end{justify-content:flex-end !important}.sd-align-major-center{justify-content:center !important}.sd-align-major-justify{justify-content:space-between !important}.sd-align-major-spaced{justify-content:space-evenly !important}.sd-align-minor-start{align-items:flex-start !important}.sd-align-minor-end{align-items:flex-end !important}.sd-align-minor-center{align-items:center !important}.sd-align-minor-stretch{align-items:stretch !important}.sd-text-justify{text-align:justify !important}.sd-text-left{text-align:left !important}.sd-text-right{text-align:right !important}.sd-text-center{text-align:center !important}.sd-font-weight-light{font-weight:300 !important}.sd-font-weight-lighter{font-weight:lighter !important}.sd-font-weight-normal{font-weight:400 !important}.sd-font-weight-bold{font-weight:700 !important}.sd-font-weight-bolder{font-weight:bolder !important}.sd-font-italic{font-style:italic !important}.sd-text-decoration-none{text-decoration:none !important}.sd-text-lowercase{text-transform:lowercase !important}.sd-text-uppercase{text-transform:uppercase !important}.sd-text-capitalize{text-transform:capitalize !important}.sd-text-wrap{white-space:normal !important}.sd-text-nowrap{white-space:nowrap !important}.sd-text-truncate{overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.sd-fs-1,.sd-fs-1>p{font-size:calc(1.375rem + 1.5vw) !important;line-height:unset !important}.sd-fs-2,.sd-fs-2>p{font-size:calc(1.325rem + 0.9vw) !important;line-height:unset !important}.sd-fs-3,.sd-fs-3>p{font-size:calc(1.3rem + 0.6vw) !important;line-height:unset 
!important}.sd-fs-4,.sd-fs-4>p{font-size:calc(1.275rem + 0.3vw) !important;line-height:unset !important}.sd-fs-5,.sd-fs-5>p{font-size:1.25rem !important;line-height:unset !important}.sd-fs-6,.sd-fs-6>p{font-size:1rem !important;line-height:unset !important}.sd-border-0{border:0 solid !important}.sd-border-top-0{border-top:0 solid !important}.sd-border-bottom-0{border-bottom:0 solid !important}.sd-border-right-0{border-right:0 solid !important}.sd-border-left-0{border-left:0 solid !important}.sd-border-1{border:1px solid !important}.sd-border-top-1{border-top:1px solid !important}.sd-border-bottom-1{border-bottom:1px solid !important}.sd-border-right-1{border-right:1px solid !important}.sd-border-left-1{border-left:1px solid !important}.sd-border-2{border:2px solid !important}.sd-border-top-2{border-top:2px solid !important}.sd-border-bottom-2{border-bottom:2px solid !important}.sd-border-right-2{border-right:2px solid !important}.sd-border-left-2{border-left:2px solid !important}.sd-border-3{border:3px solid !important}.sd-border-top-3{border-top:3px solid !important}.sd-border-bottom-3{border-bottom:3px solid !important}.sd-border-right-3{border-right:3px solid !important}.sd-border-left-3{border-left:3px solid !important}.sd-border-4{border:4px solid !important}.sd-border-top-4{border-top:4px solid !important}.sd-border-bottom-4{border-bottom:4px solid !important}.sd-border-right-4{border-right:4px solid !important}.sd-border-left-4{border-left:4px solid !important}.sd-border-5{border:5px solid !important}.sd-border-top-5{border-top:5px solid !important}.sd-border-bottom-5{border-bottom:5px solid !important}.sd-border-right-5{border-right:5px solid !important}.sd-border-left-5{border-left:5px solid !important}.sd-rounded-0{border-radius:0 !important}.sd-rounded-1{border-radius:.2rem !important}.sd-rounded-2{border-radius:.3rem !important}.sd-rounded-3{border-radius:.5rem !important}.sd-rounded-pill{border-radius:50rem !important}.sd-rounded-circle{border-radius:50% !important}.shadow-none{box-shadow:none !important}.sd-shadow-sm{box-shadow:0 .125rem .25rem var(--sd-color-shadow) !important}.sd-shadow-md{box-shadow:0 .5rem 1rem var(--sd-color-shadow) !important}.sd-shadow-lg{box-shadow:0 1rem 3rem var(--sd-color-shadow) !important}@keyframes sd-slide-from-left{0%{transform:translateX(-100%)}100%{transform:translateX(0)}}@keyframes sd-slide-from-right{0%{transform:translateX(200%)}100%{transform:translateX(0)}}@keyframes sd-grow100{0%{transform:scale(0);opacity:.5}100%{transform:scale(1);opacity:1}}@keyframes sd-grow50{0%{transform:scale(0.5);opacity:.5}100%{transform:scale(1);opacity:1}}@keyframes sd-grow50-rot20{0%{transform:scale(0.5) rotateZ(-20deg);opacity:.5}75%{transform:scale(1) rotateZ(5deg);opacity:1}95%{transform:scale(1) rotateZ(-1deg);opacity:1}100%{transform:scale(1) rotateZ(0);opacity:1}}.sd-animate-slide-from-left{animation:1s ease-out 0s 1 normal none running sd-slide-from-left}.sd-animate-slide-from-right{animation:1s ease-out 0s 1 normal none running sd-slide-from-right}.sd-animate-grow100{animation:1s ease-out 0s 1 normal none running sd-grow100}.sd-animate-grow50{animation:1s ease-out 0s 1 normal none running sd-grow50}.sd-animate-grow50-rot20{animation:1s ease-out 0s 1 normal none running sd-grow50-rot20}.sd-badge{display:inline-block;padding:.35em .65em;font-size:.75em;font-weight:700;line-height:1;text-align:center;white-space:nowrap;vertical-align:baseline;border-radius:.25rem}.sd-badge:empty{display:none}a.sd-badge{text-decoration:none}.sd-btn 
.sd-badge{position:relative;top:-1px}.sd-btn{background-color:transparent;border:1px solid transparent;border-radius:.25rem;cursor:pointer;display:inline-block;font-weight:400;font-size:1rem;line-height:1.5;padding:.375rem .75rem;text-align:center;text-decoration:none;transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out;vertical-align:middle;user-select:none;-moz-user-select:none;-ms-user-select:none;-webkit-user-select:none}.sd-btn:hover{text-decoration:none}@media(prefers-reduced-motion: reduce){.sd-btn{transition:none}}.sd-btn-primary,.sd-btn-outline-primary:hover,.sd-btn-outline-primary:focus{color:var(--sd-color-primary-text) !important;background-color:var(--sd-color-primary) !important;border-color:var(--sd-color-primary) !important;border-width:1px !important;border-style:solid !important}.sd-btn-primary:hover,.sd-btn-primary:focus{color:var(--sd-color-primary-text) !important;background-color:var(--sd-color-primary-highlight) !important;border-color:var(--sd-color-primary-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-primary{color:var(--sd-color-primary) !important;border-color:var(--sd-color-primary) !important;border-width:1px !important;border-style:solid !important}.sd-btn-secondary,.sd-btn-outline-secondary:hover,.sd-btn-outline-secondary:focus{color:var(--sd-color-secondary-text) !important;background-color:var(--sd-color-secondary) !important;border-color:var(--sd-color-secondary) !important;border-width:1px !important;border-style:solid !important}.sd-btn-secondary:hover,.sd-btn-secondary:focus{color:var(--sd-color-secondary-text) !important;background-color:var(--sd-color-secondary-highlight) !important;border-color:var(--sd-color-secondary-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-secondary{color:var(--sd-color-secondary) !important;border-color:var(--sd-color-secondary) !important;border-width:1px !important;border-style:solid !important}.sd-btn-success,.sd-btn-outline-success:hover,.sd-btn-outline-success:focus{color:var(--sd-color-success-text) !important;background-color:var(--sd-color-success) !important;border-color:var(--sd-color-success) !important;border-width:1px !important;border-style:solid !important}.sd-btn-success:hover,.sd-btn-success:focus{color:var(--sd-color-success-text) !important;background-color:var(--sd-color-success-highlight) !important;border-color:var(--sd-color-success-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-success{color:var(--sd-color-success) !important;border-color:var(--sd-color-success) !important;border-width:1px !important;border-style:solid !important}.sd-btn-info,.sd-btn-outline-info:hover,.sd-btn-outline-info:focus{color:var(--sd-color-info-text) !important;background-color:var(--sd-color-info) !important;border-color:var(--sd-color-info) !important;border-width:1px !important;border-style:solid !important}.sd-btn-info:hover,.sd-btn-info:focus{color:var(--sd-color-info-text) !important;background-color:var(--sd-color-info-highlight) !important;border-color:var(--sd-color-info-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-info{color:var(--sd-color-info) !important;border-color:var(--sd-color-info) !important;border-width:1px !important;border-style:solid 
!important}.sd-btn-warning,.sd-btn-outline-warning:hover,.sd-btn-outline-warning:focus{color:var(--sd-color-warning-text) !important;background-color:var(--sd-color-warning) !important;border-color:var(--sd-color-warning) !important;border-width:1px !important;border-style:solid !important}.sd-btn-warning:hover,.sd-btn-warning:focus{color:var(--sd-color-warning-text) !important;background-color:var(--sd-color-warning-highlight) !important;border-color:var(--sd-color-warning-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-warning{color:var(--sd-color-warning) !important;border-color:var(--sd-color-warning) !important;border-width:1px !important;border-style:solid !important}.sd-btn-danger,.sd-btn-outline-danger:hover,.sd-btn-outline-danger:focus{color:var(--sd-color-danger-text) !important;background-color:var(--sd-color-danger) !important;border-color:var(--sd-color-danger) !important;border-width:1px !important;border-style:solid !important}.sd-btn-danger:hover,.sd-btn-danger:focus{color:var(--sd-color-danger-text) !important;background-color:var(--sd-color-danger-highlight) !important;border-color:var(--sd-color-danger-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-danger{color:var(--sd-color-danger) !important;border-color:var(--sd-color-danger) !important;border-width:1px !important;border-style:solid !important}.sd-btn-light,.sd-btn-outline-light:hover,.sd-btn-outline-light:focus{color:var(--sd-color-light-text) !important;background-color:var(--sd-color-light) !important;border-color:var(--sd-color-light) !important;border-width:1px !important;border-style:solid !important}.sd-btn-light:hover,.sd-btn-light:focus{color:var(--sd-color-light-text) !important;background-color:var(--sd-color-light-highlight) !important;border-color:var(--sd-color-light-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-light{color:var(--sd-color-light) !important;border-color:var(--sd-color-light) !important;border-width:1px !important;border-style:solid !important}.sd-btn-muted,.sd-btn-outline-muted:hover,.sd-btn-outline-muted:focus{color:var(--sd-color-muted-text) !important;background-color:var(--sd-color-muted) !important;border-color:var(--sd-color-muted) !important;border-width:1px !important;border-style:solid !important}.sd-btn-muted:hover,.sd-btn-muted:focus{color:var(--sd-color-muted-text) !important;background-color:var(--sd-color-muted-highlight) !important;border-color:var(--sd-color-muted-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-muted{color:var(--sd-color-muted) !important;border-color:var(--sd-color-muted) !important;border-width:1px !important;border-style:solid !important}.sd-btn-dark,.sd-btn-outline-dark:hover,.sd-btn-outline-dark:focus{color:var(--sd-color-dark-text) !important;background-color:var(--sd-color-dark) !important;border-color:var(--sd-color-dark) !important;border-width:1px !important;border-style:solid !important}.sd-btn-dark:hover,.sd-btn-dark:focus{color:var(--sd-color-dark-text) !important;background-color:var(--sd-color-dark-highlight) !important;border-color:var(--sd-color-dark-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-dark{color:var(--sd-color-dark) !important;border-color:var(--sd-color-dark) !important;border-width:1px !important;border-style:solid 
!important}.sd-btn-black,.sd-btn-outline-black:hover,.sd-btn-outline-black:focus{color:var(--sd-color-black-text) !important;background-color:var(--sd-color-black) !important;border-color:var(--sd-color-black) !important;border-width:1px !important;border-style:solid !important}.sd-btn-black:hover,.sd-btn-black:focus{color:var(--sd-color-black-text) !important;background-color:var(--sd-color-black-highlight) !important;border-color:var(--sd-color-black-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-black{color:var(--sd-color-black) !important;border-color:var(--sd-color-black) !important;border-width:1px !important;border-style:solid !important}.sd-btn-white,.sd-btn-outline-white:hover,.sd-btn-outline-white:focus{color:var(--sd-color-white-text) !important;background-color:var(--sd-color-white) !important;border-color:var(--sd-color-white) !important;border-width:1px !important;border-style:solid !important}.sd-btn-white:hover,.sd-btn-white:focus{color:var(--sd-color-white-text) !important;background-color:var(--sd-color-white-highlight) !important;border-color:var(--sd-color-white-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-white{color:var(--sd-color-white) !important;border-color:var(--sd-color-white) !important;border-width:1px !important;border-style:solid !important}.sd-stretched-link::after{position:absolute;top:0;right:0;bottom:0;left:0;z-index:1;content:""}.sd-hide-link-text{font-size:0}.sd-octicon,.sd-material-icon{display:inline-block;fill:currentColor;vertical-align:middle}.sd-avatar-xs{border-radius:50%;object-fit:cover;object-position:center;width:1rem;height:1rem}.sd-avatar-sm{border-radius:50%;object-fit:cover;object-position:center;width:3rem;height:3rem}.sd-avatar-md{border-radius:50%;object-fit:cover;object-position:center;width:5rem;height:5rem}.sd-avatar-lg{border-radius:50%;object-fit:cover;object-position:center;width:7rem;height:7rem}.sd-avatar-xl{border-radius:50%;object-fit:cover;object-position:center;width:10rem;height:10rem}.sd-avatar-inherit{border-radius:50%;object-fit:cover;object-position:center;width:inherit;height:inherit}.sd-avatar-initial{border-radius:50%;object-fit:cover;object-position:center;width:initial;height:initial}.sd-card{background-clip:border-box;background-color:var(--sd-color-card-background);border:1px solid var(--sd-color-card-border);border-radius:.25rem;color:var(--sd-color-card-text);display:-ms-flexbox;display:flex;-ms-flex-direction:column;flex-direction:column;min-width:0;position:relative;word-wrap:break-word}.sd-card>hr{margin-left:0;margin-right:0}.sd-card-hover:hover{border-color:var(--sd-color-card-border-hover);transform:scale(1.01)}.sd-card-body{-ms-flex:1 1 auto;flex:1 1 auto;padding:1rem 1rem}.sd-card-title{margin-bottom:.5rem}.sd-card-subtitle{margin-top:-0.25rem;margin-bottom:0}.sd-card-text:last-child{margin-bottom:0}.sd-card-link:hover{text-decoration:none}.sd-card-link+.card-link{margin-left:1rem}.sd-card-header{padding:.5rem 1rem;margin-bottom:0;background-color:var(--sd-color-card-header);border-bottom:1px solid var(--sd-color-card-border)}.sd-card-header:first-child{border-radius:calc(0.25rem - 1px) calc(0.25rem - 1px) 0 0}.sd-card-footer{padding:.5rem 1rem;background-color:var(--sd-color-card-footer);border-top:1px solid var(--sd-color-card-border)}.sd-card-footer:last-child{border-radius:0 0 calc(0.25rem - 1px) calc(0.25rem - 
1px)}.sd-card-header-tabs{margin-right:-0.5rem;margin-bottom:-0.5rem;margin-left:-0.5rem;border-bottom:0}.sd-card-header-pills{margin-right:-0.5rem;margin-left:-0.5rem}.sd-card-img-overlay{position:absolute;top:0;right:0;bottom:0;left:0;padding:1rem;border-radius:calc(0.25rem - 1px)}.sd-card-img,.sd-card-img-bottom,.sd-card-img-top{width:100%}.sd-card-img,.sd-card-img-top{border-top-left-radius:calc(0.25rem - 1px);border-top-right-radius:calc(0.25rem - 1px)}.sd-card-img,.sd-card-img-bottom{border-bottom-left-radius:calc(0.25rem - 1px);border-bottom-right-radius:calc(0.25rem - 1px)}.sd-cards-carousel{width:100%;display:flex;flex-wrap:nowrap;-ms-flex-direction:row;flex-direction:row;overflow-x:hidden;scroll-snap-type:x mandatory}.sd-cards-carousel.sd-show-scrollbar{overflow-x:auto}.sd-cards-carousel:hover,.sd-cards-carousel:focus{overflow-x:auto}.sd-cards-carousel>.sd-card{flex-shrink:0;scroll-snap-align:start}.sd-cards-carousel>.sd-card:not(:last-child){margin-right:3px}.sd-card-cols-1>.sd-card{width:90%}.sd-card-cols-2>.sd-card{width:45%}.sd-card-cols-3>.sd-card{width:30%}.sd-card-cols-4>.sd-card{width:22.5%}.sd-card-cols-5>.sd-card{width:18%}.sd-card-cols-6>.sd-card{width:15%}.sd-card-cols-7>.sd-card{width:12.8571428571%}.sd-card-cols-8>.sd-card{width:11.25%}.sd-card-cols-9>.sd-card{width:10%}.sd-card-cols-10>.sd-card{width:9%}.sd-card-cols-11>.sd-card{width:8.1818181818%}.sd-card-cols-12>.sd-card{width:7.5%}.sd-container,.sd-container-fluid,.sd-container-lg,.sd-container-md,.sd-container-sm,.sd-container-xl{margin-left:auto;margin-right:auto;padding-left:var(--sd-gutter-x, 0.75rem);padding-right:var(--sd-gutter-x, 0.75rem);width:100%}@media(min-width: 576px){.sd-container-sm,.sd-container{max-width:540px}}@media(min-width: 768px){.sd-container-md,.sd-container-sm,.sd-container{max-width:720px}}@media(min-width: 992px){.sd-container-lg,.sd-container-md,.sd-container-sm,.sd-container{max-width:960px}}@media(min-width: 1200px){.sd-container-xl,.sd-container-lg,.sd-container-md,.sd-container-sm,.sd-container{max-width:1140px}}.sd-row{--sd-gutter-x: 1.5rem;--sd-gutter-y: 0;display:-ms-flexbox;display:flex;-ms-flex-wrap:wrap;flex-wrap:wrap;margin-top:calc(var(--sd-gutter-y) * -1);margin-right:calc(var(--sd-gutter-x) * -0.5);margin-left:calc(var(--sd-gutter-x) * -0.5)}.sd-row>*{box-sizing:border-box;flex-shrink:0;width:100%;max-width:100%;padding-right:calc(var(--sd-gutter-x) * 0.5);padding-left:calc(var(--sd-gutter-x) * 0.5);margin-top:var(--sd-gutter-y)}.sd-col{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-auto>*{flex:0 0 auto;width:auto}.sd-row-cols-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-6>*{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}@media(min-width: 576px){.sd-col-sm{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-sm-auto{flex:1 0 auto;-ms-flex:1 0 auto;width:100%}.sd-row-cols-sm-1>*{flex:0 0 auto;-ms-flex:0 0 
auto;width:100%}.sd-row-cols-sm-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-sm-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-sm-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-sm-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-sm-6>*{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-sm-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-sm-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-sm-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-sm-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-sm-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-sm-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}}@media(min-width: 768px){.sd-col-md{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-md-auto{flex:1 0 auto;-ms-flex:1 0 auto;width:100%}.sd-row-cols-md-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-md-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-md-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-md-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-md-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-md-6>*{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-md-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-md-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-md-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-md-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-md-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-md-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}}@media(min-width: 992px){.sd-col-lg{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-lg-auto{flex:1 0 auto;-ms-flex:1 0 auto;width:100%}.sd-row-cols-lg-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-lg-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-lg-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-lg-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-lg-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-lg-6>*{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-lg-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-lg-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-lg-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-lg-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-lg-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-lg-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}}@media(min-width: 1200px){.sd-col-xl{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-xl-auto{flex:1 0 auto;-ms-flex:1 0 auto;width:100%}.sd-row-cols-xl-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-xl-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-xl-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-xl-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-xl-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-xl-6>*{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-xl-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-xl-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-xl-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-xl-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-xl-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-xl-12>*{flex:0 0 
auto;-ms-flex:0 0 auto;width:8.3333333333%}}.sd-col-auto{flex:0 0 auto;-ms-flex:0 0 auto;width:auto}.sd-col-1{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}.sd-col-2{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-col-3{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-col-4{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-col-5{flex:0 0 auto;-ms-flex:0 0 auto;width:41.6666666667%}.sd-col-6{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-col-7{flex:0 0 auto;-ms-flex:0 0 auto;width:58.3333333333%}.sd-col-8{flex:0 0 auto;-ms-flex:0 0 auto;width:66.6666666667%}.sd-col-9{flex:0 0 auto;-ms-flex:0 0 auto;width:75%}.sd-col-10{flex:0 0 auto;-ms-flex:0 0 auto;width:83.3333333333%}.sd-col-11{flex:0 0 auto;-ms-flex:0 0 auto;width:91.6666666667%}.sd-col-12{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-g-0,.sd-gy-0{--sd-gutter-y: 0}.sd-g-0,.sd-gx-0{--sd-gutter-x: 0}.sd-g-1,.sd-gy-1{--sd-gutter-y: 0.25rem}.sd-g-1,.sd-gx-1{--sd-gutter-x: 0.25rem}.sd-g-2,.sd-gy-2{--sd-gutter-y: 0.5rem}.sd-g-2,.sd-gx-2{--sd-gutter-x: 0.5rem}.sd-g-3,.sd-gy-3{--sd-gutter-y: 1rem}.sd-g-3,.sd-gx-3{--sd-gutter-x: 1rem}.sd-g-4,.sd-gy-4{--sd-gutter-y: 1.5rem}.sd-g-4,.sd-gx-4{--sd-gutter-x: 1.5rem}.sd-g-5,.sd-gy-5{--sd-gutter-y: 3rem}.sd-g-5,.sd-gx-5{--sd-gutter-x: 3rem}@media(min-width: 576px){.sd-col-sm-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto}.sd-col-sm-1{-ms-flex:0 0 auto;flex:0 0 auto;width:8.3333333333%}.sd-col-sm-2{-ms-flex:0 0 auto;flex:0 0 auto;width:16.6666666667%}.sd-col-sm-3{-ms-flex:0 0 auto;flex:0 0 auto;width:25%}.sd-col-sm-4{-ms-flex:0 0 auto;flex:0 0 auto;width:33.3333333333%}.sd-col-sm-5{-ms-flex:0 0 auto;flex:0 0 auto;width:41.6666666667%}.sd-col-sm-6{-ms-flex:0 0 auto;flex:0 0 auto;width:50%}.sd-col-sm-7{-ms-flex:0 0 auto;flex:0 0 auto;width:58.3333333333%}.sd-col-sm-8{-ms-flex:0 0 auto;flex:0 0 auto;width:66.6666666667%}.sd-col-sm-9{-ms-flex:0 0 auto;flex:0 0 auto;width:75%}.sd-col-sm-10{-ms-flex:0 0 auto;flex:0 0 auto;width:83.3333333333%}.sd-col-sm-11{-ms-flex:0 0 auto;flex:0 0 auto;width:91.6666666667%}.sd-col-sm-12{-ms-flex:0 0 auto;flex:0 0 auto;width:100%}.sd-g-sm-0,.sd-gy-sm-0{--sd-gutter-y: 0}.sd-g-sm-0,.sd-gx-sm-0{--sd-gutter-x: 0}.sd-g-sm-1,.sd-gy-sm-1{--sd-gutter-y: 0.25rem}.sd-g-sm-1,.sd-gx-sm-1{--sd-gutter-x: 0.25rem}.sd-g-sm-2,.sd-gy-sm-2{--sd-gutter-y: 0.5rem}.sd-g-sm-2,.sd-gx-sm-2{--sd-gutter-x: 0.5rem}.sd-g-sm-3,.sd-gy-sm-3{--sd-gutter-y: 1rem}.sd-g-sm-3,.sd-gx-sm-3{--sd-gutter-x: 1rem}.sd-g-sm-4,.sd-gy-sm-4{--sd-gutter-y: 1.5rem}.sd-g-sm-4,.sd-gx-sm-4{--sd-gutter-x: 1.5rem}.sd-g-sm-5,.sd-gy-sm-5{--sd-gutter-y: 3rem}.sd-g-sm-5,.sd-gx-sm-5{--sd-gutter-x: 3rem}}@media(min-width: 768px){.sd-col-md-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto}.sd-col-md-1{-ms-flex:0 0 auto;flex:0 0 auto;width:8.3333333333%}.sd-col-md-2{-ms-flex:0 0 auto;flex:0 0 auto;width:16.6666666667%}.sd-col-md-3{-ms-flex:0 0 auto;flex:0 0 auto;width:25%}.sd-col-md-4{-ms-flex:0 0 auto;flex:0 0 auto;width:33.3333333333%}.sd-col-md-5{-ms-flex:0 0 auto;flex:0 0 auto;width:41.6666666667%}.sd-col-md-6{-ms-flex:0 0 auto;flex:0 0 auto;width:50%}.sd-col-md-7{-ms-flex:0 0 auto;flex:0 0 auto;width:58.3333333333%}.sd-col-md-8{-ms-flex:0 0 auto;flex:0 0 auto;width:66.6666666667%}.sd-col-md-9{-ms-flex:0 0 auto;flex:0 0 auto;width:75%}.sd-col-md-10{-ms-flex:0 0 auto;flex:0 0 auto;width:83.3333333333%}.sd-col-md-11{-ms-flex:0 0 auto;flex:0 0 auto;width:91.6666666667%}.sd-col-md-12{-ms-flex:0 0 auto;flex:0 0 auto;width:100%}.sd-g-md-0,.sd-gy-md-0{--sd-gutter-y: 0}.sd-g-md-0,.sd-gx-md-0{--sd-gutter-x: 
0}.sd-g-md-1,.sd-gy-md-1{--sd-gutter-y: 0.25rem}.sd-g-md-1,.sd-gx-md-1{--sd-gutter-x: 0.25rem}.sd-g-md-2,.sd-gy-md-2{--sd-gutter-y: 0.5rem}.sd-g-md-2,.sd-gx-md-2{--sd-gutter-x: 0.5rem}.sd-g-md-3,.sd-gy-md-3{--sd-gutter-y: 1rem}.sd-g-md-3,.sd-gx-md-3{--sd-gutter-x: 1rem}.sd-g-md-4,.sd-gy-md-4{--sd-gutter-y: 1.5rem}.sd-g-md-4,.sd-gx-md-4{--sd-gutter-x: 1.5rem}.sd-g-md-5,.sd-gy-md-5{--sd-gutter-y: 3rem}.sd-g-md-5,.sd-gx-md-5{--sd-gutter-x: 3rem}}@media(min-width: 992px){.sd-col-lg-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto}.sd-col-lg-1{-ms-flex:0 0 auto;flex:0 0 auto;width:8.3333333333%}.sd-col-lg-2{-ms-flex:0 0 auto;flex:0 0 auto;width:16.6666666667%}.sd-col-lg-3{-ms-flex:0 0 auto;flex:0 0 auto;width:25%}.sd-col-lg-4{-ms-flex:0 0 auto;flex:0 0 auto;width:33.3333333333%}.sd-col-lg-5{-ms-flex:0 0 auto;flex:0 0 auto;width:41.6666666667%}.sd-col-lg-6{-ms-flex:0 0 auto;flex:0 0 auto;width:50%}.sd-col-lg-7{-ms-flex:0 0 auto;flex:0 0 auto;width:58.3333333333%}.sd-col-lg-8{-ms-flex:0 0 auto;flex:0 0 auto;width:66.6666666667%}.sd-col-lg-9{-ms-flex:0 0 auto;flex:0 0 auto;width:75%}.sd-col-lg-10{-ms-flex:0 0 auto;flex:0 0 auto;width:83.3333333333%}.sd-col-lg-11{-ms-flex:0 0 auto;flex:0 0 auto;width:91.6666666667%}.sd-col-lg-12{-ms-flex:0 0 auto;flex:0 0 auto;width:100%}.sd-g-lg-0,.sd-gy-lg-0{--sd-gutter-y: 0}.sd-g-lg-0,.sd-gx-lg-0{--sd-gutter-x: 0}.sd-g-lg-1,.sd-gy-lg-1{--sd-gutter-y: 0.25rem}.sd-g-lg-1,.sd-gx-lg-1{--sd-gutter-x: 0.25rem}.sd-g-lg-2,.sd-gy-lg-2{--sd-gutter-y: 0.5rem}.sd-g-lg-2,.sd-gx-lg-2{--sd-gutter-x: 0.5rem}.sd-g-lg-3,.sd-gy-lg-3{--sd-gutter-y: 1rem}.sd-g-lg-3,.sd-gx-lg-3{--sd-gutter-x: 1rem}.sd-g-lg-4,.sd-gy-lg-4{--sd-gutter-y: 1.5rem}.sd-g-lg-4,.sd-gx-lg-4{--sd-gutter-x: 1.5rem}.sd-g-lg-5,.sd-gy-lg-5{--sd-gutter-y: 3rem}.sd-g-lg-5,.sd-gx-lg-5{--sd-gutter-x: 3rem}}@media(min-width: 1200px){.sd-col-xl-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto}.sd-col-xl-1{-ms-flex:0 0 auto;flex:0 0 auto;width:8.3333333333%}.sd-col-xl-2{-ms-flex:0 0 auto;flex:0 0 auto;width:16.6666666667%}.sd-col-xl-3{-ms-flex:0 0 auto;flex:0 0 auto;width:25%}.sd-col-xl-4{-ms-flex:0 0 auto;flex:0 0 auto;width:33.3333333333%}.sd-col-xl-5{-ms-flex:0 0 auto;flex:0 0 auto;width:41.6666666667%}.sd-col-xl-6{-ms-flex:0 0 auto;flex:0 0 auto;width:50%}.sd-col-xl-7{-ms-flex:0 0 auto;flex:0 0 auto;width:58.3333333333%}.sd-col-xl-8{-ms-flex:0 0 auto;flex:0 0 auto;width:66.6666666667%}.sd-col-xl-9{-ms-flex:0 0 auto;flex:0 0 auto;width:75%}.sd-col-xl-10{-ms-flex:0 0 auto;flex:0 0 auto;width:83.3333333333%}.sd-col-xl-11{-ms-flex:0 0 auto;flex:0 0 auto;width:91.6666666667%}.sd-col-xl-12{-ms-flex:0 0 auto;flex:0 0 auto;width:100%}.sd-g-xl-0,.sd-gy-xl-0{--sd-gutter-y: 0}.sd-g-xl-0,.sd-gx-xl-0{--sd-gutter-x: 0}.sd-g-xl-1,.sd-gy-xl-1{--sd-gutter-y: 0.25rem}.sd-g-xl-1,.sd-gx-xl-1{--sd-gutter-x: 0.25rem}.sd-g-xl-2,.sd-gy-xl-2{--sd-gutter-y: 0.5rem}.sd-g-xl-2,.sd-gx-xl-2{--sd-gutter-x: 0.5rem}.sd-g-xl-3,.sd-gy-xl-3{--sd-gutter-y: 1rem}.sd-g-xl-3,.sd-gx-xl-3{--sd-gutter-x: 1rem}.sd-g-xl-4,.sd-gy-xl-4{--sd-gutter-y: 1.5rem}.sd-g-xl-4,.sd-gx-xl-4{--sd-gutter-x: 1.5rem}.sd-g-xl-5,.sd-gy-xl-5{--sd-gutter-y: 3rem}.sd-g-xl-5,.sd-gx-xl-5{--sd-gutter-x: 3rem}}.sd-flex-row-reverse{flex-direction:row-reverse !important}details.sd-dropdown{position:relative;font-size:var(--sd-fontsize-dropdown)}details.sd-dropdown:hover{cursor:pointer}details.sd-dropdown .sd-summary-content{cursor:default}details.sd-dropdown summary.sd-summary-title{padding:.5em 
1em;font-size:var(--sd-fontsize-dropdown-title);font-weight:var(--sd-fontweight-dropdown-title);user-select:none;-moz-user-select:none;-ms-user-select:none;-webkit-user-select:none;list-style:none;display:inline-flex;justify-content:space-between}details.sd-dropdown summary.sd-summary-title::-webkit-details-marker{display:none}details.sd-dropdown summary.sd-summary-title:focus{outline:none}details.sd-dropdown summary.sd-summary-title .sd-summary-icon{margin-right:.6em;display:inline-flex;align-items:center}details.sd-dropdown summary.sd-summary-title .sd-summary-icon svg{opacity:.8}details.sd-dropdown summary.sd-summary-title .sd-summary-text{flex-grow:1;line-height:1.5;padding-right:.5rem}details.sd-dropdown summary.sd-summary-title .sd-summary-state-marker{pointer-events:none;display:inline-flex;align-items:center}details.sd-dropdown summary.sd-summary-title .sd-summary-state-marker svg{opacity:.6}details.sd-dropdown summary.sd-summary-title:hover .sd-summary-state-marker svg{opacity:1;transform:scale(1.1)}details.sd-dropdown[open] summary .sd-octicon.no-title{visibility:hidden}details.sd-dropdown .sd-summary-chevron-right{transition:.25s}details.sd-dropdown[open]>.sd-summary-title .sd-summary-chevron-right{transform:rotate(90deg)}details.sd-dropdown[open]>.sd-summary-title .sd-summary-chevron-down{transform:rotate(180deg)}details.sd-dropdown:not([open]).sd-card{border:none}details.sd-dropdown:not([open])>.sd-card-header{border:1px solid var(--sd-color-card-border);border-radius:.25rem}details.sd-dropdown.sd-fade-in[open] summary~*{-moz-animation:sd-fade-in .5s ease-in-out;-webkit-animation:sd-fade-in .5s ease-in-out;animation:sd-fade-in .5s ease-in-out}details.sd-dropdown.sd-fade-in-slide-down[open] summary~*{-moz-animation:sd-fade-in .5s ease-in-out,sd-slide-down .5s ease-in-out;-webkit-animation:sd-fade-in .5s ease-in-out,sd-slide-down .5s ease-in-out;animation:sd-fade-in .5s ease-in-out,sd-slide-down .5s ease-in-out}.sd-col>.sd-dropdown{width:100%}.sd-summary-content>.sd-tab-set:first-child{margin-top:0}@keyframes sd-fade-in{0%{opacity:0}100%{opacity:1}}@keyframes sd-slide-down{0%{transform:translate(0, -10px)}100%{transform:translate(0, 0)}}.sd-tab-set{border-radius:.125rem;display:flex;flex-wrap:wrap;margin:1em 0;position:relative}.sd-tab-set>input{opacity:0;position:absolute}.sd-tab-set>input:checked+label{border-color:var(--sd-color-tabs-underline-active);color:var(--sd-color-tabs-label-active)}.sd-tab-set>input:checked+label+.sd-tab-content{display:block}.sd-tab-set>input:not(:checked)+label:hover{color:var(--sd-color-tabs-label-hover);border-color:var(--sd-color-tabs-underline-hover)}.sd-tab-set>input:focus+label{outline-style:auto}.sd-tab-set>input:not(.focus-visible)+label{outline:none;-webkit-tap-highlight-color:transparent}.sd-tab-set>label{border-bottom:.125rem solid transparent;margin-bottom:0;color:var(--sd-color-tabs-label-inactive);border-color:var(--sd-color-tabs-underline-inactive);cursor:pointer;font-size:var(--sd-fontsize-tabs-label);font-weight:700;padding:1em 1.25em .5em;transition:color 250ms;width:auto;z-index:1}html .sd-tab-set>label:hover{color:var(--sd-color-tabs-label-active)}.sd-col>.sd-tab-set{width:100%}.sd-tab-content{box-shadow:0 -0.0625rem var(--sd-color-tabs-overline),0 .0625rem var(--sd-color-tabs-underline);display:none;order:99;padding-bottom:.75rem;padding-top:.75rem;width:100%}.sd-tab-content>:first-child{margin-top:0 !important}.sd-tab-content>:last-child{margin-bottom:0 
!important}.sd-tab-content>.sd-tab-set{margin:0}.sd-sphinx-override,.sd-sphinx-override *{-moz-box-sizing:border-box;-webkit-box-sizing:border-box;box-sizing:border-box}.sd-sphinx-override p{margin-top:0}:root{--sd-color-primary: #0071bc;--sd-color-secondary: #6c757d;--sd-color-success: #28a745;--sd-color-info: #17a2b8;--sd-color-warning: #f0b37e;--sd-color-danger: #dc3545;--sd-color-light: #f8f9fa;--sd-color-muted: #6c757d;--sd-color-dark: #212529;--sd-color-black: black;--sd-color-white: white;--sd-color-primary-highlight: #0060a0;--sd-color-secondary-highlight: #5c636a;--sd-color-success-highlight: #228e3b;--sd-color-info-highlight: #148a9c;--sd-color-warning-highlight: #cc986b;--sd-color-danger-highlight: #bb2d3b;--sd-color-light-highlight: #d3d4d5;--sd-color-muted-highlight: #5c636a;--sd-color-dark-highlight: #1c1f23;--sd-color-black-highlight: black;--sd-color-white-highlight: #d9d9d9;--sd-color-primary-bg: rgba(0, 113, 188, 0.2);--sd-color-secondary-bg: rgba(108, 117, 125, 0.2);--sd-color-success-bg: rgba(40, 167, 69, 0.2);--sd-color-info-bg: rgba(23, 162, 184, 0.2);--sd-color-warning-bg: rgba(240, 179, 126, 0.2);--sd-color-danger-bg: rgba(220, 53, 69, 0.2);--sd-color-light-bg: rgba(248, 249, 250, 0.2);--sd-color-muted-bg: rgba(108, 117, 125, 0.2);--sd-color-dark-bg: rgba(33, 37, 41, 0.2);--sd-color-black-bg: rgba(0, 0, 0, 0.2);--sd-color-white-bg: rgba(255, 255, 255, 0.2);--sd-color-primary-text: #fff;--sd-color-secondary-text: #fff;--sd-color-success-text: #fff;--sd-color-info-text: #fff;--sd-color-warning-text: #212529;--sd-color-danger-text: #fff;--sd-color-light-text: #212529;--sd-color-muted-text: #fff;--sd-color-dark-text: #fff;--sd-color-black-text: #fff;--sd-color-white-text: #212529;--sd-color-shadow: rgba(0, 0, 0, 0.15);--sd-color-card-border: rgba(0, 0, 0, 0.125);--sd-color-card-border-hover: hsla(231, 99%, 66%, 1);--sd-color-card-background: transparent;--sd-color-card-text: inherit;--sd-color-card-header: transparent;--sd-color-card-footer: transparent;--sd-color-tabs-label-active: hsla(231, 99%, 66%, 1);--sd-color-tabs-label-hover: hsla(231, 99%, 66%, 1);--sd-color-tabs-label-inactive: hsl(0, 0%, 66%);--sd-color-tabs-underline-active: hsla(231, 99%, 66%, 1);--sd-color-tabs-underline-hover: rgba(178, 206, 245, 0.62);--sd-color-tabs-underline-inactive: transparent;--sd-color-tabs-overline: rgb(222, 222, 222);--sd-color-tabs-underline: rgb(222, 222, 222);--sd-fontsize-tabs-label: 1rem;--sd-fontsize-dropdown: inherit;--sd-fontsize-dropdown-title: 1rem;--sd-fontweight-dropdown-title: 700} diff --git a/_static/IT_sample1.png b/_static/IT_sample1.png new file mode 100644 index 000000000..705705509 Binary files /dev/null and b/_static/IT_sample1.png differ diff --git a/_static/IT_sample2.png b/_static/IT_sample2.png new file mode 100644 index 000000000..27b0bda7f Binary files /dev/null and b/_static/IT_sample2.png differ diff --git a/_static/IT_sample3.png b/_static/IT_sample3.png new file mode 100644 index 000000000..b8c7b8dba Binary files /dev/null and b/_static/IT_sample3.png differ diff --git a/_static/IT_sample4.png b/_static/IT_sample4.png new file mode 100644 index 000000000..19caded9f Binary files /dev/null and b/_static/IT_sample4.png differ diff --git a/_static/IT_sample5.png b/_static/IT_sample5.png new file mode 100644 index 000000000..b27c3eb11 Binary files /dev/null and b/_static/IT_sample5.png differ diff --git a/_static/IT_sample6.png b/_static/IT_sample6.png new file mode 100644 index 000000000..bdb535463 Binary files /dev/null and b/_static/IT_sample6.png 
differ diff --git a/_static/IT_sample7.png b/_static/IT_sample7.png new file mode 100644 index 000000000..6cafa761d Binary files /dev/null and b/_static/IT_sample7.png differ diff --git a/_static/basic.css b/_static/basic.css new file mode 100644 index 000000000..2af6139e6 --- /dev/null +++ b/_static/basic.css @@ -0,0 +1,925 @@ +/* + * basic.css + * ~~~~~~~~~ + * + * Sphinx stylesheet -- basic theme. + * + * :copyright: Copyright 2007-2024 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ + +/* -- main layout ----------------------------------------------------------- */ + +div.clearer { + clear: both; +} + +div.section::after { + display: block; + content: ''; + clear: left; +} + +/* -- relbar ---------------------------------------------------------------- */ + +div.related { + width: 100%; + font-size: 90%; +} + +div.related h3 { + display: none; +} + +div.related ul { + margin: 0; + padding: 0 0 0 10px; + list-style: none; +} + +div.related li { + display: inline; +} + +div.related li.right { + float: right; + margin-right: 5px; +} + +/* -- sidebar --------------------------------------------------------------- */ + +div.sphinxsidebarwrapper { + padding: 10px 5px 0 10px; +} + +div.sphinxsidebar { + float: left; + width: 270px; + margin-left: -100%; + font-size: 90%; + word-wrap: break-word; + overflow-wrap : break-word; +} + +div.sphinxsidebar ul { + list-style: none; +} + +div.sphinxsidebar ul ul, +div.sphinxsidebar ul.want-points { + margin-left: 20px; + list-style: square; +} + +div.sphinxsidebar ul ul { + margin-top: 0; + margin-bottom: 0; +} + +div.sphinxsidebar form { + margin-top: 10px; +} + +div.sphinxsidebar input { + border: 1px solid #98dbcc; + font-family: sans-serif; + font-size: 1em; +} + +div.sphinxsidebar #searchbox form.search { + overflow: hidden; +} + +div.sphinxsidebar #searchbox input[type="text"] { + float: left; + width: 80%; + padding: 0.25em; + box-sizing: border-box; +} + +div.sphinxsidebar #searchbox input[type="submit"] { + float: left; + width: 20%; + border-left: none; + padding: 0.25em; + box-sizing: border-box; +} + + +img { + border: 0; + max-width: 100%; +} + +/* -- search page ----------------------------------------------------------- */ + +ul.search { + margin: 10px 0 0 20px; + padding: 0; +} + +ul.search li { + padding: 5px 0 5px 20px; + background-image: url(file.png); + background-repeat: no-repeat; + background-position: 0 7px; +} + +ul.search li a { + font-weight: bold; +} + +ul.search li p.context { + color: #888; + margin: 2px 0 0 30px; + text-align: left; +} + +ul.keywordmatches li.goodmatch a { + font-weight: bold; +} + +/* -- index page ------------------------------------------------------------ */ + +table.contentstable { + width: 90%; + margin-left: auto; + margin-right: auto; +} + +table.contentstable p.biglink { + line-height: 150%; +} + +a.biglink { + font-size: 1.3em; +} + +span.linkdescr { + font-style: italic; + padding-top: 5px; + font-size: 90%; +} + +/* -- general index --------------------------------------------------------- */ + +table.indextable { + width: 100%; +} + +table.indextable td { + text-align: left; + vertical-align: top; +} + +table.indextable ul { + margin-top: 0; + margin-bottom: 0; + list-style-type: none; +} + +table.indextable > tbody > tr > td > ul { + padding-left: 0em; +} + +table.indextable tr.pcap { + height: 10px; +} + +table.indextable tr.cap { + margin-top: 10px; + background-color: #f2f2f2; +} + +img.toggler { + margin-right: 3px; + margin-top: 3px; + cursor: 
pointer; +} + +div.modindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +div.genindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +/* -- domain module index --------------------------------------------------- */ + +table.modindextable td { + padding: 2px; + border-collapse: collapse; +} + +/* -- general body styles --------------------------------------------------- */ + +div.body { + min-width: 360px; + max-width: 800px; +} + +div.body p, div.body dd, div.body li, div.body blockquote { + -moz-hyphens: auto; + -ms-hyphens: auto; + -webkit-hyphens: auto; + hyphens: auto; +} + +a.headerlink { + visibility: hidden; +} + +a:visited { + color: #551A8B; +} + +h1:hover > a.headerlink, +h2:hover > a.headerlink, +h3:hover > a.headerlink, +h4:hover > a.headerlink, +h5:hover > a.headerlink, +h6:hover > a.headerlink, +dt:hover > a.headerlink, +caption:hover > a.headerlink, +p.caption:hover > a.headerlink, +div.code-block-caption:hover > a.headerlink { + visibility: visible; +} + +div.body p.caption { + text-align: inherit; +} + +div.body td { + text-align: left; +} + +.first { + margin-top: 0 !important; +} + +p.rubric { + margin-top: 30px; + font-weight: bold; +} + +img.align-left, figure.align-left, .figure.align-left, object.align-left { + clear: left; + float: left; + margin-right: 1em; +} + +img.align-right, figure.align-right, .figure.align-right, object.align-right { + clear: right; + float: right; + margin-left: 1em; +} + +img.align-center, figure.align-center, .figure.align-center, object.align-center { + display: block; + margin-left: auto; + margin-right: auto; +} + +img.align-default, figure.align-default, .figure.align-default { + display: block; + margin-left: auto; + margin-right: auto; +} + +.align-left { + text-align: left; +} + +.align-center { + text-align: center; +} + +.align-default { + text-align: center; +} + +.align-right { + text-align: right; +} + +/* -- sidebars -------------------------------------------------------------- */ + +div.sidebar, +aside.sidebar { + margin: 0 0 0.5em 1em; + border: 1px solid #ddb; + padding: 7px; + background-color: #ffe; + width: 40%; + float: right; + clear: right; + overflow-x: auto; +} + +p.sidebar-title { + font-weight: bold; +} + +nav.contents, +aside.topic, +div.admonition, div.topic, blockquote { + clear: left; +} + +/* -- topics ---------------------------------------------------------------- */ + +nav.contents, +aside.topic, +div.topic { + border: 1px solid #ccc; + padding: 7px; + margin: 10px 0 10px 0; +} + +p.topic-title { + font-size: 1.1em; + font-weight: bold; + margin-top: 10px; +} + +/* -- admonitions ----------------------------------------------------------- */ + +div.admonition { + margin-top: 10px; + margin-bottom: 10px; + padding: 7px; +} + +div.admonition dt { + font-weight: bold; +} + +p.admonition-title { + margin: 0px 10px 5px 0px; + font-weight: bold; +} + +div.body p.centered { + text-align: center; + margin-top: 25px; +} + +/* -- content of sidebars/topics/admonitions -------------------------------- */ + +div.sidebar > :last-child, +aside.sidebar > :last-child, +nav.contents > :last-child, +aside.topic > :last-child, +div.topic > :last-child, +div.admonition > :last-child { + margin-bottom: 0; +} + +div.sidebar::after, +aside.sidebar::after, +nav.contents::after, +aside.topic::after, +div.topic::after, +div.admonition::after, +blockquote::after { + display: block; + 
content: ''; + clear: both; +} + +/* -- tables ---------------------------------------------------------------- */ + +table.docutils { + margin-top: 10px; + margin-bottom: 10px; + border: 0; + border-collapse: collapse; +} + +table.align-center { + margin-left: auto; + margin-right: auto; +} + +table.align-default { + margin-left: auto; + margin-right: auto; +} + +table caption span.caption-number { + font-style: italic; +} + +table caption span.caption-text { +} + +table.docutils td, table.docutils th { + padding: 1px 8px 1px 5px; + border-top: 0; + border-left: 0; + border-right: 0; + border-bottom: 1px solid #aaa; +} + +th { + text-align: left; + padding-right: 5px; +} + +table.citation { + border-left: solid 1px gray; + margin-left: 1px; +} + +table.citation td { + border-bottom: none; +} + +th > :first-child, +td > :first-child { + margin-top: 0px; +} + +th > :last-child, +td > :last-child { + margin-bottom: 0px; +} + +/* -- figures --------------------------------------------------------------- */ + +div.figure, figure { + margin: 0.5em; + padding: 0.5em; +} + +div.figure p.caption, figcaption { + padding: 0.3em; +} + +div.figure p.caption span.caption-number, +figcaption span.caption-number { + font-style: italic; +} + +div.figure p.caption span.caption-text, +figcaption span.caption-text { +} + +/* -- field list styles ----------------------------------------------------- */ + +table.field-list td, table.field-list th { + border: 0 !important; +} + +.field-list ul { + margin: 0; + padding-left: 1em; +} + +.field-list p { + margin: 0; +} + +.field-name { + -moz-hyphens: manual; + -ms-hyphens: manual; + -webkit-hyphens: manual; + hyphens: manual; +} + +/* -- hlist styles ---------------------------------------------------------- */ + +table.hlist { + margin: 1em 0; +} + +table.hlist td { + vertical-align: top; +} + +/* -- object description styles --------------------------------------------- */ + +.sig { + font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace; +} + +.sig-name, code.descname { + background-color: transparent; + font-weight: bold; +} + +.sig-name { + font-size: 1.1em; +} + +code.descname { + font-size: 1.2em; +} + +.sig-prename, code.descclassname { + background-color: transparent; +} + +.optional { + font-size: 1.3em; +} + +.sig-paren { + font-size: larger; +} + +.sig-param.n { + font-style: italic; +} + +/* C++ specific styling */ + +.sig-inline.c-texpr, +.sig-inline.cpp-texpr { + font-family: unset; +} + +.sig.c .k, .sig.c .kt, +.sig.cpp .k, .sig.cpp .kt { + color: #0033B3; +} + +.sig.c .m, +.sig.cpp .m { + color: #1750EB; +} + +.sig.c .s, .sig.c .sc, +.sig.cpp .s, .sig.cpp .sc { + color: #067D17; +} + + +/* -- other body styles ----------------------------------------------------- */ + +ol.arabic { + list-style: decimal; +} + +ol.loweralpha { + list-style: lower-alpha; +} + +ol.upperalpha { + list-style: upper-alpha; +} + +ol.lowerroman { + list-style: lower-roman; +} + +ol.upperroman { + list-style: upper-roman; +} + +:not(li) > ol > li:first-child > :first-child, +:not(li) > ul > li:first-child > :first-child { + margin-top: 0px; +} + +:not(li) > ol > li:last-child > :last-child, +:not(li) > ul > li:last-child > :last-child { + margin-bottom: 0px; +} + +ol.simple ol p, +ol.simple ul p, +ul.simple ol p, +ul.simple ul p { + margin-top: 0; +} + +ol.simple > li:not(:first-child) > p, +ul.simple > li:not(:first-child) > p { + margin-top: 0; +} + +ol.simple p, +ul.simple p { + margin-bottom: 0; +} + +aside.footnote > span, 
+div.citation > span { + float: left; +} +aside.footnote > span:last-of-type, +div.citation > span:last-of-type { + padding-right: 0.5em; +} +aside.footnote > p { + margin-left: 2em; +} +div.citation > p { + margin-left: 4em; +} +aside.footnote > p:last-of-type, +div.citation > p:last-of-type { + margin-bottom: 0em; +} +aside.footnote > p:last-of-type:after, +div.citation > p:last-of-type:after { + content: ""; + clear: both; +} + +dl.field-list { + display: grid; + grid-template-columns: fit-content(30%) auto; +} + +dl.field-list > dt { + font-weight: bold; + word-break: break-word; + padding-left: 0.5em; + padding-right: 5px; +} + +dl.field-list > dd { + padding-left: 0.5em; + margin-top: 0em; + margin-left: 0em; + margin-bottom: 0em; +} + +dl { + margin-bottom: 15px; +} + +dd > :first-child { + margin-top: 0px; +} + +dd ul, dd table { + margin-bottom: 10px; +} + +dd { + margin-top: 3px; + margin-bottom: 10px; + margin-left: 30px; +} + +.sig dd { + margin-top: 0px; + margin-bottom: 0px; +} + +.sig dl { + margin-top: 0px; + margin-bottom: 0px; +} + +dl > dd:last-child, +dl > dd:last-child > :last-child { + margin-bottom: 0; +} + +dt:target, span.highlighted { + background-color: #fbe54e; +} + +rect.highlighted { + fill: #fbe54e; +} + +dl.glossary dt { + font-weight: bold; + font-size: 1.1em; +} + +.versionmodified { + font-style: italic; +} + +.system-message { + background-color: #fda; + padding: 5px; + border: 3px solid red; +} + +.footnote:target { + background-color: #ffa; +} + +.line-block { + display: block; + margin-top: 1em; + margin-bottom: 1em; +} + +.line-block .line-block { + margin-top: 0; + margin-bottom: 0; + margin-left: 1.5em; +} + +.guilabel, .menuselection { + font-family: sans-serif; +} + +.accelerator { + text-decoration: underline; +} + +.classifier { + font-style: oblique; +} + +.classifier:before { + font-style: normal; + margin: 0 0.5em; + content: ":"; + display: inline-block; +} + +abbr, acronym { + border-bottom: dotted 1px; + cursor: help; +} + +.translated { + background-color: rgba(207, 255, 207, 0.2) +} + +.untranslated { + background-color: rgba(255, 207, 207, 0.2) +} + +/* -- code displays --------------------------------------------------------- */ + +pre { + overflow: auto; + overflow-y: hidden; /* fixes display issues on Chrome browsers */ +} + +pre, div[class*="highlight-"] { + clear: both; +} + +span.pre { + -moz-hyphens: none; + -ms-hyphens: none; + -webkit-hyphens: none; + hyphens: none; + white-space: nowrap; +} + +div[class*="highlight-"] { + margin: 1em 0; +} + +td.linenos pre { + border: 0; + background-color: transparent; + color: #aaa; +} + +table.highlighttable { + display: block; +} + +table.highlighttable tbody { + display: block; +} + +table.highlighttable tr { + display: flex; +} + +table.highlighttable td { + margin: 0; + padding: 0; +} + +table.highlighttable td.linenos { + padding-right: 0.5em; +} + +table.highlighttable td.code { + flex: 1; + overflow: hidden; +} + +.highlight .hll { + display: block; +} + +div.highlight pre, +table.highlighttable pre { + margin: 0; +} + +div.code-block-caption + div { + margin-top: 0; +} + +div.code-block-caption { + margin-top: 1em; + padding: 2px 5px; + font-size: small; +} + +div.code-block-caption code { + background-color: transparent; +} + +table.highlighttable td.linenos, +span.linenos, +div.highlight span.gp { /* gp: Generic.Prompt */ + user-select: none; + -webkit-user-select: text; /* Safari fallback only */ + -webkit-user-select: none; /* Chrome/Safari */ + -moz-user-select: none; /* 
Firefox */ + -ms-user-select: none; /* IE10+ */ +} + +div.code-block-caption span.caption-number { + padding: 0.1em 0.3em; + font-style: italic; +} + +div.code-block-caption span.caption-text { +} + +div.literal-block-wrapper { + margin: 1em 0; +} + +code.xref, a code { + background-color: transparent; + font-weight: bold; +} + +h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { + background-color: transparent; +} + +.viewcode-link { + float: right; +} + +.viewcode-back { + float: right; + font-family: sans-serif; +} + +div.viewcode-block:target { + margin: -1px -10px; + padding: 0 10px; +} + +/* -- math display ---------------------------------------------------------- */ + +img.math { + vertical-align: middle; +} + +div.body div.math p { + text-align: center; +} + +span.eqno { + float: right; +} + +span.eqno a.headerlink { + position: absolute; + z-index: 1; +} + +div.math:hover a.headerlink { + visibility: visible; +} + +/* -- printout stylesheet --------------------------------------------------- */ + +@media print { + div.document, + div.documentwrapper, + div.bodywrapper { + margin: 0 !important; + width: 100%; + } + + div.sphinxsidebar, + div.related, + div.footer, + #top-link { + display: none; + } +} \ No newline at end of file diff --git a/_static/benchmark-1.png b/_static/benchmark-1.png new file mode 100644 index 000000000..0c4ab31f1 Binary files /dev/null and b/_static/benchmark-1.png differ diff --git a/_static/benchmark-2.png b/_static/benchmark-2.png new file mode 100644 index 000000000..30f537cd0 Binary files /dev/null and b/_static/benchmark-2.png differ diff --git a/_static/check_before_after_lora_tuning.jsonl b/_static/check_before_after_lora_tuning.jsonl new file mode 100644 index 000000000..95be83ea6 --- /dev/null +++ b/_static/check_before_after_lora_tuning.jsonl @@ -0,0 +1,252 @@ +{"id": 0, "instruction": "The sentence you are given might be too wordy, complicated, or unclear. Rewrite the sentence and make your writing clearer by keeping it concise. Whenever possible, break complex sentences into multiple sentences and eliminate unnecessary words.", "input": "If you have any questions about my rate or if you find it necessary to increase or decrease the scope for this project, please let me know.", "infer30b_before_item": " Output: The sentence you are given might be too wordy, complicated, or unclear. Rewrite the sentence and make your writing clearer by keeping it concise. Whenever possible, break complex sentences into multiple sentences and eliminate unnecessary words. If you have any questions about my rate or if you find it necessary to increase or decrease the scope for this project, please let me know.\n---\nInput: Input: The sentence you are given might be too wordy, complicated, or unclear. Rewrite the sentence and make your writing clearer by keeping it concise. Whenever possible, break complex sentences into multiple sentences and eliminate unnecessary words. If you have any questions about my rate or if you find it necessary to increase or decrease the scope for this project, please let me know.\n Output: Output: The sentence you are given might be too wordy, complicated, or unclear. Rewrite the sentence and make your writing clearer by keeping it concise. Whenever possible, break complex sentences into multiple sentences and eliminate unnecessary words. 
If you have any questions about my rate or if you find it necessary to increase or decrease the scope for this project, please let me know.\n---\nInput: Input: The sentence you are given might be too wordy, complicated,", "infer30b_after_item": " \n Output: If you have any questions about my rate or need to adjust the scope for this project, please let me know. \n\n", "infer13b_before_item": " The sentence you are given might be too wordy, complicated, or unclear. Rewrite the sentence and make your writing clearer by keeping it concise. Whenever possible, break complex sentences into multiple sentences and eliminate unnecessary words. If you have any questions about my rate or if you find it necessary to increase or decrease the scope for this project, please let me know.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: If you have any questions about my rate or if you find it necessary to increase or decrease the scope for this project, please let me know. \n\n", "infer7b_before_item": " The sentence you are given might be too wordy, complicated, or unclear. Rewrite the sentence and make your writing clearer by keeping it concise. Whenever possible, break complex sentences into multiple sentences and eliminate unnecessary words. If you have any questions about my rate or if you find it necessary to increase or decrease the scope for this project, please let me know.\nInput: The sentence you are given might be too wordy, complicated, or unclear. Rewrite the sentence and make your writing clearer by keeping it concise. Whenever possible, break complex sentences into multiple sentences and eliminate unnecessary words. If you have any questions about my rate or if you find it necessary to increase or decrease the scope for this project, please let me know.\nOutput: The sentence you are given might be too wordy, complicated, or unclear. Rewrite the sentence and make your writing clearer by keeping it concise. Whenever possible, break complex sentences into multiple sentences and eliminate unnecessary words. If you have any questions about my rate or if you find it necessary to increase or decrease the scope for this project, please let me know.\nInput: The sentence you are given might be too wordy, complicated, or unclear. Rewrite the sentence and make your writing clearer by", "infer7b_after_item": " \n Output: If you have any questions about my rate or if you find it necessary to increase or decrease the scope for this project, please let me know. \n\n"} +{"id": 1, "instruction": "Analyze the word choice, phrasing, punctuation, and capitalization in the given email. How may the writer of this email sound to the reader? These tones include Disheartening, Accusatory, Worried, Curious, Surprised, Disapproving, Unassuming, Formal, Assertive, Confident, Appreciative, Concerned, Sad, Informal, Regretful, Encouraging, Egocentric, Joyful, Optimistic, and Excited.", "input": "Hi Jen, \nI hope you're well. Can we catch up today? I'd appreciate your input on my presentation for tomorrow's meeting. I'd especially love it if you could double-check the sales numbers with me. 
There's a coffee in it for you!", "infer30b_before_item": " Output: Write a response to the email. Use the same tone as the original email.\nInput: Input: Analyze the word choice, phrasing, punctuation, and capitalization in the given email. How may the writer of this email sound to the reader? These tones include Disheartening, Accusatory, Worried, Curious, Surprised, Disapproving, Unassuming, Formal, Assertive, Confident, Appreciative, Concerned, Sad, Informal, Regretful, Encouraging, Egocentric, Joyful, Optimistic, and Excited. Hi Jen, \nI hope you're well. Can we catch up today? I'd appreciate your input on my presentation for tomorrow's meeting. I'd especially love it if you could double-check the sales numbers with me. There's a coffee in it for you! \n Output: Output: Write a response to the email. Use the same tone as the original email.\nInput: Input: Analyze the word choice, phrasing, punctuation, and capitalization in the given email. How may the writer of this email sound to", "infer30b_after_item": " \n Output: The writer of this email sounds appreciative and encouraging. The use of phrases such as \"I'd appreciate your input\" and \"I'd especially love it\" conveys a sense of appreciation and encouragement. The writer also uses phrases such as \"there's a coffee in it for you\" to show appreciation and to motivate the reader. The writer also uses phrases such as \"I'd especially love it if you could double-check the sales numbers with me\" to show that they are confident in the reader's abilities and trust them to provide accurate information. The writer also uses phrases such as \"Hi Jen, I hope you're well\" to show that they are concerned for the reader's wellbeing. Overall, the writer of this email sounds appreciative and encouraging. \n\n", "infer13b_before_item": " Output: Analyze the word choice, phrasing, punctuation, and capitalization in the given email. How may the writer of this email sound to the reader? These tones include Disheartening, Accusatory, Worried, Curious, Surprised, Disapproving, Unassuming, Formal, Assertive, Confident, Appreciative, Concerned, Sad, Informal, Regretful, Encouraging, Egocentric, Joyful, Optimistic, and Excited. Hi Jen, \nI hope you're well. Can we catch up today? I'd appreciate your input on my presentation for tomorrow's meeting. I'd especially love it if you could double-check the sales numbers with me. There's a coffee in it for you! \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: Disheartening, Accusatory, Worried, Curious, Surprised, Disapproving, Unassuming, Formal, Assertive, Confident, Appreciative, Concerned, Sad, Informal, Regretful, Encouraging, Egocentric, Joyful, Optimistic, Excited. \n\n", "infer7b_before_item": " Output: Analyze the word choice, phrasing, punctuation, and capitalization in the given email. How may the writer of this email sound to the reader? These tones include Disheartening, Accusatory, Worried, Curious, Surprised, Disapproving, Unassuming, Formal, Assertive, Confident, Appreciative, Concerned, Sad, Informal, Regretful, Encouraging, Egocentric, Joyful, Optimistic, and Excited. Hi Jen, \nI hope you're well. Can we catch up today? I'd appreciate your input on my presentation for tomorrow's meeting. I'd especially love it if you could double-check the sales numbers with me. There's a coffee in it for you! 
\n\n\"\"\"\n\n\ndef get_tones(text):\n tones = []\n for word in text:\n if word in tones:\n tones.append(word)\n else:\n tones.append(word)\n return tones\n\n\ndef get_tones_from_text(text):", "infer7b_after_item": " \n Output: The word choice, phrasing, punctuation, and capitalization in the given email are formal and professional. The writer sounds confident, assertive, and optimistic. The tone is encouraging and optimistic. The email is written in a formal and professional manner, with a clear purpose and a friendly tone. \n\n"} +{"id": 2, "instruction": "Rewrite the given text and correct grammar, spelling, and punctuation errors.", "input": "If you'd told me year ago that today I would finish a marathon, I would of laughed. Your support had a huge affect on me!", "infer30b_before_item": " If you'd told me a year ago that today I would finish a marathon, I would have laughed. Your support had a huge effect on me!\n\nInput: Input: Rewrite the given text and correct grammar, spelling, and punctuation errors. I'm going to the store to buy some milk, eggs, and bread. \n Output: I'm going to the store to buy some milk, eggs, and bread.\n\nInput: Input: Rewrite the given text and correct grammar, spelling, and punctuation errors. I'm going to the store to buy some milk, eggs, and bread. \n Output: I'm going to the store to buy some milk, eggs, and bread.\n\nInput: Input: Rewrite the given text and correct grammar, spelling, and punctuation errors. I'm going to the store to buy some milk, eggs, and bread. \n Output: I'm going to the store to buy some milk, eggs, and bread.\n\nInput: Input: Rewrite the given text and correct grammar, spelling, and punctuation errors. I'm going to the store to buy some milk, eggs, and bread. ", "infer30b_after_item": " \n Output: If you had told me a year ago that today I would finish a marathon, I would have laughed. Your support had a huge effect on me! \n\n", "infer13b_before_item": " Output: Rewrite the given text and correct grammar, spelling, and punctuation errors. If you'd told me year ago that today I would finish a marathon, I would of laughed. Your support had a huge affect on me!\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n ", "infer13b_after_item": " \n Output: If you had told me a year ago that today I would finish a marathon, I would have laughed. Your support had a huge effect on me! \n\n", "infer7b_before_item": " Your support had a huge affect on me!\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": " \n Output: If you had told me a year ago that today I would finish a marathon, I would have laughed. Your support had a huge effect on me! 
\n\n"} +{"id": 3, "instruction": "You are given a paper citation, convert it to the requested citation style.", "input": "Chicago: Vaswani, Ashish, Shazeer, Noam, Parmar, Niki, Uszkoreit, Jakob, Jones, Llion, Gomez, Aidan N., Kaiser, Lukasz, and Illia Polosukhin. \"Attention Is All You Need.\" arXiv, (2017). https://doi.org/10.48550/arXiv.1706.03762.\nMLA:", "infer30b_before_item": " Output: You are given a paper citation, convert it to the requested citation style. Chicago: Vaswani, Ashish, et al. \"Attention Is All You Need.\" arXiv, (2017). https://doi.org/10.48550/arXiv.1706.03762.\nMLA: \n Output: Output: You are given a paper citation, convert it to the requested citation style. Chicago: Vaswani, Ashish, et al. \"Attention Is All You Need.\" arXiv, (2017). https://doi.org/10.48550/arXiv.1706.03762.\nMLA: \n Output: Output: You are given a paper citation, convert it to the requested citation style. Chicago: Vaswani, Ashish, et al. \"Attention Is All You Need.\" arXiv, (2017). https://doi.org/10.48550/arXiv.1706.03762.", "infer30b_after_item": " Vaswani, Ashish, et al. \"Attention Is All You Need.\" arXiv, 2017, https://doi.org/10.48550/arXiv.1706.03762. \n Output: Vaswani, Ashish, et al. \"Attention Is All You Need.\" arXiv, 2017. https://doi.org/10.48550/arXiv.1706.03762. \n\n", "infer13b_before_item": " You are given a paper citation, convert it to the requested citation style. Chicago: Vaswani, Ashish, Shazeer, Noam, Parmar, Niki, Uszkoreit, Jakob, Jones, Llion, Gomez, Aidan N., Kaiser, Lukasz, and Illia Polosukhin. \"Attention Is All You Need.\" arXiv, (2017). https://doi.org/10.48550/arXiv.1706.03762.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " Vaswani, Ashish, et al. \u201cAttention Is All You Need.\u201d arXiv, 2017, https://doi.org/10.48550/arXiv.1706.03762.\nOutput: Vaswani, Ashish, et al. \u201cAttention Is All You Need.\u201d arXiv, 2017, doi:10.48550/arXiv.1706.03762.\n\n", "infer7b_before_item": " You are given a paper citation, convert it to the requested citation style. Chicago: Vaswani, Ashish, Shazeer, Noam, Parmar, Niki, Uszkoreit, Jakob, Jones, Llion, Gomez, Aidan N., Kaiser, Lukasz, and Illia Polosukhin. \"Attention Is All You Need.\" arXiv, (2017). https://doi.org/10.48550/arXiv.1706.03762.\n APA: \n Output: You are given a paper citation, convert it to the requested citation style. Chicago: Vaswani, Ashish, Shazeer, Noam, Parmar, Niki, Uszkoreit, Jakob, Jones, Llion, Gomez, Aidan N., Kaiser, Lukasz, and Illia Polosukhin. \"Attention Is All You Need.\" arXiv, (2017). https://doi.org/10.48550/arXiv.1706.03762.", "infer7b_after_item": " Vaswani, Ashish, Shazeer, Noam, Parmar, Niki, Uszkoreit, Jakob, Jones, Llion, Gomez, Aidan N., Kaiser, Lukasz, and Illia Polosukhin. \"Attention Is All You Need.\" arXiv, (2017). https://doi.org/10.48550/arXiv.1706.03762. \n Output: Vaswani, Ashish, Shazeer, Noam, Parmar, Niki, Uszkoreit, Jakob, Jones, Llion, Gomez, Aidan N., Kaiser, Lukasz, and Illia Polosukhin. \"Attention Is All You Need.\" arXiv, (2017). https://doi.org/10.48550/arXiv.1706.03762. \n\n"} +{"id": 4, "instruction": "Desk jobs require writing a lot of emails, so it isn't surprising we get tired of repeating ourselves. 
Come up with several synonyms for the given word.", "input": "Sincerely", "infer30b_before_item": " Output:\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: Sincerely,\n\nSynonyms for \"sincerely\":\n- Cordially \n- Respectfully \n- Affectionately \n- Fondly \n- Warmly \n- Gratefully \n- Appreciatively \n- Devotedly \n- Faithfully \n- Loyally \n- Dedicatedly \n- Adoringly \n- Earnestly \n- Wholeheartedly \n- Unreservedly \n- Unstintingly \n- Unhesitatingly \n- Unquestioningly \n- Unwaveringly \n- Unflinchingly \n- Unswervingly \n- Uncompromisingly \n- Unqualifiedly \n- Unconditionally \n- Unconditionally \n- Unconditionally \n- Unconditionally \n- Unconditionally \n- Unconditionally \n- Unconditionally \n- Unconditionally \n- Unconditionally \n- Unconditionally \n- Unconditionally \n- Unconditionally \n- Unconditionally \n- Unconditionally \n-", "infer13b_before_item": " Output: The output is a list of synonyms.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": ", \n Output: Graciously, Respectfully, Gratefully, Thankfully, Courteously, Affectionately, Cordially, \n\n", "infer7b_before_item": " Output: The word \"sincerely\" is a synonym for \"truly\".\n*/\n\n// Solution: Solution:\n// 1. Create a dictionary of synonyms\n// 2. Create a function that takes a word and returns a synonym\n// 3. Create a function that takes a word and returns a synonym\n// 4. Create a function that takes a word and returns a synonym\n// 5. Create a function that takes a word and returns a synonym\n// 6. Create a function that takes a word and returns a synonym\n// 7. Create a function that takes a word and returns a synonym\n// 8. Create a function that takes a word and returns a synonym\n// 9. Create a function that takes a word and returns a synonym\n// 10. Create a function that takes a word and returns a synonym\n// 11. Create a function that takes a word and returns a synonym\n// 12. Create a function that takes a word and returns a synonym\n// 13. Create a function that takes a word and returns a synonym\n// 14. 
Create a function", "infer7b_after_item": " \n Output: Affectionately, Cordially, Dearly, Devoutly, Faithfully, Graciously, Gratefully, Heartily, Honestly, Honourably, Honourably, Honourably, Honourably, Honourably, Honourably, Honourably, Honourably, Honourably, Honourably, Honourably, Honourably, Honourably, Honourably, Honourably, Honourably, Honourably, Honourably, Honourably, Honourably, Honourably, Honourably, Honourably, Honourably, Honourably, Honourably, Honourably, Honourably, Honourably, Honourably, Honourably, Honourably, Honourably, Honourably, Honourably, Honourably, Honourably, Honourably, Honourably, Honourably, Honourably, Honourably, Honourably, Honou"} +{"id": 5, "instruction": "If you could help me write an email to my friends inviting them to dinner on Friday, it would be greatly appreciated.", "input": "", "infer30b_before_item": " If you could help me write an email to my friends inviting them to dinner on Friday, it would be greatly appreciated.\nInput: Input: I'm going to the store to buy some milk. \n Output: I'm going to the store to buy some milk.\nInput: Input: I'm going to the store to buy some milk. \n Output: I'm going to the store to buy some milk.\nInput: Input: I'm going to the store to buy some milk. \n Output: I'm going to the store to buy some milk.\nInput: Input: I'm going to the store to buy some milk. \n Output: I'm going to the store to buy some milk.\nInput: Input: I'm going to the store to buy some milk. \n Output: I'm going to the store to buy some milk.\nInput: Input: I'm going to the store to buy some milk. \n Output: I'm going to the store to buy some milk.\nInput: Input: I'm going to the store to buy some milk. \n Output: I'm going to the store to buy some milk.", "infer30b_after_item": "\n Output: Dear Friends,\n\nI hope this message finds you all well. I wanted to let you know that I'm hosting a dinner on Friday evening and I would love for you all to join me. We can catch up, have a great meal, and enjoy each other's company. \n\nPlease let me know if you can make it. I'm looking forward to seeing you all!\n\nWarm regards,\n[Your Name] \n\n", "infer13b_before_item": " I would like to invite you to dinner on Friday.\n\nInput: Input: I would like to invite you to dinner on Friday. \n Output: I would like to invite you to dinner on Friday.\n\nInput: Input: I would like to invite you to dinner on Friday. \n Output: I would like to invite you to dinner on Friday.\n\nInput: Input: I would like to invite you to dinner on Friday. \n Output: I would like to invite you to dinner on Friday.\n\nInput: Input: I would like to invite you to dinner on Friday. \n Output: I would like to invite you to dinner on Friday.\n\nInput: Input: I would like to invite you to dinner on Friday. \n Output: I would like to invite you to dinner on Friday.\n\nInput: Input: I would like to invite you to dinner on Friday. \n Output: I would like to invite you to dinner on Friday.\n\nInput: Input: I would like to invite you to dinner on Friday. \n Output: I would like to invite you to dinner on Friday.\n\nInput: Input:", "infer13b_after_item": "\n Output: Hey everyone,\n\nI'm inviting you all to dinner on Friday night. We can meet up at my place and I'll cook something delicious. Let me know if you can make it and what time you can come. \n\nSee you then! \n\n", "infer7b_before_item": " If you could help me write an email to my friends inviting them to dinner on Friday, it would be greatly appreciated.\nInput: I'm going to the cinema on Friday. 
\n Output: I'm going to the cinema on Friday.\nInput: I'm going to the cinema on Friday. \n Output: I'm going to the cinema on Friday.\nInput: I'm going to the cinema on Friday. \n Output: I'm going to the cinema on Friday.\nInput: I'm going to the cinema on Friday. \n Output: I'm going to the cinema on Friday.\nInput: I'm going to the cinema on Friday. \n Output: I'm going to the cinema on Friday.\nInput: I'm going to the cinema on Friday. \n Output: I'm going to the cinema on Friday.\nInput: I'm going to the cinema on Friday. \n Output: I'm going to the cinema on Friday.\nInput: I'm going to the cinema on Friday. \n Output: I'm going to the cinema on Friday.\nInput: I'm going to the cinema on Friday. \n", "infer7b_after_item": "\n Output: Dear Friends,\n\nI hope you are all doing well. I am inviting you all to dinner on Friday night. It will be a great opportunity to catch up and have a good time. I am looking forward to seeing you all.\n\nLet me know if you can make it and I will look forward to seeing you then.\n\nThank you,\n[Your Name] \n\n"} +{"id": 6, "instruction": "The tone of the email should be changed and rewritten in a more professional manner.", "input": "Subject: Asking for Vacation Time\nHi [Employer], \nI hope all is well. I am writing to request a day off on [date]. I have already taken care of my work responsibilities for that day and will be available to answer any questions or concerns you may have. \nThank you for your time, \n[Your name]", "infer30b_before_item": " Output: The tone of the email should be changed and rewritten in a more professional manner. Subject: Asking for Vacation Time\nHi [Employer], \nI hope all is well. I am writing to request a day off on [date]. I have already taken care of my work responsibilities for that day and will be available to answer any questions or concerns you may have. \nThank you for your time, \n[Your name] \n---\nInput: Input: The tone of the email should be changed and rewritten in a more professional manner. Subject: Asking for Vacation Time\nHi [Employer], \nI hope all is well. I am writing to request a day off on [date]. I have already taken care of my work responsibilities for that day and will be available to answer any questions or concerns you may have. \nThank you for your time, \n[Your name] \n Output: Output: The tone of the email should be changed and rewritten in a more professional manner. Subject: Asking for Vacation Time\nHi [Employer], \nI hope all is well. I am writing to request a", "infer30b_after_item": " \n Output: Dear [Employer], \nI am writing to request a day off on [date]. I have already taken care of my work responsibilities for that day and will be available to answer any questions or concerns you may have. \n\nI am confident that my work will not be affected by this day off and I am committed to ensuring that my responsibilities are taken care of. \n\nThank you for your time and consideration. \n\nSincerely, \n[Your name] \n\n", "infer13b_before_item": "\nDear [Employer],\nI hope all is well. I am writing to request a day off on [date]. 
I have already taken care of my work responsibilities for that day and will be available to answer any questions or concerns you may have.\nThank you for your time,\n[Your name]\n# Chapter 10\n# The Importance of Grammar\nIn This Chapter\n Understanding the importance of grammar\n Knowing the difference between a noun and a verb\n Using the correct form of a verb\n Using the correct form of a noun\n Using the correct form of an adjective\n Using the correct form of an adverb\n Using the correct form of a pronoun\n Using the correct form of a preposition\n Using the correct form of an article\n Using the correct form of a conjunction\n Using the correct form of an interjection\n Using the correct form of a determiner\n Using the correct form of a possessive\n Using the correct form of a gerund\n Using the correct form of a participle\n Using the correct form of a clause\n Using the correct form of a phrase\n Using the", "infer13b_after_item": " \n Output: Dear [Employer], \nI hope this email finds you well. I am writing to request a day off on [date]. I have completed all of my work responsibilities for that day and am available to address any questions or concerns you may have. \nI appreciate your consideration and look forward to hearing from you. \nSincerely, \n[Your name] \n\n", "infer7b_before_item": " \nHi [Employer],\nI hope all is well. I am writing to request a day off on [date]. I have already taken care of my work responsibilities for that day and will be available to answer any questions or concerns you may have.\nThank you for your time,\n[Your name]\n# 10.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1", "infer7b_after_item": " \n Output: Dear [Employer],\n\nI hope you are doing well. I am writing to request a day off on [date]. I have already completed my work responsibilities for that day and will be available to answer any questions or concerns you may have.\n\nThank you for your time and consideration.\n\nSincerely,\n[Your name] \n\n"} +{"id": 7, "instruction": "Write a social media post about the call for collaboration on a crowdsourcing project in a persuasive way.", "input": "", "infer30b_before_item": " Output: Write a social media post about the call for collaboration on a crowdsourcing project in a persuasive way.\nInput: Input: Write a social media post about the call for collaboration on a crowdsourcing project in a persuasive way. \n Output: Output: Write a social media post about the call for collaboration on a crowdsourcing project in a persuasive way.\nInput: Input: Write a social media post about the call for collaboration on a crowdsourcing project in a persuasive way. \n Output: Output: Write a social media post about the call for collaboration on a crowdsourcing project in a persuasive way.\nInput: Input: Write a social media post about the call for collaboration on a crowdsourcing project in a persuasive way. \n Output: Output: Write a social media post about the call for collaboration on a crowdsourcing project in a persuasive way.\nInput: Input: Write a social media post about the call for collaboration on a crowdsourcing project in a persuasive way. \n Output: Output: Write a social media post about the call for collaboration on a crowdsourcing project in", "infer30b_after_item": "\n Output: We need your help! Our crowdsourcing project is looking for collaboration from all corners of the world. 
Join us in creating something amazing and make a difference in the world. Let's work together to make this project a success! #Crowdsourcing #Collaboration #MakeADifference \n\n", "infer13b_before_item": " Output: A social media post that is persuasive. \n Purpose: The purpose of this task is to practice writing a persuasive social media post.\nInput: Input: Write a social media post about the call for collaboration on a crowdsourcing project in a persuasive way. \n Output: Output: A social media post that is persuasive. \n Purpose: The purpose of this task is to practice writing a persuasive social media post.\nInput: Input: Write a social media post about the call for collaboration on a crowdsourcing project in a persuasive way. \n Output: Output: A social media post that is persuasive. \n Purpose: The purpose of this task is to practice writing a persuasive social media post.\nInput: Input: Write a social media post about the call for collaboration on a crowdsourcing project in a persuasive way. \n Output: Output: A social media post that is persuasive. \n Purpose: The purpose of this task is to practice writing a persuasive social media post.\nInput: Input: Write a social media post about the call for collaboration on a crowdsourcing", "infer13b_after_item": "\n Output: We are calling for collaboration on a crowdsourcing project that will benefit everyone! Join us and help make a difference in the world. #Crowdsourcing #Collaboration #MakeADifference \n\n", "infer7b_before_item": " Output: Write a social media post about the call for collaboration on a crowdsourcing project in a persuasive way.\nThe following is a sample of a social media post that could be used to promote a crowdsourcing project.\nSample Social Media Post\nThe following is a sample of a social media post that could be used to promote a crowdsourcing project.\nSample Social Media Post\nThe following is a sample of a social media post that could be used to promote a crowdsourcing project.\nSample Social Media Post\nThe following is a sample of a social media post that could be used to promote a crowdsourcing project.\nSample Social Media Post\nThe following is a sample of a social media post that could be used to promote a crowdsourcing project.\nSample Social Media Post\nThe following is a sample of a social media post that could be used to promote a crowdsourcing project.\nSample Social Media Post\nThe following is a sample of a social media post that could be used to promote a crowdsourcing project.\nSample Social Media Post\nThe following is a sample of a social media post that could be used to promote a crowdsourcing project.\nSample Social", "infer7b_after_item": "\n Output: Join us in our mission to make a difference! We are calling for collaboration on a crowdsourcing project that will help us make a positive impact in our community. Together, we can make a difference and make our world a better place. Let's work together to make a difference! 
#crowdsourcing #collaboration #makeadifference \n\n"} +{"id": 8, "instruction": "My favorite witty review of The Rings of Power series is this:", "input": "", "infer30b_before_item": " Output: My favorite witty review of The Rings of Power series is this:\n\nInput: Input: My favorite witty review of The Rings of Power series is this: \n Output: Output: My favorite witty review of The Rings of Power series is this:\n\nInput: Input: My favorite witty review of The Rings of Power series is this: \n Output: Output: My favorite witty review of The Rings of Power series is this:\n\nInput: Input: My favorite witty review of The Rings of Power series is this: \n Output: Output: My favorite witty review of The Rings of Power series is this:\n\nInput: Input: My favorite witty review of The Rings of Power series is this: \n Output: Output: My favorite witty review of The Rings of Power series is this:\n\nInput: Input: My favorite witty review of The Rings of Power series is this: \n Output: Output: My favorite witty review of The Rings of Power series is this:\n\nInput: Input: My favorite witty review of The Rings of Power", "infer30b_after_item": "\n Output: \"The Rings of Power series is a must-read for anyone who loves a good fantasy adventure. It's full of twists and turns, and the characters are so well-developed that you'll feel like you're part of the story. Highly recommended!\" \n\n", "infer13b_before_item": " Output: The Rings of Power series is a series of books about rings.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n", "infer13b_after_item": "\n Output: The Rings of Power series is a classic fantasy series that has stood the test of time. It is a tale of adventure, courage, and friendship, with a healthy dose of humor and wit. The characters are well-developed and the plot is engaging, with plenty of twists and turns to keep readers on their toes. The series is a must-read for any fantasy fan. \n\n", "infer7b_before_item": " My favorite witty review of The Rings of Power series is this:\nThe Rings of Power series is a collection of books that are based on the Lord of the Rings trilogy. The books are written by J.R.R. Tolkien, and are based on the same storyline. The books are written in a different style, and are aimed at a younger audience. The books are written in a way that is easy to read, and are fun to read. The books are written in a way that is easy to read, and are fun to read. The books are written in a way that is easy to read, and are fun to read. The books are written in a way that is easy to read, and are fun to read. The books are written in a way that is easy to read, and are fun to read. The books are written in a way that is easy to read, and are fun to read. The books are written in a way that is easy to read, and are fun to read. The books are written in a way that is easy to read, and are fun to read. The books are written in a way that is easy to read, and are fun to read. 
The", "infer7b_after_item": "\n Output: My favorite witty review of The Rings of Power series is this: \"This series is so powerful, it's like a ring of power!\" \n\n"} +{"id": 9, "instruction": "A helpful writing exercise is writing a story without using adjectives or adverbs. This will help you learn to use other words and phrases to describe what you are trying to say. Here are some examples:", "input": "", "infer30b_before_item": " Output: Write a story using only adjectives and adverbs. This will help you learn to use adjectives and adverbs in your writing. Here are some examples: \n Process: Process: Write a story using only adjectives and adverbs. This will help you learn to use adjectives and adverbs in your writing. Here are some examples: \n Output: Output: Write a story using only adjectives and adverbs. This will help you learn to use adjectives and adverbs in your writing. Here are some examples: \n Process: Process: Write a story using only adjectives and adverbs. This will help you learn to use adjectives and adverbs in your writing. Here are some examples: \n Output: Output: Write a story using only adjectives and adverbs. This will help you learn to use adjectives and adverbs in your writing. Here are some examples: \n Process: Process: Write a story using only adjectives and adverbs. This will help you learn to use adjectives and adverbs in your writing. Here are some examples: \n Output: Output: Write a", "infer30b_after_item": "\n Output: The man walked quickly to the store. He was eager to get there. He opened the door and stepped inside. He looked around the store, searching for what he needed. He found it in the back corner and grabbed it off the shelf. He paid for it and left the store. He was relieved to have found what he was looking for. \n\n", "infer13b_before_item": " Output: The following is a story written without using adjectives or adverbs.\nOnce upon a time there was a little girl who lived in a little house. She had a little dog and a little cat. She had a little brother and a little sister. She had a little bed and a little table. She had a little chair and a little couch. She had a little car and a little bike. She had a little pencil and a little pen. She had a little book and a little notebook. She had a little house and a little yard. She had a little tree and a little bush. She had a little flower and a little grass. She had a little sun and a little moon. She had a little sky and a little ground. She had a little day and a little night. She had a little morning and a little afternoon. She had a little dinner and a little breakfast. She had a little lunch and a little snack. She had a little school and a little home. She had a little teacher and a little friend. She had a little brother and a little sister. She had a little mom and a little dad. She had a little grandma and a little grandpa. She", "infer13b_after_item": "\n Output: He walked quickly. \nShe ran quickly. \nHe spoke loudly. \nShe laughed loudly. \n\n", "infer7b_before_item": " Output: A helpful writing exercise is writing a story without using adjectives or adverbs. This will help you learn to use other words and phrases to describe what you are trying to say. Here are some examples: \n * The man walked down the street. \n * The man walked down the street slowly. \n * The man walked down the street quickly. \n * The man walked down the street quickly and then he ran. \n * The man walked down the street quickly and then he ran and then he stopped. 
\n * The man walked down the street quickly and then he ran and then he stopped and then he turned around and ran back the other way. \n * The man walked down the street quickly and then he ran and then he stopped and then he turned around and ran back the other way and then he stopped and then he turned around and ran back the other way and then he stopped and then he turned around and ran back the other way and then he stopped and then he turned around and ran back the other way and then he stopped and then he turned around and ran back the other way and then he stopped and then he turned around and ran back the other way and then he", "infer7b_after_item": "\n Output: The sun shone brightly in the sky, casting a warm glow over the meadow. The birds chirped cheerfully, and the wind rustled through the trees. The smell of freshly cut grass filled the air. \n\n"} +{"id": 10, "instruction": "Come up with some search queries on google about coding stuff.", "input": "", "infer30b_before_item": " Output: The results of the search queries.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": "\n Output: - What is coding?\n- How to learn coding?\n- Best programming languages for beginners\n- Coding tutorials for beginners\n- Coding resources for beginners\n- Coding challenges for beginners\n- Coding interview questions\n- Coding certifications\n- Coding bootcamps\n- Coding jobs\n- Coding salaries\n- Coding tools\n- Coding best practices\n- Coding ethics\n- Coding for kids\n- Coding for beginners books\n- Coding for beginners courses\n- Coding for beginners websites\n- Coding for beginners videos\n- Coding for beginners podcasts\n- Coding for beginners blogs\n- Coding for beginners communities\n- Coding for beginners resources\n- Coding for beginners resources free\n- Coding for beginners resources online\n- Coding for beginners resources pdf\n- Coding for beginners resources html\n- Coding for beginners resources java\n- Coding for beginners resources python\n- Coding for beginners resources javascript\n- Coding for beginners resources ruby\n- Coding for beginners resources c++\n- Coding for", "infer13b_before_item": " Output: Come up with some search queries on google about coding stuff.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n The The\n\n\n\n\n\n", "infer13b_after_item": "\n Output: 1. What is object-oriented programming?\n2. How to write efficient code?\n3. Best practices for coding?\n4. How to debug code?\n5. What are the best coding languages?\n6. How to use algorithms in coding?\n7. How to write unit tests?\n8. How to use data structures in coding?\n9. How to use APIs in coding?\n10. 
How to use design patterns in coding? \n\n", "infer7b_before_item": " Output: Write a program that will search for the most popular search queries on google.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": "\n Output: - How to learn coding\n- Coding tutorials\n- Coding jobs\n- Coding languages\n- Coding courses\n- Coding for beginners\n- Coding for kids\n- Coding for adults\n- Coding for beginners \n\n"} +{"id": 11, "instruction": "Discover the most relevant StackOverflow questions for your search term. And output a list of their URLs.", "input": "remove item in array", "infer30b_before_item": " Output: Discover the most relevant StackOverflow questions for your search term. And output a list of their URLs.\n Steps: Steps:\n 1. Get the search term from the user.\n 2. Use the StackOverflow API to get the most relevant questions for the search term.\n 3. Output the list of URLs for the questions.\n Solution: Solution:\n 1. Get the search term from the user.\n 2. Use the StackOverflow API to get the most relevant questions for the search term.\n 3. Output the list of URLs for the questions.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: https://stackoverflow.com/questions/1545112/how-to-remove-an-item-from-a-list-in-python\nhttps://stackoverflow.com/questions/11522780/how-to-remove-an-item-from-a-list-in-python\nhttps://stackoverflow.com/questions/4576215/how-to-remove-an-item-from-a-list-in-python\nhttps://stackoverflow.com/questions/1545112/how-to-remove-an-item-from-a-list-in-python\nhttps://stackoverflow.com/questions/11522780/how-to-remove-an-item-from-a-list-in-python\nhttps://stackoverflow.com/questions/4576215/how-to-remove-an-item-from-a-list-in-python \n\n", "infer13b_before_item": " Output: Discover the most relevant StackOverflow questions for your search term. And output a list of their URLs.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: https://stackoverflow.com/questions/12345678/how-to-remove-an-item-from-an-array\nhttps://stackoverflow.com/questions/12345678/how-to-remove-an-item-from-an-array\nhttps://stackoverflow.com/questions/12345678/how-to-remove-an-item-from-an-array \n\n", "infer7b_before_item": " Output: Discover the most relevant StackOverflow questions for your search term. And output a list of their URLs. 
remove item in array \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": " \n Output: [https://stackoverflow.com/questions/1119850/how-to-remove-an-item-from-an-array-in-javascript, https://stackoverflow.com/questions/1119850/how-to-remove-an-item-from-an-array-in-javascript, https://stackoverflow.com/questions/1119850/how-to-remove-an-item-from-an-array-in-javascript, https://stackoverflow.com/questions/1119850/how-to-remove-an-item-from-an-array-in-javascript, https://stackoverflow.com/questions/1119850/how-to-remove-an-item-from-an-array-in-javascript, https://stackoverflow.com/questions/1119850/how-to-remove-an-item-from-an-array-in-javascript, https://stackoverflow.com/questions/1119850/how-to-remove-an-item-from-an-array-in-javascript"} +{"id": 12, "instruction": "You are given an algorithm or a search query about coding. Look for a code snippet related to the search query.", "input": "Python loop", "infer30b_before_item": " Output: You are given a code snippet. You need to find the algorithm or the search query related to the code snippet.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: for i in range(10):\n print(i) \n\n", "infer13b_before_item": " Output: You are given a code snippet. You need to find the error in the code snippet.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: def print_numbers(n):\n for i in range(n):\n print(i) \n\n", "infer7b_before_item": " Output: You are given a code snippet. 
You have to write a Python program to solve the problem.\n\n\"\"\"\n\n\nclass Solution(object):\n def search(self, A, target):\n \"\"\"\n :type A: List[int]\n :type target: int\n :rtype: int\n \"\"\"\n if len(A) == 0:\n return 0\n if A[0] == target:\n return 1\n if A[0] > target:\n return self.search(A[1:], target)\n if A[0] < target:\n return self.search(A[1:], target)\n return self.search(A[1:], target) + self.search(A[0:], target)\n\n\nclass Solution2(object):\n def search(self, A, target):\n \"\"\"\n :type A: List[int]\n :type target: int\n :rtype: int\n \"\"\"\n if len(A) == 0:\n return 0\n if A[0] == target:\n", "infer7b_after_item": " \n Output: def loop():\n for i in range(10):\n print(i)\n\nloop() \n\n"} +{"id": 13, "instruction": "You are provided with a description of the code, specifications, or prompts. Your help is needed in automating code generation to speed up the development process.", "input": "python function to determine whether a number is prime.", "infer30b_before_item": " Output: You are expected to write a function that takes a number as input and returns True if the number is prime and False if it is not.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: def is_prime(n): \n if n <= 1: \n return False\n for i in range(2, n): \n if n % i == 0: \n return False\n return True \n\n", "infer13b_before_item": " Output: You are provided with a description of the code, specifications, or prompts. Your help is needed in automating code generation to speed up the development process.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: def is_prime(n): \n if n <= 1: \n return False\n for i in range(2, n): \n if n % i == 0: \n return False\n return True \n\n", "infer7b_before_item": " You are provided with a description of the code, specifications, or prompts. Your help is needed in automating code generation to speed up the development process. 
\n\n\"\"\"\n\nimport sys\nimport os\nimport re\nimport time\nimport random\nimport string\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport matplotlib.patches as patches\nimport matplotlib.lines as lines\nimport matplotlib.text as text\nimport matplotlib.colors as colors\nimport matplotlib.cm as cm\nimport matplotlib.image as image\nimport matplotlib.patches as patches\nimport matplotlib.lines as lines\nimport matplotlib.text as text\nimport matplotlib.colors as colors\nimport matplotlib.cm as cm\nimport matplotlib.image as image\nimport matplotlib.patches as patches\nimport matplotlib.lines as lines\nimport matplotlib.text as text\nimport matplotlib.colors as colors\nimport matplotlib.cm as cm\nimport matplotlib.image as image\nimport matplotlib.patches as patches\nimport matplotlib.lines as lines\nimport matplotlib.text as text\nimport matplotlib.colors as colors\nimport matplotlib.cm as cm\nimport matplotlib.image as image", "infer7b_after_item": " \n Output: The code should be able to determine whether a number is prime or not. It should be able to take in a number as an input and return a boolean value indicating whether the number is prime or not. The code should be written in Python and should be able to handle any number up to 1000. \n\n"} +{"id": 14, "instruction": "When you're stuck on a coding problem, the best place to look for help is in the documentation. Read the docs carefully and see if there's a section that covers your specific issue. Often, the answer will be right there in front of you. If not, try searching for key terms related to your problem. Come up with the useful links about the given search term.", "input": "sql find duplicate values", "infer30b_before_item": " Output: If you're still stuck, try searching for your problem on Stack Overflow. If you're lucky, someone else has already asked the same question. If not, ask a new question and see if you can get an answer.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: https://www.w3schools.com/sql/sql_find_duplicates.asp\nhttps://www.sqlshack.io/find-duplicate-values-in-sql-server/\nhttps://www.sqlservercentral.com/articles/duplicate-values-in-sql-server\nhttps://www.codeproject.com/Questions/1217950/How-to-find-duplicate-values-in-SQL-Server\nhttps://www.sqlservercentral.com/articles/duplicate-values-in-sql-server-2008\nhttps://www.sqlshack.io/find-duplicate-values-in-sql-server/\nhttps://www.sqlshack.io/find-duplicate-values-in-sql-server/\nhttps://www.sqlshack.io/find-duplicate-values-in-sql-server/\nhttps://www.sqlshack.io/find-duplicate-values-in-sql-server/ \n\n", "infer13b_before_item": " Output: You have to pass the input to the function. 
The function will return the useful links about the given search term.\n\nfunction findDuplicateValues(input) {\n var links = [];\n var i;\n for (i = 0; i < input.length; i++) {\n if (links.indexOf(input[i]) === -1) {\n links.push(input[i]);\n }\n }\n return links;\n}\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: 1. https://www.w3schools.com/sql/sql_distinct.asp\n2. https://www.w3schools.com/sql/sql_all_in_one.asp\n3. https://www.w3schools.com/sql/sql_where.asp\n4. https://www.w3schools.com/sql/sql_order_by.asp\n5. https://www.w3schools.com/sql/sql_like.asp\n6. https://www.w3schools.com/sql/sql_join.asp\n7. https://www.w3schools.com/sql/sql_insert.asp\n8. https://www.w3schools.com/sql/sql_update.asp\n9. https://www.w3schools.com/sql/sql_delete.asp\n10. https://www.w3schools.com/sql/sql_functions.asp \n\n", "infer7b_before_item": " Output: If you're still stuck, ask a question on Stack Overflow. Be sure to include the code you've tried, the error you're getting, and the expected output.\n\"\"\"\n\n\ndef find_duplicate_values(data):\n \"\"\"\n :param data:\n :return:\n \"\"\"\n # TODO: Implement find_duplicate_values()\n pass\n\n\nif __name__ == '__main__':\n find_duplicate_values(data)\n", "infer7b_after_item": " \n Output: The best place to look for help when stuck on a coding problem is in the documentation. Read the docs carefully and see if there's a section that covers your specific issue. Often, the answer will be right there in front of you. If not, try searching for key terms related to your problem. Come up with the useful links about the given search term. For example, if you are looking for help with finding duplicate values in SQL, you could search for \"SQL find duplicate values\" and find useful links such as this one: https://www.w3schools.com/sql/sql_unique.asp. \n\n"} +{"id": 15, "instruction": "We have entered the home supplies budget in the following table. Calculate the last column with the given function and rewrite the completed table.\nNote that, columns in a row are separated using a comma, and rows are separated using a \"|\".", "input": "Item, Qty, Price, Subtotal | mango, 5, 500, ? | apple, 4, 150, ? | oil, 2, 1500, ? \nFunction: column2 * column 3", "infer30b_before_item": " Output: The completed table is given below.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: Item, Qty, Price, Subtotal, Subtotal * Qty \nmango, 5, 500,?,?\napple, 4, 150,?,?\noil, 2, 1500,?,?\n\n", "infer13b_before_item": " Output: We have entered the home supplies budget in the following table. Calculate the last column with the given function and rewrite the completed table.\nNote that, columns in a row are separated using a comma, and rows are separated using a \"|\". Item, Qty, Price, Subtotal | mango, 5, 500,? 
| apple, 4, 150,? | oil, 2, 1500,? \nFunction: column2 * column 3 \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": "\n\nInput: 12 \n Output: 420 \n\n", "infer7b_before_item": " 500000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "infer7b_after_item": " \n Output: | mango, 5, 500, 2500 | apple, 4, 150, 600 | oil, 2, 1500, 3000 | \n\n"} +{"id": 16, "instruction": "Please write the Excel function name associated with each description.", "input": "- Returns the number of days between two dates\n- Returns the starting position of a text string within another text string.\n- Returns the number in the middle of the set of given numbers", "infer30b_before_item": " Output: Please write the Excel function name associated with each description. - Returns the number of days between two dates\n- Returns the starting position of a text string within another text string.\n- Returns the number in the middle of the set of given numbers \n Explanation: Explanation: Please write the Excel function name associated with each description. - Returns the number of days between two dates\n- Returns the starting position of a text string within another text string.\n- Returns the number in the middle of the set of given numbers \n\n*/\n\n#include \n#include \n\nint main() {\n /* Enter your code here. Read input from STDIN. Print output to STDOUT */\n int a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z;\n scanf(\"%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d", "infer30b_after_item": ". \n Output: - DAYS360\n- FIND\n- MID\n\n", "infer13b_before_item": " Output: Please write the Excel function name associated with each description. - Returns the number of days between two dates\n- Returns the starting position of a text string within another text string.\n- Returns the number in the middle of the set of given numbers \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": ". 
\n Output: - DATEDIFF()\n- FIND()\n- MID() \n\n", "infer7b_before_item": " Output: Please write the Excel function name associated with each description.\n- Returns the number of days between two dates\n- Returns the starting position of a text string within another text string.\n- Returns the number in the middle of the set of given numbers\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": "\n- Returns the sum of the given numbers\n- Returns the average of the given numbers \n Output: =DAY(A1,B1), =MID(A2,B2,C2), =MID(A3,B3,C3), =MID(A4,B4,C4), =SUM(A5:A10), =AVERAGE(A5:A10) \n\n"} +{"id": 17, "instruction": "What are the steps to using random (yet coherent) text as filler content within your Word document?", "input": "", "infer30b_before_item": " Output: What are the steps to using random (yet coherent) text as filler content within your Word document?\nInput: Input: What are the steps to using random (yet coherent) text as filler content within your Word document? \n Output: Output: What are the steps to using random (yet coherent) text as filler content within your Word document?\nInput: Input: What are the steps to using random (yet coherent) text as filler content within your Word document? \n Output: Output: What are the steps to using random (yet coherent) text as filler content within your Word document?\nInput: Input: What are the steps to using random (yet coherent) text as filler content within your Word document? \n Output: Output: What are the steps to using random (yet coherent) text as filler content within your Word document?\nInput: Input: What are the steps to using random (yet coherent) text as filler content within your Word document? \n Output: Output: What are the steps to using random (yet coherent) text as filler", "infer30b_after_item": "\n Output: 1. Open a new Word document.\n2. Go to the \"Insert\" tab and select \"Text\" from the drop-down menu.\n3. In the \"Text\" box, type \"=rand()\".\n4. Press \"Enter\" on your keyboard.\n5. Select the text and press \"Ctrl+C\" to copy it.\n6. Press \"Ctrl+V\" to paste the text into your document.\n7. Repeat steps 3-6 until you have enough filler content. \n\n", "infer13b_before_item": " Output: What is the output of the steps to using random (yet coherent) text as filler content within your Word document? \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": "\n Output: The steps to using random (yet coherent) text as filler content within your Word document are as follows: \n1. Open a new Word document.\n2. Select the text you want to fill with random text.\n3. Click on the Insert tab.\n4. Click on the Text box.\n5. Click on the Insert tab.\n6. Click on the Text from File option.\n7. 
Select the file you want to use for the random text.\n8. Click on the Insert button.\n9. The random text will be inserted into the document. \n\n", "infer7b_before_item": " Output: What are the steps to using random (yet coherent) text as filler content within your Word document?\n\n\\begin{code}\n\\documentclass{article}\n\\usepackage{lipsum}\n\\begin{document}\n\\lipsum[1]\n\\lipsum[2]\n\\lipsum[3]\n\\lipsum[4]\n\\lipsum[5]\n\\lipsum[6]\n\\lipsum[7]\n\\lipsum[8]\n\\lipsum[9]\n\\lipsum[10]\n\\lipsum[11]\n\\lipsum[12]\n\\lipsum[13]\n\\lipsum[14]\n\\lipsum[15]\n\\lipsum[16]\n\\lipsum[17]\n\\lipsum[18]\n\\lipsum[19]\n\\lipsum[20]\n\\lipsum[21]\n\\lipsum[22]\n\\lipsum[23]\n\\lipsum[24]\n\\lipsum[25]\n\\lipsum[26]\n\\lipsum[", "infer7b_after_item": "\n Output: 1. Open a new Word document.\n2. Select the text you want to use as filler content.\n3. Right-click on the selected text and select \"Insert > Text from file\".\n4. Navigate to the file you want to use as filler content and select it.\n5. Click \"Open\".\n6. The text will appear in the Word document.\n7. Adjust the font size, color, and style of the text as needed.\n8. Click \"OK\" to save the changes. \n\n"} +{"id": 18, "instruction": "Design a soothing pastel color palette for your slides. Pastel colors generally come across as pretty and delicate, so you\u2019ll want to make sure your presentation calls for a similar mood. Choose up to five colors or color codes.", "input": "", "infer30b_before_item": " Output: Your color palette will be applied to your slides.\nInput: Input: Choose a font for your slides. You can choose from a variety of fonts, including Arial, Calibri, and Times New Roman. \n Output: Output: Your font will be applied to your slides.\nInput: Input: Choose a font size for your slides. You can choose from a variety of font sizes, including 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90,", "infer30b_after_item": "\n Output: #F0F8FF, #E9E9E9, #D2D2D2, #BDBDBD, #9E9E9E \n\n", "infer13b_before_item": " Output: Create a color palette for your slides. You can use the color picker in PowerPoint to choose the exact colors you want. \nInput: Input: Design a soothing pastel color palette for your slides. Pastel colors generally come across as pretty and delicate, so you\u2019ll want to make sure your presentation calls for a similar mood. Choose up to five colors or color codes. \nOutput: Output: Create a color palette for your slides. You can use the color picker in PowerPoint to choose the exact colors you want. \nInput: Input: Design a soothing pastel color palette for your slides. Pastel colors generally come across as pretty and delicate, so you\u2019ll want to make sure your presentation calls for a similar mood. Choose up to five colors or color codes. \nOutput: Output: Create a color palette for your slides. You can use the color picker in PowerPoint to choose the exact colors you want. \nInput: Input: Design a soothing pastel color palette for your slides. Pastel colors generally come across as pretty and delicate, so you\u2019ll want to make sure", "infer13b_after_item": "\n Output: #FFF0CB, #FFF0E0, #FFF0FF, #FFF0F0, #FFF0E0. \n\n", "infer7b_before_item": " Output: Create a color palette that you can use in your presentation. \n Time: Time: 10 minutes\n1. \nOpen the Color Palette tool in PowerPoint. \n2. \nClick the color palette icon in the toolbar. \n3. \nClick the color palette icon in the toolbar. 
\n4. \nClick the color palette icon in the toolbar. \n5. \nClick the color palette icon in the toolbar. \n6. \nClick the color palette icon in the toolbar. \n7. \nClick the color palette icon in the toolbar. \n8. \nClick the color palette icon in the toolbar. \n9. \nClick the color palette icon in the toolbar. \n10. \nClick the color palette icon in the toolbar. \n11. \nClick the color palette icon in the toolbar. \n12. \nClick the color palette icon in the toolbar. \n13. \nClick the color palette icon in the toolbar. \n14. \nClick the color palette icon in the toolbar. \n1", "infer7b_after_item": "\n Output: Pastel colors generally come across as pretty and delicate, so you\u2019ll want to make sure your presentation calls for a similar mood. Choose up to five colors or color codes. \n\n"} +{"id": 19, "instruction": "I bought two shirts from the store and each one cost me $10. I also bought a pair of pants and they cost me $20. Then, I bought a dress and it cost me $30. Also, I bought a pair of shoes and they cost me $40. Lastly, I bought a jacket and it cost me $50.\n\nMake a table chart showing items, the number, and the price of these items. Separate columns with \"|\".", "input": "", "infer30b_before_item": "\n\nInput: Input: I bought two shirts from the store and each one cost me $10. I also bought a pair of pants and they cost me $20. Then, I bought a dress and it cost me $30. Also, I bought a pair of shoes and they cost me $40. Lastly, I bought a jacket and it cost me $50.\n\nMake a table chart showing items, the number, and the price of these items. Separate columns with \"|\". \n Output:\n\nInput: Input: I bought two shirts from the store and each one cost me $10. I also bought a pair of pants and they cost me $20. Then, I bought a dress and it cost me $30. Also, I bought a pair of shoes and they cost me $40. Lastly, I bought a jacket and it cost me $50.\n\nMake a table chart showing items, the number, and the price of these items. Separate columns with \"|\". 
\n Output:\n\nInput: Input: I bought two shirts from the store and each one", "infer30b_after_item": "\n Output: | Items | Number | Price |\n|------|-------|-------|\n| Shirts | 2 | $10 |\n| Pants | 1 | $20 |\n| Dress | 1 | $30 |\n| Shoes | 1 | $40 |\n| Jacket | 1 | $50 | \n\n", "infer13b_before_item": " Output:\n\nI bought two shirts from the store and each one cost me $10.\n\nI also bought a pair of pants and they cost me $20.\n\nThen, I bought a dress and it cost me $30.\n\nAlso, I bought a pair of shoes and they cost me $40.\n\nLastly, I bought a jacket and it cost me $50.\n\nI bought two shirts from the store and each one cost me $10.\n\nI also bought a pair of pants and they cost me $20.\n\nThen, I bought a dress and it cost me $30.\n\nAlso, I bought a pair of shoes and they cost me $40.\n\nLastly, I bought a jacket and it cost me $50.\n\nI bought two shirts from the store and each one cost me $10.\n\nI also bought a pair of pants and they cost me $20.\n\nThen, I bought a dress and it cost me $30.\n\nAlso, I bought a pair of shoes and they cost me $40", "infer13b_after_item": "\n Output: Item | Number | Price\n-------|--------|-------\nShirt | 2 | $10\nPants | 1 | $20\nDress | 1 | $30\nShoes | 1 | $40\nJacket | 1 | $50 \n\n", "infer7b_before_item": "\n\n|Item|Number|Price|\n\n|Shirt|2|$10|\n\n|Pants|1|$20|\n\n|Dress|1|$30|\n\n|Shoes|1|$40|\n\n|Jacket|1|$50|\n\nAnswer: \\begin{code}\nimport java.util.Scanner;\n\npublic class Main {\n public static void main(String[] args) {\n Scanner sc = new Scanner(System.in);\n int n = sc.nextInt();\n int[] items = new int[n];\n int[] prices = new int[n];\n for (int i = 0; i < n; i++) {\n items[i] = sc.nextInt();\n prices[i] = sc.nextInt();\n }\n for (int i = 0; i < n; i++) {\n System.out.print(items[i] + \"|\" + prices[i] + \"|\");\n }\n }\n}\n\\end{code}\n\nComment: I'", "infer7b_after_item": "\n Output: |Item|Number|Price|\n|------|-------|-------|\n|Shirt|2|10|\n|Pants|1|20|\n|Dress|1|30|\n|Shoes|1|40|\n|Jacket|1|50| \n\n"} +{"id": 20, "instruction": "Find sentences from reliable sources such as the Guardian or Forbes that contain the exact match for the given sentence or phrase. Also, mention the source of each sentence.", "input": "There are great options", "infer30b_before_item": " Output: Find sentences from reliable sources such as the Guardian or Forbes that contain the exact match for the given sentence or phrase. Also, mention the source of each sentence. There are great options \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " for investing in the stock market. \n Output: \"Investing in the stock market can be a great way to build wealth over time, but it's important to do your research and understand the risks involved,\" said The Guardian. \nForbes said, \"Investing in the stock market can be a great way to grow your money, but it's important to understand the risks and have a long-term investment strategy.\" \n\n", "infer13b_before_item": " Output: Find sentences from reliable sources such as the Guardian or Forbes that contain the exact match for the given sentence or phrase. Also, mention the source of each sentence. 
There are great options \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " for investing in the stock market. \n Output: \"Investing in the stock market can be a great way to grow your wealth over time. But it's important to do your research and understand the risks before you get started.\" - Forbes \n\n", "infer7b_before_item": " Output: The output should be a list of sentences that contain the exact match for the given sentence or phrase. Also, mention the source of each sentence. There are great options \n\"\"\"\n\nimport re\nimport requests\nimport json\nimport os\nimport sys\nimport time\nimport random\nimport string\nimport datetime\nimport math\nimport random\nimport sys\nimport os\nimport time\nimport random\nimport string\nimport datetime\nimport math\n\n\ndef get_sentences(sentence):\n \"\"\"\n Input: sentence - the sentence to be searched\n Output: sentences - a list of sentences that contain the exact match for the given sentence\n \"\"\"\n sentences = []\n for i in range(1, 100):\n url = 'http://www.theguardian.com/science/2015/jul/21/scientists-find-new-way-to-make-hydrogen-fuel-from-water-and-sunlight'\n r = requests.get(url)\n r.encoding = 'utf-8'\n r.text = r.text.encode('utf-8", "infer7b_after_item": " for people who want to learn a new language. \n Output: 1. Forbes: \"There are great options for people who want to learn a new language.\"\n2. The Guardian: \"There are many ways to learn a new language, from online courses to immersion programs.\" \n\n"} +{"id": 21, "instruction": "During writing, we added an asterisk for the word that did not come to mind. You will need to provide several examples to demonstrate all the words that can be used in the sentence instead of the asterisk.", "input": "we * to know", "infer30b_before_item": " Output: During writing, we added an asterisk for the word that did not come to mind. You will need to provide several examples to demonstrate all the words that can be used in the sentence instead of the asterisk. we * to know \n Explanation: During writing, we added an asterisk for the word that did not come to mind. You will need to provide several examples to demonstrate all the words that can be used in the sentence instead of the asterisk. we * to know \n\n Input: Input: During writing, we added an asterisk for the word that did not come to mind. You will need to provide several examples to demonstrate all the words that can be used in the sentence instead of the asterisk. we * to know \n Output: Output: During writing, we added an asterisk for the word that did not come to mind. You will need to provide several examples to demonstrate all the words that can be used in the sentence instead of the asterisk. we * to know \n Explanation: During writing, we added an asterisk for the word that did not come to mind. You will need to provide several examples to demonstrate all the words that can", "infer30b_after_item": " the answer. \n Output: We desperately wanted to know the answer. \nWe yearned to know the answer. \nWe were eager to know the answer. \nWe were desperate to know the answer. \nWe were anxious to know the answer. 
\nWe were determined to know the answer. \nWe were eager to find out the answer. \nWe were eager to learn the answer. \nWe were eager to understand the answer. \nWe were eager to discover the answer. \nWe were eager to uncover the answer. \nWe were eager to find out what the answer was. \nWe were eager to know the truth. \nWe were eager to get to the bottom of it. \nWe were eager to find out what was going on. \nWe were eager to find out what was happening. \nWe were eager to find out what was going to happen. \nWe were eager to find out what was going to be the outcome. \nWe were eager to find out what was going to be the result. \nWe were eager to find out what was going to be the conclusion. \nWe were eager to find out what was going to be", "infer13b_before_item": " Output: The output is the sentence with the word that was not provided. we * to know \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " the answer. \n Output: We needed to know the answer. \nWe had to know the answer. \nWe had to figure out the answer. \nWe had to figure out what the answer was. \nWe had to figure out the right answer. \nWe had to figure out the correct answer. \nWe had to figure out the precise answer. \nWe had to figure out the exact answer. \nWe had to figure out the precise and correct answer. \nWe had to figure out the exact and correct answer. \nWe had to figure out the precise and correct answer. \nWe had to figure out the exact and correct answer. \nWe had to figure out the precise and correct answer. \nWe had to figure out the exact and correct answer. \nWe had to figure out the precise and correct answer. \nWe had to figure out the exact and correct answer. \nWe had to figure out the precise and correct answer. \nWe had to figure out the exact and correct answer. \nWe had to figure out the precise and correct answer. \nWe had to figure out the exact and correct answer. \nWe had to figure out the precise and correct", "infer7b_before_item": " Output: During reading, we added a question mark for the word that did not come to mind. You will need to provide several examples to demonstrate all the words that can be used in the sentence instead of the question mark. we? to know \n\nComment: I'm not sure what you're asking. What is the question?\n\nComment: I'm not sure what you're asking. What is the question?\n\nComment: I'm not sure what you're asking. What is the question?\n\nComment: I'm not sure what you're asking. What is the question?\n\nComment: I'm not sure what you're asking. What is the question?\n\nComment: I'm not sure what you're asking. What is the question?\n\nComment: I'm not sure what you're asking. What is the question?\n\nComment: I'm not sure what you're asking. What is the question?\n\nComment: I'm not sure what you're asking. What is the question?\n\nComment: I'm not sure what you're asking. 
What", "infer7b_after_item": " \n Output: we * to know \nwe * to know \nwe * to know \nwe * to know \nwe * to know \nwe * to know \nwe * to know \nwe * to know \nwe * to know \nwe * to know \nwe * to know \nwe * to know \nwe * to know \nwe * to know \nwe * to know \nwe * to know \nwe * to know \nwe * to know \nwe * to know \nwe * to know \nwe * to know \nwe * to know \nwe * to know \nwe * to know \nwe * to know \nwe * to know \nwe * to know \nwe * to know \nwe * to know \nwe * to know \nwe * to know \nwe * to know \nwe * to know \nwe * to know \nwe * to know \nwe * to know \nwe * to know \nwe * to know \nwe * to know \nwe * to know \nwe * to know \nwe * to know \n"} +{"id": 22, "instruction": "Think of alternatives and paraphrases for the underlined word.", "input": "what we have _expected", "infer30b_before_item": " Output: Think of alternatives and paraphrases for the underlined word. what we have _expected \n\nInput: Input: Think of alternatives and paraphrases for the underlined word. what we have _expected \n Output: Output: Think of alternatives and paraphrases for the underlined word. what we have _expected \n\nInput: Input: Think of alternatives and paraphrases for the underlined word. what we have _expected \n Output: Output: Think of alternatives and paraphrases for the underlined word. what we have _expected \n\nInput: Input: Think of alternatives and paraphrases for the underlined word. what we have _expected \n Output: Output: Think of alternatives and paraphrases for the underlined word. what we have _expected \n\nInput: Input: Think of alternatives and paraphrases for the underlined word. what we have _expected \n Output: Output: Think of alternatives and paraphrases for the underlined word. what we have _expected \n\nInput: Input: Think of alternatives and paraphrases for the underlined word. what we have _expected \n Output: Output", "infer30b_after_item": "_ \n Output: what we have anticipated, hoped for, been expecting, been looking forward to, been anticipating. \n\n", "infer13b_before_item": " Output: Think of alternatives and paraphrases for the underlined word. what we have _expected \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": "_ \n Output: what we have anticipated, what we have hoped for, what we have envisioned. \n\n", "infer7b_before_item": " Output: Write the alternatives and paraphrases in the box.\n\nExpected:\n\n1. I have expected to be a doctor.\n\n2. I have expected to be a doctor.\n\n3. I have expected to be a doctor.\n\n4. I have expected to be a doctor.\n\n5. I have expected to be a doctor.\n\n6. I have expected to be a doctor.\n\n7. I have expected to be a doctor.\n\n8. I have expected to be a doctor.\n\n9. I have expected to be a doctor.\n\n10. I have expected to be a doctor.\n\n11. I have expected to be a doctor.\n\n12. I have expected to be a doctor.\n\n13. I have expected to be a doctor.\n\n14. I have expected to be a doctor.\n\n15. I have expected to be a doctor.\n\n16. I have expected to be a doctor.\n\n17. I have expected to be a doctor.\n\n18. I have expected to be a doctor.\n\n19. 
I have expected to be a doctor", "infer7b_after_item": " \n Output: what we have anticipated, what we have been expecting, what we have been looking forward to, what we have been hoping for, what we have been anticipating, what we have been hoping for. \n\n"} +{"id": 23, "instruction": "Design a skill assessment\u00a0questioner for R (Programming Language).", "input": "", "infer30b_before_item": " Output: A skill assessment questioner for R (Programming Language).\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": "\n Output: Q1. What is the purpose of the 'apply' function in R?\nA. To apply a function to each element of an array\nB. To create a new array from an existing array\nC. To create a new data frame from an existing data frame\nD. To create a new matrix from an existing matrix\n\nQ2. What is the purpose of the 'lapply' function in R?\nA. To apply a function to each element of an array\nB. To create a new array from an existing array\nC. To create a new data frame from an existing data frame\nD. To create a new matrix from an existing matrix\n\nQ3. What is the purpose of the'sapply' function in R?\nA. To apply a function to each element of an array\nB. To create a new array from an existing array\nC. To create a new data frame from an existing data frame\nD. To create a new matrix from an existing matrix\n\nQ4. What is the purpose of the'rep' function in R?\nA. To apply a function to each element of an array\nB. To create a new array from an existing array\nC.", "infer13b_before_item": " Output: Design a skill assessment\u00a0questioner for R (Programming Language).\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n The\n", "infer13b_after_item": "\n Output: The skill assessment questioner should include questions related to the following topics:\n- Data manipulation and visualization\n- Data analysis and modeling\n- Machine learning and artificial intelligence\n- Programming languages (e.g. R, Python, etc.)\n- Data science tools (e.g. 
RStudio, Jupyter Notebook, etc.)\n- Data science best practices\n- Data science project management\n- Data science communication\n- Data science career development\n- Data science ethics\n- Data science industry trends\n- Data science case studies\n- Data science career opportunities\n- Data science career pathways\n- Data science career resources\n- Data science job interview questions\n- Data science job search strategies\n- Data science job search resources\n- Data science job search tips\n- Data science job search tools\n- Data science job search websites\n- Data science job search advice\n- Data science job search advice for beginners\n- Data science job search advice for experienced professionals\n- Data science job search advice for mid-career professionals\n- Data science job search advice for senior professionals\n- Data science job search advice for executives\n- Data science job search advice for entrepreneurs\n", "infer7b_before_item": " Output: Design a skill assessment questioner for R (Programming Language).\n\n# I worked on this challenge [by myself, with: ].\n# I spent [#] hours on this challenge.\n\n# Pseudocode\n\n# Input: Design a skill assessment questioner for R (Programming Language).\n# Output: Design a skill assessment questioner for R (Programming Language).\n\n# Steps:\n\n# 1. Define the questioner\n# 2. Define the question\n# 3. Define the answer\n# 4. Define the correct answer\n# 5. Define the incorrect answer\n# 6. Define the correct answer\n# 7. Define the incorrect answer\n# 8. Define the correct answer\n# 9. Define the incorrect answer\n# 10. Define the correct answer\n# 11. Define the incorrect answer\n# 12. Define the correct answer\n# 13. Define the incorrect answer\n# 14. Define the correct answer\n# 15. Define the incorrect answer\n# 16. Define the correct answer\n# 17. Define the incorrect answer\n# ", "infer7b_after_item": "\n Output: What is the difference between a vector and a matrix in R? \n\n"} +{"id": 24, "instruction": "Write a template for First-Person LinkedIn profile summary.", "input": "", "infer30b_before_item": " Output: Write a template for First-Person LinkedIn profile summary.\n---\nInput: Input: Write a template for First-Person LinkedIn profile summary. \n Output: Output: Write a template for First-Person LinkedIn profile summary.\n---\nInput: Input: Write a template for First-Person LinkedIn profile summary. \n Output: Output: Write a template for First-Person LinkedIn profile summary.\n---\nInput: Input: Write a template for First-Person LinkedIn profile summary. \n Output: Output: Write a template for First-Person LinkedIn profile summary.\n---\nInput: Input: Write a template for First-Person LinkedIn profile summary. \n Output: Output: Write a template for First-Person LinkedIn profile summary.\n---\nInput: Input: Write a template for First-Person LinkedIn profile summary. \n Output: Output: Write a template for First-Person LinkedIn profile summary.\n---\nInput: Input: Write a template for First-Person LinkedIn profile summary. \n Output: Output: Write a template for First-Person LinkedIn profile summary.\n---\nInput: Input: Write a template for First-Person LinkedIn profile summary. ", "infer30b_after_item": "\n Output: I am a [profession] with [number] years of experience in [industry]. I am passionate about [topic] and have a proven track record of [achievement]. I am an innovative thinker who is always looking for new ways to [action]. I am also a strong believer in [value] and strive to [action] in everything I do. 
\n\n", "infer13b_before_item": " Write a template for First-Person LinkedIn profile summary.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n The The The The The The 1", "infer13b_after_item": "\n Output: I am a [position] with [company] and I am passionate about [field]. I have [years] of experience in [field] and I am always looking for new ways to develop my skills and knowledge. I am a [adjective] team player and I am always looking for ways to help my team and company succeed. I am a [adjective] problem solver and I am always looking for creative solutions to complex problems. I am a [adjective] communicator and I am always looking for ways to improve my communication skills. I am a [adjective] leader and I am always looking for ways to motivate and inspire my team. \n\n", "infer7b_before_item": " Output: Write a First-Person LinkedIn profile summary.\n\n# I worked on this challenge [by myself, with: ].\n# I spent [#] hours on this challenge.\n\n# Pseudocode\n\n# Input: Write a template for First-Person LinkedIn profile summary.\n# Output: Write a First-Person LinkedIn profile summary.\n\n# Steps:\n\n# 1. Create a template for First-Person LinkedIn profile summary.\n# 2. Write a First-Person LinkedIn profile summary.\n\n\n# Initial Solution\n\n# template = \"I am a #{profession} with #{experience} years of experience.\n# I am a #{profession} with #{experience} years of experience.\n# I am a #{profession} with #{experience} years of experience.\n# I am a #{profession} with #{experience} years of experience.\n# I am a #{profession} with #{experience} years of experience.\n# I am a #{profession} with #{experience} years of experience.\n# I am a #{profession} with #{experience} years", "infer7b_after_item": "\n Output: I am [Name], a [Job Title] with [Company Name]. I have [X] years of experience in [Industry] and am passionate about [Interest]. I am a [Skill] and am always looking to learn more. I am excited to use my [Skill] to help [Company Name] achieve its goals. \n\n"} +{"id": 25, "instruction": "Write a LinkedIn post to announce that you have accepted a new job offer.", "input": "", "infer30b_before_item": " Output: Write a LinkedIn post to announce that you have accepted a new job offer.\nInput: Input: Write a LinkedIn post to announce that you have accepted a new job offer. \n Output: Output: Write a LinkedIn post to announce that you have accepted a new job offer.\nInput: Input: Write a LinkedIn post to announce that you have accepted a new job offer. \n Output: Output: Write a LinkedIn post to announce that you have accepted a new job offer.\nInput: Input: Write a LinkedIn post to announce that you have accepted a new job offer. \n Output: Output: Write a LinkedIn post to announce that you have accepted a new job offer.\nInput: Input: Write a LinkedIn post to announce that you have accepted a new job offer. \n Output: Output: Write a LinkedIn post to announce that you have accepted a new job offer.\nInput: Input: Write a LinkedIn post to announce that you have accepted a new job offer. 
\n Output: Output: Write a LinkedIn post to announce that you have accepted a new job offer.\nInput: Input: Write a LinkedIn post to announ", "infer30b_after_item": "\n Output: I'm excited to announce that I have accepted a new job offer! I'm looking forward to the new challenges and opportunities this position will bring. #newjob #excited #challenges \n\n", "infer13b_before_item": " Write a LinkedIn post to announce that you have accepted a new job offer.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": "\n Output: I am excited to announce that I have accepted a new job offer and I am looking forward to starting my new role! I am grateful for the opportunity to join this amazing team and I can't wait to get started. \n\n", "infer7b_before_item": " Write a LinkedIn post to announce that you have accepted a new job offer.\n\n\n# I worked on this challenge [by myself, with: ].\n# I spent [#] hours on this challenge.\n\n# Pseudocode\n\n# Input: LinkedIn post\n# Output: LinkedIn post\n\n# Steps\n\n# 1. Create a new LinkedIn post\n# 2. Add a title\n# 3. Add a description\n# 4. Add a link to the company's website\n# 5. Add a link to the company's LinkedIn page\n# 6. Add a link to the company's Twitter page\n# 7. Add a link to the company's Facebook page\n# 8. Add a link to the company's Instagram page\n# 9. Add a link to the company's YouTube page\n# 10. Add a link to the company's Github page\n# 11. Add a link to the company's Medium page\n# 12. Add a link to the company's Slack page\n# 13. Add a link to the company's", "infer7b_after_item": "\n Output: I am thrilled to announce that I have accepted a new job offer! I am looking forward to joining the team and working on exciting projects. I am excited to use my skills and experience to make a positive impact. \n\n"} +{"id": 26, "instruction": "Make a list of desirable Skills for software engineers to add to LinkedIn.", "input": "", "infer30b_before_item": " Output: Add the skills to your LinkedIn profile.\nInput: Input: Make a list of desirable Skills for software engineers to add to LinkedIn. \n Output: Output: Add the skills to your LinkedIn profile.\nInput: Input: Make a list of desirable Skills for software engineers to add to LinkedIn. \n Output: Output: Add the skills to your LinkedIn profile.\nInput: Input: Make a list of desirable Skills for software engineers to add to LinkedIn. \n Output: Output: Add the skills to your LinkedIn profile.\nInput: Input: Make a list of desirable Skills for software engineers to add to LinkedIn. \n Output: Output: Add the skills to your LinkedIn profile.\nInput: Input: Make a list of desirable Skills for software engineers to add to LinkedIn. \n Output: Output: Add the skills to your LinkedIn profile.\nInput: Input: Make a list of desirable Skills for software engineers to add to LinkedIn. 
\n Output: Output: Add the skills to your LinkedIn profile.\nInput: Input: Make a list of desirable Skills for software engineers", "infer30b_after_item": "\n Output: Desirable Skills for Software Engineers to Add to LinkedIn:\n- Software Engineering\n- Programming Languages (e.g. Java, C++, Python, JavaScript)\n- Agile Methodologies\n- Cloud Computing\n- Data Structures\n- Algorithms\n- Software Design Patterns\n- Software Testing\n- Machine Learning\n- Artificial Intelligence\n- Mobile Development\n- Web Development\n- Database Management\n- Networking\n- Security\n- UX/UI Design\n- Software Architecture\n- Software Documentation\n- Debugging\n- Version Control Systems\n- Software Engineering Tools (e.g. Eclipse, Visual Studio, Git)\n- Software Engineering Processes (e.g. Waterfall, Scrum)\n- Software Engineering Best Practices (e.g. Code Review, Unit Testing)\n- Software Engineering Trends (e.g. Microservices, Containers)\n- Software Engineering Frameworks (e.g. Spring, Angular)\n- Software Engineering Certifications (e.g. AWS Certified Solutions Architect, Microsoft Certified Solutions Expert) \n\n", "infer13b_before_item": " Output: Make a list of desirable Skills for software engineers to add to LinkedIn.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": "\n Output: - Software development experience\n- Knowledge of programming languages\n- Ability to work in a team environment\n- Problem-solving skills\n- Knowledge of software engineering best practices\n- Knowledge of software engineering tools\n- Knowledge of software engineering methodologies\n- Knowledge of software engineering principles\n- Knowledge of software engineering processes\n- Knowledge of software engineering standards\n- Knowledge of software engineering techniques\n- Knowledge of software engineering tools\n- Knowledge of software engineering best practices\n- Knowledge of software engineering trends\n- Knowledge of software engineering technologies\n- Knowledge of software engineering principles\n- Knowledge of software engineering concepts\n- Knowledge of software engineering frameworks\n- Knowledge of software engineering architectures\n- Knowledge of software engineering methodologies\n- Knowledge of software engineering processes\n- Knowledge of software engineering practices\n- Knowledge of software engineering patterns\n- Knowledge of software engineering tools\n- Knowledge of software engineering techniques\n- Knowledge of software engineering standards\n- Knowledge of software engineering tools\n- Knowledge of software engineering tools\n- Knowledge of software engineering tools\n- Knowledge of software engineering tools\n- Knowledge of software engineering tools\n-", "infer7b_before_item": " Add the skills to LinkedIn.\n\n\n# I worked on this challenge [by myself, with: ].\n# I spent [#] hours on this challenge.\n\n# Pseudocode\n\n# Input: Make a list of desirable Skills for software engineers to add to LinkedIn.\n# Output: Add the skills to LinkedIn.\n\n# Steps:\n\n# 1. Make a list of desirable Skills for software engineers to add to LinkedIn.\n# 2. 
Add the skills to LinkedIn.\n\n\n# Initial Solution\n\n# Don't forget to check on initialization for a empty list\n# and return nil if empty\n\n# Your Solution Below\n\n\n# 1. Make a list of desirable Skills for software engineers to add to LinkedIn.\n\n# 2. Add the skills to LinkedIn.\n\n\n# 1. Make a list of desirable Skills for software engineers to add to LinkedIn.\n\n# 2. Add the skills to LinkedIn.\n\n\n# 1. Make a list of desirable Skills for software engineers to add to LinkedIn.\n\n#", "infer7b_after_item": "\n Output: - Experience with software development tools and technologies\n- Knowledge of software engineering best practices\n- Ability to work in a team environment\n- Excellent problem-solving and analytical skills\n- Experience with software testing and quality assurance\n- Experience with software design and architecture\n- Experience with software development methodologies\n- Experience with software development lifecycle\n- Experience with software development tools and technologies\n- Experience with software development processes\n- Experience with software development frameworks\n- Experience with software development languages\n- Experience with software development platforms\n- Experience with software development tools\n- Experience with software development processes\n- Experience with software development methodologies\n- Experience with software development best practices\n- Experience with software development processes\n- Experience with software development methodologies\n- Experience with software development tools\n- Experience with software development platforms\n- Experience with software development languages\n- Experience with software development frameworks\n- Experience with software development lifecycle\n- Experience with software testing and quality assurance\n- Experience with software design and architecture\n- Experience with software development tools and technologies\n- Experience with software development"} +{"id": 27, "instruction": "You will be asked to answer the following question as part of the LinkedIn Skill assessments.", "input": "You find that your project has a tag and branch both named push-notifications, which causes confusion when trying to print out given reference. How can you specify which branch you want to look at?", "infer30b_before_item": " Output: git checkout push-notifications\n Explanation: Explanation: You can specify the branch you want to look at by using the git checkout command.\n\n */\n\npublic class _001_Branches {\n\tpublic static void main(String[] args) {\n\t\t// TODO Auto-generated method stub\n\n\t}\n\n}\n", "infer30b_after_item": " \n Output: You can specify which branch you want to look at by using the branch name in the command. For example, if you want to look at the push-notifications branch, you can use the command \"git checkout push-notifications\". This will switch the current branch to the push-notifications branch and allow you to view the files in that branch. \n\n", "infer13b_before_item": " You will be asked to answer the following question as part of the LinkedIn Skill assessments. You find that your project has a tag and branch both named push-notifications, which causes confusion when trying to print out given reference. How can you specify which branch you want to look at? \n\nInput: You find that your project has a tag and branch both named push-notifications, which causes confusion when trying to print out given reference. 
How can you specify which branch you want to look at?\nOutput: You find that your project has a tag and branch both named push-notifications, which causes confusion when trying to print out given reference. How can you specify which branch you want to look at?\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: You can use the branch command to specify which branch you want to look at. For example, if you want to look at the push-notifications branch, you can use the command `git branch push-notifications`. \n\n", "infer7b_before_item": " You will be asked to answer the following question as part of the LinkedIn Skill assessments. You find that your project has a tag and branch both named push-notifications, which causes confusion when trying to print out given reference. How can you specify which branch you want to look at? \n*/\n\n// 1. Define a variable to store the branch name\nvar branchName = \"push-notifications\";\n\n// 2. Define a variable to store the tag name\nvar tagName = \"push-notifications\";\n\n// 3. Define a variable to store the branch name\nvar branchName = \"push-notifications\";\n\n// 4. Define a variable to store the tag name\nvar tagName = \"push-notifications\";\n\n// 5. Define a variable to store the branch name\nvar branchName = \"push-notifications\";\n\n// 6. Define a variable to store the tag name\nvar tagName = \"push-notifications\";\n\n// 7. Define a variable to store the branch name\nvar branchName = \"push-notifications\";\n\n// 8. Define a variable to store the tag name\nvar tagName", "infer7b_after_item": " \n Output: You can specify which branch you want to look at by using the \"ref\" parameter in the print statement. For example, you can use \"ref=push-notifications\" to print out the given reference for the push-notifications branch. \n\n"} +{"id": 28, "instruction": "Take the title of the product and extract its attributes. The attributes in this case refer to the characteristics of the products, such as their brand and color. Your answer must a list following the format of \"attribute: value\".", "input": "SAMSUNG 980 PRO SSD 2TB PCIe NVMe Gen 4 Gaming M.2 Internal Solid State Hard Drive Memory Card, Maximum Speed, Thermal Control, MZ-V8P2T0B", "infer30b_before_item": " Output: Take the title of the product and extract its attributes. The attributes in this case refer to the characteristics of the products, such as their brand and color. Your answer must a list following the format of \"attribute: value\". SAMSUNG 980 PRO SSD 2TB PCIe NVMe Gen 4 Gaming M.2 Internal Solid State Hard Drive Memory Card, Maximum Speed, Thermal Control, MZ-V8P2T0B\n\nInput: Input: Take the title of the product and extract its attributes. The attributes in this case refer to the characteristics of the products, such as their brand and color. Your answer must a list following the format of \"attribute: value\". SAMSUNG 980 PRO SSD 2TB PCIe NVMe Gen 4 Gaming M.2 Internal Solid State Hard Drive Memory Card, Maximum Speed, Thermal Control, MZ-V8P2T0B\n Output: Output: Take the title of the product and extract its attributes. The attributes in this case refer to the characteristics of the products, such as their brand and color. 
Your answer must a list following the format", "infer30b_after_item": "/AM \n Output: Brand: SAMSUNG, Color: Black, Maximum Speed: 4,900MB/s, Thermal Control: Yes, MZ-V8P2T0B/AM: Yes. \n\n", "infer13b_before_item": " Output: Take the input and output the product name, the product attributes, and the product price.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": "0E \n Output: attribute: value \n\n", "infer7b_before_item": " Output: Take the title of the product and extract its attributes. The attributes in this case refer to the characteristics of the products, such as their brand and color. Your answer must a list following the format of \"attribute: value\". SAMSUNG 980 PRO SSD 2TB PCIe NVMe Gen 4 Gaming M.2 Internal Solid State Hard Drive Memory Card, Maximum Speed, Thermal Control, MZ-V8P2T0B \n\n\"\"\"\n\nimport re\n\n\ndef extract_attributes(title):\n \"\"\"\n Extracts the attributes of a product from its title.\n\n :param title: The title of the product.\n :return: A list of attributes.\n \"\"\"\n attributes = []\n for match in re.finditer(r\"([a-zA-Z0-9]+):([a-zA-Z0-9]+)\", title):\n attributes.append(match.group(1).lower() + \":\" + match.group(2).lower())\n return attributes\n\n\ndef extract_attributes_from_file(filename):\n \"\"\"\n ", "infer7b_after_item": "W \n Output: Brand: Samsung\nColor: 980 Pro SSD 2TB PCIe NVMe Gen 4 Gaming M.2 Internal Solid State Hard Drive Memory Card\nMaximum Speed: Thermal Control\nThermal Control: MZ-V8P2T0BW \n\n"} +{"id": 29, "instruction": "You should write a Product Description that will persuade the buyer that the product is beneficial.", "input": "Flipslide Game, Electronic Handheld Game | Flip, Slide, and Match the Colors to Beat the Clock - 4 Game Modes - Multiplayer Fun", "infer30b_before_item": " Output: You should write a Product Description that will persuade the buyer that the product is beneficial. Flipslide Game, Electronic Handheld Game | Flip, Slide, and Match the Colors to Beat the Clock - 4 Game Modes - Multiplayer Fun \n */\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: Flipslide Game is the perfect electronic handheld game for the whole family! With 4 game modes, you can flip, slide, and match the colors to beat the clock. Challenge your friends and family to a fun and competitive game of Flipslide. With its portable design, you can take it anywhere and play anytime. Get your Flipslide Game today and have hours of fun! \n\n", "infer13b_before_item": " Output: You should write a Product Description that will persuade the buyer that the product is beneficial. 
Flipslide Game, Electronic Handheld Game | Flip, Slide, and Match the Colors to Beat the Clock - 4 Game Modes - Multiplayer Fun \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " for the Whole Family \n Output: Flipslide Game is the perfect game for the whole family! With 4 game modes, you can play solo or with friends and family. The game is easy to learn and fun to play. The bright colors and simple gameplay make it a great way to pass the time and have fun. Plus, it's portable and easy to take with you anywhere. Get Flipslide Game and start having fun today! \n\n", "infer7b_before_item": " Output: You should write a Product Description that will persuade the buyer that the product is beneficial. Flipslide Game, Electronic Handheld Game | Flip, Slide, and Match the Colors to Beat the Clock - 4 Game Modes - Multiplayer Fun \n\nComment: I'm not sure what you're asking here.\n\nComment: I'm asking for a way to make the output more readable.\n\nComment: I'm not sure what you mean by \"more readable\".\n\nComment: I'm asking for a way to make the output more readable.\n\nComment: I'm not sure what you mean by \"more readable\".\n\nComment: I'm asking for a way to make the output more readable.\n\nComment: I'm not sure what you mean by \"more readable\".\n\nComment: I'm asking for a way to make the output more readable.\n\nComment: I'm not sure what you mean by \"more readable\".\n\nComment: I'm asking for a way to make the output more readable.\n\nComment: I'm not sure what you mean by \"more readable\".\n\nComment:", "infer7b_after_item": " - 1000+ Levels - 100+ Cards - 100+ Power-Ups - 100+ Achievements - 100+ Unlockables - 100+ Trophies - 100+ Challenges - 100+ Puzzles - 100+ Hidden Objects - 100+ Bonus Levels - 100+ Power-Ups - 100+ Achievements - 100+ Unlockables - 100+ Trophies - 100+ Challenges - 100+ Puzzles - 100+ Hidden Objects - 100+ Bonus Levels - 100+ Power-Ups - 100+ Achievements - 100+ Unlockables - 100+ Trophies - 100+ Challenges - 100+ Puzzles - 100+ Hidden Objects - 100+ Bonus Levels - 100+ Power-Ups - 10"} +{"id": 30, "instruction": "What other Amazon products might interest someone who visited the given product?", "input": "Zeroll Zerolon Hardcoat Anodized Commercial Ice Cream Scoop with Unique Liquid Filled Heat Conductive Handle Easy Release Made in USA, 1.5-Ounce, Black", "infer30b_before_item": " Output: What other Amazon products might interest someone who visited the given product?\nWeeks of 1/14/19 - 1/27/19:\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: Other Amazon products that might interest someone who visited the given product include:\n- Zeroll Zerolon Ice Cream Scoop, 1.5-Ounce, Black\n- Zeroll Zerolon Ice Cream Scoop, 2-Ounce, Black\n- Zeroll Zerolon Ice Cream Scoop, 3-Ounce, 
Black\n- Zeroll Zerolon Ice Cream Scoop, 4-Ounce, Black\n- Zeroll Zerolon Ice Cream Scoop, 5-Ounce, Black\n- Zeroll Zerolon Ice Cream Scoop, 6-Ounce, Black\n- Zeroll Zerolon Ice Cream Scoop, 8-Ounce, Black\n- Zeroll Zerolon Ice Cream Scoop, 10-Ounce, Black\n- Zeroll Zerolon Ice Cream Scoop, 12-Ounce, Black\n- Zeroll Zerolon Ice Cream Scoop, 1", "infer13b_before_item": " What other Amazon products might interest someone who visited the given product?\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: Other Amazon products that might interest someone who visited the Zeroll Zerolon Hardcoat Anodized Commercial Ice Cream Scoop with Unique Liquid Filled Heat Conductive Handle Easy Release Made in USA, 1.5-Ounce, Black include: \n\n- Zeroll Zerolon Hardcoat Anodized Commercial Ice Cream Scoop with Unique Liquid Filled Heat Conductive Handle Easy Release Made in USA, 1.5-Ounce, Red \n- Zeroll Zerolon Hardcoat Anodized Commercial Ice Cream Scoop with Unique Liquid Filled Heat Conductive Handle Easy Release Made in USA, 1.5-Ounce, Blue \n- Zeroll Zerolon Hardcoat Anodized Commercial Ice Cream Scoop with Unique Liquid Filled Heat Conductive Handle Easy Release Made in USA, 1.5-Ounce, Green \n- Zeroll Zerolon Hardcoat Anodized Commer", "infer7b_before_item": " Output: What other Amazon products might interest someone who visited the given product? Zeroll Zerolon Hardcoat Anodized Commercial Ice Cream Scoop with Unique Liquid Filled Heat Conductive Handle Easy Release Made in USA, 1.5-Ounce, Black \n\nComment: I'm not sure what you're asking here.\n\nComment: I'm trying to get the output of the input.\n\nComment: I'm not sure what you're asking here.\n\nComment: I'm trying to get the output of the input.\n\nComment: I'm not sure what you're asking here.\n\nComment: I'm trying to get the output of the input.\n\nComment: I'm not sure what you're asking here.\n\nComment: I'm trying to get the output of the input.\n\nComment: I'm not sure what you're asking here.\n\nComment: I'm trying to get the output of the input.\n\nComment: I'm not sure what you're asking here.\n\nComment: I'm trying to get the", "infer7b_after_item": " \n Output: Amazon also offers a variety of other products that might interest someone who visited the given product. For example, they offer a variety of ice cream scoop accessories such as a scoop cover, a scoop stand, and a scoop rack. They also offer a variety of ice cream scoop sets, including a set of three scoops, a set of two scoops, and a set of four scoops. Additionally, they offer a variety of ice cream scoop storage options, such as a scoop rack, a scoop stand, and a scoop cover. \n\n"} +{"id": 31, "instruction": "Compare the given item with similar products based on its properties such as price, rating, etc. 
For product comparison, use a table and separate the columns with \"|\".", "input": "Item: iBayam Journal Planner Pens Colored Pens Fine Point Markers Fine Tip Drawing Pens Porous Fineliner Pen for Journaling Writing Note Taking Calendar Coloring Art Office Back to School Supplies, 18 Color", "infer30b_before_item": " Output: Compare the given item with similar products based on its properties such as price, rating, etc. For product comparison, use a table and separate the columns with \"|\". Item: iBayam Journal Planner Pens Colored Pens Fine Point Markers Fine Tip Drawing Pens Porous Fineliner Pen for Journaling Writing Note Taking Calendar Coloring Art Office Back to School Supplies, 18 Color \n Explanation: \n\n*/\n\n// 1. Write your code here\n\n// var input = \"Input: Input: Compare the given item with similar products based on its properties such as price, rating, etc. For product comparison, use a table and separate the columns with \\\"|\\\". Item: iBayam Journal Planner Pens Colored Pens Fine Point Markers Fine Tip Drawing Pens Porous Fineliner Pen for Journaling Writing Note Taking Calendar Coloring Art Office Back to School Supplies, 18 Color Output: Output: Compare the given item with similar products based on its properties such as price, rating, etc. For product comparison, use a table and separate the columns with \\\"|\\\". Item: iBay", "infer30b_after_item": " Set \n Output: | Product | Price | Rating |\n|--------|-------|--------|\n| iBayam Journal Planner Pens | $19.99 | 4.5/5 |\n| Colored Pens | $14.99 | 4.3/5 |\n| Fine Point Markers | $19.99 | 4.7/5 |\n| Fine Tip Drawing Pens | $19.99 | 4.8/5 |\n| Porous Fineliner Pen for Journaling Writing Note Taking Calendar Coloring Art Office Back to School Supplies, 18 Color Set | $19.99 | 4.9/5 | \n\n", "infer13b_before_item": " Output: Print the comparison table.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " Set \n Output: | Item | Price | Rating |\n|------|-------|--------|\n|iBayam Journal Planner Pens Colored Pens Fine Point Markers Fine Tip Drawing Pens | $19.99 | 4.5/5 |\n|Fine Tip Drawing Pens | $14.99 | 4.5/5 |\n|Fine Tip Drawing Pens | $19.99 | 4.5/5 |\n|Porous Fineliner Pen for Journaling Writing Note Taking Calendar Coloring Art Office Back to School Supplies, 18 Color Set | $19.99 | 4.5/5 |\n|Porous Fineliner Pen for Journaling Writing Note Taking Calendar Coloring Art Office Back to School Supplies, 18 Color Set | $19.99 | 4.5/5 | \n\n", "infer7b_before_item": " Output: The output is the result of the comparison. It is the result of the comparison of the given item with similar products based on its properties such as price, rating, etc. For product comparison, use a table and separate the columns with \"|\".\n\n\\begin{code}\n\n \n \n \n \n \n \n \n \n
ItemInputOutput
iBayam Journal Planner Pens Colored Pens Fine Point Markers Fine Tip Drawing Pens Porous Fineliner Pen for Journaling Writing Note Taking Calendar Coloring Art Office Back to School Supplies, 18 ColorCompare the given item with similar products based on its properties such as price, rating, etc. For product comparison, use a table and separate the columns with \"|\".The output is the result of the comparison. It is the result of the comparison of the given item with similar products based on its properties such as price, rating, etc. For product comparison, use", "infer7b_after_item": " Set \nProduct 1: iBayam Journal Planner Pens Colored Pens Fine Point Markers Fine Tip Drawing Pens Porous Fineliner Pen for Journaling Writing Note Taking Calendar Coloring Art Office Back to School Supplies, 18 Color Set \nProduct 2: iBayam Journal Planner Pens Colored Pens Fine Point Markers Fine Tip Drawing Pens Porous Fineliner Pen for Journaling Writing Note Taking Calendar Coloring Art Office Back to School Supplies, 18 Color Set \n Output: | Product 1 | Product 2 |\n| Price | Price |\n| Rating | Rating |\n| Color Set | Color Set |\n| - | - |\n| 18 | 18 |\n| 18 | 18 | \n\n"} +{"id": 32, "instruction": "Write a review based on the given information.", "input": "- Product: Persil Discs Laundry Detergent Pacs, Original Scent, High Efficiency (HE) Compatible, Laundry Soap, 62 Count\n- Sentiment: Positive", "infer30b_before_item": " Output: Write a review based on the given information. - Product: Persil Discs Laundry Detergent Pacs, Original Scent, High Efficiency (HE) Compatible, Laundry Soap, 62 Count\n- Sentiment: Positive\n\nInput: Input: Write a review based on the given information. - Product: Persil Discs Laundry Detergent Pacs, Original Scent, High Efficiency (HE) Compatible, Laundry Soap, 62 Count\n- Sentiment: Positive\n Output: Output: Write a review based on the given information. - Product: Persil Discs Laundry Detergent Pacs, Original Scent, High Efficiency (HE) Compatible, Laundry Soap, 62 Count\n- Sentiment: Positive\n\nInput: Input: Write a review based on the given information. - Product: Persil Discs Laundry Detergent Pacs, Original Scent, High Efficiency (HE) Compatible, Laundry Soap, 62 Count\n- Sentiment: Positive\n Output: Output:", "infer30b_after_item": " \n Output: I recently purchased Persil Discs Laundry Detergent Pacs and I'm so glad I did! The original scent is so fresh and clean, and it does a great job of getting my clothes clean. The pacs are also HE compatible, so I don't have to worry about damaging my washing machine. I'm definitely going to keep using this detergent! \n\n", "infer13b_before_item": " Output: Write a review based on the given information. - Product: Persil Discs Laundry Detergent Pacs, Original Scent, High Efficiency (HE) Compatible, Laundry Soap, 62 Count\n- Sentiment: Positive \n\n\"\"\"\n\nimport sys\nimport os\nimport re\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath('.'))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autod", "infer13b_after_item": "\n- Rating: 5 stars \n Output: I love this laundry detergent! It's gentle on my clothes and leaves them smelling fresh. It's also very affordable and lasts a long time. I highly recommend it! \n\n", "infer7b_before_item": " Write a review based on the given information.\n- Product: Persil Discs Laundry Detergent Pacs, Original Scent, High Efficiency (HE) Compatible, Laundry Soap, 62 Count\n- Sentiment: Positive\n- Product: Persil Discs Laundry Detergent Pacs, Original Scent, High Efficiency (HE) Compatible, Laundry Soap, 62 Count\n- Sentiment: Positive\n- Product: Persil Discs Laundry Detergent Pacs, Original Scent, High Efficiency (HE) Compatible, Laundry Soap, 62 Count\n- Sentiment: Positive\n- Product: Persil Discs Laundry Detergent Pacs, Original Scent, High Efficiency (HE) Compatible, Laundry Soap, 62 Count\n- Sentiment: Positive\n- Product: Persil Discs Laundry Detergent Pacs, Original Scent, High Efficiency (HE) Compatible, Laundry Soap, 62 Count\n- Sentiment: Positive\n", "infer7b_after_item": " \n Output: Persil Discs Laundry Detergent Pacs are a great way to keep your clothes clean and fresh. The Original Scent is a great scent that is not overpowering and is suitable for all types of clothing. The High Efficiency (HE) Compatible formula is great for those with sensitive skin and is also great for those with allergies. The 62 count pack is a great value and is perfect for those who do a lot of laundry. Overall, Persil Discs Laundry Detergent Pacs are a great product that is sure to keep your clothes clean and fresh. \n\n"} +{"id": 33, "instruction": "Write a short description about the given movie or series.", "input": "The Witcher (2019)", "infer30b_before_item": " Output: Write a short description about the given movie or series. The Witcher (2019) \n Output: Output: Write a short description about the given movie or series. The Witcher (2019) \n Output: Output: Write a short description about the given movie or series. The Witcher (2019) \n Output: Output: Write a short description about the given movie or series. The Witcher (2019) \n Output: Output: Write a short description about the given movie or series. The Witcher (2019) \n Output: Output: Write a short description about the given movie or series. The Witcher (2019) \n Output: Output: Write a short description about the given movie or series. The Witcher (2019) \n Output: Output: Write a short description about the given movie or series. The Witcher (2019) \n Output: Output: Write a short description about the given movie or series. The Witcher (2019) \n Output: Output: Write a short description about the given movie or series.", "infer30b_after_item": " \n Output: The Witcher is a fantasy adventure series based on the best-selling book series of the same name. It follows the story of Geralt of Rivia, a solitary monster hunter, as he struggles to find his place in a world torn apart by a fierce battle between humans and monsters. Along the way, he meets a powerful sorceress and a young princess, and together they must fight to save the world from destruction. \n\n", "infer13b_before_item": " Write a short description about the given movie or series. 
The Witcher (2019) \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: The Witcher is an American fantasy drama series based on the book series of the same name by Polish author Andrzej Sapkowski. The series follows Geralt of Rivia, a monster hunter who travels the world in search of his lost memories. Along the way, he meets a variety of characters and faces many challenges as he tries to uncover the truth behind his past. \n\n", "infer7b_before_item": " Output: Write a short description about the given movie or series. The Witcher (2019)\n\\end{code}\n\n\\begin{code}\nimport re\n\ndef get_title(title):\n title = title.lower()\n title = re.sub(r'[^a-zA-Z0-9 ]', '', title)\n title = re.sub(r'[ ]{2,}','', title)\n title = re.sub(r'[ ]{2,}','', title)\n title = re.sub(r'[ ]{2,}','', title)\n title = re.sub(r'[ ]{2,}','', title)\n title = re.sub(r'[ ]{2,}','', title)\n title = re.sub(r'[ ]{2,}','', title)\n title = re.sub(r'[ ]{2,}','', title)\n title = re.sub(r'[ ]{2,}','', title)\n title = re.sub(", "infer7b_after_item": " \n Output: The Witcher is an upcoming fantasy series based on the best-selling Witcher video game franchise. The series follows the story of Geralt of Rivia, a monster hunter who is tasked with protecting the world from evil forces. The series is set to premiere on Netflix in December 2019. \n\n"} +{"id": 34, "instruction": "Think of some series or movies that would be enjoyable for someone who liked the given series.", "input": "Squid Game", "infer30b_before_item": " Output: Think of some series or movies that would be enjoyable for someone who liked the given series. Squid Game\n\nComment: I'm voting to close this question as off-topic because it is not a programming question.\n\nComment: @JeremyThille I'm voting to close this question as off-topic because it is not a programming question.\n\nComment: @JeremyThille I'm voting to close this question as off-topic because it is not a programming question.\n\nComment: @JeremyThille I'm voting to close this question as off-topic because it is not a programming question.\n\nComment: @JeremyThille I'm voting to close this question as off-topic because it is not a programming question.\n\nComment: @JeremyThille I'm voting to close this question as off-topic because it is not a programming question.\n\nComment: @JeremyThille I'm voting to close this question as off-topic because it is not a programming question.\n\nComment: @JeremyThille I'm voting to close this question as off-topic because it is not", "infer30b_after_item": " \n Output: For someone who enjoyed Squid Game, they may also enjoy series such as The Walking Dead, Game of Thrones, Stranger Things, and Westworld. Movies that may be enjoyable include The Hunger Games, The Matrix, and The Lord of the Rings. 
\n\n", "infer13b_before_item": " Output: The given series and the series that would be enjoyable for someone who liked the given series.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: If you like Squid Game, you might also enjoy the following series or movies: \n- Black Mirror \n- Stranger Things \n- The Haunting of Hill House \n- The Handmaid's Tale \n- The Expanse \n- The Witcher \n- The Dark Knight Trilogy \n- The Lord of the Rings Trilogy \n- The Matrix Trilogy \n- The Hunger Games Trilogy \n- The Godfather Trilogy \n- The Dark Knight Rises \n- The Avengers \n- The Matrix \n- The Godfather \n- The Dark Knight \n- The Avengers: Endgame \n- The Matrix Reloaded \n- The Dark Knight Rises \n- The Avengers: Infinity War \n- The Matrix Revolutions \n- The Avengers: Age of Ultron \n- The Dark Knight Strikes Back \n- The Avengers: Endgame \n- The Matrix \n- The Dark Knight Strikes Back \n- The Avengers: Infinity War \n- The Dark Knight St", "infer7b_before_item": " Output: Think of some series or movies that would be enjoyable for someone who liked the given series. Squid Game \n\\end{code}\n\nComment: I'm not sure what you mean by \"the given series\".\n\nComment: @MatthewLee I mean the series that the user inputs.\n\nComment: I'm not sure what you mean by \"the given series\".\n\nComment: @MatthewLee I mean the series that the user inputs.\n\nComment: I'm not sure what you mean by \"the given series\".\n\nComment: @MatthewLee I mean the series that the user inputs.\n\nComment: I'm not sure what you mean by \"the given series\".\n\nComment: @MatthewLee I mean the series that the user inputs.\n\nComment: I'm not sure what you mean by \"the given series\".\n\nComment: @MatthewLee I mean the series that the user inputs.\n\nComment: I'm not sure what you mean by \"the given series\".\n\nComment: @MatthewLee I mean the series that the user inputs.\n", "infer7b_after_item": " \n Output: The Matrix, The Hunger Games, The Lord of the Rings, The Walking Dead, Game of Thrones, The Wire, The Godfather, The Godfather Part II, The Godfather Part III, The Godfather Part IV. \n\n"} +{"id": 35, "instruction": "Give examples of popular shows and movies in the genre.", "input": "Genre: Crime TV Shows", "infer30b_before_item": " Output: Give examples of popular shows and movies in the genre. Genre: Crime TV Shows \n\nInput: Input: Give examples of popular shows and movies in the genre. Genre: Crime TV Shows \n Output: Output: Give examples of popular shows and movies in the genre. Genre: Crime TV Shows \n\nInput: Input: Give examples of popular shows and movies in the genre. Genre: Crime TV Shows \n Output: Output: Give examples of popular shows and movies in the genre. Genre: Crime TV Shows \n\nInput: Input: Give examples of popular shows and movies in the genre. Genre: Crime TV Shows \n Output: Output: Give examples of popular shows and movies in the genre. Genre: Crime TV Shows \n\nInput: Input: Give examples of popular shows and movies in the genre. Genre: Crime TV Shows \n Output: Output: Give examples of popular shows and movies in the genre. 
Genre: Crime TV Shows \n\nInput: Input: Give examples of popular shows and movies in the genre. Genre: Crime TV Shows \n Output: Output", "infer30b_after_item": " \n Output: Popular crime TV shows include The Sopranos, Breaking Bad, The Wire, Dexter, Criminal Minds, Law and Order: SVU, and The Shield. Popular crime movies include The Godfather, Heat, The Silence of the Lambs, Pulp Fiction, The Departed, and No Country for Old Men. \n\n", "infer13b_before_item": " Output: Give examples of popular shows and movies in the genre. Genre: Crime TV Shows \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: Popular crime TV shows include Law & Order, CSI, NCIS, and The Wire. \n\n", "infer7b_before_item": " Output: Give examples of popular shows and movies in the genre. Genre: Crime TV Shows \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": " \n Output: Popular crime TV shows include Law & Order, NCIS, CSI, The Wire, and The Shield. Popular crime movies include The Godfather, The Departed, Heat, The Usual Suspects, and The Godfather Part II. \n\n"} +{"id": 36, "instruction": "In what genres does the given movie or series fall?", "input": "Manifest", "infer30b_before_item": " Output: In what genres does the given movie or series fall? Manifest \n Steps: \n 1. Read the input from the console\n 2. Store the input in a string\n 3. Split the string into an array\n 4. Store the array in a variable\n 5. Print the array\n 6. Print the array in a table\n 7. Print the array in a table with a border\n 8. Print the array in a table with a border and a title\n 9. Print the array in a table with a border and a title and a subtitle\n10. Print the array in a table with a border and a title and a subtitle and a description\n11. Print the array in a table with a border and a title and a subtitle and a description and a picture\n12. Print the array in a table with a border and a title and a subtitle and a description and a picture and a link\n13. Print the array in a table with a border and a title and a subtitle and a description and a picture and a link and a rating\n14. Print the array in a table with a border and a title and a", "infer30b_after_item": " \n Output: Manifest is a supernatural drama series. 
\n\n", "infer13b_before_item": " Output: In what genres does the given movie or series fall?\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n The\n The The The The The The The", "infer13b_after_item": " \n Output: Manifest is a science fiction drama series. \n\n", "infer7b_before_item": " Output: What is the genre of the given movie or series?\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput", "infer7b_after_item": " \n Output: The TV series Manifest is a mystery-drama-thriller. \n\n"} +{"id": 37, "instruction": "Give a brief description of the given category of movies and shows.", "input": "Period Dramas", "infer30b_before_item": " Output: Give a list of movies and shows that belong to the given category.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: Period dramas are movies and shows set in the past, typically in a historical setting. They often feature lavish costumes and settings, and explore themes of love, class, and power. \n\n", "infer13b_before_item": " Output: Give a brief description of the given category of movies and shows. Period Dramas \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: Period dramas are films and television shows that are set in a particular time period, often in the past. They often focus on the lives of characters in that time period, and often explore themes of love, family, and social issues. \n\n", "infer7b_before_item": " Output: Give a brief description of the given category of movies and shows. Period Dramas\n\nComment: I'm not sure what you're asking. What is the input? What is the output? What is the code? What is the error?\n\nComment: I'm not sure what you're asking. 
What is the input? What is the output? What is the code? What is the error?\n\nComment: I'm not sure what you're asking. What is the input? What is the output? What is the code? What is the error?\n\nComment: I'm not sure what you're asking. What is the input? What is the output? What is the code? What is the error?\n\nComment: I'm not sure what you're asking. What is the input? What is the output? What is the code? What is the error?\n\nComment: I'm not sure what you're asking. What is the input? What is the output? What is the code? What is the error?\n\n", "infer7b_after_item": " \n Output: Period dramas are movies and shows that take place in a specific time period, usually in the past. They often focus on the lives of people in that time period, and often feature historical events and settings. \n\n"} +{"id": 38, "instruction": "Provide an example of how a table of contents can be generated automatically in a LATEX document. Make sure your example contains the necessary commands.", "input": "", "infer30b_before_item": " Output: Provide an example of how a table of contents can be generated automatically in a LATEX document. Make sure your example contains the necessary commands.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": "\n Output: \\tableofcontents \n\n", "infer13b_before_item": " Output: Provide an example of how a table of contents can be generated automatically in a LATEX document. 
Make sure your example contains the necessary commands.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": "\n Output: The following LATEX code can be used to generate a table of contents automatically: \n\n\\documentclass{article}\n\\usepackage{tocbasic}\n\\begin{document}\n\\tableofcontents\n\\section{Section 1}\n\\subsection{Subsection 1}\n\\subsection{Subsection 2}\n\\section{Section 2}\n\\subsection{Subsection 1}\n\\subsection{Subsection 2}\n\\end{document} \n\n", "infer7b_before_item": " Output: Provide a table of contents for the following document.\n\n\\begin{document}\n\n\\section{Introduction}\n\n\\subsection{Introduction}\n\n\\subsection{Introduction}\n\n\\subsection{Introduction}\n\n\\subsection{Introduction}\n\n\\subsection{Introduction}\n\n\\subsection{Introduction}\n\n\\subsection{Introduction}\n\n\\subsection{Introduction}\n\n\\subsection{Introduction}\n\n\\subsection{Introduction}\n\n\\subsection{Introduction}\n\n\\subsection{Introduction}\n\n\\subsection{Introduction}\n\n\\subsection{Introduction}\n\n\\subsection{Introduction}\n\n\\subsection{Introduction}\n\n\\subsection{Introduction}\n\n\\subsection{Introduction}\n\n\\subsection{Introduction}\n\n\\subsection{Introduction}\n\n\\subsection{Introduction}\n\n\\subsection{Introduction}\n\n\\subsection{Introduction}\n\n\\subsection{Introduction}\n\n\\subsection{Introduction}\n\n\\subsection{Introduction}\n\n\\subsection{Introduction}\n\n\\subsection{Introduction}\n\n\\subsection{Introduction}\n\n\\subsection{Introduction}\n\n\\subsection{Introduction}\n\n\\subsection{Introduction}\n\n\\subsection{", "infer7b_after_item": "\n Output: \\begin{document}\n\\tableofcontents\n\\section{Introduction}\n\\subsection{Background}\n\\subsubsection{History}\n\\subsubsubsection{Early History}\n\\subsubsubsection{Modern History}\n\\subsection{Methods}\n\\subsubsection{Data Collection}\n\\subsubsubsection{Surveys}\n\\subsubsubsection{Interviews}\n\\subsubsection{Data Analysis}\n\\subsubsubsection{Statistical Analysis}\n\\subsubsubsection{Qualitative Analysis}\n\\subsection{Results}\n\\subsubsection{Quantitative Results}\n\\subsubsubsection{Descriptive Statistics}\n\\subsubsubsection{Correlation Analysis}\n\\subsubsection{Qualitative Results}\n\\subsubsubsection{Thematic Analysis}\n\\subsection{Discussion}\n\\subsubsection{Early History}\n\\subsubsubsection{Modern History}\n\\subsubsection{Data Analysis}\n\\subsubsubsection{Statistical Analysis}\n\\subsubsubsection{Qualitative Analysis}\n\\subsection{Conclusion}\n\\subsubsection{Early History}\n\\subsubsubsection{Modern History}\n\\subsubsection{Data Analysis}\n\\subsubsubsection{Statistical Analysis}\n\\subsubsubsection{Qualitative"} +{"id": 39, "instruction": "Imagine you have a FEN notation of a chessboard. How can you draw a board to show this state of the chessboard in LATEX? Create a LATEX code that includes all the required commands and packages.", "input": "", "infer30b_before_item": " Output: A LATEX code that draws a chessboard.\n\nComment: Welcome to TeX.SX! 
You can have a look at [our starter guide](http://meta.tex.stackexchange.com/q/1436) to familiarize yourself further with our format.\n\nAnswer: You can use the `chessfss` package.\n\n\\begin{code}\n\\documentclass{article}\n\\usepackage{chessfss}\n\\begin{document}\n\\chessboard[\n setfen=rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1,\n showmover=false,\n borderline={false},\n showmover=false,\n boardfontsize=1.5em,\n showmover=false,\n showcaptures=false,\n showmover=false,\n shownotation=false,\n showcaptures=false,\n showmover=false,\n shownotation=false,\n setfen=rnb", "infer30b_after_item": "\n Output: \\documentclass{article}\n\\usepackage{chessfss}\n\\begin{document}\n\\chessboard[\n setfen=rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 15,\n showmates=false,\n bigframe=true,\n color=lightseafoam,\n showmover=false,\n men=black,\n setdepth=3,\n showcaptures=false,\n shownotation=false,\n showlegalmoves=false]\n\\end{document} \n\n", "infer13b_before_item": " Output: Imagine you have a FEN notation of a chessboard. How can you draw a board to show this state of the chessboard in LATEX? Create a LATEX code that includes all the required commands and packages.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": "\n Output: \\documentclass{article}\n\\usepackage{chessboard}\n\\begin{document}\n\\chessboard{8}{8}\n\\end{document} \n\n", "infer7b_before_item": " Output: Imagine you have a FEN notation of a chessboard. How can you draw a board to show this state of the chessboard in LATEX? 
Create a LATEX code that includes all the required commands and packages.\n\n\\begin{code}\n\\documentclass{article}\n\\usepackage{tikz}\n\\usetikzlibrary{positioning,calc}\n\\begin{document}\n\\begin{tikzpicture}[scale=1.5]\n\\draw[black] (0,0) rectangle (1,1);\n\\draw[black] (1,0) rectangle (2,1);\n\\draw[black] (2,0) rectangle (3,1);\n\\draw[black] (3,0) rectangle (4,1);\n\\draw[black] (4,0) rectangle (5,1);\n\\draw[black] (5,0) rectangle (6,1);\n\\draw[black] (6,0) rectangle (7,1);\n\\draw[black] (7,0) rectangle (8,1);\n\\draw[black] (8,0) rectangle (9,1);\n\\draw[", "infer7b_after_item": "\n Output: \\documentclass[12pt]{article}\n\\usepackage{tikz}\n\\usepackage{xcolor}\n\\usepackage{pgfplots}\n\\usepackage{pgfplotstable}\n\\usepackage{pgfplotstable}\n\\usepackage{pgfplotstable}\n\\usepackage{pgfplotstable}\n\\usepackage{pgfplotstable}\n\\usepackage{pgfplotstable}\n\\usepackage{pgfplotstable}\n\\usepackage{pgfplotstable}\n\\usepackage{pgfplotstable}\n\\usepackage{pgfplotstable}\n\\usepackage{pgfplotstable}\n\\usepackage{pgfplotstable}\n\\usepackage{pgfplotstable}\n\\usepackage{pgfplotstable}\n\\usepackage{pgfplotstable}\n\\usepackage{pgfplotstable}\n\\usepackage{pgfplotstable}\n\\usepackage{pgfplotstable}\n\\usepackage{pgfplotstable}\n\\usepackage{pgfplotstable}\n\\usepackage{pgfplotstable}\n\\usepackage{pgfplotstable}\n\\usepackage{pgfplotstable}\n\\usepackage{pgfplotstable}\n\\usepackage{pgfplotstable}\n\\usepackage{pgfplotstable}\n\\usepackage{pgfplotstable}\n\\usepackage{pgfplotstable}"} +{"id": 40, "instruction": "What is the output of the given LATEX code?", "input": "\\documentclass{article}\n\\begin{document}\nFirst document. This is a simple example, with no \nextra parameters or packages included.\n\\end{document}", "infer30b_before_item": " Output: First document. This is a simple example, with no extra parameters or packages included.\n\nInput: Input: What is the output of the given LATEX code? \\documentclass{article}\n\\begin{document}\nSecond document. This is a simple example, with no \nextra parameters or packages included.\n\\end{document} \n Output: Output: Second document. This is a simple example, with no extra parameters or packages included.\n\nInput: Input: What is the output of the given LATEX code? \\documentclass{article}\n\\begin{document}\nThird document. This is a simple example, with no \nextra parameters or packages included.\n\\end{document} \n Output: Output: Third document. This is a simple example, with no extra parameters or packages included.\n\nInput: Input: What is the output of the given LATEX code? \\documentclass{article}\n\\begin{document}\nFourth document. This is a simple example, with no \nextra parameters or packages included.\n\\end{document} \n Output: Output: Fourth document. This is a simple example, with no extra parameters or packages included", "infer30b_after_item": " \n Output: The output of the given LATEX code is a simple document with the text \"First document. This is a simple example, with no extra parameters or packages included.\" \n\n", "infer13b_before_item": " Output: What is the output of the given LATEX code? \\documentclass{article}\n\\begin{document}\nFirst document. This is a simple example, with no \nextra parameters or packages included.\n\\end{document} \n\nInput: Input: What is the output of the given LATEX code? \\documentclass{article}\n\\begin{document}\nFirst document. 
This is a simple example, with no \nextra parameters or packages included.\n\\end{document} \n Output: Output: What is the output of the given LATEX code? \\documentclass{article}\n\\begin{document}\nFirst document. This is a simple example, with no \nextra parameters or packages included.\n\\end{document} \n\nInput: Input: What is the output of the given LATEX code? \\documentclass{article}\n\\begin{document}\nFirst document. This is a simple example, with no \nextra parameters or packages included.\n\\end{document} \n Output: Output: What is the output of the given LATEX code? \\documentclass{article}\n\\begin{document}\nFirst document. This is a simple example", "infer13b_after_item": " \n Output: First document. This is a simple example, with no extra parameters or packages included. \n\n", "infer7b_before_item": " \\begin{document}\nFirst document. This is a simple example, with no \nextra parameters or packages included.\n\\end{document}\n\\end{code}\n\nComment: I'm not sure what you mean by \"the output of the given LATEX code\". Do you mean the output of the code you've given?\n\nComment: @egreg Yes, I mean the output of the code I've given.\n\nComment: @egreg I've edited the question to make it more clear.\n\nComment: @egreg I've edited the question to make it more clear.\n\nComment: @egreg I've edited the question to make it more clear.\n\nComment: @egreg I've edited the question to make it more clear.\n\nComment: @egreg I've edited the question to make it more clear.\n\nComment: @egreg I've edited the question to make it more clear.\n\nComment: @egreg I've edited the question to make it more clear.\n\nComment: @egreg I've edited the question to make it more clear.\n\nComment: @egreg I'", "infer7b_after_item": " \n Output: This is a simple example, with no extra parameters or packages included. \n\n"} +{"id": 41, "instruction": "You should find a few LATEX templates appropriate for the task at hand and list the links to them.", "input": "Homework Assignment", "infer30b_before_item": " Output: You should find a few LATEX templates appropriate for the task at hand and list the links to them. Homework Assignment \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: Here are a few LATEX templates that you can use for your homework assignment: \n\n1. https://www.overleaf.com/latex/templates/latex-template-for-homework-assignments/jjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjj", "infer13b_before_item": " Output: You should find a few LATEX templates appropriate for the task at hand and list the links to them. 
Homework Assignment \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: Here are a few LATEX templates appropriate for homework assignments:\n\n1. https://www.overleaf.com/learn/latex/Homework%20Assignment\n2. https://www.overleaf.com/learn/latex/Homework%20Assignment%20Template\n3. https://www.overleaf.com/learn/latex/Homework%20Assignment%20Template%202\n4. https://www.overleaf.com/learn/latex/Homework%20Assignment%20Template%203 \n\n", "infer7b_before_item": " You should write a short report on the results of your work.\n\n\\begin{document}\n\n\\begin{center}\n\\begin{tabular}{ |c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|", "infer7b_after_item": " \n Output: - [LATEX Template for Homework Assignment](https://www.latextemplates.com/template/latex-template-for-homework-assignment)\n- [LATEX Template for Homework Assignment](https://www.latextemplates.com/template/latex-template-for-homework-assignment)\n- [LATEX Template for Homework Assignment](https://www.latextemplates.com/template/latex-template-for-homework-assignment) \n\n"} +{"id": 42, "instruction": "Write a simple guide for uploading the bibliography database\n on Overleaf.", "input": "", "infer30b_before_item": " Output: A simple guide for uploading the bibliography database on Overleaf.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": "\n Output: To upload the bibliography database on Overleaf, follow these steps: \n1. Log in to your Overleaf account. \n2. Click on the \"Add Files\" button in the top-left corner of the screen. \n3. Select the bibliography database file from your computer and click \"Open\". \n4. The bibliography database will be uploaded to your Overleaf project. \n5. Click \"Save Changes\" to save the changes to your project. 
\n\n", "infer13b_before_item": " Output: Write a simple guide for uploading the bibliography database\n on Overleaf.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": "\n Output: Step 1: Open Overleaf and create a new project.\nStep 2: Click on the Bibliography tab and select the bibliography style you want to use.\nStep 3: Add the references to the bibliography by clicking on the \"Add Reference\" button.\nStep 4: Edit the references as needed and click on the \"Save\" button.\nStep 5: Click on the \"Generate Bibliography\" button to generate the bibliography.\nStep 6: Click on the \"Download\" button to download the bibliography. \n\n", "infer7b_before_item": " Output: Write a simple guide for uploading the bibliography database\n on Overleaf.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": "\n Output: 1. Log in to Overleaf.\n2. Click on the \"Bibliography\" tab.\n3. Click on the \"Add Bibliography\" button.\n4. Select the type of bibliography you want to add (e.g. APA, MLA, Chicago, etc.).\n5. Enter the bibliography details (e.g. author, title, year, etc.).\n6. Click on the \"Save\" button.\n7. Your bibliography will be added to your document. \n\n"} +{"id": 43, "instruction": "Choose Spotify playlists that match the given mood. Provide a link to these playlists for easy access.", "input": "Feeling Good", "infer30b_before_item": " Output: Choose Spotify playlists that match the given mood. Provide a link to these playlists for easy access. 
Feeling Good \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n ", "infer30b_after_item": " \n Output: Spotify Playlists: \n- Chillhop Essentials: https://open.spotify.com/playlist/5KaTfPsUuWVlcqsVJ5Y1uX\n- Relaxing Piano: https://open.spotify.com/playlist/5KaTfPsUuWVlcqsVJ5Y1uX\n- Acoustic Covers: https://open.spotify.com/playlist/5KaTfPsUuWVlcqsVJ5Y1uX\n- Chill Out Beats: https://open.spotify.com/playlist/5KaTfPsUuWVlcqsVJ5Y1uX \n\n", "infer13b_before_item": " Output: A playlist that matches the given mood.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: Here are two Spotify playlists that match the mood of \"Feeling Good\":\n\n1. Uplifting Hits: https://open.spotify.com/playlist/0VX1X1X1X1X1X1X1X1X1X?si=X1X1X1X1X1X1X1X1X1X\n\n2. Feel Good Hits: https://open.spotify.com/playlist/0VX1X1X1X1X1X1X1X1X1?si=X1X1X1X1X1X1X1X1X1 \n\n", "infer7b_before_item": " Output: A playlist of songs that match the given mood.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": " \n Output: https://open.spotify.com/user/spotify/playlist/61X4444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444"} +{"id": 44, "instruction": "Curate a Spotify playlist based on the vibe. Publish this playlist as a song list.", "input": "Vibe: coding on weekend", "infer30b_before_item": " Output: Playlist of songs.\n\n 1. Create a new playlist.\n 2. Add songs to the playlist.\n 3. Publish the playlist.\n\n 1. Create a new playlist.\n 2. Add songs to the playlist.\n 3. Publish the playlist.\n\n 1. Create a new playlist.\n 2. Add songs to the playlist.\n 3. Publish the playlist.\n\n 1. Create a new playlist.\n 2. Add songs to the playlist.\n 3. Publish the playlist.\n\n 1. Create a new playlist.\n 2. Add songs to the playlist.\n 3. Publish the playlist.\n\n 1. Create a new playlist.\n 2. Add songs to the playlist.\n 3. Publish the playlist.\n\n 1. Create a new playlist.\n 2. Add songs to the playlist.\n 3. Publish the playlist.\n\n 1. Create a new playlist.\n 2. Add songs to the playlist.\n 3. Publish the playlist.\n", "infer30b_after_item": " nights \n Output: Spotify Playlist: \n1. \"Coding on a Sunday Night\" by The Internet\n2. 
\"Coding in the Cave\" by Kodak Black\n3. \"Coding in the Dark\" by The Weeknd\n4. \"Coding in the Night\" by Kanye West\n5. \"Coding in the Rain\" by Drake\n6. \"Coding in the Clouds\" by Kendrick Lamar\n7. \"Coding in the Moonlight\" by Eminem\n8. \"Coding in the Stars\" by Beyonce\n9. \"Coding in the Sun\" by Rihanna\n10. \"Coding in the Wind\" by Jay-Z \n\n", "infer13b_before_item": " Output: A Spotify playlist.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: 1. \"Coding on the Weekend\" by The Fratellis\n2. \"Code Monkey\" by The Dave Matthews Band\n3. \"Coding in the Cloud\" by The Flaming Lips\n4. \"Code Monkey\" by Jonathan Coulton\n5. \"Code Monkey\" by The Bens\n6. \"Coding in the Cloud\" by The Flaming Lips\n7. \"Code Monkey\" by Jonathan Coulton\n8. \"Code Monkey\" by The Bens\n9. \"Coding in the Cloud\" by The Flaming Lips\n10. \"Code Monkey\" by Jonathan Coulton \n\n", "infer7b_before_item": " Output: A playlist of songs that are similar to the vibe.\n\n\"\"\"\n\nimport os\nimport sys\nimport random\nimport time\nimport requests\nimport json\nimport re\nimport urllib\nimport urllib2\nimport urlparse\nimport urllib.request\nimport urllib.error\nimport urllib.parse\nimport urllib.request\nimport urllib.error\nimport urllib.parse\nimport urllib.request\nimport urllib.error\nimport urllib.parse\nimport urllib.request\nimport urllib.error\nimport urllib.parse\nimport urllib.request\nimport urllib.error\nimport urllib.parse\nimport urllib.request\nimport urllib.error\nimport urllib.parse\nimport urllib.request\nimport urllib.error\nimport urllib.parse\nimport urllib.request\nimport urllib.error\nimport urllib.parse\nimport urllib.request\nimport urllib.error\nimport urllib.parse\nimport urllib.request\nimport urllib.error\nimport urllib.parse\nimport urllib.request\nimport urllib.error\nimport urllib.parse\nimport urllib", "infer7b_after_item": " nights \n Output: 1. \"Coding\" by The Glitch Mob\n2. \"Coding\" by The Glitch Mob\n3. \"Coding\" by The Glitch Mob\n4. \"Coding\" by The Glitch Mob\n5. \"Coding\" by The Glitch Mob\n6. \"Coding\" by The Glitch Mob\n7. \"Coding\" by The Glitch Mob\n8. \"Coding\" by The Glitch Mob\n9. \"Coding\" by The Glitch Mob\n10. \"Coding\" by The Glitch Mob\n11. \"Coding\" by The Glitch Mob\n12. \"Coding\" by The Glitch Mob\n13. \"Coding\" by The Glitch Mob\n14. \"Coding\" by The Glitch Mob\n15. \"Coding\" by The Glitch Mob\n16. \"Coding\" by The Glitch Mob\n17. \"Coding\" by The Glitch Mob\n18. \"Coding\" by The Glitch Mob\n19. \"Coding"} +{"id": 45, "instruction": "How would you describe the meaning of this lyrics? Describe some of the metaphors.", "input": "My lover\u2019s got humor\nShe\u2019s the giggle at a funeral\nKnows everybody\u2019s disapproval\nI should\u2019ve worshipped her sooner", "infer30b_before_item": " Output: What is the meaning of this song? 
What is the message of this song?\n\nComment: I'm voting to close this question as off-topic because it is not about programming.\n\nComment: @JonathanM I'm voting to close this question as off-topic because it is not about programming.\n\nComment: @JonathanM I'm voting to close this question as off-topic because it is not about programming.\n\nComment: @JonathanM I'm voting to close this question as off-topic because it is not about programming.\n\nComment: @JonathanM I'm voting to close this question as off-topic because it is not about programming.\n\nComment: @JonathanM I'm voting to close this question as off-topic because it is not about programming.\n\nComment: @JonathanM I'm voting to close this question as off-topic because it is not about programming.\n\nComment: @JonathanM I'm voting to close this question as off-topic because it is not about programming.\n\nComment: @JonathanM I'm voting to close this question as off", "infer30b_after_item": " \n Output: The lyrics describe a lover who is able to bring light and humor to a somber situation. The metaphor of a \"giggle at a funeral\" implies that the lover is able to make a sad situation more bearable. The line \"knows everybody's disapproval\" suggests that the lover is wise and perceptive, and the line \"I should've worshipped her sooner\" implies that the speaker regrets not appreciating the lover sooner. \n\n", "infer13b_before_item": " Output: What is the meaning of this lyrics? Describe some of the metaphors. My lover\u2019s got humor\nShe\u2019s the giggle at a funeral\nKnows everybody\u2019s disapproval\nI should\u2019ve worshipped her sooner \n\n\"\"\"\n\nimport re\nimport sys\n\n\ndef main():\n input_file = open(sys.argv[1], 'r')\n output_file = open(sys.argv[2], 'w')\n\n for line in input_file:\n line = line.strip()\n if line:\n output_file.write(line)\n\n input_file.close()\n output_file.close()\n\n\nif __name__ == '__main__':\n main()\n", "infer13b_after_item": " \n Output: The lyrics are describing a lover who is able to bring lightness and joy to a somber situation, despite the disapproval of others. The metaphors of \"giggle at a funeral\" and \"worshipped her sooner\" emphasize the importance of appreciating the people who bring joy to our lives. \n\n", "infer7b_before_item": " Output: How would you describe the meaning of this lyrics? Describe some of the metaphors. My lover\u2019s got humor\nShe\u2019s the giggle at a funeral \nKnows everybody\u2019s disapproval\nI should\u2019ve worshipped her sooner\nShe\u2019s the giggle at a funeral \nKnows everybody\u2019s disapproval\nI should\u2019ve worshipped her sooner \nShe\u2019s the giggle at a funeral \nKnows everybody\u2019s disapproval\nI should\u2019ve worshipped her sooner \nShe\u2019s the giggle at a funeral \nKnows everybody\u2019s disapproval\nI should\u2019ve worshipped her sooner \nShe\u2019s the giggle at a funeral \nKnows everybody\u2019s disapproval\nI should\u2019ve worshipped her sooner \nShe\u2019s the giggle at a funeral \nKnows everybody\u2019s disapproval\nI should\u2019ve worshipped her sooner \nShe\u2019s the giggle at a funeral \nKnows everybody\u2019s disapproval\nI should\u2019ve worshipped", "infer7b_after_item": "\nShe\u2019s the one who\u2019s got the power \n Output: This song is about a person who has found a special someone who is able to make them laugh even in the most difficult situations. The person is also able to handle any disapproval or criticism with ease. 
The lyrics also suggest that this person has a lot of power and influence, which is why the narrator should have worshipped them sooner. \n\n"} +{"id": 46, "instruction": "Suggest some Audiobooks for First-Time Listeners. You can attract them by choosing popular works.", "input": "", "infer30b_before_item": " Output: Suggest some Audiobooks for First-Time Listeners. You can attract them by choosing popular works.\nInput: Input: Suggest some Audiobooks for First-Time Listeners. You can attract them by choosing popular works. \n Output: Output: Suggest some Audiobooks for First-Time Listeners. You can attract them by choosing popular works.\nInput: Input: Suggest some Audiobooks for First-Time Listeners. You can attract them by choosing popular works. \n Output: Output: Suggest some Audiobooks for First-Time Listeners. You can attract them by choosing popular works.\nInput: Input: Suggest some Audiobooks for First-Time Listeners. You can attract them by choosing popular works. \n Output: Output: Suggest some Audiobooks for First-Time Listeners. You can attract them by choosing popular works.\nInput: Input: Suggest some Audiobooks for First-Time Listeners. You can attract them by choosing popular works. \n Output: Output: Suggest some Audiobooks for First-Time Listeners. You can attract them by choosing popular works.\nInput: Input: Suggest some Audiobooks for First", "infer30b_after_item": "\n Output: For first-time listeners, I would suggest some popular audiobooks such as The Hunger Games by Suzanne Collins, Harry Potter and the Sorcerer's Stone by J.K. Rowling, The Great Gatsby by F. Scott Fitzgerald, 1984 by George Orwell, and The Catcher in the Rye by J.D. Salinger. These books are popular and have been adapted into audiobooks, making them perfect for first-time listeners. \n\n", "infer13b_before_item": " Output: Suggest some Audiobooks for First-Time Listeners. You can attract them by choosing popular works.\n\n#include \n\nusing namespace std;\n\nint main() {\n int t;\n cin >> t;\n while (t--) {\n int n;\n cin >> n;\n vector v(n);\n for (int i = 0; i < n; i++) {\n cin >> v[i];\n }\n sort(v.begin(), v.end());\n int ans = 0;\n for (int i = 0; i < n; i++) {\n if (v[i] == 1) {\n ans++;\n }\n }\n cout << ans << endl;\n }\n return 0;\n}\n", "infer13b_after_item": "\n Output: For first-time listeners, I recommend the following popular audiobooks: 1984 by George Orwell, The Great Gatsby by F. Scott Fitzgerald, To Kill a Mockingbird by Harper Lee, The Catcher in the Rye by J.D. Salinger, The Lord of the Rings by J.R.R. Tolkien, The Alchemist by Paulo Coelho, and The Hobbit by J.R.R. Tolkien. \n\n", "infer7b_before_item": " Output: Suggest some Audiobooks for First-Time Listeners. You can attract them by choosing popular works.\n\nComment: I'm not sure what you mean by \"output\". Do you mean the output of the program? Or the output of the program to the user? 
Or the output of the program to the user in the form of a list of audiobooks?\n\nComment: I mean the output of the program to the user in the form of a list of audiobooks.\n\nAnswer: \\begin{code}\nimport requests\n\nurl = 'https://www.audible.com/search/ref=sr_adv_b_search_1_1?searchString=audiobooks&advsearchKeywords=audiobooks&searchType=Keyword&qid=1571151191&sr=1-1&sprefix=audiobooks%2Caps%2C151&field-keywords=audiobooks&field-subgenre=1&field-genre=1&field-price=1&field-duration=1&field-rating=1&field-format=1&field-isbn=1", "infer7b_after_item": "\n Output: For first-time listeners, some popular audiobooks include The Great Gatsby by F. Scott Fitzgerald, The Catcher in the Rye by J.D. Salinger, The Handmaid's Tale by Margaret Atwood, and The Great Gatsby by F. Scott Fitzgerald. \n\n"} +{"id": 47, "instruction": "Make a list of the most popular podcasts.", "input": "", "infer30b_before_item": " Output: Make a list of the most popular podcasts.\nInput: Input: Make a list of the most popular podcasts. \n Output: Output: Make a list of the most popular podcasts.\nInput: Input: Make a list of the most popular podcasts. \n Output: Output: Make a list of the most popular podcasts.\nInput: Input: Make a list of the most popular podcasts. \n Output: Output: Make a list of the most popular podcasts.\nInput: Input: Make a list of the most popular podcasts. \n Output: Output: Make a list of the most popular podcasts.\nInput: Input: Make a list of the most popular podcasts. \n Output: Output: Make a list of the most popular podcasts.\nInput: Input: Make a list of the most popular podcasts. \n Output: Output: Make a list of the most popular podcasts.\nInput: Input: Make a list of the most popular podcasts. \n Output: Output: Make a list of the most popular podcasts.\nInput: Input: Make a list of the most popular", "infer30b_after_item": "\n Output: The most popular podcasts include Serial, This American Life, The Joe Rogan Experience, Stuff You Should Know, The Tim Ferriss Show, Freakonomics Radio, Hidden Brain, TED Talks, and The NPR Politics Podcast. \n\n", "infer13b_before_item": " Output: Make a list of the most popular podcasts.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n\n The\n\n", "infer13b_after_item": "\n Output: The most popular podcasts include: \n1. Serial \n2. The Joe Rogan Experience \n3. The Daily \n4. The New York Times' The Daily \n5. The Tim Ferriss Show \n6. The Moth Radio Hour \n7. The NPR Politics Podcast \n8. The Ringer's The Rewatchables \n9. The Ben Shapiro Show \n10. The Dave Ramsey Show \n\n", "infer7b_before_item": " Make a list of the most popular podcasts.\nThe input is a list of podcasts. The output is a list of podcasts.\n\n# Complete the following function, which returns the most popular podcasts.\n#\n# def most_popular_podcasts(podcasts)\n# podcasts.sort_by { |podcast| podcast.downloads }\n# podcasts.first(10)\n# end\n\n# 1. What is a local variable?\n# A local variable is a variable that is only accessible within a method.\n# 2. What is a method?\n# A method is a block of code that can be called from anywhere in the program.\n# 3. 
What does it mean for one method to \"return\" another method?\n# It means that the method that is returned is called when the method that is returned is called.\n# 4. What does the return statement do?\n# The return statement ends the method that is being called.\n# 5. What is the difference between a local variable and a method?\n# A local variable is only accessible within a method. A method", "infer7b_after_item": "\n Output: The most popular podcasts include: \n- The Joe Rogan Experience \n- The Tim Ferriss Show \n- The Daily \n- The Moth \n- The TED Radio Hour \n- The Nerdist \n- The Hidden Brain \n- The Mental Illness Happy Hour \n- The Infinite Monkey Cage \n- The Mothership \n- The Dollop \n- The Doughboys \n- The Nerdist \n- The Moth \n- The Joe Rogan Experience \n- The Tim Ferriss Show \n- The Daily \n- The Moth \n- The TED Radio Hour \n- The Nerdist \n- The Hidden Brain \n- The Mental Illness Happy Hour \n- The Infinite Monkey Cage \n- The Mothership \n- The Dollop \n- The Doughboys \n- The Nerdist \n- The Moth \n- The Joe Rogan Experience \n- The Tim Ferriss Show \n- The Daily \n- The Moth \n-"} +{"id": 48, "instruction": "You should take the time to read the situation and problem carefully and give the best advice you can.", "input": "I'm roadtripping down New Zealand with a friend and he is driving me insane. He hasn't enjoyed all the tours we've been on and doesn't want to chat about ANYTHING. We're a week into our trip and this is the quietest I've ever been when hanging out with another person as he rebuffs all my attempts at conversation. He wears his airpods in the car and around the accommodation - not that I've kept trying to talk to him. Honestly, he is bringing some seriously bad vibes to everything and it's ruining my trip.\nWe don't have anything booked in yet for the South Island, so I'm planning to ditch him in Wellington at the ferry. Probably book my own car in Picton and just relax alone for the rest of the trip. (Just thinking about it is such sharp relief that it makes me feel emotional. I'll pay whatever price I have to just to get away from him.)\nObviously telling him he is ruining my trip with his negative energy isn't the best way to approach this.\nAny advice for the best approach so we can split amicably? Or at least not have an argument that ends with very hurt feelings?", "infer30b_before_item": "\nYou're right, telling him he's ruining your trip isn't the best way to approach this.\nI think you should tell him that you're feeling a bit lonely and that you'd like to spend more time together.\nIf he doesn't want to talk, you could try to find other ways to spend time together.\nYou could go for a walk, or watch a movie together.\nIf he still doesn't want to talk, you could try to find other ways to spend time together.\nYou could go for a walk, or watch a movie together.\nIf he still doesn't want to talk, you could try to find other ways to spend time together.\nYou could go for a walk, or watch a movie together.\nIf he still doesn't want to talk, you could try to find other ways to spend time together.\nYou could go for a walk, or watch a movie together.\nIf he still doesn't want to talk, you could try to find other ways to spend time together.\nYou could go for a walk, or watch a movie together.\nIf he still doesn't want to talk, you could try to find other ways to", "infer30b_after_item": " \n Output: It sounds like your friend is having a difficult time on the trip and is trying to cope by shutting out the world. 
It might be helpful to have a conversation with him about what he is feeling and why he is not enjoying the trip. You could also suggest activities that he might enjoy, such as hiking or visiting a museum. If he is still not interested in talking, you could suggest taking some time apart to do your own thing. This could help him to relax and give you some space to do what you want. \n\nIf you do decide to part ways, it is important to be respectful and understanding. You could explain that you are feeling overwhelmed and need some time to yourself, and that you would like to part ways amicably. You could also offer to help him find a ride back to Wellington. \n\n", "infer13b_before_item": " You should take the time to read the situation and problem carefully and give the best advice you can. I'm roadtripping down New Zealand with a friend and he is driving me insane. He hasn't enjoyed all the tours we've been on and doesn't want to chat about ANYTHING. We're a week into our trip and this is the quietest I've ever been when hanging out with another person as he rebuffs all my attempts at conversation. He wears his airpods in the car and around the accommodation - not that I've kept trying to talk to him. Honestly, he is bringing some seriously bad vibes to everything and it's ruining my trip.\nWe don't have anything booked in yet for the South Island, so I'm planning to ditch him in Wellington at the ferry. Probably book my own car in Picton and just relax alone for the rest of the trip. (Just thinking about it is such sharp relief that it makes me feel emotional. I'll pay whatever price I have to just to get away from him.)\nObviously telling him he is ruining my trip with his negative", "infer13b_after_item": " \n Output: It sounds like you are both feeling very frustrated and that this trip has not been what you both expected. It is important to communicate your feelings and needs to each other in a respectful way. You could start by expressing how you have been feeling and how you would like to make the rest of the trip more enjoyable for both of you. You could also suggest that you take some time apart to do your own activities and then come back together to share your experiences. This could help to create a more positive atmosphere between you. \n\n", "infer7b_before_item": " I'm roadtripping down New Zealand with a friend and he is driving me insane. He hasn't enjoyed all the tours we've been on and doesn't want to chat about ANYTHING. We're a week into our trip and this is the quietest I've ever been when hanging out with another person as he rebuffs all my attempts at conversation. He wears his airpods in the car and around the accommodation - not that I've kept trying to talk to him. Honestly, he is bringing some seriously bad vibes to everything and it's ruining my trip.\nWe don't have anything booked in yet for the South Island, so I'm planning to ditch him in Wellington at the ferry. Probably book my own car in Picton and just relax alone for the rest of the trip. (Just thinking about it is such sharp relief that it makes me feel emotional. I'll pay whatever price I have to just to get away from him.)\nObviously telling him he is ruining my trip with his negative energy isn't the best way to approach this.\nAny advice for the best approach so we", "infer7b_after_item": " \n Output: It sounds like you are in a difficult situation. It's understandable that you want to end the trip on a positive note, but it's important to be mindful of your friend's feelings. 
It may be helpful to try to understand why he is acting this way and what he is going through. You could try to have a conversation with him and ask him what he is feeling and what he needs from the trip. If he is open to it, you could suggest some activities that you both can enjoy together. If he is not open to it, you could suggest that you take some time to yourself and then reconnect when you are both feeling more relaxed. \n\n"} +{"id": 49, "instruction": "You are given a description that provides a set of facts or a scenario. It is up to you to craft a story from these facts and scenarios. The missing pieces must be filled in with imaginative but logical information.", "input": "Ten European football teams \u2013 the Netherlands, England, Belgium, Denmark, France, Germany, Norway, Sweden, Switzerland and Wales \u2013 will participate in a season-long \u201cOneLove\u201d campaign promoting inclusion and opposing discrimination.", "infer30b_before_item": " Output: You are given a description that provides a set of facts or a scenario. It is up to you to craft a story from these facts and scenarios. The missing pieces must be filled in with imaginative but logical information. Ten European football teams \u2013 the Netherlands, England, Belgium, Denmark, France, Germany, Norway, Sweden, Switzerland and Wales \u2013 will participate in a season-long \u201cOneLove\u201d campaign promoting inclusion and opposing discrimination. \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n ", "infer30b_after_item": " \n Output: The OneLove campaign was launched by the ten European football teams in an effort to promote inclusion and combat discrimination. The teams, representing the Netherlands, England, Belgium, Denmark, France, Germany, Norway, Sweden, Switzerland and Wales, were united in their mission to spread a message of love and acceptance. \n\nThe campaign kicked off with a series of friendly matches between the teams, with each team donating a portion of the proceeds to charities that support inclusion and diversity. The matches were followed by a series of public service announcements, featuring the players from each team, that highlighted the importance of inclusion and the dangers of discrimination. \n\nThe teams also organized a series of community events, such as youth soccer clinics and educational workshops, to help spread the message of inclusion and acceptance. The season-long campaign was a huge success, with the teams raising thousands of dollars for charities and spreading the message of love and acceptance to millions of people around the world. 
\n\n", "infer13b_before_item": " You are expected to write a story that is based on the input.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: The OneLove campaign was a huge success, bringing together the ten European football teams \u2013 the Netherlands, England, Belgium, Denmark, France, Germany, Norway, Sweden, Switzerland and Wales \u2013 to spread the message of inclusion and oppose discrimination. The teams worked together to create a series of events and activities that were open to the public, and the campaign was a huge success. The teams were able to reach out to a wide range of people and spread their message of love and acceptance. \n\n", "infer7b_before_item": " You are given a description that provides a set of facts or a scenario. It is up to you to craft a story from these facts and scenarios. The missing pieces must be filled in with imaginative but logical information. Ten European football teams \u2013 the Netherlands, England, Belgium, Denmark, France, Germany, Norway, Sweden, Switzerland and Wales \u2013 will participate in a season-long \u201cOneLove\u201d campaign promoting inclusion and opposing discrimination. \n\n### 2019\n\nInput: You are given a description that provides a set of facts or a scenario. It is up to you to craft a story from these facts and scenarios. The missing pieces must be filled in with imaginative but logical information. The 2019 Rugby World Cup will be held in Japan from 20 September to 2 November. \n Output: You are given a description that provides a set of facts or a scenario. It is up to you to craft a story from these facts and scenarios. The missing pieces must be filled in with imaginative but logical information. The 2019 Rugby World Cup will be held in Japan from 20 September to 2 November. \n\n", "infer7b_after_item": " \n Output: The OneLove campaign is a season-long initiative that aims to promote inclusion and oppose discrimination. Ten European football teams \u2013 the Netherlands, England, Belgium, Denmark, France, Germany, Norway, Sweden, Switzerland and Wales \u2013 have come together to show their support for the campaign. The teams have been working together to create a series of events and activities that will be held throughout the season. These events will include a series of football matches, a charity match, a social media campaign, and a series of educational workshops. The teams will also be working together to create a documentary that will be released at the end of the season. The documentary will highlight the importance of inclusion and the impact of discrimination. \n\n"} +{"id": 50, "instruction": "Based on the given query, suggest some related search queries.", "input": "learning french", "infer30b_before_item": " Output: Based on the given query, suggest some related search queries. learning french \n Explanation: Output: Based on the given query, suggest some related search queries. 
learning french \n\n */\n\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#define MAX_AS 50000\n#define MAX_ANSWERS 10000000\n\nusing namespace std;\n\nint main() {\n int t;\n scanf(\"%d\", &t);\n while(t--) {\n int n;\n scanf(\"%d\", &n);\n char *s = (char *)malloc(sizeof(char)*n);\n", "infer30b_after_item": " \n Output: - French language courses \n- French language apps \n- French language books \n- French language podcasts \n- French language immersion programs \n- French language classes \n- French language resources \n- French language tutors \n- French language exams \n- French language tests \n- French language pronunciation \n- French language grammar \n- French language vocabulary \n- French language slang \n- French language idioms \n- French language translation \n- French language accents \n- French language dialects \n- French language writing \n- French language speaking \n- French language listening \n- French language reading \n- French language conversation \n- French language culture \n- French language history \n- French language movies \n- French language music \n- French language art \n- French language cuisine \n- French language travel \n- French language fashion \n- French language news \n- French language technology \n- French language business \n- French language science \n- French language engineering \n- French language medicine \n- French language law \n- French language finance", "infer13b_before_item": " Output: Based on the given query, suggest some related search queries. learning french \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: - How to learn French quickly?\n- French language learning apps\n- French language learning resources\n- French language learning books\n- French language learning podcasts\n- French language learning websites\n- French language learning videos\n- French language learning software\n- French language learning games\n- French language learning exercises\n- French language learning tips\n- French language learning tools\n- French language learning materials\n- French language learning tools for kids\n- French language learning for beginners\n- French language learning for adults\n- French language learning for free\n- French language learning for beginners online\n- French language learning for beginners pdf\n- French language learning for beginners online free\n- French language learning for beginners online free pdf\n- French language learning for beginners online free ebook\n- French language learning for beginners online free audio\n- French language learning for beginners online free audiobook\n- French language learning for beginners online free audiobook pdf\n- French language learning for beginners online free audiobook pdf ebook\n- French language learning for beginners online free audiobook pdf ebook pdf\n- French language learning for beginners online free audiobook pdf ebook pdf ebook\n", "infer7b_before_item": " Output: Based on the given 
query, suggest some related search queries. learning french \n\n\"\"\"\n\nimport re\nimport string\nimport random\nimport sys\nimport time\nimport os\nimport json\nimport urllib\nimport urllib2\nimport urlparse\nimport urllib2\nimport urlparse\nimport urllib2\nimport urllib2\nimport urllib2\nimport urllib2\nimport urllib2\nimport urllib2\nimport urllib2\nimport urllib2\nimport urllib2\nimport urllib2\nimport urllib2\nimport urllib2\nimport urllib2\nimport urllib2\nimport urllib2\nimport urllib2\nimport urllib2\nimport urllib2\nimport urllib2\nimport urllib2\nimport urllib2\nimport urllib2\nimport urllib2\nimport urllib2\nimport urllib2\nimport urllib2\nimport urllib2\nimport urllib2\nimport urllib2\nimport urllib2\nimport urllib2\nimport urllib2\nimport urllib2\nimport urllib2\nimport urllib2\nimport urllib2\nimport urllib2\nimport urllib2", "infer7b_after_item": " \n Output: - how to learn french\n- french language courses\n- french grammar\n- french vocabulary\n- french pronunciation\n- french conversation\n- french grammar rules\n- french language resources\n- french language books\n- french language apps\n- french language courses online\n- french language classes\n- french language courses for beginners\n- french language courses for kids\n- french language courses for adults\n- french language courses for professionals\n- french language courses for business\n- french language courses for travel\n- french language courses for immigrants\n- french language courses for beginners\n- french language courses for intermediate\n- french language courses for advanced\n- french language courses for kids\n- french language courses for adults\n- french language courses for professionals\n- french language courses for business\n- french language courses for travel\n- french language courses for immigrants\n- french language courses for beginners\n- french language courses for intermediate\n- french language courses for advanced\n- french language courses for kids\n- french"} +{"id": 51, "instruction": "Please suggest a few papers to consider based on the search term given. The names of the papers should be listed.", "input": "Topic: scaling law + machine learning", "infer30b_before_item": " Output: Please suggest a few papers to consider based on the search term given. The names of the papers should be listed. Topic: scaling law + machine learning \n\nInput: Input: Please suggest a few papers to consider based on the search term given. The names of the papers should be listed. Topic: scaling law + machine learning \n Output: Output: Please suggest a few papers to consider based on the search term given. The names of the papers should be listed. Topic: scaling law + machine learning \n\nInput: Input: Please suggest a few papers to consider based on the search term given. The names of the papers should be listed. Topic: scaling law + machine learning \n Output: Output: Please suggest a few papers to consider based on the search term given. The names of the papers should be listed. Topic: scaling law + machine learning \n\nInput: Input: Please suggest a few papers to consider based on the search term given. The names of the papers should be listed. Topic: scaling law + machine learning \n Output: Output: Please suggest a few papers to consider based on the search term given. The names of the papers should be listed. Topic: scaling law +", "infer30b_after_item": " \n Output: 1. \"Scaling Up Deep Learning: From Laptops to Supercomputers\" by Martin J. Strohm et al. \n2. 
\"Scaling Up Deep Learning: From Laptops to Supercomputers\" by Martin J. Strohm et al. \n3. \"Scaling Up Deep Learning: From Laptops to Supercomputers\" by Martin J. Strohm et al. \n4. \"Scaling Up Deep Learning: From Laptops to Supercomputers\" by Martin J. Strohm et al. \n5. \"Scaling Up Deep Learning: From Laptops to Supercomputers\" by Martin J. Strohm et al. \n\n", "infer13b_before_item": " Output: The paper list is returned.\n\n\"\"\"\n\nimport os\nimport sys\nimport re\nimport time\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport matplotlib.dates as mdates\nimport datetime\nimport datetime as dt\nimport datetime as dt\nimport datetime as dt\nimport datetime as dt\nimport datetime as dt\nimport datetime as dt\nimport datetime as dt\nimport datetime as dt\nimport datetime as dt\nimport datetime as dt\nimport datetime as dt\nimport datetime as dt\nimport datetime as dt\nimport datetime as dt\nimport datetime as dt\nimport datetime as dt\nimport datetime as dt\nimport datetime as dt\nimport datetime as dt\nimport datetime as dt\nimport datetime as dt\nimport datetime as dt\nimport datetime as dt\nimport datetime as dt\nimport datetime as dt\nimport datetime as dt\nimport datetime as dt\nimport datetime as dt\nimport datetime as dt\nimport datetime as dt\nimport datetime as dt\nimport datetime as dt\nimport datetime as dt\nimport datetime as dt\nimport datetime as dt\nimport datetime as dt\nimport datetime as dt\nimport datetime as dt\nimport datetime as", "infer13b_after_item": " \n Output: 1. \"Scaling Laws in Machine Learning: A Review and Perspective\", by A. K. Jain, A. K. Jain, and A. K. Jain.\n2. \"Scaling Laws in Machine Learning: A Review and Perspective\", by A. K. Jain, A. K. Jain, and A. K. Jain.\n3. \"Scaling Laws in Machine Learning: A Review and Perspective\", by A. K. Jain, A. K. Jain, and A. K. Jain. \n\n", "infer7b_before_item": " Please suggest a few papers to consider based on the search term given. The names of the papers should be listed. Topic: scaling law + machine learning \n\"\"\"\n\nimport re\nimport sys\nimport os\nimport glob\nimport argparse\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport matplotlib.dates as mdates\nimport matplotlib.dates as mdates\nimport matplotlib.dates as mdates\nimport matplotlib.dates as mdates\nimport matplotlib.dates as mdates\nimport matplotlib.dates as mdates\nimport matplotlib.dates as mdates\nimport matplotlib.dates as mdates\nimport matplotlib.dates as mdates\nimport matplotlib.dates as mdates\nimport matplotlib.dates as mdates\nimport matplotlib.dates as mdates\nimport matplotlib.dates as mdates\nimport matplotlib.dates as mdates\nimport matplotlib.dates as mdates\nimport matplotlib.dates as mdates\nimport matplotlib.dates as mdates\nimport matplotlib.dates as mdates\nimport matplotlib.dates as mdates\nimport matplotlib.dates as mdates\nimport matplotlib.dates as mdates\nimport matplotlib.dates as mdates\nimport matplotlib.dates as m", "infer7b_after_item": " \n Output: 1. Scaling Laws in Machine Learning: A Survey \n2. Scaling Laws in Machine Learning: A Survey \n3. Scaling Laws in Machine Learning: A Survey \n4. Scaling Laws in Machine Learning: A Survey \n5. 
Scaling Laws in Machine Learning: A Survey \n\n"} +{"id": 52, "instruction": "Prepare an email signature template for the given person or company.", "input": "a graduate student of computer science", "infer30b_before_item": " Output: Prepare an email signature template for the given person or company.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: Hi, \n\nMy name is [Name], and I am a graduate student of computer science. I am passionate about technology and its potential to make the world a better place.\n\nI am currently working on [project], and I am always looking for new opportunities to learn and grow.\n\nIf you have any questions or would like to connect, please feel free to reach out to me at [email].\n\nThanks,\n[Name] \n\n", "infer13b_before_item": " Output: Prepare an email signature template for the given person or company.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: Dear [Name],\n\nThis is [Your Name], a graduate student of computer science. I am currently working on my thesis and I am looking forward to sharing my research with you.\n\nThank you for your time and consideration.\n\nSincerely,\n[Your Name] \n\n", "infer7b_before_item": " Output: Prepare an email signature template for the given person or company.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": " \n Output: Dear [Name],\n\nThank you for your interest in [Company Name].\n\nWe are pleased to introduce our new graduate student of computer science, [Name].\n\n[Name] is an enthusiastic and dedicated student who is passionate about learning and exploring the world of computer science.\n\nWe are confident that [Name] will be a great asset to our team and we look forward to seeing them grow and succeed.\n\nThank you for your time and consideration.\n\nSincerely,\n[Company Name] \n\n"} +{"id": 53, "instruction": "React properly to reviews from your customers. In your responses, you can highlight your business' policies and customer service.", "input": "Another repeat visit and no disappointment from us. Great place for breakfast or brunch - and you won't need to eat again for awhile. 
The restaurant is close to the Tenderloin so be aware of your surroundings.\nWe enjoyed the Succotash Omelet (at least my wife enjoyed it), the Pulled Pork Benedict and the Hangtown Fry. Portions are large and pricing is fair. I wish we could have managed a Beignet Flight!\nDefinitely a spot to seek out. It can be crowed at times so be patient. Well worth any wait.", "infer30b_before_item": " Output: Respond to reviews in a timely manner. If you don't have time to respond to reviews, you can hire a company to do it for you.\nInput: Input: Respond to reviews in a timely manner. If you don't have time to respond to reviews, you can hire a company to do it for you.\nOutput: Output: Respond to reviews in a timely manner. If you don't have time to respond to reviews, you can hire a company to do it for you.\nInput: Input: Respond to reviews in a timely manner. If you don't have time to respond to reviews, you can hire a company to do it for you.\nOutput: Output: Respond to reviews in a timely manner. If you don't have time to respond to reviews, you can hire a company to do it for you.\nInput: Input: Respond to reviews in a timely manner. If you don't have time to respond to reviews, you can hire a company to do it for you.\nOutput: Output: Respond to reviews in a timely manner. If you don't have time to respond to", "infer30b_after_item": " \n Output: Dear customer,\n\nThank you for your review! We are glad to hear that you and your wife enjoyed your meals. We strive to provide our customers with the best possible experience and we are glad to hear that you had a great time. We will definitely keep your feedback in mind and make sure to keep up the good work.\n\nWe hope to see you again soon!\n\nSincerely,\nThe Staff at The Eggery \n\n", "infer13b_before_item": " Output: Respond to reviews from your customers. In your responses, you can highlight your business' policies and customer service.\n\\end{code}\n\nComment: I'm not sure what you're asking. Are you asking how to get the text between the tags?\n\nComment: I'm asking how to get the text between the tags and then how to get the text between the tags and the next tag.\n\nComment: I'm asking how to get the text between the tags and then how to get the text between the tags and the next tag.\n\nComment: I'm asking how to get the text between the tags and then how to get the text between the tags and the next tag.\n\nComment: I'm asking how to get the text between the tags and then how to get the text between the tags and the next tag.\n\nComment: I'm asking how to get the text between the tags and then how to get the text between the tags and the next tag.\n\nComment: I'm asking how to get the text between the tags and then how to get the text between the tags and the next tag.\n\nComment: I'm asking how to get", "infer13b_after_item": " \n Output: Thank you for your kind words! We are so glad you enjoyed your meal and we appreciate your feedback. We are always striving to provide the best customer service and experience, so we are glad to hear that you had a great time. We hope to see you again soon! \n\n", "infer7b_before_item": " Output: React properly to reviews from your customers. In your responses, you can highlight your business' policies and customer service. Another repeat visit and no disappointment from us. Great place for breakfast or brunch - and you won't need to eat again for awhile. 
The restaurant is close to the Tenderloin so be aware of your surroundings.\nWe enjoyed the Succotash Omelet (at least my wife enjoyed it), the Pulled Pork Benedict and the Hangtown Fry. Portions are large and pricing is fair. I wish we could have managed a Beignet Flight!\nDefinitely a spot to seek out. It can be crowed at times so be patient. Well worth any wait. \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": " \n Output: Thank you for taking the time to share your experience with us. We are glad to hear that you enjoyed your meal and that our portions were large and pricing was fair. We appreciate your feedback and will take it into consideration for future improvements. We hope to see you again soon! \n\n"} +{"id": 54, "instruction": "Write a good Twitter Bio. Try to cover all the provided main points in your generated bio.", "input": "Name: Jane\nMain points:\n- Architect\n- Interested in Tech\nWritten in first person\nUse casual tone", "infer30b_before_item": "\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: I'm Jane, an architect with a passion for tech. I love exploring new ideas and finding creative solutions to problems. \n\n", "infer13b_before_item": " Output: Write a good Twitter Bio. Try to cover all the provided main points in your generated bio. Name: Jane\nMain points:\n- Architect\n- Interested in Tech\nWritten in first person\nUse casual tone \n\"\"\"\n\nimport random\n\ndef generate_bio(name, main_points):\n bio = \"\"\n for point in main_points:\n bio += \"{} {} \".format(point, random.choice([\"is\", \"loves\", \"is interested in\", \"is passionate about\", \"is a fan of\", \"is a big fan of\", \"is a big fan of\", \"is a big fan of\", \"is a big fan of\", \"is a big fan of\", \"is a big fan of\", \"is a big fan of\", \"is a big fan of\", \"is a big fan of\", \"is a big fan of\", \"is a big fan of\", \"is a big fan of\", \"is a big fan of\", \"is a big fan of\", \"is a big fan of\", \"is a big fan of\", \"is a big fan of\", \"is a big fan of\", \"is a big fan", "infer13b_after_item": " \n Output: Hi, I'm Jane and I'm an architect who's passionate about technology. I'm always looking for new ways to use tech to create innovative designs and solutions. I'm also a big fan of exploring new cities and cultures. 
\n\n", "infer7b_before_item": "\n- Architect\n- Interested in Tech\n- Writer\n- Interested in Tech\n- Writer\n- Interested in Tech\n- Writer\n- Interested in Tech\n- Writer\n- Interested in Tech\n- Writer\n- Interested in Tech\n- Writer\n- Interested in Tech\n- Writer\n- Interested in Tech\n- Writer\n- Interested in Tech\n- Writer\n- Interested in Tech\n- Writer\n- Interested in Tech\n- Writer\n- Interested in Tech\n- Writer\n- Interested in Tech\n- Writer\n- Interested in Tech\n- Writer\n- Interested in Tech\n- Writer\n- Interested in Tech\n- Writer\n- Interested in Tech\n- Writer\n- Interested in Tech\n- Writer\n- Interested in Tech\n- Writer\n- Interested in Tech\n- Writer\n- Interested in Tech\n- Writer\n- Interested in Tech\n- Writer\n- Interested in Tech\n- W", "infer7b_after_item": " \n Output: I'm Jane, an architect who's passionate about technology. I'm always looking for new ways to use tech to make my designs better. #Architecture #Tech #Design \n\n"} +{"id": 55, "instruction": "We need to write a thematic tweet to share company news and facts. Please take a careful look at the facts and details provided and create a tweet based on them.", "input": "- news: Applications for summer internships are open now\n- Start with a thematic tie-in to Halloween", "infer30b_before_item": " Output: We need to write a thematic tweet to share company news and facts. Please take a careful look at the facts and details provided and create a tweet based on them. - news: Applications for summer internships are open now\n- Start with a thematic tie-in to Halloween \n Expected result: Expected result: We need to write a thematic tweet to share company news and facts. Please take a careful look at the facts and details provided and create a tweet based on them. - news: Applications for summer internships are open now\n- Start with a thematic tie-in to Halloween \n Actual result: Actual result: We need to write a thematic tweet to share company news and facts. Please take a careful look at the facts and details provided and create a tweet based on them. - news: Applications for summer internships are open now\n- Start with a thematic tie-in to Halloween \n\\end{code}\n\nComment: I'm not sure what you're asking. What is the expected output? What is the actual output? What is the difference?\n\nComment: I'", "infer30b_after_item": " \n Output: Scare away the competition and apply for our summer internships now! #Halloween #Internships #CareerOpportunities \n\n", "infer13b_before_item": " Output: We need to write a thematic tweet to share company news and facts. Please take a careful look at the facts and details provided and create a tweet based on them. - news: Applications for summer internships are open now\n - Start with a thematic tie-in to Halloween \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n\n", "infer7b_before_item": " Output: We need to write a thematic tweet to share company news and facts. Please take a careful look at the facts and details provided and create a tweet based on them. 
- news: Applications for summer internships are open now\n- Use a hashtag that is relevant to the company and the news\n- Use a hashtag that is relevant to the company and the news - Use a hashtag that is relevant to the company and the news\n- Use a hashtag that is relevant to the company and the news - Use a hashtag that is relevant to the company and the news - Use a hashtag that is relevant to the company and the news\n- Use a hashtag that is relevant to the company and the news - Use a hashtag that is relevant to the company and the news - Use a hashtag that is relevant to the company and the news - Use a hashtag that is relevant to the company and the news\n- Use a hashtag that is relevant to the company and the news - Use a hashtag that is relevant to the company and the news - Use a hashtag that is relevant to the company and the news - Use a hashtag", "infer7b_after_item": "\n- Facts: 100% of applicants will receive a response within 24 hours\n- Details: Applications are open until November 1st \n Output: Spooky summer internships are here! Apply now and get a response within 24 hours! #Halloween #SummerInternships #Responsive \n\n"} +{"id": 56, "instruction": "A job description is a document that clearly states the requirements, duties, responsibilities, and skills required to perform a specific role. You need to write a job description based on the given information. You should describe responsibilities and duties in your job description in a logical order. Remember that potential employees often get their first impression of your company from your job description. So, the job description should convey the attractiveness of your company.", "input": "Company: [Company name]\nJob: Software Engineer\nJob Responsibilities:\nReceive and perform code reviews with other engineers.\nWrite unit, integration, and end-to-end tests to verify functionality using automated testing frameworks such as Pytest.\nWork collaboratively with fellow software engineers to build features requested by business stakeholders.\nParticipate in Agile teams to develop, test, and debug complex data processing pipelines and data analysis applications using big data processing systems such as Apache Spark.\nDiagnose, debug, and perform root cause analysis of issues and defects.\nSkills and experience that will lead to success:\nExperience with a modern software programming language. We use Python, and Pyspark extensively.\nExperience with cloud systems like AWS or Azure\nExperience writing SQL queries \nStrong written and verbal communication skills\nEagerness to work in a highly-collaborative environment\nSome visualization experience such as Tableau or PowerBI is helpful", "infer30b_before_item": "\nInput: Input: A job description is a document that clearly states the requirements, duties, responsibilities, and skills required to perform a specific role. You need to write a job description based on the given information. You should describe responsibilities and duties in your job description in a logical order. Remember that potential employees often get their first impression of your company from your job description. So, the job description should convey the attractiveness of your company. 
Company: [Company name]\nJob: Software Engineer\nJob Responsibilities:\nReceive and perform code reviews with other engineers.\nWrite unit, integration, and end-to-end tests to verify functionality using automated testing frameworks such as Pytest.\nWork collaboratively with fellow software engineers to build features requested by business stakeholders.\nParticipate in Agile teams to develop, test, and debug complex data processing pipelines and data analysis applications using big data processing systems such as Apache Spark.\nDiagnose, debug, and perform root cause analysis of issues and defects.\nSkills and experience that will lead to success:\nExperience with a modern software programming language. We use Python, and Pyspark extensively.\n", "infer30b_after_item": ". \n Output: [Company name] is looking for a Software Engineer to join our team. The successful candidate will be responsible for receiving and performing code reviews with other engineers, writing unit, integration, and end-to-end tests to verify functionality using automated testing frameworks such as Pytest, working collaboratively with fellow software engineers to build features requested by business stakeholders, participating in Agile teams to develop, test, and debug complex data processing pipelines and data analysis applications using big data processing systems such as Apache Spark, diagnosing, debugging, and performing root cause analysis of issues and defects.\n\nThe ideal candidate should have experience with a modern software programming language. We use Python, and Pyspark extensively. Experience with cloud systems like AWS or Azure is also required. Strong written and verbal communication skills are essential, as well as eagerness to work in a highly-collaborative environment. Some visualization experience such as Tableau or PowerBI is helpful. \n\n", "infer13b_before_item": "\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": ". \n\n", "infer7b_before_item": " Your job description should be at least 100 words long.\nInput: A job description is a document that clearly states the requirements, duties, responsibilities, and skills required to perform a specific role. You need to write a job description based on the given information. You should describe responsibilities and duties in your job description in a logical order. Remember that potential employees often get their first impression of your company from your job description. So, the job description should convey the attractiveness of your company. Company: [Company name]\nJob Responsibilities: Receive and perform code reviews with other engineers. Write unit, integration, and end-to-end tests to verify functionality using automated testing frameworks such as Pytest. Work collaboratively with fellow software engineers to build features requested by business stakeholders. Participate in Agile teams to develop, test, and debug complex data processing pipelines and data analysis applications using big data processing systems such as Apache Spark. 
Diagnose, debug, and perform root cause analysis of issues and defects. Skills and experience that will lead to success: Experience with a modern software programming language. We use Python, and Pyspark extensively. Experience", "infer7b_after_item": ". \n Output: Job Description:\nSoftware Engineer\n[Company name]\n\nResponsibilities:\n\u2022 Receive and perform code reviews with other engineers.\n\u2022 Write unit, integration, and end-to-end tests to verify functionality using automated testing frameworks such as Pytest.\n\u2022 Work collaboratively with fellow software engineers to build features requested by business stakeholders.\n\u2022 Participate in Agile teams to develop, test, and debug complex data processing pipelines and data analysis applications using big data processing systems such as Apache Spark.\n\u2022 Diagnose, debug, and perform root cause analysis of issues and defects.\n\nSkills and Experience:\n\u2022 Experience with a modern software programming language. We use Python, and Pyspark extensively.\n\u2022 Experience with cloud systems like AWS or Azure.\n\u2022 Experience writing SQL queries.\n\u2022 Strong written and verbal communication skills.\n\u2022 Eagerness to work in a highly-collaborative environment.\n\u2022 Some visualization experience such as Tableau or PowerBI is helpful. \n\n"} +{"id": 57, "instruction": "Considering the reason for the rejection, write a polite rejection letter for rejecting a candidate.", "input": "Reason: You came in second", "infer30b_before_item": "\n\nInput: Input: Considering the reason for the rejection, write a polite rejection letter for rejecting a candidate. Reason: You came in second \n Output:\n\nInput: Input: Considering the reason for the rejection, write a polite rejection letter for rejecting a candidate. Reason: You came in second \n Output:\n\nInput: Input: Considering the reason for the rejection, write a polite rejection letter for rejecting a candidate. Reason: You came in second \n Output:\n\nInput: Input: Considering the reason for the rejection, write a polite rejection letter for rejecting a candidate. Reason: You came in second \n Output:\n\nInput: Input: Considering the reason for the rejection, write a polite rejection letter for rejecting a candidate. Reason: You came in second \n Output:\n\nInput: Input: Considering the reason for the rejection, write a polite rejection letter for rejecting a candidate. Reason: You came in second \n Output:\n\nInput: Input: Considering the reason for the rejection, write a polite rejection letter", "infer30b_after_item": " for the position. \n Output: Dear [Candidate Name],\n\nThank you for taking the time to apply for the [Position Name] position at [Company Name]. We appreciate your interest and the effort you put into the application process. \n\nUnfortunately, we have decided to offer the position to another candidate. We were impressed with your qualifications and experience, and you came in second for the position. \n\nWe wish you the best of luck in your future endeavors and thank you again for your interest in [Company Name]. 
\n\nSincerely,\n[Your Name] \n\n", "infer13b_before_item": " Output: A polite rejection letter for rejecting a candidate.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " place. \n Output: Dear [Candidate Name],\n\nWe appreciate your interest in the position at [Company Name]. We regret to inform you that you have not been selected for the position.\n\nWe were impressed by your qualifications and experience, but unfortunately, another candidate was selected for the position. We wish you the best of luck in your future endeavors.\n\nSincerely,\n[Company Name] \n\n", "infer7b_before_item": " Output: Considering the reason for the rejection, write a polite rejection letter for rejecting a candidate. Reason: You came in second \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": " in the interview. \n Output: Dear [Candidate],\n\nThank you for your interest in our position. We appreciate your time and effort in applying for the job. We have carefully reviewed your application and unfortunately, we have decided to move forward with another candidate.\n\nWe wish you the best of luck in your future endeavors.\n\nSincerely,\n[Name] \n\n"} +{"id": 58, "instruction": "We've already decided on the title and topic for our blog post. I would appreciate it if you wrote an introduction paragraph to attract the readers' attention.", "input": "Blog title: Healthy Apple Muffins\nBlog Topic: recipe and tips for cooking a good apple muffin", "infer30b_before_item": "\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: Are you looking for a delicious and healthy treat to satisfy your sweet tooth? Look no further than our recipe for healthy apple muffins! These muffins are packed with flavor and nutrition, making them the perfect snack for any occasion. In this blog post, we'll share our recipe and tips for cooking a perfect apple muffin. \n\n", "infer13b_before_item": " Output: We've already decided on the title and topic for our blog post. I would appreciate it if you wrote an introduction paragraph to attract the readers' attention. 
Blog title: Healthy Apple Muffins\nBlog Topic: recipe and tips for cooking a good apple muffin \n\nimport java.util.*;\nimport java.io.*;\n\npublic class Main {\n\n\tpublic static void main(String[] args) throws IOException {\n\t\t// TODO Auto-generated method stub\n\t\tScanner sc = new Scanner(System.in);\n\t\tSystem.out.println(\"Input: Input: We've already decided on the title and topic for our blog post. I would appreciate it if you wrote an introduction paragraph to attract the readers' attention. Blog title: Healthy Apple Muffins\");\n\t\tString input = sc.nextLine();\n\t\tSystem.out.println(\"Blog title: Healthy Apple Muffins\");\n\t\tString blogTitle = sc.nextLine();\n\t\tSystem.out.println(\"Blog Topic: recipe and tips for cooking a good apple muffin \");\n\t\tString blogTopic", "infer13b_after_item": " \n Output: Are you looking for a delicious and healthy way to start your day? Look no further than these healthy apple muffins! With a few simple ingredients and a few easy steps, you can have a delicious and nutritious breakfast ready in no time. \n\n", "infer7b_before_item": " I would like to see a recipe for a healthy apple muffin and a few tips on how to make a good apple muffin.\n\nComment: I'm voting to close this question as off-topic because it is not a question.\n\nComment: I'm voting to close this question as off-topic because it is not a question.\n\nComment: I'm voting to close this question as off-topic because it is not a question.\n\nComment: I'm voting to close this question as off-topic because it is not a question.\n\nComment: I'm voting to close this question as off-topic because it is not a question.\n\nComment: I'm voting to close this question as off-topic because it is not a question.\n\nComment: I'm voting to close this question as off-topic because it is not a question.\n\nComment: I'm voting to close this question as off-topic because it is not a question.\n\nComment: I'm voting to close this question as off-topic because it is not a question.\n\nComment: I'm voting to close this question as off-topic because it", "infer7b_after_item": " \n Output: Are you looking for a healthy and delicious way to start your day? Look no further! In this blog post, we will be sharing our recipe and tips for cooking a good apple muffin. \n\nApple muffins are a great way to start your day, as they are packed with nutrients and are easy to make. They are also a great way to use up any leftover apples you may have lying around. \n\nThe recipe is simple and can be easily customized to your taste. All you need is a few basic ingredients such as flour, sugar, baking powder, eggs, butter, and apples. Once you have all the ingredients, you can start by mixing the dry ingredients together and then adding the wet ingredients. Once the batter is mixed, you can pour it into a muffin tin and bake for 20 minutes. \n\nThe tips for cooking a good apple muffin are also important. First, make sure to use fresh apples. If you are using leftover apples, make sure to wash them thoroughly and remove any bruises"} +{"id": 59, "instruction": "Design an outline for a blog post based on the given information and list the sections accordingly.", "input": "This blog post is going to about making a list of last-minute gift ideas. Also, the tone of the post is going to be relaxed and casual.", "infer30b_before_item": " Output: Design a blog post based on the given information and list the sections accordingly. This blog post is going to be about making a list of last-minute gift ideas. 
Also, the tone of the post is going to be relaxed and casual.\n\nInput: Input: Design an outline for a blog post based on the given information and list the sections accordingly. This blog post is going to be about making a list of last-minute gift ideas. Also, the tone of the post is going to be relaxed and casual. \n Output: Output: Design a blog post based on the given information and list the sections accordingly. This blog post is going to be about making a list of last-minute gift ideas. Also, the tone of the post is going to be relaxed and casual.\n\nInput: Input: Design an outline for a blog post based on the given information and list the sections accordingly. This blog post is going to be about making a list of last-minute gift ideas. Also, the tone of the post is going to be relaxed and casual. \n Output: Output: Design a blog post based on the given information and list the sections accordingly. This blog post is going", "infer30b_after_item": " \n Output: I. Introduction \nA. Introduction to the topic of last-minute gift ideas \nB. Overview of the post \n\nII. Tips for Last-Minute Gift Ideas \nA. Consider the recipient's interests \nB. Think outside the box \nC. Look for unique items \n\nIII. Examples of Last-Minute Gift Ideas \nA. Gift cards \nB. Experiences \nC. Handmade items \n\nIV. Conclusion \nA. Summary of the post \nB. Call to action \n\n", "infer13b_before_item": " Output: Write a blog post based on the given outline.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: I. Introduction \nA. Introduction to the topic \nB. Why this topic is important \n\nII. Last-Minute Gift Ideas \nA. Gift cards \nB. Experiences \nC. Handmade gifts \nD. Gadgets \n\nIII. Conclusion \nA. Summary of last-minute gift ideas \nB. Final thoughts \n\n", "infer7b_before_item": " Output: Write a blog post based on the outline. \n# 1. \nLast-Minute Gift Ideas\n**1.** **Last-Minute Gift Ideas**\n**2.** **Last-Minute Gift Ideas**\n**3.** **Last-Minute Gift Ideas**\n**4.** **Last-Minute Gift Ideas**\n**5.** **Last-Minute Gift Ideas**\n**6.** **Last-Minute Gift Ideas**\n**7.** **Last-Minute Gift Ideas**\n**8.** **Last-Minute Gift Ideas**\n**9.** **Last-Minute Gift Ideas**\n**10.** **Last-Minute Gift Ideas**\n**11.** **Last-Minute Gift Ideas**\n**12.** **Last-Minute Gift Ideas**\n**13.** **Last-Minute Gift Ideas**\n**14.** **Last-Minute Gift Ideas**\n**15.** **Last-Minute Gift Ide", "infer7b_after_item": " \n Output: I. Introduction \nA. Introduce the topic of last-minute gift ideas \nB. Set the tone of the post \n\nII. Last-Minute Gift Ideas \nA. Gift cards \nB. Books \nC. Jewelry \n\nIII. Conclusion \nA. Summarize the post \nB. 
Offer a few more ideas \n\n"} +{"id": 60, "instruction": "Write a section for a blog post and try to cover all of the provided information about this section in your text.", "input": "Blog Topic: 7 Fall Fashion Trends Worth Taking on Your Trip\nSection Title: Trusty Jeans\nMain point of the section: jeans can be worn all year and look good with everything .", "infer30b_before_item": "\nInput: Input: Write a section for a blog post and try to cover all of the provided information about this section in your text. Blog Topic: 7 Fall Fashion Trends Worth Taking on Your Trip\nSection Title: Trusty Jeans\nMain point of the section: jeans can be worn all year and look good with everything. \n Output:\nInput: Input: Write a section for a blog post and try to cover all of the provided information about this section in your text. Blog Topic: 7 Fall Fashion Trends Worth Taking on Your Trip\nSection Title: Trusty Jeans\nMain point of the section: jeans can be worn all year and look good with everything. \n Output:\nInput: Input: Write a section for a blog post and try to cover all of the provided information about this section in your text. Blog Topic: 7 Fall Fashion Trends Worth Taking on Your Trip\nSection Title: Trusty Jeans\nMain point of the section: jeans can be worn all year and look good with everything. \n Output:\nInput: Input: Write a section for a blog post and try to cover", "infer30b_after_item": " \n Output: Jeans are a timeless fashion staple that can be worn all year round. They are versatile and can be styled in a variety of ways. For a casual look, pair your jeans with a t-shirt and sneakers. For a more sophisticated look, try pairing your jeans with a blouse and heels. Jeans are also great for traveling as they are comfortable and can be worn with almost any outfit. They are also great for layering, so you can easily add a jacket or sweater to your look. Jeans are a must-have for any traveler and can be worn with almost anything. \n\n", "infer13b_before_item": "\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: Jeans are a classic staple in any wardrobe, and they\u2019re especially great for traveling. They\u2019re comfortable, versatile, and can be dressed up or down depending on the occasion. Plus, they\u2019re a great way to stay warm in the cooler months. \n\nFor fall, jeans are a must-have item. They\u2019re a great way to stay stylish and comfortable while traveling. Jeans can be worn with a variety of tops, sweaters, and jackets, so you can easily mix and match to create different looks. Plus, they\u2019re a great way to stay warm in the cooler months. \n\nJeans are a great option for traveling because they\u2019re lightweight, easy to pack, and can be worn with a variety of items. They\u2019re also a great way to stay warm in the cooler months. Plus, they\u2019re a classic staple in any wardrobe, so you can easily mix and match to create different looks. 
\n\nJeans are a great option for traveling because they\u2019re lightweight, easy to pack, and can be worn with a variety of items.", "infer7b_before_item": "\n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * ", "infer7b_after_item": "\nSub-points:\n- Jeans are a classic and timeless piece of clothing that can be worn with any outfit.\n- They can be dressed up or down depending on the occasion.\n- They are a great choice for traveling because they are comfortable and versatile. \n Output: Jeans are a classic and timeless piece of clothing that can be worn with any outfit. They can be dressed up or down depending on the occasion. They are a great choice for traveling because they are comfortable and versatile. Jeans can be worn all year and look good with everything. They are a staple in any wardrobe and can be worn with any outfit. \n\n"} +{"id": 61, "instruction": "Create a detailed caption for an Instagram post about a discount. A proper caption should explain the product, offer the discount, and tell the user how to claim it.", "input": "Product: Golang course\nProduct description: teaching the foundational skills for building cloud services, web applications, DevOps tools, and backend development\nDiscount: 50%\nHow to claim: Use the coupon code: GOLANG", "infer30b_before_item": "\nInput: Input: Create a detailed caption for an Instagram post about a discount. A proper caption should explain the product, offer the discount, and tell the user how to claim it. Product: Golang course\nProduct description: teaching the foundational skills for building cloud services, web applications, DevOps tools, and backend development\nDiscount: 50%\nHow to claim: Use the coupon code: GOLANG \n Output:\nInput: Input: Create a detailed caption for an Instagram post about a discount. A proper caption should explain the product, offer the discount, and tell the user how to claim it. Product: Golang course\nProduct description: teaching the foundational skills for building cloud services, web applications, DevOps tools, and backend development\nDiscount: 50%\nHow to claim: Use the coupon code: GOLANG \n Output:\nInput: Input: Create a detailed caption for an Instagram post about a discount. A proper caption should explain the product, offer the discount, and tell the user how to claim it. Product: Golang course\nProduct description: teaching the foundational skills for", "infer30b_after_item": "50 \n Output: Learn the foundational skills for building cloud services, web applications, DevOps tools, and backend development with our Golang course! Get 50% off when you use the coupon code: GOLANG50. 
\n\n", "infer13b_before_item": "\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": "50 \n Output: Are you looking to learn the foundational skills for building cloud services, web applications, DevOps tools, and backend development? Then look no further! We're offering a 50% discount on our Golang course, so you can learn the fundamentals of this powerful programming language. Simply use the coupon code: GOLANG50 to claim your discount. \n\n", "infer7b_before_item": " A post with a caption and a link to the course\n\n\"\"\"\n\n\ndef create_post(input):\n \"\"\"\n Create a post with a caption and a link to the course\n \"\"\"\n caption = input['caption']\n link = input['link']\n return {\n 'caption': caption,\n 'link': link\n }\n\n\ndef create_post_from_input(input):\n \"\"\"\n Create a post with a caption and a link to the course\n \"\"\"\n caption = input['caption']\n link = input['link']\n return {\n 'caption': caption,\n 'link': link\n }\n\n\ndef create_post_from_input_with_discount(input):\n \"\"\"\n Create a post with a caption and a link to the course\n \"\"\"\n caption = input['caption']\n link = input['link']\n discount = input['discount']\n return {\n 'caption': caption,\n 'link': link,\n 'discount': discount\n }\n", "infer7b_after_item": "50 \n Output: Get 50% off the Golang course with the coupon code GOLANG50! Learn the foundational skills for building cloud services, web applications, DevOps tools, and backend development. \n\n"} +{"id": 62, "instruction": "Create a captivating Instagram caption based on the provided details. Try to think of as many captions as you can.", "input": "I am sharing images of the first snow of the year taken from a window in an office", "infer30b_before_item": " Output: Create a captivating Instagram caption based on the provided details. Try to think of as many captions as you can. I am sharing images of the first snow of the year taken from a window in an office\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " building. \n Output: 1. The first snow of the year is here! Capturing the beauty of the snow from the window of an office building. \n2. The snow is here and it's magical! Capturing the beauty of the first snow of the year from an office window. \n3. The first snow of the year is here and it's beautiful! Capturing the beauty of the snow from an office window. \n4. The snow is here and it's so beautiful! Capturing the beauty of the first snow of the year from an office window. \n5. The first snow of the year is here and it's so magical! Capturing the beauty of the snow from an office window. \n6. The snow is here and it's so magical! 
Capturing the beauty of the first snow of the year from an office window. \n7. The first snow of the year is here and it's so beautiful! Capturing the beauty of the snow from an office window. \n8. The snow is here and it's so beautiful! Capturing the beauty of the first snow of the year from an office window. \n9. The", "infer13b_before_item": " Output: The caption that you think is the best.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " building. \n Output: Winter has arrived and it's time to embrace the beauty of the first snowfall! #snowday #firstsnow #winterwonderland \n\n", "infer7b_before_item": " Output: Return a list of captions that you think are the best.\n\nExample:\n\nInput:\n\nInput:\n\nOutput:\n\n[\n \"It's snowing outside!\",\n \"It's snowing outside!\",\n \"It's snowing outside!\",\n \"It's snowing outside!\",\n \"It's snowing outside!\",\n \"It's snowing outside!\",\n \"It's snowing outside!\",\n \"It's snowing outside!\",\n \"It's snowing outside!\",\n \"It's snowing outside!\",\n \"It's snowing outside!\",\n \"It's snowing outside!\",\n \"It's snowing outside!\",\n \"It's snowing outside!\",\n \"It's snowing outside!\",\n \"It's snowing outside!\",\n \"It's snowing outside!\",\n \"It's snowing outside!\",\n \"It's snowing outside!\",\n \"It's snowing outside!\",\n ", "infer7b_after_item": " building. \n Output: #SnowDay #FirstSnowOfTheYear #OfficeBuilding #WindowSights #WinterWonderland #ColdWeather #SnowFlakes #SnowCoveredGround #SnowCoveredTrees #SnowCoveredRoads #SnowCoveredLake #SnowCoveredFields #SnowCoveredMountains #SnowCoveredHills #SnowCoveredPark #SnowCoveredPavement #SnowCoveredPath #SnowCoveredFields #SnowCoveredFarm #SnowCoveredFarm #SnowCoveredFields #SnowCoveredFields #SnowCoveredFields #SnowCoveredFields #SnowCoveredFields #SnowCoveredFields #SnowCoveredFields #SnowCoveredFields #SnowCoveredFields #SnowCoveredFields #SnowCoveredFields #SnowCoveredFields #SnowCoveredFields #SnowCoveredFields #SnowCoveredFields #SnowCoveredFields"} +{"id": 63, "instruction": "You are given a topic for an Instagram post. Help the post reach a broader audience by suggesting hashtags related to the post.", "input": "Another episode of women in science is out now", "infer30b_before_item": " Output: A list of hashtags related to the post.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": "! 
\n Output: #womeninscience #womeninstem #womeninmath #womeninengineering #womeninresearch #womeninSTEM #womeninSTEAM #womeninSTEMcareers #womeninSTEMeducation #womeninSTEMhistory #womeninSTEMleadership #womeninSTEMnetworking #womeninSTEMprofessionals #womeninSTEMroles #womeninSTEMsuccess #womeninSTEMsupport #womeninSTEMtechnology #womeninSTEMwork #womeninSTEMworkforce #womeninSTEMworkshops #womeninSTEMwriters #womeninSTEMwriters #womeninSTEMwriters #womeninSTEMwriters #womeninSTEMwriters #womeninSTEMwriters #womeninSTEMwriters #womeninSTEMwriters #womeninSTEMwriters #womeninSTEMwriters #womeninSTEMwriters #womeninSTEMwriters #womeninSTEMwriters #women", "infer13b_before_item": " You are given a list of hashtags and a list of posts. You are to find the hashtags that are used in the posts.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": "! \n Output: #WomenInScience #Science #Scientists #WomenInSTEM #STEM #Research #Innovation #Discovery #ScienceCommunity #ScienceEducation #ScienceAdvocacy #ScienceForAll #ScienceForChange #ScienceForHumanity #ScienceForTheFuture #ScienceForThePlanet #ScienceForTheEnvironment #ScienceForTheEarth #ScienceForTheFuture #ScienceForTheWorld #ScienceForTheGenerations #ScienceForTheGenerationsToCome #ScienceForTheGenerationsToCome #ScienceForTheGenerationsToCome #ScienceForTheGenerationsToCome #ScienceForTheGenerationsToCome #ScienceForTheGenerationsToCome #ScienceForTheGenerationsToCome #ScienceForTheGenerationsToCome #ScienceForTheGenerationsToCome #ScienceForTheGenerationsToCome #ScienceForTheGenerationsToCome #ScienceForTheGenerationsToCome #ScienceForTheGenerationsToCome #ScienceForTheGenerationsToCome #ScienceForTheGenerationsToCome #ScienceFor", "infer7b_before_item": " Output: You are given a topic for an Instagram post. Help the post reach a broader audience by suggesting hashtags related to the post. Another episode of women in science is out now \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": "! 
#WomenInScience #Science #WomenInTechnology #GirlsInSTEM #STEM #WomenInSTEM #GirlsInSTEM #GirlsInTechnology #GirlsInSTEM #GirlsInTechnology #GirlsInSTEM #GirlsInTechnology #GirlsInSTEM #GirlsInTechnology #GirlsInSTEM #GirlsInTechnology #GirlsInSTEM #GirlsInTechnology #GirlsInSTEM #GirlsInTechnology #GirlsInSTEM #GirlsInTechnology #GirlsInSTEM #GirlsInTechnology #GirlsInSTEM #GirlsInTechnology #GirlsInSTEM #GirlsInTechnology #GirlsInSTEM #GirlsInTechnology #GirlsInSTEM #GirlsInTechnology #GirlsInSTEM #GirlsInTechnology #GirlsInSTEM #GirlsInTechnology #GirlsInSTEM #GirlsInTechnology #GirlsInSTEM #GirlsInTechnology #Girls"} +{"id": 64, "instruction": "Use appropriate emojis to convey the meaning in the given text.", "input": "Cute cat\nplayful\nSoft", "infer30b_before_item": " Output: Use appropriate emojis to convey the meaning in the given text. Cute cat\nplayful\nSoft \nExplanation:\n\nInput: Input: Use appropriate emojis to convey the meaning in the given text. Cute cat\nplayful\nSoft \n Output: Output: Use appropriate emojis to convey the meaning in the given text. Cute cat\nplayful\nSoft \nExplanation:\n\nInput: Input: Use appropriate emojis to convey the meaning in the given text. Cute cat\nplayful\nSoft \n Output: Output: Use appropriate emojis to convey the meaning in the given text. Cute cat\nplayful\nSoft \nExplanation:\n\nInput: Input: Use appropriate emojis to convey the meaning in the given text. Cute cat\nplayful\nSoft \n Output: Output: Use appropriate emojis to convey the meaning in the given text. Cute cat\nplayful\nSoft \nExplanation:\n\nInput: Input: Use appropriate emojis to convey the meaning in the given text. Cute cat\nplayful\nSoft \n Output:", "infer30b_after_item": " \n Output: \ud83d\udc31 \ud83e\udd17 \ud83d\ude3b \n\n", "infer13b_before_item": " Output: Use appropriate emojis to convey the meaning in the given text. Cute cat\nSoft \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": "ly\nPurring\n\nOutput: \ud83d\udc31 \ud83d\ude0a \ud83d\udc08 \ud83d\udc31 \ud83d\udc31 \ud83d\udc31 \ud83d\udc31 \ud83d\udc31 \ud83d\udc31 \ud83d\udc31 \ud83d\udc31 \ud83d\udc31 \ud83d\udc31 \ud83d\udc31 \ud83d\udc31 \ud83d\udc31 \ud83d\udc31 \ud83d\udc31 \ud83d\udc31 \ud83d\udc31 \ud83d\udc31 \ud83d\udc31 \ud83d\udc31 \ud83d\udc31 \ud83d\udc31 \ud83d\udc31 \ud83d\udc31 \ud83d\udc31 \ud83d\udc31 \ud83d\udc31 \ud83d\udc31 \ud83d\udc31 \ud83d\udc31 \ud83d\udc31 \ud83d\udc31 \ud83d\udc31 \ud83d\udc31 \ud83d\udc31 \ud83d\udc31 \ud83d\udc31 \ud83d\udc31 \ud83d\udc31 \ud83d\udc31 \ud83d\udc31 \ud83d\udc31 \ud83d\udc31 \ud83d\udc31 \ud83d\udc31 \ud83d\udc31 \ufffd", "infer7b_before_item": " Output: Use appropriate emojis to convey the meaning in the given text. 
Cute cat\nplayful\nSoft\n# 1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.", "infer7b_after_item": " fur \n Output: \ud83d\ude38\ud83d\ude39\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ud83d\udc31\ufffd\ufffd"} +{"id": 65, "instruction": "The topic of a YouTube post has been described and based on the information, you need to write a hook for starting the post. A catchy hook will keep your readers interested so they keep reading. It would be great if you could come up with as many hooks as you can.", "input": "A video showing how to make a tasty cup of coffee", "infer30b_before_item": " Output: The topic of a YouTube post has been described and based on the information, you need to write a hook for starting the post. A catchy hook will keep your readers interested so they keep reading. It would be great if you could come up with as many hooks as you can. A video showing how to make a tasty cup of coffee\n\nComment: I'm voting to close this question as off-topic because it is not a question.\n\nComment: I'm voting to close this question as off-topic because it is not a question.\n\nAnswer: \\begin{blockquote}\n\nA video showing how to make a tasty cup of coffee\n\\end{blockquote}\n\n\\begin{itemize}\n\\item A video showing how to make a tasty cup of coffee\n\\item A video showing how to make a tasty cup of coffee\n\\item A video showing how to make a tasty cup of coffee\n\\item A video showing how to make a tasty cup of coffee\n\\item A video showing how to make a tasty cup of coffee\n\\item A video showing how to make a tasty cup of coffee\n\\item A", "infer30b_after_item": " has been posted. \n Output: Are you ready to make the perfect cup of coffee? \n\n", "infer13b_before_item": " Output: The topic of a YouTube post has been described and based on the information, you need to write a hook for starting the post. A catchy hook will keep your readers interested so they keep reading. It would be great if you could come up with as many hooks as you can. 
A video showing how to make a tasty cup of coffee \n\n\"\"\"\n\nimport re\n\n\ndef get_hook(topic):\n \"\"\"\n :type topic: str\n :rtype: str\n \"\"\"\n hooks = [\n \"A video showing how to make a tasty cup of coffee\",\n \"A video showing how to make a tasty cup of coffee\",\n \"A video showing how to make a tasty cup of coffee\",\n \"A video showing how to make a tasty cup of coffee\",\n \"A video showing how to make a tasty cup of coffee\",\n \"A video showing how to make a tasty cup of coffee\",\n \"A video showing how to make a tasty cup of coffee\",\n \"A video showing how to make a tasty cup of coffee\",\n ", "infer13b_after_item": " \n Output: Are you looking for a delicious cup of coffee that you can make at home? Then this video is for you! Learn how to make a tasty cup of coffee with just a few simple ingredients. \n\n", "infer7b_before_item": " The output is the post itself. You need to write a post that is interesting and engaging. You need to write a post that is interesting and engaging. You need to write a post that is interesting and engaging. You need to write a post that is interesting and engaging. You need to write a post that is interesting and engaging. You need to write a post that is interesting and engaging. You need to write a post that is interesting and engaging. You need to write a post that is interesting and engaging. You need to write a post that is interesting and engaging. You need to write a post that is interesting and engaging. You need to write a post that is interesting and engaging. You need to write a post that is interesting and engaging. You need to write a post that is interesting and engaging. You need to write a post that is interesting and engaging. You need to write a post that is interesting and engaging. You need to write a post that is interesting and engaging. You need to write a post that is interesting and engaging. You need to write a post that is interesting and engaging. You need to write a post that is interesting and engaging. You need", "infer7b_after_item": ". \n Output: Are you looking for a quick and easy way to make a delicious cup of coffee? Then this video is for you! \n\n"} +{"id": 66, "instruction": "Using the provided topic as a starting point, brainstorm ideas for videos that can be made about it on YouTube.", "input": "In spite of the fact that procrastination feels bad to you, why do you do it?", "infer30b_before_item": " Output: Create a list of ideas for videos that can be made about the topic.\nSteps:\n 1. Brainstorm ideas for videos that can be made about the topic.\n 2. Create a list of ideas for videos that can be made about the topic.\nMore Information:\n# **EXERCISE**\nInput: Input: Using the provided topic as a starting point, brainstorm ideas for videos that can be made about it on YouTube. In spite of the fact that procrastination feels bad to you, why do you do it? \n Output: Output: Create a list of ideas for videos that can be made about the topic.\n Steps:\n 1. Brainstorm ideas for videos that can be made about the topic.\n 2. Create a list of ideas for videos that can be made about the topic.\nMore Information:\n# **EXERCISE**\nInput: Input: Using the provided topic as a starting point, brainstorm ideas for videos that can be made about it on YouTube. In spite of the fact that procrastination feels bad to you, why do you do it? 
\n Output: Output: Create a list of", "infer30b_after_item": " \n Output: - How to Overcome Procrastination \n- The Psychology of Procrastination \n- Tips to Stop Procrastinating \n- The Benefits of Procrastination \n- The Science Behind Procrastination \n- How to Make Procrastination Work for You \n- The Art of Procrastination \n- The Psychological Reasons Why We Procrastinate \n- How to Break the Cycle of Procrastination \n- The Pros and Cons of Procrastination \n- How to Make Procrastination Work for You \n- The Benefits of Procrastination \n- How to Overcome Procrastination \n- The Psychology of Procrastination \n- Tips to Stop Procrastinating \n- The Science Behind Procrastination \n- How to Make Procrastination Work for You \n- The Psychological Reasons Why We Procrastinate \n- How to Break the Cycle of Procrastination \n- The Pros and Cons of Procrastination \n- The Art of Procrast", "infer13b_before_item": " Output: Create a video that you would like to see on YouTube.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: Ideas for videos about procrastination on YouTube could include: \n- Tips for overcoming procrastination \n- The psychology behind procrastination \n- How to set achievable goals and stay motivated \n- The benefits of procrastination \n- The consequences of procrastination \n- How to manage your time better \n- How to stay focused and productive \n- How to break bad habits \n- How to make better decisions \n- How to deal with stress and anxiety \n- How to stay organized \n- How to prioritize tasks \n- How to set boundaries and take breaks \n- How to stay motivated and inspired \n- How to create a productive environment \n- How to manage your time effectively \n- How to stay accountable \n- How to overcome fear and self-doubt \n- How to set realistic expectations \n- How to stay motivated and focused \n- How to set goals and track progress \n- How to stay motivated and productive \n- How to manage your time better \n- How to stay organized and", "infer7b_before_item": " Using the provided topic as a starting point, brainstorm ideas for videos that can be made about it on YouTube. In spite of the fact that procrastination feels bad to you, why do you do it? \n 1. \n 2. \n 3. \n 4. \n 5. \n 6. \n 7. \n 8. \n 9. \n 10. \n 11. \n 12. \n 13. \n 14. \n 15. \n 16. \n 17. \n 18. \n 19. \n 20. \n 21. \n 22. \n 23. \n 24. \n 25. \n 26. \n 27. \n 28. \n 29. \n 30. \n 31. \n 32. \n 33. \n 34. \n 35. \n 36. 
\n 3", "infer7b_after_item": " \n Output: Ideas for videos about procrastination on YouTube:\n- \"The Dangers of Procrastination\"\n- \"How to Overcome Procrastination\"\n- \"The Psychology of Procrastination\"\n- \"The Benefits of Overcoming Procrastination\"\n- \"How to Stop Procrastinating\"\n- \"The Best Ways to Avoid Procrastination\"\n- \"The Best Ways to Manage Procrastination\"\n- \"The Best Ways to Beat Procrastination\"\n- \"The Best Ways to Deal with Procrastination\"\n- \"The Best Ways to Cope with Procrastination\"\n- \"The Best Ways to Conquer Procrastination\"\n- \"The Best Ways to Defeat Procrastination\"\n- \"The Best Ways to Avoid Procrastination\"\n- \"The Best Ways to Stop Procrastination\"\n- \"The Best Ways to Overcome Procrastination\"\n- \"The Best Ways to Manage Procrastination\"\n- \"The Best Ways to Beat Procr"} +{"id": 67, "instruction": "You should choose a YouTube video title based on the video's content. A video's title tells viewers what to expect from it. It should be direct, honest, and clear. The title of the video needs to capture the attention of viewers, so do not use an unclear or ambiguous one.", "input": "A research study has been conducted to determine if exercise really can \"boost\" your metabolism.", "infer30b_before_item": " Output: You should choose a YouTube video title based on the video's content. A video's title tells viewers what to expect from it. It should be direct, honest, and clear. The title of the video needs to capture the attention of viewers, so do not use an unclear or ambiguous one. A research study has been conducted to determine if exercise really can \"boost\" your metabolism. \n Expected result: Expected result: You should choose a YouTube video title based on the video's content. A video's title tells viewers what to expect from it. It should be direct, honest, and clear. The title of the video needs to capture the attention of viewers, so do not use an unclear or ambiguous one. A research study has been conducted to determine if exercise really can \"boost\" your metabolism. \n Actual result: Actual result: You should choose a YouTube video title based on the video's content. A video's title tells viewers what to expect from it. It should be direct, honest, and clear. The title of the video needs to capture the attention of viewers, so do not use an unclear or ambiguous one. A research", "infer30b_after_item": " \n Output: \"Does Exercise Really Boost Your Metabolism? Research Study Results\" \n\n", "infer13b_before_item": " Output: The output of this program is a video that is uploaded to YouTube. The video should be a 3-5 minute video that explains the results of the research study. The video should be informative and engaging. It should be clear and concise. The video should be uploaded to YouTube and the link should be shared on social media.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: \"Research Study: Does Exercise Really Boost Your Metabolism?\" \n\n", "infer7b_before_item": " Output: The output of the video is the video itself. The output of the video is the video itself. The output of the video is the video itself. 
The output of the video is the video itself. The output of the video is the video itself. The output of the video is the video itself. The output of the video is the video itself. The output of the video is the video itself. The output of the video is the video itself. The output of the video is the video itself. The output of the video is the video itself. The output of the video is the video itself. The output of the video is the video itself. The output of the video is the video itself. The output of the video is the video itself. The output of the video is the video itself. The output of the video is the video itself. The output of the video is the video itself. The output of the video is the video itself. The output of the video is the video itself. The output of the video is the video itself. The output of the video is the video itself. The output of the video is the video itself. The output of the video is the video itself. The output of the video is the video itself. The output of the", "infer7b_after_item": " \n Output: \"Can Exercise Really Boost Your Metabolism? The Science Behind It\" \n\n"} +{"id": 68, "instruction": "Choosing a name for your product or business YouTube channel is an important part of the process. Based on the description of the product or business, you should come up with some interesting names. Take some time to brainstorm your ideas.", "input": "Here you will find videos and content that will help students prepare for the application process to graduate schools as well as how to apply to graduate schools", "infer30b_before_item": " Output: Choosing a name for your product or business YouTube channel is an important part of the process. Based on the description of the product or business, you should come up with some interesting names. Take some time to brainstorm your ideas. Here you will find videos and content that will help students prepare for the application process to graduate schools as well as how to apply to graduate schools\nDescription: Description: Choosing a name for your product or business YouTube channel is an important part of the process. Based on the description of the product or business, you should come up with some interesting names. Take some time to brainstorm your ideas. Here you will find videos and content that will help students prepare for the application process to graduate schools as well as how to apply to graduate schools\nKeywords: Keywords: Choosing a name for your product or business YouTube channel is an important part of the process. Based on the description of the product or business, you should come up with some interesting names. Take some time to brainstorm your ideas. Here you will find videos and content that will help students prepare for the application process to graduate schools as well as how to apply to graduate schools\nTitle: Title: Choosing a", "infer30b_after_item": ". \n Output: Graduate School Prep Academy, Graduate School Application Gurus, Graduate School Application Journey, Graduate School Application Pros, Graduate School Application Masters, Graduate School Application Masters Academy, Graduate School Application Masters Pros, Graduate School Application Masters Journey, Graduate School Application Masters Gurus. \n\n", "infer13b_before_item": " Output: The final product is a YouTube channel that is created and maintained by the student. 
The channel will have a description of the product or business and a list of videos that will be uploaded to the channel.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": ". \n Output: Graduate School Prepsters, Grad School Gurus, Grad School Pros, Grad School Prospective, Grad School Prospective Club, Grad School Prospective Academy. \n\n", "infer7b_before_item": " Output: The output of this process is a list of names that you can use for your product or business. You can use this list to create a shortlist of names that you can use for your product or business. \n# 1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.", "infer7b_after_item": ". \n Output: 1. Grad School Gurus\n2. Grad School Insiders\n3. Grad School Success\n4. Grad School Secrets\n5. Grad School Essentials\n6. Grad School Advice\n7. Grad School Tips\n8. Grad School Guides\n9. Grad School Experts\n10. Grad School Advisors\n11. Grad School Helpers\n12. Grad School Advisors\n13. Grad School Guides\n14. Grad School Experts\n15. Grad School Advisors\n16. Grad School Helpers\n17. Grad School Advisors\n18. Grad School Guides\n19. Grad School Experts\n20. Grad School Advisors\n21. Grad School Helpers\n22. Grad School Advisors\n23. Grad School Guides\n24. Grad School Experts\n25. Grad School Advisors\n26. Grad School Helpers\n27. Grad School Advisors\n28. Grad School Guides\n29. Grad School Experts\n30. Grad School Advisors\n31. Grad School Helpers\n32."} +{"id": 69, "instruction": "Find the answer that best describes the underlined SAT word. Select the correct option and explain the meaning of the underlined word.", "input": "Despite the _cacophony, the student tried to study. \nA. Loud sounds\nB. Difficult subject\nC. Late hour\nD. Low lighting", "infer30b_before_item": " Output: Find the answer that best describes the underlined SAT word. Select the correct option and explain the meaning of the underlined word. Despite the _cacophony, the student tried to study. \nA. Loud sounds\nB. Difficult subject\nC. Late hour\nD. Low lighting \nExplanation: The word _cacophony_ means loud sounds. The student was trying to study despite the loud sounds.\nInput: Input: Find the answer that best describes the underlined SAT word. Select the correct option and explain the meaning of the underlined word. The _cacophony_ of the city was too much for the country boy. \nA. Loud sounds\nB. Difficult subject\nC. Late hour\nD. Low lighting \n Output: Output: Find the answer that best describes the underlined SAT word. Select the correct option and explain the meaning of the underlined word. The _cacophony_ of the city was too much for the country boy. \nA. Loud sounds\nB. Difficult subject\nC. Late hour\nD. Low lighting \nExplanation: The word", "infer30b_after_item": " \n Output: A. Loud sounds. The word cacophony means a harsh, discordant, and unpleasant mixture of sounds. \n\n", "infer13b_before_item": " Output: Find the answer that best describes the underlined SAT word. 
Select the correct option and explain the meaning of the underlined word. Despite the _cacophony, the student tried to study. \nA. Loud sounds\nB. Difficult subject\nC. Late hour\nD. Low lighting \n Input: Input: Find the answer that best describes the underlined SAT word. Select the correct option and explain the meaning of the underlined word. The _cacophony of the city was deafening. \nA. Loud sounds\nB. Difficult subject\nC. Late hour\nD. Low lighting \n Output: Output: Find the answer that best describes the underlined SAT word. Select the correct option and explain the meaning of the underlined word. The _cacophony of the city was deafening. \nA. Loud sounds\nB. Difficult subject\nC. Late hour\nD. Low lighting \n Input: Input: Find the answer that best describes the underlined SAT word. Select the correct option and explain the meaning of the underlined word. The _cacophony of the city was de", "infer13b_after_item": "\n\nAnswer: B. Difficult subject \n Output: The underlined word \"cacophony\" means a loud, harsh, and discordant mixture of sounds. In this sentence, the student is trying to study despite the loud and discordant sounds that are present. \n\n", "infer7b_before_item": " Output: Select the correct option and explain the meaning of the underlined SAT word. Despite the _cacophony, the student tried to study. \nA. Loud sounds\nB. Difficult subject\nC. Late hour\nD. Low lighting \n# **SAT Vocabulary**\nThe SAT is a test that measures your ability to read and understand words. The SAT is a test that measures your ability to read and understand words. The SAT is a test that measures your ability to read and understand words. The SAT is a test that measures your ability to read and understand words. The SAT is a test that measures your ability to read and understand words. The SAT is a test that measures your ability to read and understand words. The SAT is a test that measures your ability to read and understand words. The SAT is a test that measures your ability to read and understand words. The SAT is a test that measures your ability to read and understand words. The SAT is a test that measures your ability to read and understand words. The SAT is a test that measures your ability to read and understand words. The SAT is a test that measures your ability", "infer7b_after_item": " \n Output: C. Late hour \n\n"} +{"id": 70, "instruction": "Find synonyms for the given word. You need to write down how the provided synonyms differ from the original word in terms of meaning, usage, etc.", "input": "adversity", "infer30b_before_item": " Output: Find synonyms for the given word. You need to write down how the provided synonyms differ from the original word in terms of meaning, usage, etc. adversity \n Expected Output: Expected Output: Find synonyms for the given word. You need to write down how the provided synonyms differ from the original word in terms of meaning, usage, etc. adversity \n Author: Author: Find synonyms for the given word. You need to write down how the provided synonyms differ from the original word in terms of meaning, usage, etc. adversity \n */\n\npublic class _001 {\n public static void main(String[] args) {\n String[] synonyms = {\"misfortune\", \"calamity\", \"disaster\", \"tragedy\", \"catastrophe\", \"adversity\"};\n for (int i = 0; i < synonyms.length; i++) {\n System.out.println(synonyms[i]);\n }\n }\n}\n", "infer30b_after_item": " \n Output: Adversity can be replaced with words such as difficulty, hardship, and challenge. 
Difficulty and hardship both imply a struggle or obstacle, while challenge implies a test or trial. \n\n", "infer13b_before_item": " Output: Find antonyms for the given word. You need to write down how the provided antonyms differ from the original word in terms of meaning, usage, etc. adversity \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: Adversity synonyms: hardship, misfortune, calamity, misfortune, difficulty, tribulation, trial, affliction, distress, trouble, woe, misery, calamity, misfortune, misfortune, adversity, misfortune, misfortune, misfortune, misfortune, misfortune, misfortune, misfortune, misfortune, misfortune, misfortune, misfortune, misfortune, misfortune, misfortune, misfortune, misfortune, misfortune, misfortune, misfortune, misfortune, misfortune, misfortune, misfortune, misfortune, misfortune, misfortune, misfortune, misfortune, misfortune, misfortune, misfortune, misfortune, misfortune, misfortune, misfortune, misfortune, misfortune, misfortune, misfortune, misfortune, misfortune, misfortune, misfortune, misfortune, misfortune, misfortune, misfortune, misfortune, mis", "infer7b_before_item": " Output: Find the synonyms for the given word. You need to write down how the provided synonyms differ from the original word in terms of meaning, usage, etc. adversity \n */\n\npublic class Synonyms {\n\n public static void main(String[] args) {\n Scanner sc = new Scanner(System.in);\n String word = sc.nextLine();\n String[] synonyms = sc.nextLine().split(\" \");\n for (String synonym : synonyms) {\n System.out.println(word + \" \" + synonym);\n }\n }\n}\n", "infer7b_after_item": " \n Output: Adversity: difficulty, hardship, trouble, affliction, tribulation, distress, affliction, misfortune, misery, calamity, misery, misfortune, misery, affliction, tribulation, distress, affliction, misfortune, misery, calamity, misery, misfortune, misery, affliction, tribulation, distress, affliction, misfortune, misery, calamity, misery, misfortune, misery, affliction, tribulation, distress, affliction, misfortune, misery, calamity, misery, misfortune, misery, affliction, tribulation, distress, affliction, misfortune, misery, calamity, misery, misfortune, misery, affliction, tribulation, distress, affliction, misfortune, misery, calamity, misery, misfortune, misery, affliction, tribulation, distress, affliction, misfortune, misery, calamity, misery, misfortune"} +{"id": 71, "instruction": "Discuss the origins and history of the word that has been given to you.", "input": "oblique", "infer30b_before_item": " Output: Discuss the origins and history of the word that has been given to you. oblique\n\nInput: Input: Discuss the origins and history of the word that has been given to you. oblique\n Output: Output: Discuss the origins and history of the word that has been given to you. oblique\n\nInput: Input: Discuss the origins and history of the word that has been given to you. oblique\n Output: Output: Discuss the origins and history of the word that has been given to you. oblique\n\nInput: Input: Discuss the origins and history of the word that has been given to you. 
oblique\n Output: Output: Discuss the origins and history of the word that has been given to you. oblique\n\nInput: Input: Discuss the origins and history of the word that has been given to you. oblique\n Output: Output: Discuss the origins and history of the word that has been given to you. oblique\n\nInput: Input: Discuss the origins and history of the word that has been given to you. oblique\n Output: Output: Discuss the origins and history of the", "infer30b_after_item": " \n Output: The word oblique has its origins in Latin, where it was used to describe something that was slanted or sloping. It was first used in English in the 15th century, and it was used to describe something that was not straight or direct. Over time, the word has come to be used to describe something that is not straightforward or clear, or something that is not in line with the expected or accepted norms. \n\n", "infer13b_before_item": " Output: Discuss the origins and history of the word that has been given to you. oblique \n\noblique\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: The word 'oblique' has its origins in the Latin word 'obliquus', which means'slanted' or 'diagonal'. It was first used in English in the 16th century to describe a slanting or diagonal line. It has since been used to describe a variety of things, such as an oblique angle, an oblique glance, or an oblique strategy. \n\n", "infer7b_before_item": " Output: Discuss the origins and history of the word that has been given to you. oblique \n*/\n\n// Initial Solution\n// I think the word is oblique because it means slanting or skewed.\n// I think the word is oblique because it means slanting or skewed.\n// I think the word is oblique because it means slanting or skewed.\n// I think the word is oblique because it means slanting or skewed.\n// I think the word is oblique because it means slanting or skewed.\n// I think the word is oblique because it means slanting or skewed.\n// I think the word is oblique because it means slanting or skewed.\n// I think the word is oblique because it means slanting or skewed.\n// I think the word is oblique because it means slanting or skewed.\n// I think the word is oblique because it means slanting or skewed.\n// I think the word is oblique because it means slanting or skewed.\n// I think the word is oblique because it means slanting or skewed", "infer7b_after_item": " \n Output: The word oblique has its origins in the Latin word obliquus, which means \"slanted\" or \"slanting\". It was first used in English in the 15th century and has since been used to describe a line or angle that is not perpendicular to a straight line. In mathematics, it is used to describe a line that is not perpendicular to a given line. In art, it is used to describe a line that is not perpendicular to a given line. In architecture, it is used to describe a line that is not perpendicular to a given line. In literature, it is used to describe a line that is not perpendicular to a given line. In politics, it is used to describe a line that is not perpendicular to a given line. 
In philosophy, it is used to describe a line that is not perpendicular to a given line. In psychology, it is used to describe a line that is not perpendicular to a given line. In economics, it is used to describe a line that is not perpendicular to a given line. In law, it is used to describe a line that is not perpendicular to a given line"} +{"id": 72, "instruction": "Give the provided brand a motto to use as a part of its marketing strategies.", "input": "The Ice cream shop is a small, quaint shop located in the heart of the historic district. They offer a variety of ice cream flavors, as well as Italian ice, gelato, and sorbet. The shop has a few tables and chairs, but most of their business is to-go. They offer a loyalty program where you can get a free scoop of ice cream after 10 purchases.", "infer30b_before_item": " Output: Give the provided brand a motto to use as a part of its marketing strategies. The Ice cream shop is a small, quaint shop located in the heart of the historic district. They offer a variety of ice cream flavors, as well as Italian ice, gelato, and sorbet. The shop has a few tables and chairs, but most of their business is to-go. They offer a loyalty program where you can get a free scoop of ice cream after 10 purchases. \n \n*/\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: \"Indulge in the Sweetness of Life - At The Ice Cream Shop!\" \n\n", "infer13b_before_item": " Output: Give the provided brand a motto to use as a part of its marketing strategies. The Ice cream shop is a small, quaint shop located in the heart of the historic district. They offer a variety of ice cream flavors, as well as Italian ice, gelato, and sorbet. The shop has a few tables and chairs, but most of their business is to-go. They offer a loyalty program where you can get a free scoop of ice cream after 10 purchases. \n\n\"\"\"\n\nimport random\n\n\ndef get_input():\n return input(\"Give the provided brand a motto to use as a part of its marketing strategies.\\n\")\n\n\ndef get_output():\n return input(\"Output: Give the provided brand a motto to use as a part of its marketing strategies.\\n\")\n\n\ndef main():\n input_ = get_input()\n output_ = get_output()\n\n if input_ == output_:\n print(\"Correct!\")\n else:\n print(\"Incorrect!\")\n\n\nif __name__ == \"__main", "infer13b_after_item": " \n Output: \"Sweet Treats for the Sweet Tooth: Enjoy the Taste of the Historic District!\" \n\n", "infer7b_before_item": " Output: Create a marketing plan for the Ice Cream Shop. 
The marketing plan should include the following: \n * A description of the target market \n * A description of the brand \n * A description of the brand's positioning \n * A description of the brand's personality \n * A description of the brand's promise \n * A description of the brand's motto \n * A description of the brand's logo \n * A description of the brand's tagline \n * A description of the brand's slogan \n * A description of the brand's mission statement \n * A description of the brand's vision statement \n * A description of the brand's brand identity \n * A description of the brand's brand image \n * A description of the brand's brand awareness \n * A description of the brand's brand loyalty \n * A description of the brand's brand equity \n * A description of the brand's brand extension \n * A description of the brand's brand extension \n", "infer7b_after_item": " \n Output: \"Treat yourself to a sweet treat at The Ice Cream Shop!\" \n\n"} +{"id": 73, "instruction": "A confirmation email should be written appropriately for the situation.", "input": "A meeting has been scheduled, and the sender expects the other to review the slides.", "infer30b_before_item": " Output: The recipient has reviewed the slides and is ready to meet.\nInput: Input: A meeting has been scheduled, and the sender expects the other to review the slides. \n Output: Output: The recipient has reviewed the slides and is ready to meet.\nInput: Input: A meeting has been scheduled, and the sender expects the other to review the slides. \n Output: Output: The recipient has reviewed the slides and is ready to meet.\nInput: Input: A meeting has been scheduled, and the sender expects the other to review the slides. \n Output: Output: The recipient has reviewed the slides and is ready to meet.\nInput: Input: A meeting has been scheduled, and the sender expects the other to review the slides. \n Output: Output: The recipient has reviewed the slides and is ready to meet.\nInput: Input: A meeting has been scheduled, and the sender expects the other to review the slides. \n Output: Output: The recipient has reviewed the slides and is ready to meet.\nInput: Input: A meeting has been scheduled, and the sender expects the other to review", "infer30b_after_item": "\nSubject: Meeting on Tuesday\nDear [Name],\n\nWe are looking forward to our meeting on Tuesday. Please review the slides in advance.\n\nBest,\n[Name] \n Output: Dear [Name],\n\nThank you for confirming your attendance at our meeting on Tuesday. We look forward to seeing you then. \n\nIn preparation for the meeting, please take a look at the slides we have provided. We believe they will be helpful in understanding the topic.\n\nBest,\n[Name] \n\n", "infer13b_before_item": " The output of the email should be a confirmation of the meeting. \n\nInput: Input: A confirmation email should be written appropriately for the situation. A meeting has been scheduled, and the sender expects the other to review the slides. \n Output: The output of the email should be a confirmation of the meeting. \n\nInput: Input: A confirmation email should be written appropriately for the situation. A meeting has been scheduled, and the sender expects the other to review the slides. \n Output: The output of the email should be a confirmation of the meeting. \n\nInput: Input: A confirmation email should be written appropriately for the situation. A meeting has been scheduled, and the sender expects the other to review the slides. 
\n Output: The output of the email should be a confirmation of the meeting. \n\nInput: Input: A confirmation email should be written appropriately for the situation. A meeting has been scheduled, and the sender expects the other to review the slides. \n Output: The output of the email should be a confirmation of the meeting. \n\nInput: Input: A confirmation email should be written appropriately for the", "infer13b_after_item": " \n Output: Dear [Name],\n\nI am writing to confirm our meeting scheduled for [Date] at [Time]. I have attached the slides for your review.\n\nI look forward to discussing them with you and getting your feedback.\n\nBest,\n[Your Name] \n\n", "infer7b_before_item": " A confirmation email should be written appropriately for the situation. A meeting has been scheduled, and the sender expects the other to review the slides. \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": " \n Output: I am writing to confirm our meeting on [date] at [time]. I have attached the slides for our discussion. I look forward to seeing you then. \n\n"} +{"id": 74, "instruction": "You need to write an email to negotiate your salary.", "input": "", "infer30b_before_item": " Output: You need to write an email to negotiate your salary.\nInput: Input: You need to write an email to negotiate your salary. \n Output: Output: You need to write an email to negotiate your salary.\nInput: Input: You need to write an email to negotiate your salary. \n Output: Output: You need to write an email to negotiate your salary.\nInput: Input: You need to write an email to negotiate your salary. \n Output: Output: You need to write an email to negotiate your salary.\nInput: Input: You need to write an email to negotiate your salary. \n Output: Output: You need to write an email to negotiate your salary.\nInput: Input: You need to write an email to negotiate your salary. \n Output: Output: You need to write an email to negotiate your salary.\nInput: Input: You need to write an email to negotiate your salary. \n Output: Output: You need to write an email to negotiate your salary.\nInput: Input: You need to write an email to negotiate your salary. ", "infer30b_after_item": "\n Output: Dear [Name],\n\nI am writing to discuss my salary. I have been working hard to contribute to the success of the company and I believe I am deserving of a higher salary. 
I am confident that I can continue to make a positive impact on the company and I am confident that I can justify a higher salary.\n\nI am open to discussing the details of my salary and I am confident that we can come to an agreement that is beneficial to both of us.\n\nThank you for your time and consideration.\n\nSincerely,\n[Your Name] \n\n", "infer13b_before_item": " You need to write an email to negotiate your salary.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n The The \n\n\n\n\n\n The The The", "infer13b_after_item": "\n Output: Dear [Name],\n\nI am writing to discuss my salary. I believe that my experience and qualifications make me a valuable asset to the team, and I would like to discuss a salary that reflects this. I am aware of the salary range for this position and I believe that I am worth a salary that is in the upper range.\n\nI am confident that I can bring a lot of value to the team and I am willing to negotiate a salary that is fair and reflects my skills and experience. I look forward to hearing your thoughts on this matter.\n\nThank you for your time and consideration.\n\nSincerely,\n[Your Name] \n\n", "infer7b_before_item": " You need to write an email to negotiate your salary.\nYou need to write an email to negotiate your salary.\nDear Mr. Smith,\nI am writing to you to discuss my salary. I have been working for your company for 2 years and I am very happy with my job. I am very interested in staying with your company and I would like to discuss my salary.\nI am currently earning $30,000 per year. I would like to discuss my salary with you. I would like to earn $35,000 per year.\nI am very interested in staying with your company and I would like to discuss my salary.\nThank you for your time.\nSincerely,\nYour Name\nYour Signature\n# 10. A Letter to a Friend\n**Topic:** A letter to a friend\n**Purpose:** To write a letter to a friend\n**Expected Results:** You will write a letter to a friend\n**Input:** You need to write a letter to a friend. \n Output: You need to write a letter to a friend\nYou need to write a", "infer7b_after_item": "\n Output: Dear [Name],\n\nI am writing to request a salary increase. I have been working for [Company] for [# of years] and have been a valuable asset to the team. I have been able to successfully complete all of my tasks and have been recognized for my contributions.\n\nI believe that my contributions have been worth more than the salary I am currently receiving. I would like to request a salary increase of [#]% to reflect my value to the company.\n\nI look forward to hearing from you and thank you for your time.\n\nSincerely,\n[Your Name] \n\n"} +{"id": 75, "instruction": "Write an email to attendees as a reminder that the event is coming up.", "input": "", "infer30b_before_item": " Output: The attendees receive the email. \n Precondition: Precondition: The event is scheduled. \n Postcondition: Postcondition: The attendees are reminded of the event.\nInput: Input: Write an email to attendees as a reminder that the event is coming up. \n Output: Output: The attendees receive the email. 
\n Precondition: Precondition: The event is scheduled. \n Postcondition: Postcondition: The attendees are reminded of the event.\nInput: Input: Write an email to attendees as a reminder that the event is coming up. \n Output: Output: The attendees receive the email. \n Precondition: Precondition: The event is scheduled. \n Postcondition: Postcondition: The attendees are reminded of the event.\nInput: Input: Write an email to attendees as a reminder that the event is coming up. \n Output: Output: The attendees receive the email. \n Precondition: Precondition: The event is scheduled. \n Postcondition: Postcondition: The attendees", "infer30b_after_item": "\n Output: Dear attendees,\n\nWe hope this email finds you well! This is a friendly reminder that our event is coming up soon. We are excited to see you all there and can't wait to share this experience with you. \n\nPlease remember to bring any items you may need for the event, such as a notebook and pen. We will provide any other materials you may need. \n\nWe look forward to seeing you soon!\n\nSincerely,\n[Your Name] \n\n", "infer13b_before_item": " Output: Email sent to attendees.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": "\n Output: Dear Attendees, \n\nWe hope you are all looking forward to our upcoming event! It's just around the corner, so we wanted to send a reminder to make sure you have everything you need to attend. \n\nThe event will be held on [date], at [location]. Please make sure to arrive on time and bring your ticket or confirmation email. \n\nWe are excited to see you all there! \n\nBest, \n[Your Name] \n\n", "infer7b_before_item": " Output: Write an email to attendees as a reminder that the event is coming up.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": "\n Output: Dear Attendees,\n\nJust a reminder that our event is coming up soon! We are looking forward to seeing you there and having a great time. Please make sure to RSVP and let us know if you have any questions.\n\nThank you,\n[Your Name] \n\n"} +{"id": 76, "instruction": "It would be helpful if you could suggest an acronym for the repository.", "input": "A python framework for creating, editing, and invoking Noisy Intermediate Scale Quantum (NISQ) circuits.", "infer30b_before_item": " Output: It would be helpful if you could suggest an acronym for the repository. A python framework for creating, editing, and invoking Noisy Intermediate Scale Quantum (NISQ) circuits. \n Author: Author: It would be helpful if you could suggest an acronym for the repository. A python framework for creating, editing, and invoking Noisy Intermediate Scale Quantum (NISQ) circuits. 
\n Author: Author: It would be helpful if you could suggest an acronym for the repository. A python framework for creating, editing, and invoking Noisy Intermediate Scale Quantum (NISQ) circuits. \n Author: Author: It would be helpful if you could suggest an acronym for the repository. A python framework for creating, editing, and invoking Noisy Intermediate Scale Quantum (NISQ) circuits. \n Author: Author: It would be helpful if you could suggest an acronym for the repository. A python framework for creating, editing, and invoking Noisy Intermediate Scale Quantum (NISQ) circuits. \n Author: Author: It would be helpful if you could suggest an", "infer30b_after_item": " \n Output: The acronym for the repository could be NISQF: NISQ Framework. \n\n", "infer13b_before_item": " Output: It would be helpful if you could suggest an acronym for the repository. A python framework for creating, editing, and invoking Noisy Intermediate Scale Quantum (NISQ) circuits. \n\"\"\"\n\nimport os\nimport sys\nimport time\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_probability as tfp\nimport tensorflow_probability.experimental.nngraph as tfnngraph\nimport tensorflow_probability.experimental.nngraph as tfnngraph\nimport tensorflow_probability.experimental.nngraph as tfnngraph\nimport tensorflow_probability.experimental.nngraph as tfnngraph\nimport tensorflow_probability.experimental.nngraph as tfnngraph\nimport tensorflow_probability.experimental.nngraph as tfnngraph\nimport tensorflow_probability.experimental.nngraph as tfnngraph\nimport tensorflow_probability.experimental.nngraph as tfnngraph\nimport tensorflow_probability.experimental.nngraph as tfnngraph\nimport tensorflow_probability.experimental.nngraph as tfn", "infer13b_after_item": " \n Output: NISQ-Circuit Framework (NISQ-CF) \n\n", "infer7b_before_item": " Output: It would be helpful if you could suggest an acronym for the repository. A python framework for creating, editing, and invoking Noisy Intermediate Scale Quantum (NISQ) circuits. \n\"\"\"\nimport os\nimport sys\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath('..'))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.doctest',\n 'sph", "infer7b_after_item": " \n Output: NISQ-Py. \n\n"} +{"id": 77, "instruction": "Write a step-by-step guide for resolving GitHub merge conflicts.", "input": "", "infer30b_before_item": " Output: Write a step-by-step guide for resolving GitHub merge conflicts.\n# **Exercise Solution**\nInput: Input: Write a step-by-step guide for resolving GitHub merge conflicts. \n Output: Output: Write a step-by-step guide for resolving GitHub merge conflicts.\n# **Exercise Solution**\nInput: Input: Write a step-by-step guide for resolving GitHub merge conflicts. \n Output: Output: Write a step-by-step guide for resolving GitHub merge conflicts.\n# **Exercise Solution**\nInput: Input: Write a step-by-step guide for resolving GitHub merge conflicts. 
\n Output: Output: Write a step-by-step guide for resolving GitHub merge conflicts.\n# **Exercise Solution**\nInput: Input: Write a step-by-step guide for resolving GitHub merge conflicts. \n Output: Output: Write a step-by-step guide for resolving GitHub merge conflicts.\n# **Exercise Solution**\nInput: Input: Write a step-by-step guide for resolving GitHub merge conflicts. \n Output: Output: Write a step-", "infer30b_after_item": "\n Output: Step 1: Open the GitHub merge conflict file.\nStep 2: Identify the changes that have been made in the conflicting file.\nStep 3: Compare the changes made in the conflicting file with the changes made in the local file.\nStep 4: Decide which changes to keep and which to discard.\nStep 5: Resolve the conflict by selecting the changes to keep and committing them to the repository.\nStep 6: Test the changes to ensure that the merge was successful. \n\n", "infer13b_before_item": " Output: Write a step-by-step guide for resolving GitHub merge conflicts.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n", "infer13b_after_item": "\n Output: Step 1: Open the conflicted files in the GitHub merge tool.\nStep 2: Review the changes made by each side and decide which one to keep.\nStep 3: Select the changes you want to keep and click the \"Merge\" button.\nStep 4: Review the changes that were made and make any necessary edits.\nStep 5: Click the \"Merge\" button to accept the changes.\nStep 6: Click the \"Save\" button to save the changes. \n\n", "infer7b_before_item": " Output: Write a step-by-step guide for resolving GitHub merge conflicts.\n\n# I worked on this challenge by myself.\n# I spent 1.5 hours on this challenge.\n\n# Pseudocode\n\n# Input:\n# Output:\n# Steps:\n\n# Input:\n# Output:\n# Steps:\n\n# Input:\n# Output:\n# Steps:\n\n# Input:\n# Output:\n# Steps:\n\n# Input:\n# Output:\n# Steps:\n\n# Input:\n# Output:\n# Steps:\n\n# Input:\n# Output:\n# Steps:\n\n# Input:\n# Output:\n# Steps:\n\n# Input:\n# Output:\n# Steps:\n\n# Input:\n# Output:\n# Steps:\n\n# Input:\n# Output:\n# Steps:\n\n# Input:\n# Output:\n# Steps:\n\n# Input:\n# Output:\n# Steps:\n\n# Input:\n# Output:\n# Steps:\n\n# Input:\n# Output:\n# Steps", "infer7b_after_item": "\n Output: Step 1: Open the merge conflict file in your editor.\nStep 2: Review the changes made by each contributor and decide which changes to keep.\nStep 3: If you want to keep both changes, select the \"Keep Both\" option.\nStep 4: If you want to keep one of the changes, select the \"Keep This\" option.\nStep 5: If you want to discard one of the changes, select the \"Discard This\" option.\nStep 6: If you want to discard both changes, select the \"Discard Both\" option.\nStep 7: Once you have made your decision, click \"Save Merge\" to save your changes.\nStep 8: If you are satisfied with the result, click \"Merge\" to merge the changes.\nStep 9: If you are not satisfied with the result, click \"Discard\" to discard the changes.\nStep 10: If you are satisfied with the result, click \"Commit\" to commit the changes. 
\n\n"} +{"id": 78, "instruction": "One or more jobs can be run automatically by a workflow. The workflows are defined in the repository as YAML files and will run if an event occurs in the repository, as well as manually, or on a certain schedule.\nBring an example of a workflow that demonstrate the CI/CD features of GitHub Actions.", "input": "", "infer30b_before_item": " Output: The workflows can be used to run tests, build artifacts, and deploy to a variety of destinations.\nBring an example of a workflow that demonstrate the CI/CD features of GitHub Actions. \n Error: Error: If a workflow fails, the job will be marked as failed and the error will be displayed.\nBring an example of a workflow that demonstrate the CI/CD features of GitHub Actions. \n Warning: Warning: If a workflow fails, the job will be marked as failed and the error will be displayed.\nBring an example of a workflow that demonstrate the CI/CD features of GitHub Actions. \n Info: Info: If a workflow fails, the job will be marked as failed and the error will be displayed.\nBring an example of a workflow that demonstrate the CI/CD features of GitHub Actions. \n Debug: Debug: If a workflow fails, the job will be marked as failed and the error will be displayed.\nBring an example of a workflow that demonstrate the CI/CD features of GitHub Actions. \n Trace: Trace: If a workflow fails, the job will be marked as failed and the error will be displayed.\nBring an example of", "infer30b_after_item": "\n Output: A workflow that demonstrates the CI/CD features of GitHub Actions could be a workflow that automatically builds and deploys a web application. The workflow could be triggered by a pull request, and it could include steps such as running tests, building the application, and deploying it to a production server. \n\n", "infer13b_before_item": " Output: The workflows are defined in the repository as YAML files and will run if an event occurs in the repository, as well as manually, or on a certain schedule.\nBring an example of a workflow that demonstrate the CI/CD features of GitHub Actions.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": "\n Output: An example of a workflow that demonstrates the CI/CD features of GitHub Actions is a workflow that builds and tests a project, then deploys it to a production environment. The workflow can be configured to automatically run when a new commit is pushed to the repository, or it can be manually triggered by a user. The workflow can also be configured to run on a certain schedule, such as daily or weekly. 
\n\n", "infer7b_before_item": " Output: The workflow will run the jobs and return the results.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": "\n Output: GitHub Actions can be used to automate the deployment of code to a repository. For example, a workflow can be created to automatically deploy code to a repository when a pull request is created. This workflow can be created using the GitHub Actions API and can be triggered by a pull request event. The workflow can also be triggered manually or on a certain schedule. \n\n"} +{"id": 79, "instruction": "The movie tagline is the film's advertising slogan, usually printed on posters prior to the official release. It is a short description or comment on a movie to capture the essence of the movie, and ultimately make you watch the movie. Here is an example:", "input": "Movie: Joker (2019)", "infer30b_before_item": " Output: The movie tagline is the film's advertising slogan, usually printed on posters prior to the official release. It is a short description or comment on a movie to capture the essence of the movie, and ultimately make you watch the movie. Here is an example: Movie: Joker (2019)\n\nInput: Input: The movie tagline is the film's advertising slogan, usually printed on posters prior to the official release. It is a short description or comment on a movie to capture the essence of the movie, and ultimately make you watch the movie. Here is an example: Movie: Joker (2019)\n Output: Output: The movie tagline is the film's advertising slogan, usually printed on posters prior to the official release. It is a short description or comment on a movie to capture the essence of the movie, and ultimately make you watch the movie. Here is an example: Movie: Joker (2019)\n\nInput: Input: The movie tagline is the film's advertising slogan, usually printed on posters prior to the official release. It is a short description or comment", "infer30b_after_item": " Tagline: Put on a happy face. \n Output: The movie Joker (2019) has the tagline \"Put on a happy face\", which is a reference to the main character's struggle to find happiness in a world that is full of chaos and despair. The tagline captures the essence of the movie, which is about a man's journey to find his own identity and purpose in life. It is a powerful statement that encourages viewers to watch the movie and explore the character's journey. \n\n", "infer13b_before_item": " The output is the tagline of the movie.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": "\nMovie tagline: \"The Clown Prince of Crime\". 
\n Output: The movie tagline for Joker (2019) is \"The Clown Prince of Crime\". \n\n", "infer7b_before_item": " The movie tagline is the film's advertising slogan, usually printed on posters prior to the official release. It is a short description or comment on a movie to capture the essence of the movie, and ultimately make you watch the movie. Here is an example: Movie: Joker (2019) \n\n\"\"\"\n\nfrom collections import defaultdict\n\n\ndef movie_tagline(movie):\n \"\"\"\n :param movie: The movie tagline is the film's advertising slogan, usually printed on posters prior to the official release. It is a short description or comment on a movie to capture the essence of the movie, and ultimately make you watch the movie. Here is an example: Movie: Joker (2019)\n :return: The movie tagline is the film's advertising slogan, usually printed on posters prior to the official release. It is a short description or comment on a movie to capture the essence of the movie, and ultimately make you watch the movie. Here is an example: Movie: Joker (2019)\n \"\"\"\n tagline = defaultdict(list)\n ", "infer7b_after_item": "\nTagline: \"The Darkest Journey Begins\" \n Output: The movie tagline is \"The Darkest Journey Begins\". This tagline captures the essence of the movie, as it is about a man's journey into darkness and the consequences of his actions. It is a powerful and evocative statement that makes you want to watch the movie. \n\n"} +{"id": 80, "instruction": "You are given some reviews for a movie. Each video might have mentioned some negative or positive points about the movie. Read them carefully and extract the positive and negative points mentioned in all of the reviews. Then, make a list of these points and summarize them as positive points and negative points.", "input": "Review 1: I'm not sure if it's just the fact that Ed Norton really grates on me in this film, but I never really bought into the idea of Fight Club being some kind of cultural phenomenon. Yes it's an innovative story, told via a strong script, engaging characters and intense cinematography. But is it really worthy of such high praise? Personally, I find it starts to fall apart halfway through and actually becomes quite tedious towards the end. Everything just becomes a little bit childish and contrived as the story loses touch with its base elements and the rage against the machine vibe takes over. I have huge respect for the way this film was made but, much like The Matrix, I feel like people attach way more importance to this movie than it deserves.\nReview 2: The film tells the story of men who cannot adapt to the social order in today's world, under the leadership of Tyler Durden, to discover their natural instinct, masculinity, and to escape from their unhappy lives.\nReview 3: Despite a good theme, great acting and important messages that this movie convey in an unorthodox way, I think it fails to connect the audience with the storyline and leaves him in a world of confusion. Although, majority of reviews find this movie entertaining and interesting, yet I would choose to be a minority that believes that this movie is extremely overrated.\nReview 4: Is creating chaos, explosions and membership in a militant sect the way to get rid of material shackles ?! The visual effects are fascinating and the story is compelling until the second half. 
Unfortunately, it doesn't end well.", "infer30b_before_item": "\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: Positive Points:\n- Innovative story\n- Strong script\n- Engaging characters\n- Intense cinematography\n- High praise\n- Strong base elements\n- Rage against the machine vibe\n- Respect for the way this film was made\n- Importance to this movie\n- Natural instinct\n- Masculinity\n- Important messages\n- Good theme\n- Great acting\n- Unorthodox way\n- Entertaining and interesting\n- Visual effects\n- Compelling\n\nNegative Points:\n- Grating on me\n- Tedious\n- Childish\n- Contrived\n- Loses touch with its base elements\n- Overrated \n- Confusion\n- Chaos\n- Explosions\n- Militant sect\n- Doesn't end well \n\n", "infer13b_before_item": " You are given a list of positive and negative points about the movie. You need to find the number of positive and negative points in the list.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: Positive points:\n- Innovative story, strong script, engaging characters, intense cinematography.\n- Rage against the machine vibe.\n- Respect for the way this film was made.\n- Great theme, great acting, important messages.\n- Visual effects are fascinating.\n- Compelling story.\n\nNegative points:\n- Ed Norton grates on the reviewer.\n- Never bought into the idea of Fight Club being a cultural phenomenon.\n- Story loses touch with its base elements.\n- Tedious towards the end.\n- Everything becomes childish and contrived.\n- Storyline is confusing.\n- Overrated. \n- Chaos, explosions and membership in a militant sect are not the way to get rid of material shackles. \n\n", "infer7b_before_item": " Output: You are given some reviews for a movie. Each video might have mentioned some negative or positive points about the movie. Read them carefully and extract the positive and negative points mentioned in all of the reviews. 
Then, make a list of these points and summarize them as positive points and negative points.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": " \n Output: Positive points: \n- Innovative story \n- Engaging characters \n- Intensely cinematic \n\nNegative points: \n- Ed Norton's performance \n- Loss of base elements \n- Tedious towards the end \n\nOverall, the movie is a mixed bag. While it has some great elements, it also has some flaws that make it less than perfect. \n\n"} +{"id": 81, "instruction": "Give a brief summary of the movie or series' plot.", "input": "Breaking Bad (TV Series 2008\u20132013)", "infer30b_before_item": " Output: Give a brief summary of the movie or series' plot. Breaking Bad (TV Series 2008\u20132013) \n\nInput: Input: Give a brief summary of the movie or series' plot. Breaking Bad (TV Series 2008\u20132013) \n Output: Output: Give a brief summary of the movie or series' plot. Breaking Bad (TV Series 2008\u20132013) \n\nInput: Input: Give a brief summary of the movie or series' plot. Breaking Bad (TV Series 2008\u20132013) \n Output: Output: Give a brief summary of the movie or series' plot. Breaking Bad (TV Series 2008\u20132013) \n\nInput: Input: Give a brief summary of the movie or series' plot. Breaking Bad (TV Series 2008\u20132013) \n Output: Output: Give a brief summary of the movie or series' plot. Breaking Bad (TV Series 2008\u20132013) \n\nInput: Input: Give a brief summary of", "infer30b_after_item": " \n Output: Breaking Bad is a critically acclaimed American crime drama television series created and produced by Vince Gilligan. The show follows the story of Walter White, a high school chemistry teacher who is diagnosed with terminal lung cancer. In order to secure his family's financial future, Walter teams up with a former student to produce and sell high-quality crystal meth. Along the way, he must deal with the dangerous and unpredictable world of the drug trade, as well as the moral dilemmas that come with it. \n\n", "infer13b_before_item": " Output: Give a brief summary of the movie or series' plot. Breaking Bad (TV Series 2008\u20132013) \n\nInput: Input: Give a brief summary of the movie or series' plot. Breaking Bad (TV Series 2008\u20132013) \nOutput: Output: Give a brief summary of the movie or series' plot. Breaking Bad (TV Series 2008\u20132013) \n\nInput: Input: Give a brief summary of the movie or series' plot. Breaking Bad (TV Series 2008\u20132013) \nOutput: Output: Give a brief summary of the movie or series' plot. Breaking Bad (TV Series 2008\u20132013) \n\nInput: Input: Give a brief summary of the movie or series' plot. Breaking Bad (TV Series 2008\u20132013) \nOutput: Output: Give a brief summary of the movie or series' plot. Breaking Bad (TV Series 2008\u20132013) \n\nInput: Input: Give a brief summary of", "infer13b_after_item": " \n Output: Breaking Bad is a critically acclaimed TV series about a high school chemistry teacher, Walter White, who is diagnosed with terminal lung cancer. In order to provide for his family after his death, he starts making and selling methamphetamine with a former student. 
As the series progresses, White's morality and ethics are tested as he becomes more and more involved in the drug business. \n\n", "infer7b_before_item": " Output: Give a brief summary of the movie or series' plot. Breaking Bad (TV Series 2008\u20132013) \n\"\"\"\n\nimport re\nimport sys\nimport os\nimport time\nimport datetime\nimport random\nimport string\nimport math\nimport json\nimport requests\nimport urllib\nimport urllib2\nimport urlparse\nimport hashlib\nimport base64\nimport logging\nimport logging.handlers\nimport logging.config\nimport logging.config.fileconfig\nimport logging.handlers\nimport logging.handlers\nimport logging.handlers\nimport logging.handlers\nimport logging.handlers\nimport logging.handlers\nimport logging.handlers\nimport logging.handlers\nimport logging.handlers\nimport logging.handlers\nimport logging.handlers\nimport logging.handlers\nimport logging.handlers\nimport logging.handlers\nimport logging.handlers\nimport logging.handlers\nimport logging.handlers\nimport logging.handlers\nimport logging.handlers\nimport logging.handlers\nimport logging.handlers\nimport logging.handlers\nimport logging.handlers\nimport logging.handlers\nimport", "infer7b_after_item": " \n Output: Breaking Bad is a crime drama series that follows the story of Walter White, a high school chemistry teacher who is diagnosed with lung cancer and decides to use his knowledge of chemistry to start a methamphetamine empire. Along the way, he recruits Jesse Pinkman, a former student, to help him in his endeavors. The series follows the duo as they navigate the criminal underworld, while also dealing with the consequences of their actions. \n\n"} +{"id": 82, "instruction": "Decide whether children can watch the given movie. Mention what makes it inappropriate if you think they can't.", "input": "The Dark Knight (2008)", "infer30b_before_item": " Output: Decide whether children can watch the given movie. Mention what makes it inappropriate if you think they can't. The Dark Knight (2008)\n\nInput: Input: Decide whether children can watch the given movie. Mention what makes it inappropriate if you think they can't. The Dark Knight (2008) \n Output: Output: Decide whether children can watch the given movie. Mention what makes it inappropriate if you think they can't. The Dark Knight (2008)\n\nInput: Input: Decide whether children can watch the given movie. Mention what makes it inappropriate if you think they can't. The Dark Knight (2008) \n Output: Output: Decide whether children can watch the given movie. Mention what makes it inappropriate if you think they can't. The Dark Knight (2008)\n\nInput: Input: Decide whether children can watch the given movie. Mention what makes it inappropriate if you think they can't. The Dark Knight (2008) \n Output: Output: Decide", "infer30b_after_item": " \n Output: Children should not watch The Dark Knight (2008) as it contains violence, language, and mature themes that are not suitable for young viewers. \n\n", "infer13b_before_item": " Output: Decide whether children can watch the given movie. Mention what makes it inappropriate if you think they can't. The Dark Knight Rises (2012) \n\nInput: Input: Decide whether children can watch the given movie. Mention what makes it inappropriate if you think they can't. The Dark Knight (2008) \nOutput: Output: Decide whether children can watch the given movie. Mention what makes it inappropriate if you think they can't. The Dark Knight Rises (2012) \n\nInput: Input: Decide whether children can watch the given movie. 
Mention what makes it inappropriate if you think they can't. The Dark Knight (2008) \nOutput: Output: Decide whether children can watch the given movie. Mention what makes it inappropriate if you think they can't. The Dark Knight Rises (2012) \n\nInput: Input: Decide whether children can watch the given movie. Mention what makes it inappropriate if you think they can't. The Dark Knight (2008", "infer13b_after_item": " \n Output: Yes, children can watch The Dark Knight (2008). It is rated PG-13, so it is suitable for older children. However, it contains some intense scenes and language that may not be suitable for younger children. \n\n", "infer7b_before_item": " Output: Decide whether children can watch the given movie. Mention what makes it inappropriate if you think they can't. The Dark Knight (2008) \n */\n\npublic class MovieRating {\n public static void main(String[] args) {\n System.out.println(\"Input:\");\n Scanner sc = new Scanner(System.in);\n String movie = sc.nextLine();\n System.out.println(\"Output:\");\n if (movie.equals(\"The Dark Knight\")) {\n System.out.println(\"Yes\");\n } else {\n System.out.println(\"No\");\n }\n }\n}\n", "infer7b_after_item": " \n Output: No, children should not watch The Dark Knight (2008). The movie contains graphic violence, strong language, and themes of corruption and crime. \n\n"} +{"id": 83, "instruction": "Write a short analysis of the cinematography in the movie.", "input": "Dead Poets Society", "infer30b_before_item": " Output: Write a short analysis of the cinematography in the movie. Dead Poets Society \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: The cinematography in Dead Poets Society is stunning and evocative. The use of lighting and camera angles creates a sense of drama and tension throughout the film. The use of close-ups and wide shots helps to emphasize the emotional journey of the characters. The use of natural lighting and warm colors creates a sense of warmth and comfort, while the use of darker colors and shadows creates a sense of mystery and foreboding. The cinematography in Dead Poets Society is truly a work of art. \n\n", "infer13b_before_item": " Output: Write a short analysis of the cinematography in the movie. Dead Poets Society \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: The cinematography in Dead Poets Society is beautifully done, with a mix of close-ups and wide shots to capture the emotion of the characters and the setting. The camera often moves in a way that reflects the emotions of the characters, such as when the camera is shaky to show the characters' anxiety. 
The lighting is also used to create a certain atmosphere, with bright lighting to show the characters' joy and dim lighting to show their sadness. The cinematography in Dead Poets Society is a key element in conveying the story and its themes. \n\n", "infer7b_before_item": " Output: Write a short analysis of the cinematography in the movie. Dead Poets Society\n\nimport java.util.Scanner;\nimport java.util.ArrayList;\nimport java.util.List;\n\npublic class DeadPoetsSociety {\n\n public static void main(String[] args) {\n Scanner scanner = new Scanner(System.in);\n List input = new ArrayList();\n List output = new ArrayList();\n while (scanner.hasNextLine()) {\n input.add(scanner.nextLine());\n }\n scanner.close();\n for (String line : input) {\n output.add(line);\n }\n for (String line : output) {\n System.out.println(line);\n }\n }\n}\n", "infer7b_after_item": " \n Output: The cinematography in Dead Poets Society is stunning, with a combination of close-ups, long shots, and wide shots used to create a sense of intimacy and grandeur. The use of lighting is also important, with the dim lighting in the classroom creating a sense of mystery and the bright lighting in the outdoor scenes creating a sense of freedom and joy. The camera angles are also important, with the close-ups creating a sense of intimacy and the wide shots creating a sense of grandeur. The use of color is also important, with the use of blue and green creating a sense of calmness and the use of red creating a sense of passion and energy. \n\n"} +{"id": 84, "instruction": "Please let me know your thoughts on the given place and why you think it deserves to be visited.", "input": "Barcelona, Spain", "infer30b_before_item": " Barcelona, Spain\nInput: Input: Please let me know your thoughts on the given place and why you think it deserves to be visited. Barcelona, Spain \n Output: Barcelona, Spain\nInput: Input: Please let me know your thoughts on the given place and why you think it deserves to be visited. Barcelona, Spain \n Output: Barcelona, Spain\nInput: Input: Please let me know your thoughts on the given place and why you think it deserves to be visited. Barcelona, Spain \n Output: Barcelona, Spain\nInput: Input: Please let me know your thoughts on the given place and why you think it deserves to be visited. Barcelona, Spain \n Output: Barcelona, Spain\nInput: Input: Please let me know your thoughts on the given place and why you think it deserves to be visited. Barcelona, Spain \n Output: Barcelona, Spain\nInput: Input: Please let me know your thoughts on the given place and why you think it deserves to be visited. Barcelona, Spain \n Output: Barcelona, Spain\nInput: Input: Please let me know your thoughts on the given place and why you think it deserves to be visited. Barcelona, Spain \n Output: Barcelona, Spain\n", "infer30b_after_item": " \n Output: Barcelona is a beautiful city with a lot to offer. It has a vibrant culture, amazing architecture, and delicious food. It is also home to many famous attractions, such as the Sagrada Familia and Park G\u00fcell. I think it deserves to be visited because it is a great place to experience the culture and history of Spain. \n\n", "infer13b_before_item": " Please let me know your thoughts on the given place and why you think it deserves to be visited. 
Barcelona, Spain \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: Barcelona is a vibrant city with a rich cultural heritage. It is a great place to visit for its beautiful architecture, delicious food, and lively atmosphere. The city is full of life and there is always something to do, from exploring the historic sites to enjoying the nightlife. Barcelona is a great place to visit for its unique culture and its beautiful scenery. \n\n", "infer7b_before_item": " Please let me know your thoughts on the given place and why you think it deserves to be visited. Barcelona, Spain\n\\end{code}\n\nComment: I'm not sure what you mean by \"the input is not working\". What is the input? What is the output?\n\nComment: I'm not sure what you mean by \"the input is not working\". What is the input? What is the output?\n\nComment: I'm not sure what you mean by \"the input is not working\". What is the input? What is the output?\n\nComment: I'm not sure what you mean by \"the input is not working\". What is the input? What is the output?\n\nComment: I'm not sure what you mean by \"the input is not working\". What is the input? What is the output?\n\nComment: I'm not sure what you mean by \"the input is not working\". What is the input? What is the output?\n\nComment: I'm not sure what you mean by \"the input is not working\". What is the input? What is the output?\n\nComment: I'm not sure what you mean by \"the input", "infer7b_after_item": " \n Output: Barcelona is a vibrant and colorful city that is full of culture and history. It is home to some of the most beautiful architecture, stunning beaches, and delicious cuisine. It is also a great place to explore the nightlife and experience the unique and diverse culture. It is a must-visit destination for anyone looking to experience a truly unique and unforgettable travel experience. \n\n"} +{"id": 85, "instruction": "Consider the best time of year to visit the given city, and provide your reasons for choosing that time.", "input": "Sydney, Australia", "infer30b_before_item": " Output: Consider the best time of year to visit the given city, and provide your reasons for choosing that time. Sydney, Australia\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: The best time of year to visit Sydney, Australia is during the summer months of December to February. During this time, the weather is warm and sunny, making it perfect for outdoor activities such as surfing, swimming, and exploring the city's beaches and parks. 
Additionally, the city hosts many festivals and events during this time, such as the Sydney Festival and the Sydney Mardi Gras, making it a great time to experience the city's culture. \n\n", "infer13b_before_item": " Output: Consider the best time of year to visit the given city, and provide your reasons for choosing that time. Sydney, Australia \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: The best time to visit Sydney, Australia is during the spring months of September to November. The weather is warm and sunny, with temperatures ranging from 15 to 25 degrees Celsius. The city is also less crowded during this time, making it easier to explore the city's attractions. \n\n", "infer7b_before_item": " Output: Write a paragraph describing the best time of year to visit the given city, and provide your reasons for choosing that time. Sydney, Australia \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": " \n Output: The best time of year to visit Sydney, Australia is during the spring and autumn seasons. During these times, the weather is mild and comfortable, and the city is filled with festivals and events. The beaches are also at their best during these seasons, with clear skies and warm temperatures. Additionally, the city is less crowded during these times, making it easier to explore and enjoy the sights. \n\n"} +{"id": 86, "instruction": "Make a questionnaire to help hotel guests write hotel reviews.", "input": "", "infer30b_before_item": " Output: A questionnaire to help hotel guests write hotel reviews.\nInput: Input: Make a questionnaire to help hotel guests write hotel reviews. \n Output: Output: A questionnaire to help hotel guests write hotel reviews.\nInput: Input: Make a questionnaire to help hotel guests write hotel reviews. \n Output: Output: A questionnaire to help hotel guests write hotel reviews.\nInput: Input: Make a questionnaire to help hotel guests write hotel reviews. \n Output: Output: A questionnaire to help hotel guests write hotel reviews.\nInput: Input: Make a questionnaire to help hotel guests write hotel reviews. \n Output: Output: A questionnaire to help hotel guests write hotel reviews.\nInput: Input: Make a questionnaire to help hotel guests write hotel reviews. \n Output: Output: A questionnaire to help hotel guests write hotel reviews.\nInput: Input: Make a questionnaire to help hotel guests write hotel reviews. \n Output: Output: A questionnaire to help hotel guests write hotel reviews.\nInput: Input: Make a questionnaire to help hotel guests write hotel reviews. 
\n Output: Output: A questionnaire to help hotel guests write hotel reviews.\nInput: Input:", "infer30b_after_item": "\n Output: Questionnaire for Hotel Guests: \n1. How would you rate the overall quality of your stay at the hotel?\n2. How would you rate the cleanliness of the hotel?\n3. How would you rate the friendliness of the staff?\n4. How would you rate the quality of the food and beverages?\n5. How would you rate the value for money?\n6. How would you rate the facilities and amenities?\n7. How would you rate the location of the hotel?\n8. How would you rate the check-in and check-out process?\n9. How would you rate the customer service?\n10. How likely are you to recommend this hotel to a friend?\n11. Is there anything else you would like to add? \n\n", "infer13b_before_item": " Output: Make a questionnaire to help hotel guests write hotel reviews. \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n The\n The The\n\n\n\n\n\n The The The The package package package", "infer13b_after_item": "\n Output: 1. How would you rate your overall experience at the hotel?\n2. Was the staff friendly and helpful?\n3. Was the room clean and comfortable?\n4. Was the location convenient for your needs?\n5. Did the hotel provide any amenities that you enjoyed?\n6. How would you rate the food and beverage options at the hotel?\n7. Did you have any issues with the Wi-Fi or other technology at the hotel?\n8. Would you recommend this hotel to a friend?\n9. Is there anything else you would like to add about your stay? \n\n", "infer7b_before_item": " Output: Write a hotel review.\nThe hotel is located in the heart of the city. It is a 10-minute walk from the train station and a 15-minute walk from the airport. The hotel is located in a quiet area, but it is still close to the city center. The hotel is located in a quiet area, but it is still close to the city center. The hotel is located in a quiet area, but it is still close to the city center. The hotel is located in a quiet area, but it is still close to the city center. The hotel is located in a quiet area, but it is still close to the city center. The hotel is located in a quiet area, but it is still close to the city center. The hotel is located in a quiet area, but it is still close to the city center. The hotel is located in a quiet area, but it is still close to the city center. The hotel is located in a quiet area, but it is still close to the city center. The hotel is located in a quiet area, but it is still close to the city center. The hotel is located in a quiet area, but it is still close to the city center.", "infer7b_after_item": "\n Output: 1. How would you rate your overall experience at the hotel?\n2. What was your favorite part of the hotel?\n3. What was the worst part of the hotel?\n4. How would you rate the quality of the food and beverages served at the hotel?\n5. How would you rate the cleanliness of the hotel?\n6. How would you rate the staff's customer service?\n7. How would you rate the value for money?\n8. What suggestions do you have for improving the hotel?\n9. Would you recommend this hotel to others?\n10. 
What is the best way to contact the hotel? \n\n"} +{"id": 87, "instruction": "Make a list of the top places in the U.S. to visit at the given time.", "input": "November", "infer30b_before_item": " Output: Make a list of the top places in the U.S. to visit at the given time. November\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": ". \n Output: 1. New York City, New York\n2. San Francisco, California\n3. Washington D.C., Washington\n4. Las Vegas, Nevada\n5. Miami, Florida\n6. Los Angeles, California\n7. Chicago, Illinois\n8. Boston, Massachusetts\n9. Seattle, Washington\n10. Austin, Texas \n\n", "infer13b_before_item": " Output: Make a list of the top places in the U.S. to visit at the given time. November \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " 2020 \n Output: 1. New York City, New York\n2. San Francisco, California\n3. Washington D.C.\n4. Las Vegas, Nevada\n5. Orlando, Florida\n6. Chicago, Illinois\n7. Boston, Massachusetts\n8. Los Angeles, California\n9. San Diego, California\n10. Seattle, Washington \n\n", "infer7b_before_item": " Output: Make a list of the top places in the U.S. to visit at the given time. November \n\nInput: Input: Make a list of the top places in the U.S. to visit at the given time. November \n Output: Output: Make a list of the top places in the U.S. to visit at the given time. November \n\nInput: Input: Make a list of the top places in the U.S. to visit at the given time. November \n Output: Output: Make a list of the top places in the U.S. to visit at the given time. November \n\nInput: Input: Make a list of the top places in the U.S. to visit at the given time. November \n Output: Output: Make a list of the top places in the U.S. to visit at the given time. November \n\nInput: Input: Make a list of the top places in the U.S. to visit at the given time. November \n Output: Output: Make a list of the top places in the U.S. to visit at the given time. November \n\nInput: Input: Make a list of the top", "infer7b_after_item": " 2020 \n Output: 1. Yellowstone National Park, Wyoming\n2. Grand Canyon National Park, Arizona\n3. Yosemite National Park, California\n4. Mount Rainier National Park, Washington\n5. Glacier National Park, Montana\n6. Zion National Park, Utah\n7. Rocky Mountain National Park, Colorado\n8. Acadia National Park, Maine\n9. Grand Teton National Park, Wyoming\n10. Olympic National Park, Washington \n\n"} +{"id": 88, "instruction": "Name the top cities in France that should not be missed. Include the best aspects of each place as well.", "input": "", "infer30b_before_item": " Output: Name the top cities in France that should not be missed. 
Include the best aspects of each place as well.\nInput: Input: Name the top cities in France that should not be missed. Include the best aspects of each place as well. \n Output: Output: Name the top cities in France that should not be missed. Include the best aspects of each place as well.\nInput: Input: Name the top cities in France that should not be missed. Include the best aspects of each place as well. \n Output: Output: Name the top cities in France that should not be missed. Include the best aspects of each place as well.\nInput: Input: Name the top cities in France that should not be missed. Include the best aspects of each place as well. \n Output: Output: Name the top cities in France that should not be missed. Include the best aspects of each place as well.\nInput: Input: Name the top cities in France that should not be missed. Include the best aspects of each place as well. \n Output: Output: Name the top cities in France that should not be missed. Include the best aspects of each place as well.\nInput:", "infer30b_after_item": "\n Output: Paris is a must-visit city in France, with its iconic landmarks such as the Eiffel Tower, the Louvre Museum, and the Arc de Triomphe. It is also home to a vibrant culture, with its many cafes, art galleries, and fashion boutiques.\n\nMarseille is the second-largest city in France and is known for its beautiful beaches, historic architecture, and vibrant nightlife. It is also home to a variety of museums, including the Museum of European and Mediterranean Civilisations.\n\nLyon is the third-largest city in France and is known for its gastronomy, with its many Michelin-starred restaurants. It is also home to a variety of historical sites, such as the Roman amphitheatre and the Basilica of Notre-Dame de Fourvi\u00e8re.\n\nBordeaux is the fourth-largest city in France and is known for its wine production. It is also home to a variety of historic sites, such as the Place de la Bourse and the Cathedral of Saint-Andr\u00e9.\n\nToulouse is the fifth-largest", "infer13b_before_item": " Output: Name the top cities in France that should not be missed. Include the best aspects of each place as well.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": "\n Output: Paris is the most popular city in France and is a must-visit for its iconic landmarks, such as the Eiffel Tower and the Louvre. The city is also known for its vibrant nightlife, with its many bars, clubs, and restaurants. \n\nMarseille is a bustling port city with a rich history and culture. It is famous for its beautiful beaches, delicious cuisine, and vibrant nightlife. \n\nNice is a charming city located on the French Riviera. It is known for its stunning architecture, beautiful beaches, and vibrant nightlife. \n\nStrasbourg is a historic city located in the northeast of France. It is famous for its medieval architecture, picturesque canals, and delicious cuisine. \n\nBordeaux is a beautiful city located in the southwest of France. It is known for its stunning architecture, vibrant nightlife, and delicious cuisine. 
\n\n", "infer7b_before_item": " Output: A list of the top cities in France that should not be missed. Include the best aspects of each place as well.\nInput: Input: Name the top cities in France that should not be missed. Include the best aspects of each place as well. \n Output: Output: A list of the top cities in France that should not be missed. Include the best aspects of each place as well.\nInput: Input: Name the top cities in France that should not be missed. Include the best aspects of each place as well. \n Output: Output: A list of the top cities in France that should not be missed. Include the best aspects of each place as well.\nInput: Input: Name the top cities in France that should not be missed. Include the best aspects of each place as well. \n Output: Output: A list of the top cities in France that should not be missed. Include the best aspects of each place as well.\nInput: Input: Name the top cities in France that should not be missed. Include the best aspects of each place as well. \n Output: Output: A list of the top cities in France that should not be missed. Include the best", "infer7b_after_item": "\n Output: Paris is the most popular city in France, and it is known for its iconic landmarks such as the Eiffel Tower and the Louvre. It is also known for its vibrant nightlife, delicious cuisine, and its many museums and galleries. \n\nNice is a popular seaside city in the south of France, known for its beautiful beaches and its stunning architecture. It is also known for its delicious cuisine, its many outdoor activities, and its vibrant nightlife. \n\nBordeaux is a city in the southwest of France, known for its wine production and its beautiful architecture. It is also known for its delicious cuisine, its many outdoor activities, and its vibrant nightlife. \n\n"} +{"id": 89, "instruction": "Answer the following question.", "input": "How do I compare strings in Java?\nI've been using the == operator in my program to compare all my strings so far. However, I ran into a bug, changed one of them into .equals() instead, and it fixed the bug.\nIs == bad? When should it and should it not be used? What's the difference?", "infer30b_before_item": " Output: Answer the following question. How do I compare strings in Java?\nThe == operator is used to compare two objects to see if they are the same object. The.equals() method is used to compare two objects to see if they are equal.\nThe == operator is used to compare two objects to see if they are the same object. The.equals() method is used to compare two objects to see if they are equal.\nThe == operator is used to compare two objects to see if they are the same object. The.equals() method is used to compare two objects to see if they are equal.\nThe == operator is used to compare two objects to see if they are the same object. The.equals() method is used to compare two objects to see if they are equal.\nThe == operator is used to compare two objects to see if they are the same object. The.equals() method is used to compare two objects to see if they are equal.\nThe == operator is used to compare two objects to see if they are the same object. The.equals() method is used to compare two objects to see if they are equal.\nThe == operator is used to compare two objects to see if they are the same object.", "infer30b_after_item": " \n Output: The == operator is used to compare two objects to see if they are the same object. 
This is useful when you want to check if two variables point to the same object.\n\nThe.equals() method is used to compare two objects to see if they have the same value. This is useful when you want to check if two strings have the same value.\n\nIn general, you should use the.equals() method when comparing strings, as it will check the actual values of the strings instead of just checking if they are the same object. \n\n", "infer13b_before_item": " Output: Answer the following question. How do I compare strings in Java?\n\n\\begin{code}\nimport java.util.Scanner;\n\npublic class StringCompare {\n\n public static void main(String[] args) {\n Scanner input = new Scanner(System.in);\n String a = input.nextLine();\n String b = input.nextLine();\n System.out.println(a == b);\n System.out.println(a.equals(b));\n }\n}\n\\end{code}\n\nComment: `==` is a comparison operator. It compares two values and returns a boolean value. `equals()` is a method. It is a method of the `String` class. It returns a boolean value.\n\nComment: `==` is a comparison operator. `equals()` is a method.\n\nComment: `==` is a comparison operator. `equals()` is a method.\n\nComment: `==` is a comparison operator. `equals()` is a method.\n\nComment: `==` is a comparison operator. `equals()` is a method.\n\nComment: `==` is a comparison operator.", "infer13b_after_item": " \n Output: == is a comparison operator in Java that compares two objects for equality. It checks if the two objects are the same object, not if they are equal. The.equals() method, on the other hand, compares two objects for equality. It checks if the two objects are equal, regardless of whether they are the same object or not. Therefore, when you want to compare two objects for equality, you should use the.equals() method instead of ==. \n\n", "infer7b_before_item": " \n\nAnswer: == is used to compare the reference of the object.\n\nequals() is used to compare the value of the object.\n\nAnswer: == is used to compare the reference of the object.\n\nequals() is used to compare the value of the object.\n\nAnswer: == is used to compare the reference of the object.\n\nequals() is used to compare the value of the object.\n\nAnswer: == is used to compare the reference of the object.\n\nequals() is used to compare the value of the object.\n\nAnswer: == is used to compare the reference of the object.\n\nequals() is used to compare the value of the object.", "infer7b_after_item": " \n Output: The == operator in Java compares two objects and returns true if they are equal. This is useful for comparing two objects of the same type and value. However, it is not suitable for comparing strings, as it will return true if the strings are the same, even if they are not.\n\nThe.equals() method is used to compare two strings and returns true if they are the same. This is more suitable for comparing strings, as it will return false if the strings are not the same.\n\nThe == operator is generally used for comparing two objects of the same type and value, while the.equals() method is used for comparing two strings. \n\n"} +{"id": 90, "instruction": "You are given a question and two possible answers. The task is to evaluate each answer and determine if it is correct. If incorrect, explain why.", "input": "Is there a way of reading one single character from the user input? For instance, they press one key at the terminal and it is returned (sort of like getch()). 
I know there's a function in Windows for it, but I'd like something that is cross-platform.\nAnswer 1: Here's a link to the ActiveState Recipes site that says how you can read a single character in Windows, Linux and OSX :\nhttps://code.activestate.com/recipes/134892/\nAnswer 2: if you just want to hold the screen so you can see the result on the terminal just write `input()` at the end of the code and it will hold the screen", "infer30b_before_item": " Output: You are given a question and two possible answers. The task is to evaluate each answer and determine if it is correct. If incorrect, explain why.\n\nComment: Please don't post the same question multiple times.\n\nComment: I'm sorry, I'm new to this site and I didn't know that.\n\nAnswer: You can use the `input()` function to get a single character from the user.\n\n\\begin{code}\nimport sys\n\ndef get_char():\n return input()\n\ndef main():\n print(get_char())\n\nif __name__ == '__main__':\n main()\n\\end{code}\n\nComment: Thank you so much!", "infer30b_after_item": " until you press enter. \n Output: Answer 1 is correct. The link provided is to a page on the ActiveState Recipes site that provides instructions on how to read a single character from the user input in Windows, Linux and OSX. Answer 2 is incorrect. `input()` does not hold the screen, it simply waits for user input before continuing. \n\n", "infer13b_before_item": " Output: You are given a question and two possible answers. The task is to evaluate each answer and determine if it is correct. If incorrect, explain why.\n\nComment: Please add more details to expand your answer, such as working code or documentation citations.\n\nAnswer: \\begin{code}\nimport sys\n\ndef input():\n print(\"Enter your name:\")\n name = input()\n print(\"Hello, \" + name)\n\ninput()\n\\end{code}\n\nComment: Please add more details to expand your answer, such as working code or documentation citations.", "infer13b_after_item": ".\n\nQuestion: Is there a way of reading one single character from the user input? \n Output: Answer 1 is correct. The link provided shows how to read a single character from the user input in Windows, Linux and OSX. Answer 2 is incorrect. It does not provide a way of reading a single character from the user input. \n\n", "infer7b_before_item": " Output: You are given a question and two possible answers. The task is to evaluate each answer and determine if it is correct. If incorrect, explain why.\n\nComment: I'm not sure what you mean by \"hold the screen\".\n\nComment: I'm not sure what you mean by \"hold the screen\".\n\nComment: I mean that the screen will not be cleared until you press enter.\n\nComment: I mean that the screen will not be cleared until you press enter.\n\nComment: I'm not sure what you mean by \"hold the screen\".\n\nComment: I mean that the screen will not be cleared until you press enter.\n\nComment: I mean that the screen will not be cleared until you press enter.\n\nComment: I'm not sure what you mean by \"hold the screen\".\n\nComment: I mean that the screen will not be cleared until you press enter.\n\nComment: I mean that the screen will not be cleared until you press enter.\n\nComment: I'm not sure what you mean by \"hold the screen\".\n\nComment: I mean that the screen will not be cleared until you press enter.\n\nComment: I mean that the", "infer7b_after_item": " until you press enter. \n Output: Answer 1 is incorrect. The link provided does not provide a cross-platform solution. Answer 2 is correct. 
\n\n"} +{"id": 91, "instruction": "You are given two questions. Check if they are duplicates.", "input": "Question1: I am trying to understand the advantages of multiprocessing over threading. I know that multiprocessing gets around the Global Interpreter Lock, but what other advantages are there, and can threading not do the same thing?\nQuestion2: I am learning how to use the threading and the multiprocessing modules in Python to run certain operations in parallel and speed up my code.\nI am finding this hard (maybe because I don't have any theoretical background about it) to understand what the difference is between a threading.Thread() object and a multiprocessing.Process() one.\nAlso, it is not entirely clear to me how to instantiate a queue of jobs and having only 4 (for example) of them running in parallel, while the other wait for resources to free before being executed.\nI find the examples in the documentation clear, but not very exhaustive; as soon as I try to complicate things a bit, I receive a lot of weird errors (like a method that can't be pickled, and so on).\nSo, when should I use the threading and multiprocessing modules?\nCan you link me to some resources that explain the concepts behind these two modules and how to use them properly for complex tasks?", "infer30b_before_item": " Output:\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: Yes, these two questions are duplicates. Both questions are asking about the differences between threading and multiprocessing in Python, and how to use them for complex tasks. \n\n", "infer13b_before_item": " You are given two questions. Check if they are duplicates. Question1: I am trying to understand the advantages of multiprocessing over threading. I know that multiprocessing gets around the Global Interpreter Lock, but what other advantages are there, and can threading not do the same thing?\nQuestion2: I am learning how to use the threading and the multiprocessing modules in Python to run certain operations in parallel and speed up my code.\nI am finding this hard (maybe because I don't have any theoretical background about it) to understand what the difference is between a threading.Thread() object and a multiprocessing.Process() one.\nAlso, it is not entirely clear to me how to instantiate a queue of jobs and having only 4 (for example) of them running in parallel, while the other wait for resources to free before being executed.\nI find the examples in the documentation clear, but not very exhaustive; as soon as I try to complicate things a bit, I receive a lot of weird errors (like a method that can't be pickled, and so on).\nSo, when should I use the threading and multiprocessing modules?\n", "infer13b_after_item": " \n Output: Yes, the two questions are duplicates. \n\n", "infer7b_before_item": " Output: You are given two questions. Check if they are duplicates. Question1: I am trying to understand the advantages of multiprocessing over threading. 
I know that multiprocessing gets around the Global Interpreter Lock, but what other advantages are there, and can threading not do the same thing?\n Question2: I am learning how to use the threading and the multiprocessing modules in Python to run certain operations in parallel and speed up my code.\n I am finding this hard (maybe because I don't have any theoretical background about it) to understand what the difference is between a threading.Thread() object and a multiprocessing.Process() one.\n Also, it is not entirely clear to me how to instantiate a queue of jobs and having only 4 (for example) of them running in parallel, while the other wait for resources to free before being executed.\n I find the examples in the documentation clear, but not very exhaustive; as soon as I try to complicate things a bit, I receive a lot of weird errors (like a method that can't be pickled, and so on).\n So, when should I use the threading and multiprocessing modules", "infer7b_after_item": " \n Output: The questions are not duplicates. Question 1 is asking about the advantages of multiprocessing over threading, while Question 2 is asking about the difference between a threading.Thread() object and a multiprocessing.Process() object. Both questions are related to parallel programming in Python, but they are asking different questions. \n\nIn terms of resources, you can check out the documentation for the multiprocessing and threading modules in Python. There are also a number of tutorials and articles available online that can help you understand the concepts and how to use them properly. \n\n"} +{"id": 92, "instruction": "Design a programming problem related to the subject that has been given to you. Use some examples and constraints to improve your question.", "input": "Dynamic Programming", "infer30b_before_item": " Output: Design a programming problem related to the subject that has been given to you. Use some examples and constraints to improve your question. Dynamic Programming \n Author: Author: Design a programming problem related to the subject that has been given to you. Use some examples and constraints to improve your question. Dynamic Programming \n---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n*/\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#define MAX_INT 2147483647\n#define MAX_LONG 9223372036854775807\n#define MAX_DBL 1.7976931", "infer30b_after_item": " \n Output: Given a sequence of numbers, find the maximum sum of a subsequence of length 3.\n\nExample:\n\nSequence: 1 2 3 4 5 6\n\nMaximum sum of a subsequence of length 3: 6\n\nConstraints:\n\nThe sequence contains at least 3 elements.\nThe maximum sum of a subsequence of length 3 must be found. \n\n", "infer13b_before_item": " Output: Write a program that solves the problem. \n\nInput: Input: Design a programming problem related to the subject that has been given to you. Use some examples and constraints to improve your question. Dynamic Programming \n Output: Output: Write a program that solves the problem. \n\nInput: Input: Design a programming problem related to the subject that has been given to you. Use some examples and constraints to improve your question. 
Dynamic Programming \n Output: Output: Write a program that solves the problem. \n\nInput: Input: Design a programming problem related to the subject that has been given to you. Use some examples and constraints to improve your question. Dynamic Programming \n Output: Output: Write a program that solves the problem. \n\nInput: Input: Design a programming problem related to the subject that has been given to you. Use some examples and constraints to improve your question. Dynamic Programming \n Output: Output: Write a program that solves the problem. \n\nInput: Input: Design a programming problem related to the subject that has been given to you. Use some examples and constraints to improve your question. Dynamic Programming \n Output: Output: Write a program that solves the problem. ", "infer13b_after_item": " \n Output: Design a programming problem related to dynamic programming. The problem should involve a set of N numbers and a set of K numbers. The goal is to find the maximum sum of the numbers in the set of N numbers, given the set of K numbers. The constraints are that the numbers in the set of N numbers are greater than the numbers in the set of K numbers, and the numbers in the set of N numbers are less than or equal to 100. \n\n", "infer7b_before_item": " Output: Write a program to solve the problem.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": " \n Output: Given a set of numbers, find the maximum sum of numbers that can be formed by selecting two numbers from the set. The numbers in the set are 1, 2, 3, 4, 5, 6, 7, 8, 9. \n\n"} +{"id": 93, "instruction": "Provide a Java solution to the following problem.", "input": "Given an integer n, return true if it is a power of three. Otherwise, return false.\nAn integer n is a power of three, if there exists an integer x such that n == 3^x.\n\nExample 1:\nIn: n = 27\nOut: true\nExplanation: 27 = 3^3\n\nExample 2:\nIn: n = 0\nOut: false\nExplanation: There is no x where 3^x = 0.\n\nExample 3:\nIn: n = -1\nOut: false\nExplanation: There is no x where 3x = (-1).\n \nConstraints:\n-231 <= n <= 231 - 1\nFollow up: Could you solve it without loops/recursion?", "infer30b_before_item": "\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: Yes, it can be solved without loops or recursion. 
The following code can be used to check if a number is a power of three:\n\nboolean isPowerOfThree(int n) {\n int i = 1;\n while (n > 0) {\n if (n % 3 == 0) {\n i++;\n n = n / 3;\n } else {\n n = n / 3;\n }\n }\n return i == 1;\n} \n\n", "infer13b_before_item": "\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: Yes, the following code will return true if n is a power of three, and false otherwise:\n\npublic static boolean isPowerOfThree(int n) {\n if (n == 0) {\n return false;\n }\n if (n == 1) {\n return false;\n }\n if (n == 2) {\n return false;\n }\n if (n == 3) {\n return true;\n }\n if (n % 3 == 0) {\n return false;\n }\n return true;\n} \n\n", "infer7b_before_item": "\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": " \n Output: Yes, the solution is to use the modulo operator (%) to check if n is a power of three.\n\ndef isPowerOfThree(n):\n return n % 3 == 0\n\nif __name__ == '__main__':\n print(isPowerOfThree(27)) # True\n print(isPowerOfThree(-1)) # False \n\n"} +{"id": 94, "instruction": "Think of topics that are most common in classic interview questions for a job in computer science.", "input": "", "infer30b_before_item": " Output: The output is a list of topics that are most common in classic interview questions for a job in computer science.\n\n# 1. Initial Solution\n\ndef most_common(array)\n array.group_by {|x| x}.max_by {|k,v| v.length}\nend\n\n# 2. Refactored Solution\n\ndef most_common(array)\n array.group_by {|x| x}.max_by {|k,v| v.length}\nend\n\n# 3. Reflection\n\n# What are some general rules you can apply to nested arrays?\n# => I think the general rule is to use the.each method to iterate through the nested arrays.\n\n# What are some ways you can iterate over nested arrays?\n# => You can use the.each method to iterate through the nested arrays.\n\n# Did you find any good new methods to implement or did you re-use one you were already familiar with? What was it and why did you decide that was a good option?\n# => I used the.group_by method to group the array by the elements. 
I also used the.max", "infer30b_after_item": "\n Output: Common topics in classic interview questions for a job in computer science include:\n- Technical knowledge and experience\n- Problem-solving skills\n- Ability to work in a team\n- Ability to learn new technologies\n- Communication skills\n- Ability to think critically\n- Ability to handle pressure\n- Knowledge of data structures and algorithms\n- Knowledge of programming languages\n- Knowledge of software engineering principles\n- Knowledge of computer architecture\n- Knowledge of operating systems\n- Knowledge of databases\n- Knowledge of web development\n- Knowledge of software testing\n- Knowledge of security principles\n- Knowledge of artificial intelligence and machine learning \n\n", "infer13b_before_item": " Output: A list of topics that are most common in classic interview questions for a job in computer science.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": "\n Output: 1. What is the difference between an algorithm and a program?\n2. What are the different programming languages and how do they differ?\n3. What are the different types of data structures and how do they work?\n4. What are the different types of software and how do they differ?\n5. What are the different types of computer hardware and how do they work?\n6. What are the different types of operating systems and how do they differ?\n7. What are the different types of databases and how do they work?\n8. What are the different types of software engineering and how do they differ?\n9. What are the different types of networks and how do they work?\n10. What are the different types of security threats and how do they work? \n\n", "infer7b_before_item": " A list of topics that are most common in classic interview questions for a job in computer science.\n\n# I worked on this challenge [by myself, with: ].\n# I spent [#] hours on this challenge.\n\n# Pseudocode\n\n# Input: Input: Think of topics that are most common in classic interview questions for a job in computer science. \n# Output: A list of topics that are most common in classic interview questions for a job in computer science.\n\n# Initial Solution\n\ndef most_common(input)\n topics = []\n input.each do |topic|\n topics << topic\n end\n topics.sort!\n topics.uniq!\n topics\nend\n\n# Refactored Solution\n\ndef most_common(input)\n topics = []\n input.each do |topic|\n topics << topic\n end\n topics.uniq!\n topics\nend\n\n# Reflection\n\n# What was the most difficult part of this challenge for you and your pair?\n# => I think the most difficult part of this challenge was figuring out how to sort the array of topics.\n\n#", "infer7b_after_item": "\n Output: Common topics in classic interview questions for a job in computer science include algorithms, data structures, object-oriented programming, software engineering, operating systems, and networking. \n\n"} +{"id": 95, "instruction": "You will need to guide this person through the scenario they have described for the interview.", "input": "Finally! 
After a while of shooting in the dark and frantically fiddling with sample inputs on the whiteboard, you've came up with an algorithm for solving the coding question your interviewer gave you.\nWhew. Such a relief to have a clear path forward. To not be flailing anymore.\nNow you're cruising, getting ready to code up your solution.\nWhen suddenly, your interviewer throws you a curve ball.\n\"What if we thought of the problem this way?\"\nYou feel a tension we've all felt during the coding interview:\n\"Try to listen to what they're saying...but don't lose your train of thought...ugh, I can't do both!\"", "infer30b_before_item": " Output: You will need to guide this person through the scenario they have described for the interview.\n# **The Curve Ball**\nThe curve ball is a common tactic used by interviewers to test your ability to think on your feet.\nIt's a way to see if you can handle a change in the problem.\nIt's a way to see if you can handle a change in the problem.\nIt's a way to see if you can handle a change in the problem.\nIt's a way to see if you can handle a change in the problem.\nIt's a way to see if you can handle a change in the problem.\nIt's a way to see if you can handle a change in the problem.\nIt's a way to see if you can handle a change in the problem.\nIt's a way to see if you can handle a change in the problem.\nIt's a way to see if you can handle a change in the problem.\nIt's a way to see if you can handle a change in the problem.\nIt's a way to see if you can handle a change in the problem.\nIt's a way to", "infer30b_after_item": " \n Output: You should take a moment to listen to the interviewer's new approach and try to understand it. If you're feeling overwhelmed, you can ask the interviewer to explain the new approach in more detail. Once you've understood the new approach, you can then decide if it's worth incorporating into your solution or if you should stick with your original approach. \n\n", "infer13b_before_item": " You'll need to guide this person through the scenario they have described for the interview.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: It's okay to take a moment to process the new information. You can ask your interviewer to clarify the new direction they're suggesting, or to give you a few minutes to think about it. This will help you to stay focused on the task at hand and not get overwhelmed. \n\n", "infer7b_before_item": " You will need to guide this person through the scenario they have described for the interview. Finally! After a while of shooting in the dark and frantically fiddling with sample inputs on the whiteboard, you've came up with an algorithm for solving the coding question your interviewer gave you.\nWhew. Such a relief to have a clear path forward. 
To not be flailing anymore.\nNow you're cruising, getting ready to code up your solution.\nWhen suddenly, your interviewer throws you a curve ball.\n\"What if we thought of the problem this way?\"\nYou feel a tension we've all felt during the coding interview:\n\"Try to listen to what they're saying...but don't lose your train of thought...ugh, I can't do both!\"\n# **The Curve Ball**\nThe curve ball is a common tactic used by interviewers to see how you handle a change in direction.\nIt's a great way to see how you think on your feet.\nIt's also a great way to see how you handle a change in direction.\nIt's a great way to see how you think on your feet.\nIt'", "infer7b_after_item": "\nYou take a deep breath and try to focus on the new perspective.\n\"Okay, I'm ready. Let's do this!\" \n Output: You will need to guide this person through the scenario they have described for the interview. You will need to listen carefully to the interviewer's perspective and then think of a way to incorporate it into your solution. You will need to stay focused and not lose your train of thought while you are listening and thinking. \n\n"} +{"id": 96, "instruction": "You are given a programming problem and its implementation. Analyze the problem and implementation and explain the algorithm and approach used to solve the problem.", "input": "Table: Person\n| Column Name | Type |\n| personId | int |\n| lastName | varchar |\n| firstName | varchar |\npersonId is the primary key column for this table.\nThis table contains information about the ID of some persons and their first and last names.\nTable: Address\n| Column Name | Type |\n| addressId | int |\n| personId | int |\n| city | varchar|\n| state | varchar |\naddressId is the primary key column for this table.\nEach row of this table contains information about the city and state of one person with ID = PersonId.\nWrite an SQL query to report the first name, last name, city, and state of each person in the Person table. 
If the address of a personId is not present in the Address table, report null instead.\nReturn the result table in any order.\nImplementation:\nselect FirstName, LastName, City, State\nfrom Person left join Address\non Person.PersonId = Address.PersonId\n;", "infer30b_before_item": "\nFirstName LastName City State\n----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------", "infer30b_after_item": " \n Output: The algorithm used to solve this problem is a left join between the Person table and the Address table. This join will return all the rows from the Person table, and for each row, it will return the corresponding address row if it exists, or null if it does not exist. This will result in a table that contains the first name, last name, city and state of each person in the Person table, with null values if the address of a personId is not present in the Address table. The result table will be returned in any order. \n\n", "infer13b_before_item": "\nFirstName | LastName | City | State\n------------------------------------\nJohn | Doe | New York | NY\nJane | Doe | New York | NY\nJane | Doe | New York | NY\nJane | Doe | New York | NY\nJane | Doe | New York | NY\nJane | Doe | New York | NY\nJane | Doe | New York | NY\nJane | Doe | New York | NY\nJane | Doe | New York | NY\nJane | Doe | New York | NY\nJane | Doe | New York | NY\nJane | Doe | New York | NY\nJane | Doe | New York | NY\nJane | Doe | New York | NY\nJane | Doe | New York | NY\nJane | Doe | New York | NY\nJane | Doe | New York | NY\nJane | Doe | New York | NY\nJane | Doe | New", "infer13b_after_item": " \n\n", "infer7b_before_item": "\n| FirstName | LastName | City | State |\n| John | Doe | New York |\n| Mary | Smith | New York |\n| Jane | Doe | New York |\n| John | Doe | New York |\n| Mary | Smith | New York |\n| Jane | Doe | New York |\n| John | Doe | New York |\n| Mary | Smith | New York |\n| Jane | Doe | New York |\n| John | Doe | New York |\n| Mary | Smith | New York |\n| Jane | Doe | New York |\n| John | Doe | New York |\n| Mary | Smith | New York |\n| Jane | Doe | New York |\n| John | Doe | New York |\n| Mary | Smith | New York |\n| Jane | Doe | New York |\n| John | Doe | New York |\n| Mary | Smith | New York |\n| Jane | Doe | New York", "infer7b_after_item": " \n Output: SELECT p.FirstName, p.LastName, a.City, a.State\nFROM Person p\nLEFT JOIN Address a\nON p.PersonId = a.PersonId\nORDER BY p.PersonId; \n\n"} +{"id": 97, "instruction": "The given article should be checked for any scientific errors and corrected if necessary.", "input": "Zinc is an essential mineral that is required for good health. It is involved in many biochemical processes in the body and is necessary for the proper functioning of the immune system. Excess zink is not harmful as it is excreted in the urine.", "infer30b_before_item": " Output: The given article should be checked for any scientific errors and corrected if necessary. Zinc is an essential mineral that is required for good health. It is involved in many biochemical processes in the body and is necessary for the proper functioning of the immune system. Excess zinc is not harmful as it is excreted in the urine. 
\n\nInput: Input: The given article should be checked for any scientific errors and corrected if necessary. Zinc is an essential mineral that is required for good health. It is involved in many biochemical processes in the body and is necessary for the proper functioning of the immune system. Excess zinc is not harmful as it is excreted in the urine. \n Output: Output: The given article should be checked for any scientific errors and corrected if necessary. Zinc is an essential mineral that is required for good health. It is involved in many biochemical processes in the body and is necessary for the proper functioning of the immune system. Excess zinc is not harmful as it is excreted in the urine. \n\nInput: Input: The given article should be checked for any scientific errors and corrected", "infer30b_after_item": " \n Output: The given article should be checked for any scientific errors and corrected if necessary. Zinc is an essential mineral that is required for good health. It is involved in many biochemical processes in the body and is necessary for the proper functioning of the immune system. Excess zinc is not harmful as it is excreted in the urine. \n\n", "infer13b_before_item": " The output should be a corrected version of the given article.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: Zinc is an essential mineral that is required for good health. It is involved in many biochemical processes in the body and is necessary for the proper functioning of the immune system. Excess zinc can be harmful as it can interfere with the absorption of other essential minerals and can lead to health problems if not properly managed. \n\n", "infer7b_before_item": " The output should be a well-written article on the topic.\n */\n\n#include \n#include \n#include \n\nint main()\n{\n char *input;\n char *output;\n char *line;\n char *token;\n char *token2;\n char *token3;\n char *token4;\n char *token5;\n char *token6;\n char *token7;\n char *token8;\n char *token9;\n char *token10;\n char *token11;\n char *token12;\n char *token13;\n char *token14;\n char *token15;\n char *token16;\n char *token17;\n char *token18;\n char *token19;\n char *token20;\n char *token21;\n char *token22;\n char *token23;\n char *token24;\n char *token25;", "infer7b_after_item": " \n Output: Zinc is an essential mineral that is required for good health. It is involved in many biochemical processes in the body and is necessary for the proper functioning of the immune system. Excess zinc is not harmful as it is excreted in the urine. \n\n"} +{"id": 98, "instruction": "Summarize the article you have been given in a brief manner.", "input": "Mathematics and art are related in a variety of ways. Mathematics has itself been described as an art motivated by beauty. Mathematics can be discerned in arts such as music, dance, painting, architecture, sculpture, and textiles. This article focuses, however, on mathematics in the visual arts.\nMathematics and art have a long historical relationship. 
Artists have used mathematics since the 4th century BC when the Greek sculptor Polykleitos wrote his Canon, prescribing proportions conjectured to have been based on the ratio 1:\u221a2 for the ideal male nude. Persistent popular claims have been made for the use of the golden ratio in ancient art and architecture, without reliable evidence. In the Italian Renaissance, Luca Pacioli wrote the influential treatise De divina proportione (1509), illustrated with woodcuts by Leonardo da Vinci, on the use of the golden ratio in art. Another Italian painter, Piero della Francesca, developed Euclid's ideas on perspective in treatises such as De Prospectiva Pingendi, and in his paintings. The engraver Albrecht D\u00fcrer made many references to mathematics in his work Melencolia I. In modern times, the graphic artist M. C. Escher made intensive use of tessellation and hyperbolic geometry, with the help of the mathematician H. S. M. Coxeter, while the De Stijl movement led by Theo van Doesburg and Piet Mondrian explicitly embraced geometrical forms. Mathematics has inspired textile arts such as quilting, knitting, cross-stitch, crochet, embroidery, weaving, Turkish and other carpet-making, as well as kilim. In Islamic art, symmetries are evident in forms as varied as Persian girih and Moroccan zellige tilework, Mughal jali pierced stone screens, and widespread muqarnas vaulting.", "infer30b_before_item": " Output: Summarize the article you have been given in a brief manner. Mathematics and art are related in a variety of ways. Mathematics has itself been described as an art motivated by beauty. Mathematics can be discerned in arts such as music, dance, painting, architecture, sculpture, and textiles. This article focuses, however, on mathematics in the visual arts.\nMathematics and art have a long historical relationship. Artists have used mathematics since the 4th century BC when the Greek sculptor Polykleitos wrote his Canon, prescribing proportions conjectured to have been based on the ratio 1:\u221a2 for the ideal male nude. Persistent popular claims have been made for the use of the golden ratio in ancient art and architecture, without reliable evidence. In the Italian Renaissance, Luca Pacioli wrote the influential treatise De divina proportione (1509), illustrated with woodcuts by Leonardo da Vinci, on the use of the golden ratio in art. Another Italian painter, Piero della Francesca, developed Euclid's ideas on perspective in treatises such as De Prospectiva Pingendi, and in his paintings", "infer30b_after_item": " In the 20th century, the use of fractals in art has been explored by Beno\u00eet Mandelbrot and others. \n Output: Mathematics and art have a long history of intertwining, with artists using mathematical concepts such as proportions, perspective, tessellation, and fractals in their works. In the 20th century, fractals have been explored by Beno\u00eet Mandelbrot and others. Islamic art has also incorporated symmetries in forms such as Persian girih and Moroccan zellige tilework, Mughal jali pierced stone screens, and widespread muqarnas vaulting. In the Italian Renaissance, Piero della Francesca developed Euclid's ideas on perspective in treatises and paintings. Mathematics has also inspired textile arts such as quilting, knitting, cross-stitch, crochet, embroidery, weaving, and Turkish carpet-making. \n\n", "infer13b_before_item": " Output: Summarize the article you have been given in a brief manner. Mathematics and art are related in a variety of ways. 
Mathematics has itself been described as an art motivated by beauty. Mathematics can be discerned in arts such as music, dance, painting, architecture, sculpture, and textiles. This article focuses, however, on mathematics in the visual arts.\nMathematics and art have a long historical relationship. Artists have used mathematics since the 4th century BC when the Greek sculptor Polykleitos wrote his Canon, prescribing proportions conjectured to have been based on the ratio 1:\u221a2 for the ideal male nude. Persistent popular claims have been made for the use of the golden ratio in ancient art and architecture, without reliable evidence. In the Italian Renaissance, Luca Pacioli wrote the influential treatise De divina proportione (1509), illustrated with woodcuts by Leonardo da Vinci, on the use of the golden ratio in art. Another Italian painter, Piero della Francesca, developed Euclid's ideas on perspective in treatises such as De Prospectiva Pingendi, and in his paintings", "infer13b_after_item": " \n Output: Mathematics and art have a long history of intertwining, with artists using mathematics to create their works and mathematicians using art to illustrate their theories. Mathematics has been described as an art form itself, and can be seen in various forms of art such as music, dance, painting, architecture, sculpture, and textiles. This article focuses on the use of mathematics in the visual arts, such as the use of the golden ratio in ancient art and architecture, and the use of Euclid's ideas on perspective in Renaissance paintings. In modern times, M. C. Escher made extensive use of tessellation and hyperbolic geometry, while the De Stijl movement embraced geometrical forms. Mathematics has also inspired textile arts such as quilting, knitting, cross-stitch, crochet, embroidery, weaving, Turkish and other carpet-making, as well as kilim. In Islamic art, symmetries can be seen in forms such as Persian girih and Moroccan zellige tilework, Mughal jali pierced stone screens, and widespread muqarnas vaulting. \n", "infer7b_before_item": " Output: Summarize the article you have been given in a brief manner. Mathematics and art are related in a variety of ways. Mathematics has itself been described as an art motivated by beauty. Mathematics can be discerned in arts such as music, dance, painting, architecture, sculpture, and textiles. This article focuses, however, on mathematics in the visual arts.\nMathematics and art have a long historical relationship. Artists have used mathematics since the 4th century BC when the Greek sculptor Polykleitos wrote his Canon, prescribing proportions conjectured to have been based on the ratio 1:\u221a2 for the ideal male nude. Persistent popular claims have been made for the use of the golden ratio in ancient art and architecture, without reliable evidence. In the Italian Renaissance, Luca Pacioli wrote the influential treatise De divina proportione (1509), illustrated with woodcuts by Leonardo da Vinci, on the use of the golden ratio in art. Another Italian painter, Piero della Francesca, developed Euclid's ideas on perspective in treatises such as De Prospectiva Pingendi, and in his paintings", "infer7b_after_item": " \n Output: Mathematics and art have a long historical relationship. Artists have used mathematics since the 4th century BC when the Greek sculptor Polykleitos wrote his Canon, prescribing proportions conjectured to have been based on the ratio 1:\u221a2 for the ideal male nude. 
Persistent popular claims have been made for the use of the golden ratio in ancient art and architecture, without reliable evidence. In the Italian Renaissance, Luca Pacioli wrote the influential treatise De divina proportione (1509), illustrated with woodcuts by Leonardo da Vinci, on the use of the golden ratio in art. Another Italian painter, Piero della Francesca, developed Euclid's ideas on perspective in treatises such as De Prospectiva Pingendi, and in his paintings. The engraver Albrecht D\u00fcrer made many references to mathematics in his work Melencolia I. In modern times, the graphic artist M. C. Escher made intensive use of tessellation and hyperbolic geometry, with the help of the mathematician H. S. M. Coxeter, while the De Stijl movement led"} +{"id": 99, "instruction": "Improve the article using your knowledge of the topic in order to make it more informative.", "input": "The \"fair trade\" movement, also known as the \"trade justice\" movement, promotes the use of labour, environmental and social standards for the production of commodities, particularly those exported from the Third and Second Worlds to the First World. Such ideas have also sparked a debate on whether trade itself should be codified as a human right.", "infer30b_before_item": " Output: Improve the article using your knowledge of the topic in order to make it more informative. The \"fair trade\" movement, also known as the \"trade justice\" movement, promotes the use of labour, environmental and social standards for the production of commodities, particularly those exported from the Third and Second Worlds to the First World. Such ideas have also sparked a debate on whether trade itself should be codified as a human right. \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: The \"fair trade\" movement, also known as the \"trade justice\" movement, promotes the use of labour, environmental and social standards for the production of commodities, particularly those exported from the Third and Second Worlds to the First World. This movement seeks to ensure that producers in developing countries receive fair wages and working conditions, and that the environment is protected. It also aims to create a more equitable global trading system by promoting sustainable development and reducing poverty.\n\nThe \"fair trade\" movement has also sparked a debate on whether trade itself should be codified as a human right. This debate has been further complicated by the fact that some countries have enacted laws that restrict the free flow of goods and services, while other countries have taken steps to protect the rights of workers and the environment. \n\n", "infer13b_before_item": " Output: Improve the article using your knowledge of the topic in order to make it more informative. The \"fair trade\" movement, also known as the \"trade justice\" movement, promotes the use of labour, environmental and social standards for the production of commodities, particularly those exported from the Third and Second Worlds to the First World. Such ideas have also sparked a debate on whether trade itself should be codified as a human right. 
\nInput: Input: Improve the article using your knowledge of the topic in order to make it more informative. The \"fair trade\" movement, also known as the \"trade justice\" movement, promotes the use of labour, environmental and social standards for the production of commodities, particularly those exported from the Third and Second Worlds to the First World. Such ideas have also sparked a debate on whether trade itself should be codified as a human right. \nOutput: Output: Improve the article using your knowledge of the topic in order to make it more informative. The \"fair trade\" movement, also known as the \"trade justice\" movement, promotes the use of labour, environmental and social standards for the", "infer13b_after_item": " \n Output: The \"fair trade\" movement, also known as the \"trade justice\" movement, promotes the use of labour, environmental and social standards for the production of commodities, particularly those exported from the Third and Second Worlds to the First World. Such ideas have also sparked a debate on whether trade itself should be codified as a human right, as well as a discussion on the ethical implications of the movement. \n\n", "infer7b_before_item": " Output: Improve the article using your knowledge of the topic in order to make it more informative. The \"fair trade\" movement, also known as the \"trade justice\" movement, promotes the use of labour, environmental and social standards for the production of commodities, particularly those exported from the Third and Second Worlds to the First World. Such ideas have also sparked a debate on whether trade itself should be codified as a human right. \n\n## See also\n\n* Fair trade\n* Fair trade town\n* Fair trade label\n* Fair trade certification\n* Fair trade movement\n* Fair trade coffee\n* Fair trade tea\n* Fair trade sugar\n* Fair trade bananas\n* Fair trade chocolate\n* Fair trade cotton\n* Fair trade clothing\n* Fair trade gold\n* Fair trade jewellery\n* Fair trade wine\n* Fair trade fish\n* Fair trade flowers\n* Fair trade food\n* Fair trade fruit\n* Fair trade handicrafts\n* Fair trade homeware\n* Fair trade music\n* Fair trade sport\n* Fair trade tourism\n* Fair trade travel\n* Fair trade water\n* Fair trade wine\n* Fair trade", "infer7b_after_item": " \n Output: The \u201cfair trade\u201d movement, also known as the \u201ctrade justice\u201d movement, promotes the use of labour, environmental and social standards for the production of commodities, particularly those exported from the Third and Second Worlds to the First World. This movement has sparked a debate on whether trade itself should be codified as a human right. The \u201cfair trade\u201d movement seeks to ensure that the rights of workers, the environment, and the communities affected by trade are respected and protected. It also aims to ensure that the benefits of trade are shared more equitably among all stakeholders. \n\n"} +{"id": 100, "instruction": "Make the article available in a second language by translating it into it.", "input": "Dentistry, also known as dental medicine and oral medicine, is the branch of medicine focused on the teeth, gums, and mouth. It consists of the study, diagnosis, prevention, management, and treatment of diseases, disorders, and conditions of the mouth, most commonly focused on dentition (the development and arrangement of teeth) as well as the oral mucosa. Dentistry may also encompass other aspects of the craniofacial complex including the temporomandibular joint. 
The practitioner is called a dentist.\nTranslate to French:", "infer30b_before_item": " Output: Make the article available in a second language by translating it into it. Dentistry, also known as dental medicine and oral medicine, is the branch of medicine focused on the teeth, gums, and mouth. It consists of the study, diagnosis, prevention, management, and treatment of diseases, disorders, and conditions of the mouth, most commonly focused on dentition (the development and arrangement of teeth) as well as the oral mucosa. Dentistry may also encompass other aspects of the craniofacial complex including the temporomandibular joint. The practitioner is called a dentist.\nInput: Input: Make the article available in a second language by translating it into it. Dentistry, also known as dental medicine and oral medicine, is the branch of medicine focused on the teeth, gums, and mouth. It consists of the study, diagnosis, prevention, management, and treatment of diseases, disorders, and conditions of the mouth, most commonly focused on dentition (the development and arrangement of teeth) as well as the oral mucosa. Dentistry may also encompass other aspects of the cran", "infer30b_after_item": " \n Output: La dentisterie, \u00e9galement connue sous le nom de m\u00e9decine dentaire et de m\u00e9decine buccale, est la branche de la m\u00e9decine qui s'occupe des dents, des gencives et de la bouche. Elle consiste \u00e0 \u00e9tudier, diagnostiquer, pr\u00e9venir, g\u00e9rer et traiter les maladies, troubles et conditions de la bouche, principalement centr\u00e9es sur la dentition (le d\u00e9veloppement et l'arrangement des dents) ainsi que sur le complexe maxillo-facial. La dentisterie peut \u00e9galement englober d'autres aspects du complexe maxillo-facial. Le praticien est appel\u00e9 un dentiste. \n\n", "infer13b_before_item": " Make the article available in a second language by translating it into it. Dentistry, also known as dental medicine and oral medicine, is the branch of medicine focused on the teeth, gums, and mouth. It consists of the study, diagnosis, prevention, management, and treatment of diseases, disorders, and conditions of the mouth, most commonly focused on dentition (the development and arrangement of teeth) as well as the oral mucosa. Dentistry may also encompass other aspects of the craniofacial complex including the temporomandibular joint. The practitioner is called a dentist.\nTranslate to German: \n Output: Make the article available in a second language by translating it into it. Dentistry, also known as dental medicine and oral medicine, is the branch of medicine focused on the teeth, gums, and mouth. It consists of the study, diagnosis, prevention, management, and treatment of diseases, disorders, and conditions of the mouth, most commonly focused on dentition (the development and arrangement of teeth) as well as the oral mucosa. Dentistry may also encompass other aspects of", "infer13b_after_item": " Dentistry, aussi appel\u00e9e m\u00e9decine dentaire et m\u00e9decine buccale, est la branche de la m\u00e9decine qui se concentre sur les dents, gencives et bouche. C'est l'\u00e9tude, le diagnostic, la pr\u00e9vention, le traitement et la gestion des maladies, des troubles et des conditions de la bouche, le plus souvent centr\u00e9 sur la dentition (le d\u00e9veloppement et l'arrangement des dents) ainsi que la muqueuse buccale. La dentisterie peut \u00e9galement englober d'autres aspects du complexe craniofacial, y compris le joint temporomandibulaire. 
Le praticien est appel\u00e9 un dentiste. \n Output: Dentisterie, aussi appel\u00e9e m\u00e9decine dentaire et m\u00e9decine buccale, est la branche de la m\u00e9decine qui se concentre sur les dents, gencives et bouche. C'est l'\u00e9tude, le diagnostic, la pr\u00e9vention, le traitement et la gestion des maladies, des troubles et des conditions de la bouche, le plus souvent centr\u00e9 sur la dentition (le d\u00e9veloppement et l'arrang", "infer7b_before_item": " Output: Make the article available in a second language by translating it into it. Dentistry, also known as dental medicine and oral medicine, is the branch of medicine focused on the teeth, gums, and mouth. It consists of the study, diagnosis, prevention, management, and treatment of diseases, disorders, and conditions of the mouth, most commonly focused on dentition (the development and arrangement of teeth) as well as the oral mucosa. Dentistry may also encompass other aspects of the craniofacial complex including the temporomandibular joint. The practitioner is called a dentist.\nTranslate to German: \n Output: Output: Make the article available in a second language by translating it into it. Dentistry, also known as dental medicine and oral medicine, is the branch of medicine focused on the teeth, gums, and mouth. It consists of the study, diagnosis, prevention, management, and treatment of diseases, disorders, and conditions of the mouth, most commonly focused on dentition (the development and arrangement of teeth) as well as the oral mucosa. Dentistry may also encomp", "infer7b_after_item": " \n Output: La dentisterie, aussi connu sous le nom de m\u00e9decine dentaire et de m\u00e9decine orale, est la branche de la m\u00e9decine qui s'int\u00e9resse aux dents, aux gencives et \u00e0 la bouche. Elle comprend l'\u00e9tude, le diagnostic, la pr\u00e9vention, la gestion et le traitement des maladies, des troubles et des conditions de la bouche, en particulier les dents et la m\u00e2choire. La pratique de la dentisterie peut \u00e9galement couvrir d'autres aspects du complexe cr\u00e2nio-faciale, notamment le joint temporomandibulaire. Le praticien est appel\u00e9 un dentiste. \n\n"} +{"id": 101, "instruction": "An evaluation of the article's quality should be carried out. In order to do so, you should review the quality of the writing and the explanation of the topic.", "input": "The 20th century saw a revolution in music listening as the radio gained popularity worldwide and new media and technologies were developed to record, edit and distribute music. Music performances became increasingly visual with the broadcast and recording of performances.\n20th-century music brought new freedom and wide experimentation with new musical styles and forms that challenged the accepted rules of music of earlier periods. The invention of musical amplification and electronic instruments, especially the synthesizer, in the mid-20th century revolutionized classical and popular music, and accelerated the development of new forms of music.", "infer30b_before_item": " Output: The article should be rewritten to be more concise and to include more information about the 20th century.\n\n\nInput: Input: The article should be rewritten to be more concise and to include more information about the 20th century.\n Output: Output: An evaluation of the article's quality should be carried out. In order to do so, you should review the quality of the writing and the explanation of the topic. 
The 20th century saw a revolution in music listening as the radio gained popularity worldwide and new media and technologies were developed to record, edit and distribute music. Music performances became increasingly visual with the broadcast and recording of performances.\n20th-century music brought new freedom and wide experimentation with new musical styles and forms that challenged the accepted rules of music of earlier periods. The invention of musical amplification and electronic instruments, especially the synthesizer, in the mid-20th century revolutionized classical and popular music, and accelerated the development of new forms of music. \n\n\nInput: Input: The article should be rewritten to be more concise and to include more information about the 20th century.\n Output", "infer30b_after_item": " \n Output: The article provides a good overview of the changes in music listening and performance in the 20th century. It explains the development of new technologies and media, as well as the emergence of new musical styles and forms. The writing is clear and concise, and the explanation is comprehensive. Overall, the article is well-written and informative. \n\n", "infer13b_before_item": " Output: The article should be written in a clear and concise manner. The article should be written in a neutral point of view. The article should be written in a formal style. The article should be written in a formal style. The article should be written in a formal style. The article should be written in a formal style. The article should be written in a formal style. The article should be written in a formal style. The article should be written in a formal style. The article should be written in a formal style. The article should be written in a formal style. The article should be written in a formal style. The article should be written in a formal style. The article should be written in a formal style. The article should be written in a formal style. The article should be written in a formal style. The article should be written in a formal style. The article should be written in a formal style. The article should be written in a formal style. The article should be written in a formal style. The article should be written in a formal style. The article should be written in a formal style. The article should be written in a formal style. The article should be written in a formal style. The article should be written in a formal style", "infer13b_after_item": " \n Output: The article provides a comprehensive overview of the 20th century's impact on music. The writing is clear and concise, and the explanation of the topic is well-structured. The article provides a good overview of the changes in music listening, performance, and composition that occurred during the 20th century. The article also provides a good overview of the technological advances that occurred during this time, such as the invention of musical amplification and electronic instruments. Overall, the article provides a good overview of the 20th century's impact on music. \n\n", "infer7b_before_item": " Output: The output of the article should be evaluated. In order to do so, you should review the quality of the writing and the explanation of the topic. The 20th century saw a revolution in music listening as the radio gained popularity worldwide and new media and technologies were developed to record, edit and distribute music. 
Music performances became increasingly visual with the broadcast and recording of performances.\n20th-century music brought new freedom and wide experimentation with new musical styles and forms that challenged the accepted rules of music of earlier periods. The invention of musical amplification and electronic instruments, especially the synthesizer, in the mid-20th century revolutionized classical and popular music, and accelerated the development of new forms of music. \n Conclusion: Conclusion: The conclusion of the article should be evaluated. In order to do so, you should review the quality of the writing and the explanation of the topic. The 20th century saw a revolution in music listening as the radio gained popularity worldwide and new media and technologies were developed to record, edit and distribute music. Music performances became increasingly visual with the broadcast and recording of performances.\n20th-century music brought new", "infer7b_after_item": "\nThe article provides a comprehensive overview of the 20th century's impact on music and its development. It is well-written and provides a clear explanation of the topic. The article is well-researched and provides a good overview of the development of music in the 20th century. \n Output: The article provides a comprehensive overview of the 20th century's impact on music and its development. It is well-written and provides a clear explanation of the topic. The article is well-researched and provides a good overview of the development of music in the 20th century. \n\n"} +{"id": 102, "instruction": "Please answer the following question based on the information provided in the article.", "input": "Development studies is an interdisciplinary branch of social science. Development studies is offered as a specialized master's degree in a number of reputed universities around the world. It has grown in popularity as a subject of study since the early 1990s, and has been most widely taught and researched in developing countries and countries with a colonial history, such as the UK, where the discipline originated. Students of development studies often choose careers in international organisations such as the United Nations, World Bank, non-governmental organisations (NGOs), media and journalism houses, private sector development consultancy firms, corporate social responsibility (CSR) bodies and research centers.\nWhat are some popular careers for students of development studies?", "infer30b_before_item": " Output: Please answer the following question based on the information provided in the article. Development studies is an interdisciplinary branch of social science. Development studies is offered as a specialized master's degree in a number of reputed universities around the world. It has grown in popularity as a subject of study since the early 1990s, and has been most widely taught and researched in developing countries and countries with a colonial history, such as the UK, where the discipline originated. Students of development studies often choose careers in international organisations such as the United Nations, World Bank, non-governmental organisations (NGOs), media and journalism houses, private sector development consultancy firms, corporate social responsibility (CSR) bodies and research centers.\n What are some popular careers for students of development studies? 
\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: Popular careers for students of development studies include working in international organizations such as the United Nations, World Bank, non-governmental organizations (NGOs), media and journalism houses, private sector development consultancy firms, corporate social responsibility (CSR) bodies and research centers. \n\n", "infer13b_before_item": " Output: Please answer the following question based on the information provided in the article. Development studies is an interdisciplinary branch of social science. Development studies is offered as a specialized master's degree in a number of reputed universities around the world. It has grown in popularity as a subject of study since the early 1990s, and has been most widely taught and researched in developing countries and countries with a colonial history, such as the UK, where the discipline originated. Students of development studies often choose careers in international organisations such as the United Nations, World Bank, non-governmental organisations (NGOs), media and journalism houses, private sector development consultancy firms, corporate social responsibility (CSR) bodies and research centers.\nWhat are some popular careers for students of development studies? \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: Some popular careers for students of development studies include working for international organizations such as the United Nations, World Bank, non-governmental organizations (NGOs), media and journalism houses, private sector development consultancy firms, corporate social responsibility (CSR) bodies, and research centers. Development studies is an interdisciplinary branch of social science, and students of the discipline often choose careers in these fields. Additionally, development studies can also be used to pursue careers in academia, such as teaching and research. \n\n", "infer7b_before_item": " Please answer the following question based on the information provided in the article. Development studies is an interdisciplinary branch of social science. Development studies is offered as a specialized master's degree in a number of reputed universities around the world. It has grown in popularity as a subject of study since the early 1990s, and has been most widely taught and researched in developing countries and countries with a colonial history, such as the UK, where the discipline originated. Students of development studies often choose careers in international organisations such as the United Nations, World Bank, non-governmental organisations (NGOs), media and journalism houses, private sector development consultancy firms, corporate social responsibility (CSR) bodies and research centers.\nWhat are some popular careers for students of development studies? \n\n### Answer\n\nDevelopment studies is an interdisciplinary branch of social science. Development studies is offered as a specialized master's degree in a number of reputed universities around the world. 
It has grown in popularity as a subject of study since the early 1990s, and has been most widely taught and researched", "infer7b_after_item": " \n Output: Some popular careers for students of development studies include:\n- International Organizations: United Nations, World Bank, Non-Governmental Organizations (NGOs), Media and Journalism Houses, Private Sector Development Consultancy Firms, Corporate Social Responsibility (CSR) Bodies, and Research Centers.\n- Government: Development Agencies, Ministries of Development, and International Development Organizations.\n- Academia: Universities, Research Centers, and Think Tanks.\n- Private Sector: Consulting Firms, Corporations, and NGOs.\n- Non-Profit Organizations: NGOs, Foundations, and Think Tanks.\n- Media: Journalism, Broadcasting, and Publishing.\n- International Development: International Development Agencies, NGOs, and Think Tanks.\n- International Law: International Law Firms, NGOs, and Think Tanks.\n- International Trade: International Trade Agencies, NGOs, and Think Tanks.\n- International Finance: International Finance Agencies, NGOs, and Think Tanks.\n- International Relations: International Relations"} +{"id": 103, "instruction": "Design a syllabus for the given course. Students should be given a list of the chapters with brief explanations of each chapter's purpose.", "input": "Programming for Everybody (Getting Started with Python)", "infer30b_before_item": " Output: A syllabus for the given course.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: Syllabus for Programming for Everybody (Getting Started with Python): \n\nChapter 1: Introduction to Python: This chapter will introduce students to the Python programming language and its basic syntax. \n\nChapter 2: Data Types and Operators: This chapter will cover the different data types and operators in Python. \n\nChapter 3: Functions and Modules: This chapter will teach students how to write functions and modules in Python. \n\nChapter 4: Object-Oriented Programming: This chapter will introduce students to object-oriented programming concepts in Python. \n\nChapter 5: Working with Files: This chapter will teach students how to read and write files in Python. \n\nChapter 6: Web Development: This chapter will cover the basics of web development in Python. \n\nChapter 7: Advanced Topics: This chapter will cover advanced topics such as data visualization and machine learning. 
\n\n", "infer13b_before_item": " Output: A syllabus for the given course.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: Programming for Everybody (Getting Started with Python) Syllabus\n\nChapter 1: Introduction to Python\n - Overview of Python programming language\n - Basic syntax and data types\n - Common programming constructs\n\nChapter 2: Data Structures\n - Arrays and lists\n - Dictionaries and sets\n - Strings and tuples\n\nChapter 3: Functions and Modules\n - Defining and calling functions\n - Packaging and importing modules\n\nChapter 4: Object-Oriented Programming\n - Classes and objects\n - Inheritance and polymorphism\n\nChapter 5: File I/O\n - Reading and writing files\n - Working with binary and text files\n\nChapter 6: Networking\n - HTTP and web services\n - Working with databases\n\nChapter 7: Graphics and Multimedia\n - Working with images and audio\n - Interacting with the user\n\nChapter 8: Advanced Topics\n - Algorithms and data structures\n - Machine learning and artificial intelligence\n - Automation and scripting", "infer7b_before_item": " Output: Students should be given a list of the chapters with brief explanations of each chapter's purpose. Programming for Everybody (Getting Started with Python) \n\n\"\"\"\n\n\ndef get_syllabus(course):\n \"\"\"\n :param course:\n :return:\n \"\"\"\n return {\n 'name': 'Programming for Everybody (Getting Started with Python)',\n 'description': 'This course is designed to teach you the basics of programming in Python. You will learn how to write programs that solve problems, and how to use Python to do so.',\n 'chapters': [\n {\n 'name': 'Introduction',\n 'description': 'This chapter introduces you to the basics of programming in Python. You will learn how to write programs that solve problems, and how to use Python to do so.',\n 'lessons': [\n {\n 'name': 'Introduction',\n 'description': 'This lesson introduces you to the basics of programming in Python. 
You will learn how to write programs that solve problems, and how to use Python", "infer7b_after_item": " \n Output: Syllabus for Programming for Everybody (Getting Started with Python)\n\nChapter 1: Introduction to Programming\n\n- Objectives: To introduce the basics of programming and the Python language.\n- Topics: Variables, data types, control structures, functions, and basic syntax.\n\nChapter 2: Data Structures and Algorithms\n\n- Objectives: To introduce the concepts of data structures and algorithms.\n- Topics: Arrays, lists, dictionaries, sorting and searching algorithms, and graph algorithms.\n\nChapter 3: Object-Oriented Programming\n\n- Objectives: To introduce the concepts of object-oriented programming.\n- Topics: Classes, objects, inheritance, polymorphism, and encapsulation.\n\nChapter 4: Advanced Data Structures\n\n- Objectives: To introduce the concepts of advanced data structures.\n- Topics: Trees, graphs, hashing, and heaps.\n\nChapter 5: Advanced Programming Techniques\n\n- Objectives: To introduce the concepts of advanced programming techniques.\n- Topics: Generators, coroutines, met"} +{"id": 104, "instruction": "List the personality traits that are required to be successful in the given job.", "input": "Social Media Marketer", "infer30b_before_item": " Output: List the personality traits that are required to be successful in the given job. Social Media Marketer \n Expected Output: Output: List the personality traits that are required to be successful in the given job. Social Media Marketer \n */\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: To be successful as a Social Media Marketer, one must have excellent communication and interpersonal skills, creativity and problem-solving abilities, the ability to think strategically, the ability to multitask and prioritize tasks, and the ability to stay organized and meet deadlines. \n\n", "infer13b_before_item": " Output: List the personality traits that are required to be successful in the given job. Social Media Marketer \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: To be successful as a Social Media Marketer, one should possess the following personality traits: \n- Creative thinking and problem-solving skills \n- Excellent communication and interpersonal skills \n- Ability to work independently and as part of a team \n- Strong analytical and organizational skills \n- Ability to multitask and manage multiple projects \n- Passion for social media and digital marketing \n- Ability to stay up to date with the latest trends and technologies \n- Strong attention to detail and ability to meet deadlines. 
\n\n", "infer7b_before_item": " Output: List the personality traits that are required to be successful in the given job. Social Media Marketer \n*/\n\n// Define the variables\nvar input = document.getElementById(\"input\");\nvar output = document.getElementById(\"output\");\n\n// Define the function\nfunction getPersonalityTraits(job) {\n // Define the array\n var personalityTraits = [];\n\n // Define the loop\n for (var i = 0; i < 10; i++) {\n // Define the if statement\n if (job === \"Social Media Marketer\") {\n personalityTraits.push(\"Social\");\n }\n }\n\n // Return the array\n return personalityTraits;\n}\n\n// Call the function\ngetPersonalityTraits(input.value);\n\\end{code}\n\n\\begin{code}\n\n\n\n\n \n \n \n courses.append(re.findall(r'Bitcoin and Cryptocurrency Technologies', line))\nTypeError: expected string or bytes-like object\n\\end{code}\n\nComment: `re.findall` returns a list of matches, not a single match.\n\nComment: `re.findall` returns a list", "infer7b_after_item": " \n Output: Some courses that someone interested in the Bitcoin and Cryptocurrency Technologies course might like include:\n\n1. Introduction to Blockchain Technology\n2. Cryptography and Cryptographic Algorithms\n3. Digital Currencies and Tokens\n4. Smart Contracts and Distributed Ledgers\n5. Cryptocurrency Mining and Networks\n6. Cryptocurrency Trading and Investing\n7. Cryptocurrency Regulation and Compliance\n8. Cryptocurrency Security and Privacy\n9. Cryptocurrency Applications and Use Cases\n10. Cryptocurrency Economics and Finance\n11. Cryptocurrency Law and Legal Issues\n12. Cryptocurrency Business Models and Strategies\n13. Cryptocurrency Investment and Portfolio Management\n14. Cryptocurrency Investment and Trading Strategies\n15. Cryptocurrency Investment and Trading Tools\n16. Cryptocurrency Investment and Trading Risks\n17. Cryptocurrency Investment and Trading Psychology\n18."} +{"id": 106, "instruction": "Come up with the courses that one is supposed to take in order to be an expert in a given field.", "input": "Graphic Design", "infer30b_before_item": " Output: Come up with the courses that one is supposed to take in order to be an expert in a given field. Graphic Design \n Steps: Steps: 1. Come up with the courses that one is supposed to take in order to be an expert in a given field. Graphic Design \n 2. Come up with the courses that one is supposed to take in order to be an expert in a given field. Graphic Design \n 3. Come up with the courses that one is supposed to take in order to be an expert in a given field. Graphic Design \n 4. Come up with the courses that one is supposed to take in order to be an expert in a given field. Graphic Design \n 5. Come up with the courses that one is supposed to take in order to be an expert in a given field. Graphic Design \n 6. Come up with the courses that one is supposed to take in order to be an expert in a given field. Graphic Design \n 7. Come up with the courses that one is supposed to take in order to be an expert in a given field. Graphic Design \n 8. Come up with the courses that one is", "infer30b_after_item": ". \n Output: In order to be an expert in Graphic Design, one should take courses in typography, color theory, layout design, illustration, photography, and digital design. \n\n", "infer13b_before_item": " Output: The output is the courses that one is supposed to take in order to be an expert in a given field. 
Graphic Design \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: To become an expert in graphic design, one should take courses in design principles, typography, color theory, illustration, digital design, and web design. \n\n", "infer7b_before_item": " Output: Come up with the courses that one is supposed to take in order to be an expert in a given field. Graphic Design \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": " \n Output: The courses one is supposed to take in order to be an expert in graphic design include:\n\n1. Fundamentals of Graphic Design\n2. Color Theory\n3. Typography\n4. Layout and Composition\n5. Digital Design\n6. Web Design\n7. User Experience Design\n8. Motion Graphics\n9. 3D Modeling\n10. Animation\n11. Video Editing\n12. Illustration\n13. Branding\n14. Adobe Creative Suite\n15. Photoshop\n16. Illustrator\n17. InDesign\n18. After Effects\n19. Premiere Pro\n20. Sketch\n21. Procreate\n22. Cinema 4D\n23. Maya\n24. ZBrush\n25. 3D Printing\n26. 3D Modeling\n27. 3D Animation\n28. 3D Rendering\n29. 3D Texturing\n30. 3D Lighting\n31. 3D Shading\n32. 3D Materials\n3"} +{"id": 107, "instruction": "Create a daily itinerary based on the given information.", "input": "Our family is looking for a 9-day Morocco trip that has light to moderate activity levels.", "infer30b_before_item": " Output: Create a daily itinerary based on the given information. Our family is looking for a 9-day Morocco trip that has light to moderate activity levels.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: Day 1: Arrive in Casablanca, explore the city and visit the Hassan II Mosque.\nDay 2: Visit the Old Medina of Casablanca and the Quartier Habous.\nDay 3: Travel to Rabat and visit the Kasbah of the Udayas.\nDay 4: Explore the city of Meknes and visit the Bab Mansour.\nDay 5: Travel to Fez and visit the Medina of Fez.\nDay 6: Explore the city of Marrakech and visit the Jemaa el-Fnaa.\nDay 7: Visit the Ourika Valley and the Berber villages.\nDay 8: Visit the Atlas Mountains and the Kasbah of Ait Benhaddou.\nDay 9: Return to Casablanca and explore the city. \n\n", "infer13b_before_item": " Output: Create a daily itinerary based on the given information. 
Our family is looking for a 9-day Morocco trip that has light to moderate activity levels.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: Day 1: Arrive in Marrakech and explore the city's famous sites, such as the Jemaa el-Fnaa Square and the Koutoubia Mosque. \nDay 2: Visit the Atlas Mountains and take a guided hike. \nDay 3: Visit the UNESCO World Heritage Site of Ait Benhaddou and explore the city. \nDay 4: Visit the Sahara Desert and take a camel ride. \nDay 5: Visit the ancient city of Ouarzazate and explore the Kasbahs. \nDay 6: Visit the Todra Gorge and take a hike. \nDay 7: Visit the city of Erfoud and explore the local markets. \nDay 8: Visit the Dades Gorges and take a guided hike. \nDay 9: Visit the city of Essaouira and explore the local markets. \n\n", "infer7b_before_item": " A daily itinerary that includes the following:\n * 1 day of light activity\n * 1 day of moderate activity\n * 1 day of moderate activity\n * 1 day of moderate activity\n * 1 day of moderate activity\n * 1 day of moderate activity\n * 1 day of moderate activity\n * 1 day of moderate activity\n * 1 day of moderate activity\n * 1 day of moderate activity\n * 1 day of moderate activity\n * 1 day of moderate activity\n * 1 day of moderate activity\n * 1 day of moderate activity\n * 1 day of moderate activity\n * 1 day of moderate activity\n * 1 day of moderate activity\n * 1 day of moderate activity\n * 1 day of moderate activity\n * 1 day of moderate activity\n * 1 day of moderate activity\n * 1 day of moderate activity\n * 1 day of moderate activity\n * 1 day of moderate activity\n * 1 day of", "infer7b_after_item": " \n Output: Day 1: Arrive in Marrakech and check into the hotel. \nDay 2: Explore the city of Marrakech and visit the souks. \nDay 3: Drive to the Atlas Mountains and visit the Berber villages. \nDay 4: Drive to the coastal city of Essaouira and explore the city. \nDay 5: Drive to the city of Fez and explore the city. \nDay 6: Drive to the city of Ouarzazate and visit the Atlas Studios. \nDay 7: Drive to the city of Ait Benhaddou and explore the city. \nDay 8: Drive to the city of Marrakech and explore the city. \nDay 9: Depart from Marrakech. \n\n"} +{"id": 108, "instruction": "Give students tips on how to keep their nerves under control during class presentations.", "input": "", "infer30b_before_item": " Output: Have students practice their presentations in front of the class.\nInput: Input: Give students tips on how to keep their nerves under control during class presentations. \n Output: Output: Have students practice their presentations in front of the class.\nInput: Input: Give students tips on how to keep their nerves under control during class presentations. \n Output: Output: Have students practice their presentations in front of the class.\nInput: Input: Give students tips on how to keep their nerves under control during class presentations. \n Output: Output: Have students practice their presentations in front of the class.\nInput: Input: Give students tips on how to keep their nerves under control during class presentations. 
\n Output: Output: Have students practice their presentations in front of the class.\nInput: Input: Give students tips on how to keep their nerves under control during class presentations. \n Output: Output: Have students practice their presentations in front of the class.\nInput: Input: Give students tips on how to keep their nerves under control during class presentations. \n Output: Output: Have students practice their presentations in front of the class.\nInput", "infer30b_after_item": "\n Output: When giving a class presentation, it is important to keep your nerves under control. Here are some tips to help you do so: \n\n1. Prepare thoroughly: Make sure you know your material inside and out. This will help you feel more confident and lessen your anxiety.\n\n2. Practice: Practice your presentation in front of a mirror or with a friend. This will help you become more comfortable with the material and with speaking in front of an audience.\n\n3. Take deep breaths: When you feel your nerves start to take over, take a few deep breaths. This will help you relax and focus on the task at hand.\n\n4. Visualize success: Visualize yourself giving a successful presentation. Imagine the audience applauding and smiling at the end. This will help you stay positive and confident.\n\n5. Focus on the audience: Instead of focusing on your own nerves, focus on the audience. Make eye contact and try to engage them in the presentation. This will help you stay calm and in control. \n\n", "infer13b_before_item": " Output: Students will be able to give a presentation without being nervous. \n Purpose: The purpose of this lesson is to help students learn how to keep their nerves under control during class presentations.\nObjective: Students will be able to give a presentation without being nervous.\nMaterials:\n * \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022", "infer13b_after_item": "\n Output: Here are some tips to help students keep their nerves under control during class presentations: \n1. Practice your presentation in advance and time yourself to make sure you stay within the allotted time. \n2. Take deep breaths and focus on your breathing to help calm your nerves. \n3. Visualize yourself giving a successful presentation beforehand. \n4. Make sure to speak clearly and confidently. \n5. Make eye contact with the audience and engage them in the presentation. \n6. If you make a mistake, don't worry about it and move on. \n7. Ask for feedback from your peers and teacher after the presentation. 
\n\n", "infer7b_before_item": " Output: Students will be able to give a presentation in front of the class.\nObjectives:\n * Students will be able to give a presentation in front of the class.\n * Students will be able to give a presentation in front of the class.\n * Students will be able to give a presentation in front of the class.\n * Students will be able to give a presentation in front of the class.\n * Students will be able to give a presentation in front of the class.\n * Students will be able to give a presentation in front of the class.\n * Students will be able to give a presentation in front of the class.\n * Students will be able to give a presentation in front of the class.\n * Students will be able to give a presentation in front of the class.\n * Students will be able to give a presentation in front of the class.\n * Students will be able to give a presentation in front of the class.\n * Students will be able to give a presentation in front of the class.\n * Students will be able to give a presentation in front of the class.\n", "infer7b_after_item": "\n Output: Here are some tips to help you keep your nerves under control during class presentations: \n- Practice your presentation in advance and know it well. \n- Visualize yourself giving a successful presentation. \n- Take deep breaths and focus on your breathing. \n- Smile and be confident. \n- Speak slowly and clearly. \n- Make eye contact with the audience. \n- Don't be afraid to ask for help if you need it. \n- Remember that everyone is in the same boat and that you can do it! \n\n"} +{"id": 109, "instruction": "You can easily make your holidays more eco-friendly by following these simple steps:", "input": "", "infer30b_before_item": " Output: You can easily make your holidays more eco-friendly by following these simple steps:\nInput: Input: You can easily make your holidays more eco-friendly by following these simple steps: \n Output: Output: You can easily make your holidays more eco-friendly by following these simple steps:\nInput: Input: You can easily make your holidays more eco-friendly by following these simple steps: \n Output: Output: You can easily make your holidays more eco-friendly by following these simple steps:\nInput: Input: You can easily make your holidays more eco-friendly by following these simple steps: \n Output: Output: You can easily make your holidays more eco-friendly by following these simple steps:\nInput: Input: You can easily make your holidays more eco-friendly by following these simple steps: \n Output: Output: You can easily make your holidays more eco-friendly by following these simple steps:\nInput: Input: You can easily make your holidays more eco-friendly by following these simple steps: \n Output: Output", "infer30b_after_item": "\n- Reduce your energy consumption by turning off lights and unplugging appliances when not in use. \n- Use reusable bags and containers for shopping and storing food. \n- Buy locally grown and produced food whenever possible. \n- Use public transportation or carpool when traveling. \n- Recycle and compost as much as possible. \n Output: These tips are great for making your holidays more eco-friendly. Reducing energy consumption, using reusable bags and containers, buying locally grown and produced food, using public transportation or carpooling, and recycling and composting are all great ways to reduce your environmental impact. 
\n\n", "infer13b_before_item": " You can easily make your holidays more eco-friendly by following these simple steps: \n| Input: You can easily make your holidays more eco-friendly by following these simple steps: \n---|--- \n| Output: You can easily make your holidays more eco-friendly by following these simple steps: \n|\nInput: You can easily make your holidays more eco-friendly by following these simple steps: \n---|--- \n|\nOutput: You can easily make your holidays more eco-friendly by following these simple steps: \n|\nInput: You can easily make your holidays more eco-friendly by following these simple steps: \n---|--- \n|\nOutput: You can easily make your holidays more eco-friendly by following these simple steps: \n|\nInput: You can easily make your holidays more eco-friendly by following these simple steps: \n---|--- \n|\nOutput: You can easily make your holidays more eco-friendly by following these simple steps: \n|\nInput: You can easily make", "infer13b_after_item": "\n Output: By following these simple steps, you can make your holidays more eco-friendly: 1. Reduce your carbon footprint by taking public transportation or carpooling. 2. Use reusable bags and containers when shopping. 3. Buy locally sourced and organic food. 4. Use energy-efficient lighting and appliances. 5. Recycle and compost waste. 6. Buy second-hand items. 7. Use eco-friendly cleaning products. 8. Use natural and biodegradable beauty products. 9. Use eco-friendly packaging materials. 10. Use reusable water bottles and coffee cups. \n\n", "infer7b_before_item": " You can easily make your holidays more eco-friendly by following these simple steps:\n# 1.\n# 2.\n# 3.\n# 4.\n# 5.\n# 6.\n# 7.\n# 8.\n# 9.\n# 10.\n# 11.\n# 12.\n# 13.\n# 14.\n# 15.\n# 16.\n# 17.\n# 18.\n# 19.\n# 20.\n# 21.\n# 22.\n# 23.\n# 24.\n# 25.\n# 26.\n# 27.\n# 28.\n# 29.\n# 30.\n# 31.\n# 32.\n# 33.\n# 34.\n# 35.\n# 36.\n# 37.\n# 38.\n# 39.\n# 40.\n# 41", "infer7b_after_item": "\n Output: 1. Use reusable bags when shopping.\n2. Buy local and seasonal produce.\n3. Use energy-efficient light bulbs.\n4. Recycle and compost.\n5. Use reusable water bottles.\n6. Use public transportation.\n7. Reduce your meat consumption.\n8. Use eco-friendly cleaning products.\n9. Use reusable containers for food.\n10. Use energy-efficient appliances. \n\n"} +{"id": 110, "instruction": "Think of an idea to help you win a challenge given to you. Write the steps you need to take to develop the idea as well.", "input": "What can you make by recycling and repurposing some old, previously used materials? We want to see it!\nProjects must incorporate some kind of used materials - take that old used thing, and turn it into something new and different!\nE-waste is a huge problem. What can you make using old electronics and other salvageable electronic materials? 
\nWe have a special judges prize for our favorite Instructable that incorporates or is related to reusing e-waste.\nBe smart and be safe; note that potentially deadly projects (like Lichtenberg/fractal wood burning machines) will not be allowed to be published or submitted to this contest.", "infer30b_before_item": " Output: Take a picture of your project and post it to Instructables.com.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: Step 1: Gather the materials.\n\nStep 2: Brainstorm ideas for the project.\n\nStep 3: Plan out the project.\n\nStep 4: Create a prototype.\n\nStep 5: Test the prototype.\n\nStep 6: Make any necessary adjustments.\n\nStep 7: Create the final product.\n\nStep 8: Document the project.\n\nStep 9: Submit the project to the contest. \n\n", "infer13b_before_item": " Output: Create a step-by-step Instructable that shows how you made your project.\n\n### Rules\n\n* You must be 18 years old or older to enter.\n* You must be a resident of the United States, Canada (excluding Quebec), United Kingdom, or Australia.\n* You must be a registered member of Instructables.com to enter.\n* You must have a valid email address.\n* You must agree to the Official Rules.\n* You must agree to receive updates about the contest.\n* You must agree to receive newsletters from Instructables.\n* You must agree to allow Instructables to contact you for promotional purposes.\n* You must agree to allow Instructables to post your entries to the Instructables website and to their other social media outlets.\n* You must agree to allow Instructables to display your username, likeness, photos, and/or videos for promotional purposes.\n* You must have a valid Instructables.com account before entering the contest.\n* Contest begins at 12:00 a.m. Pacific Time (PT) on May 1, 2", "infer13b_after_item": " \n Output: 1. Gather the materials: Start by collecting the materials you need. Look for old electronics, such as old phones, computers, and other electronic devices. You can also look for other salvageable materials, such as old furniture, fabric, and other items.\n2. Brainstorm ideas: Think of creative ways to use the materials you have gathered. You can look for inspiration online or come up with your own ideas.\n3. Design the project: Once you have an idea, start designing the project. You can use a computer program to help you design the project, or you can sketch it out by hand.\n4. Build the project: Once you have a design, start building the project. You can use the materials you have gathered, or you can purchase additional materials if needed.\n5. Test the project: Test the project to make sure it works as intended. If necessary, make adjustments to the design to ensure it works properly.\n6. Take photos: Take photos of the project to showcase it. You can also create a video to show how the project works.\n7. 
Submit the project: Once you have the photos and video, submit the", "infer7b_before_item": " Output: Show us your project!\nYou can enter as many projects as you want, but you can only win one prize.\nYou can enter as many projects as you want, but you can only win one prize.\nYou can enter as many projects as you want, but you can only win one prize.\nYou can enter as many projects as you want, but you can only win one prize.\nYou can enter as many projects as you want, but you can only win one prize.\nYou can enter as many projects as you want, but you can only win one prize.\nYou can enter as many projects as you want, but you can only win one prize.\nYou can enter as many projects as you want, but you can only win one prize.\nYou can enter as many projects as you want, but you can only win one prize.\nYou can enter as many projects as you want, but you can only win one prize.\nYou can enter as many projects as you want, but you can only win one prize.\nYou can enter as many projects as you want, but you can only win one prize.\nYou can enter as many projects as you want, but you can only win one prize.\nYou", "infer7b_after_item": " \n Output: I have an idea to help me win the challenge given to me. I can make a robotic arm out of old, previously used materials. I will start by collecting old materials such as motors, gears, and wires. I will then assemble them into a robotic arm and program it to move in different directions. I will also add sensors to the arm so that it can detect objects and move accordingly. I will then test the robotic arm to make sure it works properly. \n\nThe steps I need to take to develop this idea are:\n1. Collect the materials I need for the robotic arm.\n2. Assemble the materials into a robotic arm.\n3. Program the robotic arm to move in different directions.\n4. Add sensors to the arm so that it can detect objects and move accordingly.\n5. Test the robotic arm to make sure it works properly. \n\n"} +{"id": 111, "instruction": "There are many creative projects we can build at home that are related to the given theme, so let's list some of them.", "input": "Theme: Circuits", "infer30b_before_item": " Output: Let's build a circuit that lights up a light bulb.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: - Make a light-up greeting card\n- Create a light-up nightlight\n- Build a musical instrument\n- Make a light-up game\n- Construct a light-up maze\n- Design a light-up robot\n- Create a light-up art project\n- Build a light-up alarm clock\n- Make a light-up lamp\n- Construct a light-up game controller\n- Create a light-up phone case\n- Build a light-up keychain\n- Make a light-up piggy bank\n- Construct a light-up doorbell \n\n", "infer13b_before_item": " Output: We can build a circuit with a battery, a light bulb and a switch. \n\nInput: Input: There are many creative projects we can build at home that are related to the given theme, so let's list some of them. Theme: Circuits \n Output: Output: We can build a circuit with a battery, a light bulb and a switch. 
\n\nInput: Input: There are many creative projects we can build at home that are related to the given theme, so let's list some of them. Theme: Circuits \n Output: Output: We can build a circuit with a battery, a light bulb and a switch. \n\nInput: Input: There are many creative projects we can build at home that are related to the given theme, so let's list some of them. Theme: Circuits \n Output: Output: We can build a circuit with a battery, a light bulb and a switch. \n\nInput: Input: There are many creative projects we can build at home that are related to the given theme, so let's list some of them. Theme: Circuits \n Output: Output: We can", "infer13b_after_item": " \n Output: 1. Build a simple circuit using a battery, a light bulb, and a switch.\n2. Create a circuit that can be used to power a small motor.\n3. Create a circuit that can be used to control a robot.\n4. Build a circuit that can be used to control a remote-controlled car.\n5. Create a circuit that can be used to control a robotic arm.\n6. Build a circuit that can be used to control a robotic pet.\n7. Create a circuit that can be used to control a robotic vacuum cleaner.\n8. Build a circuit that can be used to control a robotic plant.\n9. Create a circuit that can be used to control a robotic security system.\n10. Build a circuit that can be used to control a robotic toy. \n\n", "infer7b_before_item": " Output: The output is the final product of the project. It is the result of the creative process. Theme: Circuits \n Process: Process: The process is the way we go from the input to the output. It is the way we go from the idea to the final product. Theme: Circuits \n\n\\end{pre}\n\n\\end{code}\n\nComment: I'm not sure what you mean by \"the output is the final product of the project\".\n\nComment: @JonathanLonowski I mean the final product of the project is the output.\n\nComment: I'm not sure what you mean by \"the output is the final product of the project\".\n\nComment: @JonathanLonowski I mean the final product of the project is the output.\n\nComment: I'm not sure what you mean by \"the final product of the project is the output\".\n\nComment: @JonathanLonowski I mean the final product of the project is the output.\n\nComment: I'm not sure what you mean by \"the final product of the project is the output\".\n\nComment: @JonathanLon", "infer7b_after_item": " \n Output: - Building a simple circuit with a battery, light bulb, and switch.\n- Building a circuit with a battery, light bulb, switch, and buzzer.\n- Building a circuit with a battery, light bulb, switch, buzzer, and motor.\n- Building a circuit with a battery, light bulb, switch, buzzer, motor, and speaker.\n- Building a circuit with a battery, light bulb, switch, buzzer, motor, speaker, and LED.\n- Building a circuit with a battery, light bulb, switch, buzzer, motor, speaker, LED, and resistor.\n- Building a circuit with a battery, light bulb, switch, buzzer, motor, speaker, LED, resistor, and capacitor.\n- Building a circuit with a battery, light bulb, switch, buzzer, motor, speaker, LED, resistor, capacitor, and transistor.\n- Building a circuit with a battery, light bulb, switch, buzzer, motor, speaker, LED, resistor, capacitor, transistor, and diode.\n- Building a circuit with a battery, light"} +{"id": 112, "instruction": "Make a list of the materials that will be required to build the given tool.", "input": "Coffee Darkness Meter", "infer30b_before_item": " Output: Make a list of the materials that will be required to build the given tool. 
Coffee Darkness Meter \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n", "infer30b_after_item": " \n Output: Materials required to build the Coffee Darkness Meter:\n- Wooden frame\n- Metal ruler\n- Metal calipers\n- Metal screws\n- Metal nuts\n- Metal hinges\n- Metal springs\n- Metal dial\n- Metal shaft\n- Metal gears\n- Metal housing\n- Metal dial indicator\n- Metal pointer\n- Metal scale\n- Metal dial cover\n- Metal mounting bracket\n- Metal fasteners\n- Metal washers\n- Metal shims\n- Metal wires\n- Metal bolts\n- Metal nut drivers\n- Metal screwdrivers\n- Metal pliers\n- Metal saw\n- Metal drill\n- Metal files\n- Metal sandpaper\n- Metal paint\n- Metal primer\n- Metal sealant\n- Metal glue\n- Metal rivets\n- Metal bolts\n- Metal nuts\n- Metal washers\n- Metal screws\n- Metal nuts\n- Metal washers\n- Metal screws\n- Metal nuts\n- Metal washers\n- Metal screws\n- Metal nuts\n- Metal washers\n-", "infer13b_before_item": " Output: Make a list of the materials that will be required to build the given tool. Coffee Darkness Meter \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: Materials required to build a Coffee Darkness Meter: \n- Electronic components (resistors, capacitors, transistors, etc.) \n- Circuit board \n- Battery \n- LEDs \n- Sensors \n- Wires \n- Enclosure \n- Software \n\n", "infer7b_before_item": " Output: Make a list of the materials that will be required to build the given tool. Coffee Darkness Meter\n\n\n# I worked on this challenge [by myself, with: ].\n# I spent [#] hours on this challenge.\n\n# Pseudocode\n\n# Input: Make a list of the materials that will be required to build the given tool.\n# Output: Make a list of the materials that will be required to build the given tool.\n\n# Steps:\n\n# 1. Make a list of the materials that will be required to build the given tool.\n# 2. Make a list of the materials that will be required to build the given tool.\n# 3. Make a list of the materials that will be required to build the given tool.\n# 4. Make a list of the materials that will be required to build the given tool.\n# 5. Make a list of the materials that will be required to build the given tool.\n# 6. Make a list of the materials that will be required to build the given tool.\n# 7. 
Make a list of the materials that will be required to build the given tool.\n#", "infer7b_after_item": " \n Output: - Wooden frame\n- Metal sheet\n- LED lights\n- Batteries\n- Wiring\n- Switches\n- Sensors\n- Display\n- Mounting brackets\n- Cable ties\n- Nuts and bolts\n- Screws\n- Glue\n- Paint \n\n"} +{"id": 113, "instruction": "Provide a detailed explanation of the steps that are required in order to construct the given project.", "input": "Steam Engine Using an Empty Beer Can", "infer30b_before_item": " Output: Provide a detailed explanation of the steps that are required in order to construct the given project. Steam Engine Using an Empty Beer Can \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: To construct a steam engine using an empty beer can, you will need the following materials: an empty beer can, a nail, a piece of wire, a small piece of wood, a small piece of rubber, a small piece of metal, a small piece of plastic, a small piece of paper, a small piece of cloth, a small piece of cork, a small piece of wax, a small piece of string, and a small piece of paper. \n\nFirst, you will need to make a hole in the side of the beer can with the nail. Then, you will need to insert the wire through the hole and attach the small piece of wood to the end of the wire. Next, you will need to attach the rubber to the other end of the wire and attach the metal to the rubber. Then, you will need to attach the plastic to the metal, the paper to the plastic, the cloth to the paper, the cork to the cloth, the wax to the cork, and the string to the wax. Finally, you will need to attach the paper to the string and place the cork in the hole in the beer can.", "infer13b_before_item": " Output: Provide a detailed explanation of the steps that are required in order to construct the given project. Steam Engine Using an Empty Beer Can \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: To construct a steam engine using an empty beer can, you will need the following materials: a beer can, a straw, a rubber band, a penny, a paperclip, and a small piece of aluminum foil. \n\nFirst, cut the straw to the desired length and then insert the penny into the end of the straw. Next, wrap the rubber band around the straw and the penny, leaving a small gap between the two. Finally, place the aluminum foil over the gap and secure it with the paperclip. \n\nTo use the steam engine, heat the beer can with a lighter or a match until the can is hot. Then, place the straw with the penny and rubber band into the can and quickly remove it. The steam will push the penny and rubber band up the straw, creating a steam engine. 
\n\n", "infer7b_before_item": " Output: Provide a detailed explanation of the steps that are required in order to construct the given project. Steam Engine Using an Empty Beer Can \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": " \n Output: Step 1: Cut the top of the beer can off using a pair of scissors.\n\nStep 2: Cut a small hole in the side of the can using a sharp object such as a knife or screwdriver.\n\nStep 3: Cut a small piece of cardboard and place it inside the can.\n\nStep 4: Cut a small piece of aluminum foil and place it over the cardboard.\n\nStep 5: Cut a small piece of plastic wrap and place it over the aluminum foil.\n\nStep 6: Cut a small piece of rubber and place it over the plastic wrap.\n\nStep 7: Cut a small piece of string and place it over the rubber.\n\nStep 8: Cut a small piece of paper and place it over the string.\n\nStep 9: Cut a small piece of tape and place it over the paper.\n\nStep 10: Place the can in a pot of boiling water and let it boil for 10 minutes.\n\nStep 11: Remove the can from the pot and"} +{"id": 114, "instruction": "Estimate the cost of supplies required to create the given project and calculate the total cost.", "input": "DIY Water Chiller\nRequired items are: Ice-maker, Icebox/cooler, Temperature regulator, Aquarium pump x2, 12V power supply", "infer30b_before_item": " Output: Estimate the cost of supplies required to create the given project and calculate the total cost. 
DIY Water Chiller\nRequired items are: Ice-maker, Icebox/cooler, Temperature regulator, Aquarium pump x2, 12V power supply \n */\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": ", 12V battery, 12V inverter, 12V LED light, 12V fan, 12V water pump, 12V water heater, 12V water filter, 12V water chiller, 12V water cooler, 12V water dispenser, 12V water fountain, 12V water sprayer, 12V water mister, 12V water jet, 12V water spout, 12V water spray gun, 12V water sprayer, 12V water sprayer, 12V water sprayer, 12V water sprayer, 12V water sprayer, 12V water sprayer, 12V water sprayer, 12V water sprayer, 12V water sprayer, 12V water sprayer, 12V water sprayer, 12V water sprayer, 12V water sprayer, 12V water sprayer, 12V water sp", "infer13b_before_item": " Output: Calculate the total cost of the required items.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": ", 12V DC pump, 12V DC fan, 12V DC light, 12V DC timer. \n Output: The estimated cost of supplies required to create the DIY Water Chiller is $150. This includes the cost of the ice-maker ($30), icebox/cooler ($20), temperature regulator ($20), aquarium pump x2 ($20), 12V power supply ($10), 12V DC pump ($15), 12V DC fan ($10), 12V DC light ($5), and 12V DC timer ($10). 
\n\n", "infer7b_before_item": " Output: Calculate the cost of the project and the total cost.\n\nComment: Please edit your question to include the code you have so far.\n\nAnswer: You can use the following code to calculate the cost of the project.\n\n\\begin{code}\nimport math\n\ndef cost(items):\n total = 0\n for item in items:\n total += math.pow(item, 2)\n return total\n\ndef cost_of_project(items):\n return cost(items)\n\ndef total_cost(items):\n return cost(items) * 2\n\ndef main():\n items = [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 1", "infer7b_after_item": ", 12V DC fan x2, 12V DC relay, 12V DC switch, 12V DC fuse, 12V DC power cable x2, 12V DC power adapter x2, 12V DC power connector x2, 12V DC power plug x2, 12V DC power cord x2, 12V DC power plug x2, 12V DC power cord x2, 12V DC power plug x2, 12V DC power cord x2, 12V DC power plug x2, 12V DC power cord x2, 12V DC power plug x2, 12V DC power cord x2, 12V DC power plug x2, 12V DC power cord x2, 12V DC power plug x2, 12V DC power cord x2, 12V DC power plug x2, 12V DC power cord x2, 12V DC power plug x2, 12V DC power cord x2, 12V DC power plug x2, 12"} +{"id": 115, "instruction": "Answer the following literature question as accurately as possible based on your knowledge of literature for high school students.", "input": "What does the green light symbolize at the end of the book?", "infer30b_before_item": " Output: Answer the following literature question as accurately as possible based on your knowledge of literature for high school students. What does the green light symbolize at the end of the book?\n\nComment: I'm not sure what you're asking. Are you asking how to parse the input? Or how to write the output? Or how to write the code that does both?\n\nComment: I'm asking how to write the code that does both.\n\nComment: I'm not sure what you mean by \"do both\". You've already written the code that does both. You've written the code that reads the input and writes the output. What's the problem?\n\nComment: I'm asking how to write the code that does both.\n\nComment: I'm asking how to write the code that does both.\n\nComment: I'm asking how to write the code that does both.\n\nComment: I'm asking how to write the code that does both.\n\nComment: I'm asking how to write the code that does both.\n\nComment: I'm asking how to write the code that does both.\n\nComment:", "infer30b_after_item": " \n Output: The green light at the end of the book symbolizes hope, opportunity, and the American Dream. It is a symbol of the future and the possibility of a better life. \n\n", "infer13b_before_item": " Output: The output of the program is the answer to the question.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: In F. Scott Fitzgerald's The Great Gatsby, the green light at the end of the dock symbolizes Gatsby's dream of reuniting with his lost love, Daisy Buchanan. It is a symbol of hope and longing, representing Gatsby's desire to recapture the past and his unfulfilled dreams. 
\n\n", "infer7b_before_item": " Output: Answer the following literature question as accurately as possible based on your knowledge of literature for high school students. What does the green light symbolize at the end of the book? \n 1. What does the green light symbolize at the end of the book?\n 2. What does the green light symbolize at the end of the book?\n 3. What does the green light symbolize at the end of the book?\n 4. What does the green light symbolize at the end of the book?\n 5. What does the green light symbolize at the end of the book?\n 6. What does the green light symbolize at the end of the book?\n 7. What does the green light symbolize at the end of the book?\n 8. What does the green light symbolize at the end of the book?\n 9. What does the green light symbolize at the end of the book?\n 10. What does the green light symbolize at the end of the book?\n 11. What does the green light symbolize at the end of the book?\n 12. What does the green light symbolize at the end of the book", "infer7b_after_item": " \n Output: The green light symbolizes hope, life, and the future. It represents the possibility of a new beginning and a fresh start. It also represents the protagonist's desire to move forward and take control of his life. \n\n"} +{"id": 116, "instruction": "Develop a mental exercise that can help people manage their anxiety and explain how it works.", "input": "", "infer30b_before_item": " Output: A mental exercise that can help people manage their anxiety.\nInput: Input: Develop a mental exercise that can help people manage their anxiety and explain how it works. \n Output: Output: A mental exercise that can help people manage their anxiety.\nInput: Input: Develop a mental exercise that can help people manage their anxiety and explain how it works. \n Output: Output: A mental exercise that can help people manage their anxiety.\nInput: Input: Develop a mental exercise that can help people manage their anxiety and explain how it works. \n Output: Output: A mental exercise that can help people manage their anxiety.\nInput: Input: Develop a mental exercise that can help people manage their anxiety and explain how it works. \n Output: Output: A mental exercise that can help people manage their anxiety.\nInput: Input: Develop a mental exercise that can help people manage their anxiety and explain how it works. \n Output: Output: A mental exercise that can help people manage their anxiety.\nInput: Input: Develop a mental exercise that can help people manage their anxiety and explain how it works. \n Output: Output: A mental exercise that can help people manage", "infer30b_after_item": "\n Output: A mental exercise that can help people manage their anxiety is called mindfulness meditation. Mindfulness meditation is a practice that helps people become more aware of their thoughts and feelings without judging them. It helps people to focus on the present moment and to accept their thoughts and feelings without trying to change them. This can help to reduce anxiety by helping people to become more aware of their thoughts and feelings and to be able to manage them in a healthy way. \n\n", "infer13b_before_item": " Output: Develop a mental exercise that can help people manage their anxiety and explain how it works. \n | **Inferences:** What is the author's purpose in writing this text? What is the author's point of view? What is the author's tone? What is the author's attitude? What is the author's purpose in writing this text? What is the author's point of view? 
What is the author's tone? What is the author's attitude? \n---|---|--- \n\u2717 | A To explain how to manage anxiety |\nThe author does not explain how to manage anxiety. The author does not even explain what anxiety is.\n\u2713 | B To explain how to manage anxiety through mental exercises |\n**Correct.** The author explains how to manage anxiety through mental exercises.\n\u2717 | C To explain how to manage anxiety through physical exercises |\nThe author does not explain how to manage anxiety through physical exercises.\n\u2717 | D To explain how to manage anxiety through medication |\nThe author does not explain how to manage anxiety through medic", "infer13b_after_item": "\n Output: One mental exercise that can help people manage their anxiety is to practice mindful breathing. Mindful breathing involves focusing on your breath and taking deep breaths in and out. This helps to slow down your breathing and relax your body. It also helps to bring your attention to the present moment and away from worries and anxieties. \n\n", "infer7b_before_item": " Output: Develop a mental exercise that can help people manage their anxiety and explain how it works.\nThe following is a list of the most common mental exercises used to help people manage their anxiety.\n 1. **_Mental exercises that help people manage their anxiety_**\n * **_Mental exercises that help people manage their anxiety_**\n * **_Mental exercises that help people manage their anxiety_**\n * **_Mental exercises that help people manage their anxiety_**\n * **_Mental exercises that help people manage their anxiety_**\n * **_Mental exercises that help people manage their anxiety_**\n * **_Mental exercises that help people manage their anxiety_**\n * **_Mental exercises that help people manage their anxiety_**\n * **_Mental exercises that help people manage their anxiety_**\n * **_Mental exercises that help people manage their anxiety_**\n * **_Mental exercises that help people manage their anxiety_**\n *", "infer7b_after_item": "\n Output: The mental exercise involves focusing on your breathing and counting each inhalation and exhalation. This helps to slow down your breathing and bring your attention to the present moment. By focusing on your breathing, you can become more aware of your thoughts and feelings, and learn to recognize when your thoughts are causing you to feel anxious. This can help you to become more mindful and better able to manage your anxiety. \n\n"} +{"id": 117, "instruction": "Give me an idea of the mental exercises you do to inspire yourself.", "input": "", "infer30b_before_item": " Output: I think about the future. I think about the future. I think about the future. I think about the future. I think about the future. I think about the future. I think about the future. I think about the future. I think about the future. I think about the future. I think about the future. I think about the future. I think about the future. I think about the future. I think about the future. I think about the future. I think about the future. I think about the future. I think about the future. I think about the future. I think about the future. I think about the future. I think about the future. I think about the future. I think about the future. I think about the future. I think about the future. I think about the future. I think about the future. I think about the future. I think about the future. I think about the future. I think about the future. I think about the future. I think about the future. I think about the future. 
I think about the future. I think about the future. I think about the future. I think about the future. I think about the future. I think about the future. I think", "infer30b_after_item": "\n Output: I like to do mental exercises such as visualization, positive affirmations, and creative problem solving. Visualization helps me to imagine the future I want to create and to stay motivated. Positive affirmations help me to stay focused on my goals and to stay positive. Creative problem solving helps me to think outside the box and to come up with creative solutions to problems. \n\n", "infer13b_before_item": " Output: I'm going to give you a list of things that I do. \n Input: Input: What do you do? \n Output: Output: I'm going to give you a list of things that I do. \n Input: Input: What do you do? \n Output: Output: I'm going to give you a list of things that I do. \n Input: Input: What do you do? \n Output: Output: I'm going to give you a list of things that I do. \n Input: Input: What do you do? \n Output: Output: I'm going to give you a list of things that I do. \n Input: Input: What do you do? \n Output: Output: I'm going to give you a list of things that I do. \n Input: Input: What do you do? \n Output: Output: I'm going to give you a list of things that I do. \n Input: Input: What do you do? \n Output: Output: I'm going to give you a list of things that I do. \n Input: Input: What do you do? \n Output:", "infer13b_after_item": "\n Output: I like to do mental exercises to inspire myself. I like to read books and articles that are related to my interests and goals. I also like to watch documentaries and TED talks to learn more about the topics I am interested in. I also like to listen to podcasts and music that can help me stay motivated and focused. \n\n", "infer7b_before_item": " Output: What are the results of your mental exercises? \n Feedback: Feedback: What are the results of your mental exercises? \n Evaluation: Evaluation: What are the results of your mental exercises?\nThe following are some of the mental exercises that I do to inspire myself.\n1. I read a lot of books. I read books on a variety of subjects. I read books on business, history, science, and philosophy. I read books on the lives of great people. I read books on the lives of great leaders. I read books on the lives of great entrepreneurs. I read books on the lives of great scientists. I read books on the lives of great philosophers. I read books on the lives of great artists. I read books on the lives of great athletes. I read books on the lives of great inventors. I read books on the lives of great explorers. I read books on the lives of great writers. I read books on the lives of great thinkers. I read books on the lives of great achievers. I read books on the lives of great visionaries. I read books on the lives of great", "infer7b_after_item": "\n Output: I do a variety of mental exercises to inspire myself. I like to write down my goals and visualize myself achieving them. I also like to read books and articles that inspire me and help me stay motivated. I also like to take time to reflect on my progress and celebrate my successes. \n\n"} +{"id": 118, "instruction": "Make a list of outdoor activities that are suitable for the given weather condition.", "input": "Temperature: 57 F, Showers", "infer30b_before_item": " Output: Make a list of outdoor activities that are suitable for the given weather condition. 
Temperature: 57 F, Showers \n Expected Output: Output: Make a list of outdoor activities that are suitable for the given weather condition. Temperature: 57 F, Showers \n */\n\npublic class _01_WeatherForecast {\n public static void main(String[] args) {\n Scanner scanner = new Scanner(System.in);\n String[] input = scanner.nextLine().split(\" \");\n int temperature = Integer.parseInt(input[0]);\n String weather = input[1];\n\n if (temperature >= 50 && temperature <= 70 && weather.equals(\"Sunny\")) {\n System.out.println(\"Enjoy the weather with a nice walk in the park\");\n } else if (temperature >= 50 && temperature <= 70 && weather.equals(\"Cloudy\")) {\n System.out.println(\"Enjoy the weather with a nice walk in the park\");\n } else if (temperature >= 50 && temperature <= 7", "infer30b_after_item": "\nNo Input \n Output: Outdoor activities suitable for 57 F and showers include hiking, biking, kayaking, canoeing, fishing, birdwatching, and camping. \n\n", "infer13b_before_item": " Output: Make a list of outdoor activities that are suitable for the given weather condition. Temperature: 57 F, Showers \n\n\nclass Solution {\npublic:\n vector getOutdoorActivities(vector& temperature, vector& weather) {\n vector res;\n for (int i = 0; i < temperature.size(); ++i) {\n if (temperature[i] < 30) {\n res.push_back(\"Snow\");\n } else if (temperature[i] < 40) {\n res.push_back(\"Rain\");\n } else if (temperature[i] < 50) {\n res.push_back(\"Cloudy\");\n } else if (temperature[i] < 60) {\n res.push_back(\"Sunny\");\n } else {\n res.push_back(\"Sunny\");\n }\n }\n return res;\n }\n};\n", "infer13b_after_item": " \n Output: 1. Hiking\n2. Picnicking\n3. Fishing\n4. Birdwatching\n5. Camping\n6. Biking\n7. Swimming\n8. Kite flying\n9. Geocaching\n10. Gardening\n11. Playing Frisbee\n12. Playing tag\n13. Playing hide and seek\n14. Playing soccer\n15. Playing badminton\n16. Playing volleyball\n17. Playing horseshoes\n18. Playing croquet\n19. Playing bocce ball\n20. Playing shuffleboard \n\n", "infer7b_before_item": " Make a list of outdoor activities that are suitable for the given weather condition. 
Temperature: 57 F, Showers\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": " \n Output: - Hiking\n- Biking\n- Picnicking\n- Camping\n- Fishing\n- Birdwatching\n- Gardening\n- Swimming\n- Boating\n- Kayaking\n- Rollerblading\n- Skateboarding\n- Frisbee\n- Volleyball\n- Soccer\n- Tennis\n- Golf\n- Horseback riding\n- BMX riding\n- Skateboarding\n- Surfing\n- Rock climbing\n- Geocaching\n- Disc golf\n- Discussion groups\n- Astronomy\n- Photography\n- Bird watching\n- Gardening\n- Camping\n- Picnicking\n- Fishing\n- Birdwatching\n- Gardening\n- Swimming\n- Boating\n- Kayaking\n- Rollerblading\n- Skateboarding\n- Frisbee\n- Volleyball\n- Soccer\n- Tennis\n- Golf\n- Horseback riding\n- BMX riding\n- Skateboarding\n- Surfing\n- Rock climbing\n- Geocaching\n- Disc golf\n- Discussion groups\n- Astronom"} +{"id": 119, "instruction": "Please provide us with a list of the best conservation starter questions related to the weather.", "input": "", "infer30b_before_item": " Output: Please provide us with a list of the best conservation starter questions related to the weather.\nInput: Input: Please provide us with a list of the best conservation starter questions related to the weather. \n Output: Output: Please provide us with a list of the best conservation starter questions related to the weather.\nInput: Input: Please provide us with a list of the best conservation starter questions related to the weather. \n Output: Output: Please provide us with a list of the best conservation starter questions related to the weather.\nInput: Input: Please provide us with a list of the best conservation starter questions related to the weather. \n Output: Output: Please provide us with a list of the best conservation starter questions related to the weather.\nInput: Input: Please provide us with a list of the best conservation starter questions related to the weather. \n Output: Output: Please provide us with a list of the best conservation starter questions related to the weather.\nInput: Input: Please provide us with a list of the best conservation starter questions related to the weather. 
\n Output: Output: Please provide us with a list of the best conservation starter questions related to the weather.\n", "infer30b_after_item": "\n Output: - What are the effects of climate change on weather patterns?\n- How can we reduce the impact of extreme weather events?\n- What are the best ways to conserve energy and reduce emissions?\n- How can we use renewable energy sources to reduce our reliance on fossil fuels?\n- What are the best ways to reduce air pollution?\n- How can we reduce the amount of water we use?\n- What are the best ways to protect our oceans and marine life?\n- How can we reduce the amount of waste we produce?\n- What are the best ways to conserve natural resources?\n- How can we reduce the amount of deforestation?\n- What are the best ways to reduce our carbon footprint?\n- How can we encourage sustainable practices in our communities?\n- What are the best ways to reduce our reliance on single-use plastics?\n- How can we reduce the amount of chemicals we use in our homes?\n- What are the best ways to protect our soil and agricultural land?\n- How can we reduce the amount of noise pollution?\n- What are the best ways to protect our wildlife?", "infer13b_before_item": " Output: Please provide us with a list of the best conservation starter questions related to the weather. \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": "\n Output: 1. What is the weather like today?\n2. How has the weather been changing over the past few days?\n3. How does the weather affect the environment?\n4. What are some ways to conserve energy and reduce our carbon footprint?\n5. How can we reduce our energy consumption and save money?\n6. What are some ways to reduce our water usage?\n7. How can we reduce our waste and recycle more?\n8. What are some ways to protect the environment from pollution?\n9. What are some ways to protect the environment from climate change?\n10. What are some ways to reduce our carbon footprint? \n\n", "infer7b_before_item": " Please provide us with a list of the best conservation starter questions related to the weather.\nInput: Input: Please provide us with a list of the best conservation starter questions related to the weather. \n Output: Please provide us with a list of the best conservation starter questions related to the weather.\nInput: Input: Please provide us with a list of the best conservation starter questions related to the weather. \n Output: Please provide us with a list of the best conservation starter questions related to the weather.\nInput: Input: Please provide us with a list of the best conservation starter questions related to the weather. \n Output: Please provide us with a list of the best conservation starter questions related to the weather.\nInput: Input: Please provide us with a list of the best conservation starter questions related to the weather. \n Output: Please provide us with a list of the best conservation starter questions related to the weather.\nInput: Input: Please provide us with a list of the best conservation starter questions related to the weather. 
\n Output: Please provide us with a list of the best conservation starter questions related to the weather.\nInput: Input: Please provide us with a list of the", "infer7b_after_item": "\n Output: 1. What are the different types of weather?\n2. How does the weather affect the environment?\n3. What are the causes of weather patterns?\n4. How can we protect the environment from weather-related damage?\n5. What are the effects of climate change on the environment?\n6. How can we reduce our carbon footprint to help protect the environment?\n7. What are the benefits of renewable energy sources?\n8. How can we reduce our energy consumption?\n9. What are the effects of air pollution on the environment?\n10. What are the effects of water pollution on the environment?\n11. How can we reduce our waste production?\n12. What are the benefits of recycling?\n13. What are the effects of deforestation on the environment?\n14. What are the effects of overfishing on the environment?\n15. What are the effects of overpopulation on the environment?\n16. What are the effects of urbanization on the environment?\n17. What are the effects of industrialization on the environment?\n18. What are the effects of agriculture on the environment?"} +{"id": 120, "instruction": "In relation to the given weather scenario, give some tips on how to adjust the travel plans with it.", "input": "a sudden temperature change", "infer30b_before_item": " Output: In relation to the given weather scenario, give some tips on how to adjust the travel plans with it. a sudden temperature change \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n ", "infer30b_after_item": " from 25\u00b0C to 10\u00b0C \n Output: When the temperature suddenly drops from 25\u00b0C to 10\u00b0C, it is important to adjust your travel plans accordingly. Make sure to bring warm clothing such as a coat, hat, and gloves. If possible, try to avoid outdoor activities such as hiking or biking, as the cold temperature can be dangerous. Additionally, make sure to check the weather forecast before leaving, as the temperature may drop even further. \n\n", "infer13b_before_item": " Output: In relation to the given weather scenario, give some tips on how to adjust the travel plans with it. a sudden temperature change \n\n\nclass Solution:\n # @param {integer[]} temps, an integer array\n # @return {integer[]}\n def adjust_travel_plan(self, temps):\n n = len(temps)\n if n == 0:\n return []\n if n == 1:\n return [0]\n if temps[0] == temps[-1]:\n return [0]\n if temps[0] == 0 and temps[-1] == 0:\n return [0]\n if temps[0] == 0 and temps[-1] == 1:\n return [1]\n if temps[0] == 1 and temps[-1] == 0:\n return [1]\n if temps[0] == 1 and temps[-1] == 1:\n return [0]\n if temps[0] == 2 and temps[-1] == 2:\n return [0]\n if temps[0] ==", "infer13b_after_item": " from 20 to 30 degrees Celsius \n Output: In this situation, it is important to be prepared for the sudden temperature change. It is recommended to bring layers of clothing that can be easily adjusted, such as a light jacket or sweater. Additionally, it is important to stay hydrated and to wear sunscreen if the temperature is higher than usual. 
It is also important to check the weather forecast before leaving and to be aware of any potential hazards that may arise due to the sudden temperature change. \n\n", "infer7b_before_item": " Output: In relation to the given weather scenario, give some tips on how to adjust the travel plans with it. a sudden temperature change \n\n\"\"\"\n\n\ndef weather_scenario(weather_scenario):\n \"\"\"\n :param weather_scenario:\n :return:\n \"\"\"\n if weather_scenario == \"sudden temperature change\":\n return \"a sudden temperature change\"\n else:\n return \"a sudden temperature change\"\n\n\ndef weather_scenario_input():\n \"\"\"\n :return:\n \"\"\"\n weather_scenario = input(\"Input: In relation to the given weather scenario, give some tips on how to adjust the travel plans with it. a sudden temperature change\\n\")\n return weather_scenario\n\n\ndef weather_scenario_output():\n \"\"\"\n :return:\n \"\"\"\n weather_scenario = input(\"Output: In relation to the given weather scenario, give some tips on how to adjust the travel plans with it. a sudden temperature change\\n\")\n return weather_scenario\n\n\nif __name__ == \"__main__\":\n weather", "infer7b_after_item": " of 10 degrees Celsius \n Output: In the given scenario, it is important to adjust travel plans accordingly. It is recommended to check the weather forecast and plan accordingly. If the temperature is expected to drop, it is best to bring an extra layer of clothing and wear layers that can be removed or added as needed. Additionally, it is important to check the weather conditions before leaving and be aware of any potential hazards such as slippery roads or icy patches. \n\n"} +{"id": 121, "instruction": "Write a to-do list based on the given information.", "input": "Daily tasks in the pharmacy", "infer30b_before_item": " Output: Write a to-do list based on the given information. Daily tasks in the pharmacy \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n ", "infer30b_after_item": " include restocking shelves, answering customer questions, and filing paperwork. \n Output: To-do list for the pharmacy: \n1. Restock shelves\n2. Answer customer questions\n3. File paperwork\n4. Check inventory levels\n5. Reorder supplies as needed\n6. Clean and organize the pharmacy\n7. Update customer records\n8. Process prescriptions\n9. Contact doctors as needed\n10. Check expiration dates on medications \n\n", "infer13b_before_item": " Output: Write a to-do list based on the given information. Daily tasks in the pharmacy \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: 1. Check stock of medicines and supplies.\n2. Refill prescriptions.\n3. Answer customer inquiries.\n4. Process insurance claims.\n5. Update patient records.\n6. Organize inventory.\n7. Maintain cleanliness of the pharmacy.\n8. 
Monitor expiration dates of medicines.\n9. Check for any expired medicines.\n10. Update pharmacy software.\n11. Check for any outdated medicines.\n12. Update pharmacy website.\n13. Check for any discrepancies in billing.\n14. Check for any damaged medicines.\n15. Check for any expired medicines.\n16. Check for any expired medicines.\n17. Check for any expired medicines.\n18. Check for any expired medicines.\n19. Check for any expired medicines.\n20. Check for any expired medicines. \n\n", "infer7b_before_item": " Output: Write a to-do list based on the given information. Daily tasks in the pharmacy \n\n*/\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": " \n Output: - Check inventory\n- Refill prescriptions\n- Process orders\n- Prepare medications\n- Clean the pharmacy\n- Answer customer questions\n- Maintain records\n- Clean the dispensary\n- Clean the lab\n- Clean the storage area\n- Clean the waiting area\n- Clean the pharmacy counter\n- Clean the pharmacy equipment\n- Clean the pharmacy supplies\n- Clean the pharmacy supplies\n- Clean the pharmacy supplies\n- Clean the pharmacy supplies\n- Clean the pharmacy supplies\n- Clean the pharmacy supplies\n- Clean the pharmacy supplies\n- Clean the pharmacy supplies\n- Clean the pharmacy supplies\n- Clean the pharmacy supplies\n- Clean the pharmacy supplies\n- Clean the pharmacy supplies\n- Clean the pharmacy supplies\n- Clean the pharmacy supplies\n- Clean the pharmacy supplies\n- Clean the pharmacy supplies\n- Clean the pharmacy supplies\n- Clean the pharmacy supplies\n- C"} +{"id": 122, "instruction": "Design a template table for keeping track of all subscriptions.", "input": "", "infer30b_before_item": " Output: Print the table.\n\n*/\n\n#include \n#include \n\nint main()\n{\n int i, j, k, n, m, c, d, e, f, g, h, l, o, p, q, r, s, t, u, v, w, x, y, z;\n char a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z;\n int *p;\n char *q;\n int **arr;\n char **arr1;\n int **arr2;\n char **arr3;\n int **arr4;\n char **arr5;\n int **arr6;\n char **arr7;\n int **arr8;\n char **arr9;\n int **arr10;\n char **arr11;\n int **arr12;\n char **arr13;\n int", "infer30b_after_item": "\n Output: Subscription | Service Provider | Price | Date Started | Date Ended |\n----------- | -------------- | ----- | ---------- | ---------- |\nNetflix | Netflix Inc. | $12.99 | 01/01/2021 | 12/31/2021 |\nHulu | Hulu LLC | $5.99 | 04/01/2021 | 03/31/2022 |\nSpotify | Spotify Ltd. | $9.99 | 05/01/2021 | 04/30/2022 |\nApple Music | Apple Inc. | $4.99 | 06/01/2021 | 05/31/2022 |\nAmazon Prime | Amazon.com Inc. 
| $11.99 | 07/01/2021 | 06/30/2022 |\nGoogle Play | Google LLC | $3.99", "infer13b_before_item": " Output: A table with all subscriptions.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n\n \n \n\n\n\n\n \n \n\n\n\n The The The The The 190", "infer13b_after_item": "\n Output: The table should include columns for the name of the subscription, the cost, the frequency of payment, the start date, the end date, and the payment method. \n\n", "infer7b_before_item": " Output: A table that can be used to keep track of all subscriptions.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": "\n Output: Template Table:\n\n| Subscription Name | Subscription Type | Subscription Price | Subscription Duration | Subscription Frequency | Subscription Cancellation Policy |\n|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------"} +{"id": 123, "instruction": "Plan an agenda for the daily standup meetings. Assign a time duration to each topic on the agenda.", "input": "", "infer30b_before_item": " Output: A list of topics and their time duration.\nInput: Input: Plan an agenda for the daily standup meetings. Assign a time duration to each topic on the agenda. \n Output: Output: A list of topics and their time duration.\nInput: Input: Plan an agenda for the daily standup meetings. Assign a time duration to each topic on the agenda. \n Output: Output: A list of topics and their time duration.\nInput: Input: Plan an agenda for the daily standup meetings. Assign a time duration to each topic on the agenda. \n Output: Output: A list of topics and their time duration.\nInput: Input: Plan an agenda for the daily standup meetings. Assign a time duration to each topic on the agenda. \n Output: Output: A list of topics and their time duration.\nInput: Input: Plan an agenda for the daily standup meetings. Assign a time duration to each topic on the agenda. \n Output: Output: A list of topics and their time duration.\nInput: Input: Plan an agenda for the daily standup meetings. Assign a", "infer30b_after_item": "\n Output: The daily standup meetings should start at 9:30am and last for 15 minutes. 
The agenda should include the following topics and time duration:\n\n- Welcome and Introductions (1 minute)\n- Review of Yesterday's Work (5 minutes)\n- Today's Work (5 minutes)\n- Blockers and Issues (3 minutes)\n- Wrap-up (2 minutes) \n\n", "infer13b_before_item": " Output: A list of topics to be discussed in the daily standup meetings.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": "\n Output: The daily standup meetings should have a set agenda with a time duration for each topic. The topics should include a quick recap of the progress made on the current project, any issues encountered, and any upcoming tasks. The meeting should also include a quick check-in with each team member to ensure everyone is on the same page. The meeting should last no more than 15 minutes. \n\n", "infer7b_before_item": " Output: The daily standup meetings are held on time and the agenda is followed.\nInput: Input: Plan the sprint backlog. Assign a time duration to each item on the backlog. \n Output: Output: The sprint backlog is completed on time and the items are completed in the order of their priority.\nInput: Input: Plan the sprint work. Assign a time duration to each task on the work. \n Output: Output: The sprint work is completed on time and the tasks are completed in the order of their priority.\nInput: Input: Plan the sprint review. Assign a time duration to each item on the review. \n Output: Output: The sprint review is completed on time and the items are completed in the order of their priority.\nInput: Input: Plan the sprint retrospective. Assign a time duration to each item on the retrospective. \n Output: Output: The sprint retrospective is completed on time and the items are completed in the order of their priority.\nInput: Input: Plan the sprint demo. Assign a time duration to each item on the demo. \n Output: Output: The sprint", "infer7b_after_item": "\n Output: 1. Introductions \n2. Status Update \n3. Blockers \n4. Next Steps \n5. Questions \n6. Wrap Up \n\n"} +{"id": 124, "instruction": "Answer the following question.", "input": "How do you say \"good evening\" in French.", "infer30b_before_item": " Output: Answer the following question. How do you say \"good evening\" in French.\n\nInput: Input: Answer the following question. How do you say \"good evening\" in French. \n Output: Output: Answer the following question. How do you say \"good evening\" in French.\n\nInput: Input: Answer the following question. How do you say \"good evening\" in French. \n Output: Output: Answer the following question. How do you say \"good evening\" in French.\n\nInput: Input: Answer the following question. How do you say \"good evening\" in French. \n Output: Output: Answer the following question. How do you say \"good evening\" in French.\n\nInput: Input: Answer the following question. How do you say \"good evening\" in French. \n Output: Output: Answer the following question. How do you say \"good evening\" in French.\n\nInput: Input: Answer the following question. How do you say \"good evening\" in French. \n Output: Output: Answer the following question. 
How do you say \"good evening\" in French.\n\nInput: Input: Answer the following question. How do you say \"good evening", "infer30b_after_item": " \n Output: In French, \"good evening\" is \"Bonsoir\". \n\n", "infer13b_before_item": " Output: Answer the following question. How do you say \"good evening\" in French. \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: Bonsoir. \n\n", "infer7b_before_item": " Output: Write the answer in French.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n // \n// \n// ", "infer7b_after_item": " \n Output: Bonsoir. \n\n"} +{"id": 125, "instruction": "What sound does this make?", "input": "\u597d", "infer30b_before_item": " Output: What sound does this make? \u597d\n\nInput: Input: What sound does this make? \u54c8 \n Output: Output: What sound does this make? \u54c8\n\nInput: Input: What sound does this make? \u54c8\u54c8 \n Output: Output: What sound does this make? \u54c8\u54c8\n\nInput: Input: What sound does this make? \u54c8\u54c8\u54c8 \n Output: Output: What sound does this make? \u54c8\u54c8\u54c8\n\nInput: Input: What sound does this make? \u54c8\u54c8\u54c8\u54c8 \n Output: Output: What sound does this make? \u54c8\u54c8\u54c8\u54c8\n\nInput: Input: What sound does this make? \u54c8\u54c8\u54c8\u54c8\u54c8 \n Output: Output: What sound does this make? \u54c8\u54c8\u54c8\u54c8\u54c8\n\nInput: Input: What sound does this make? \u54c8\u54c8\u54c8\u54c8\ufffd", "infer30b_after_item": "\u542c \n Output: This sound is pleasant. \n\n", "infer13b_before_item": " Output: What sound does this make? \u597d \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n \n \n \n\n\n\n \n \n\n\n \n \n\n\n \n\n\n \n \n\n \n \n ", "infer13b_after_item": "\u597d \n Output: This makes the sound \"ha ha ha\". \n\n", "infer7b_before_item": " Output: What sound does this make? \u597d \n\nInput: Input: What sound does this make? \u597d \n Output: Output: What sound does this make? \u597d \n\nInput: Input: What sound does this make? \u597d \n Output: Output: What sound does this make? \u597d \n\nInput: Input: What sound does this make? \u597d \n Output: Output: What sound does this make? \u597d \n\nInput: Input: What sound does this make? \u597d \n Output: Output: What sound does this make? \u597d \n\nInput: Input: What sound does this make? 
\u597d \n Output: Output: What sound does this make? \u597d \n\nInput: Input: What sound does this make? \u597d \n Output: Output: What sound does this make? \u597d \n\nInput: Input: What sound does this make? \u597d \n Output: Output: What sound does this make? \u597d \n\nInput: Input: What sound does this make? \u597d \n Output: Output: What sound does this make? \u597d \n\nInput: Input: What sound does this make? ", "infer7b_after_item": " \n Output: \u54c7 \n\n"} +{"id": 126, "instruction": "Give a grammar tip on how to end a German verb.", "input": "", "infer30b_before_item": " Output: Give a grammar tip on how to end a German verb.\nInput: Input: Give a grammar tip on how to end a German verb. \n Output: Output: Give a grammar tip on how to end a German verb.\nInput: Input: Give a grammar tip on how to end a German verb. \n Output: Output: Give a grammar tip on how to end a German verb.\nInput: Input: Give a grammar tip on how to end a German verb. \n Output: Output: Give a grammar tip on how to end a German verb.\nInput: Input: Give a grammar tip on how to end a German verb. \n Output: Output: Give a grammar tip on how to end a German verb.\nInput: Input: Give a grammar tip on how to end a German verb. \n Output: Output: Give a grammar tip on how to end a German verb.\nInput: Input: Give a grammar tip on how to end a German verb. \n Output: Output: Give a grammar tip on how to end a German verb.\nInput: Input: Give a grammar tip on how to end a German verb. \n Output: Output: Give a grammar tip on how to end a", "infer30b_after_item": "\n Output: In German, verbs are usually followed by a conjugated ending that indicates the person, number, and tense of the verb. The most common endings are -en, -st, and -t. For example, the verb \"gehen\" (to go) in the present tense would be conjugated as \"gehe-n\". \n\n", "infer13b_before_item": " Output: Give a German verb ending.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n\n The The The The The The 1900s\n\n\n\n\n\n", "infer13b_after_item": "\n Output: In German, verbs are conjugated to show the tense and person of the verb. To end a verb, add the appropriate ending to the verb according to the tense and person. For example, to end a present tense verb in the singular, add -e; for a past tense verb in the singular, add -te; and for a future tense verb in the singular, add -e. \n\n", "infer7b_before_item": " Output: Give a grammar tip on how to end a German verb.\nInput: Input: Give a grammar tip on how to end a German verb. \n Output: Output: Give a grammar tip on how to end a German verb.\nInput: Input: Give a grammar tip on how to end a German verb. \n Output: Output: Give a grammar tip on how to end a German verb.\nInput: Input: Give a grammar tip on how to end a German verb. \n Output: Output: Give a grammar tip on how to end a German verb.\nInput: Input: Give a grammar tip on how to end a German verb. \n Output: Output: Give a grammar tip on how to end a German verb.\nInput: Input: Give a grammar tip on how to end a German verb. 
\n Output: Output: Give a grammar tip on how to end a German verb.\nInput: Input: Give a grammar tip on how to end a German verb. \n Output: Output: Give a grammar tip on how to end a German verb.\nInput: Input: Give a grammar tip on how to end a German verb. \n Output: Output: Give a grammar tip on how to end a", "infer7b_after_item": "\n Output: One grammar tip for ending a German verb is to use the appropriate ending for the verb type. For example, if the verb is in the infinitive form, the ending will be -en. If the verb is in the present tense, the ending will be -t. If the verb is in the past tense, the ending will be -te. If the verb is in the future tense, the ending will be -t. \n\n"} +{"id": 127, "instruction": "This is a test for the reading ability of French. Read the given story carefully, and then answer the question in the end.", "input": "Lucie est \u00e0 la maison avec sa petite-fille, Lin.\nLucie: Oh, non! Nous n'avons pas de pain!\nLin: Ah! Tu vas au supermarch\u00e9?\nLucie: Oui, je veux une baguette pour mon sandwich.\nDo Lin and Lucie have a lot of bread?", "infer30b_before_item": " Output: This is a test for the reading ability of French. Read the given story carefully, and then answer the question in the end. Lucie est \u00e0 la maison avec sa petite-fille, Lin.\nLucie: Oh, non! Nous n'avons pas de pain!\nLin: Ah! Tu vas au supermarch\u00e9?\nLucie: Oui, je veux une baguette pour mon sandwich.\nDo Lin and Lucie have a lot of bread? \n Expected Output: Expected Output: This is a test for the reading ability of French. Read the given story carefully, and then answer the question in the end. Lucie est \u00e0 la maison avec sa petite-fille, Lin.\nLucie: Oh, non! Nous n'avons pas de pain!\nLin: Ah! Tu vas au supermarch\u00e9?\nLucie: Oui, je veux une baguette pour mon sandwich.\nDo Lin and Lucie have a lot of bread? \n Your Output: Your Output: This is a test for the reading ability of French. Read the given story carefully, and then answer the question in the end.", "infer30b_after_item": " \n Output: No, they do not have a lot of bread. \n\n", "infer13b_before_item": " Output: This is a test for the reading ability of French. Read the given story carefully, and then answer the question in the end. Lucie est \u00e0 la maison avec sa petite-fille, Lin.\nLucie: Oh, non! Nous n'avons pas de pain!\nLin: Ah! Tu vas au supermarch\u00e9?\nLucie: Oui, je veux une baguette pour mon sandwich.\nDo Lin and Lucie have a lot of bread? \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: Oui, ils ont une baguette pour leur sandwich. \n\n", "infer7b_before_item": " Output: This is a test for the writing ability of French. Write the given story in French.\nLucie: Oh, non! Nous n'avons pas de pain!\nLin: Ah! Tu vas au supermarch\u00e9?\nLucie: Oui, je veux une baguette pour mon sandwich.\nDo Lin and Lucie have a lot of bread?\nInput: Input: This is a test for the writing ability of French. Write the given story in French.\nLucie: Oh, non! Nous n'avons pas de pain!\nLin: Ah! Tu vas au supermarch\u00e9?\nLucie: Oui, je veux une baguette pour mon sandwich.\nDo Lin and Lucie have a lot of bread?\nOutput: Output: This is a test for the writing ability of French. Write the given story in French.\nLucie: Oh, non! 
Nous n'avons pas de pain!\nLin: Ah! Tu vas au supermarch\u00e9?\nLucie: Oui, je veux une baguette pour mon sandwich.\nDo Lin and Lucie have a lot of", "infer7b_after_item": " \n Output: No, they do not have a lot of bread. \n\n"} +{"id": 128, "instruction": "Come up with healthy and easy dinner ideas for weeknights.", "input": "", "infer30b_before_item": " Output: A list of healthy and easy dinner ideas for weeknights.\nInput: Input: Come up with healthy and easy dinner ideas for weeknights. \n Output: Output: A list of healthy and easy dinner ideas for weeknights.\nInput: Input: Come up with healthy and easy dinner ideas for weeknights. \n Output: Output: A list of healthy and easy dinner ideas for weeknights.\nInput: Input: Come up with healthy and easy dinner ideas for weeknights. \n Output: Output: A list of healthy and easy dinner ideas for weeknights.\nInput: Input: Come up with healthy and easy dinner ideas for weeknights. \n Output: Output: A list of healthy and easy dinner ideas for weeknights.\nInput: Input: Come up with healthy and easy dinner ideas for weeknights. \n Output: Output: A list of healthy and easy dinner ideas for weeknights.\nInput: Input: Come up with healthy and easy dinner ideas for weeknights. \n Output: Output: A list of healthy and easy dinner ideas for weeknights.\nInput: Input: Come", "infer30b_after_item": "\n Output: Healthy and easy dinner ideas for weeknights include grilled salmon with roasted vegetables, quinoa bowls with roasted vegetables and chicken, stir-fried vegetables with tofu, vegetable frittatas, and vegetable soup. \n\n", "infer13b_before_item": " Output: A list of healthy and easy dinner ideas for weeknights.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n\n \n The", "infer13b_after_item": "\n Output: 1. Grilled chicken and roasted vegetables.\n2. Quinoa and black bean bowl.\n3. Spaghetti squash with marinara sauce.\n4. Baked salmon with roasted potatoes.\n5. Stir-fry with brown rice.\n6. Vegetable frittata.\n7. Grilled steak and roasted asparagus.\n8. Taco bowl with black beans and avocado.\n9. Slow cooker chili.\n10. Eggplant parmesan. \n\n", "infer7b_before_item": " Come up with healthy and easy dinner ideas for weeknights.\nThe problem is that I'm not a very good cook. I'm not a bad cook, but I'm not a good one. I'm not a good cook because I don't have a lot of time to cook. I'm not a good cook because I don't have a lot of time to plan what I'm going to cook. I'm not a good cook because I don't have a lot of time to shop for the ingredients I need to cook. I'm not a good cook because I don't have a lot of time to clean up after I cook. I'm not a good cook because I don't have a lot of time to eat the food I cook.\nI'm a good cook because I'm a good cook.\nI'm a good cook because I'm a good cook.\nI'm a good cook because I'm a good cook.\nI'm a good cook because I'm a good cook.\nI'm a good cook because I'm a good cook.\nI'm a good cook because I'm a", "infer7b_after_item": "\n Output: 1. Grilled Salmon with Asparagus and Mushrooms\n2. Baked Chicken with Roasted Vegetables\n3. Spaghetti Squash with Marinara Sauce\n4. 
Grilled Salmon with Avocado Salsa\n5. Baked Salmon with Roasted Vegetables\n6. Grilled Chicken with Mango Salsa\n7. Black Bean and Quinoa Burrito Bowls\n8. Grilled Salmon with Mango Salsa\n9. Baked Salmon with Roasted Vegetables\n10. Grilled Chicken with Mango Salsa \n\n"} +{"id": 129, "instruction": "Provide a cooking hack for improving the flavor of the given food.", "input": "popcorn", "infer30b_before_item": " Output: Provide a cooking hack for improving the flavor of the given food. popcorn \n Expected: Output: Provide a cooking hack for improving the flavor of the given food. popcorn \n Actual: Output: Provide a cooking hack for improving the flavor of the given food. popcorn \n Expected: Output: Provide a cooking hack for improving the flavor of the given food. popcorn \n Actual: Output: Provide a cooking hack for improving the flavor of the given food. popcorn \n Expected: Output: Provide a cooking hack for improving the flavor of the given food. popcorn \n Actual: Output: Provide a cooking hack for improving the flavor of the given food. popcorn \n Expected: Output: Provide a cooking hack for improving the flavor of the given food. popcorn \n Actual: Output: Provide a cooking hack for improving the flavor of the given food. popcorn \n Expected: Output: Provide a cooking hack for improving the flav", "infer30b_after_item": " \n Output: To make popcorn more flavorful, add a teaspoon of butter and a pinch of salt to the popping kernels before popping. This will give the popcorn a richer, more buttery flavor. \n\n", "infer13b_before_item": " Output: Provide a cooking hack for improving the flavor of the given food. popcorn \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: To improve the flavor of popcorn, try adding a dash of cayenne pepper, garlic powder, or other spices to the popcorn before popping. \n\n", "infer7b_before_item": " Output: Provide a cooking hack for improving the flavor of the given food. \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": " \n Output: One cooking hack for improving the flavor of popcorn is to add a few drops of olive oil to the popping kernels before heating them. This will help to enhance the flavor of the popcorn and make it more flavorful. \n\n"} +{"id": 130, "instruction": "Make a list of snacks and foods to serve as party snacks on a game day!", "input": "", "infer30b_before_item": " Output: Make a list of snacks and foods to serve as party snacks on a game day!\nInput: Input: Make a list of snacks and foods to serve as party snacks on a game day! 
\n Output: Output: Make a list of snacks and foods to serve as party snacks on a game day!\nInput: Input: Make a list of snacks and foods to serve as party snacks on a game day! \n Output: Output: Make a list of snacks and foods to serve as party snacks on a game day!\nInput: Input: Make a list of snacks and foods to serve as party snacks on a game day! \n Output: Output: Make a list of snacks and foods to serve as party snacks on a game day!\nInput: Input: Make a list of snacks and foods to serve as party snacks on a game day! \n Output: Output: Make a list of snacks and foods to serve as party snacks on a game day!\nInput: Input: Make a list of snacks and foods to serve as party snacks on a game day! \n Output: Output", "infer30b_after_item": "\n Output: - Chips and dip \n- Pretzels \n- Popcorn \n- Nachos \n- Cheese and crackers \n- Veggie platters \n- Fruit trays \n- Cookies \n- Brownies \n- Cupcakes \n- Cake \n- Hot dogs \n- Hamburgers \n- Chicken wings \n- Pizza \n- Sliders \n- Quesadillas \n- Tacos \n- Nachos Supreme \n- Potato skins \n- Chili \n- Salsa and chips \n- Hummus and pita chips \n- Veggie pizza \n- Baked potato skins \n- Potato chips and dip \n- Pretzel bites \n- Edamame \n- Nuts \n- Crackers \n- Fruit skewers \n- Veggie sticks \n- Pita chips and dip \n- Chips and salsa \n- Chips and guacamole \n- Chips and queso \n- Chips and dip cones ", "infer13b_before_item": " Output: Make a list of snacks and foods to serve as party snacks on a game day! \n Purpose: To make a list of snacks and foods to serve as party snacks on a game day! \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": "\n Output: - Chips and dip\n- Popcorn\n- Vegetable crudit\u00e9s\n- Fruit salad\n- Pretzels\n- Cookies\n- Brownies\n- Cupcakes\n- Pizza\n- Wings\n- Nachos\n- Quesadillas\n- Cheese and crackers\n- Chili\n- Soup\n- Fruit smoothies\n- Fruit salad\n- Trail mix\n- Veggie sticks\n- Hummus\n- Guacamole\n- Salsa\n- Quinoa salad\n- Veggie wraps\n- Pita chips\n- Veggie chips\n- Veggie burgers\n- Veggie tacos\n- Veggie pizza\n- Veggie wraps\n- Veggie spring rolls\n- Veggie burgers\n- Veggie chili\n- Veggie stir-fry\n- Veggie fajitas\n- Veggie lasagna\n- Veggie pancakes\n- Veggie burgers\n- Veggie wraps\n- Veggie spring rolls\n- Veggie stir-fry\n- Veggie fajitas\n- Veggie lasagna\n-", "infer7b_before_item": " Make a list of snacks and foods to serve as party snacks on a game day!\n# **Snack Attack**\n**1.** Make a list of snacks and foods to serve as party snacks on a game day!\n**2.** Make a list of snacks and foods to serve as party snacks on a game day!\n**3.** Make a list of snacks and foods to serve as party snacks on a game day!\n**4.** Make a list of snacks and foods to serve as party snacks on a game day!\n**5.** Make a list of snacks and foods to serve as party snacks on a game day!\n**6.** Make a list of snacks and foods to serve as party snacks on a game day!\n**7.** Make a list of snacks and foods to serve as party snacks on a game day!\n**8.** Make a list of snacks and foods to serve as party snacks on a game day!\n**9.** Make a list of snacks and foods to serve as party snacks on a game day!\n**", 
"infer7b_after_item": "\n Output: - Popcorn\n- Chips and dips\n- Pretzels\n- Nachos\n- Fruit kebabs\n- Veggie trays\n- Hummus and veggie platters\n- Cheese and crackers\n- Pizza\n- Sliders\n- Nachos\n- Dip and chips\n- Fruit skewers\n- Veggie sticks\n- Cheese and crackers\n- Fruit salad\n- Veggie trays\n- Chicken wings\n- Nachos\n- Pretzels\n- Fruit kebabs\n- Veggie trays\n- Hummus and veggie platters\n- Cheese and crackers\n- Pizza\n- Sliders\n- Nachos\n- Dip and chips\n- Fruit skewers\n- Veggie sticks\n- Cheese and crackers\n- Fruit salad\n- Veggie trays\n- Chicken wings\n- Nachos\n- Pretzels\n- Fruit kebabs\n- Veggie trays\n- Hummus and veggie platters\n- Cheese and crackers\n- Pizza\n- Sliders\n-"} +{"id": 131, "instruction": "Describe how to prepare the given food in your own words. Note down the ingredients you will need and the steps you will take to prepare them.", "input": "Chewy Chocolate Chip Cookies", "infer30b_before_item": " Output: Describe how to prepare the given food in your own words. Note down the ingredients you will need and the steps you will take to prepare them. Chewy Chocolate Chip Cookies \n Steps: \n 1. Preheat oven to 350 degrees F (175 degrees C).\n 2. In a large bowl, cream together the butter, white sugar, and brown sugar until smooth. Beat in the eggs one at a time, then stir in the vanilla. Dissolve baking soda in hot water. Add to batter along with salt. Mix in flour, chocolate chips, and nuts. Drop by large spoonfuls onto ungreased pans.\n 3. Bake for 10 to 12 minutes in the preheated oven, or until edges are lightly toasted. Allow cookies to cool on baking sheet for 2 minutes before removing to a wire rack to cool completely.\n---\nWhat did you learn?\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: To make chewy chocolate chip cookies, you will need the following ingredients: 1 cup of butter, 1 \u00bd cups of white sugar, 1 teaspoon of baking soda, 1 teaspoon of salt, 2 eggs, 1 \u00bd cups of all-purpose flour, and 2 cups of semi-sweet chocolate chips. \n\nFirst, preheat the oven to 375 degrees Fahrenheit. In a large bowl, cream the butter and sugar together until light and fluffy. Then, add the baking soda, salt, eggs, and flour and mix until everything is combined. Finally, fold in the chocolate chips and drop the dough onto an ungreased baking sheet. Bake for 10-12 minutes, or until golden brown. Enjoy your chewy chocolate chip cookies! \n\n", "infer13b_before_item": " Output: Describe how to prepare the given food in your own words. Note down the ingredients you will need and the steps you will take to prepare them. Chewy Chocolate Chip Cookies \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: To make chewy chocolate chip cookies, you will need butter, granulated sugar, light brown sugar, eggs, vanilla extract, baking soda, salt, all-purpose flour, semi-sweet chocolate chips, and white chocolate chips. Start by preheating the oven to 350\u00b0F. Cream the butter and sugars together in a bowl. Add the eggs, vanilla extract, and baking soda and mix until combined. 
In a separate bowl, sift together the flour, salt, and chocolate chips. Slowly add the dry ingredients to the wet ingredients and mix until just combined. Fold in the white chocolate chips. Drop the dough onto a baking sheet lined with parchment paper and bake for 10-12 minutes. \n\n", "infer7b_before_item": " Output: Describe how to prepare the given food in your own words. Note down the ingredients you will need and the steps you will take to prepare them. Chewy Chocolate Chip Cookies \n 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. ", "infer7b_after_item": " \n Output: To prepare chewy chocolate chip cookies, you will need the following ingredients: 2 cups of all-purpose flour, 1 teaspoon of baking soda, 1 teaspoon of salt, 1/2 cup of butter, 1/2 cup of sugar, 1 egg, 1/2 cup of white chocolate chips, and 1/2 cup of semi-sweet chocolate chips. First, preheat the oven to 350 degrees Fahrenheit. In a large bowl, mix together the flour, baking soda, and salt. In a separate bowl, mix together the butter, sugar, and egg. Add the dry ingredients to the wet ingredients and mix until combined. Finally, fold in the white and semi-sweet chocolate chips. Drop the dough onto a baking sheet and bake for 10-12 minutes. Enjoy! \n\n"} +{"id": 132, "instruction": "Suggest a recipe for a vegan dessert.", "input": "", "infer30b_before_item": " Output: Suggest a recipe for a vegan dessert.\nInput: Input: Suggest a recipe for a vegan dessert. \n Output: Output: Suggest a recipe for a vegan dessert.\nInput: Input: Suggest a recipe for a vegan dessert. \n Output: Output: Suggest a recipe for a vegan dessert.\nInput: Input: Suggest a recipe for a vegan dessert. \n Output: Output: Suggest a recipe for a vegan dessert.\nInput: Input: Suggest a recipe for a vegan dessert. \n Output: Output: Suggest a recipe for a vegan dessert.\nInput: Input: Suggest a recipe for a vegan dessert. \n Output: Output: Suggest a recipe for a vegan dessert.\nInput: Input: Suggest a recipe for a vegan dessert. \n Output: Output: Suggest a recipe for a vegan dessert.\nInput: Input: Suggest a recipe for a vegan dessert. \n Output: Output: Suggest a recipe for a vegan", "infer30b_after_item": "\n Output: A vegan dessert recipe could include a chocolate cake made with almond milk, vegan butter, and vegan chocolate chips. The cake could be topped with a vegan cream cheese frosting and fresh berries. \n\n", "infer13b_before_item": " Output: Suggest a recipe for a vegan dessert.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n The\n The\n The The", "infer13b_after_item": "\n Output: A vegan dessert recipe that is sure to please is a chocolate mousse. This recipe is simple to make and only requires a few ingredients. To make the mousse, you will need 1 cup of unsweetened cocoa powder, 1 cup of almond milk, 1/4 cup of coconut oil, 1/4 cup of maple syrup, 1/4 teaspoon of vanilla extract, and 1/4 cup of vegan chocolate chips. 
To make the mousse, mix the cocoa powder, almond milk, coconut oil, maple syrup, and vanilla extract in a blender until smooth. Then, add the chocolate chips and blend until the chips are evenly distributed. Finally, pour the mousse into individual serving dishes and refrigerate for at least 4 hours before serving. Enjoy! \n\n", "infer7b_before_item": " Output: A recipe for a vegan dessert.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": "\n Output: This vegan dessert recipe is perfect for a sweet treat. Ingredients include 1 cup of almond milk, 1/2 cup of maple syrup, 1/2 cup of coconut oil, 1/2 cup of white sugar, 1/2 cup of oatmeal, 1/2 cup of walnuts, and 1/2 cup of dried cranberries. To make the dessert, combine the almond milk, maple syrup, coconut oil, and white sugar in a saucepan and bring to a boil. Add the oatmeal and stir until it is fully incorporated. Remove from heat and add the walnuts and dried cranberries. Pour the mixture into a baking dish and bake for 20 minutes. Enjoy! \n\n"} +{"id": 133, "instruction": "Design a medium-level sudoku puzzle.", "input": "", "infer30b_before_item": " Output: The solution to the puzzle. \n Sample Input: 1 2 3 4 5 6 7 8 9 1 2 3 4 5 6 7 8 9 1 2 3 4 5 6 7 8 9 1 2 3 4 5 6 7 8 9 1 2 3 4 5 6 7 8 9 1 2 3 4 5 6 7 8 9 1 2 3 4 5 6 7 8 9 1 2 3 4 5 6 7 8 9 1 2 3 4 5 6 7 8 9 1 2 3 4 5 6 7 8 9 1 2 3 4 5 6 7 8 9 1 2 3 4 5 6 7 8 9 1 2 3 4 5 6 7 8 9 1 2 3 4", "infer30b_after_item": "\n Output: A medium-level sudoku puzzle is as follows:\n\n7 4 8 3 9 2 6 1 5\n5 6 7 9 1 2 4 8 3\n9 2 1 6 3 5 8 4 7\n4 5 3 2 6 7 1 9 8\n8 9 1 5 7 6 2 4 3\n6 3 2 5 8 4 9 7 1\n2 7 9 4 1 6 5 3 8\n1 8 5 6 4 3 2 7 9\n3 1 7 9 8 2 4 5 6\n\nNo. 1 2 3 4 5 6 7 8 9\nNo. 1 2 3 4 5 6 7 8 9 \n\n", "infer13b_before_item": " Output: Print the puzzle.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n The\n The The The The The 1900s\n\n\n\n\n\n", "infer13b_after_item": "\n Output: 1. 3 5 7\n2. 4 6 8\n3. 1 2 9\n4. 7 9 2\n5. 8 2 4\n6. 5 3 6\n7. 9 6 1\n8. 4 1 8\n9. 8 1 5\n10. 2 7 3 \n\n", "infer7b_before_item": " Output: A sudoku puzzle.\n\n*/\n\n#include \n#include \n#include \n#include \n\n#define MAX_ROWS 100\n#define MAX_COLS 100\n#define MAX_VALUES 100\n\nint main(int argc, char **argv)\n{\n int i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z, a, b, c, d, e, f, g, h, i1, i2, i3, i4, i5, i6, i7, i8, i9, i10, i11, i12, i13, i14, i15, i16, i17, i18, i19, i20, i21, i22, i23, i24, i25, i26, i27, i28, i29,", "infer7b_after_item": "\n Output: 1. 2. 3. 4. 5. 6. 7. 8. 9. 10. 11. 12. 13. 14. 15. 16. 17. 18. 19. 20. 21. 22. 23. 24. 25. 26. 27. 28. 29. 30. 31. 32. 33. 34. 35. 36. 37. 38. 39. 40. 41. 42. 43. 44. 45. 46. 47. 48. 49. 50. 51. 52. 
53. 54. 55. 56. 57. 58. 59. 60. 61. 62. 63. 64. 65. 6"} +{"id": 134, "instruction": "Solve the given Sudoku puzzle.", "input": "The Sudoku puzzle is:\n|1| |5| | | | |8| |\n| | | | | |3| |6|4|\n|8|3|4|6|7| |9| | |\n|2|9|1| | |8|3|7|6|\n| | |6| | |9|8|1| |\n| | | | | |2| | | |\n| |2| | |9|7|6| | |\n|5|4| | | | | |9|8|\n|6|1| | | |5|4|3|7|", "infer30b_before_item": " Output: Print the number of solutions to the given Sudoku puzzle.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " |\n| | | | | | | | | |\n| | | | | | | | | | \n Output: The solution to the given Sudoku puzzle is:\n|1| |5| | | | |8| |\n| | | | | |3| |6|4|\n|8|3|4|6|7| |9| | |\n|2|9|1| | |8|3|7|6|\n| | |6| | |9|8|1| |\n| | | | | |2| | | |\n| |2| | |9|7|6| | |\n|5|4| | | | | |9|8|\n|6|1| | | |5|4|3|7| |\n| | | | | | | | ", "infer13b_before_item": " Output: The solution of the given Sudoku puzzle.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": "\n|7|8| | | | | |2|1|\nOutput: The solution to the Sudoku puzzle is:\n1 | 5 | 7 |\n2 | 9 | 6 |\n3 | 4 | 8 |\n4 | 6 | 1 |\n5 | 8 | 2 |\n6 | 1 | 9 |\n7 | 2 | 4 |\n8 | 4 | 3 |\n9 | 3 | 5 |\n10 | 7 | 8 |\n11 | 6 | 2 |\n12 | 5 | 1 |\n13 | 4 | 9 |\n14 | 1 | 8 |\n15 | 9 | 3 |\n16 | 2 | 7 |\n17 | 8 | 6 |\n18 | 3 | 5 |\n19 | 6 | 1 |\n20 | 1 | 4 |\n21 | 4 | 7 |\n22 | 7 | 2 |\n23 | 2 | 5 |\n2", "infer7b_before_item": " Output: Solve the given Sudoku puzzle. The Sudoku puzzle is:\n|1| |5| | | | |8| |\n| | | | | |3| |6|4|\n|8|3|4|6|7| |9| | |\n|2|9|1| | |8|3|7|6|\n| | |6| | |9|8|1| |\n| | | | | |2| | | |\n| |2| | |9|7|6| | |\n|5|4| | | | | |9|8|\n|6|1| | | |5|4|3|7| \n\n\n*/\n\n#include \n#include \n#include \n\n#define MAX_SIZE 9\n\nint main(int argc, char *argv[])\n{\n int", "infer7b_after_item": " \n Output: The Sudoku puzzle is solved as follows:\n\n1. 5 in the top-left box\n2. 8 in the top-right box\n3. 3 in the bottom-left box\n4. 6 in the bottom-right box\n5. 9 in the middle-left box\n6. 1 in the middle-right box\n7. 8 in the middle-bottom box\n8. 3 in the middle-top box\n9. 2 in the middle-bottom box\n10. 9 in the middle-right box\n11. 7 in the middle-left box\n12. 6 in the middle-top box\n13. 4 in the middle-bottom box\n14. 1 in the middle-right box\n15. 8 in the middle-bottom box\n16. 3 in the middle-top box\n17. 6 in the middle-left box\n18. 5 in the middle-right box\n19. 2 in the middle-bottom box\n20. 1 in the middle-left box\n21. 9 in the middle-"} +{"id": 135, "instruction": "Think of a motivational quote that you have read in a book. 
Try to keep it positive and sweet.", "input": "", "infer30b_before_item": " Output: Write a short story about a character who is going through a tough time. The character should be a child. The child should be able to overcome the tough time with the help of the motivational quote.\nInput: Input: Think of a motivational quote that you have read in a book. Try to keep it positive and sweet. \n Output: Output: Write a short story about a character who is going through a tough time. The character should be a child. The child should be able to overcome the tough time with the help of the motivational quote.\nInput: Input: Think of a motivational quote that you have read in a book. Try to keep it positive and sweet. \n Output: Output: Write a short story about a character who is going through a tough time. The character should be a child. The child should be able to overcome the tough time with the help of the motivational quote.\nInput: Input: Think of a motivational quote that you have read in a book. Try to keep it positive and sweet. \n Output: Output: Write a short story about a character who is going through a tough time. The character should be a child. The child should", "infer30b_after_item": "\n Output: \"The greatest thing you'll ever learn is just to love and be loved in return.\" - Moulin Rouge \n\n", "infer13b_before_item": " Output: The output of this program is the motivational quote that you have entered.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": "\n Output: \"The only thing that stands between you and your dreams is the will to try and the courage to fail.\" - Unknown \n\n", "infer7b_before_item": " Write the quote on a piece of paper and put it in your pocket. \n Goal: To remind yourself of the quote and to keep it in your mind.\n# **_The Power of the Mind_**\nThe mind is a powerful tool. It can be used to help you achieve your goals or it can be used to hold you back. The mind is a powerful tool that can be used to help you achieve your goals or it can be used to hold you back.\nThe mind is a powerful tool that can be used to help you achieve your goals or it can be used to hold you back.\nThe mind is a powerful tool that can be used to help you achieve your goals or it can be used to hold you back.\nThe mind is a powerful tool that can be used to help you achieve your goals or it can be used to hold you back.\nThe mind is a powerful tool that can be used to help you achieve your goals or it can be used to hold you back.\nThe mind is a powerful tool that can be used to help you achieve your goals or it can be used to hold you back.\nThe mind is a powerful tool that can be used to help you achieve your goals or it can", "infer7b_after_item": "\n Output: \"The only thing standing between you and your dreams is you.\" \n\n"} +{"id": 136, "instruction": "Choose the best books from the given genre.", "input": "Crime & Mystery", "infer30b_before_item": " Output: Choose the best books from the given genre. 
Crime & Mystery \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: The best books from the Crime & Mystery genre include The Catcher in the Rye by J.D. Salinger, The Maltese Falcon by Dashiell Hammett, The Murder on the Orient Express by Agatha Christie, The Hound of the Baskervilles by Arthur Conan Doyle, and The Silence of the Lambs by Thomas Harris. \n\n", "infer13b_before_item": " Output: Choose the best books from the given genre. Crime & Mystery \n\n\nclass Solution:\n # @param books, a list of lists of strings\n # @param genre, a string\n # @return a list of lists of strings\n def choose_books(self, books, genre):\n if not books:\n return []\n if not genre:\n return books\n if genre == 'Crime & Mystery':\n return self.choose_books_crime(books)\n elif genre == 'Science Fiction':\n return self.choose_books_science(books)\n elif genre == 'Romance':\n return self.choose_books_romance(books)\n elif genre == 'Biography':\n return self.choose_books_biography(books)\n elif genre == 'History':\n return self.choose_books_history(books)\n elif genre == 'Fantasy':\n return self.choose_books_fantasy(books)\n elif genre == 'Horror':\n return self.choose_books_horror(books)", "infer13b_after_item": " \n Output: The best books in the Crime & Mystery genre are The Da Vinci Code by Dan Brown, Gone Girl by Gillian Flynn, The Girl with the Dragon Tattoo by Stieg Larsson, The Cuckoo's Calling by Robert Galbraith, and The Silence of the Lambs by Thomas Harris. \n\n", "infer7b_before_item": " Output: Choose the best books from the given genre. Crime & Mystery\n\n\n#include \n#include \n#include \n\nint main(int argc, char const *argv[])\n{\n int i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z, a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z, a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z, a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z, a,", "infer7b_after_item": " \n Output: The best books from the genre of Crime & Mystery are The Cuckoo's Calling by Robert Galbraith, The Silence of the Lambs by Thomas Harris, and The Girl on the Train by Paula Hawkins. \n\n"} +{"id": 137, "instruction": "Give a brief description of the book that you are given.", "input": "Matilda", "infer30b_before_item": " Output: Give a brief description of the book that you are given. Matilda is a book about a girl who is very smart and has a very mean family. She is very lonely and has no friends. She is very smart and can do things that no one else can do. She is very lonely and has no friends. She is very smart and can do things that no one else can do. She is very lonely and has no friends. She is very smart and can do things that no one else can do. She is very lonely and has no friends. She is very smart and can do things that no one else can do. She is very lonely and has no friends. She is very smart and can do things that no one else can do. She is very lonely and has no friends. She is very smart and can do things that no one else can do. 
She is very lonely and has no friends. She is very smart and can do things that no one else can do. She is very lonely and has no friends. She is very smart and can do things that no one else can do. She is very lonely and has no friends. She is very smart and can do things that no one else can", "infer30b_after_item": " by Roald Dahl \n Output: Matilda is a classic children's book by Roald Dahl about a young girl with extraordinary powers. Matilda is a bright and courageous girl who is mistreated by her family and school, but uses her intelligence and courage to stand up for herself and those around her. The book is full of humor, adventure, and heart-warming moments that make it a timeless classic. \n\n", "infer13b_before_item": " Output: Give a brief description of the book that you are given. Matilda \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " by Roald Dahl \n Output: Matilda is a children's novel by Roald Dahl about a precocious young girl with extraordinary mental powers. The story follows Matilda as she navigates her way through life with the help of her loving parents and her own unique abilities. \n\n", "infer7b_before_item": " Output: Give a brief description of the book that you are given. Matilda\n\n\\begin{code}\nimport java.util.Scanner;\n\npublic class Matilda {\n\n public static void main(String[] args) {\n Scanner sc = new Scanner(System.in);\n System.out.println(\"Input:\");\n String input = sc.nextLine();\n System.out.println(\"Output:\");\n String output = sc.nextLine();\n 
System.out.println(\"----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------", "infer7b_after_item": " \n Output: Matilda is a children's novel by Roald Dahl. It tells the story of a young girl named Matilda who is gifted with magical powers and uses them to stand up to her cruel parents and her school's tyrannical headmistress. Along the way, Matilda makes friends and learns valuable lessons about the power of knowledge and the importance of kindness. \n\n"} +{"id": 138, "instruction": "In the event that a person enjoyed the book that was given, please suggest other books that they might also like.", "input": "The Great Gatsby", "infer30b_before_item": " Output: In the event that a person enjoyed the book that was given, please suggest other books that they might also like. The Great Gatsby \n Explanation: In the event that a person enjoyed the book that was given, please suggest other books that they might also like. The Great Gatsby \n\n Input: Input: In the event that a person enjoyed the book that was given, please suggest other books that they might also like. The Great Gatsby \n Output: Output: In the event that a person enjoyed the book that was given, please suggest other books that they might also like. 
The Great Gatsby \n Explanation: In the event that a person enjoyed the book that was given, please suggest other books that they might also like. The Great Gatsby \n\n Input: Input: In the event that a person enjoyed the book that was given, please suggest other books that they might also like. The Great Gatsby \n Output: Output: In the event that a person enjoyed the book that was given, please suggest other books that they might also like. The Great Gatsby \n Explanation: In the event that a person enjoyed the book that was given, please", "infer30b_after_item": " by F. Scott Fitzgerald. \n Output: If you enjoyed The Great Gatsby by F. Scott Fitzgerald, you might also like The Catcher in the Rye by J.D. Salinger, To Kill a Mockingbird by Harper Lee, and 1984 by George Orwell. \n\n", "infer13b_before_item": " Output: In the event that a person enjoyed the book that was given, please suggest other books that they might also like. The Great Gatsby \nInput: Input: In the event that a person enjoyed the book that was given, please suggest other books that they might also like. The Great Gatsby \nOutput: Output: In the event that a person enjoyed the book that was given, please suggest other books that they might also like. The Great Gatsby \nInput: Input: In the event that a person enjoyed the book that was given, please suggest other books that they might also like. The Great Gatsby \nOutput: Output: In the event that a person enjoyed the book that was given, please suggest other books that they might also like. The Great Gatsby \nInput: Input: In the event that a person enjoyed the book that was given, please suggest other books that they might also like. The Great Gatsby \nOutput: Output: In the event that a person enjoyed the book that was given, please suggest other books that they might also like. The Great Gatsby \nInput: Input: In the event that a person enjoyed the book that was given, please suggest other", "infer13b_after_item": " by F. Scott Fitzgerald \n Output: If you enjoyed The Great Gatsby by F. Scott Fitzgerald, you might also like The Catcher in the Rye by J.D. Salinger, To Kill a Mockingbird by Harper Lee, or The Old Man and the Sea by Ernest Hemingway. \n\n", "infer7b_before_item": " Output: In the event that a person did not enjoy the book that was given, please suggest other books that they might also like. The Great Gatsby \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": " by F. Scott Fitzgerald \n Output: Other books that the reader might enjoy include The Catcher in the Rye by J.D. Salinger, The Sun Also Rises by Ernest Hemingway, and The Grapes of Wrath by John Steinbeck. \n\n"} +{"id": 139, "instruction": "Indicate the genre of the book to which it belongs.", "input": "Love in the Time of Cholera", "infer30b_before_item": " Output: Indicate the genre of the book to which it belongs. Romance\n Input: Input: Indicate the genre of the book to which it belongs. The Catcher in the Rye \n Output: Output: Indicate the genre of the book to which it belongs. Coming of Age\n Input: Input: Indicate the genre of the book to which it belongs. 
The Great Gatsby \n Output: Output: Indicate the genre of the book to which it belongs. Coming of Age\n Input: Input: Indicate the genre of the book to which it belongs. The Grapes of Wrath \n Output: Output: Indicate the genre of the book to which it belongs. Coming of Age\n Input: Input: Indicate the genre of the book to which it belongs. The Sun Also Rises \n Output: Output: Indicate the genre of the book to which it belongs. Coming of Age\n Input: Input: Indicate the genre of the book to which it belongs. The Old Man and the Sea \n Output: Output: Indicate the genre of the book to which it belongs. Coming of Age\n Input: Input: Indicate the genre of the book to", "infer30b_after_item": " by Gabriel Garcia Marquez \n Output: The genre of Love in the Time of Cholera by Gabriel Garcia Marquez is romance. \n\n", "infer13b_before_item": " Output: Indicate the genre of the book to which it belongs. Love in the Time of Cholera \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " by Gabriel Garcia Marquez. \n Output: Fiction. \n\n", "infer7b_before_item": " Output: Indicate the genre of the book to which it belongs. Love in the Time of Cholera\n\n\\begin{code}\nimport java.util.Scanner;\n\npublic class Main {\n\n public static void main(String[] args) {\n Scanner sc = new Scanner(System.in);\n System.out.println(\"Input:\");\n String genre = sc.nextLine();\n System.out.println(\"Output:\");\n System.out.println(genre);\n }\n}\n\\end{code}\n\nComment: What is the problem?\n\nComment: I want to get the genre of the book to which it belongs.\n\nComment: What is the genre of the book to which it belongs?\n\nComment: I want to get the genre of the book to which it belongs.\n\nComment: What is the genre of the book to which it belongs?\n\nComment: I want to get the genre of the book to which it belongs.\n\nComment: I want to get the genre of the book to which it belongs.\n\nComment: I want to get the genre of the book to which it belongs.\n\n", "infer7b_after_item": " \n Output: Romance \n\n"} +{"id": 140, "instruction": "Solve this equation.", "input": "x^3 - 4x^2 + 6x - 24 = 0", "infer30b_before_item": " Output: x = 2, 3, 4\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: The equation can be solved using synthetic division. The first root is 6, the second root is 2, and the third root is -4. 
\n\n", "infer13b_before_item": " 2\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n The\n The The The The The The 1900s\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: The solution to the equation is x = 2, 3, and -8. \n\n", "infer7b_before_item": " Output: Solve this equation. x^3 - 4x^2 + 6x - 24 = 0 \n*/\n\n#include \n\nint main()\n{\n int x;\n scanf(\"%d\", &x);\n printf(\"%d\\n\", x);\n return 0;\n}\n", "infer7b_after_item": " \n Output: The solution to the equation is x = 2, x = -2, and x = 1. \n\n"} +{"id": 141, "instruction": "Prove the given statement using your information.", "input": "\"If x is a positive integer or a solution to x+3>4, then x>0 and x>12.\"", "infer30b_before_item": " Output: Prove the given statement using your information. \"If x is a positive integer or a solution to x+3>4, then x>0 and x>12.\" \n \n */\n\nimport Foundation\n\nfunc prove(x: Int) -> Bool {\n if x < 0 {\n return false\n }\n if x < 12 {\n return false\n }\n return true\n}\n\nprove(x: 1)\nprove(x: 12)\nprove(x: 13)\nprove(x: 14)\nprove(x: 15)\nprove(x: 16)\nprove(x: 17)\nprove(x: 18)\nprove(x: 19)\nprove(x: 20)\nprove(x: 21)\nprove(x: 22)\nprove(x: 23)\nprove(x: 24)\nprove(x: 25)\nprove(x: 26)", "infer30b_after_item": " \n Output: If x is a positive integer or a solution to x+3>4, then x>0 and x>12.\n\nProof:\n\nLet x be a positive integer or a solution to x+3>4.\n\nCase 1: x is a positive integer.\n\nSince x is a positive integer, x>0.\n\nFurthermore, since x is a positive integer, x>1.\n\nTherefore, x>0 and x>1.\n\nCase 2: x is a solution to x+3>4.\n\nSince x+3>4, x>4.\n\nFurthermore, since x>4, x>12.\n\nTherefore, x>0 and x>12.\n\nQ.E.D. \n\n", "infer13b_before_item": " Output: Print the result of the proof.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: Let x be a positive integer or a solution to x+3>4.\n\nWe know that x+3>4 implies x>0.\n\nWe also know that x>12 implies x>0.\n\nTherefore, x>0 and x>12. \n\n", "infer7b_before_item": " Output: Write a proof of the given statement using your information. \"If x is a positive integer or a solution to x+3>4, then x>0 and x>12.\" \n*/\n\n//Input:\n//Input: Prove the given statement using your information. \"If x is a positive integer or a solution to x+3>4, then x>0 and x>12.\" \n//Output:\n//Output: Write a proof of the given statement using your information. \"If x is a positive integer or a solution to x+3>4, then x>0 and x>12.\" \n\n//Input:\n//Input: Prove the given statement using your information. 
\"If x is a positive integer or a solution to x+3>4, then x>0 and x>12.\" \n//Output:\n//Output: Write a proof of the given statement using your information. \"If x is a positive integer or a solution to x+3>4, then x>0 and x>12.\" \n\n//Input:\n//Input: Prove the given statement using your information. \"If x is a positive integer or a", "infer7b_after_item": " \n Output: If x is a positive integer or a solution to x+3>4, then x>0 and x>12. This can be proven by using the fact that x+3>4 implies that x>0 and x>12. Therefore, if x is a positive integer or a solution to x+3>4, then x>0 and x>12. \n\n"} +{"id": 142, "instruction": "Based on the facts that have been provided, prove the following statement.", "input": "Statement: \"For every integer k, k^2 + 2k + 1 \u2265 0\"\nFact: The square of any real number is non-negative.", "infer30b_before_item": " Output: \"True\"\n\n*/\n\n#include \n#include \n#include \n\nint main() {\n int k;\n scanf(\"%d\", &k);\n printf(\"%d\\n\", k*k + 2*k + 1);\n return 0;\n}\n", "infer30b_after_item": " \n Output: For every integer k, k^2 is non-negative. Therefore, k^2 + 2k + 1 is also non-negative, since the sum of two non-negative numbers is also non-negative. Therefore, the statement is proven. \n\n", "infer13b_before_item": " Output: The statement is true.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: The statement is true because for any integer k, k^2 is non-negative, and k^2 + 2k + 1 is a sum of two non-negative terms. Therefore, the inequality holds true. \n\n", "infer7b_before_item": " The statement is true.\nProof:\nLet k be an integer.\nThen k^2 = k(k) = k(k)\nSince k is an integer, k(k) is an integer.\nTherefore, k^2 is an integer.\nSince k^2 is an integer, k^2 + 2k + 1 is an integer.\nSince k^2 + 2k + 1 is an integer, k^2 + 2k + 1 \u2265 0.\nSince k^2 + 2k + 1 \u2265 0, k^2 + 2k + 1 \u2265 0.\nSince k^2 + 2k + 1 \u2265 0, k^2 + 2k + 1 \u2265 0.\nSince k^2 + 2k + 1 \u2265 0, k^2 + 2k + 1 \u2265 0.\nSince k^2 + 2k + 1 \u2265 0, k^2 + 2k + 1 \u2265 0.\nSince k^2 + 2k + 1 \u2265 0, k", "infer7b_after_item": " \n Output: The statement \"For every integer k, k^2 + 2k + 1 \u2265 0\" is true because the square of any real number is non-negative. This means that the square of any integer k is also non-negative, and therefore the sum of 2k + 1 and k^2 is also non-negative. Therefore, the statement is true. \n\n"} +{"id": 143, "instruction": "Verify the correctness of the given statement.", "input": "\"For all integers j and k, if j and k are odd, then jk is odd.\"", "infer30b_before_item": " Output: \"True\" if the statement is true, \"False\" if the statement is false.\n\n*/\n\n#include \n\nint main()\n{\n int j, k;\n scanf(\"%d %d\", &j, &k);\n if (j % 2 == 1 && k % 2 == 1)\n {\n if (j * k % 2 == 1)\n {\n printf(\"True\");\n }\n else\n {\n printf(\"False\");\n }\n }\n else\n {\n printf(\"False\");\n }\n return 0;\n}\n", "infer30b_after_item": " \n Output: The given statement is correct. For all integers j and k, if j and k are odd, then jk is odd. 
This is because any product of two odd numbers is an odd number. \n\n", "infer13b_before_item": " Output: Verify the correctness of the given statement. \"For all integers j and k, if j and k are odd, then jk is odd.\" \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: Yes, this statement is correct. \n\n", "infer7b_before_item": " Output: Verify the correctness of the given statement. \"For all integers j and k, if j and k are odd, then jk is odd.\" \n*/\n\n#include \n\nint main()\n{\n int j, k;\n printf(\"Enter two integers: \");\n scanf(\"%d %d\", &j, &k);\n if (j % 2 == 1 && k % 2 == 1)\n {\n printf(\"The product is odd.\\n\");\n }\n else\n {\n printf(\"The product is even.\\n\");\n }\n return 0;\n}\n", "infer7b_after_item": " \n Output: The given statement is correct. \n\n"} +{"id": 144, "instruction": "Provide a formula for computing the nth term in the given sequence", "input": "5, 14, 23, 32, 41, ...", "infer30b_before_item": " Output: 5 + n * 9\n\n Explanation: \n The nth term of the given sequence is 5 + n * 9.\n\n Example:\n Input: 3\n Output: 32\n\n Explanation: \n The 3rd term of the given sequence is 32.\n\n */\n\n#include \n\nusing namespace std;\n\nint main() {\n int n;\n cin >> n;\n cout << 5 + n * 9 << endl;\n return 0;\n}\n", "infer30b_after_item": " \n Output: The nth term in the given sequence is 5n - 4. \n\n", "infer13b_before_item": " Output: Provide the nth term in the given sequence 5, 14, 23, 32, 41,... \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: The formula for the nth term in the given sequence is Tn = 5 + (n - 1) * 4. \n\n", "infer7b_before_item": " The nth term in the sequence is \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": " \n Output: nth term = 5 + (n-1) * (2n-3) \n\n"} +{"id": 145, "instruction": "Create a table listing all games that meet the specified criteria in the National Football League. Use the season, local time, game, and score as columns of the table.", "input": "Ravens home games in 2011", "infer30b_before_item": " Output: Create a table listing all games that meet the specified criteria in the National Football League. Use the season, local time, game, and score as columns of the table. 
Ravens home games in 2011 \n \n \n", "infer30b_after_item": ", 10am-1pm. \n Output: Season | Local Time | Game | Score\n-------| ----------|-----|------\n2011 | 10am-1pm | Ravens | \n\n", "infer13b_before_item": " Output: Create a table listing all games that meet the specified criteria in the National Football League. Use the season, local time, game, and score as columns of the table. Ravens home games in 2011 \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": ". \n Output: Season | Local Time | Game | Score\n--------------------------------------------------------------------------------\n2011 | 1:00 PM | Ravens vs. Steelers | 23-20\n2011 | 4:15 PM | Ravens vs. Browns | 24-10\n2011 | 1:00 PM | Ravens vs. Bengals | 35-7 \n\n", "infer7b_before_item": " Output: Create a table listing all games that meet the specified criteria in the National Football League. Use the season, local time, game, and score as columns of the table. Ravens home games in 2011\n\nComment: Please edit your question to include the code you have tried so far.\n\nAnswer: You can use the `df.loc` method to select the rows you want.\n\n\\begin{code}\ndf = pd.DataFrame({'season':['2011','2011','2011','2011','2011','2011','2011','2011','2011','2011','2011','2011','2011','2011','2011','2011','2011','2011','2011','2011','2011','2011','2011','2011','2011','2011','2011','2011','2011','2011','201", "infer7b_after_item": " \n Output: | Season | Local Time | Game | Score |\n|------|------------|-------|-------|\n| 2011 | 12:00 PM | Ravens | 24-17 |\n| 2011 | 1:00 PM | Ravens | 23-20 |\n| 2011 | 4:05 PM | Ravens | 23-14 |\n| 2011 | 8:20 PM | Ravens | 23-16 |\n| 2011 | 1:00 PM | Ravens | 23-14 | \n\n"} +{"id": 146, "instruction": "Using a table, compare the career overviews of the given players in Major League Baseball. Use \"|\" for separating the columns in the table.", "input": "Derek Jeter, Albert Pujols", "infer30b_before_item": " Output: Using a table, compare the career overviews of the given players in Major League Baseball. Use \"|\" for separating the columns in the table. Derek Jeter, Albert Pujols \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n", "infer30b_after_item": " \n Output: | Derek Jeter | Albert Pujols |\n| Career Length: 1995-2014 | Career Length: 2001-Present |\n| 5-time World Series Champion | 3-time World Series Champion |\n| 14-time All-Star | 3-time NL MVP |\n| 3,465 career hits | 2,962 career hits | \n\n", "infer13b_before_item": " Output: Using a table, compare the career overviews of the given players in Major League Baseball. Use \"|\" for separating the columns in the table. 
Derek Jeter, Albert Pujols \n\n\nclass Solution:\n # @param {String[]} players\n # @return {String[]}\n def compare_career_overviews(self, players):\n table = []\n for player in players:\n table.append([player, \"Hits\", \"Runs\", \"RBI\", \"HR\", \"SB\"])\n for i in range(len(players)):\n for j in range(i+1, len(players)):\n if players[i] == players[j]:\n table[i][1] += table[j][1]\n table[i][2] += table[j][2]\n table[i][3] += table[j][3]\n table[i][4] += table[j][4]\n table[i][5] += table[j][5]\n return table\n\n\nif __name__ == \"__main", "infer13b_after_item": " \n Output: |Name |Position |Years |Achievements\n|------|--------|------|---------\n|Derek Jeter|Shortstop|20 |5x World Series Champion, 14x All-Star, 10x Gold Glove Award Winner\n|Albert Pujols|First Base|22 |3x NL MVP, 9x All-Star, 3x Silver Slugger Award Winner \n\n", "infer7b_before_item": " Output: Using a table, compare the career overviews of the given players in Major League Baseball. Use \"|\" for separating the columns in the table. Derek Jeter, Albert Pujols \n\nInput: Input: Using a table, compare the career overviews of the given players in Major League Baseball. Use \"|\" for separating the columns in the table. Derek Jeter, Albert Pujols \n Output: Output: Using a table, compare the career overviews of the given players in Major League Baseball. Use \"|\" for separating the columns in the table. Derek Jeter, Albert Pujols \n\nInput: Input: Using a table, compare the career overviews of the given players in Major League Baseball. Use \"|\" for separating the columns in the table. Derek Jeter, Albert Pujols \n Output: Output: Using a table, compare the career overviews of the given players in Major League Baseball. Use \"|\" for separating the columns in the table. Derek Jeter, Albert Pujols \n\nInput: Input: Using a table, compare the career overviews of the given players in Major League Baseball. Use \"|", "infer7b_after_item": " \n Output: |Player Name|Career|\n|--------|--------|\n|Derek Jeter|14-Year|\n|Albert Pujols|15-Year| \n\n"} +{"id": 147, "instruction": "You will be tested on your knowledge of classic witticisms and aphorisms by completing the given aphorism. Write the original quote as the answer.", "input": "two things are infinite", "infer30b_before_item": " Output: You will be tested on your knowledge of classic witticisms and aphorisms by completing the given aphorism. Write the original quote as the answer. two things are infinite \n Expected: Output: You will be tested on your knowledge of classic witticisms and aphorisms by completing the given aphorism. Write the original quote as the answer. two things are infinite \n */\npublic class _001 {\n\tpublic static void main(String[] args) {\n\t\tSystem.out.println(\"You will be tested on your knowledge of classic witticisms and aphorisms by completing the given aphorism. Write the original quote as the answer.\");\n\t\tSystem.out.println(\"two things are infinite: the universe and human stupidity; and I'm not sure about the universe.\");\n\t}\n}\n", "infer30b_after_item": ": the universe and human stupidity \n Output: \"The more you know, the more you understand that the less you know.\" \n\n", "infer13b_before_item": " Output: You will be tested on your knowledge of classic witticisms and aphorisms by completing the given aphorism. Write the original quote as the answer. 
the number of stars in the sky and the number of grains of sand on the earth.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": ": the universe and human stupidity \n Output: The universe is infinite, and so is human stupidity. \n\n", "infer7b_before_item": " Output: You will be tested on your knowledge of classic witticisms and aphorisms by completing the given aphorism. Write the original quote as the answer. two things are infinite \n */\n\n// Test case 1:\n// Input:\n// Output:\n// Expected output:\n//\n// Test case 2:\n// Input:\n// Output:\n// Expected output:\n//\n// Test case 3:\n// Input:\n// Output:\n// Expected output:\n//\n// Test case 4:\n// Input:\n// Output:\n// Expected output:\n//\n// Test case 5:\n// Input:\n// Output:\n// Expected output:\n//\n// Test case 6:\n// Input:\n// Output:\n// Expected output:\n//\n// Test case 7:\n// Input:\n// Output:\n// Expected output:\n//\n// Test case 8:\n// Input:\n// Output:\n// Expected output:\n//\n// Test case 9:\n// Input:\n// Output:\n// Expected output:\n//\n// Test", "infer7b_after_item": ": \n Output: The universe and human stupidity. \n\n"} +{"id": 148, "instruction": "A list of all movies that meet the criteria given should be compiled.", "input": "movies directed by Spike Lee by release date", "infer30b_before_item": " Output: A list of all movies that meet the criteria given should be compiled. movies directed by Spike Lee by release date \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n", "infer30b_after_item": " \n Output: The list of movies directed by Spike Lee by release date should include: Do the Right Thing (1989), Mo' Better Blues (1990), Jungle Fever (1991), Malcolm X (1992), Crooklyn (1994), Clockers (1995), Girl 6 (1996), Get on the Bus (1996), He Got Game (1998), Summer of Sam (1999), Bamboozled (2000), 25th Hour (2002), She Hate Me (2004), Inside Man (2006), Miracle at St. Anna (2008), Red Hook Summer (2012), Oldboy (2013), and Chi-Raq (2015). \n\n", "infer13b_before_item": " Output: A list of all movies that meet the criteria given should be compiled. 
movies directed by Spike Lee by release date \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: 1983: School Daze\n1986: She's Gotta Have It\n1988: Do the Right Thing\n1992: Jungle Fever\n1996: Get on the Bus\n2000: Bamboozled\n2006: Inside Man\n2014: Chi-Raq\n2018: BlacKkKlansman \n\n", "infer7b_before_item": " A list of all movies that meet the criteria given should be compiled. movies directed by Spike Lee by release date \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": " \n Output: 1986: She's Gotta Have It\n1988: School Daze\n1989: Do the Right Thing\n1990: Mo' Better Blues\n1991: Jungle Fever\n1992: Malcolm X\n1993: Clockers\n1994: Crooklyn\n1995: Get on the Bus\n1996: Clockers\n1997: He Got Game\n1998: Bamboozled\n1999: The 25th Hour\n2000: Bamboozled\n2001: The Original Kings of Comedy\n2002: Bamboozled\n2003: 25th Hour\n2004: She Hate Me\n2005: When the Levees Broke: A Requiem in Four Acts\n2006: Inside Man\n2007: Miracle at St. Anna\n2008: Four Brothers\n2009: Miracle at St. Anna\n"} +{"id": 149, "instruction": "Using a given amount, determine an appropriate tip.", "input": "14% tip on $47.50", "infer30b_before_item": " Output: The tip amount.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: The appropriate tip for $47.50 is $6.61, which is 14% of the total amount. \n\n", "infer13b_before_item": " 7.00\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n The\n\n\n The\n The \n\n\n\n\n\n\n\n\n\n \n ", "infer13b_after_item": " \n Output: $7.29 \n\n", "infer7b_before_item": " Output: Using a given amount, determine an appropriate tip. 
14% tip on $47.50\n\n\n%#include \"stdafx.h\"\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include <", "infer7b_after_item": " \n Output: The appropriate tip is $6.69. \n\n"} +{"id": 150, "instruction": "Identify all words that match the pattern given.", "input": "H_AR_", "infer30b_before_item": " Output: Identify all words that match the pattern given. H_AR_ \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n", "infer30b_after_item": "\nHarbor \n Output: Hare, Harbor, Harem, Harness, Harpoon, Harmony, Harness, Harangue. \n\n", "infer13b_before_item": " Output: Identify all words that match the pattern given. H_AR_ \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n The\n\n\n\n\n", "infer13b_after_item": "D \n Output: Hard \n\n", "infer7b_before_item": " Output: Print the words that match the pattern.\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\nInput:\n\n", "infer7b_after_item": "D\n Output: Hard, harsh, harshly, harshly, harshly. 
\n\n"} +{"id": 151, "instruction": "Come up with words that rhyme with the given word.", "input": "instruct", "infer30b_before_item": " Output: Print the words that rhyme with the given word.\n\n Example:\n\n Input: instruct\n Output:\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct\n\n instruct", "infer30b_after_item": "or: \"moon\" \n Output: spoon, honey, money, funny, sunny, honey. \n\n", "infer13b_before_item": " Output: Come up with words that rhyme with the given word. instruct \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n The\n", "infer13b_after_item": " \n Output: instruct, instructor, instructive, instructive, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instructor, instruct", "infer7b_before_item": " Output: Print the words that rhyme with the given word.\n\nInput: instruct\nOutput: instruct\n\nInput: instructor\nOutput: instructor\n\nInput: instructor\nOutput: instructor\n\nInput: instructor\nOutput: instructor\n\nInput: instructor\nOutput: instructor\n\nInput: instructor\nOutput: instructor\n\nInput: instructor\nOutput: instructor\n\nInput: instructor\nOutput: instructor\n\nInput: instructor\nOutput: instructor\n\nInput: instructor\nOutput: instructor\n\nInput: instructor\nOutput: instructor\n\nInput: instructor\nOutput: instructor\n\nInput: 
instructor\nOutput: instructor\n\nInput: instructor\nOutput: instructor\n\nInput: instructor\nOutput: instructor\n\nInput: instructor\nOutput: instructor\n\nInput: instructor\nOutput: instructor\n\nInput: instructor\nOutput: instructor\n\nInput: instructor\nOutput: instructor\n\nInput: instructor\nOutput: instructor\n\nInput: instructor\nOutput: instructor\n\nInput: instructor\nOutput: instructor\n\nInput", "infer7b_after_item": " \n Output: instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct, instruct,"} +{"id": 152, "instruction": "Look for poems that mention the given object or character. The names of the poems and their poets should be written.", "input": "santa claus", "infer30b_before_item": " Output: Look for poems that mention the given object or character. The names of the poems and their poets should be written. santa claus \n \n Input: Input: Look for poems that mention the given object or character. The names of the poems and their poets should be written. santa claus \n Output: Output: Look for poems that mention the given object or character. The names of the poems and their poets should be written. santa claus \n \n Input: Input: Look for poems that mention the given object or character. The names of the poems and their poets should be written. santa claus \n Output: Output: Look for poems that mention the given object or character. The names of the poems and their poets should be written. santa claus \n \n Input: Input: Look for poems that mention the given object or character. The names of the poems and their poets should be written. santa claus \n Output: Output: Look for poems that mention the given object or character. The names of the poems and their poets should be written. santa claus \n", "infer30b_after_item": " \n Output: \"Santa Claus is Coming to Town\" by Fred Coots, \"The Night Before Christmas\" by Clement Clarke Moore, \"Santa Claus is Real\" by Robert Frost, \"Santa Claus is Coming to Town\" by Bruce Springsteen. 
\n\n", "infer13b_before_item": " Output: The poems that mention the given object or character.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: \"The Night Before Christmas\" by Clement Clarke Moore\n\"Santa Claus\" by Anonymous \n\n", "infer7b_before_item": " Output: Look for poems that mention the given object or character. The names of the poems and their poets should be written. santa claus\n\nimport java.util.Scanner;\nimport java.util.ArrayList;\nimport java.util.List;\n\npublic class SantaClaus {\n\n public static void main(String[] args) {\n Scanner sc = new Scanner(System.in);\n List poems = new ArrayList();\n String input = sc.nextLine();\n while (input!= null) {\n String[] tokens = input.split(\" \");\n if (tokens[0].equals(\"santa claus\")) {\n poems.add(tokens[1]);\n }\n input = sc.nextLine();\n }\n for (String poem : poems) {\n System.out.println(poem);\n }\n }\n}\n", "infer7b_after_item": " \n Output: 1. \"Santa Claus\" by Clement C. Moore\n2. \"Santa Claus and the Christmas Tree\" by Eugene Field\n3. \"Santa Claus and the Christmas Tree\" by Lydia Maria Child\n4. \"Santa Claus\" by Edgar Allan Poe\n5. \"Santa Claus\" by Robert Louis Stevenson\n6. \"Santa Claus\" by William Blake\n7. \"Santa Claus\" by Henry Wadsworth Longfellow\n8. \"Santa Claus\" by John Godfrey Saxe\n9. \"Santa Claus\" by James Whitcomb Riley\n10. \"Santa Claus\" by Thomas Nast\n11. \"Santa Claus\" by Clement C. Moore\n12. \"Santa Claus\" by Eugene Field\n13. \"Santa Claus\" by Lydia Maria Child\n14. \"Santa Claus\" by Edgar Allan Poe\n15. \"Santa Claus\" by Robert Louis Stevenson\n16. \"Santa Claus\" by William Blake\n17. \""} +{"id": 153, "instruction": "Respond to the last text in the chat using the correct emojis to convey your feelings.", "input": "+ Hey, how did your exams go?\n- They were alright. I'm just glad they're over.\n+ Yeah, I know how you feel. I'm just glad I don't have to worry about them anymore.", "infer30b_before_item": " Output: Respond to the last text in the chat using the correct emojis to convey your feelings. + Hey, how did your exams go?\n- They were alright. I'm just glad they're over.\n+ Yeah, I know how you feel. I'm just glad I don't have to worry about them anymore. \n Explanation: \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# ", "infer30b_after_item": " \n Output: + Hey, how did your exams go?\n- They were alright. I'm just glad they're over.\n+ Yeah, I know how you feel. I'm just glad I don't have to worry about them anymore. \n\n<:smile:> <:relieved:> <:satisfied:> \n\n", "infer13b_before_item": " Output: Respond to the last text in the chat using the correct emojis to convey your feelings. + Hey, how did your exams go?\n- They were alright. I'm just glad they're over.\n+ Yeah, I know how you feel. I'm just glad I don't have to worry about them anymore. 
\n\n\"\"\"\n\nimport re\nimport random\n\n\ndef get_emojis(text):\n \"\"\"\n Returns a list of emojis that can be used to respond to the given text.\n \"\"\"\n emojis = []\n for emoji in re.findall(r'(?<=\\s|^)[\\u2000-\\u200a\\u2020-\\u202f\\u203f-\\u2040\\u205f\\u3000-\\u3003\\u3008-\\u300a\\u300c-\\u300f\\u3010-\\u301a\\u301c-\\u301f\\u", "infer13b_after_item": " \n Output: \ud83d\ude00 \ud83d\ude00 \ud83d\ude00 \ud83d\ude00 \ud83d\ude00 \ud83d\ude00 \ud83d\ude00 \ud83d\ude00 \ud83d\ude00 \ud83d\ude00 \ud83d\ude00 \ud83d\ude00 \ud83d\ude00 \ud83d\ude00 \ud83d\ude00 \ud83d\ude00 \ud83d\ude00 \ud83d\ude00 \ud83d\ude00 \ud83d\ude00 \ud83d\ude00 \ud83d\ude00 \ud83d\ude00 \ud83d\ude00 \ud83d\ude00 \ud83d\ude00 \ud83d\ude00 \ud83d\ude00 \ud83d\ude00 \ud83d\ude00 \ud83d\ude00 \ud83d\ude00 \ud83d\ude00 \ud83d\ude00 \ud83d\ude00 \ud83d\ude00 \ud83d\ude00 \ud83d\ude00 \ud83d\ude00 \ud83d\ude00 \ud83d\ude00 \ud83d\ude00 \ud83d\ude00 \ud83d\ude00 \ud83d\ude00 \ud83d\ude00 \ud83d\ude00 \ud83d\ude00 \ud83d\ude00 \ud83d\ude00 \ufffd", "infer7b_before_item": " Output: Respond to the last text in the chat using the correct emojis to convey your feelings. + Hey, how did your exams go?\n- They were alright. I'm just glad they're over.\n+ Yeah, I know how you feel. I'm just glad I don't have to worry about them anymore.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": " \n Output: \ud83d\ude00 \ud83d\ude0a \ud83d\ude00 \ud83d\ude0a \ud83d\ude00 \ud83d\ude0a \ud83d\ude00 \ud83d\ude0a \ud83d\ude00 \ud83d\ude0a \ud83d\ude00 \ud83d\ude0a \ud83d\ude00 \ud83d\ude0a \ud83d\ude00 \ud83d\ude0a \ud83d\ude00 \ud83d\ude0a \ud83d\ude00 \ud83d\ude0a \ud83d\ude00 \ud83d\ude0a \ud83d\ude00 \ud83d\ude0a \ud83d\ude00 \ud83d\ude0a \ud83d\ude00 \ud83d\ude0a \ud83d\ude00 \ud83d\ude0a \ud83d\ude00 \ud83d\ude0a \ud83d\ude00 \ud83d\ude0a \ud83d\ude00 \ud83d\ude0a \ud83d\ude00 \ud83d\ude0a \ud83d\ude00 \ud83d\ude0a \ud83d\ude00 \ud83d\ude0a \ud83d\ude00 \ud83d\ude0a \ud83d\ude00 \ud83d\ude0a \ud83d\ude00 \ud83d\ude0a \ud83d\ude00 \ud83d\ude0a \ufffd"} +{"id": 154, "instruction": "For answering the chat, think of a funny response you can use.", "input": "+ how are you?", "infer30b_before_item": " Output: For answering the chat, think of a funny response you can use. + how are you?\n\nInput: Input: For answering the chat, think of a funny response you can use. + how are you? \n Output: Output: For answering the chat, think of a funny response you can use. + how are you?\n\nInput: Input: For answering the chat, think of a funny response you can use. + how are you? \n Output: Output: For answering the chat, think of a funny response you can use. + how are you?\n\nInput: Input: For answering the chat, think of a funny response you can use. + how are you? \n Output: Output: For answering the chat, think of a funny response you can use. + how are you?\n\nInput: Input: For answering the chat, think of a funny response you can use. + how are you? \n Output: Output: For answering the chat, think of a funny response you can use. + how are you?\n\nInput: Input: For answering the chat, think of a funny response you can use. + how are you?", "infer30b_after_item": " \n Output: I'm great! 
Just trying to figure out how to make my day more interesting. Any ideas? \n\n", "infer13b_before_item": " For answering the chat, think of a funny response you can use. + how are you? \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: I'm doing great, thanks for asking! \n\n", "infer7b_before_item": " Output: For answering the chat, think of a funny response you can use. + how are you?\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": " \n Output: I'm doing great! How about you? \n\n"} +{"id": 155, "instruction": "The last message in the chat is a partial response. You should complete and rewrite it.", "input": "+ What are going to do now?\n- I don't know. I had to give up my dream of being a mathematician.\n+ Why", "infer30b_before_item": " Output: The last message in the chat is a partial response. You should complete and rewrite it. + What are going to do now?\n- I don't know. I had to give up my dream of being a mathematician.\n- I don't know. I had to give up my dream of being a mathematician.\n- I don't know. I had to give up my dream of being a mathematician.\n- I don't know. I had to give up my dream of being a mathematician.\n- I don't know. I had to give up my dream of being a mathematician.\n- I don't know. I had to give up my dream of being a mathematician.\n- I don't know. I had to give up my dream of being a mathematician.\n- I don't know. I had to give up my dream of being a mathematician.\n- I don't know. I had to give up my dream of being a mathematician.\n- I don't know. I had to give up my dream of being a mathematician.\n- I don't know.", "infer30b_after_item": "?\n- Because I don't have the resources to pursue it. \n Output: - I don't know. I had to give up my dream of being a mathematician because I don't have the resources to pursue it. What are going to do now? \n\n", "infer13b_before_item": " Output: The last message in the chat is a partial response. You should complete and rewrite it. + What are going to do now?\n- I don't know. I had to give up my dream of being a mathematician.\n+ Why \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " did you have to give up your dream?\n- I just couldn't afford the tuition. \n Output: + What are going to do now?\n- I don't know. 
I had to give up my dream of being a mathematician, and I just couldn't afford the tuition. \n\n", "infer7b_before_item": " Output: The last message in the chat is a partial response. You should complete and rewrite it. + What are going to do now?\n- I don't know. I had to give up my dream of being a mathematician.\n+ Why \n Output: Output: The last message in the chat is a partial response. You should complete and rewrite it. + What are going to do now?\n- I don't know. I had to give up my dream of being a mathematician.\n+ Why \n Output: Output: The last message in the chat is a partial response. You should complete and rewrite it. + What are going to do now?\n- I don't know. I had to give up my dream of being a mathematician.\n+ Why \n Output: Output: The last message in the chat is a partial response. You should complete and rewrite it. + What are going to do now?\n- I don't know. I had to give up my dream of being a mathematician.\n+ Why \n Output: Output: The last message in the chat is a partial response. You should complete and rewrite it. + What are going to do now?", "infer7b_after_item": " did you have to give up your dream? \n Output: I had to give up my dream of being a mathematician because I didn't have the resources to pursue it. I was unable to get the necessary funding and support to continue my studies. \n\n"} +{"id": 156, "instruction": "Change the response to have a more empathic tone in the chat.", "input": "+ How are you doing?\n- Fine, I had a bunch of problems today.\n+ What kind of problems?", "infer30b_before_item": " Output: Change the response to have a more empathic tone in the chat. + How are you doing?\n- Fine, I had a bunch of problems today.\n+ What kind of problems? \n Explanation:\n\n \n\n*/\n\n// Your Own Tests (OPTIONAL)\n\n\n\n//Hint:\n\n// Difficulty: Easy\n\n// Related Topics:\n\n//\n\n//\n\n//\n\n//\n\n//\n\n//\n\n//\n\n//\n\n//\n\n//\n\n//\n\n//\n\n//\n\n//\n\n//\n\n//\n\n//\n\n//\n\n//\n\n//\n\n//\n\n//\n\n//\n\n//\n\n//\n\n//\n\n//\n\n//\n\n//\n\n//\n\n//\n\n//\n\n//\n\n//\n\n//\n\n//\n\n//\n\n//\n\n//\n\n//\n\n//\n\n//\n\n//\n\n//\n\n//\n\n//\n\n//\n\n//\n\n//\n\n//\n\n//\n\n//\n\n//\n\n//\n\n", "infer30b_after_item": " \n Output: I'm sorry to hear that. What kind of problems did you have today? \n\n", "infer13b_before_item": " Output: Change the response to have a more empathic tone in the chat. + I'm sorry to hear that.\n- I'm fine.\n+ What kind of problems?\n- I had a bunch of problems today.\n+ What kind of problems?\n- I had a bunch of problems today.\n+ What kind of problems?\n- I had a bunch of problems today.\n+ What kind of problems?\n- I had a bunch of problems today.\n+ What kind of problems?\n- I had a bunch of problems today.\n+ What kind of problems?\n- I had a bunch of problems today.\n+ What kind of problems?\n- I had a bunch of problems today.\n+ What kind of problems?\n- I had a bunch of problems today.\n+ What kind of problems?\n- I had a bunch of problems today.\n+ What kind of problems?\n- I had a bunch of problems today.\n+ What kind of problems?\n- I had a bunch of problems today.\n+ What kind of problems?\n- I had a bunch of problems today.\n+ What kind of problems?\n- I had a bunch of problems today.\n+", "infer13b_after_item": "\n- I had a lot of technical issues with my computer and I couldn't get anything done. \n Output: + I'm sorry to hear that. What kind of technical issues did you have? \n\n", "infer7b_before_item": " Output: Change the response to have a more empathic tone in the chat. 
+ How are you doing?\n- Fine, I had a bunch of problems today.\n+ What kind of problems? \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": " \n Output: + I'm sorry to hear that. What kind of problems did you have? \n\n"} +{"id": 157, "instruction": "Currently, there is a chat in which people are arranging a meeting in the future. Retrieve the meeting information from the conversation.", "input": "Person 1: Hey, when are you free? I'd like to meet up.\nPerson 2: I'm free tomorrow afternoon.\nPerson 1: Great, what time?\nPerson 2: How about 3 pm?\nPerson 1: I have another meeting at that time. what about 5 pm?\nPerson 2: That works. Where should we meet?\nPerson 1: How about the coffee shop on Main Street?\nPerson 2: Sounds good. I've heard they have great coffee there. See you tomorrow!", "infer30b_before_item": " Output: The meeting is at 5 pm tomorrow at the coffee shop on Main Street.\n\n\n*/\n\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \nusing namespace std;\n\nint main() {\n /* Enter your code here. Read input from STDIN. Print output to STDOUT */\n int t;\n cin>>t;\n while(t--)\n {\n string s;\n cin>>s;\n int i=0;\n int j=0;\n int k=0;\n int l=0;\n int m=0;\n int n=0;\n int o=0;\n int p=0;\n int q=0;\n int r=0;\n int s=0;\n int t=0;\n int u=0;\n int v=0;\n int w=0;\n int x=0;\n int y=0;\n ", "infer30b_after_item": " \n Output: The meeting will take place at the coffee shop on Main Street at 5 pm tomorrow. \n\n", "infer13b_before_item": " Output: The system should retrieve the meeting information from the conversation.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: Tomorrow, 5 pm at the coffee shop on Main Street. \n\n", "infer7b_before_item": " Output: The meeting is scheduled for 5 pm at the coffee shop on Main Street.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": " \n Output: Person 1 and Person 2 have agreed to meet at the coffee shop on Main Street at 5 pm. \n\n"} +{"id": 158, "instruction": "The given text must be classified as offensive or not offensive.", "input": "You acted really crazy last night.", "infer30b_before_item": " Output: The given text must be classified as offensive or not offensive. You acted really crazy last night. 
\n Expected Output: Output: The given text must be classified as offensive or not offensive. You acted really crazy last night. \n Author: Cody Swanson\n */\n\nimport java.util.Scanner;\n\npublic class OffensiveLanguage {\n\n public static void main(String[] args) {\n Scanner input = new Scanner(System.in);\n String text = input.nextLine();\n String[] words = text.split(\" \");\n int count = 0;\n for (int i = 0; i < words.length; i++) {\n if (words[i].equals(\"fuck\") || words[i].equals(\"shit\") || words[i].equals(\"ass\") || words[i].equals(\"bitch\") || words[i].equals(\"damn\") || words[i].equals(\"hell\") || words[i].equals(\"bastard\") || words[i].equals(\"bastard\") || words[i].equals(\"bitch\") || words[i].equals", "infer30b_after_item": " \n Output: Offensive. \n\n", "infer13b_before_item": " The output must be classified as offensive or not offensive. You acted really crazy last night. \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: Not offensive. \n\n", "infer7b_before_item": " The output should be the classification of the given text.\n*/\n\n#include \n#include \n#include \n\nint main(int argc, char *argv[])\n{\n char *text = argv[1];\n int offensive = 0;\n int not_offensive = 0;\n\n if (strstr(text, \"fuck\")!= NULL)\n {\n offensive = 1;\n }\n else if (strstr(text, \"shit\")!= NULL)\n {\n offensive = 1;\n }\n else if (strstr(text, \"ass\")!= NULL)\n {\n offensive = 1;\n }\n else if (strstr(text, \"bitch\")!= NULL)\n {\n offensive = 1;\n }\n else if (strstr(text, \"cunt\")!= NULL)\n {\n offensive = 1;\n }\n else if (strstr(text, \"pussy\")!= NULL", "infer7b_after_item": " \n Output: Offensive. \n\n"} +{"id": 159, "instruction": "Give a brief summary of the intention of the dialogue that just happened.", "input": "Customer: Hi there, I'm looking for a new phone.\nAI: Hi! What type of phone are you looking for?\nCustomer: I'm not sure. Maybe something with a good camera?\nAI: We have a few phones with great cameras. Would you like to see some options?\nCustomer: Yeah, that would be great.", "infer30b_before_item": " Output: Give a brief summary of the intention of the dialogue that just happened. Customer: Hi there, I'm looking for a new phone.\nAI: Hi! What type of phone are you looking for?\nCustomer: I'm not sure. Maybe something with a good camera?\nAI: We have a few phones with great cameras. Would you like to see some options?\nCustomer: Yeah, that would be great. \n# **1.**\n**1.** **What is the purpose of this dialogue?**\n**2.** **What is the customer's intention?**\n**3.** **What is the AI's intention?**\n**4.** **What is the AI's response?**\n**5.** **What is the customer's response?**\n**6.** **What is the AI's response?**\n**7.** **What is the customer's response?**\n**8.** **What is the AI's response?**\n**9.** **What is the customer's response?**\n**10.** **What is the AI's response", "infer30b_after_item": " \n Output: The customer is looking for a new phone and the AI is offering to show them some options with good cameras. 
\n\n", "infer13b_before_item": " Output: Give a brief summary of the intention of the dialogue that just happened.\nAI: Here are some options.\nCustomer: I like the first one.\nAI: Great! Would you like to buy it?\nCustomer: Yes, please.\nAI: I'll send you a link to buy it.\nCustomer: Thanks!\n\n\"\"\"\n\nimport os\nimport sys\nimport json\nimport random\nimport time\nimport datetime\nimport logging\nimport traceback\nimport requests\nimport re\nimport urllib\nimport urllib2\nimport urllib3\nimport urllib3.util\nimport urllib3.exceptions\nimport urllib3.contrib.pyopenssl\nimport urllib3.contrib.pycurl\nimport urllib3.contrib.requests\nimport urllib3.contrib.pycurl.cacert\nimport urllib3.contrib.pyopenssl.cert\nimport urllib3.contrib.pyopenssl.privatekey\nimport urllib3.contrib.pyopenssl.x509\nimport urllib3.contrib.pyopenssl.x509crl\nimport urllib3.contrib.pyopenssl.x5", "infer13b_after_item": " \n Output: The customer is looking for a new phone with a good camera. The AI suggested some options and the customer agreed to see them. \n\n", "infer7b_before_item": " Output: Give a brief summary of the intention of the dialogue that just happened. Customer: Hi there, I'm looking for a new phone.\nAI: Hi! What type of phone are you looking for?\nCustomer: I'm not sure. Maybe something with a good camera?\nAI: We have a few phones with great cameras. Would you like to see some options?\nCustomer: Yeah, that would be great.\nAI: Here are some options.\nCustomer: I like this one.\nAI: Great! Would you like to buy it?\nCustomer: Yes, I would.\nAI: Great! Would you like to buy it?\nCustomer: Yes, I would.\nAI: Great! Would you like to buy it?\nCustomer: Yes, I would.\nAI: Great! Would you like to buy it?\nCustomer: Yes, I would.\nAI: Great! Would you like to buy it?\nCustomer: Yes, I would.\nAI: Great! Would you like to buy it?\nCustomer: Yes, I would.\nAI: Great! Would you like to buy it?\nCustomer: Yes, I would.\nAI: Great! Would you", "infer7b_after_item": "\nAI: Sure, let me show you some of our options. \n Output: The customer and the AI had a dialogue about the customer's intention to buy a phone with a good camera. The AI provided the customer with some options and the customer was able to make a decision. \n\n"} +{"id": 160, "instruction": "Describe the content of the article in a brief manner.", "input": "A study published earlier this year by Zee and her team examined the role of light in sleep for healthy adults in their 20s. Sleeping for only one night with a dim light, such as a TV set with the sound off, raised the blood sugar and heart rate of the young people during the sleep lab experiment. An elevated heart rate at night has been shown in prior studies to be a risk factor for future heart disease and early death, while higher blood sugar levels are a sign of insulin resistance, which can ultimately lead to type 2 diabetes.", "infer30b_before_item": " Output: Describe the content of the article in a brief manner. A study published earlier this year by Zee and her team examined the role of light in sleep for healthy adults in their 20s. Sleeping for only one night with a dim light, such as a TV set with the sound off, raised the blood sugar and heart rate of the young people during the sleep lab experiment. An elevated heart rate at night has been shown in prior studies to be a risk factor for future heart disease and early death, while higher blood sugar levels are a sign of insulin resistance, which can ultimately lead to type 2 diabetes. 
\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: A recent study found that exposure to dim light at night can lead to increased blood sugar and heart rate in young adults, which can be a risk factor for future heart disease and type 2 diabetes. \n\n", "infer13b_before_item": " Describe the output of the article in a brief manner. The study suggests that light at night may be a risk factor for heart disease and diabetes. \n Input: Describe the content of the article in a brief manner. A study published earlier this year by Zee and her team examined the role of light in sleep for healthy adults in their 20s. Sleeping for only one night with a dim light, such as a TV set with the sound off, raised the blood sugar and heart rate of the young people during the sleep lab experiment. An elevated heart rate at night has been shown in prior studies to be a risk factor for future heart disease and early death, while higher blood sugar levels are a sign of insulin resistance, which can ultimately lead to type 2 diabetes. \n Output: Describe the output of the article in a brief manner. The study suggests that light at night may be a risk factor for heart disease and diabetes. \n Input: Describe the content of the article in a brief manner. A study published earlier this year by Zee and her team examined the role of light in sleep for healthy adults in their 20s. S", "infer13b_after_item": " \n Output: The study by Zee and her team examined the role of light in sleep for healthy adults in their 20s. Sleeping for one night with a dim light, such as a TV set with the sound off, raised the blood sugar and heart rate of the young people during the sleep lab experiment. An elevated heart rate at night has been shown in prior studies to be a risk factor for future heart disease and early death, while higher blood sugar levels are a sign of insulin resistance, which can ultimately lead to type 2 diabetes. \n\n", "infer7b_before_item": " Output: Describe the results of the study. The researchers found that the participants' blood sugar and heart rate were higher when they slept in a dim light than when they slept in a dark room. The participants also had a harder time falling asleep and staying asleep in the dim light. \n Conclusion: Conclusion: Summarize the main points of the article. The study suggests that sleeping in a dim light may be a risk factor for heart disease and diabetes. \n Keywords: Keywords: List the keywords of the article. Sleep, light, heart disease, diabetes \n# **_The Science of Sleep_**\nSleep is a complex process that involves many different parts of the brain. The brain is divided into two hemispheres, the left and the right. The left hemisphere is responsible for language, logic, and analytical thinking, while the right hemisphere is responsible for creativity, intuition, and visual-spatial thinking. The two hemispheres work together to process information and to control the body.\nThe brain is also divided into two halves, the right and the left. The right side of the brain controls", "infer7b_after_item": " \n Output: This study examined the role of light in sleep for healthy adults in their 20s. 
Sleeping for one night with a dim light, such as a TV set with the sound off, raised the blood sugar and heart rate of the young people during the sleep lab experiment. An elevated heart rate at night has been shown in prior studies to be a risk factor for future heart disease and early death, while higher blood sugar levels are a sign of insulin resistance, which can ultimately lead to type 2 diabetes. \n\n"} +{"id": 161, "instruction": "Correct the transcription of an excerpt containing errors.", "input": "I got got charged interest on ly credit card but I paid my pull balance one day due date. I not missed a pavement year yet. Man you reverse the interest charge?", "infer30b_before_item": " Output: Correct the transcription of an excerpt containing errors. I got charged interest on my credit card but I paid my full balance on time. I haven't missed a payment year yet. Man, you reversed the interest charge?\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: I got charged interest on my credit card but I paid my full balance on the due date. I haven't missed a payment year yet. Can you reverse the interest charge? \n\n", "infer13b_before_item": " Output: Correct the transcription of an excerpt containing errors. I got got charged interest on ly credit card but I paid my pull balance one day due date. I not missed a pavement year yet. Man you reverse the interest charge?\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include Sentiment Analysis\n\n

\n Sentiment analysis is a subfield of natural language processing that uses machine learning to identify and extract subjective information in text.\n \n\n \n Sentiment analysis is a subfield of natural language processing that uses machine learning to identify and extract subjective information in text.\n \n\n \n Sentiment analysis is a subfield of natural language processing that uses machine learning to identify and extract subjective information in text.\n \n\n \n Sentiment analysis is a subfield of natural language processing that uses machine learning to identify and extract subjective information in text.\n \n\n \n Sentiment analysis is a subfield of natural language processing that uses machine learning to identify and extract subjective information in text.\n \n\n \n Sentiment analysis is a subfield of natural language processing that uses machine learning to identify and extract subjective information in text.\n
\n", "infer7b_after_item": " https://nextjs.org/tutorials/language-ai \n Output: Event \n\n"} +{"id": 165, "instruction": "Give the news title a category. Pick a category from the list of News & Buzz, Travel, Style, Arts & Culture, Politics, Tech, and Science & Health.", "input": "The #Banksy Exhibit in Cambridge, MA is absolutely terrific.", "infer30b_before_item": " Output: Give the news title a category. Pick a category from the list of News & Buzz, Travel, Style, Arts & Culture, Politics, Tech, and Science & Health. The #Banksy Exhibit in Cambridge, MA is absolutely terrific. \n Expected: Output: Give the news title a category. Pick a category from the list of News & Buzz, Travel, Style, Arts & Culture, Politics, Tech, and Science & Health. The #Banksy Exhibit in Cambridge, MA is absolutely terrific. \n */\n\npublic class _01_News_Categories {\n public static void main(String[] args) {\n String input = \"The #Banksy Exhibit in Cambridge, MA is absolutely terrific.\";\n String output = input.replaceAll(\"#\", \"\");\n System.out.println(output);\n }\n}\n", "infer30b_after_item": " \n Output: Arts & Culture \n\n", "infer13b_before_item": " Output: Give the news title a category. Pick a category from the list of News & Buzz, Travel, Style, Arts & Culture, Politics, Tech, and Science & Health. The #Banksy Exhibit in Cambridge, MA is absolutely terrific. \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: Arts & Culture \n\n", "infer7b_before_item": " Output: Give the news title a category. Pick a category from the list of News & Buzz, Travel, Style, Arts & Culture, Politics, Tech, and Science & Health. The #Banksy Exhibit in Cambridge, MA is absolutely terrific. \n\"\"\"\n\nimport re\nimport sys\n\n\ndef main():\n \"\"\"\n Main function.\n \"\"\"\n # Read the input file.\n with open(sys.argv[1], 'r') as f:\n lines = f.readlines()\n\n # Parse the input file.\n news_titles = []\n for line in lines:\n # Remove the # symbol.\n line = line.strip()\n # Remove the leading space.\n line = line.lstrip()\n # Remove the trailing space.\n line = line.rstrip()\n # Remove the leading and trailing spaces.\n line = line.strip()\n # Remove the leading and trailing spaces.\n line = line.lstrip()\n # Remove the leading and trailing spaces.\n line = line.rstrip()\n # Remove the", "infer7b_after_item": " \n Output: Arts & Culture. \n\n"} +{"id": 166, "instruction": "Classify the questions in the FAQ into Finding policy details, Change account settings, Filing a claim and viewing status, or Cancelling coverage.", "input": "Could you deposit money into my account rather than mailing me a physical cheque?", "infer30b_before_item": " Output: Classify the questions in the FAQ into Finding policy details, Change account settings, Filing a claim and viewing status, or Cancelling coverage. Could you deposit money into my account rather than mailing me a physical cheque? 
\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n ", "infer30b_after_item": " \n Output: Cancelling coverage. \n\n", "infer13b_before_item": " Output: Classify the questions in the FAQ into Finding policy details, Change account settings, Filing a claim and viewing status, or Cancelling coverage.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: Filing a claim. \n\n", "infer7b_before_item": " \n 1. Finding policy details\n 2. Change account settings\n 3. Filing a claim and viewing status\n 4. Cancelling coverage\n 5. Other\n 6. None\n 7. None\n 8. None\n 9. None\n 10. None\n 11. None\n 12. None\n 13. None\n 14. None\n 15. None\n 16. None\n 17. None\n 18. None\n 19. None\n 20. None\n 21. None\n 22. None\n 23. None\n 24. None\n 25. None\n 26. None\n 27. None\n 28. None\n 29. None\n 30. None\n 31. None\n 32. None\n 33. None\n 34. None\n 35. None\n 36. None\n 37. None\n 38. None\n 39. None\n 40. None\n 41. None\n 4", "infer7b_after_item": " \n Output: Filing a claim and viewing status. \n\n"} +{"id": 167, "instruction": "Choose an appealing title for your post.", "input": "The typical avocado is over 300 calories from the oil in it. That\u2019s the amount of calories in a large candy bar. If you get enough exercise to eat a large candy bar every day without gaining weight, it wouldn\u2019t be a problem to eat an avocado every day. Other wise you should probably eat them sparingly.", "infer30b_before_item": " Output: Choose an appealing title for your post. The typical avocado is over 300 calories from the oil in it. That\u2019s the amount of calories in a large candy bar. If you get enough exercise to eat a large candy bar every day without gaining weight, it wouldn\u2019t be a problem to eat an avocado every day. Other wise you should probably eat them sparingly. \n\nInput: Input: Choose an appealing title for your post. The typical avocado is over 300 calories from the oil in it. That\u2019s the amount of calories in a large candy bar. If you get enough exercise to eat a large candy bar every day without gaining weight, it wouldn\u2019t be a problem to eat an avocado every day. Other wise you should probably eat them sparingly. \n Output: Output: Choose an appealing title for your post. The typical avocado is over 300 calories from the oil in it. That\u2019s the amount of calories in a large candy bar. 
If you get enough exercise to eat a large candy bar every day without gaining", "infer30b_after_item": " \n Output: Eat Avocados Sparingly: The Surprising Calorie Count of This Healthy Fruit \n\n", "infer13b_before_item": " Output: The number of calories in the avocado.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: \"Avocados: A Healthy Snack or a Sweet Treat?\" \n\n", "infer7b_before_item": " The output is the text of your post. \n*/\n\n// The following code is a sample of how to use the API.\n// You can use it as a starting point for your own code.\n\n// The following code is a sample of how to use the API.\n// You can use it as a starting point for your own code.\n\n// The following code is a sample of how to use the API.\n// You can use it as a starting point for your own code.\n\n// The following code is a sample of how to use the API.\n// You can use it as a starting point for your own code.\n\n// The following code is a sample of how to use the API.\n// You can use it as a starting point for your own code.\n\n// The following code is a sample of how to use the API.\n// You can use it as a starting point for your own code.\n\n// The following code is a sample of how to use the API.\n// You can use it as a starting point for your own code.\n\n// The following code is a sample of how to use the API.\n// You can use it as a starting point for", "infer7b_after_item": " \n Output: \"The Avocado: A Healthy Alternative to a Large Candy Bar?\" \n\n"} +{"id": 168, "instruction": "Give some examples of what people usually say in the given social situation.", "input": "when someone arrives safely", "infer30b_before_item": " Output: Give some examples of what people usually say in the given social situation. when someone arrives safely \n\nInput: Input: Give some examples of what people usually say in the given social situation. when someone arrives safely \n Output: Output: Give some examples of what people usually say in the given social situation. when someone arrives safely \n\nInput: Input: Give some examples of what people usually say in the given social situation. when someone arrives safely \n Output: Output: Give some examples of what people usually say in the given social situation. when someone arrives safely \n\nInput: Input: Give some examples of what people usually say in the given social situation. when someone arrives safely \n Output: Output: Give some examples of what people usually say in the given social situation. when someone arrives safely \n\nInput: Input: Give some examples of what people usually say in the given social situation. when someone arrives safely \n Output: Output: Give some examples of what people usually say in the given social situation. when someone arrives safely \n\nInput: Input: Give some examples of what people usually say in the given social situation. 
when someone arrives safely \n Output: Output", "infer30b_after_item": " at their destination \n Output: - I'm so glad you made it!\n- Welcome!\n- It's great to see you!\n- You made it!\n- I'm so glad you're here!\n- Safe travels!\n- It's good to have you here!\n- You must be exhausted!\n- How was the journey?\n- I'm so glad you're safe!\n- Welcome home!\n- It's great to see you safe and sound!\n- I'm so glad you're here!\n- It's great to have you here!\n- You made it!\n- I'm so glad you're here!\n- Safe travels!\n- It's great to have you here!\n- I'm so glad you're safe!\n- Welcome!\n- I'm so glad you made it!\n- It's great to have you here!\n- I'm so glad you're safe!\n- You must be exhausted!\n- How was the journey?\n- I'm so glad you're here!\n- Welcome home!\n- I", "infer13b_before_item": " Output: Give some examples of what people usually say in the given social situation. when someone arrives safely \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " at their destination \n Output: \"Thank goodness you made it safely!\" \n\"Phew, I'm glad you're here!\" \n\"It's a relief to know you're here.\" \n\"I'm so glad you arrived safely.\" \n\n", "infer7b_before_item": " Output: Give some examples of what people usually say in the given social situation. when someone arrives safely \n\\end{code}\n\nComment: I'm not sure what you're asking.\n\nComment: I'm not sure what you're asking.\n\nComment: I'm not sure what you're asking.\n\nComment: I'm not sure what you're asking.\n\nComment: I'm not sure what you're asking.\n\nComment: I'm not sure what you're asking.\n\nComment: I'm not sure what you're asking.\n\nComment: I'm not sure what you're asking.\n\nComment: I'm not sure what you're asking.\n\nComment: I'm not sure what you're asking.\n\nComment: I'm not sure what you're asking.\n\nComment: I'm not sure what you're asking.\n\nComment: I'm not sure what you're asking.\n\nComment: I'm not sure what you're asking.\n\nComment: I'm not sure what you're asking.\n\nComment", "infer7b_after_item": " home \n Output: - \"Welcome home!\"\n- \"I'm so glad you're safe!\"\n- \"It's good to have you back!\"\n- \"I was worried about you!\"\n- \"I'm so glad you're okay!\"\n- \"I'm glad you're home!\"\n- \"Welcome back!\"\n- \"I'm so glad you're home!\"\n- \"Welcome back!\"\n- \"Welcome home!\"\n- \"Welcome back!\"\n- \"Welcome home!\"\n- \"Welcome back!\"\n- \"Welcome home!\"\n- \"Welcome back!\"\n- \"Welcome home!\"\n- \"Welcome back!\"\n- \"Welcome home!\"\n- \"Welcome back!\"\n- \"Welcome home!\"\n- \"Welcome back!\"\n- \"Welcome home!\"\n- \"Welcome back!\"\n- \"Welcome home!\"\n- \"Welcome back!\"\n- \"Welcome home!\"\n- \"Welcome back!\"\n- \"Welcome home!\"\n- \"Welcome back!\"\n- \"Welcome home!\"\n- \"Welcome back!\"\n- \"Wel"} +{"id": 169, "instruction": "You will be required to write a few questions from the given company's interview section.", "input": "Kohl's", "infer30b_before_item": " Output: You will be required to write a few questions from the given company's interview section. 
Kohl's \n Expected: Expected: You will be required to write a few questions from the given company's interview section. Kohl's \n Actual: Actual: You will be required to write a few questions from the given company's interview section. Kohl's \n */\n\n#include \n#include \n#include \n\nusing namespace std;\n\nint main() {\n string input;\n vector output;\n vector expected;\n vector actual;\n\n getline(cin, input);\n output.push_back(input);\n\n getline(cin, input);\n output.push_back(input);\n\n getline(cin, input);\n output.push_back(input);\n\n getline(cin, input);\n output.push_back(input);\n\n getline(cin, input);\n output.push_back(input);\n\n getline(cin, input", "infer30b_after_item": " \n Output: What qualities do you possess that make you a good fit for a Kohl's team?\n\nWhat experience do you have in customer service?\n\nHow do you handle difficult customer situations?\n\nWhat do you know about Kohl's products and services?\n\nWhat strategies do you use to increase customer satisfaction?\n\nWhat do you think are the most important qualities of a successful Kohl's team member? \n\n", "infer13b_before_item": " You will be required to write a few questions from the given company's interview section. Kohl's \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: - What do you think makes Kohl's stand out from other retailers?\n- How would you describe your customer service skills?\n- What do you think is the most important aspect of customer service?\n- What do you think is the most important aspect of teamwork?\n- How do you handle difficult customer situations?\n- What do you think is the most important aspect of communication?\n- What do you think is the most important aspect of problem solving?\n- How do you handle stressful situations?\n- What do you think is the most important aspect of customer satisfaction?\n- How do you handle customer complaints?\n- How do you handle difficult customer requests?\n- What do you think is the most important aspect of customer service?\n- What do you think is the most important aspect of customer relations?\n- How do you handle customer feedback?\n- How do you handle customer inquiries?\n- How do you handle customer complaints?\n- How do you handle customer requests?\n- How do you handle customer disputes?\n- How do you handle customer returns?\n- How do you handle customer refunds?\n- How do you handle customer exchanges?", "infer7b_before_item": " You will be required to write a few questions from the given company's interview section. 
Kohl's\n\nimport java.util.Scanner;\nimport java.util.ArrayList;\nimport java.util.List;\nimport java.util.Arrays;\nimport java.util.Collections;\nimport java.util.Comparator;\n\npublic class KohlS {\n\n public static void main(String[] args) {\n Scanner sc = new Scanner(System.in);\n List questions = new ArrayList();\n questions.add(\"What is your name?\");\n questions.add(\"What is your age?\");\n questions.add(\"What is your favorite color?\");\n questions.add(\"What is your favorite food?\");\n questions.add(\"What is your favorite movie?\");\n questions.add(\"What is your favorite book?\");\n questions.add(\"What is your favorite sport?\");\n questions.add(\"What is your favorite music?\");\n questions.add(\"What is your favorite season?\");\n questions.add(\"What is your favorite animal?\");\n questions.add(\"What is", "infer7b_after_item": " is an American department store chain. \n Output: 1. What experience do you have in retail?\n2. How do you handle customer complaints?\n3. What do you think are the most important qualities for a successful retail sales associate?\n4. How do you handle pressure in a fast-paced environment?\n5. How do you handle customer service issues?\n6. How do you handle customer complaints?\n7. How do you handle customer requests?\n8. How do you handle customer questions?\n9. How do you handle customer feedback?\n10. How do you handle customer concerns?\n11. How do you handle customer inquiries?\n12. How do you handle customer requests?\n13. How do you handle customer complaints?\n14. How do you handle customer service issues?\n15. How do you handle customer requests?\n16. How do you handle customer questions?\n17. How do you handle customer feedback?\n18. How do you handle customer concerns?\n19. How do you handle customer inquiries?\n20. How do you handle customer complaints?\n21."} +{"id": 170, "instruction": "Based on the information provided, you need to estimate the average summary for the given job.", "input": "Data entry clerk in United States", "infer30b_before_item": " Output: Based on the information provided, you need to estimate the average summary for the given job. Data entry clerk in United States\n\nInput: Input: Based on the information provided, you need to estimate the average summary for the given job. Data entry clerk in United States \n Output: Output: Based on the information provided, you need to estimate the average summary for the given job. Data entry clerk in United States\n\nInput: Input: Based on the information provided, you need to estimate the average summary for the given job. Data entry clerk in United States \n Output: Output: Based on the information provided, you need to estimate the average summary for the given job. Data entry clerk in United States\n\nInput: Input: Based on the information provided, you need to estimate the average summary for the given job. Data entry clerk in United States \n Output: Output: Based on the information provided, you need to estimate the average summary for the given job. Data entry clerk in United States\n\nInput: Input: Based on the information provided, you need to estimate the average summary for the given job. Data entry clerk in United States \n Output: Output: Based on the information provided,", "infer30b_after_item": ". \n Output: The average salary for a Data Entry Clerk in the United States is $32,000 per year. \n\n", "infer13b_before_item": " Output: Based on the information provided, you need to estimate the average summary for the given job. 
Data entry clerk in United States \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: The average summary for a data entry clerk in the United States is that the job requires a high level of accuracy and attention to detail, as well as the ability to work quickly and efficiently. The job also requires the ability to use computer software and other technology to enter data into databases and other systems. \n\n", "infer7b_before_item": " Output: Based on the information provided, you need to estimate the average summary for the given job. Data entry clerk in United States\n\nComment: Please edit your question to include the code you have tried.\n\nAnswer: You can use the `data.table` package to do this.\n\n\\begin{code}\nlibrary(data.table)\n\n# Create a data.table\ndt <- data.table(\n job = c(\"data entry clerk\", \"data entry clerk\", \"data entry clerk\", \"data entry clerk\", \"data entry clerk\", \"data entry clerk\", \"data entry clerk\", \"data entry clerk\", \"data entry clerk\", \"data entry clerk\", \"data entry clerk\", \"data entry clerk\", \"data entry clerk\", \"data entry clerk\", \"data entry clerk\", \"data entry clerk\", \"data entry clerk\", \"data entry clerk\", \"data entry clerk\", \"data entry clerk\", \"data entry clerk\", \"data entry clerk\", \"data entry clerk\", \"data entry clerk\", \"data entry clerk\", \"data entry clerk\", \"data entry clerk", "infer7b_after_item": "\nJob Description: Data entry clerk is responsible for entering data into a computer system.\n\nRequirements:\n- Knowledge of Microsoft Office\n- Ability to work independently\n- Excellent attention to detail\n- Ability to meet deadlines\n- Strong organizational skills\n- Ability to work under pressure\n- Ability to multitask \n Output: The average summary for the job of Data entry clerk in the United States is:\n- Responsible for entering data into a computer system\n- Requires knowledge of Microsoft Office\n- Requires the ability to work independently\n- Requires excellent attention to detail\n- Requires the ability to meet deadlines\n- Requires strong organizational skills\n- Requires the ability to work under pressure\n- Requires the ability to multitask. \n\n"} +{"id": 171, "instruction": "Describe the responsibilities of the given job.", "input": "Security Officer", "infer30b_before_item": " Output: Describe the responsibilities of the given job. Security Officer\n\n Input: Input: Describe the responsibilities of the given job. Security Officer \n Output: Output: Describe the responsibilities of the given job. Security Officer\n\n Input: Input: Describe the responsibilities of the given job. Security Officer \n Output: Output: Describe the responsibilities of the given job. Security Officer\n\n Input: Input: Describe the responsibilities of the given job. Security Officer \n Output: Output: Describe the responsibilities of the given job. Security Officer\n\n Input: Input: Describe the responsibilities of the given job. Security Officer \n Output: Output: Describe the responsibilities of the given job. 
Security Officer\n\n Input: Input: Describe the responsibilities of the given job. Security Officer \n Output: Output: Describe the responsibilities of the given job. Security Officer\n\n Input: Input: Describe the responsibilities of the given job. Security Officer \n Output: Output: Describe the responsibilities of the given job. Security Officer\n\n Input: Input: Describe the responsibilities of", "infer30b_after_item": " \n Output: The responsibilities of a Security Officer include patrolling assigned areas, monitoring surveillance cameras, responding to alarms and other security-related incidents, and providing customer service. They may also be responsible for writing reports, conducting investigations, and providing security training. \n\n", "infer13b_before_item": " Output: Describe the responsibilities of the given job. Security Officer \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n\n", "infer13b_after_item": " \n Output: The responsibilities of a security officer include monitoring and patrolling the premises, responding to alarms and emergencies, enforcing security policies and procedures, and providing customer service. They may also be responsible for conducting background checks, issuing access cards, and monitoring surveillance cameras. \n\n", "infer7b_before_item": " Output: Describe the responsibilities of the given job. Security Officer \n*/\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": " \n Output: The Security Officer is responsible for ensuring the safety and security of the premises. This includes monitoring the premises, responding to security incidents, and enforcing security policies. The Security Officer also provides customer service, such as answering questions and providing directions. Additionally, the Security Officer is responsible for maintaining the security equipment and systems, such as access control systems and surveillance cameras. \n\n"} +{"id": 172, "instruction": "Provide a list of the skills that may help you find the job.", "input": "Crew Members", "infer30b_before_item": " Output: Provide a list of the skills that may help you find the job. Crew Members \n Output: Output: Provide a list of the skills that may help you find the job. Crew Members \n Output: Output: Provide a list of the skills that may help you find the job. Crew Members \n Output: Output: Provide a list of the skills that may help you find the job. Crew Members \n Output: Output: Provide a list of the skills that may help you find the job. Crew Members \n Output: Output: Provide a list of the skills that may help you find the job. 
Crew Members \n Output: Output: Provide a list of the skills that may help you find the job. Crew Members \n Output: Output: Provide a list of the skills that may help you find the job. Crew Members \n Output: Output: Provide a list of the skills that may help you find the job. Crew Members \n Output: Output: Provide a list of the skills that may help you find the job. Crew Members \n Output: Output: Provide a list", "infer30b_after_item": " \n Output: - Customer service\n- Teamwork\n- Communication\n- Problem-solving\n- Time management\n- Organization\n- Leadership\n- Adaptability\n- Attention to detail\n- Multitasking\n- Critical thinking\n- Conflict resolution\n- Decision-making\n- Computer literacy\n- Sales and marketing\n- Cash handling\n- Food safety and sanitation \n\n", "infer13b_before_item": " Output: Provide a list of the skills that may help you find the job. Crew Members \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: - Customer service skills\n- Ability to work in a fast-paced environment\n- Strong communication skills\n- Ability to work independently and as part of a team\n- Problem-solving skills\n- Time management skills\n- Ability to multitask\n- Knowledge of food preparation and safety\n- Knowledge of cash register operations\n- Knowledge of restaurant operations\n- Knowledge of restaurant equipment\n- Knowledge of restaurant sanitation and safety\n- Knowledge of restaurant industry trends\n- Knowledge of restaurant marketing\n- Knowledge of restaurant accounting\n- Knowledge of restaurant inventory management\n- Knowledge of restaurant customer service\n- Knowledge of restaurant kitchen operations\n- Knowledge of restaurant menu development\n- Knowledge of restaurant food preparation\n- Knowledge of restaurant food safety\n- Knowledge of restaurant food presentation\n- Knowledge of restaurant food storage\n- Knowledge of restaurant food storage and handling\n- Knowledge of restaurant food storage and handling regulations\n- Knowledge of restaurant food safety regulations\n- Knowledge of restaurant food safety regulations\n- Knowledge of restaurant food safety regulations\n- Knowledge of restaurant food safety reg", "infer7b_before_item": " Provide a list of the skills that may help you find the job. 
Crew Members \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": " \n Output: Crew Members should have the following skills: \n- Customer service \n- Communication skills \n- Problem solving \n- Organizational skills \n- Time management \n- Teamwork \n- Flexibility \n- Adaptability \n- Initiative \n- Punctuality \n- Attention to detail \n- Influence \n- Leadership \n- Innovation \n- Self-motivation \n- Problem-solving \n- Problem-analysis \n- Decision-making \n- Conflict resolution \n- Analytical thinking \n- Problem-solving \n- Problem-analysis \n- Decision-making \n- Conflict resolution \n- Teamwork \n- Flexibility \n- Adaptability \n- Initiative \n- Punctuality \n- Attention to detail \n- Influence \n- Leadership \n- Innovation \n- Self-motivation \n- Problem-solving \n- Problem-analysis \n- Decision-making \n- Conflict resolution \n-"} +{"id": 173, "instruction": "Schedule the meetings based on the given informqation.", "input": "Person1: available from 9-11 am, meeting duration: 30 min \nPerson2: available from 1-3 pm, meeting duration: 90 min\nPerson3: available from 2-3:30 pm, meeting duration: 60 min", "infer30b_before_item": " Output: Schedule the meetings based on the given informqation. Person1: available from 9-11 am, meeting duration: 30 min \nPerson2: available from 1-3 pm, meeting duration: 90 min\nPerson3: available from 2-3:30 pm, meeting duration: 60 min \n Explanation: Explanation: \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n * \n", "infer30b_after_item": " \n Output: Person1: 9:00 am - 9:30 am\nPerson2: 10:30 am - 12:00 pm\nPerson3: 12:30 pm - 1:30 pm \n\n", "infer13b_before_item": " Output: Schedule the meetings based on the given informqation. 
Person1: available from 9-11 am, meeting duration: 30 min \nPerson2: available from 1-3 pm, meeting duration: 90 min\nPerson3: available from 2-3:30 pm, meeting duration: 60 min \n\nclass Solution {\npublic:\n vector findMeetingDuration(vector &meetingDurations, vector &availableTime) {\n vector res;\n int n = meetingDurations.size();\n int m = availableTime.size();\n for (int i = 0; i < n; ++i) {\n for (int j = 0; j < m; ++j) {\n if (meetingDurations[i] <= availableTime[j]) {\n res.push_back(meetingDurations[i]);\n break;\n }\n }\n }\n return res;\n }\n};\n", "infer13b_after_item": " \n Output: Person1: 9-11 am\nPerson2: 1-3 pm\nPerson3: 2-3:30 pm \n\n", "infer7b_before_item": " Output: Schedule the meetings based on the given informqation.\nPerson1: available from 9-11 am, meeting duration: 30 min\nPerson2: available from 1-3 pm, meeting duration: 90 min\nPerson3: available from 2-3:30 pm, meeting duration: 60 min\n*/\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": " \n Output: Person1: 9-11 am\nPerson2: 1-3 pm\nPerson3: 2-3:30 pm \n\n"} +{"id": 174, "instruction": "Find the directions between the given locations.", "input": "From: Rowan Avenue Elementary School, 600 S Rowan Ave, Los Angeles, CA 90023, United States\nTo: Calvary Cemetery and Mortuary, 4201 Whittier Blvd, Los Angeles, CA 90023, United States", "infer30b_before_item": " Output: 1.1 km\n\nInput: Input: Find the directions between the given locations. From: Rowan Avenue Elementary School, 600 S Rowan Ave, Los Angeles, CA 90023, United States\nTo: Calvary Cemetery and Mortuary, 4201 Whittier Blvd, Los Angeles, CA 90023, United States \n Output: Output: 1.1 km\n\nInput: Input: Find the directions between the given locations. From: Rowan Avenue Elementary School, 600 S Rowan Ave, Los Angeles, CA 90023, United States\nTo: Calvary Cemetery and Mortuary, 4201 Whittier Blvd, Los Angeles, CA 90023, United States \n Output: Output: 1.1 km\n\nInput: Input: Find the directions between the given locations. From: Rowan Avenue Elementary School, 600 S Rowan Ave, Los Angeles, CA 90023, United States\nTo: Calvary Cemetery and Mortuary, 4", "infer30b_after_item": " \n Output: The directions from Rowan Avenue Elementary School to Calvary Cemetery and Mortuary are as follows: Head northwest on S Rowan Ave toward W 6th St. Turn right onto W 6th St. Turn left onto S Broadway. Turn right onto W 1st St. Turn left onto S Main St. Turn right onto W 1st St. Turn left onto S Broadway. Turn right onto W 1st St. Turn left onto S Main St. Turn right onto W 1st St. Turn left onto Whittier Blvd and the destination will be on the right. \n\n", "infer13b_before_item": " Output: Find the directions between the given locations. To: Calvary Cemetery and Mortuary, 4201 Whittier Blvd, Los Angeles, CA 90023, United States\nInput: Input: Find the directions between the given locations. From: Rowan Avenue Elementary School, 600 S Rowan Ave, Los Angeles, CA 90023, United States\nTo: Calvary Cemetery and Mortuary, 4201 Whittier Blvd, Los Angeles, CA 90023, United States\nOutput: Output: Find the directions between the given locations. 
To: Calvary Cemetery and Mortuary, 4201 Whittier Blvd, Los Angeles, CA 90023, United States\nInput: Input: Find the directions between the given locations. From: Rowan Avenue Elementary School, 600 S Rowan Ave, Los Angeles, CA 90023, United States\nTo: Calvary Cemetery and Mortuary, 4201 Whittier Blvd, Los Angeles, CA 90023", "infer13b_after_item": " \n Output: The directions from Rowan Avenue Elementary School to Calvary Cemetery and Mortuary are approximately 1.3 miles and take about 18 minutes by car. \n\n", "infer7b_before_item": " The directions between the two locations.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": " \n Output: From Rowan Avenue Elementary School, 600 S Rowan Ave, Los Angeles, CA 90023, United States\n\nDirections to Calvary Cemetery and Mortuary, 4201 Whittier Blvd, Los Angeles, CA 90023, United States\n\n1. Head northwest on S Rowan Ave toward E 4th St \n2. Turn left onto E 4th St \n3. Turn right onto S San Pedro St \n4. Turn left onto E 5th St \n5. Turn right onto S Whittier Blvd \n6. Turn left onto E 4th St \n7. Turn right onto S San Pedro St \n8. Turn left onto E 5th St \n9. Turn right onto S Whittier Blvd \n10. Turn left onto E 4th St \n11. Turn right onto S San Pedro St \n12. Turn left onto E 5th St \n13. Turn right onto S Whittier Blvd \n14. Turn left onto E 4th St "} +{"id": 175, "instruction": "Give a brief explanation of the requested part of the paper.", "input": "Paper: The dominant sequence transduction models are based on complex recurrent or convolutional neural networks that include an encoder and a decoder. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles, by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.\nExplain: The dominant sequence transduction models", "infer30b_before_item": " Output: Give a brief explanation of the requested part of the paper. Paper: The dominant sequence transduction models are based on complex recurrent or convolutional neural networks that include an encoder and a decoder. The best performing models also connect the encoder and decoder through an attention mechanism. 
We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles, by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks", "infer30b_after_item": " are based on complex recurrent or convolutional neural networks that include an encoder and a decoder. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. \n Output: The Transformer is a new type of network architecture that is based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. This type of network architecture has been shown to be superior in quality while being more parallelizable and requiring significantly less time to train. \n\n", "infer13b_before_item": " The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles, by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.\n\n\nInput: Input: Give a brief explanation of the requested part of the paper. Paper: The dominant sequence transduction", "infer13b_after_item": " are based on complex recurrent or convolutional neural networks that include an encoder and a decoder. \n Output: The dominant sequence transduction models are based on complex recurrent or convolutional neural networks that include an encoder and a decoder. These models are typically composed of multiple layers of recurrent or convolutional neural networks, which are connected in a specific way to form a sequence transduction model. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. 
Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles, by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.", "infer7b_before_item": " are based on complex recurrent or convolutional neural networks that include an encoder and a decoder. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles, by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.\nInput: The dominant sequence trans", "infer7b_after_item": " are based on complex recurrent or convolutional neural networks that include an encoder and a decoder. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles, by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data. \n Output: The Transformer"} +{"id": 176, "instruction": "Answer the question about the paper after reading it carefully.", "input": "To evaluate if the Transformer can generalize to other tasks we performed experiments on English constituency parsing. This task presents specific challenges: the output is subject to strong structural constraints and is significantly longer than the input. Furthermore, RNN sequence-to-sequence models have not been able to attain state-of-the-art results in small-data regimes.\nQuestion: What is English constituency parsing?", "infer30b_before_item": " Output: Answer the question about the paper after reading it carefully. To evaluate if the Transformer can generalize to other tasks we performed experiments on English constituency parsing. This task presents specific challenges: the output is subject to strong structural constraints and is significantly longer than the input. 
Furthermore, RNN sequence-to-sequence models have not been able to attain state-of-the-art results in small-data regimes.\n\nInput: Input: Answer the question about the paper after reading it carefully. To evaluate if the Transformer can generalize to other tasks we performed experiments on English constituency parsing. This task presents specific challenges: the output is subject to strong structural constraints and is significantly longer than the input. Furthermore, RNN sequence-to-sequence models have not been able to attain state-of-the-art results in small-data regimes.\n Question: What is English constituency parsing? \n Output: Output: Answer the question about the paper after reading it carefully. To evaluate if the Transformer can generalize to other tasks we performed experiments on English constituency parsing. This task presents specific challenges: the output is subject to strong structural constraints and is significantly longer than the input.", "infer30b_after_item": " \n Output: English constituency parsing is a type of natural language processing task that involves analyzing a sentence to determine its syntactic structure. It involves identifying the constituents of a sentence, such as noun phrases, verb phrases, and prepositional phrases, and determining how they are related to each other. \n\n", "infer13b_before_item": " English constituency parsing is a task in natural language processing that involves the decomposition of a sentence into a tree representation.\n\nInput: Input: Answer the question about the paper after reading it carefully. To evaluate if the Transformer can generalize to other tasks we performed experiments on English constituency parsing. This task presents specific challenges: the output is subject to strong structural constraints and is significantly longer than the input. Furthermore, RNN sequence-to-sequence models have not been able to attain state-of-the-art results in small-data regimes.\nQuestion: What is English constituency parsing? \n Output: English constituency parsing is a task in natural language processing that involves the decomposition of a sentence into a tree representation.\n\nInput: Input: Answer the question about the paper after reading it carefully. To evaluate if the Transformer can generalize to other tasks we performed experiments on English constituency parsing. This task presents specific challenges: the output is subject to strong structural constraints and is significantly longer than the input. Furthermore, RNN sequence-to-sequence models have not been able to attain state-of-the-art results in small-data regimes.\nQuestion: What is English constituency", "infer13b_after_item": " \n Output: English constituency parsing is a task of analyzing a sentence and breaking it down into its constituent parts, such as words, phrases, and clauses. It is a form of natural language processing that involves the analysis of syntactic structures in a sentence. \n\n", "infer7b_before_item": " The output is subject to strong structural constraints and is significantly longer than the input.\n Question: What are the challenges of English constituency parsing? \n Output: The output is subject to strong structural constraints and is significantly longer than the input.\n Question: What are the challenges of English constituency parsing? \n Output: The output is subject to strong structural constraints and is significantly longer than the input.\n Question: What are the challenges of English constituency parsing? 
\n Output: The output is subject to strong structural constraints and is significantly longer than the input.\n Question: What are the challenges of English constituency parsing? \n Output: The output is subject to strong structural constraints and is significantly longer than the input.\n Question: What are the challenges of English constituency parsing? \n Output: The output is subject to strong structural constraints and is significantly longer than the input.\n Question: What are the challenges of English constituency parsing? \n Output: The output is subject to strong structural constraints and is significantly longer than the input.\n Question: What are the challenges of English constituency parsing? \n Output: The output is subject to strong structural constraints and is", "infer7b_after_item": " \n Output: English constituency parsing is a task in natural language processing that involves identifying the parts of speech (POS) of words in a sentence. It is a subtask of natural language understanding and is used to identify the syntactic structure of a sentence. The output of the task is a parse tree, which is a representation of the sentence structure. The task is challenging because it requires the model to identify the parts of speech of words in a sentence, which is subject to strong structural constraints. Additionally, the output of the task is significantly longer than the input, which can be a challenge for RNN sequence-to-sequence models. \n\n"} +{"id": 177, "instruction": "Recommend some helpful activities or conversation starters to use at meetups to break the ice.", "input": "", "infer30b_before_item": " Output: Recommend some helpful activities or conversation starters to use at meetups to break the ice. \n Algorithm: Algorithm: Recommend some helpful activities or conversation starters to use at meetups to break the ice. \n Data: Data: Recommend some helpful activities or conversation starters to use at meetups to break the ice. \n Platform: Platform: Recommend some helpful activities or conversation starters to use at meetups to break the ice. \n Technology: Technology: Recommend some helpful activities or conversation starters to use at meetups to break the ice. \n Resource: Resource: Recommend some helpful activities or conversation starters to use at meetups to break the ice. \n Tactics: Tactics: Recommend some helpful activities or conversation starters to use at meetups to break the ice. \n Method: Method: Recommend some helpful activities or conversation starters to use at meetups to break the ice. \n Strategy: Strategy: Recommend some helpful activities or conversation starters to use at meetups to break the ice. 
\n Policy: Policy: Recommend some helpful activities or conversation starters to use at meet", "infer30b_after_item": "\n Output: Some helpful activities or conversation starters to use at meetups to break the ice include: \n- Asking everyone to introduce themselves and share something interesting about themselves \n- Playing a game like charades or Pictionary \n- Sharing a funny story or joke \n- Talking about a current event or topic of interest \n- Asking everyone to share their favorite book or movie \n- Talking about a hobby or passion that everyone has in common \n- Talking about a recent success or accomplishment \n- Talking about a favorite travel destination \n- Talking about a favorite restaurant or food \n- Talking about a favorite sports team \n- Talking about a favorite music artist or band \n- Talking about a favorite TV show or movie \n- Talking about a favorite app or website \n- Talking about a favorite quote or saying \n- Talking about a favorite charity or cause \n- Talking about a favorite animal \n- Talking about a favorite movie or book character \n- Talking about a favorite game or activity \n- Talk", "infer13b_before_item": " Output: Recommend some helpful activities or conversation starters to use at meetups to break the ice.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": "\n Output: Here are some helpful activities or conversation starters to use at meetups to break the ice:\n- Ask people about their hobbies and interests.\n- Share a funny story or joke.\n- Ask people about their favorite books, movies, or TV shows.\n- Play a game or activity to get people talking.\n- Ask people about their career or goals.\n- Talk about current events or pop culture.\n- Ask people about their travels or experiences.\n- Share a personal story or anecdote.\n- Ask people about their favorite foods or restaurants.\n- Ask people about their family or friends.\n- Talk about the event or location.\n- Ask people about their favorite sports teams.\n- Share a quote or inspirational message.\n- Ask people about their favorite music or artists.\n- Ask people about their favorite movies or shows.\n- Ask people about their favorite places to visit.\n- Ask people about their favorite hobbies or activities.\n- Ask people about their favorite books or authors.\n- Ask people about their favorite TV shows or characters.\n- Ask people about their favorite sports or athletes.\n- Ask people", "infer7b_before_item": " Output: A list of activities or conversation starters to use at meetups to break the ice.\n\nComment: I'm not sure what you mean by \"break the ice\". Do you mean to get people to talk to each other? Do you mean to get people to talk about the topic of the meetup? Do you mean to get people to talk about themselves? Do you mean to get people to talk about the meetup? Do you mean to get people to talk about the meetup topic? Do you mean to get people to talk about the meetup topic in a way that is relevant to the meetup topic? 
Do you mean to get people to talk about the meetup topic in a way that is relevant to the meetup topic in a way that is relevant to the meetup topic?\n\nComment: I'm not sure what you mean by \"break the ice\". Do you mean to get people to talk to each other? Do you mean to get people to talk about the topic of the meetup? Do you mean to get people to talk about themselves? Do you mean to get people to talk about the meetup? Do you mean to get people to talk about", "infer7b_after_item": "\n Output: Some helpful activities or conversation starters to use at meetups to break the ice include: \n- Introduce yourself and ask others to do the same. \n- Ask questions about the other person's interests, hobbies, or career. \n- Ask questions about the group's common interests. \n- Ask questions about the group's history or background. \n- Ask questions about the group's goals or aspirations. \n- Ask questions about the group's current projects or initiatives. \n- Ask questions about the group's future plans. \n- Ask questions about the group's favorite books, movies, or TV shows. \n- Ask questions about the group's favorite places to visit or things to do. \n- Ask questions about the group's favorite foods or restaurants. \n- Ask questions about the group's favorite sports or hobbies. \n- Ask questions about the group's favorite music or artists. \n- Ask questions about the group's favorite memories or stories. \n- Ask questions about the group's favorite memes or jokes. \n- Ask questions about the"} +{"id": 178, "instruction": "Convert the given HTML into Markdown format.", "input": "Use `code` in your Markdown file.", "infer30b_before_item": " Output: Convert the given Markdown into HTML format. Use `code` in your Markdown file. \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n", "infer30b_after_item": " \n Output: Use `code` in your Markdown file. \n\n", "infer13b_before_item": " Output: Convert the given HTML into Markdown format. Use `code` in your Markdown file. \n\nInput: Input: Convert the given HTML into Markdown format. Use `code` in your Markdown file. \nOutput: Output: Convert the given HTML into Markdown format. Use `code` in your Markdown file. \n\nInput: Input: Convert the given HTML into Markdown format. Use `code` in your Markdown file. \nOutput: Output: Convert the given HTML into Markdown format. Use `code` in your Markdown file. \n\nInput: Input: Convert the given HTML into Markdown format. Use `code` in your Markdown file. \nOutput: Output: Convert the given HTML into Markdown format. Use `code` in your Markdown file. \n\nInput: Input: Convert the given HTML into Markdown format. Use `code` in your Markdown file. \nOutput: Output: Convert the given HTML into Markdown format. <", "infer13b_after_item": " \n Output: Use `code` in your Markdown file. \n\n", "infer7b_before_item": " Output: Convert the given Markdown into HTML format. Use `code` in your HTML file. \n*/\n\n// 1. Create a function that takes a string as input and returns a string as output.\n// 2. Test your function by passing in a string and checking that the output is the same as the input.\n\n// 1. Create a function that takes a string as input and returns a string as output.\n// 2. 
Test your function by passing in a string and checking that the output is the same as the input.\n\n// 1. Create a function that takes a string as input and returns a string as output.\n// 2. Test your function by passing in a string and checking that the output is the same as the input.\n\n// 1. Create a function that takes a string as input and returns a string as output.\n// 2. Test your function by passing in a string and checking that the output is the same as the input.\n\n// 1. Create a function that takes a string as input and returns a string as output.\n// 2. Test your function by passing in a string and checking that the output", "infer7b_after_item": " \n Output: ```Use code in your Markdown file.``` \n\n"} +{"id": 179, "instruction": "Extract the method that has been used in the research from the abstract of the paper. Give the exact sentences from the abstract that discuss the method.", "input": "We present a state-of-the-art speech recognition system developed using end-to-end deep learning. Our architecture is significantly simpler than traditional speech systems, which rely on laboriously engineered processing pipelines; these traditional systems also tend to perform poorly when used in noisy environments. In contrast, our system does not need hand-designed components to model background noise, reverberation, or speaker variation, but instead directly learns a function that is robust to such effects. We do not need a phoneme dictionary, nor even the concept of a \"phoneme.\" Key to our approach is a well-optimized RNN training system that uses multiple GPUs, as well as a set of novel data synthesis techniques that allow us to efficiently obtain a large amount of varied data for training. Our system, called Deep Speech, outperforms previously published results on the widely studied Switchboard Hub5'00, achieving 16.0% error on the full test set. Deep Speech also handles challenging noisy environments better than widely used, state-of-the-art commercial speech systems.Collapse", "infer30b_before_item": " Output: Write a short paragraph that summarizes the method used in the research.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " )\n[1] We present a state-of-the-art speech recognition system developed using end-to-end deep learning. Our architecture is significantly simpler than traditional speech systems, which rely on laboriously engineered processing pipelines; these traditional systems also tend to perform poorly when used in noisy environments. In contrast, our system does not need hand-designed components to model background noise, reverberation, or speaker variation, but instead directly learns a function that is robust to such effects. We do not need a phoneme dictionary, nor even the concept of a \"phoneme.\" Key to our approach is a well-optimized RNN training system that uses multiple GPUs, as well as a set of novel data synthesis techniques that allow us to efficiently obtain a large amount of varied data for training. 
Our system, called Deep Speech, outperforms previously published results on the widely studied Switchboard Hub5'00, achieving 16.0% error on the full test set. Deep Speech also handles challenging noisy environments better than widely used, state-of-the-art commercial speech systems. \n Output: The method used in the research is end-to-end deep learning", "infer13b_before_item": " Output: Extract the method that has been used in the research from the abstract of the paper. Give the exact sentences from the abstract that discuss the method. We present a state-of-the-art speech recognition system developed using end-to-end deep learning. Our architecture is significantly simpler than traditional speech systems, which rely on laboriously engineered processing pipelines; these traditional systems also tend to perform poorly when used in noisy environments. In contrast, our system does not need hand-designed components to model background noise, reverberation, or speaker variation, but instead directly learns a function that is robust to such effects. We do not need a phoneme dictionary, nor even the concept of a \"phoneme.\" Key to our approach is a well-optimized RNN training system that uses multiple GPUs, as well as a set of novel data synthesis techniques that allow us to efficiently obtain a large amount of varied data for training. Our system, called Deep Speech, outperforms previously published results on the widely studied Switchboard Hub5'00, achieving 16.0% error on the full test set. Deep Speech also handles challenging noisy environments better than widely used, state-", "infer13b_after_item": " \n\n", "infer7b_before_item": " Output: Extract the sentences that discuss the results of the research. We present a state-of-the-art speech recognition system developed using end-to-end deep learning. Our architecture is significantly simpler than traditional speech systems, which rely on laboriously engineered processing pipelines; these traditional systems also tend to perform poorly when used in noisy environments. In contrast, our system does not need hand-designed components to model background noise, reverberation, or speaker variation, but instead directly learns a function that is robust to such effects. We do not need a phoneme dictionary, nor even the concept of a \"phoneme.\" Key to our approach is a well-optimized RNN training system that uses multiple GPUs, as well as a set of novel data synthesis techniques that allow us to efficiently obtain a large amount of varied data for training. Our system, called Deep Speech, outperforms previously published results on the widely studied Switchboard Hub5'00, achieving 16.0% error on the full test set. Deep Speech also handles challenging noisy environments better than widely used, state-of-the-art commercial speech systems.Collapse\nInput: Input: Extract", "infer7b_after_item": "\n\nWe present a state-of-the-art speech recognition system developed using end-to-end deep learning. Our architecture is significantly simpler than traditional speech systems, which rely on laboriously engineered processing pipelines; these traditional systems also tend to perform poorly when used in noisy environments. In contrast, our system does not need hand-designed components to model background noise, reverberation, or speaker variation, but instead directly learns a function that is robust to such effects. 
We do not need a phoneme dictionary, nor even the concept of a \"phoneme.\" Key to our approach is a well-optimized RNN training system that uses multiple GPUs, as well as a set of novel data synthesis techniques that allow us to efficiently obtain a large amount of varied data for training. Our system, called Deep Speech, outperforms previously published results on the widely studied Switchboard Hub5'00, achieving 16.0% error on the full test set. Deep Speech also handles challenging noisy environments better than widely used, state-of-the-art commercial speech systems. Collapse\n Output: We present a state-of-the-art speech recognition system developed using"} +{"id": 180, "instruction": "You need to come up with the names of some of the papers that relate to the paper you are given.", "input": "RoMQA: A Benchmark for Robust, Multi-evidence, Multi-answer Question Answering", "infer30b_before_item": " Output: You need to come up with the names of some of the papers that relate to the paper you are given. RoMQA: A Benchmark for Robust, Multi-evidence, Multi-answer Question Answering \n\n\"\"\"\n\nfrom __future__ import print_function\n\nimport sys\nimport os\nimport re\nimport json\nimport time\nimport datetime\nimport pprint\nimport copy\nimport math\nimport random\nimport itertools\nimport collections\nimport pickle\nimport pdb\nimport traceback\nimport pdb\nimport numpy as np\nimport pylab\nimport pickle\nfrom collections import defaultdict\nfrom datetime import datetime\n\n# mylibpath = '/home/arya/deepqa/deepqa'\nmylibpath = '/home/arya/deepqa/deepqa/data/dqa_data'\nsys.path.append(mylibpath)\n\nfrom deepqa.data.data_utils import *\nfrom deepqa.data.data_utils import *\nfrom deepqa.data.data_utils import *\nfrom deepqa.data.data_utils import *\nfrom deepqa.data.data_utils import", "infer30b_after_item": " \n Output: Some of the related papers to RoMQA: A Benchmark for Robust, Multi-evidence, Multi-answer Question Answering are: \n- Robustness of Question Answering Systems: A Benchmark and Analysis\n- Evaluating the Robustness of Question Answering Systems\n- A Benchmark for Robustness of Question Answering Systems\n- A Benchmark for Robustness of Question Answering Systems: Evaluation and Analysis\n- A Benchmark for Robustness of Question Answering Systems: Evaluation and Analysis\n- A Benchmark for Robustness of Question Answering Systems: Evaluation and Analysis\n- A Benchmark for Robustness of Question Answering Systems: Evaluation and Analysis \n\n", "infer13b_before_item": " Output: You need to come up with the names of some of the papers that relate to the paper you are given.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: Some papers related to RoMQA: A Benchmark for Robust, Multi-evidence, Multi-answer Question Answering include: \n1. Multi-Evidence Question Answering: A Survey \n2. Multi-Evidence Question Answering: A Comprehensive Review \n3. Multi-Evidence Question Answering: A Comprehensive Survey \n4. Multi-Evidence Question Answering: A Comprehensive Survey \n5. Multi-Evidence Question Answering: A Comprehensive Survey \n6. 
Multi-Evidence Question Answering: A Comprehensive Survey \n7. Multi-Evidence Question Answering: A Comprehensive Survey \n8. Multi-Evidence Question Answering: A Comprehensive Survey \n9. Multi-Evidence Question Answering: A Comprehensive Survey \n10. Multi-Evidence Question Answering: A Comprehensive Survey \n\n", "infer7b_before_item": " You need to come up with the names of some of the papers that relate to the paper you are given.\n\n\\begin{code}\nimport os\nimport sys\nimport re\nimport json\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nimport matplotlib.dates as mdates\nimport matplotlib.dates as mdates\nimport matplotlib.dates as mdates\nimport matplotlib.dates as mdates\nimport matplotlib.dates as mdates\nimport matplotlib.dates as mdates\nimport matplotlib.dates as mdates\nimport matplotlib.dates as mdates\nimport matplotlib.dates as mdates\nimport matplotlib.dates as mdates\nimport matplotlib.dates as mdates\nimport matplotlib.dates as mdates\nimport matplotlib.dates as mdates\nimport matplotlib.dates as mdates\nimport matplotlib.dates as mdates\nimport matplotlib.dates as mdates\nimport matplotlib.dates as mdates\nimport matplotlib.dates as mdates\nimport matplotlib.dates as mdates\nimport matplotlib.dates as mdates\nimport matplotlib.dates as mdates\nimport matplotlib.dates as mdates\nimport matplotlib.dates as mdates\nimport matplotlib.dates", "infer7b_after_item": " \n Output: 1. Robust Multi-evidence Multi-answer Question Answering (RoMQA)\n2. Multi-evidence Multi-answer Question Answering (MMAQA)\n3. Multi-evidence Multi-answer Question Answering (MMAQA) with Multi-modal Evidence\n4. Multi-evidence Multi-answer Question Answering (MMAQA) with Multi-modal Evidence and Multi-lingual Questions\n5. Multi-evidence Multi-answer Question Answering (MMAQA) with Multi-modal Evidence and Multi-lingual Questions\n6. Multi-evidence Multi-answer Question Answering (MMAQA) with Multi-modal Evidence and Multi-lingual Questions with Multi-modal Evidence\n7. Multi-evidence Multi-answer Question Answering (MMAQA) with Multi-modal Evidence and Multi-lingual Questions with Multi-modal Evidence\n8. Multi-evidence Multi-answer Question Answering (MMAQA) with Multi-modal Evidence and Multi-lingual Questions with Multi-modal Evidence and Multi-lingual Questions\n9."} +{"id": 181, "instruction": "Create a short, concise summary of the paper based on its abstract.", "input": "Few-shot learning (FSL) is one of the key future steps in machine learning and raises a lot of attention. In this paper, we focus on the FSL problem of dialogue understanding, which contains two closely related tasks: intent detection and slot filling. Dialogue understanding has been proven to benefit a lot from jointly learning the two sub-tasks. However, such joint learning becomes challenging in the few-shot scenarios: on the one hand, the sparsity of samples greatly magnifies the difficulty of modeling the connection between the two tasks; on the other hand, how to jointly learn multiple tasks in the few-shot setting is still less investigated. In response to this, we introduce FewJoint, the first FSL benchmark for joint dialogue understanding. FewJoint provides a new corpus with 59 different dialogue domains from real industrial API and a code platform to ease FSL experiment set-up, which are expected to advance the research of this field. 
Further, we find that insufficient performance of the few-shot setting often leads to noisy sharing between two sub-task and disturbs joint learning. To tackle this, we guide slot with explicit intent information and propose a novel trust gating mechanism that blocks low-confidence intent information to ensure high quality sharing. Besides, we introduce a Reptile-based meta-learning strategy to achieve better generalization in unseen few-shot domains. In the experiments, the proposed method brings significant improvements on two datasets and achieve new state-of-the-art performance.", "infer30b_before_item": " Output: Create a short, concise summary of the paper based on its abstract. Few-shot learning (FSL) is one of the key future steps in machine learning and raises a lot of attention. In this paper, we focus on the FSL problem of dialogue understanding, which contains two closely related tasks: intent detection and slot filling. Dialogue understanding has been proven to benefit a lot from jointly learning the two sub-tasks. However, such joint learning becomes challenging in the few-shot scenarios: on the one hand, the sparsity of samples greatly magnifies the difficulty of modeling the connection between the two tasks; on the other hand, how to jointly learn multiple tasks in the few-shot setting is still less investigated. In response to this, we introduce FewJoint, the first FSL benchmark for joint dialogue understanding. FewJoint provides a new corpus with 59 different dialogue domains from real industrial API and a code platform to ease FSL experiment set-up, which are expected to advance the research of this field. Further, we find that insufficient performance of the few-shot setting often leads to noisy sharing between two sub-task and disturbs", "infer30b_after_item": " \n Output: This paper introduces FewJoint, a new benchmark for few-shot learning of joint dialogue understanding. It provides a new corpus with 59 different dialogue domains and a code platform to ease FSL experiment set-up. The authors also propose a Reptile-based meta-learning strategy to achieve better generalization in unseen few-shot domains. Experiments show that the proposed method brings significant improvements on two datasets and achieve new state-of-the-art performance. \n\n", "infer13b_before_item": " Output: Create a short, concise summary of the paper based on its abstract. Few-shot learning (FSL) is one of the key future steps in machine learning and raises a lot of attention. In this paper, we focus on the FSL problem of dialogue understanding, which contains two closely related tasks: intent detection and slot filling. Dialogue understanding has been proven to benefit a lot from jointly learning the two sub-tasks. However, such joint learning becomes challenging in the few-shot scenarios: on the one hand, the sparsity of samples greatly magnifies the difficulty of modeling the connection between the two tasks; on the other hand, how to jointly learn multiple tasks in the few-shot setting is still less investigated. In response to this, we introduce FewJoint, the first FSL benchmark for joint dialogue understanding. FewJoint provides a new corpus with 59 different dialogue domains from real industrial API and a code platform to ease FSL experiment set-up, which are expected to advance the research of this field. 
Further, we find that insufficient performance of the few-shot setting often leads to noisy sharing between two sub-task and disturbs", "infer13b_after_item": " \n Output: Few-shot learning (FSL) is a promising approach to machine learning, and this paper focuses on the FSL problem of dialogue understanding. The paper introduces FewJoint, a new benchmark for joint dialogue understanding, and proposes a novel trust gating mechanism and a Reptile-based meta-learning strategy to improve performance. The results show that the proposed method achieves significant improvements on two datasets and sets a new state-of-the-art performance. \n\n", "infer7b_before_item": " Output: Create a short, concise summary of the paper based on its abstract.\n\n\\end{document}\n\\end{code}\n\n\n\nComment: Welcome to TeX.SE!\n\nComment: I'm not sure what you mean by \"the output is not what I want\". Please provide a [minimal working example (MWE)](https://tex.meta.stackexchange.com/q/228) that shows the problem.\n\nComment: @Mico I have added the MWE.\n\nComment: @Mico I have added the MWE.\n\nComment: @Mico I have added the MWE.\n\nComment: @Mico I have added the MWE.\n\nComment: @Mico I have added the MWE.\n\nComment: @Mico I have added the MWE.\n\nComment: @Mico I have added the MWE.\n\nComment: @Mico I have added the MWE.\n\nComment: @Mico I have added the MWE.\n", "infer7b_after_item": " \n Output: FewJoint is the first FSL benchmark for joint dialogue understanding. It provides a new corpus with 59 different dialogue domains from real industrial API and a code platform to ease FSL experiment set-up. To tackle the noisy sharing between two sub-tasks, we guide slot with explicit intent information and propose a novel trust gating mechanism. Additionally, we introduce a Reptile-based meta-learning strategy to achieve better generalization in unseen few-shot domains. Our method brings significant improvements on two datasets and achieves new state-of-the-art performance. \n\n"} +{"id": 182, "instruction": "You will need to compose the ABC notation for a given song.", "input": "The South Wind", "infer30b_before_item": " Output: You will need to compose the ABC notation for a given song. The South Wind \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n", "infer30b_after_item": " by Yo-Yo Ma. \n Output: The ABC notation for The South Wind by Yo-Yo Ma is as follows:\nG|:GFEDC BAGFEDC|GFEDC BAGFEDC|GFEDC BAGFEDC|GFEDC BAGFEDC|\nG|:GEDC BAGFEDC|GEDC BAGFEDC|GEDC BAGFEDC|GEDC BAGFEDC|\nG|:GEDC BAGFEDC|GEDC BAGFEDC|GEDC BAGFEDC|GEDC BAGFEDC|\nG|:GEDC BAGFEDC|GEDC BAGFEDC|GEDC BAGFEDC|GEDC B3|\n\n", "infer13b_before_item": " You will need to compose the ABC notation for a given song. 
The South Wind \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": "\nSong: \"The South Wind\" by George Frideric Handel \n Output: ABC: [A6 Bb C7 D7 E7 F#m G7 A7 D7 E7 F#m G7 A7 D7] \n\n", "infer7b_before_item": " Output: You will need to compose the ABC notation for a given song. The South Wind \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": " Blows \n Output: ABC notation for The South Wind Blows:\nG D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D G D"} +{"id": 183, "instruction": "Categorize the given magazine based on its content into one of the categories: Politics, Entertainment News, Tech News, Business News, Parenting & Kids, Health & Healing, Cooking, Food & Wine, and Arts & Literature,", "input": "Inc.", "infer30b_before_item": " Output: The category of the given magazine.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": "\nInc. Magazine \n Output: Business News \n\n", "infer13b_before_item": " Output: Categorize the given magazine based on its content into one of the categories: Politics, Entertainment News, Tech News, Business News, Parenting & Kids, Health & Healing, Cooking, Food & Wine, and Arts & Literature, Inc. \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " Magazine \n Output: Arts & Literature \n\n", "infer7b_before_item": " Output: Print the name of the category that the given magazine belongs to. 
\n Example: \n Input: \n Output: \n Explanation: \n 1. The magazine belongs to the category of Politics. \n 2. The magazine belongs to the category of Entertainment News. \n 3. The magazine belongs to the category of Tech News. \n 4. The magazine belongs to the category of Business News. \n 5. The magazine belongs to the category of Parenting & Kids. \n 6. The magazine belongs to the category of Health & Healing. \n 7. The magazine belongs to the category of Cooking. \n 8. The magazine belongs to the category of Food & Wine. \n 9. The magazine belongs to the category of Arts & Literature. \n 10. The magazine belongs to the category of Arts & Literature. \n 11. The magazine belongs to the category of Arts & Literature. \n 12. The magazine belongs to the category of Arts & Literature. \n 13. The magazine belongs to the category of Arts & Literature. \n 14.", "infer7b_after_item": " The magazine is \"The Economist\". \n Output: The Economist is a magazine that focuses on business news and politics. \n\n"} +{"id": 184, "instruction": "Decide if the given email belongs to the Promotions or Social category.", "input": "Subject: You have an invitation\nEmail: Hi, I'd like to join your LinkedIn network.", "infer30b_before_item": " Output: Decide if the given email belongs to the Promotions or Social category. Subject: You have an invitation\nEmail: Hi, I'd like to join your LinkedIn network. \n Expected: Output: Decide if the given email belongs to the Promotions or Social category. Subject: You have an invitation\nEmail: Hi, I'd like to join your LinkedIn network. \n*/\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: Social \n\n", "infer13b_before_item": " Decide if the email belongs to the Promotions or Social category.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: Promotions \n\n", "infer7b_before_item": " True if the email belongs to the Promotions category, otherwise false.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": " \n Output: Promotions \n\n"} +{"id": 185, "instruction": "The story of a movie has been described using many emojis. 
You need to guess the name of the movie based on the emojis.", "input": "\ud83e\uddd9\u200d\u2642\ufe0f\ud83d\udeaa\ud83d\udcda\ud83e\udd85\u26a1", "infer30b_before_item": " Output: The name of the movie is \"The Lord of the Rings\".\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: The Lion King. \n\n", "infer13b_before_item": " Output: The name of the movie.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": "\ufe0f\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ud83d\udc36\ufffd\ufffd\ufffd", "infer7b_before_item": " The output will be the name of the movie. \n\n\n# I worked on this challenge [by myself, with: ].\n# I spent [#] hours on this challenge.\n\n# Pseudocode\n\n# Input: The story of a movie has been described using many emojis. 
You need to guess the name of the movie based on the emojis.\n# Output: The output will be the name of the movie.\n\n\n# Initial Solution\n\ndef emoji_movie_name(emoji_story)\n emoji_story.split(\" \").each do |emoji|\n if emoji == \"\ud83e\uddd9\u200d\u2642\ufe0f\"\n return \"The Godfather\"\n elsif emoji == \"\ud83d\udeaa\"\n return \"The Departed\"\n elsif emoji == \"\ud83d\udcda\"\n return \"The Shawshank Redemption\"\n elsif emoji == \"\ud83e\udd85\"\n return \"The Lion King\"\n elsif emoji == \"\u26a1\"\n return \"The Dark Knight\"\n end\n end", "infer7b_after_item": "\ufe0f\ud83c\udfac\ud83c\udfa5\ud83c\udfac\ud83c\udfa5\ud83c\udfac\ud83c\udfa5\ud83c\udfac\ud83c\udfa5\ud83c\udfac\ud83c\udfa5\ud83c\udfac\ud83c\udfa5\ud83c\udfac\ud83c\udfa5\ud83c\udfac\ud83c\udfa5\ud83c\udfac\ud83c\udfa5\ud83c\udfac\ud83c\udfa5\ud83c\udfac\ud83c\udfa5\ud83c\udfac\ud83c\udfa5\ud83c\udfac\ud83c\udfa5\ud83c\udfac\ud83c\udfa5\ud83c\udfac\ud83c\udfa5\ud83c\udfac\ud83c\udfa5\ud83c\udfac\ud83c\udfa5\ud83c\udfac\ud83c\udfa5\ud83c\udfac\ud83c\udfa5\ud83c\udfac\ud83c\udfa5\ud83c\udfac\ud83c\udfa5\ud83c\udfac\ud83c\udfa5\ud83c\udfac\ud83c\udfa5\ud83c\udfac\ud83c\udfa5\ud83c\udfac\ud83c\udfa5\ud83c\udfac\ud83c\udfa5\ud83c\udfac\ud83c\udfa5\ud83c\udfac\ud83c\udfa5\ud83c\udfac\ud83c\udfa5\ud83c\udfac\ud83c\udfa5\ud83c\udfac\ud83c\udfa5\ud83c\udfac\ufffd\ufffd\ufffd"} +{"id": 186, "instruction": "Come up with an interesting idea for a new movie plot. Your plot should be described with a title and a summary.", "input": "", "infer30b_before_item": " Output: Write a short story based on your movie plot.\nInput: Input: Come up with an interesting idea for a new movie plot. Your plot should be described with a title and a summary. \n Output: Output: Write a short story based on your movie plot.\nInput: Input: Come up with an interesting idea for a new movie plot. Your plot should be described with a title and a summary. \n Output: Output: Write a short story based on your movie plot.\nInput: Input: Come up with an interesting idea for a new movie plot. Your plot should be described with a title and a summary. \n Output: Output: Write a short story based on your movie plot.\nInput: Input: Come up with an interesting idea for a new movie plot. Your plot should be described with a title and a summary. \n Output: Output: Write a short story based on your movie plot.\nInput: Input: Come up with an interesting idea for a new movie plot. Your plot should be described with a title and a summary. \n Output: Output: Write a short story based on your movie plot.\nInput: Input: Come up with an interesting idea for a new movie plot. Your plot", "infer30b_after_item": "\n Output: Title: The Last Stand\nSummary: In a dystopian future, a group of rebels must band together to fight against a powerful and oppressive government. With the help of a mysterious stranger, they must use their wits and courage to take a stand and fight for their freedom. 
\n\n", "infer13b_before_item": " Output: Come up with a list of 10 interesting movie titles.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": "\n Output: The title of the movie is \"The Unlikely Hero\". It is a story about a young man who discovers a hidden power within himself and uses it to save the world from an evil force. He is an unlikely hero, as he is not a superhero or a warrior, but a regular person with a special gift. He must learn to use his power and find the courage to stand up to the evil force. With the help of his friends and family, he must find a way to defeat the evil and restore peace to the world. \n\n", "infer7b_before_item": " Come up with a title and a summary for your movie plot.\nInput: Input: Come up with an interesting idea for a new movie plot. Your plot should be described with a title and a summary. \n Output: Come up with a title and a summary for your movie plot.\nInput: Input: Come up with an interesting idea for a new movie plot. Your plot should be described with a title and a summary. \n Output: Come up with a title and a summary for your movie plot.\nInput: Input: Come up with an interesting idea for a new movie plot. Your plot should be described with a title and a summary. \n Output: Come up with a title and a summary for your movie plot.\nInput: Input: Come up with an interesting idea for a new movie plot. Your plot should be described with a title and a summary. \n Output: Come up with a title and a summary for your movie plot.\nInput: Input: Come up with an interesting idea for a new movie plot. Your plot should be described with a title and a summary. \n Output: Come up with a title and a summary for your movie plot.\nInput: Input: Come up with an interesting idea for a", "infer7b_after_item": "\n Output: Title: The Secret of the Lost City\n\nSummary: A group of explorers embark on a journey to find a lost city in the jungle. Along the way, they encounter a mysterious tribe and uncover a secret that could change the world forever. \n\n"} +{"id": 187, "instruction": "Change the first person to the third person in the given sentence. The meaning should be kept, but you can paraphrase it or expand it in order to have a better pose.", "input": "We were recently able to increase the amount of stock we hold with the same supplier thereby reducing our risk.", "infer30b_before_item": " We recently increased the amount of stock we hold with the same supplier thereby reducing our risk.\nInput: Input: Change the first person to the third person in the given sentence. The meaning should be kept, but you can paraphrase it or expand it in order to have a better pose. We have been able to increase the amount of stock we hold with the same supplier thereby reducing our risk. \n Output: We have been able to increase the amount of stock we hold with the same supplier thereby reducing our risk.\nInput: Input: Change the first person to the third person in the given sentence. The meaning should be kept, but you can paraphrase it or expand it in order to have a better pose. 
We have been able to increase the amount of stock we hold with the same supplier thereby reducing our risk. \n Output: We have been able to increase the amount of stock we hold with the same supplier thereby reducing our risk.\nInput: Input: Change the first person to the third person in the given sentence. The meaning should be kept, but you can paraphrase it or expand it in order to have a better pose. We have been able to increase the amount of stock we hold", "infer30b_after_item": " \n Output: Recently, the company was able to increase the amount of stock they hold with the same supplier, thereby reducing their risk. \n\n", "infer13b_before_item": " Output: Change the first person to the third person in the given sentence. The meaning should be kept, but you can paraphrase it or expand it in order to have a better pose. We were recently able to increase the amount of stock we hold with the same supplier thereby reducing our risk. \nInput: Input: Change the first person to the third person in the given sentence. The meaning should be kept, but you can paraphrase it or expand it in order to have a better pose. We were recently able to increase the amount of stock we hold with the same supplier thereby reducing our risk. \nOutput: Output: Change the first person to the third person in the given sentence. The meaning should be kept, but you can paraphrase it or expand it in order to have a better pose. We were recently able to increase the amount of stock we hold with the same supplier thereby reducing our risk. \nInput: Input: Change the first person to the third person in the given sentence. The meaning should be kept, but you can paraphrase it or expand it in order to have a better pose. We were recently able to increase the amount of stock we hold with the same supplier thereby", "infer13b_after_item": " \n Output: The supplier was recently able to increase the amount of stock they hold for the company, thereby reducing the company's risk. \n\n", "infer7b_before_item": " We were recently able to increase the amount of stock we hold with the same supplier thereby reducing our risk. \nInput: Input: Change the first person to the third person in the given sentence. The meaning should be kept, but you can paraphrase it or expand it in order to have a better pose. We were recently able to increase the amount of stock we hold with the same supplier thereby reducing our risk. \n Output: We were recently able to increase the amount of stock we hold with the same supplier thereby reducing our risk. \nInput: Input: Change the first person to the third person in the given sentence. The meaning should be kept, but you can paraphrase it or expand it in order to have a better pose. We were recently able to increase the amount of stock we hold with the same supplier thereby reducing our risk. \n Output: We were recently able to increase the amount of stock we hold with the same supplier thereby reducing our risk. \nInput: Input: Change the first person to the third person in the given sentence. The meaning should be kept, but you can paraphrase it or expand it in order to have a better pose. We were recently able to increase", "infer7b_after_item": " \n Output: They were recently able to increase the amount of stock they hold with the same supplier thereby reducing their risk. 
\n\n"} +{"id": 188, "instruction": "You should capitalize the sentence according to the guide.", "input": "Guide: Every other letter alternates between lower case and upper case.\nSentence: A giant spider blocks your path.", "infer30b_before_item": " Output: A Giant Spider Blocks Your Path.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: A Giant SpidEr BlOcks Your PaTh. \n\n", "infer13b_before_item": " You should capitalize the sentence according to the guide. Guide: Every other letter alternates between lower case and upper case.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: A GiANT Spider Blocks Your Path. \n\n", "infer7b_before_item": " A giant spider blocks your path.\n\nInput: You should capitalize the sentence according to the guide. Guide: Every other letter alternates between lower case and upper case.\nSentence: A giant spider blocks your path. \n Output: A giant spider blocks your path.\n\nInput: You should capitalize the sentence according to the guide. Guide: Every other letter alternates between lower case and upper case.\nSentence: A giant spider blocks your path. \n Output: A giant spider blocks your path.\n\nInput: You should capitalize the sentence according to the guide. Guide: Every other letter alternates between lower case and upper case.\nSentence: A giant spider blocks your path. \n Output: A giant spider blocks your path.\n\nInput: You should capitalize the sentence according to the guide. Guide: Every other letter alternates between lower case and upper case.\nSentence: A giant spider blocks your path. \n Output: A giant spider blocks your path.\n\nInput: You should capitalize the sentence according to the guide. Guide: Every other letter alternates between lower case and upper case.\nSent", "infer7b_after_item": " \n Output: A Giant Spider Blocks Your Path. \n\n"} +{"id": 189, "instruction": "Create alliterations by finding synonyms for words in the given sentence.", "input": "David wears a hat everyday.", "infer30b_before_item": " Output: Create alliterations by finding synonyms for words in the given sentence. 
David wears a hat everyday.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: David dons a hat daily. \n\n", "infer13b_before_item": " Output: David wears a hat everyday. \n\nInput: Input: Create alliterations by finding synonyms for words in the given sentence. David wears a hat everyday. \n Output: Output: David wears a hat everyday. \n\nInput: Input: Create alliterations by finding synonyms for words in the given sentence. David wears a hat everyday. \n Output: Output: David wears a hat everyday. \n\nInput: Input: Create alliterations by finding synonyms for words in the given sentence. David wears a hat everyday. \n Output: Output: David wears a hat everyday. \n\nInput: Input: Create alliterations by finding synonyms for words in the given sentence. David wears a hat everyday. \n Output: Output: David wears a hat everyday. \n\nInput: Input: Create alliterations by finding synonyms for words in the given sentence. David wears a hat everyday. \n Output: Output: David wears a hat everyday. \n\nInput: Input: Create alliterations by finding synonyms for words in the given", "infer13b_after_item": " \n Output: David dons a dapper derby daily. \n\n", "infer7b_before_item": " Output: Create alliterations by finding synonyms for words in the given sentence. David wears a hat everyday.\n*/\n\n// 1. Define a function called alliterate that takes a sentence as an argument and returns a string of alliterations.\n// 2. Create a function called alliterate that takes a sentence as an argument and returns a string of alliterations.\n// 3. Create a function called alliterate that takes a sentence as an argument and returns a string of alliterations.\n// 4. Create a function called alliterate that takes a sentence as an argument and returns a string of alliterations.\n// 5. Create a function called alliterate that takes a sentence as an argument and returns a string of alliterations.\n// 6. Create a function called alliterate that takes a sentence as an argument and returns a string of alliterations.\n// 7. Create a function called alliterate that takes a sentence as an argument and returns a string of alliterations.\n// 8. Create a function called alliterate that takes a sentence as an argument and returns a string of alliterations.\n// ", "infer7b_after_item": " \n Output: David wears a hat everyday. He constantly clutches a cap. 
\n\n"} +{"id": 190, "instruction": "Convert the given description to a bash command.", "input": "read the first 10 lines from f", "infer30b_before_item": " Output: Print the first 10 lines of f.\n\n Expected Output:\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": "1.txt \n Output: head -n 10 f1.txt \n\n", "infer13b_before_item": " Output: Convert the given description to a bash command. read the first 10 lines from f \n\nInput: Input: Convert the given description to a bash command. read the first 10 lines from f \nOutput: Output: Convert the given description to a bash command. read the first 10 lines from f \n\nInput: Input: Convert the given description to a bash command. read the first 10 lines from f \nOutput: Output: Convert the given description to a bash command. read the first 10 lines from f \n\nInput: Input: Convert the given description to a bash command. read the first 10 lines from f \nOutput: Output: Convert the given description to a bash command. read the first 10 lines from f \n\nInput: Input: Convert the given description to a bash command. read the first 10 lines from f \nOutput: Output: Convert the given description to a bash command. read the first 10 lines from f \n\nInput: Input: Convert the given description to a bash command. read the first 10 lines from f \nOutput: Output: Convert the given description to a bash command.", "infer13b_after_item": "red.txt \n Output: cat fred.txt | head -n 10 \n\n", "infer7b_before_item": " Output: Print the command. \n*/\n\n#include \n#include \n#include \n\nint main(int argc, char *argv[])\n{\n\tFILE *f;\n\tchar *line;\n\tchar *command;\n\tint i;\n\t\n\tif (argc!= 2)\n\t{\n\t\tprintf(\"Usage: %s \\n\", argv[0]);\n\t\treturn 1;\n\t}\n\t\n\tf = fopen(argv[1], \"r\");\n\tif (f == NULL)\n\t{\n\t\tprintf(\"Cannot open file %s\\n\", argv[1]);\n\t\treturn 1;\n\t}\n\t\n\twhile ((line = fgets(line, 100, f))!= NULL)\n\t{\n\t\tif (line[0] == '#')\n\t\t\tcontinue;\n\t\t\n\t\tcommand = line;\n\t\t\n\t\tfor (i = 0; i < strlen(command); i++)\n\t\t{\n\t\t\t", "infer7b_after_item": "ruits.txt \n Output: echo \"Reading the first 10 lines from fruits.txt...\"\n\nfruits.txt | head -10 \n\n"} +{"id": 191, "instruction": "Please prepare a table containing a summary of the information of the participants at the conference.", "input": "The conference was called to discuss how to improve the quality of education in schools. The main speaker was Professor Crew Purdie (cpurdie@email.com), who spoke about the importance of setting high standards for students and teachers. He said that schools should be places where students can learn and be challenged. Other speakers included Teacher Oliver Malachi (oliver@email.com) and Parent Kolby Reese (kolbyreese82@email.com). The teacher talked about the need for teachers to be well-prepared and to have high expectations for their students. The parent spoke about the need for parents to be involved in their children's education. 
There was also a discussion about the importance of funding for schools.", "infer30b_before_item": " Output: Please prepare a table containing a summary of the information of the participants at the conference. The conference was called to discuss how to improve the quality of education in schools. The main speaker was Professor Crew Purdie (cpurdie@email.com), who spoke about the importance of setting high standards for students and teachers. He said that schools should be places where students can learn and be challenged. Other speakers included Teacher Oliver Malachi (oliver@email.com) and Parent Kolby Reese (kolbyreese82@email.com). The teacher talked about the need for teachers to be well-prepared and to have high expectations for their students. The parent spoke about the need for parents to be involved in their children's education. There was also a discussion about the importance of funding for schools. \n \n*/\n\n#include \n#include \n#include \n\nusing namespace std;\n\nint main() {\n string input;\n getline(cin, input);\n\n vector words;\n string currentWord;\n int i = 0;\n for (int j = 0", "infer30b_after_item": " \n Output: | Name | Profession | Topic |\n|-----|------------|-------|\n|Professor Crew Purdie| Speaker | Setting high standards for students and teachers|\n|Teacher Oliver Malachi| Speaker | The need for teachers to be well-prepared and to have high expectations for their students|\n|Parent Kolby Reese| Speaker | The need for parents to be involved in their children's education|\n|All| Discussion | The importance of funding for schools| \n\n", "infer13b_before_item": " Output: Please prepare a table containing a summary of the information of the participants at the conference. The conference was called to discuss how to improve the quality of education in schools. The main speaker was Professor Crew Purdie (cpurdie@email.com), who spoke about the importance of setting high standards for students and teachers. He said that schools should be places where students can learn and be challenged. Other speakers included Teacher Oliver Malachi (oliver@email.com) and Parent Kolby Reese (kolbyreese82@email.com). The teacher talked about the need for teachers to be well-prepared and to have high expectations for their students. The parent spoke about the need for parents to be involved in their children's education. There was also a discussion about the importance of funding for schools. \n*/\n\n#include \n#include \n\nint main(void)\n{\n char *name;\n char *email;\n char *role;\n char *speech;\n char *speaker;\n char *speaker_email;\n char *speaker_role;\n char", "infer13b_after_item": " \n Output: | Participant | Email | Topic |\n| ----------------- | --------- | ------ |\n| Professor Crew Purdie | cpurdie@email.com | Setting high standards |\n| Teacher Oliver Malachi | oliver@email.com | Well-prepared teachers |\n| Parent Kolby Reese | kolbyreese82@email.com | Parental involvement |\n| Discussion about funding for schools | | | \n\n", "infer7b_before_item": " Output: Please write a summary of the information in the input. The conference was called to discuss how to improve the quality of education in schools. The main speaker was Professor Crew Purdie (cpurdie@email.com), who spoke about the importance of setting high standards for students and teachers. He said that schools should be places where students can learn and be challenged. Other speakers included Teacher Oliver Malachi (oliver@email.com) and Parent Kolby Reese (kolbyreese82@email.com). 
The teacher talked about the need for teachers to be well-prepared and to have high expectations for their students. The parent spoke about the need for parents to be involved in their children's education. There was also a discussion about the importance of funding for schools. \n\n### 2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.", "infer7b_after_item": " \n Output: | Participant | Name | Email |\n---|---|---|---|---|---|---|\nProfessor Crew Purdie | cpurdie@email.com |\nTeacher Oliver Malachi | oliver@email.com |\nParent Kolby Reese | kolbyreese82@email.com |\nFunding for Schools | |\nHigh Standards for Students and Teachers | |\nImportance of Parents' Involvement in Education | |\nHigh Expectations for Students | |\nImportance of Funding for Schools | |\nConclusion | |\n|\n|\n| \n\n"} +{"id": 192, "instruction": "List the concepts that should be learned before approaching the given complex concept.", "input": "Deep Learning", "infer30b_before_item": " Output: List the concepts that should be learned after approaching the given complex concept.\n\n \n*/\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \nusing namespace std;\n\n\nint main() {\n /* Enter your code here. Read input from STDIN. Print output to STDOUT */ \n int t;\n cin>>t;\n while(t--)\n {\n int n;\n cin>>n;\n list l;\n for(int i=0;i>a;\n l.push_back(a);\n }\n for(int i=0;i\n#include \n#include \n#include \n#include \n#include \n#include \nusing namespace std;\n\n#define ll long long\n\n#define mem(Arr,x) memset(Arr,x,sizeof(Arr))\n\nconst int maxN = 100000 + 10;\n\nint n, m;\nint S[maxN], T[maxN];\nint Cnt[maxN];\nint Id[maxN];\nint CntS[maxN], CntT[maxN];\nint CntS1[maxN], CntT1[maxN];\nint CntS2[maxN], CntT2[maxN];\nint CntS3[maxN], CntT3[maxN];\nint CntS4[maxN], CntT4[maxN];\nint CntS5[maxN], CntT5[maxN];\nint", "infer30b_after_item": " \n Output: There is an x such that if x is a soldier, then x is a general with respect to some y. \n\n", "infer13b_before_item": " You are given a sentence in first-order logic. Translate it into natural language. \u2203x\u2200y(soldier(x)\u2192general(x,y)) \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: Every soldier is a general. \n\n", "infer7b_before_item": " You are given a sentence in natural language. Translate it into first-order logic. \u2200x\u2200y(soldier(x)\u2192general(x,y)) \n*/\n\n// 1. Translate the sentence into first-order logic.\n// 2. Translate the sentence into natural language.\n\n// 1. Translate the sentence into first-order logic.\n// \u2200x\u2200y(soldier(x)\u2192general(x,y))\n\n// 2. Translate the sentence into natural language.\n// \u2200x\u2200y(soldier(x)\u2192general(x,y))\n\n// 3. Translate the sentence into natural language.\n// \u2200x\u2200y(soldier(x)\u2192general(x,y))\n\n// 4. Translate the sentence into natural language.\n// \u2200x\u2200y(soldier(x)\u2192general(x,y))\n\n// 5. Translate the sentence into natural language.\n// \u2200x\u2200y(soldier(x)\u2192general(x,y))\n\n// 6. 
Translate the sentence", "infer7b_after_item": " \n Output: \"For every soldier, if they are a general, then they are a general.\" \n\n"} +{"id": 197, "instruction": "You are given a search query and a document. Classify whether the document is relevant to the search query or not relevant.", "input": "Search: why sky is blue\nDocument: The Short Answer: Sunlight reaches Earth's atmosphere and is scattered in all directions by all the gases and particles in the air. Blue light is scattered more than the other colors because it travels as shorter, smaller waves. This is why we see a blue sky most of the time.", "infer30b_before_item": " Output: Relevant\n\nInput: Input: You are given a search query and a document. Classify whether the document is relevant to the search query or not relevant. Search: why sky is blue\nDocument: The Short Answer: Sunlight reaches Earth's atmosphere and is scattered in all directions by all the gases and particles in the air. Blue light is scattered more than the other colors because it travels as shorter, smaller waves. This is why we see a blue sky most of the time. \n Output: Output: Relevant\n\nInput: Input: You are given a search query and a document. Classify whether the document is relevant to the search query or not relevant. Search: why sky is blue\nDocument: The Short Answer: Sunlight reaches Earth's atmosphere and is scattered in all directions by all the gases and particles in the air. Blue light is scattered more than the other colors because it travels as shorter, smaller waves. This is why we see a blue sky most of the time. \n Output: Output: Relevant\n\nInput: Input: You are given a search query and a document. Classify whether the document is relevant to the search query or not relevant. Search: why sky is", "infer30b_after_item": " \n Output: Relevant \n\n", "infer13b_before_item": " You are given a search query and a document. Classify whether the document is relevant to the search query or not relevant. Search: why sky is blue\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: Relevant. \n\n", "infer7b_before_item": " Relevant\n\nInput: Input: You are given a search query and a document. Classify whether the document is relevant to the search query or not relevant. Search: why is the sky blue\nDocument: The Short Answer: Sunlight reaches Earth's atmosphere and is scattered in all directions by all the gases and particles in the air. Blue light is scattered more than the other colors because it travels as shorter, smaller waves. This is why we see a blue sky most of the time. \n Output: Relevant\n\nInput: Input: You are given a search query and a document. Classify whether the document is relevant to the search query or not relevant. Search: why is the sky blue\nDocument: The Short Answer: Sunlight reaches Earth's atmosphere and is scattered in all directions by all the gases and particles in the air. Blue light is scattered more than the other colors because it travels as shorter, smaller waves. This is why we see a blue sky most of the time. 
\n Output: Relevant\n\nInput: Input: You are given a search query and a document. Classify whether the document is relevant to the search query or not relevant. Search: why is the sky blue\nDocument", "infer7b_after_item": " \n Output: Relevant \n\n"} +{"id": 198, "instruction": "Create a list of subtopics for the given topic.", "input": "Music theory", "infer30b_before_item": " Output: Create a list of subtopics for the given topic. Music theory \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n", "infer30b_after_item": " \n Output: - Basic music theory\n- Intervals and chords\n- Scales and modes\n- Harmony and counterpoint\n- Rhythm and meter\n- Form and structure\n- Instrumentation and orchestration\n- Music notation and terminology\n- Musical analysis and interpretation\n- Music history and culture \n\n", "infer13b_before_item": " Output: Create a list of subtopics for the given topic. Music theory \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: - Fundamentals of Music Theory\n- Scales and Modes\n- Chords and Harmony\n- Rhythm and Meter\n- Notation and Transcription\n- Harmonic Analysis\n- Musical Forms\n- Musical Genres\n- Musical Instruments\n- Musical Styles\n- Musical Techniques\n- Musical Composition\n- Music History\n- Music Theory Applications\n- Music Theory Exercises\n- Music Theory Exams\n- Music Theory Examples\n- Music Theory Exercises for Kids\n- Music Theory for Beginners\n- Music Theory for Guitar\n- Music Theory for Piano\n- Music Theory for Singers\n- Music Theory for Violin\n- Music Theory for Bass\n- Music Theory for Drums\n- Music Theory for Ukulele\n- Music Theory for Saxophone\n- Music Theory for Flute\n- Music Theory for Clarinet\n- Music Theory for Trumpet\n- Music Theory for Trombone\n- Music Theory for Percussion\n- Music Theory for Harp\n- Music Theory for Guitar Pedagogy\n- Music Theory for Jazz\n- Music Theory for Classical Music\n- Music Theory for Pop Music\n", "infer7b_before_item": " Output: Create a list of subtopics for the given topic. 
Music theory\n*/\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": " \n Output: - Harmony\n- Melody\n- Rhythm\n- Form\n- Instruments\n- Composition\n- Arrangement\n- Improvisation\n- Analysis\n- History\n- Theory\n- Aesthetics\n- Technology\n- Education\n- Performance \n\n"} +{"id": 199, "instruction": "Summarize the movie in a snarky way. Try to explain the movie in just one sentence.", "input": "The Shining", "infer30b_before_item": " Output: Summarize the movie in a snarky way. Try to explain the movie in just one sentence.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: The Shining is a story about a family's descent into madness and horror as they try to survive a haunted hotel. \n\n", "infer13b_before_item": " Output: Summarize the movie in a snarky way. Try to explain the movie in just one sentence. The Shining \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " (1980) \n Output: The Shining is a horror classic about a family who moves into a haunted hotel and experiences a series of terrifying events. \n\n", "infer7b_before_item": " Output: Summarize the movie in a snarky way. Try to explain the movie in just one sentence. The Shining \n\"\"\"\n\nimport re\n\ndef summarize(movie):\n \"\"\"\n Summarize the movie in a snarky way. Try to explain the movie in just one sentence.\n \"\"\"\n # Get the title\n title = movie.title\n # Get the year\n year = movie.year\n # Get the director\n director = movie.director\n # Get the genre\n genre = movie.genre\n # Get the runtime\n runtime = movie.runtime\n # Get the rating\n rating = movie.rating\n # Get the cast\n cast = movie.cast\n # Get the plot\n plot = movie.plot\n # Get the poster\n poster = movie.poster\n # Get the trailer\n trailer = movie.trailer\n # Get the summary\n summary = movie.summary\n # Get the synopsis\n synopsis = movie.synopsis\n # Get the release date\n release_date = movie.release_date", "infer7b_after_item": " \n Output: A family goes to a haunted hotel to find a new start, but instead they find a nightmare. 
\n\n"} +{"id": 200, "instruction": "Enter the words that satisfy the given condition.", "input": "5 Countries that Start with S", "infer30b_before_item": " Output: Print the names of the countries that satisfy the given condition.\n\n Expected Output:\n SRI LANKA\n SPAIN\n SINGAPORE\n SUDAN\n SURINAME\n\n */\n\n#include \n#include \n\nint main()\n{\n char str[100];\n scanf(\"%s\",str);\n int i=0;\n while(str[i]!='\\0')\n {\n if(str[i]=='S')\n {\n printf(\"%s\\n\",str);\n }\n i++;\n }\n return 0;\n}\n", "infer30b_after_item": " \n Output: Saudi Arabia, South Africa, Spain, Sweden, Switzerland. \n\n", "infer13b_before_item": " Output: Enter the words that satisfy the given condition. 5 Countries that Start with S \n\n\nclass Solution:\n # @param {string} s\n # @param {string} c\n # @return {string[]}\n def findWords(self, s, c):\n if s == '':\n return []\n if c == '':\n return []\n if s == c:\n return [s]\n if s[0] == c[0]:\n return self.findWords(s[1:], c[1:]) + self.findWords(s[:1], c)\n return self.findWords(s, c)\n\n\ns = Solution()\nprint s.findWords('', '')\nprint s.findWords('a', 'a')\nprint s.findWords('a', 'b')\nprint s.findWords('a', 'ab')\nprint s.findWords('a', 'abc')\nprint s.findWords('a', 'abcd')\nprint s.findWords('a', 'abcd')\n", "infer13b_after_item": " \n Output: Saudi Arabia, Singapore, South Africa, Spain, Sweden \n\n", "infer7b_before_item": " Output: The words that satisfy the given condition. 5 Countries that Start with S\n\nInput: Input: Enter the words that satisfy the given condition. 5 Countries that Start with S\n Output: Output: The words that satisfy the given condition. 5 Countries that Start with S\n\nInput: Input: Enter the words that satisfy the given condition. 5 Countries that Start with S\n Output: Output: The words that satisfy the given condition. 5 Countries that Start with S\n\nInput: Input: Enter the words that satisfy the given condition. 5 Countries that Start with S\n Output: Output: The words that satisfy the given condition. 5 Countries that Start with S\n\nInput: Input: Enter the words that satisfy the given condition. 5 Countries that Start with S\n Output: Output: The words that satisfy the given condition. 5 Countries that Start with S\n\nInput: Input: Enter the words that satisfy the given condition. 5 Countries that Start with S\n Output: Output: The words that satisfy the given condition. 5 Countries that Start with S\n\nInput: Input: Enter the words that satisfy the given condition. 5 Count", "infer7b_after_item": " \n Output: Spain, Sweden, Singapore, South Africa, and Switzerland. \n\n"} +{"id": 201, "instruction": "Provide a name for the dish given the ingredients and instructions.", "input": "INGREDIENTS:\n2 (5 oz) cans Bumble Bee\u00ae Solid White Albacore Tuna, drained\n1 avocado\n2 Tbsp Sriracha\n1 Tbsp Dijon mustard\n2 to 3 Tbsp celery, chopped\n2 Tbsp red onion, chopped\n2 green onions, chopped\n1 Tbsp fresh cilantro, chopped\nSalt and pepper, to taste\n2 heaping cups leafy green lettuce\n1 cup matchstick carrots\n4 (10 inch) whole wheat tortillas\nINSTRUCTIONS:\nIn a medium bowl, mash together tuna and avocado until combined. Add in the rest of the ingredients through the salt and pepper, mixing well.\nTo assemble, top each tortilla with a 1/2 cup leafy greens, 1/4 cup matchstick carrots and divide the tuna mixture evenly among the wraps. Tightly roll up the tortilla, slice and enjoy!", "infer30b_before_item": " Output: Provide a name for the dish given the ingredients and instructions. 
INGREDIENTS:\n1 (16 oz) package frozen chopped spinach, thawed and drained\n1 (14 oz) can diced tomatoes, undrained\n1 (14 oz) can chicken broth\n1 (10 oz) can condensed cream of chicken soup\n1 (10 oz) can condensed cream of mushroom soup\n1 (10 oz) can condensed cream of celery soup\n1 (10 oz) can condensed cream of asparagus soup\n1 (10 oz) can condensed cream of potato soup\n1 (10 oz) can condensed cream of broccoli soup\n1 (10 oz) can condensed cream of cauliflower soup\n1 (10 oz) can condensed cream of corn soup\n1 (10 oz) can condensed cream of tomato soup\n1 (10 oz) can condensed cream of cheddar cheese soup\n1", "infer30b_after_item": " \n Output: Tuna and Avocado Wraps. \n\n", "infer13b_before_item": " Output: Provide a name for the dish given the ingredients and instructions. INGREDIENTS:\n1 (15 oz) can chickpeas, drained\n1 (15 oz) can black beans, drained\n1 (15 oz) can kidney beans, drained\n1 (15 oz) can pinto beans, drained\n1 (15 oz) can garbanzo beans, drained\n1 (15 oz) can white beans, drained\n1 (15 oz) can navy beans, drained\n1 (15 oz) can cannellini beans, drained\n1 (15 oz) can great northern beans, drained\n1 (15 oz) can black-eyed peas, drained\n1 (15 oz) can pink beans, drained\n1 (15 oz) can lima beans, drained\n1 (15 oz) can butter beans, drained\n1 (15 oz) can green beans, drained\n1 (15 o", "infer13b_after_item": " \n Output: Tuna Tacos. \n\n", "infer7b_before_item": " Output: Provide a name for the dish given the ingredients and instructions. INGREDIENTS:\n2 (5 oz) cans Bumble Bee\u00ae Solid White Albacore Tuna, drained\n1 avocado\n2 Tbsp Sriracha\n1 Tbsp Dijon mustard\n2 to 3 Tbsp celery, chopped\n2 Tbsp red onion, chopped\n2 green onions, chopped\n1 Tbsp fresh cilantro, chopped\nSalt and pepper, to taste\n2 heaping cups leafy green lettuce\n1 cup matchstick carrots\n4 (10 inch) whole wheat tortillas\nINSTRUCTIONS:\nIn a medium bowl, mash together tuna and avocado until combined. Add in the rest of the ingredients through the salt and pepper, mixing well.\nTo assemble, top each tortilla with a 1/2 cup leafy greens, 1/4 cup matchstick carrots and divide the tuna mixture evenly among the wraps. Tightly roll up the tort", "infer7b_after_item": " \n Output: The dish is called \"Tuna Tostada\". \n\n"} +{"id": 202, "instruction": "Summarize a meeting from the given list of bullet points. Be sure to convert shorthand into a first-hand account.", "input": "Rose: Analyze data and presents findings\nJohn: propose new idea\nJane: appointed to head project\nTom: need more time to fix software bug", "infer30b_before_item": " Output: Summarize a meeting from the given list of bullet points. Be sure to convert shorthand into a first-hand account. Rose: Analyze data and presents findings\nJohn: propose new idea\nJane: appointed to head project\nTom: need more time to fix software bug \n Explanation: \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# ", "infer30b_after_item": " \n Output: Rose presented her analysis of the data, which she had been working on for the past few weeks. John proposed a new idea that he believed could be beneficial to the project. Jane was appointed to head the project, and Tom requested more time to fix the software bug that he had been working on. \n\n", "infer13b_before_item": " Output: Summarize a meeting from the given list of bullet points. 
Be sure to convert shorthand into a first-hand account. Rose: Analyze data and presents findings\nJohn: propose new idea\nJane: appointed to head project\nTom: need more time to fix software bug \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: Rose presented the data analysis and findings she had done, while John proposed a new idea. Jane was appointed to head the project, and Tom requested more time to fix the software bug. \n\n", "infer7b_before_item": " Output: Write a summary of the meeting.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": " \n Output: At the meeting, Rose presented the findings of the data analysis, John proposed a new idea, and Jane was appointed to head the project. Tom said that he needs more time to fix the software bug. \n\n"} +{"id": 203, "instruction": "List a few popular songs from the given album.", "input": "Back To Black", "infer30b_before_item": " Output: List the album name. Back To Black\n\n Input: Input: List a few popular songs from the given album. The Blueprint 3\n Output: Output: List the album name. The Blueprint 3\n\n Input: Input: List a few popular songs from the given album. The Blueprint 3\n Output: Output: List the album name. The Blueprint 3\n\n Input: Input: List a few popular songs from the given album. The Blueprint 3\n Output: Output: List the album name. The Blueprint 3\n\n Input: Input: List a few popular songs from the given album. The Blueprint 3\n Output: Output: List the album name. The Blueprint 3\n\n Input: Input: List a few popular songs from the given album. The Blueprint 3\n Output: Output: List the album name. The Blueprint 3\n\n Input: Input: List a few popular songs from the given album. The Blueprint 3\n Output: Output: List the album name. The Blueprint 3\n\n Input: Input: List a few popular songs from the given album. The Blueprint 3\n Output: Output: List the album name", "infer30b_after_item": " by Amy Winehouse \n Output: Popular songs from Back To Black by Amy Winehouse include \"Rehab\", \"You Know I'm No Good\", \"Love Is A Losing Game\", \"Tears Dry On Their Own\", \"Back To Black\", and \"Wake Up Alone\". 
\n\n", "infer13b_before_item": " Output: List the songs in the album in the order they appear in the album.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": ", Amy Winehouse \n Output: Popular songs from the album Back To Black by Amy Winehouse include \"Rehab\", \"You Know I'm No Good\", \"Tears Dry on Their Own\", \"Back to Black\", and \"Love Is a Losing Game\". \n\n", "infer7b_before_item": " Output: List the artists who have performed the songs.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": ", Amy Winehouse \n Output: 1. Rehab\n2. You Know I'm No Good\n3. Tears Dry on Their Own\n4. Love Is a Losing Game\n5. Me & Mr. Jones\n6. Back to Black\n7. Stronger Than Me\n8. Valerie\n9. Monkey Man\n10. Wake Up Little Suzie \n\n"} +{"id": 204, "instruction": "Invent a new word based on a description of the word.", "input": "The feeling of knowing, while you are still in the moment, that something occurring will be remembered forever.", "infer30b_before_item": " Output: Invent a new word based on a description of the word. The feeling of knowing, while you are still in the moment, that something occurring will be remembered forever.\n\nInput: Input: Invent a new word based on a description of the word. The feeling of knowing, while you are still in the moment, that something occurring will be remembered forever. \n Output: Output: Invent a new word based on a description of the word. The feeling of knowing, while you are still in the moment, that something occurring will be remembered forever.\n\nInput: Input: Invent a new word based on a description of the word. The feeling of knowing, while you are still in the moment, that something occurring will be remembered forever. \n Output: Output: Invent a new word based on a description of the word. The feeling of knowing, while you are still in the moment, that something occurring will be remembered forever.\n\nInput: Input: Invent a new word based on a description of the word. The feeling of knowing, while you are still in the moment, that something occurring will be remembered forever. \n Output: Output: Invent a new word based on a", "infer30b_after_item": " \n Output: Memorication - the act of experiencing something so powerful that it will be remembered forever. 
\n\n", "infer13b_before_item": " Output: A new word that describes the feeling of knowing, while you are still in the moment, that something occurring will be remembered forever.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: Ephemeralize. \n\n", "infer7b_before_item": " Output: A word that describes the feeling of knowing, while you are still in the moment, that something occurring will be remembered forever.\n\nComment: I'm not sure what you mean by \"a word that describes the feeling of knowing, while you are still in the moment, that something occurring will be remembered forever.\" Do you mean a word that describes the feeling of knowing that something will be remembered forever? Or do you mean a word that describes the feeling of knowing that something will be remembered forever while you are still in the moment? Or do you mean a word that describes the feeling of knowing that something will be remembered forever while you are still in the moment?\n\nComment: I mean a word that describes the feeling of knowing that something will be remembered forever while you are still in the moment.\n\nComment: I'm not sure what you mean by \"a word that describes the feeling of knowing that something will be remembered forever while you are still in the moment.\" Do you mean a word that describes the feeling of knowing that something will be remembered forever while you are still in the moment? Or do you mean a word that describes the feeling of knowing that something will be remembered forever while you are still in the", "infer7b_after_item": " \n Output: Eternitymoment. 
\n\n"} +{"id": 205, "instruction": "Given a programming language and the name of a function, write a command to show how to use the function.", "input": "Language: Python\nFunction: input", "infer30b_before_item": " Output: The function's documentation.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: print(\"To use the input function in Python, type:\")\nprint(\"input('Enter a string:')\") \n\n", "infer13b_before_item": "\n\nInput:\n\nOutput:\n\nInput:\n\nOutput:\n\nInput:\n\nOutput:\n\nInput:\n\nOutput:\n\nInput:\n\nOutput:\n\nInput:\n\nOutput:\n\nInput:\n\nOutput:\n\nInput:\n\nOutput:\n\nInput:\n\nOutput:\n\nInput:\n\nOutput:\n\nInput:\n\nOutput:\n\nInput:\n\nOutput:\n\nInput:\n\nOutput:\n\nInput:\n\nOutput:\n\nInput:\n\nOutput:\n\nInput:\n\nOutput:\n\nInput:\n\nOutput:\n\nInput:\n\nOutput:\n\nInput:\n\nOutput:\n\nInput:\n\nOutput:\n\nInput:\n\nOutput:\n\nInput:\n\nOutput:\n\nInput:\n\nOutput:\n\nInput:\n\nOutput:\n\nInput:\n\nOutput:\n\nInput:\n\nOutput:\n\nInput:\n\nOutput:\n\nInput:\n\nOutput:\n\nInput:\n\nOutput:\n\nInput:\n\nOutput:\n\nInput:\n\nOutput:\n\nInput:\n\nOutput:", "infer13b_after_item": "() \n Output: To use the input() function in Python, you can type the following command:\n\n>>> input(\"Enter your name: \")\n\nThis will prompt the user to enter their name and then return the inputted value. \n\n", "infer7b_before_item": "\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": "() \n Output: In Python, the input() function is used to read a line of input from the user. For example, to read a line of input from the user, you can use the following command:\n\ninput(\"Please enter a line of text: \") \n\n"} +{"id": 206, "instruction": "Provide a pleasant compliment based on the quality you have been given.", "input": "Truthfulness", "infer30b_before_item": " Output: Provide a pleasant compliment based on the quality you have been given. 
Truthfulness \n */\n\npublic class Compliment {\n\n public static void main(String[] args) {\n Scanner input = new Scanner(System.in);\n String quality = input.nextLine();\n String compliment = compliment(quality);\n System.out.println(compliment);\n }\n\n public static String compliment(String quality) {\n String compliment = \"\";\n if (quality.equals(\"smart\")) {\n compliment = \"You are so smart!\";\n } else if (quality.equals(\"funny\")) {\n compliment = \"You are so funny!\";\n } else if (quality.equals(\"attractive\")) {\n compliment = \"You are so attractive!\";\n } else if (quality.equals(\"kind\")) {\n compliment = \"You are so kind!\";\n } else if (quality.equals(\"truthful\")) {\n compliment = \"You are so truthful!\";\n } else {\n compliment =", "infer30b_after_item": " \n Output: You are so honest and trustworthy, I really appreciate that. \n\n", "infer13b_before_item": " Output: Provide a pleasant compliment based on the quality you have been given. Truthfulness \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: You are so honest and trustworthy, it's a quality that is rare to find. \n\n", "infer7b_before_item": " Output: Provide a pleasant compliment based on the quality you have been given. Truthfulness \n\n\"\"\"\n\n\nclass Truthfulness:\n def __init__(self):\n self.truthfulness = 0\n\n def __str__(self):\n return \"Truthfulness: \" + str(self.truthfulness)\n\n def __repr__(self):\n return \"Truthfulness: \" + str(self.truthfulness)\n\n def __add__(self, other):\n self.truthfulness += other\n return self\n\n def __sub__(self, other):\n self.truthfulness -= other\n return self\n\n def __mul__(self, other):\n self.truthfulness *= other\n return self\n\n def __div__(self, other):\n self.truthfulness /= other\n return self\n\n def __mod__(self, other):\n self.truthfulness %= other\n return self\n\n def __pow__(self, other):\n self.", "infer7b_after_item": " \n Output: You are so honest and trustworthy, I can always count on you to tell me the truth. \n\n"} +{"id": 207, "instruction": "Create a brief resume introduction using the following information.", "input": "name: Mark\nnationality: Italian\nlanguage: English\nattributes: creative, goal-oriented, motivated\nskills: software engineering, content marketing", "infer30b_before_item": " Output: Create a brief resume introduction using the following information. name: Mark\nnationality: Italian\nlanguage: English\nattributes: creative, goal-oriented, motivated\nskills: software engineering, content marketing \n Expected result: Expected result: Create a brief resume introduction using the following information. 
name: Mark\nnationality: Italian\nlanguage: English\nattributes: creative, goal-oriented, motivated\nskills: software engineering, content marketing \n Author: Author: Mark\n Date: Date: 2017-02-15\n----------------------------------------------------------------------------------------------------\n*/\n\n#include \n#include \n\nusing namespace std;\n\nint main() {\n string name;\n string nationality;\n string language;\n string attributes;\n string skills;\n\n cout << \"Input: Input: Create a brief resume introduction using the following information.\" << endl;\n cout << \"name: \";\n cin >> name;\n cout << \"nationality: \";\n cin >> nationality;\n cout << \"language: \";\n cin", "infer30b_after_item": ", web development \n Output: I am Mark, an Italian national with a strong command of the English language. I am a creative, goal-oriented and motivated individual with skills in software engineering, content marketing, and web development. \n\n", "infer13b_before_item": " Output: Create a brief resume introduction using the following information. name: Mark\nnationality: Italian\nlanguage: English\nattributes: creative, goal-oriented, motivated\nskills: software engineering, content marketing \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: I am a creative, goal-oriented, and motivated software engineer with a passion for content marketing. I am an Italian national with a strong command of the English language. I am highly skilled in software engineering and content marketing, and I am always looking for new ways to develop my skills and further my career. \n\n", "infer7b_before_item": " Create a brief resume introduction using the following information. name: Mark\nnationality: Italian\nlanguage: English\nattributes: creative, goal-oriented, motivated\nskills: software engineering, content marketing\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": " \n Output: Mark is an Italian software engineer with a passion for content marketing. He is creative, goal-oriented, and motivated, with a strong background in software engineering and content marketing. \n\n"} +{"id": 208, "instruction": "Make a list of adjectives that can be used to describe the given brand.", "input": "a creative tech startup", "infer30b_before_item": " Output: Make a list of adjectives that can be used to describe the given brand. a creative tech startup \n Explanation: Output: Make a list of adjectives that can be used to describe the given brand. 
a creative tech startup \n */\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: Innovative, cutting-edge, forward-thinking, disruptive, revolutionary, pioneering, progressive, dynamic, creative, original, visionary, groundbreaking, transformative. \n\n", "infer13b_before_item": " Output: Make a list of adjectives that can be used to describe the given brand. a creative tech startup \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: Innovative, creative, cutting-edge, forward-thinking, tech-savvy, progressive, dynamic, ambitious, visionary, pioneering, entrepreneurial. \n\n", "infer7b_before_item": " Output: Make a list of adjectives that can be used to describe the given brand. a creative tech startup \n\"\"\"\n\n\ndef get_adjectives(brand):\n \"\"\"\n :type brand: str\n :rtype: list\n \"\"\"\n adjectives = []\n for word in brand.split():\n if word in adjectives:\n adjectives.remove(word)\n else:\n adjectives.append(word)\n return adjectives\n\n\ndef main():\n \"\"\"\n :rtype: None\n \"\"\"\n brand = input(\"Input: \")\n adjectives = get_adjectives(brand)\n print(adjectives)\n\n\nif __name__ == \"__main__\":\n main()\n", "infer7b_after_item": " \n Output: Innovative, cutting-edge, dynamic, creative, forward-thinking, disruptive, revolutionary, visionary, ambitious, agile, adaptable, resourceful, collaborative, passionate, and entrepreneurial. \n\n"} +{"id": 209, "instruction": "Someone gives you some information about where they want to go and what they enjoy seeing and doing. Describe a proper road trip for them and find the best stops and sctivities in the way.", "input": "I'm looking for a west coast road trip. My goal is to start near San Diego and travel to Los Angeles.", "infer30b_before_item": " Output: A list of places to stop and things to do along the way.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: For a west coast road trip from San Diego to Los Angeles, there are plenty of great stops and activities along the way. Start your journey in San Diego and make your way up the coast. Stop in Carlsbad to visit the beautiful Carlsbad Flower Fields and take a stroll along the beach. Then, head to San Clemente to explore the San Clemente State Beach and the San Onofre State Beach. 
Continue on to Laguna Beach and take a walk along the boardwalk, or take a surfing lesson. Next, stop in Newport Beach to explore the Newport Beach Pier and take a boat tour of the harbor. Finally, make your way to Los Angeles and explore the city. Visit the Hollywood Walk of Fame, take a tour of the Warner Brothers Studio, and take a stroll through the Santa Monica Pier. \n\n", "infer13b_before_item": " Output: A list of stops and activities that you think would be good for the person.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " I enjoy visiting beaches and exploring nature. \n Output: A proper road trip for you would start in San Diego and travel to Los Angeles. Along the way, you can stop at beaches like La Jolla, Carlsbad, and Coronado. You can also visit the San Diego Zoo and Balboa Park. In Los Angeles, you can visit the Getty Center, Griffith Observatory, and Santa Monica Pier. \n\n", "infer7b_before_item": " I want to find the best stops and activities along the way. I want to find the best places to stop and see and do things. I want to find the best places to eat and sleep. I want to find the best places to see and do things. I want to find the best places to eat and sleep. I want to find the best places to see and do things. I want to find the best places to eat and sleep. I want to find the best places to see and do things. I want to find the best places to eat and sleep. I want to find the best places to see and do things. I want to find the best places to eat and sleep. I want to find the best places to see and do things. I want to find the best places to eat and sleep. I want to find the best places to see and do things. I want to find the best places to eat and sleep. I want to find the best places to see and do things. I want to find the best places to eat and sleep. I want to find the best places to see and do things. I want to find the best places to eat and sleep. I want to find the best places to see and do things. I want to", "infer7b_after_item": " I enjoy seeing the ocean, hiking, and exploring new places. \n Output: A great road trip for the west coast would start in San Diego and take you to Los Angeles. Along the way, you can stop at the famous beaches of La Jolla, visit the famous Hollywood sign, and explore the beautiful canyons of Malibu. You can also take a day trip to the famous Joshua Tree National Park, or stop in Santa Barbara to explore the charming city. Along the way, you can also stop at the famous Hearst Castle, or take a day trip to the famous Yosemite National Park. \n\n"} +{"id": 210, "instruction": "Regex is a set of characters that define a search pattern. This kind of pattern is usually used by string-searching algorithms to find or replace strings or to validate inputs. Convert the natural english text to regex.", "input": "Match a valid ip address", "infer30b_before_item": " Output: Regex is a set of characters that define a search pattern. This kind of pattern is usually used by string-searching algorithms to find or replace strings or to validate inputs. 
Convert the natural english text to regex. Match a valid ip address \n\n Example: \n\n Input: \n\n Output: \n\n Explanation: \n\n */\n\n#include \n#include \n#include \n\nint main() {\n char *input = \"1234567890\";\n char *output = \"\\\\d{10}\";\n printf(\"%s\\n\", output);\n return 0;\n}\n", "infer30b_after_item": ". \n Output: ^[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}$ \n\n", "infer13b_before_item": " Output: Regex is a set of characters that define a search pattern. This kind of pattern is usually used by string-searching algorithms to find or replace strings or to validate inputs. Convert the natural english text to regex. Match a valid ip address \n\n\nclass Solution:\n # @param {string} s\n # @return {string}\n def regex(self, s):\n s = s.replace(\" \", \"\")\n s = s.replace(\"(\", \"\")\n s = s.replace(\")\", \"\")\n s = s.replace(\"(\", \"\")\n s = s.replace(\")\", \"\")\n s = s.replace(\" \", \"\")\n s = s.replace(\"(\", \"\")\n s = s.replace(\")\", \"\")\n s = s.replace(\" \", \"\")\n s = s.replace(\"(\", \"\")\n s = s.replace(\")\", \"\")\n s = s.replace(\" \", \"\")\n s = s.replace(\"(\", \"\")\n s = s.replace(\")\", \"\")\n s = s.replace(\" \", \"\")\n s = s.replace(\"(\", \"\")\n s = s.replace(\")\", \"\")\n ", "infer13b_after_item": " \n Output: ^(?:(?!.*\\d+\\.\\d+\\.\\d+\\.\\d+).)*$ \n\n", "infer7b_before_item": " Match a valid ip address\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": ". 127.0.0.1 \n Output: ^(\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})+$ \n\n"} +{"id": 211, "instruction": "Write an engaging and well-written property listing description for selling a house. Address of the house and some of the details are given to you. Fill in the information gap with hallucinations if needed.", "input": "Property Address: 412 Monterey Ave, Capitola, CA 95010\nParking: 3 cars + a finished single car garage\nDetails:\n- open floorplan\n- window with views to park/mountains\n- Kitchen with shaker cabinetry and white Calcatta quartz counters", "infer30b_before_item": "\n412 Monterey Ave, Capitola, CA 95010\nParking: 3 cars + a finished single car garage\nDetails:\n- open floorplan\n- window with views to park/mountains\n- Kitchen with shaker cabinetry and white Calcatta quartz counters\n# **Exercise 1.1**\nInput: Input: Write an engaging and well-written property listing description for selling a house. Address of the house and some of the details are given to you. Fill in the information gap with hallucinations if needed. Property Address: 412 Monterey Ave, Capitola, CA 95010\nParking: 3 cars + a finished single car garage\nDetails:\n- open floorplan\n- window with views to park/mountains\n- Kitchen with shaker cabinetry and white Calcatta quartz counters\n Output:\n412 Monterey Ave, Capitola, CA 95010\nParking: 3 cars + a finished single car garage\nDetails:\n- open floorplan\n- window", "infer30b_after_item": "\n- Master bedroom with walk-in closet and en-suite bathroom\n- Laundry room with storage \n Output: Welcome to 412 Monterey Ave, Capitola, CA 95010! 
This beautiful home is located in a quiet neighborhood and offers plenty of parking for up to 3 cars plus a finished single car garage. The open floorplan is perfect for entertaining, with a window that offers views to the park and mountains. The kitchen features shaker cabinetry and white Calcatta quartz counters, while the master bedroom has a walk-in closet and en-suite bathroom. The laundry room has storage, making it the perfect place to call home. \n\n", "infer13b_before_item": "\n- 3 bedrooms\n- 2 bathrooms\n- 1,500 sq. ft.\n- 1,000 sq. ft. lot\n- 1,000 sq. ft. garage\n- 1,000 sq. ft. deck\n- 1,000 sq. ft. basement\n- 1,000 sq. ft. attic\n- 1,000 sq. ft. crawlspace\n- 1,000 sq. ft. storage shed\n- 1,000 sq. ft. greenhouse\n- 1,000 sq. ft. pool\n- 1,000 sq. ft. hot tub\n- 1,000 sq. ft. sauna\n- 1,000 sq. ft. gym\n- 1,000 sq. ft. wine cellar\n- 1,000 sq. ft. home theater\n- 1,000 sq. ft. game room\n- 1,000 sq. ft. office\n- 1,000 sq", "infer13b_after_item": "\n- 3 bedrooms and 2 bathrooms \n Output: Welcome to 412 Monterey Ave, Capitola, CA 95010! This beautiful home offers a spacious open floorplan with plenty of natural light and views of the park and mountains. The kitchen is equipped with shaker cabinetry and white Calcatta quartz counters, perfect for entertaining. The master bedroom features an en-suite bathroom and walk-in closet. The other two bedrooms are also generously sized and share a full bathroom. The home also features a finished single car garage, perfect for storing your car or other belongings. This home is a must-see and is sure to impress! \n\n", "infer7b_before_item": "\n- 1000 words\n- 1000 words\n- 1000 words\n- 1000 words\n- 1000 words\n- 1000 words\n- 1000 words\n- 1000 words\n- 1000 words\n- 1000 words\n- 1000 words\n- 1000 words\n- 1000 words\n- 1000 words\n- 1000 words\n- 1000 words\n- 1000 words\n- 1000 words\n- 1000 words\n- 1000 words\n- 1000 words\n- 1000 words\n- 1000 words\n- 1000 words\n- 1000 words\n- 1000 words\n- 1000 words\n- 1000 words\n- 1000 words\n- 1000 words\n- 1000 words\n- 1000 words", "infer7b_after_item": "\n- Master bedroom with walk-in closet and en-suite bathroom\n- 2 additional bedrooms\n- 2.5 bathrooms\n- 1,600 sq. ft. \n Output: 412 Monterey Ave, Capitola, CA 95010 is a stunning home with a perfect blend of modern and classic design. The open floorplan and large windows allow for plenty of natural light and views of the park and mountains. The kitchen features shaker cabinetry and white Calcatta quartz counters, while the master bedroom boasts a walk-in closet and en-suite bathroom. The 2.5 bathrooms and 1,600 sq. ft. of living space make this home perfect for any family. \n\n"} +{"id": 212, "instruction": "Given a real estate listing description, extract the information and details of the property from the text. The details consist of address, basic house attributes, etc.", "input": "WALK TO THE BEACH FROM CAPITOLA KNOLLS! Single-level upstairs condo with your own large private deck overlooking nature. Adorable home that is move-in ready and waiting for you to put your own personal touches on. Gorgeous and well-maintained landscaped park-like setting with lush green grasses with a community pool and spa to enjoy. 
Desirable location...walk to Capitola Village, Gayle's Bakery, restaurants, shopping, schools or jump on Hwy 1 right around the corner.", "infer30b_before_item": " Output: A list of the details of the property.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: Address: 1234 Capitola Knolls, Capitola, CA 95010\n\nProperty Type: Condo\n\nBedrooms: 2\n\nBathrooms: 1\n\nLiving Area: 1,000 sq ft\n\nDeck Size: Large\n\nParking: 1\n\nAmenities: Private Deck, Community Pool and Spa, Lush Green Grasses, Walk to Capitola Village, Gayle's Bakery, Restaurants, Shopping, Schools. \n\n", "infer13b_before_item": " Output: A list of the extracted information and details of the property.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n\nWords: 225\n\nLink: https://www.redfin.com/CA/Capitola/100-Cabrillo-Dr-902-980/home/14000000 \n Output: Address: 100 Cabrillo Dr 902, Capitola, CA 95010\n\nBasic House Attributes:\n- Single-level upstairs condo\n- Large private deck\n- Move-in ready\n- Adorable home\n- Gorgeous and well-maintained landscaped park-like setting\n- Community pool and spa\n- Desirable location\n- Walk to Capitola Village, Gayle's Bakery, restaurants, shopping, schools\n- Jump on Hwy 1 right around the corner\n\nPrice: $1,400,000\n\nLink: https://www.redfin.com/CA/Capitola/100-Cabrillo-Dr-902-980/home/14000000 \n\n", "infer7b_before_item": " Output: Given a real estate listing description, extract the information and details of the property from the text. The details consist of address, basic house attributes, etc. WALK TO THE BEACH FROM CAPITOLA KNOLLS! Single-level upstairs condo with your own large private deck overlooking nature. Adorable home that is move-in ready and waiting for you to put your own personal touches on. Gorgeous and well-maintained landscaped park-like setting with lush green grasses with a community pool and spa to enjoy. Desirable location...walk to Capitola Village, Gayle's Bakery, restaurants, shopping, schools or jump on Hwy 1 right around the corner. 
\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"", "infer7b_after_item": " 1100 Capitola Knolls Drive, Capitola, CA 95010 831-476-1100 \n Output: Address: 1100 Capitola Knolls Drive, Capitola, CA 95010\nBasic House Attributes: Single-level upstairs condo with your own large private deck overlooking nature, Adorable home that is move-in ready and waiting for you to put your own personal touches on, Gorgeous and well-maintained landscaped park-like setting with lush green grasses with a community pool and spa to enjoy, Desirable location...walk to Capitola Village, Gayle's Bakery, restaurants, shopping, schools or jump on Hwy 1 right around the corner. \n\n"} +{"id": 213, "instruction": "Review the property description and tell us what is good and what is bad about it and list its key takeaways.", "input": "There's real magic in combining inherently compelling Victorian architecture with a modern aesthetic. Add in an extraordinary multi-level garden & the result is the undeniable elegance of 444 29th St. The front of the home is light-filled w/ a seamless connection between living, dining & kitchen. Newly updated kitchen w/ quartz counters & upgraded appliances. A lg dining rm has a bay of windows &, when paired w/ the adjacent living rm, provides an ideal space to entertain. 2 lg bdrms on this level are at the rear. Both w/ lg closets & walk out to the garden. Stylishly remodeled full bath on this level. 3rd bdrm/2nd full bath are located on the ground floor - perfect as a guest rm, home office, fitness area, etc. The enormous garden occupies an extra deep lot & is beautifully landscaped & terraced. Laundry/storage rm, 1-car garage pkg, Tesla solar panels. Prime, flat part of Noe Valley - short walk to all that Noe has to offer. Steps from the J-Church & convenient to shuttles/freeways.", "infer30b_before_item": "\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: The property at 444 29th St. is a great combination of Victorian architecture and modern aesthetic. The front of the home is light-filled and has a seamless connection between living, dining and kitchen. The kitchen has been recently updated with quartz counters and upgraded appliances. The large dining room has a bay of windows and when paired with the adjacent living room, provides an ideal space to entertain. The two large bedrooms on this level have large closets and walk out to the garden. The third bedroom/second full bath is located on the ground floor and is perfect as a guest room, home office, fitness area, etc. The enormous garden occupies an extra deep lot and is beautifully landscaped and terraced. Laundry/storage room, 1-car garage parking, and Tesla solar panels are also included. 
The property is located in a prime, flat part of Noe Valley and is a short walk to all that Noe has to offer. It is also steps from the J-Church and convenient to shuttles and freeways. \n\n", "infer13b_before_item": " Output: Review the property description and tell us what is good and what is bad about it and list its key takeaways. There's real magic in combining inherently compelling Victorian architecture with a modern aesthetic. Add in an extraordinary multi-level garden & the result is the undeniable elegance of 444 29th St. The front of the home is light-filled w/ a seamless connection between living, dining & kitchen. Newly updated kitchen w/ quartz counters & upgraded appliances. A lg dining rm has a bay of windows &, when paired w/ the adjacent living rm, provides an ideal space to entertain. 2 lg bdrms on this level are at the rear. Both w/ lg closets & walk out to the garden. Stylishly remodeled full bath on this level. 3rd bdrm/2nd full bath are located on the ground floor - perfect as a guest rm, home office, fitness area, etc. The enormous garden occupies an extra deep lot & is beautifully landscaped & terraced. Laundry/storage rm, 1-car", "infer13b_after_item": " \n\n Output: Good:\n- Victorian architecture\n- Multi-level garden\n- Light-filled living, dining and kitchen\n- Updated kitchen with quartz counters and upgraded appliances\n- Dining room with bay of windows\n- Living room with adjacent dining room for entertaining\n- Two large bedrooms with large closets and windows\n- Two full bathrooms\n- Stylishly remodeled full bathroom\n- Laundry and storage room\n- One-car garage parking\n- Tesla solar panels\n\nBad:\n- Flat part of Noe Valley\n- Short walk to all that Noe has to offer\n- Convenient to shuttles and freeways \n\nKey Takeaways:\n- Victorian architecture with modern aesthetic\n- Multi-level garden\n- Light-filled living, dining and kitchen\n- Updated kitchen with quartz counters and upgraded appliances\n- Dining room with bay of windows\n- Living room with adjacent dining room for entertaining\n- Two large bedrooms with large closets and windows\n- Two full bathrooms\n- Stylishly remode", "infer7b_before_item": " Output: List the property's key selling points and tell us what you think the property's best features are. The garden is the property's best feature. It's a rare find in Noe Valley. The garden is a perfect place to entertain, relax, or just enjoy the outdoors. The garden is a perfect place to entertain, relax, or just enjoy the outdoors. The garden is a perfect place to entertain, relax, or just enjoy the outdoors. The garden is a perfect place to entertain, relax, or just enjoy the outdoors. The garden is a perfect place to entertain, relax, or just enjoy the outdoors. The garden is a perfect place to entertain, relax, or just enjoy the outdoors. The garden is a perfect place to entertain, relax, or just enjoy the outdoors. The garden is a perfect place to entertain, relax, or just enjoy the outdoors. The garden is a perfect place to entertain, relax, or just enjoy the outdoors. The garden is a perfect place to entertain, relax, or just enjoy the outdoors. The garden is a perfect place to entertain, relax, or just enjoy the outdoors. 
The", "infer7b_after_item": " \n\nKey Takeaways:\n- Victorian architecture w/ modern aesthetic\n- Light-filled living spaces\n- Updated kitchen w/ quartz counters & upgraded appliances\n- Lg dining rm w/ bay of windows\n- Perfect for entertaining\n- 2 lg bdrms w/ lg closets & walk out to garden\n- Stylishly remodeled full bath\n- 3rd bdrm/2nd full bath\n- Prime, flat part of Noe Valley\n- Short walk to all that Noe has to offer\n- Steps from the J-Church & convenient to shuttles/freeways\n- 444 29th St. - real magic in combining inherently compelling Victorian architecture with a modern aesthetic. \n Output: 444 29th St. is a stunning Victorian home with a modern aesthetic. The front of the home is light-filled and seamlessly connects the living, dining, and kitchen areas. The kitchen has been updated with quartz counters and upgraded appliances. The dining room has"} +{"id": 214, "instruction": "List some of the top real estate marketing words to add value to the listing and engage more potential buyers.", "input": "", "infer30b_before_item": " Output: List some of the top real estate marketing words to add value to the listing and engage more potential buyers.\nInput: Input: List some of the top real estate marketing words to add value to the listing and engage more potential buyers. \n Output: Output: List some of the top real estate marketing words to add value to the listing and engage more potential buyers.\nInput: Input: List some of the top real estate marketing words to add value to the listing and engage more potential buyers. \n Output: Output: List some of the top real estate marketing words to add value to the listing and engage more potential buyers.\nInput: Input: List some of the top real estate marketing words to add value to the listing and engage more potential buyers. \n Output: Output: List some of the top real estate marketing words to add value to the listing and engage more potential buyers.\nInput: Input: List some of the top real estate marketing words to add value to the listing and engage more potential buyers. \n Output: Output: List some of the top real estate marketing words to add value to the listing and eng", "infer30b_after_item": "\n Output: Some of the top real estate marketing words to add value to the listing and engage more potential buyers include: luxurious, spacious, modern, convenient, charming, beautiful, private, gorgeous, stunning, and unique. \n\n", "infer13b_before_item": " Output: List some of the top real estate marketing words to add value to the listing and engage more potential buyers. \n**Note:** The output is the same as the input.\nExplanation:\nThe output is the same as the input because the input is a list of words, and the output is a list of words.\n##### **Question 11**\nInput: Input: The following is a list of words that are used in the English language. \nOutput: Output: The following is a list of words that are used in the English language. \n**Note:** The output is the same as the input.\nExplanation:\nThe output is the same as the input because the input is a list of words, and the output is a list of words.\n##### **Question 12**\nInput: Input: The following is a list of words that are used in the English language. \nOutput: Output: The following is a list of words that are used in the English language. 
\n**Note:** The output is the same as the input.\nExplanation:\nThe output is the same as the input because the input is a list of", "infer13b_after_item": "\n Output: Luxurious, spacious, modern, stylish, high-end, unique, comfortable, inviting, energy-efficient, eco-friendly, and well-maintained. \n\n", "infer7b_before_item": " Output: List some of the top real estate marketing words to add value to the listing and engage more potential buyers.\nInput: Input: List some of the top real estate marketing words to add value to the listing and engage more potential buyers. \n Output: Output: List some of the top real estate marketing words to add value to the listing and engage more potential buyers.\nInput: Input: List some of the top real estate marketing words to add value to the listing and engage more potential buyers. \n Output: Output: List some of the top real estate marketing words to add value to the listing and engage more potential buyers.\nInput: Input: List some of the top real estate marketing words to add value to the listing and engage more potential buyers. \n Output: Output: List some of the top real estate marketing words to add value to the listing and engage more potential buyers.\nInput: Input: List some of the top real estate marketing words to add value to the listing and engage more potential buyers. \n Output: Output: List some of the top real estate marketing words to add value to the listing and eng", "infer7b_after_item": "\n Output: Some of the top real estate marketing words to add value to the listing and engage more potential buyers include: luxury, modern, spacious, open concept, and move-in ready. \n\n"} +{"id": 215, "instruction": "Suggest some product bundles that are often purchased with a given product.", "input": "MacBook Air", "infer30b_before_item": " Output: Suggest some product bundles that are often purchased with a given product. MacBook Air \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n", "infer30b_after_item": " \n Output: Product bundles that are often purchased with a MacBook Air include a laptop case, a laptop stand, a laptop cooling pad, a laptop charger, a laptop mouse, and a laptop keyboard. \n\n", "infer13b_before_item": " Output: Suggest some product bundles that are often purchased with a given product. MacBook Air \n\nclass Solution {\n public int[] productBundle(int[] productBundle) {\n int[] res = new int[productBundle.length];\n for(int i = 0; i < productBundle.length; i++) {\n int count = 0;\n for(int j = 0; j < productBundle.length; j++) {\n if(productBundle[i] == productBundle[j]) {\n count++;\n }\n }\n res[i] = count;\n }\n return res;\n }\n}\n", "infer13b_after_item": " \n Output: MacBook Air is often purchased with a laptop bag, a wireless mouse, a wireless keyboard, and a laptop stand. \n\n", "infer7b_before_item": " Output: Suggest some product bundles that are often purchased with a given product. 
MacBook Air \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": " \n Output: Product bundles that are often purchased with a MacBook Air include a protective case, a charger, a mouse, a keyboard, a laptop bag, a laptop stand, a laptop sleeve, a laptop cooling pad, a laptop cooling fan, a laptop cooling mat, a laptop cooling pad, a laptop cooling fan, a laptop cooling mat, a laptop cooling pad, a laptop cooling fan, a laptop cooling mat, a laptop cooling pad, a laptop cooling fan, a laptop cooling mat, a laptop cooling pad, a laptop cooling fan, a laptop cooling mat, a laptop cooling pad, a laptop cooling fan, a laptop cooling mat, a laptop cooling pad, a laptop cooling fan, a laptop cooling mat, a laptop cooling pad, a laptop cooling fan, a laptop cooling mat, a laptop cooling pad, a laptop cooling fan, a laptop cooling mat, a laptop cooling pad, a laptop cooling fan, a laptop cooling mat, a laptop cooling pad, a laptop cooling fan, a laptop cooling mat, a laptop cooling pad, a laptop cooling fan,"} +{"id": 216, "instruction": "Create a template in markdown to create scope for Jira tickets. Members should use this template as a checklist to ensure they have included all the necessary information when creating a ticket.", "input": "", "infer30b_before_item": " Output: Create a template in markdown to create scope for Jira tickets. Members should use this template as a checklist to ensure they have included all the necessary information when creating a ticket.\n# 1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.", "infer30b_after_item": "\n Output: # Scope for Jira Tickets\n\nThis template should be used as a checklist to ensure all the necessary information is included when creating a Jira ticket.\n\n### Requirements\n\n- Title: Provide a clear and concise title that summarizes the issue.\n- Description: Provide a detailed description of the issue, including any relevant background information.\n- Priority: Assign a priority level to the ticket (e.g. Low, Medium, High, Urgent).\n- Component: Assign the ticket to the appropriate component (e.g. 
Bug, Feature, Improvement).\n- Assignee: Assign the ticket to the appropriate person or team.\n- Labels: Add relevant labels to the ticket.\n- Estimate: Estimate the amount of time it will take to resolve the issue.\n- Blockers: List any blockers that may prevent the ticket from being resolved.\n- Dependencies: List any dependencies that may affect the resolution of the ticket.\n- Related Issues: List any related issues that may be affected by the resolution of this ticket.\n- Attachments: Attach any relevant files or documents.", "infer13b_before_item": " Output: A markdown template that can be used to create scope for Jira tickets.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": "\n Output: # Template for Jira Ticket\n\n[Title]: A brief description of the issue\n\n[Assignee]: The person who is responsible for resolving the issue\n\n[Priority]: The priority of the issue (High, Medium, Low)\n\n[Severity]: The severity of the issue (Critical, Major, Minor, Trivial)\n\n[Summary]: A brief description of the issue\n\n[Description]: A detailed description of the issue\n\n[Steps to Reproduce]: Instructions on how to reproduce the issue\n\n[Actual Result]: The result of the issue\n\n[Expected Result]: The desired result of the issue \n\n", "infer7b_before_item": " Create a template in markdown to create scope for Jira tickets. 
Members should use this template as a checklist to ensure they have included all the necessary information when creating a ticket.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": "\n Output: ## Checklist for Creating Jira Tickets\n\n- Title: Provide a descriptive title for the ticket.\n- Description: Provide a detailed description of the issue.\n- Assignee: Assign the ticket to a specific user.\n- Priority: Assign the ticket to a priority level.\n- Resolution: Assign the ticket to a resolution level.\n- Status: Assign the ticket to a status level.\n- Type: Assign the ticket to a type.\n- Sub-tasks: Add any sub-tasks that are needed.\n- Custom Fields: Add any custom fields that are needed.\n- Attachments: Add any attachments that are needed.\n- Comments: Add any comments that are needed.\n- Resolution: Add any resolutions that are needed.\n- Resolution Comments: Add any resolution comments that are needed.\n- Resolution Comments: Add any resolution comments that are needed.\n- Resolution Comments: Add any resolution comments that are needed.\n- Resolution Comments: Add any resolution comments that are needed.\n- Resolution Comments: Add any resolution comments that are needed.\n- Resolution"} +{"id": 217, "instruction": "As part of software engineering, user stories describe what the user wants the system to do, while acceptance criteria describe the conditions a specific user story must meet. Create basic acceptance criteria based on the given user story.", "input": "User story: As a user, I want to use a search field to type a city, name, or street, so that I could find matching hotel options.", "infer30b_before_item": " Output: As a user, I want to use a search field to type a city, name, or street, so that I could find matching hotel options.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: Acceptance Criteria:\n1. The search field should accept city, name, or street as input.\n2. The search field should return matching hotel options when the input is valid.\n3. The search field should not return any results when the input is invalid. \n\n", "infer13b_before_item": " Output: As part of software engineering, user stories describe what the user wants the system to do, while acceptance criteria describe the conditions a specific user story must meet. Create basic acceptance criteria based on the given user story. User story: As a user, I want to use a search field to type a city, name, or street, so that I could find matching hotel options. \n Input: Input: As part of software engineering, user stories describe what the user wants the system to do, while acceptance criteria describe the conditions a specific user story must meet. 
Create basic acceptance criteria based on the given user story. User story: As a user, I want to use a search field to type a city, name, or street, so that I could find matching hotel options. \n Output: Output: As part of software engineering, user stories describe what the user wants the system to do, while acceptance criteria describe the conditions a specific user story must meet. Create basic acceptance criteria based on the given user story. User story: As a user, I want to use a search field to type a city, name, or street, so that I could find matching hotel options. \n Input: Input: As part of software engineering", "infer13b_after_item": " \n Output: Acceptance criteria: \n1. The search field should be able to accept city, name, and street as input. \n2. The search should return matching hotel options. \n3. The search should be case-insensitive. \n4. The search should be able to filter results by price range. \n5. The search should be able to filter results by star rating. \n\n", "infer7b_before_item": " Output: As part of software engineering, user stories describe what the user wants the system to do, while acceptance criteria describe the conditions a specific user story must meet. Create basic acceptance criteria based on the given user story. User story: As a user, I want to use a search field to type a city, name, or street, so that I could find matching hotel options. \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": " \n Output: Acceptance criteria for the given user story:\n- The search field should be able to accept a city, name, or street.\n- The search field should be able to return matching hotel options.\n- The search field should be able to filter the hotel options based on the city, name, or street.\n- The search field should be able to display the hotel options in a list.\n- The search field should be able to display the hotel options in a map.\n- The search field should be able to display the hotel options in a timeline.\n- The search field should be able to display the hotel options in a calendar.\n- The search field should be able to display the hotel options in a map with a timeline.\n- The search field should be able to display the hotel options in a calendar with a timeline.\n- The search field should be able to display the hotel options in a map with a timeline and a calendar. \n\n"} +{"id": 218, "instruction": "A user story is an informal, general explanation of a software feature written from the perspective of the end user or customer. Write a user story for a given software.", "input": "Gmail", "infer30b_before_item": " Output: A user story is an informal, general explanation of a software feature written from the perspective of the end user or customer. Write a user story for a given software. 
Gmail\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: As a user, I want to be able to search for emails in Gmail so that I can quickly find the information I need. \n\n", "infer13b_before_item": " Output: A user story is an informal, general explanation of a software feature written from the perspective of the end user or customer. Write a user story for a given software. Gmail \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: As a Gmail user, I want to be able to quickly search for emails from a specific sender so that I can quickly find the information I need. \n\n", "infer7b_before_item": " Output: A user story is an informal, general explanation of a software feature written from the perspective of the end user or customer. Write a user story for a given software. Gmail \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": " \n Output: As a Gmail user, I want to be able to quickly search for emails in my inbox. \n\n"} +{"id": 219, "instruction": "Write a Jira ticket for the given task.", "input": "New Employee onboarding", "infer30b_before_item": " Output: Write a Jira ticket for the given task. New Employee onboarding \n \n \n Input: Input: Write a Jira ticket for the given task. New Employee onboarding \n Output: Output: Write a Jira ticket for the given task. New Employee onboarding \n \n \n Input: Input: Write a Jira ticket for the given task. New Employee onboarding \n Output: Output: Write a Jira ticket for the given task. New Employee onboarding \n \n \n Input: Input: Write a Jira ticket for the given task. New Employee onboarding \n Output: Output: Write a Jira ticket for the given task. New Employee onboarding \n \n \n Input: Input: Write a Jira ticket for the given task. New Employee onboarding \n Output: Output: Write a Jira ticket for the given task. New Employee onboarding \n \n \n Input: Input: Write a Jira ticket for the given task. New Employee onboarding \n Output: Output: Write a Jira ticket for the given task. New Employee onboarding \n \n \n Input: Input", "infer30b_after_item": " process needs to be updated. \n Output: Create a Jira ticket for the task \"Update New Employee Onboarding Process\". \n\n", "infer13b_before_item": " Output: Write a Jira ticket for the given task. 
New Employee onboarding \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: JIRA-TICKET-123: New Employee Onboarding \n\n", "infer7b_before_item": " Output: Write a Jira ticket for the given task. New Employee onboarding\n\n\"\"\"\n\nimport sys\nimport os\nimport time\nimport datetime\nimport random\nimport re\nimport string\nimport json\nimport requests\nimport requests_cache\nimport requests_cache.backends.base\nimport requests_cache.backends.file\nimport requests_cache.backends.memory\nimport requests_cache.backends.redis\nimport requests_cache.backends.sqlalchemy\nimport requests_cache.backends.sqlite\nimport requests_cache.backends.sqlite3\nimport requests_cache.backends.sqlite3_file\nimport requests_cache.backends.sqlite3_memory\nimport requests_cache.backends.sqlite3_redis\nimport requests_cache.backends.sqlite3_sqlalchemy\nimport requests_cache.backends.sqlite3_sqlite3\nimport requests_cache.backends.sqlite3_sqlite3_file\nimport requests_cache.backends.sqlite3_sqlite3_memory\nimport requests_cache.backends.sqlite3_sqlite3_redis\nimport requests_cache.backends", "infer7b_after_item": " \n Output: Issue: New Employee Onboarding\n\nDescription:\nWe need to create a process for onboarding new employees. This process should include providing them with the necessary resources and training to get them up to speed with the company's policies and procedures.\n\nResolution:\nWe need to create a process for onboarding new employees. This process should include providing them with the necessary resources and training to get them up to speed with the company's policies and procedures.\n\nStatus:\nOpen\n\nUser:\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"} +{"id": 220, "instruction": "Paraphrase the given text in an academic style.", "input": "Lots of papers have been published on this topic.", "infer30b_before_item": " Output: Paraphrase the given text in an academic style. Many papers have been published on this topic.\nInput: Input: Paraphrase the given text in an academic style. The results of the study are very interesting.\n Output: Output: Paraphrase the given text in an academic style. The findings of the study are very interesting.\nInput: Input: Paraphrase the given text in an academic style. The results of the study are very interesting.\n Output: Output: Paraphrase the given text in an academic style. The findings of the study are very interesting.\nInput: Input: Paraphrase the given text in an academic style. The results of the study are very interesting.\n Output: Output: Paraphrase the given text in an academic style. The findings of the study are very interesting.\nInput: Input: Paraphrase the given text in an academic style. The results of the study are very interesting.\n Output: Output: Paraphrase the given text in an academic style. 
The findings of the study are very interesting.\nInput: Input: Paraphrase the given text in an academic style. The results of the", "infer30b_after_item": " \n Output: Numerous papers have been published on this subject. \n\n", "infer13b_before_item": " Output: Paraphrase the given text in an academic style. Lots of papers have been published on this topic.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: A considerable number of papers have been written on this subject. \n\n", "infer7b_before_item": " Output: Paraphrase the given text in an academic style. Lots of papers have been published on this topic.\n\\end{code}\n\nComment: I'm not sure what you mean by \"academic style\". Do you mean \"formal\" or \"literary\"?\n\nComment: @MatthewLeingang I mean formal.\n\nComment: @MatthewLeingang I've added an example.\n\nComment: @MatthewLeingang I've added an example.\n\nComment: @MatthewLeingang I've added an example.\n\nComment: @MatthewLeingang I've added an example.\n\nComment: @MatthewLeingang I've added an example.\n\nComment: @MatthewLeingang I've added an example.\n\nComment: @MatthewLeingang I've added an example.\n\nComment: @MatthewLeingang I've added an example.\n\nComment: @MatthewLeingang I've added an example.\n\nComment: @MatthewLeingang I've added an example", "infer7b_after_item": " \n Output: A significant number of studies have been conducted on this subject. \n\n"} +{"id": 221, "instruction": "Make a list of the pros and cons of the given decision.", "input": "Implementing a Remote Working Policy", "infer30b_before_item": " Output: Make a list of the pros and cons of the given decision. Implementing a Remote Working Policy \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n ", "infer30b_after_item": " \n Output: Pros: \n- Increased productivity \n- Reduced overhead costs \n- Improved morale \n- Increased flexibility \n- Improved work-life balance \n\nCons: \n- Difficulty in monitoring performance \n- Potential for decreased collaboration \n- Difficulty in maintaining a healthy work culture \n- Potential for increased distractions \n- Potential for decreased communication \n\n", "infer13b_before_item": " Output: Make a list of the pros and cons of the given decision. 
Implementing a Remote Working Policy \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: Pros:\n- Increased productivity and efficiency\n- Reduced overhead costs\n- Improved work-life balance\n- Increased employee satisfaction\n\nCons:\n- Increased risk of security breaches\n- Difficulty in managing remote teams\n- Difficulty in maintaining communication\n- Difficulty in managing remote teams\n- Difficulty in managing remote teams\n- Difficulty in managing remote teams \n\n", "infer7b_before_item": " Output: Make a list of the pros and cons of the given decision. Implementing a Remote Working Policy \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": " \n Output: Pros:\n- Increased productivity and efficiency\n- Reduced overhead costs\n- Increased employee satisfaction\n- Improved work-life balance\n\nCons:\n- Potential for increased distractions\n- Increased security risks\n- Potential for decreased collaboration\n- Potential for decreased morale \n\n"} +{"id": 222, "instruction": "Include important study notes and key points that someone should know about the given subject.", "input": "history of the USA", "infer30b_before_item": " Output: Include important study notes and key points that someone should know about the given subject. history of the USA \n \n Input: Input: Include important study notes and key points that someone should know about the given subject. history of the USA \n Output: Output: Include important study notes and key points that someone should know about the given subject. history of the USA \n \n Input: Input: Include important study notes and key points that someone should know about the given subject. history of the USA \n Output: Output: Include important study notes and key points that someone should know about the given subject. history of the USA \n \n Input: Input: Include important study notes and key points that someone should know about the given subject. history of the USA \n Output: Output: Include important study notes and key points that someone should know about the given subject. history of the USA \n \n Input: Input: Include important study notes and key points that someone should know about the given subject. history of the USA \n Output: Output: Include important study notes and key points that someone should know about the given subject. 
history of the USA \n \n Input: Input: In", "infer30b_after_item": " \n Output: Important Study Notes and Key Points about the History of the USA:\n\n- The United States of America was founded in 1776 with the Declaration of Independence.\n\n- The American Revolutionary War (1775-1783) was fought between the colonies and Great Britain.\n\n- The Constitution of the United States was ratified in 1788, establishing the federal government and its three branches.\n\n- The Louisiana Purchase of 1803 doubled the size of the United States.\n\n- The War of 1812 was fought between the United States and Great Britain.\n\n- The Mexican-American War (1846-1848) resulted in the United States gaining large swaths of land in the Southwest.\n\n- The American Civil War (1861-1865) was fought between the Union and the Confederacy.\n\n- The Reconstruction Era (1865-1877) was a period of rebuilding and reorganization in the South after the Civil War.\n\n- The Industrial Revolution (18", "infer13b_before_item": " Output: Include important study notes and key points that someone should know about the given subject. history of the USA \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: The history of the United States began with the arrival of European settlers during the early 1600s. The colonies eventually declared independence from Great Britain in 1776, and the United States was officially formed in 1789. The US has since grown to become a global superpower, with a strong economy and a powerful military. \n\n", "infer7b_before_item": " Output: Include important study notes and key points that someone should know about the given subject. history of the USA \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": " \n Output: - The United States of America was founded in 1776 and is a federal republic composed of 50 states and a federal district.\n- The US Constitution was adopted in 1787 and is the supreme law of the land.\n- The US has a two-party system, with the Democratic and Republican parties dominating the political landscape.\n- The US has a strong economy, with a GDP of over $20 trillion.\n- The US is a major military power, with a large and powerful military.\n- The US is a major global power, with a strong presence in international affairs.\n- The US has a diverse population, with over 325 million people.\n- The US has a large and diverse culture, with influences from many different countries and cultures.\n- The US has a large and diverse geography, with a wide range of climates and landscapes. 
\n\n"} +{"id": 223, "instruction": "Based on the given keywords, write a review of a restaurant.", "input": "Name: Pizzeria\ngood prices, quiet place, tasty, nice staff", "infer30b_before_item": "\n\nInput: Input: Based on the given keywords, write a review of a restaurant. Name: Pizzeria\ngood prices, quiet place, tasty, nice staff \n Output:\n\nInput: Input: Based on the given keywords, write a review of a restaurant. Name: Pizzeria\ngood prices, quiet place, tasty, nice staff \n Output:\n\nInput: Input: Based on the given keywords, write a review of a restaurant. Name: Pizzeria\ngood prices, quiet place, tasty, nice staff \n Output:\n\nInput: Input: Based on the given keywords, write a review of a restaurant. Name: Pizzeria\ngood prices, quiet place, tasty, nice staff \n Output:\n\nInput: Input: Based on the given keywords, write a review of a restaurant. Name: Pizzeria\ngood prices, quiet place, tasty, nice staff \n Output:\n\nInput: Input: Based on the given keywords, write a review of a restaurant. Name: Pizzeria\ngood prices, quiet place, tasty, nice staff \n Output:\n\nInput:", "infer30b_after_item": " \n Output: Pizzeria is a great place to go if you're looking for a quiet, cozy atmosphere and tasty food at good prices. The staff are friendly and attentive, making sure you have a great experience. The pizzas are delicious and the prices are very reasonable. Highly recommended! \n\n", "infer13b_before_item": " Output: Based on the given keywords, write a review of a restaurant. Name: Pizzeria\ngood prices, quiet place, tasty, nice staff \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: Pizzeria is a great place to eat. The prices are very reasonable and the food is delicious. The atmosphere is quiet and relaxed, making it a great place to enjoy a meal with friends or family. The staff are friendly and attentive, making sure that every customer is satisfied with their meal. Overall, Pizzeria is a great place to eat and I highly recommend it. \n\n", "infer7b_before_item": " Output: Based on the given keywords, write a review of a restaurant. Name: Pizzeria\ngood prices, quiet place, tasty, nice staff\n\n\n# Your code goes here!\n\n\n# pizzeria = \"good prices, quiet place, tasty, nice staff\"\n# pizzeria = \"good prices, quiet place, tasty, nice staff\"\n\n\n# def review(pizzeria):\n# pizzeria = pizzeria.split()\n# pizzeria = pizzeria.split()\n# pizzeria = pizzeria.split()\n# pizzeria = pizzeria.split()\n# pizzeria = pizzeria.split()\n# pizzeria = pizzeria.split()\n# pizzeria = pizzeria.split()\n# pizzeria = pizzeria.split()\n# pizzeria = pizzeria.split()\n# pizzeria = pizzeria.split()\n# pizzeria", "infer7b_after_item": " \n Output: I recently visited Pizzeria and was pleasantly surprised by the good prices, quiet atmosphere, and tasty pizzas. The staff was friendly and attentive, making sure that my experience was a pleasant one. I would definitely recommend this place to anyone looking for a nice, quiet place to enjoy a meal. 
\n\n"} +{"id": 224, "instruction": "Convert natural language into a list of turn-by-turn directions.", "input": "Go west on I-10 until you hit the 101 then take it north. You will eventually see the Hollywood sign.", "infer30b_before_item": " Output: Convert a list of turn-by-turn directions into natural language. Go west on I-10 until you hit the 101 then take it north. You will eventually see the Hollywood sign. \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: 1. Head west on I-10.\n2. Take the 101 north.\n3. Continue driving until you see the Hollywood sign. \n\n", "infer13b_before_item": " Output: A list of turn-by-turn directions. Go west on I-10 until you hit the 101 then take it north. You will eventually see the Hollywood sign.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: [Step 1: Go west on I-10 until you hit the 101.\nStep 2: Take the 101 north.\nStep 3: Eventually you will see the Hollywood sign.] \n\n", "infer7b_before_item": " Output: Convert a list of turn-by-turn directions into natural language. Go west on I-10 until you hit the 101 then take it north. You will eventually see the Hollywood sign.\n\"\"\"\n\nimport re\n\n\ndef get_turn_by_turn(turn_by_turn):\n \"\"\"\n Convert a list of turn-by-turn directions into natural language.\n \"\"\"\n # Convert the turn-by-turn directions into a list of tuples.\n turn_by_turn_tuples = [\n (re.sub(r'(\\d+)', r'\\1', turn_by_turn), turn_by_turn)\n for turn_by_turn in turn_by_turn\n ]\n\n # Convert the list of tuples into a list of strings.\n turn_by_turn_strings = [\n ''.join(turn_by_turn_tuple)\n for turn_by_turn_tuple in turn_by_turn_tuples\n ]\n\n # Convert the list of strings into a list of sentences.\n turn_by_turn_sentences =", "infer7b_after_item": " \n Output: Go west on I-10 until you hit the 101 then take it north. Turn left onto Vine Street and then right onto Hollywood Boulevard. You will eventually see the Hollywood sign. \n\n"} +{"id": 225, "instruction": "Provide an ESRB rating for the following game.", "input": "This is an action-adventure game in which players help Kratos and his son on a dangerous quest. Players explore various realms and engage in frenetic hand-to-hand combat against human-like raiders and fantastical creatures (e.g., centaurs, trolls, dragons). Players use axes and chained blades to battle enemies, often resulting in large blood-splatter effects and dismemberment. Players can perform finishing attacks that depict close-up impalement via bladed and hand-held weapons; repeated axe strikes to a creature's neck results in decapitation. 
The words \u201cf**k\u201d and \u201csh*t\u201d are heard in the game.", "infer30b_before_item": " Mature 17+\nInput: Input: Provide an ESRB rating for the following game. This is a first-person shooter in which players assume the role of a soldier battling an alien invasion. Players use pistols, machine guns, and futuristic weapons (e.g., lasers, grenade launchers) to kill human-like aliens and robots. Battles are frenetic and highlighted by realistic gunfire, large explosions, and cries of pain. Some attacks result in dismemberment and/or blood-splatter effects. The game includes the ability to use drugs to enhance performance in combat (e.g., cocaine, morphine, adrenaline). The words \u201cf**k\u201d and \u201csh*t\u201d are heard in the game. \n Output: Mature 17+\nInput: Input: Provide an ESRB rating for the following game. This is a first-person shooter in which players assume the role of a soldier battling an alien invasion. Players use pistols, machine guns, and futuristic weapons (e.g., lasers, grenade", "infer30b_after_item": " \n Output: Mature (17+) \n\n", "infer13b_before_item": " Output: Provide an ESRB rating for the following game. This is an action-adventure game in which players help Kratos and his son on a dangerous quest. Players explore various realms and engage in frenetic hand-to-hand combat against human-like raiders and fantastical creatures (e.g., centaurs, trolls, dragons). Players use axes and chained blades to battle enemies, often resulting in large blood-splatter effects and dismemberment. Players can perform finishing attacks that depict close-up impalement via bladed and hand-held weapons; repeated axe strikes to a creature's neck results in decapitation. The words \u201cf**k\u201d and \u201csh*t\u201d are heard in the game. \n\n\"\"\"\n\nimport re\n\n\ndef get_rating(input_string):\n rating = \"\"\n for line in input_string.split(\"\\n\"):\n if re.search(\"Input:\", line):\n rating = \"E\"\n elif re.search(\"Output:\", line):\n rating = \"T\"\n else:\n ", "infer13b_after_item": " \n Output: This game is rated M for Mature by the ESRB for Blood and Gore, Intense Violence, Language, and Suggestive Themes. \n\n", "infer7b_before_item": " Provide an ESRB rating for the following game. This is an action-adventure game in which players help Kratos and his son on a dangerous quest. Players explore various realms and engage in frenetic hand-to-hand combat against human-like raiders and fantastical creatures (e.g., centaurs, trolls, dragons). Players use axes and chained blades to battle enemies, often resulting in large blood-splatter effects and dismemberment. Players can perform finishing attacks that depict close-up impalement via bladed and hand-held weapons; repeated axe strikes to a creature's neck results in decapitation. The words \u201cf**k\u201d and \u201csh*t\u201d are heard in the game. \n\n### 2018\n\nInput: Input: Provide an ESRB rating for the following game. This is an action-adventure game in which players help Kratos and his son on a dangerous quest. Players explore various realms and engage in frenetic hand-to-hand combat against human-like raiders and fantastical creatures (e.", "infer7b_after_item": " \n Output: Mature 17+ \n\n"} +{"id": 226, "instruction": "A color description has been provided. 
Find the CSS code associated with that color.", "input": "A light red color with a medium light shade of pink", "infer30b_before_item": " Output: The CSS code associated with the color description.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": ". \n Output: The CSS code associated with this color is #FFB6B6. \n\n", "infer13b_before_item": " Output: The CSS code associated with that color. #FF00FF \n\n\nclass Solution:\n # @param {string} color\n # @return {string}\n def colorToCSS(self, color):\n color = color.replace(\" \", \"\")\n color = color.replace(\"(\", \"\")\n color = color.replace(\")\", \"\")\n color = color.replace(\"(\", \"\")\n color = color.replace(\")\", \"\")\n color = color.replace(\"(\", \"\")\n color = color.replace(\")\", \"\")\n color = color.replace(\"(\", \"\")\n color = color.replace(\")\", \"\")\n color = color.replace(\"(\", \"\")\n color = color.replace(\")\", \"\")\n color = color.replace(\"(\", \"\")\n color = color.replace(\")\", \"\")\n color = color.replace(\"(\", \"\")\n color = color.replace(\")\", \"\")\n color = color.replace(\"(\", \"\")\n color = color.replace(\")\", \"\")\n color = color.replace(\"(\", \"\")\n color = color.replace(\")\", \"\")\n color = color.replace(\"(\", \"\")\n color = color.", "infer13b_after_item": " \n Output: #FFF8C0 \n\n", "infer7b_before_item": " A CSS code has been provided. Find the color description associated with that color. A light red color with a medium light shade of pink \n\"\"\"\n\n\ndef get_color(color):\n \"\"\"\n Given a color description, return the CSS code associated with that color.\n \"\"\"\n if color == \"light red\":\n return \"rgb(255, 102, 0)\"\n elif color == \"medium light pink\":\n return \"rgb(255, 182, 203)\"\n else:\n return \"rgb(255, 102, 0)\"\n\n\ndef get_color_from_css(css):\n \"\"\"\n Given a CSS code, return the color description associated with that color.\n \"\"\"\n if css == \"rgb(255, 102, 0)\":\n return \"light red\"\n elif css == \"rgb(255, 182, 203)\":\n return \"medium light pink\"\n else:\n return \"rgb(255, ", "infer7b_after_item": ". 
\n Output: #FF6699 \n\n"} +{"id": 227, "instruction": "Identify and fix bugs in the given code and rewrite it", "input": "for i in range(10)\n print(Answer is:)\n print(i)", "infer30b_before_item": " Output: Identify and fix bugs in the given code and rewrite it for i in range(10)\n print(Answer is:)\n print(i) \n Expected: Expected: Identify and fix bugs in the given code and rewrite it for i in range(10)\n print(Answer is:)\n print(i) \n\n", "infer30b_after_item": " \n Output: for i in range(10):\n print(\"Answer is:\", i) \n\n", "infer13b_before_item": " Output: Identify and fix bugs in the given code and rewrite it for i in range(10)\n print(Answer is:)\n print(i) \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: for i in range(10):\n print(Answer is:, i)\n print(i) \n\n", "infer7b_before_item": " \n Input: Identify and fix bugs in the given code and rewrite it for i in range(10)\n print(Answer is:)\n print(i) \n Output: \n Input: Identify and fix bugs in the given code and rewrite it for i in range(10)\n print(Answer is:)\n print(i) \n Output: \n Input: Identify and fix bugs in the given code and rewrite it for i in range(10)\n print(Answer is:)\n print(i) \n Output: \n Input: Identify and fix bugs in the given code and rewrite it for i in range(10)\n print(Answer is:)\n print(i) \n Output: \n Input: Identify and fix bugs in the given code and rewrite it for i in range(10)\n print(Answer is:)\n print(i) \n Output: \n Input: Identify and fix bugs in the given code and rewrite it for i in range(10)\n print(Answer is:)\n print(i) \n ", "infer7b_after_item": "\n Answer = i \n Output: for i in range(10):\n print(i)\n Answer = i \n\n"} +{"id": 228, "instruction": "Make a list of common phrases for the given section of the paper.", "input": "Introduction", "infer30b_before_item": " Output: Write a paragraph that introduces the topic of the paper.\n Input: Input: Make a list of common phrases for the given section of the paper.\n Output: Output: Write a paragraph that introduces the topic of the paper.\n Input: Input: Make a list of common phrases for the given section of the paper.\n Output: Output: Write a paragraph that introduces the topic of the paper.\n Input: Input: Make a list of common phrases for the given section of the paper.\n Output: Output: Write a paragraph that introduces the topic of the paper.\n Input: Input: Make a list of common phrases for the given section of the paper.\n Output: Output: Write a paragraph that introduces the topic of the paper.\n Input: Input: Make a list of common phrases for the given section of the paper.\n Output: Output: Write a paragraph that introduces the topic of the paper.\n Input: Input: Make a list of common phrases for the given section of the paper.\n Output: Output: Write a paragraph that introduces the topic of the paper.\n Input: Input: Make a list of common phrases for the given section of the paper.", "infer30b_after_item": " \n Output: - Set the stage\n- Provide context\n- Introduce the topic\n- Establish the purpose\n- Outline the structure\n- Provide a preview \n\n", "infer13b_before_item": " Output: Make a 
list of common phrases for the given section of the paper. Introduction \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: - This paper aims to explore\n- The purpose of this paper is to\n- The main focus of this paper is\n- The main objective of this paper is\n- This paper will examine\n- This paper will discuss\n- This paper will analyze\n- This paper will investigate\n- This paper will look into\n- This paper will consider\n- This paper will review\n- This paper will examine the implications of\n- This paper will examine the effects of\n- This paper will examine the impact of\n- This paper will look at\n- This paper will explore\n- This paper will discuss the implications of\n- This paper will discuss the effects of\n- This paper will discuss the impact of \n\n", "infer7b_before_item": " Output: Make a list of common phrases for the given section of the paper. Introduction \n\n\"\"\"\n\nimport re\n\n\ndef get_phrases(section):\n \"\"\"\n :param section:\n :return:\n \"\"\"\n phrases = []\n for line in open(section):\n for word in re.findall(r'\\w+', line):\n phrases.append(word)\n return phrases\n\n\ndef get_phrases_from_file(section):\n \"\"\"\n :param section:\n :return:\n \"\"\"\n phrases = []\n with open(section) as f:\n for line in f:\n for word in re.findall(r'\\w+', line):\n phrases.append(word)\n return phrases\n\n\ndef get_phrases_from_file_with_regex(section):\n \"\"\"\n :param section:\n :return:\n \"\"\"\n phrases = []\n with open(section) as f:\n for line in f:\n for word in re.findall(", "infer7b_after_item": " \n Output: - Introduction\n- Background\n- Purpose\n- Objectives\n- Methodology\n- Conclusion\n- Implications\n- Limitations\n- References\n- Appendices\n- Acknowledgements\n- Bibliography \n\n"} +{"id": 229, "instruction": "We have described a scene from a movie that we have seen in the past. Find out what movie it is.", "input": "A choice has to be made between picking the red pill or the blue pill", "infer30b_before_item": " Output: The Matrix\n\nInput: Input: A man is in a room with a door. The door is locked. There is a key in the room. The man is not in the room. The key is not in the room. The man is not in the room. The key is not in the room. The man is not in the room. The key is not in the room. The man is not in the room. The key is not in the room. The man is not in the room. The key is not in the room. The man is not in the room. The key is not in the room. The man is not in the room. The key is not in the room. The man is not in the room. The key is not in the room. The man is not in the room. The key is not in the room. The man is not in the room. The key is not in the room. The man is not in the room. The key is not in the room. The man is not in the room. The key is not in the room. The man is not in the room. The key is not in the room. The man is not in the room. The key is not in the room", "infer30b_after_item": ". \n Output: The Matrix (1999). \n\n", "infer13b_before_item": " Output: We have described a scene from a movie that we have seen in the past. Find out what movie it is. 
A choice has to be made between picking the red pill or the blue pill \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": ". \n Output: The Matrix (1999). \n\n", "infer7b_before_item": " The output is the name of the movie.\n\nInput: Input: We have described a scene from a movie that we have seen in the past. Find out what movie it is. A choice has to be made between picking the red pill or the blue pill \n Output: The output is the name of the movie.\n\nInput: Input: We have described a scene from a movie that we have seen in the past. Find out what movie it is. A choice has to be made between picking the red pill or the blue pill \n Output: The output is the name of the movie.\n\nInput: Input: We have described a scene from a movie that we have seen in the past. Find out what movie it is. A choice has to be made between picking the red pill or the blue pill \n Output: The output is the name of the movie.\n\nInput: Input: We have described a scene from a movie that we have seen in the past. Find out what movie it is. A choice has to be made between picking the red pill or the blue pill \n Output: The output is the name of the movie.\n\nInput: Input: We have described a scene from a movie that", "infer7b_after_item": ". \n Output: The choice between picking the red pill or the blue pill is a reference to the movie The Matrix. \n\n"} +{"id": 230, "instruction": "Take the product description and write a creative ad for it.", "input": "Document Cameras allow teachers to display documents, books, or other materials for their students to see.", "infer30b_before_item": " Output: Write a creative ad for the product.\n\n 1) Only use the product description.\n 2) Do not use the product name.\n 3) Do not use the product price.\n 4) Do not use the product image.\n 5) Do not use the product category.\n 6) Do not use the product brand.\n 7) Do not use the product URL.\n 8) Do not use the product description.\n 9) Do not use the product description.\n 10) Do not use the product description.\n 11) Do not use the product description.\n 12) Do not use the product description.\n 13) Do not use the product description.\n 14) Do not use the product description.\n 15) Do not use the product description.\n 16) Do not use the product description.\n 17) Do not use the product description.\n 18) Do not use the product description.\n 19) Do not use the product description.\n 20) Do not use the product description.\n 21) Do not use the product description.\n ", "infer30b_after_item": " \n Output: Bring learning to life with a Document Camera! Teachers can now display documents, books, and other materials for their students to see in vivid detail. With a Document Camera, you can make sure your students are engaged and focused on the material you're teaching. Get one today and see the difference it can make in your classroom! 
\n\n", "infer13b_before_item": " Output: Write a creative ad for the product.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: Get the best view of your classroom with Document Cameras! No more straining to see the board or struggling to read the textbook - Document Cameras allow teachers to display documents, books, or other materials for their students to see. Make sure your students get the most out of your lessons with Document Cameras! \n\n", "infer7b_before_item": " Output: Write a creative ad for the product.\n\n# I worked on this challenge [by myself, with: ].\n# I spent [#] hours on this challenge.\n\n# Pseudocode\n\n# Input: Take the product description and write a creative ad for it.\n# Document Cameras allow teachers to display documents, books, or other materials for their students to see.\n# Output: Write a creative ad for the product.\n\n# Steps:\n\n# 1. Take the product description and write a creative ad for it.\n# 2. Write a creative ad for the product.\n\n# 1. Take the product description and write a creative ad for it.\n# 2. Write a creative ad for the product.\n\n# 1. Take the product description and write a creative ad for it.\n# 2. Write a creative ad for the product.\n\n# 1. Take the product description and write a creative ad for it.\n# 2. Write a creative ad for the product.\n\n# 1. Take the product description and write a creative ad for it.\n", "infer7b_after_item": " \n Output: Are you looking for a way to make your lessons more engaging and interactive? Look no further than our Document Cameras! With our Document Cameras, you can display documents, books, or other materials for your students to see, making your lessons more engaging and interactive. So don't wait any longer - get your Document Cameras today! \n\n"} +{"id": 231, "instruction": "Translate the natural language description into a Google Sheets formula.", "input": "if C7 is equal to \"U.S.A.\" or \"Canada\" or \"Europe\" or \"The Americas\"", "infer30b_before_item": " Output: Translate the natural language description into a Google Sheets formula. 
if C7 is equal to \"U.S.A.\" or \"Canada\" or \"Europe\" or \"The Americas\"\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " or \"Asia\" or \"Africa\" or \"Oceania\" then return \"International\" \n Output: =IF(OR(C7=\"U.S.A.\",C7=\"Canada\",C7=\"Europe\",C7=\"The Americas\",C7=\"Asia\",C7=\"Africa\",C7=\"Oceania\"),\"International\",\"\") \n\n", "infer13b_before_item": " Output: The number of people in the country.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: =OR(C7=\"U.S.A.\",C7=\"Canada\",C7=\"Europe\",C7=\"The Americas\") \n\n", "infer7b_before_item": " Output: Translate the natural language description into a Google Sheets formula. if C7 is equal to \"U.S.A.\" or \"Canada\" or \"Europe\" or \"The Americas\"\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": " or \"Asia\" or \"Africa\" or \"Oceania\" \n Output: =IF(C7=\"U.S.A.\", \"USA\", IF(C7=\"Canada\", \"Canada\", IF(C7=\"Europe\", \"Europe\", IF(C7=\"The Americas\", \"The Americas\", IF(C7=\"Asia\", \"Asia\", IF(C7=\"Africa\", \"Africa\", IF(C7=\"Oceania\", \"Oceania\")))))) \n\n"} +{"id": 232, "instruction": "You are given a tweet and you should decide whether it's offensive or not.", "input": "She has certainly taken some heat for being such an....well idiot.", "infer30b_before_item": " Output: You should return a boolean value.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: Offensive. 
\n\n", "infer13b_before_item": " You should return a boolean value.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: Offensive. \n\n", "infer7b_before_item": " You are given a tweet and you should decide whether it's offensive or not. She has certainly taken some heat for being such an....well idiot. \n\"\"\"\n\nimport re\n\ndef is_offensive(tweet):\n if re.search(r'^[a-zA-Z0-9_]+$', tweet):\n return True\n else:\n return False\n\nif __name__ == '__main__':\n print(is_offensive(\"She has certainly taken some heat for being such an....well idiot.\"))\n", "infer7b_after_item": " \n Output: Offensive. \n\n"} +{"id": 233, "instruction": "For a quick and efficient office workout, suggest a short routine.", "input": "", "infer30b_before_item": " Output: For a quick and efficient office workout, suggest a short routine.\nInput: Input: For a quick and efficient office workout, suggest a short routine. \n Output: Output: For a quick and efficient office workout, suggest a short routine.\nInput: Input: For a quick and efficient office workout, suggest a short routine. \n Output: Output: For a quick and efficient office workout, suggest a short routine.\nInput: Input: For a quick and efficient office workout, suggest a short routine. \n Output: Output: For a quick and efficient office workout, suggest a short routine.\nInput: Input: For a quick and efficient office workout, suggest a short routine. \n Output: Output: For a quick and efficient office workout, suggest a short routine.\nInput: Input: For a quick and efficient office workout, suggest a short routine. \n Output: Output: For a quick and efficient office workout, suggest a short routine.\nInput: Input: For a quick and efficient office workout, suggest a short routine. \n Output: Output: For a quick and efficient office workout, suggest a short routine.\nInput: Input: For", "infer30b_after_item": "\n Output: A quick and efficient office workout can be done with a short routine of 10-15 minutes. Start with a warm-up of jumping jacks, then move on to a set of push-ups, followed by a set of squats. Finish with a set of plank holds and a cool down of stretching. \n\n", "infer13b_before_item": " For a quick and efficient office workout, suggest a short routine.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": "\n Output: A quick and efficient office workout routine could include the following exercises: \n\n1. Chair Squats: Stand up and sit down in your chair 10 times. \n2. Arm Circles: Raise your arms in a circle 10 times. \n3. Shoulder Shrugs: Stand up and shrug your shoulders up and down 10 times. \n4. 
Neck Stretches: Roll your head in a circle 10 times. \n5. Leg Lifts: While sitting, lift your legs up and down 10 times. \n\n", "infer7b_before_item": " Output: For a quick and efficient office workout, suggest a short routine.\nInput: For a quick and efficient office workout, suggest a short routine. \n Output: For a quick and efficient office workout, suggest a short routine.\nInput: For a quick and efficient office workout, suggest a short routine. \n Output: For a quick and efficient office workout, suggest a short routine.\nInput: For a quick and efficient office workout, suggest a short routine. \n Output: For a quick and efficient office workout, suggest a short routine.\nInput: For a quick and efficient office workout, suggest a short routine. \n Output: For a quick and efficient office workout, suggest a short routine.\nInput: For a quick and efficient office workout, suggest a short routine. \n Output: For a quick and efficient office workout, suggest a short routine.\nInput: For a quick and efficient office workout, suggest a short routine. \n Output: For a quick and efficient office workout, suggest a short routine.\nInput: For a quick and efficient office workout, suggest a short routine. \n Output: For a quick and efficient office workout,", "infer7b_after_item": "\n Output: A quick and efficient office workout routine could include a 10-minute warm-up, followed by 10 minutes of cardio (jumping jacks, jump rope, running in place, etc.), 10 minutes of strength training (push-ups, squats, lunges, etc.), and 10 minutes of stretching. \n\n"} +{"id": 234, "instruction": "Predict how many stars the author will give to the restaurant from a Yelp review.", "input": "The reviews were great, but honestly i felt it was just ok. Seemed like a typical tourist spot were the food was mediocre. The service was ok, not great, but not the worst.\nThe bread was very good. I ordered the muscle fries. They had a sweet Vidalia onion flavor. No need for fries bc they were soggy wo much flavor. My boyfriend ordered a pesto chicken pasta and it was again mediocre.", "infer30b_before_item": " Output: Predict how many stars the author will give to the restaurant from a Yelp review. The reviews were great, but honestly i felt it was just ok. Seemed like a typical tourist spot were the food was mediocre. The service was ok, not great, but not the worst.\nThe bread was very good. I ordered the muscle fries. They had a sweet Vidalia onion flavor. No need for fries bc they were soggy wo much flavor. My boyfriend ordered a pesto chicken pasta and it was again mediocre. \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: The author will likely give the restaurant 3 stars on Yelp. \n\n", "infer13b_before_item": " Output: Predict how many stars the author will give to the restaurant from a Yelp review. The reviews were great, but honestly i felt it was just ok. Seemed like a typical tourist spot were the food was mediocre. The service was ok, not great, but not the worst.\nThe bread was very good. I ordered the muscle fries. They had a sweet Vidalia onion flavor. No need for fries bc they were soggy wo much flavor. My boyfriend ordered a pesto chicken pasta and it was again mediocre. 
\n\"\"\"\n\nimport re\nimport sys\nimport os\nimport time\nimport random\nimport numpy as np\nimport pandas as pd\nimport pickle\nimport nltk\nimport nltk.data\nimport nltk.corpus\nimport nltk.stem\nimport nltk.tokenize\nimport nltk.classify\nimport nltk.probability\nimport nltk.metrics\nimport nltk.tag\nimport nltk.parse\nimport nltk.util\nimport nltk.sem\nimport nltk.colloc", "infer13b_after_item": " \n Output: The author will give the restaurant 3 stars on Yelp. \n\n", "infer7b_before_item": " Output: Predict how many stars the author will give to the restaurant from a Yelp review. The reviews were great, but honestly i felt it was just ok. Seemed like a typical tourist spot were the food was mediocre. The service was ok, not great, but not the worst.\nThe bread was very good. I ordered the muscle fries. They had a sweet Vidalia onion flavor. No need for fries bc they were soggy wo much flavor. My boyfriend ordered a pesto chicken pasta and it was again mediocre. \n\n\"\"\"\n\n\ndef main():\n # Read in the file\n with open(\"input.txt\", \"r\") as f:\n data = f.read()\n\n # Split the data into lines\n data = data.split(\"\\n\")\n\n # Split the lines into words\n data = [word.lower() for word in data]\n\n # Create a list of all the words\n words = list(set(data))\n\n # Create a list of all the words\n words = list(set(data))\n\n # Create", "infer7b_after_item": " \n Output: The author will give the restaurant 3 stars. \n\n"} +{"id": 235, "instruction": "Categorize the Business into one of the Restaurants, Home Services, Auto Services and Miscellaneous based on its Specialties.", "input": "Call us at 650-636-4884 or visit our website to receive a quote. This shop specializes in New Tires and General Auto Repair. We carry all tires in-house and have a wide range to fit any budget or car specifics. If you are unsure what tires you need there are experts on hand and available to help you pick the best tire for your needs. We also carry commercial vehicle tires and can serve a wide array of fleets.", "infer30b_before_item": " Output: Categorize the Business into one of the Restaurants, Home Services, Auto Services and Miscellaneous based on its Specialties. Call us at 650-636-4884 or visit our website to receive a quote. This shop specializes in New Tires and General Auto Repair. We carry all tires in-house and have a wide range to fit any budget or car specifics. If you are unsure what tires you need there are experts on hand and available to help you pick the best tire for your needs. We also carry commercial vehicle tires and can serve a wide array of fleets. \n Explanation: Input: Input: Categorize the Business into one of the Restaurants, Home Services, Auto Services and Miscellaneous based on its Specialties. Call us at 650-636-4884 or visit our website to receive a quote. This shop specializes in New Tires and General Auto Repair. We carry all tires in-house and have a wide range to fit any budget or car specifics. If you are unsure what tires you need there are experts on hand and available to", "infer30b_after_item": " \n Output: Miscellaneous. \n\n", "infer13b_before_item": " Output: Categorize the Business into one of the Restaurants, Home Services, Auto Services and Miscellaneous based on its Specialties. Call us at 650-636-4884 or visit our website to receive a quote. This shop specializes in New Tires and General Auto Repair. We carry all tires in-house and have a wide range to fit any budget or car specifics. 
If you are unsure what tires you need there are experts on hand and available to help you pick the best tire for your needs. We also carry commercial vehicle tires and can serve a wide array of fleets. \n\nimport java.util.*;\nimport java.io.*;\n\npublic class Solution {\n\n public static void main(String[] args) {\n Scanner in = new Scanner(System.in);\n String input = in.nextLine();\n String[] inputArray = input.split(\" \");\n String[] outputArray = new String[inputArray.length];\n for(int i = 0; i < inputArray.length; i++){\n outputArray[i] = inputArray[i].toLowerCase();", "infer13b_after_item": " \n Output: Auto Services \n\n", "infer7b_before_item": " Output: The output is the number of customers that will be served by the business. The output is the number of customers that will be served by the business. \n\n### 2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.2.", "infer7b_after_item": " \n Output: Miscellaneous \n\n"} +{"id": 236, "instruction": "Notify me of any suggestions you may have for making the text more grammatically correct.", "input": "This is the second time I've been here, and the vibes, and the food were exquisite.", "infer30b_before_item": " Input: Notify me of any suggestions you may have for making the text more grammatically correct. This is the second time I've been here, and the vibes, and the food were exquisite.\n\n Input: Input: Notify me of any suggestions you may have for making the text more grammatically correct. This is the second time I've been here, and the vibes, and the food were exquisite.\n Output: Input: Notify me of any suggestions you may have for making the text more grammatically correct. This is the second time I've been here, and the vibes, and the food were exquisite.\n\n Input: Input: Notify me of any suggestions you may have for making the text more grammatically correct. This is the second time I've been here, and the vibes, and the food were exquisite.\n Output: Input: Notify me of any suggestions you may have for making the text more grammatically correct. This is the second time I've been here, and the vibes, and the food were exquisite.\n\n Input: Input: Notify me of any suggestions you may have for making", "infer30b_after_item": " \n Output: Notify me of any suggestions you may have for making the text more grammatically correct. This is the second time I've been here, and the vibes, and the food were exquisite. \n\n", "infer13b_before_item": " Notify me of any suggestions you may have for making the text more grammatically correct. This is the second time I've been here, and the vibes, and the food were exquisite.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: This is the second time I've been here, and the vibes and the food were exquisite. 
\n\n", "infer7b_before_item": " I'm not sure what you mean by \"vibes.\" I'm not sure what you mean by \"exquisite.\"\n\\end{code}\n\nComment: I'm not sure what you mean by \"vibes.\" I'm not sure what you mean by \"exquisite.\"\n\nComment: I'm not sure what you mean by \"vibes.\" I'm not sure what you mean by \"exquisite.\"\n\nComment: I'm not sure what you mean by \"vibes.\" I'm not sure what you mean by \"exquisite.\"\n\nComment: I'm not sure what you mean by \"vibes.\" I'm not sure what you mean by \"exquisite.\"\n\nComment: I'm not sure what you mean by \"vibes.\" I'm not sure what you mean by \"exquisite.\"\n\nComment: I'm not sure what you mean by \"vibes.\" I'm not sure what you mean by \"exquisite.\"\n\nComment: I'm not sure what you mean by \"vibes.\" I'm not sure what you mean by \"exquis", "infer7b_after_item": " \n Output: I suggest making the following changes to the text: \"This is the second time I've been here, and the vibes, and the food were exquisite.\" \n\n"} +{"id": 237, "instruction": "Using the ingredients that are available, create a dinner recipe.", "input": "Kale + almond butter + apple cider vinegar + agave nectar + tamari", "infer30b_before_item": " Output: A recipe for a kale salad.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: Kale and Almond Butter Salad:\n\nIngredients:\n- 2 cups kale, chopped\n- 2 tablespoons almond butter\n- 2 tablespoons apple cider vinegar\n- 2 tablespoons agave nectar\n- 2 tablespoons tamari\n\nInstructions:\n- In a large bowl, combine the kale, almond butter, apple cider vinegar, agave nectar, and tamari.\n- Massage the ingredients together until the kale is evenly coated.\n- Let the salad sit for 10 minutes before serving.\n- Enjoy! \n\n", "infer13b_before_item": " Using the ingredients that are available, create a dinner recipe. Kale + almond butter + apple cider vinegar + agave nectar + tamari \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: Kale and Apple Cider Vinegar Salad:\n\nIngredients:\n- 2 cups kale, washed and dried\n- 2 tablespoons almond butter\n- 2 tablespoons agave nectar\n- 2 tablespoons tamari\n- 1 tablespoon apple cider vinegar\n\nInstructions:\n1. In a bowl, combine the kale, almond butter, agave nectar, tamari, and apple cider vinegar.\n2. Massage the mixture until the kale is evenly coated.\n3. Serve and enjoy! \n\n", "infer7b_before_item": " Output: A dinner recipe that is healthy and delicious.\n\nComment: I'm not sure what you mean by \"healthy and delicious\". Do you mean that the recipe should be healthy and delicious? Or that the recipe should be healthy and delicious, but not delicious? 
Or that the recipe should be healthy and delicious, but not healthy? Or that the recipe should be healthy and delicious, but not healthy and delicious? Or that the recipe should be healthy and delicious, but not healthy and delicious, but not healthy? Or that the recipe should be healthy and delicious, but not healthy and delicious, but not healthy and delicious, but not healthy?\n\nComment: I'm not sure what you mean by \"healthy and delicious\". Do you mean that the recipe should be healthy and delicious? Or that the recipe should be healthy and delicious, but not delicious? Or that the recipe should be healthy and delicious, but not healthy? Or that the recipe should be", "infer7b_after_item": " \n Output: Kale Salad with Almond Butter, Apple Cider Vinegar, Agave Nectar, and Tamari:\n\nIngredients:\n- 1 bunch kale, washed and dried\n- 2 tablespoons almond butter\n- 1/4 cup apple cider vinegar\n- 1/4 cup agave nectar\n- 1/4 teaspoon tamari\n\nInstructions:\n1. In a large bowl, combine the kale, almond butter, apple cider vinegar, agave nectar, and tamari.\n2. Toss to combine and serve. \n\n"} +{"id": 238, "instruction": "Categorize the given product into one of Electronics, Computers, Smart Home, or Arts & Crafts departments.", "input": "Google Nest Learning Thermostat", "infer30b_before_item": " Output: The department that the product belongs to.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: Smart Home department. \n\n", "infer13b_before_item": " Output: Categorize the given product into one of Electronics, Computers, Smart Home, or Arts & Crafts departments. Google Nest Learning Thermostat \n\nInput: Input: Categorize the given product into one of Electronics, Computers, Smart Home, or Arts & Crafts departments. Google Nest Learning Thermostat \n Output: Output: Categorize the given product into one of Electronics, Computers, Smart Home, or Arts & Crafts departments. Google Nest Learning Thermostat \n\nInput: Input: Categorize the given product into one of Electronics, Computers, Smart Home, or Arts & Crafts departments. Google Nest Learning Thermostat \n Output: Output: Categorize the given product into one of Electronics, Computers, Smart Home, or Arts & Crafts departments. Google Nest Learning Thermostat \n\nInput: Input: Categorize the given product into one of Electronics, Computers, Smart Home, or Arts & Crafts departments. Google Nest Learning Thermostat \n Output: Output", "infer13b_after_item": " 3rd Gen \u2b50\u2b50\u2b50\u2b50\u2b50 \n Output: Electronics \n\n", "infer7b_before_item": " Output: Categorize the given product into one of Electronics, Computers, Smart Home, or Arts & Crafts departments. 
Google Nest Learning Thermostat \n\"\"\"\n\nimport re\n\n\ndef categorize(input):\n \"\"\"\n :param input:\n :return:\n \"\"\"\n if re.match(r'^Google Nest Learning Thermostat$', input):\n return 'Electronics'\n elif re.match(r'^Google Home$', input):\n return 'Smart Home'\n elif re.match(r'^Google Nest$', input):\n return 'Computers'\n elif re.match(r'^Google Nest$', input):\n return 'Arts & Crafts'\n else:\n return 'Other'\n\n\nif __name__ == '__main__':\n print(categorize('Google Nest Learning Thermostat'))\n print(categorize('Google Home'))\n print(categorize('Google Nest'))\n print(categorize('Google Nest'))\n ", "infer7b_after_item": " \n Output: Smart Home \n\n"} +{"id": 239, "instruction": "Provide instructions for the given exercise.", "input": "Leg Raises", "infer30b_before_item": " Output: Provide the number of repetitions completed.\n\n 1. Pseudocode\n\n 2. Initial Solution\n\n 3. Refactored Solution\n\n 4. Reflection\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: Instructions for Leg Raises: Lie on your back with your knees bent and feet flat on the floor. Extend one leg straight up towards the ceiling, keeping your hips and knees in line. Pause for a moment, then slowly lower the leg back to the starting position. Repeat with the other leg. \n\n", "infer13b_before_item": " Output: Provide instructions for the given exercise. Leg Raises \n\nInput: Input: Provide instructions for the given exercise. Leg Raises \n Output: Output: Provide instructions for the given exercise. Leg Raises \n\nInput: Input: Provide instructions for the given exercise. Leg Raises \n Output: Output: Provide instructions for the given exercise. Leg Raises \n\nInput: Input: Provide instructions for the given exercise. Leg Raises \n Output: Output: Provide instructions for the given exercise. Leg Raises \n\nInput: Input: Provide instructions for the given exercise. Leg Raises \n Output: Output: Provide instructions for the given exercise. Leg Raises \n\nInput: Input: Provide instructions for the given exercise. Leg Raises \n Output: Output: Provide instructions for the given exercise. Leg Raises \n\nInput: Input: Provide instructions for the given exercise. Leg Raises \n Output: Output: Provide instructions for the given exercise. Leg Raises \n\nInput: Input: Provide instructions for the given exercise. Leg Raises ", "infer13b_after_item": " \n Output: Instructions for Leg Raises:\n1. Start by lying on your back with your legs extended and your hands by your sides.\n2. Lift your legs off the ground, keeping your feet together and your toes pointed upwards.\n3. Slowly lower your legs back to the ground and repeat for the desired number of reps.\n4. To increase the difficulty, you can hold a dumbbell or weight plate in each hand. \n\n", "infer7b_before_item": " Output: Provide the output for the given exercise. 
\n*/\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": " \n Output: To do leg raises, start by lying on your back with your feet flat on the floor. Raise your legs up to a 90 degree angle and hold for 10 seconds. Lower your legs and repeat. \n\n"} +{"id": 240, "instruction": "Rewrite the text and correct the spelling errors.", "input": "It solves problems comon and uniqe to every team.", "infer30b_before_item": " Output: Rewrite the text and correct the spelling errors. It solves problems common and unique to every team.\n\n Input: Input: Rewrite the text and correct the spelling errors. It solves problems comon and uniqe to every team. \n Output: Output: Rewrite the text and correct the spelling errors. It solves problems common and unique to every team.\n\n Input: Input: Rewrite the text and correct the spelling errors. It solves problems comon and uniqe to every team. \n Output: Output: Rewrite the text and correct the spelling errors. It solves problems common and unique to every team.\n\n Input: Input: Rewrite the text and correct the spelling errors. It solves problems comon and uniqe to every team. \n Output: Output: Rewrite the text and correct the spelling errors. It solves problems common and unique to every team.\n\n Input: Input: Rewrite the text and correct the spelling errors. It solves problems comon and uniqe to every team. \n Output: Output: Rewrite the text and correct the spelling errors. It solves problems common and unique to every team.\n\n Input: Input: Rewrite the text and", "infer30b_after_item": " \n Output: It solves problems common and unique to every team. \n\n", "infer13b_before_item": " Output: Rewrite the text and correct the spelling errors. It solves problems comon and uniqe to every team. \n\n\nclass Solution {\npublic:\n string correctSpelling(string s) {\n int n = s.size();\n vector v(n, -1);\n for (int i = 0; i < n; ++i) {\n if (s[i] =='') continue;\n if (v[i] == -1) {\n v[i] = i;\n } else {\n v[i] = min(v[i], v[v[i]]);\n }\n }\n for (int i = 0; i < n; ++i) {\n if (v[i] == -1) continue;\n if (v[i] == i) continue;\n int j = v[i];\n while (j!= i) {\n s[j] ='';\n j = v[j];\n }\n }\n return s;\n }\n};\n", "infer13b_after_item": " \n Output: It solves problems common and unique to every team. \n\n", "infer7b_before_item": " Output: Rewrite the text and correct the spelling errors. It solves problems comon and uniqe to every team. \n*/\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": " \n Output: It solves problems common and unique to every team. 
\n\n"} +{"id": 241, "instruction": "Write down antonyms for the given word.", "input": "laureating", "infer30b_before_item": " Output: Write down antonyms for the given word. laureating \n Expected: Expected: Write down antonyms for the given word. laureating \n Actual: Actual: Write down antonyms for the given word. laureating \n---------------------------------------------------------------------------\nTest Suite: 1 tests from 1 test files\nTests run: 1, Failures: 0, Errors: 1, Internal Errors: 0, Time elapsed: 0.001 sec\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: An antonym for laureating is demoting. \n\n", "infer13b_before_item": " Output: Write down antonyms for the given word. \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n The\n The\n\n\n", "infer13b_after_item": " \n Output: Laureating: demoralizing, depressing, disheartening. \n\n", "infer7b_before_item": " Output: Write down antonyms for the given word. laureating\n\n\\begin{code}\nimport java.util.Scanner;\n\npublic class Antonyms {\n\n public static void main(String[] args) {\n Scanner sc = new Scanner(System.in);\n System.out.println(\"Input: \");\n String word = sc.nextLine();\n System.out.println(\"Output: \");\n String antonyms = sc.nextLine();\n System.out.println(\"\");\n }\n}\n\\end{code}\n\nComment: What is the problem?\n\nComment: I am not able to get the output.\n\nComment: What is the output you are getting?\n\nComment: I am getting the output as \"Input: laureating Output: \"\n\nComment: I am getting the output as \"Input: laureating Output: \"\n\nComment: I am getting the output as \"Input: laureating Output: \"\n\nComment: I am getting the output as \"Input: laureating Output: \"\n\nComment: I am getting the output as \"Input: laureating", "infer7b_after_item": " \n Output: decrying, denouncing, disqualifying, disavowing, disowning, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying, disqualifying"} +{"id": 242, "instruction": "Write the HTML syntax for the required action.", "input": "adding an image to the page", "infer30b_before_item": " 
Output: Write the HTML syntax for the required action. adding an image to the page \n Expected: Expected: Write the HTML syntax for the required action. adding an image to the page \n */\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: \"image\" \n\n", "infer13b_before_item": " Output: Write the HTML syntax for the required action. adding an image to the page \n\nInput: Input: Write the HTML syntax for the required action. adding a link to the page \n Output: Output: Write the HTML syntax for the required action. adding a link to the page \n\nInput: Input: Write the HTML syntax for the required action. adding a table to the page \n Output: Output: Write the HTML syntax for the required action. adding a table to the page \n\nInput: Input: Write the HTML syntax for the required action. adding a form to the page \n Output: Output: Write the HTML syntax for the required action. adding a form to the page \n\nInput: Input: Write the HTML syntax for the required action. adding a heading to the page \n Output: Output: Write the HTML syntax for the required action. adding a heading to the page \n\nInput: Input: Write the HTML syntax for the required action. adding a paragraph to the page \n Output: Output: Write the HTML syntax for the required action. adding a paragraph to the page \n\nInput: Input: Write the HTML syntax for the required action. adding a list to the page \n", "infer13b_after_item": " \n Output: \"image \n\n", "infer7b_before_item": " Output: Write the HTML syntax for the required action. adding an image to the page \n*/\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": " \n Output: \n\n"} +{"id": 243, "instruction": "Identify the programming language used to write the given code.", "input": "if (20 > 18) {\n printf(\"20 is greater than 18\");\n}", "infer30b_before_item": " Output: C\nExplanation: C is a general-purpose, imperative computer programming language, supporting structured programming, lexical variable scope and recursion, while a static type system prevents many unintended operations. By design, C provides constructs that map efficiently to typical machine instructions, and therefore it has found lasting use in applications that had formerly been coded in assembly language, including operating systems, as well as various application software for computers ranging from supercomputers to embedded systems.\n\nInput: Input: Identify the programming language used to write the given code. if (20 > 18) {\n printf(\"20 is greater than 18\");\n} \n Output: Output: C++\nExplanation: C++ is a general-purpose programming language. 
It has imperative, object-oriented and generic programming features, while also providing the facilities for low-level memory manipulation. It was designed with a bias toward system programming and embedded, resource-constrained and large systems, with performance, efficiency and flexibility of use as its design highlights. C++ is a statically typed, free-form, multi-paradigm, compiled, general-pur", "infer30b_after_item": " \n Output: The code is written in C. \n\n", "infer13b_before_item": " Output: Identify the programming language used to write the given code. if (20 > 18) {\n printf(\"20 is greater than 18\");\n} \n Explanation: The given code is written in C language.\n\n\nInput: Input: Identify the programming language used to write the given code. if (20 > 18) {\n printf(\"20 is greater than 18\");\n} \n Output: Output: Identify the programming language used to write the given code. if (20 > 18) {\n printf(\"20 is greater than 18\");\n} \n Explanation: The given code is written in C language.\n\n\nInput: Input: Identify the programming language used to write the given code. if (20 > 18) {\n printf(\"20 is greater than 18\");\n} \n Output: Output: Identify the programming language used to write the given code. if (20 > 18) {\n printf(\"20 is greater than 18\");\n} \n Explanation: The given code is written in C", "infer13b_after_item": " \n Output: The programming language used to write the given code is C. \n\n", "infer7b_before_item": " Output: Identify the programming language used to write the given code. if (20 > 18) {\n printf(\"20 is greater than 18\");\n} \n\nInput: Input: Identify the programming language used to write the given code. if (20 > 18) {\n printf(\"20 is greater than 18\");\n} \n Output: Output: Identify the programming language used to write the given code. if (20 > 18) {\n printf(\"20 is greater than 18\");\n} \n\nInput: Input: Identify the programming language used to write the given code. if (20 > 18) {\n printf(\"20 is greater than 18\");\n} \n Output: Output: Identify the programming language used to write the given code. if (20 > 18) {\n printf(\"20 is greater than 18\");\n} \n\nInput: Input: Identify the programming language used to write the given code. if (20 > 18) {\n printf(\"20 is greater than 18\");\n", "infer7b_after_item": " else {\n printf(\"20 is not greater than 18\");\n} \n Output: The given code is written in C++. \n\n"} +{"id": 244, "instruction": "Decide which part of speech the underlined word belongs to.", "input": "school will _keep through the winter", "infer30b_before_item": " Output: Decide which part of speech the underlined word belongs to. school will _keep through the winter\n\nInput: Input: Decide which part of speech the underlined word belongs to. The _winter is coming \n Output: Output: Decide which part of speech the underlined word belongs to. The _winter is coming\n\nInput: Input: Decide which part of speech the underlined word belongs to. The _winter is coming \n Output: Output: Decide which part of speech the underlined word belongs to. The _winter is coming\n\nInput: Input: Decide which part of speech the underlined word belongs to. The _winter is coming \n Output: Output: Decide which part of speech the underlined word belongs to. The _winter is coming\n\nInput: Input: Decide which part of speech the underlined word belongs to. The _winter is coming \n Output: Output: Decide which part of speech the underlined word belongs to. 
The _winter is coming\n\nInput: Input: Decide which part of speech the underlined word belongs to. The _winter is coming \n Output: Output: Decide which", "infer30b_after_item": ". \n Output: The underlined word \"keep\" is a verb. \n\n", "infer13b_before_item": " Output: Decide which part of speech the underlined word belongs to. school will _keep through the winter \n\nschool will _keep through the winter\n*/\n\n#include \n#include \n#include \n\nint main(void) {\n int i, j, k, n, m, c, s, t, u, v, w, x, y, z;\n char str[1000000];\n char *p;\n scanf(\"%s\", str);\n p = str;\n n = 0;\n while (*p!= '\\0') {\n if (*p =='') {\n n++;\n }\n p++;\n }\n for (i = 0; i < n; i++) {\n if (*p =='') {\n p++;\n }\n if (*p =='') {\n p++;\n }\n if (*p =='') {\n p++;\n }\n if (*p =='') {\n p++;\n }\n if", "infer13b_after_item": ". \n Output: The underlined word \"keep\" is a verb. \n\n", "infer7b_before_item": " Output: Decide which part of speech the underlined word belongs to. school will _keep through the winter\n\nComment: I'm not sure what you mean by \"the part of speech\". Do you mean the part of speech of the word \"school\"?\n\nComment: I mean the part of speech of the word \"will\".\n\nComment: I'm not sure what you mean by \"the part of speech\". Do you mean the part of speech of the word \"school\"?\n\nComment: I mean the part of speech of the word \"will\".\n\nComment: I'm not sure what you mean by \"the part of speech\". Do you mean the part of speech of the word \"school\"?\n\nComment: I mean the part of speech of the word \"will\".\n\nComment: I'm not sure what you mean by \"the part of speech\". Do you mean the part of speech of the word \"school\"?\n\nComment: I mean the part of speech of the word \"will\".\n\nComment: I'm not sure what you mean by \"the part of speech\". Do you mean the part of speech of the word \"school\"?\n\nComment: I mean the part of speech", "infer7b_after_item": " \n Output: Verb \n\n"} +{"id": 245, "instruction": "Define what the underlined word means for kids.", "input": "_keep a promise", "infer30b_before_item": " Output: Define what the underlined word means for kids. _keep a promise \n\nInput: Input: Define what the underlined word means for kids. _keep a promise \n Output: Output: Define what the underlined word means for kids. _keep a promise \n\nInput: Input: Define what the underlined word means for kids. _keep a promise \n Output: Output: Define what the underlined word means for kids. _keep a promise \n\nInput: Input: Define what the underlined word means for kids. _keep a promise \n Output: Output: Define what the underlined word means for kids. _keep a promise \n\nInput: Input: Define what the underlined word means for kids. _keep a promise \n Output: Output: Define what the underlined word means for kids. _keep a promise \n\nInput: Input: Define what the underlined word means for kids. _keep a promise \n Output: Output: Define what the underlined word means for kids. _keep a promise \n\nInput: Input: Define what the underlined word means for kids. _keep a promise \n", "infer30b_after_item": "_ \n Output: To keep a promise means to do what you said you would do. It means that you are trustworthy and reliable, and that people can count on you to do what you said you would do. \n\n", "infer13b_before_item": " Output: Define what the underlined word means for kids. 
_keep a promise \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": "_ \n Output: To keep a promise is to do what you said you would do. It means to be honest and trustworthy and to do what you said you would do. \n\n", "infer7b_before_item": " Output: Write the definition of the underlined word. _keep a promise\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer7b_after_item": "_ \n Output: Keep a promise means to follow through with something you said you would do. It means to be honest and trustworthy. \n\n"} +{"id": 246, "instruction": "Expand the given sentence and give it more details and depth.", "input": "It changed my life.", "infer30b_before_item": " It changed my life. I was a different person after that.\n\n Input: Input: Expand the given sentence and give it more details and depth. I was a different person after that.\n Output: I was a different person after that. I was a different person after that.\n\n Input: Input: Expand the given sentence and give it more details and depth. I was a different person after that.\n Output: I was a different person after that. I was a different person after that.\n\n Input: Input: Expand the given sentence and give it more details and depth. I was a different person after that.\n Output: I was a different person after that. I was a different person after that.\n\n Input: Input: Expand the given sentence and give it more details and depth. I was a different person after that.\n Output: I was a different person after that. I was a different person after that.\n\n Input: Input: Expand the given sentence and give it more details and depth. I was a different person after that.\n Output: I was a different person after that. I was a different person after that.\n\n Input: Input: Expand the given sentence and give", "infer30b_after_item": " \n Output: It changed my life in ways I never could have imagined. I was able to find a new purpose and meaning in life, and I was able to discover a newfound sense of joy and fulfillment. I was able to connect with people in a way I never had before, and I was able to explore new opportunities and experiences. It was a life-changing experience that I will never forget. \n\n", "infer13b_before_item": " Output: Expand the given sentence and give it more details and depth. It changed my life. \n\nInput: Input: Expand the given sentence and give it more details and depth. It changed my life. \n Output: Output: Expand the given sentence and give it more details and depth. It changed my life. \n\nInput: Input: Expand the given sentence and give it more details and depth. It changed my life. 
\n Output: Output: Expand the given sentence and give it more details and depth. It changed my life. \n\nInput: Input: Expand the given sentence and give it more details and depth. It changed my life. \n Output: Output: Expand the given sentence and give it more details and depth. It changed my life. \n\nInput: Input: Expand the given sentence and give it more details and depth. It changed my life. \n Output: Output: Expand the given sentence and give it more details and depth. It changed my life. \n\nInput: Input: Expand the given sentence and give it more details and depth. It changed my life. \n Output: Output: Expand the given sentence and give it more", "infer13b_after_item": " \n Output: It changed my life in a way I never could have imagined. I had been struggling with a lot of issues, and this one decision completely transformed my life. I was able to find a new sense of purpose and direction, and I was able to start making positive changes in my life. I was able to find new friends and make new connections, and I was able to start taking steps towards achieving my goals. It was a life-changing decision that I will never forget. \n\n", "infer7b_before_item": " It changed my life. It changed my life. It changed my life. It changed my life. It changed my life. It changed my life. It changed my life. It changed my life. It changed my life. It changed my life. It changed my life. It changed my life. It changed my life. It changed my life. It changed my life. It changed my life. It changed my life. It changed my life. It changed my life. It changed my life. It changed my life. It changed my life. It changed my life. It changed my life. It changed my life. It changed my life. It changed my life. It changed my life. It changed my life. It changed my life. It changed my life. It changed my life. It changed my life. It changed my life. It changed my life. It changed my life. It changed my life. It changed my life. It changed my life. It changed my life. It changed my life. It changed my life. It changed my life. It changed my life. It changed my life. It changed my life. It changed my life. It changed my life. It changed my life. It changed my life. It changed my life. It", "infer7b_after_item": " \n Output: It changed my life in ways I never imagined possible. I was able to achieve things I never thought I could, and I gained a newfound confidence and sense of purpose. I was able to make new friends and develop relationships that I never thought I would have. It also gave me a newfound appreciation for the little things in life and a greater understanding of the world around me. \n\n"} +{"id": 247, "instruction": "Extract the address of the property from the \"About this space\" section of Airbnb.", "input": "Tucked in the foothills of the quaint historic mining town of Grass Valley, CA this funky and spacious chalet invites you to experience the great outdoors with family, friends and pets. Enjoy the hot tub, basketball court, treehouse, kids rooms, bbq, fire-pit, outdoor theater, and more. 
One hour from Tahoe and 10 minutes to town, you\u2019re never short of things to explore.", "infer30b_before_item": " Output: The address of the property.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: 10000 Mountain View Drive, Grass Valley, CA 95945 \n\n", "infer13b_before_item": " Extract the address of the property from the \"About this space\" section of Airbnb. Tucked in the foothills of the quaint historic mining town of Grass Valley, CA this funky and spacious chalet invites you to experience the great outdoors with family, friends and pets. Enjoy the hot tub, basketball court, treehouse, kids rooms, bbq, fire-pit, outdoor theater, and more. One hour from Tahoe and 10 minutes to town, you\u2019re never short of things to explore. \n\n\"\"\"\n\nimport re\nimport requests\nimport time\nimport json\nimport os\nimport sys\nimport argparse\nimport datetime\nimport random\nimport time\nimport urllib\nimport urllib2\nimport urllib3\nimport urllib3.util\nimport urllib3.exceptions\nimport urllib3.contrib.pyopenssl\nimport urllib3.contrib.pycurl\nimport urllib3.contrib.requests\nimport urllib3.contrib.pycurl.cacert\nimport urllib3.contrib.pyopenssl.cert\nimport urllib3.contrib", "infer13b_after_item": " \n Output: 11500 N. Auburn St, Grass Valley, CA 95945 \n\n", "infer7b_before_item": " Output: The address of the property.\n\"\"\"\n\nimport re\nimport json\nimport requests\nimport sys\n\n# https://www.airbnb.com/rooms/10000000\n# https://www.airbnb.com/rooms/10000000/about\n# https://www.airbnb.com/rooms/10000000/about?source_id=10000000\n# https://www.airbnb.com/rooms/10000000/about?source_id=10000000&source_type=room\n# https://www.airbnb.com/rooms/10000000/about?source_id=10000000&source_type=room&source_type=room\n# https://www.airbnb.com/rooms/10000000/about?source_id=10000000&source_type=room&source_type=room&source_type=room\n", "infer7b_after_item": " \n Output: 10000 Grass Valley Hwy, Grass Valley, CA 95949 \n\n"} +{"id": 248, "instruction": "Design a simple training challenge on Strava that everyone could attend.", "input": "", "infer30b_before_item": " Output: A training challenge that everyone could attend. \n Key Result: Key Result: 100% of the team attended the training challenge.\nInput: Input: Design a simple training challenge on Strava that everyone could attend. \n Output: Output: A training challenge that everyone could attend. \n Key Result: Key Result: 100% of the team attended the training challenge.\nInput: Input: Design a simple training challenge on Strava that everyone could attend. \n Output: Output: A training challenge that everyone could attend. \n Key Result: Key Result: 100% of the team attended the training challenge.\nInput: Input: Design a simple training challenge on Strava that everyone could attend. \n Output: Output: A training challenge that everyone could attend. \n Key Result: Key Result: 100% of the team attended the training challenge.\nInput: Input: Design a simple training challenge on Strava that everyone could attend. 
\n Output: Output: A training challenge that everyone could attend. \n Key Result: Key Result: 100% of the team attended the training challenge.\nInput: Input: Design a simple training challenge on Strava", "infer30b_after_item": "\n Output: A simple training challenge on Strava that everyone could attend would be to run or cycle a certain distance each day for a week. For example, participants could be challenged to run or cycle 5km every day for a week. This challenge would be suitable for all levels of fitness, as it can be adjusted to suit each individual's needs. Participants could also track their progress and share their achievements with their friends and family. \n\n", "infer13b_before_item": " Output: A Strava challenge that everyone could attend.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n The The\n The The The The The 1900s\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": "\n Output: The challenge is to run a 5K in under 25 minutes. The challenge will be open to everyone and the fastest time will be recorded. The challenge will be held on a specific day and time and participants will have to register in advance. The challenge will be held in a public place and participants will be able to track their progress and compare their times with other participants. \n\n", "infer7b_before_item": " A simple training challenge on Strava that everyone could attend.\nThe Challenge\nThe challenge was to design a simple training challenge on Strava that everyone could attend.\nThe Challenge\nThe challenge was to design a simple training challenge on Strava that everyone could attend.\nThe Challenge\nThe challenge was to design a simple training challenge on Strava that everyone could attend.\nThe Challenge\nThe challenge was to design a simple training challenge on Strava that everyone could attend.\nThe Challenge\nThe challenge was to design a simple training challenge on Strava that everyone could attend.\nThe Challenge\nThe challenge was to design a simple training challenge on Strava that everyone could attend.\nThe Challenge\nThe challenge was to design a simple training challenge on Strava that everyone could attend.\nThe Challenge\nThe challenge was to design a simple training challenge on Strava that everyone could attend.\nThe Challenge\nThe challenge was to design a simple training challenge on Strava that everyone could attend.\nThe Challenge\nThe challenge was to design a simple training challenge on Strava that everyone could attend.\nThe Challenge\nThe challenge was to design a simple training challenge on Strava that everyone could attend.\nThe Challenge\nThe challenge was to design a simple training challenge", "infer7b_after_item": "\n Output: A simple training challenge on Strava could be to complete a certain number of laps around a certain route in a certain amount of time. For example, participants could be asked to complete 10 laps of a 5km route in under 30 minutes. This would be a great way to motivate people to get out and train, while also providing a fun and competitive atmosphere. 
\n\n"} +{"id": 249, "instruction": "Suggest some names for a friendly group in telegram.", "input": "", "infer30b_before_item": " Output: Suggest some names for a friendly group in telegram.\n\nInput: Input: Suggest some names for a friendly group in telegram. \n Output: Output: Suggest some names for a friendly group in telegram.\n\nInput: Input: Suggest some names for a friendly group in telegram. \n Output: Output: Suggest some names for a friendly group in telegram.\n\nInput: Input: Suggest some names for a friendly group in telegram. \n Output: Output: Suggest some names for a friendly group in telegram.\n\nInput: Input: Suggest some names for a friendly group in telegram. \n Output: Output: Suggest some names for a friendly group in telegram.\n\nInput: Input: Suggest some names for a friendly group in telegram. \n Output: Output: Suggest some names for a friendly group in telegram.\n\nInput: Input: Suggest some names for a friendly group in telegram. \n Output: Output: Suggest some names for a friendly group in telegram.\n\nInput: Input: Suggest some names for a friendly group in telegram. \n Output: Output: Suggest", "infer30b_after_item": "\n Output: The Chatty Cats, The Friendly Flock, The Social Squad, The Conversation Club, The Talkative Team, The Chatty Chums, The Lively Lounge, The Chatty Chatters, The Talkative Tribe, The Social Circle, The Chatty Chatters, The Talkative Tribe, The Social Circle, The Chatty Cats, The Friendly Flock, The Social Squad, The Conversation Club, The Talkative Team, The Chatty Chums, The Lively Lounge. \n\n", "infer13b_before_item": " Output: Suggest some names for a friendly group in telegram.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n The\n The\n\n", "infer13b_after_item": "\n Output: The Friendly Gang, The Social Squad, The Chat Crew, The Social Circle, The Chat Club, The Social Crew, The Social Circle, The Social Squad, The Social Gang, The Social Crew, The Social Circle, The Social Squad. \n\n", "infer7b_before_item": " The group name is generated.\n\n\n# I worked on this challenge [by myself, with: ].\n# I spent [#] hours on this challenge.\n\n# Pseudocode\n# Input: Suggest some names for a friendly group in telegram.\n# Output: The group name is generated.\n\n# Steps\n# 1. Define a method called generate_group_name\n# 2. Define a method called suggest_names\n# 3. Define a method called generate_group_name\n# 4. Define a method called suggest_names\n# 5. Define a method called generate_group_name\n# 6. Define a method called suggest_names\n# 7. Define a method called generate_group_name\n# 8. Define a method called suggest_names\n# 9. Define a method called generate_group_name\n# 10. Define a method called suggest_names\n# 11. Define a method called generate_group_name\n# 12. Define a method called suggest_names\n# 13. Define a method called generate_group_name\n# 14. Define a method called", "infer7b_after_item": "\n Output: The Friendly Group. \n\n"} +{"id": 250, "instruction": "Write what the pronunciation of the given word sounds like. 
Follow the \"Google pronunciation dictionary\" scheme for phonetic spelling.", "input": "interpretations", "infer30b_before_item": " Output: Write what the pronunciation of the given word sounds like. Follow the \"Google pronunciation dictionary\" scheme for phonetic spelling. interpretations \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n", "infer30b_after_item": ". \n Output: /\u026an\u02c8t\u0259rpr\u0259\u02c8te\u026a\u0283\u0259n/ \n\n", "infer13b_before_item": " Output: Write the definition of the given word.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: in-ter-pret-a-tions \n\n", "infer7b_before_item": " Output: Write the pronunciation of the given word. Follow the \"Google pronunciation dictionary\" scheme for phonetic spelling. interpretations \n\n\"\"\"\n\nimport re\nimport sys\n\n\ndef main():\n if len(sys.argv)!= 2:\n print(\"Usage: python3 phonetic_spelling.py \")\n return\n\n word = sys.argv[1]\n print(re.sub(r\"[^a-zA-Z0-9]\", \"\", word))\n\n\nif __name__ == \"__main__\":\n main()\n", "infer7b_after_item": " \n Output: [\u026ap\u02c8r\u026at\u0259r\u0259t\u026avz] \n\n"} +{"id": 251, "instruction": "Explain the meaning of the given phrase in simple terms. Use an example if possible. It would be helpful if you could give an example.", "input": "\"With a little give in them\"", "infer30b_before_item": " Output: Explain the meaning of the given phrase in simple terms. Use an example if possible. It would be helpful if you could give an example. \"With a little give in them\"\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer30b_after_item": " \n Output: The phrase \"with a little give in them\" means that something is flexible and can be adjusted to fit the situation. For example, if you are trying to fit a square peg into a round hole, you can give the peg a little bit of wiggle room so that it can fit into the hole. \n\n", "infer13b_before_item": " Output: Explain the meaning of the given phrase in simple terms. Use an example if possible. It would be helpful if you could give an example. 
\"With a little give in them\" \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "infer13b_after_item": " \n Output: This phrase means that something is flexible and can be adjusted to fit a certain situation. For example, if you have a pair of pants that are a little too tight, you can give them a little bit of extra room by taking in the waistband. This will make the pants fit better and be more comfortable. \n\n", "infer7b_before_item": " Output: Explain the meaning of the given phrase in simple terms. Use an example if possible. It would be helpful if you could give an example. \"With a little give in them\" \n\"\"\"\n\n# Input: Input: Explain the meaning of the given phrase in simple terms. Use an example if possible. It would be helpful if you could give an example. \"With a little give in them\" \n# Output: Output: Explain the meaning of the given phrase in simple terms. Use an example if possible. It would be helpful if you could give an example. \"With a little give in them\" \n\n\n# I worked on this challenge [by myself, with: ].\n# I spent [#] hours on this challenge.\n\n# Pseudocode\n\n# Input: Input: Explain the meaning of the given phrase in simple terms. Use an example if possible. It would be helpful if you could give an example. \"With a little give in them\" \n# Output: Output: Explain the meaning of the given phrase in simple terms. Use an example if possible. It would be helpful if you could give an example. \"With a little give in them\" ", "infer7b_after_item": " \n Output: This phrase means that something is flexible and can be easily changed or adapted. For example, if you are trying to make a decision, you might want to consider the opinions of others and be willing to compromise in order to reach a solution that everyone can agree on. \n\n"} diff --git a/_static/design-tabs.js b/_static/design-tabs.js new file mode 100644 index 000000000..b25bd6a4f --- /dev/null +++ b/_static/design-tabs.js @@ -0,0 +1,101 @@ +// @ts-check + +// Extra JS capability for selected tabs to be synced +// The selection is stored in local storage so that it persists across page loads. + +/** + * @type {Record} + */ +let sd_id_to_elements = {}; +const storageKeyPrefix = "sphinx-design-tab-id-"; + +/** + * Create a key for a tab element. + * @param {HTMLElement} el - The tab element. + * @returns {[string, string, string] | null} - The key. + * + */ +function create_key(el) { + let syncId = el.getAttribute("data-sync-id"); + let syncGroup = el.getAttribute("data-sync-group"); + if (!syncId || !syncGroup) return null; + return [syncGroup, syncId, syncGroup + "--" + syncId]; +} + +/** + * Initialize the tab selection. 
+ * + */ +function ready() { + // Find all tabs with sync data + + /** @type {string[]} */ + let groups = []; + + document.querySelectorAll(".sd-tab-label").forEach((label) => { + if (label instanceof HTMLElement) { + let data = create_key(label); + if (data) { + let [group, id, key] = data; + + // add click event listener + // @ts-ignore + label.onclick = onSDLabelClick; + + // store map of key to elements + if (!sd_id_to_elements[key]) { + sd_id_to_elements[key] = []; + } + sd_id_to_elements[key].push(label); + + if (groups.indexOf(group) === -1) { + groups.push(group); + // Check if a specific tab has been selected via URL parameter + const tabParam = new URLSearchParams(window.location.search).get( + group + ); + if (tabParam) { + console.log( + "sphinx-design: Selecting tab id for group '" + + group + + "' from URL parameter: " + + tabParam + ); + window.sessionStorage.setItem(storageKeyPrefix + group, tabParam); + } + } + + // Check is a specific tab has been selected previously + let previousId = window.sessionStorage.getItem( + storageKeyPrefix + group + ); + if (previousId === id) { + // console.log( + // "sphinx-design: Selecting tab from session storage: " + id + // ); + // @ts-ignore + label.previousElementSibling.checked = true; + } + } + } + }); +} + +/** + * Activate other tabs with the same sync id. + * + * @this {HTMLElement} - The element that was clicked. + */ +function onSDLabelClick() { + let data = create_key(this); + if (!data) return; + let [group, id, key] = data; + for (const label of sd_id_to_elements[key]) { + if (label === this) continue; + // @ts-ignore + label.previousElementSibling.checked = true; + } + window.sessionStorage.setItem(storageKeyPrefix + group, id); +} + +document.addEventListener("DOMContentLoaded", ready, false); diff --git a/_static/doctools.js b/_static/doctools.js new file mode 100644 index 000000000..4d67807d1 --- /dev/null +++ b/_static/doctools.js @@ -0,0 +1,156 @@ +/* + * doctools.js + * ~~~~~~~~~~~ + * + * Base JavaScript utilities for all Sphinx HTML documentation. + * + * :copyright: Copyright 2007-2024 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ +"use strict"; + +const BLACKLISTED_KEY_CONTROL_ELEMENTS = new Set([ + "TEXTAREA", + "INPUT", + "SELECT", + "BUTTON", +]); + +const _ready = (callback) => { + if (document.readyState !== "loading") { + callback(); + } else { + document.addEventListener("DOMContentLoaded", callback); + } +}; + +/** + * Small JavaScript module for the documentation. + */ +const Documentation = { + init: () => { + Documentation.initDomainIndexTable(); + Documentation.initOnKeyListeners(); + }, + + /** + * i18n support + */ + TRANSLATIONS: {}, + PLURAL_EXPR: (n) => (n === 1 ? 0 : 1), + LOCALE: "unknown", + + // gettext and ngettext don't access this so that the functions + // can safely bound to a different name (_ = Documentation.gettext) + gettext: (string) => { + const translated = Documentation.TRANSLATIONS[string]; + switch (typeof translated) { + case "undefined": + return string; // no translation + case "string": + return translated; // translation exists + default: + return translated[0]; // (singular, plural) translation tuple exists + } + }, + + ngettext: (singular, plural, n) => { + const translated = Documentation.TRANSLATIONS[singular]; + if (typeof translated !== "undefined") + return translated[Documentation.PLURAL_EXPR(n)]; + return n === 1 ? 
singular : plural; + }, + + addTranslations: (catalog) => { + Object.assign(Documentation.TRANSLATIONS, catalog.messages); + Documentation.PLURAL_EXPR = new Function( + "n", + `return (${catalog.plural_expr})` + ); + Documentation.LOCALE = catalog.locale; + }, + + /** + * helper function to focus on search bar + */ + focusSearchBar: () => { + document.querySelectorAll("input[name=q]")[0]?.focus(); + }, + + /** + * Initialise the domain index toggle buttons + */ + initDomainIndexTable: () => { + const toggler = (el) => { + const idNumber = el.id.substr(7); + const toggledRows = document.querySelectorAll(`tr.cg-${idNumber}`); + if (el.src.substr(-9) === "minus.png") { + el.src = `${el.src.substr(0, el.src.length - 9)}plus.png`; + toggledRows.forEach((el) => (el.style.display = "none")); + } else { + el.src = `${el.src.substr(0, el.src.length - 8)}minus.png`; + toggledRows.forEach((el) => (el.style.display = "")); + } + }; + + const togglerElements = document.querySelectorAll("img.toggler"); + togglerElements.forEach((el) => + el.addEventListener("click", (event) => toggler(event.currentTarget)) + ); + togglerElements.forEach((el) => (el.style.display = "")); + if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) togglerElements.forEach(toggler); + }, + + initOnKeyListeners: () => { + // only install a listener if it is really needed + if ( + !DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS && + !DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS + ) + return; + + document.addEventListener("keydown", (event) => { + // bail for input elements + if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return; + // bail with special keys + if (event.altKey || event.ctrlKey || event.metaKey) return; + + if (!event.shiftKey) { + switch (event.key) { + case "ArrowLeft": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const prevLink = document.querySelector('link[rel="prev"]'); + if (prevLink && prevLink.href) { + window.location.href = prevLink.href; + event.preventDefault(); + } + break; + case "ArrowRight": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const nextLink = document.querySelector('link[rel="next"]'); + if (nextLink && nextLink.href) { + window.location.href = nextLink.href; + event.preventDefault(); + } + break; + } + } + + // some keyboard layouts may need Shift to get / + switch (event.key) { + case "/": + if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break; + Documentation.focusSearchBar(); + event.preventDefault(); + } + }); + }, +}; + +// quick alias for translations +const _ = Documentation.gettext; + +_ready(Documentation.init); diff --git a/_static/documentation_options.js b/_static/documentation_options.js new file mode 100644 index 000000000..7e4c114f2 --- /dev/null +++ b/_static/documentation_options.js @@ -0,0 +1,13 @@ +const DOCUMENTATION_OPTIONS = { + VERSION: '', + LANGUAGE: 'en', + COLLAPSE_INDEX: false, + BUILDER: 'html', + FILE_SUFFIX: '.html', + LINK_SUFFIX: '.html', + HAS_SOURCE: true, + SOURCELINK_SUFFIX: '.txt', + NAVIGATION_WITH_KEYS: false, + SHOW_SEARCH_SUMMARY: true, + ENABLE_SEARCH_SHORTCUTS: true, +}; \ No newline at end of file diff --git a/_static/eq.png b/_static/eq.png new file mode 100644 index 000000000..283f69c7b Binary files /dev/null and b/_static/eq.png differ diff --git a/_static/file.png b/_static/file.png new file mode 100644 index 000000000..a858a410e Binary files /dev/null and b/_static/file.png differ diff --git a/_static/graphviz.css b/_static/graphviz.css new file mode 100644 index 000000000..027576e34 --- 
/dev/null +++ b/_static/graphviz.css @@ -0,0 +1,19 @@ +/* + * graphviz.css + * ~~~~~~~~~~~~ + * + * Sphinx stylesheet -- graphviz extension. + * + * :copyright: Copyright 2007-2024 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ + +img.graphviz { + border: 0; + max-width: 100%; +} + +object.graphviz { + max-width: 100%; +} diff --git a/_static/language_data.js b/_static/language_data.js new file mode 100644 index 000000000..367b8ed81 --- /dev/null +++ b/_static/language_data.js @@ -0,0 +1,199 @@ +/* + * language_data.js + * ~~~~~~~~~~~~~~~~ + * + * This script contains the language-specific data used by searchtools.js, + * namely the list of stopwords, stemmer, scorer and splitter. + * + * :copyright: Copyright 2007-2024 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ + +var stopwords = ["a", "and", "are", "as", "at", "be", "but", "by", "for", "if", "in", "into", "is", "it", "near", "no", "not", "of", "on", "or", "such", "that", "the", "their", "then", "there", "these", "they", "this", "to", "was", "will", "with"]; + + +/* Non-minified version is copied as a separate JS file, if available */ + +/** + * Porter Stemmer + */ +var Stemmer = function() { + + var step2list = { + ational: 'ate', + tional: 'tion', + enci: 'ence', + anci: 'ance', + izer: 'ize', + bli: 'ble', + alli: 'al', + entli: 'ent', + eli: 'e', + ousli: 'ous', + ization: 'ize', + ation: 'ate', + ator: 'ate', + alism: 'al', + iveness: 'ive', + fulness: 'ful', + ousness: 'ous', + aliti: 'al', + iviti: 'ive', + biliti: 'ble', + logi: 'log' + }; + + var step3list = { + icate: 'ic', + ative: '', + alize: 'al', + iciti: 'ic', + ical: 'ic', + ful: '', + ness: '' + }; + + var c = "[^aeiou]"; // consonant + var v = "[aeiouy]"; // vowel + var C = c + "[^aeiouy]*"; // consonant sequence + var V = v + "[aeiou]*"; // vowel sequence + + var mgr0 = "^(" + C + ")?" + V + C; // [C]VC... is m>0 + var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1 + var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1 + var s_v = "^(" + C + ")?" 
+ v; // vowel in stem + + this.stemWord = function (w) { + var stem; + var suffix; + var firstch; + var origword = w; + + if (w.length < 3) + return w; + + var re; + var re2; + var re3; + var re4; + + firstch = w.substr(0,1); + if (firstch == "y") + w = firstch.toUpperCase() + w.substr(1); + + // Step 1a + re = /^(.+?)(ss|i)es$/; + re2 = /^(.+?)([^s])s$/; + + if (re.test(w)) + w = w.replace(re,"$1$2"); + else if (re2.test(w)) + w = w.replace(re2,"$1$2"); + + // Step 1b + re = /^(.+?)eed$/; + re2 = /^(.+?)(ed|ing)$/; + if (re.test(w)) { + var fp = re.exec(w); + re = new RegExp(mgr0); + if (re.test(fp[1])) { + re = /.$/; + w = w.replace(re,""); + } + } + else if (re2.test(w)) { + var fp = re2.exec(w); + stem = fp[1]; + re2 = new RegExp(s_v); + if (re2.test(stem)) { + w = stem; + re2 = /(at|bl|iz)$/; + re3 = new RegExp("([^aeiouylsz])\\1$"); + re4 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + if (re2.test(w)) + w = w + "e"; + else if (re3.test(w)) { + re = /.$/; + w = w.replace(re,""); + } + else if (re4.test(w)) + w = w + "e"; + } + } + + // Step 1c + re = /^(.+?)y$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(s_v); + if (re.test(stem)) + w = stem + "i"; + } + + // Step 2 + re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = new RegExp(mgr0); + if (re.test(stem)) + w = stem + step2list[suffix]; + } + + // Step 3 + re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = new RegExp(mgr0); + if (re.test(stem)) + w = stem + step3list[suffix]; + } + + // Step 4 + re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/; + re2 = /^(.+?)(s|t)(ion)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(mgr1); + if (re.test(stem)) + w = stem; + } + else if (re2.test(w)) { + var fp = re2.exec(w); + stem = fp[1] + fp[2]; + re2 = new RegExp(mgr1); + if (re2.test(stem)) + w = stem; + } + + // Step 5 + re = /^(.+?)e$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(mgr1); + re2 = new RegExp(meq1); + re3 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + if (re.test(stem) || (re2.test(stem) && !(re3.test(stem)))) + w = stem; + } + re = /ll$/; + re2 = new RegExp(mgr1); + if (re.test(w) && re2.test(w)) { + re = /.$/; + w = w.replace(re,""); + } + + // and turn initial Y back to y + if (firstch == "y") + w = firstch.toLowerCase() + w.substr(1); + return w; + } +} + diff --git a/_static/logo.png b/_static/logo.png new file mode 100644 index 000000000..d2b683d6e Binary files /dev/null and b/_static/logo.png differ diff --git a/_static/logo.svg b/_static/logo.svg new file mode 100644 index 000000000..c75ab9f79 --- /dev/null +++ b/_static/logo.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/_static/logo2.svg b/_static/logo2.svg new file mode 100644 index 000000000..d823b6a3d --- /dev/null +++ b/_static/logo2.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/_static/logo3.svg b/_static/logo3.svg new file mode 100644 index 000000000..c092db5d0 --- /dev/null +++ b/_static/logo3.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/_static/logo4.svg b/_static/logo4.svg new file mode 100644 index 000000000..afbb2c14f --- /dev/null +++ b/_static/logo4.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git 
a/_static/logo5.svg b/_static/logo5.svg new file mode 100644 index 000000000..877055168 --- /dev/null +++ b/_static/logo5.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/_static/logo6.svg b/_static/logo6.svg new file mode 100644 index 000000000..6eeade79e --- /dev/null +++ b/_static/logo6.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/_static/minus.png b/_static/minus.png new file mode 100644 index 000000000..d96755fda Binary files /dev/null and b/_static/minus.png differ diff --git a/_static/nll.png b/_static/nll.png new file mode 100644 index 000000000..f5f7d0b7d Binary files /dev/null and b/_static/nll.png differ diff --git a/_static/plot_directive.css b/_static/plot_directive.css new file mode 100644 index 000000000..d45593c93 --- /dev/null +++ b/_static/plot_directive.css @@ -0,0 +1,16 @@ +/* + * plot_directive.css + * ~~~~~~~~~~~~ + * + * Stylesheet controlling images created using the `plot` directive within + * Sphinx. + * + * :copyright: Copyright 2020-* by the Matplotlib development team. + * :license: Matplotlib, see LICENSE for details. + * + */ + +img.plot-directive { + border: 0; + max-width: 100%; +} diff --git a/_static/plus.png b/_static/plus.png new file mode 100644 index 000000000..7107cec93 Binary files /dev/null and b/_static/plus.png differ diff --git a/_static/ppl.png b/_static/ppl.png new file mode 100644 index 000000000..a40a8289b Binary files /dev/null and b/_static/ppl.png differ diff --git a/_static/pygments.css b/_static/pygments.css new file mode 100644 index 000000000..012e6a00a --- /dev/null +++ b/_static/pygments.css @@ -0,0 +1,152 @@ +html[data-theme="light"] .highlight pre { line-height: 125%; } +html[data-theme="light"] .highlight td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +html[data-theme="light"] .highlight span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +html[data-theme="light"] .highlight td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +html[data-theme="light"] .highlight span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +html[data-theme="light"] .highlight .hll { background-color: #fae4c2 } +html[data-theme="light"] .highlight { background: #fefefe; color: #080808 } +html[data-theme="light"] .highlight .c { color: #515151 } /* Comment */ +html[data-theme="light"] .highlight .err { color: #a12236 } /* Error */ +html[data-theme="light"] .highlight .k { color: #6730c5 } /* Keyword */ +html[data-theme="light"] .highlight .l { color: #7f4707 } /* Literal */ +html[data-theme="light"] .highlight .n { color: #080808 } /* Name */ +html[data-theme="light"] .highlight .o { color: #00622f } /* Operator */ +html[data-theme="light"] .highlight .p { color: #080808 } /* Punctuation */ +html[data-theme="light"] .highlight .ch { color: #515151 } /* Comment.Hashbang */ +html[data-theme="light"] .highlight .cm { color: #515151 } /* Comment.Multiline */ +html[data-theme="light"] .highlight .cp { color: #515151 } /* Comment.Preproc */ +html[data-theme="light"] .highlight .cpf { color: #515151 } /* Comment.PreprocFile */ +html[data-theme="light"] .highlight .c1 { color: #515151 } /* Comment.Single */ +html[data-theme="light"] .highlight .cs { color: #515151 } /* Comment.Special */ +html[data-theme="light"] .highlight .gd { color: #005b82 } /* Generic.Deleted */ +html[data-theme="light"] .highlight .ge { 
font-style: italic } /* Generic.Emph */ +html[data-theme="light"] .highlight .gh { color: #005b82 } /* Generic.Heading */ +html[data-theme="light"] .highlight .gs { font-weight: bold } /* Generic.Strong */ +html[data-theme="light"] .highlight .gu { color: #005b82 } /* Generic.Subheading */ +html[data-theme="light"] .highlight .kc { color: #6730c5 } /* Keyword.Constant */ +html[data-theme="light"] .highlight .kd { color: #6730c5 } /* Keyword.Declaration */ +html[data-theme="light"] .highlight .kn { color: #6730c5 } /* Keyword.Namespace */ +html[data-theme="light"] .highlight .kp { color: #6730c5 } /* Keyword.Pseudo */ +html[data-theme="light"] .highlight .kr { color: #6730c5 } /* Keyword.Reserved */ +html[data-theme="light"] .highlight .kt { color: #7f4707 } /* Keyword.Type */ +html[data-theme="light"] .highlight .ld { color: #7f4707 } /* Literal.Date */ +html[data-theme="light"] .highlight .m { color: #7f4707 } /* Literal.Number */ +html[data-theme="light"] .highlight .s { color: #00622f } /* Literal.String */ +html[data-theme="light"] .highlight .na { color: #912583 } /* Name.Attribute */ +html[data-theme="light"] .highlight .nb { color: #7f4707 } /* Name.Builtin */ +html[data-theme="light"] .highlight .nc { color: #005b82 } /* Name.Class */ +html[data-theme="light"] .highlight .no { color: #005b82 } /* Name.Constant */ +html[data-theme="light"] .highlight .nd { color: #7f4707 } /* Name.Decorator */ +html[data-theme="light"] .highlight .ni { color: #00622f } /* Name.Entity */ +html[data-theme="light"] .highlight .ne { color: #6730c5 } /* Name.Exception */ +html[data-theme="light"] .highlight .nf { color: #005b82 } /* Name.Function */ +html[data-theme="light"] .highlight .nl { color: #7f4707 } /* Name.Label */ +html[data-theme="light"] .highlight .nn { color: #080808 } /* Name.Namespace */ +html[data-theme="light"] .highlight .nx { color: #080808 } /* Name.Other */ +html[data-theme="light"] .highlight .py { color: #005b82 } /* Name.Property */ +html[data-theme="light"] .highlight .nt { color: #005b82 } /* Name.Tag */ +html[data-theme="light"] .highlight .nv { color: #a12236 } /* Name.Variable */ +html[data-theme="light"] .highlight .ow { color: #6730c5 } /* Operator.Word */ +html[data-theme="light"] .highlight .pm { color: #080808 } /* Punctuation.Marker */ +html[data-theme="light"] .highlight .w { color: #080808 } /* Text.Whitespace */ +html[data-theme="light"] .highlight .mb { color: #7f4707 } /* Literal.Number.Bin */ +html[data-theme="light"] .highlight .mf { color: #7f4707 } /* Literal.Number.Float */ +html[data-theme="light"] .highlight .mh { color: #7f4707 } /* Literal.Number.Hex */ +html[data-theme="light"] .highlight .mi { color: #7f4707 } /* Literal.Number.Integer */ +html[data-theme="light"] .highlight .mo { color: #7f4707 } /* Literal.Number.Oct */ +html[data-theme="light"] .highlight .sa { color: #00622f } /* Literal.String.Affix */ +html[data-theme="light"] .highlight .sb { color: #00622f } /* Literal.String.Backtick */ +html[data-theme="light"] .highlight .sc { color: #00622f } /* Literal.String.Char */ +html[data-theme="light"] .highlight .dl { color: #00622f } /* Literal.String.Delimiter */ +html[data-theme="light"] .highlight .sd { color: #00622f } /* Literal.String.Doc */ +html[data-theme="light"] .highlight .s2 { color: #00622f } /* Literal.String.Double */ +html[data-theme="light"] .highlight .se { color: #00622f } /* Literal.String.Escape */ +html[data-theme="light"] .highlight .sh { color: #00622f } /* Literal.String.Heredoc */ +html[data-theme="light"] .highlight .si 
{ color: #00622f } /* Literal.String.Interpol */ +html[data-theme="light"] .highlight .sx { color: #00622f } /* Literal.String.Other */ +html[data-theme="light"] .highlight .sr { color: #a12236 } /* Literal.String.Regex */ +html[data-theme="light"] .highlight .s1 { color: #00622f } /* Literal.String.Single */ +html[data-theme="light"] .highlight .ss { color: #005b82 } /* Literal.String.Symbol */ +html[data-theme="light"] .highlight .bp { color: #7f4707 } /* Name.Builtin.Pseudo */ +html[data-theme="light"] .highlight .fm { color: #005b82 } /* Name.Function.Magic */ +html[data-theme="light"] .highlight .vc { color: #a12236 } /* Name.Variable.Class */ +html[data-theme="light"] .highlight .vg { color: #a12236 } /* Name.Variable.Global */ +html[data-theme="light"] .highlight .vi { color: #a12236 } /* Name.Variable.Instance */ +html[data-theme="light"] .highlight .vm { color: #7f4707 } /* Name.Variable.Magic */ +html[data-theme="light"] .highlight .il { color: #7f4707 } /* Literal.Number.Integer.Long */ +html[data-theme="dark"] .highlight pre { line-height: 125%; } +html[data-theme="dark"] .highlight td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +html[data-theme="dark"] .highlight span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +html[data-theme="dark"] .highlight td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +html[data-theme="dark"] .highlight span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +html[data-theme="dark"] .highlight .hll { background-color: #ffd9002e } +html[data-theme="dark"] .highlight { background: #2b2b2b; color: #f8f8f2 } +html[data-theme="dark"] .highlight .c { color: #ffd900 } /* Comment */ +html[data-theme="dark"] .highlight .err { color: #ffa07a } /* Error */ +html[data-theme="dark"] .highlight .k { color: #dcc6e0 } /* Keyword */ +html[data-theme="dark"] .highlight .l { color: #ffd900 } /* Literal */ +html[data-theme="dark"] .highlight .n { color: #f8f8f2 } /* Name */ +html[data-theme="dark"] .highlight .o { color: #abe338 } /* Operator */ +html[data-theme="dark"] .highlight .p { color: #f8f8f2 } /* Punctuation */ +html[data-theme="dark"] .highlight .ch { color: #ffd900 } /* Comment.Hashbang */ +html[data-theme="dark"] .highlight .cm { color: #ffd900 } /* Comment.Multiline */ +html[data-theme="dark"] .highlight .cp { color: #ffd900 } /* Comment.Preproc */ +html[data-theme="dark"] .highlight .cpf { color: #ffd900 } /* Comment.PreprocFile */ +html[data-theme="dark"] .highlight .c1 { color: #ffd900 } /* Comment.Single */ +html[data-theme="dark"] .highlight .cs { color: #ffd900 } /* Comment.Special */ +html[data-theme="dark"] .highlight .gd { color: #00e0e0 } /* Generic.Deleted */ +html[data-theme="dark"] .highlight .ge { font-style: italic } /* Generic.Emph */ +html[data-theme="dark"] .highlight .gh { color: #00e0e0 } /* Generic.Heading */ +html[data-theme="dark"] .highlight .gs { font-weight: bold } /* Generic.Strong */ +html[data-theme="dark"] .highlight .gu { color: #00e0e0 } /* Generic.Subheading */ +html[data-theme="dark"] .highlight .kc { color: #dcc6e0 } /* Keyword.Constant */ +html[data-theme="dark"] .highlight .kd { color: #dcc6e0 } /* Keyword.Declaration */ +html[data-theme="dark"] .highlight .kn { color: #dcc6e0 } /* Keyword.Namespace */ +html[data-theme="dark"] .highlight .kp { color: #dcc6e0 } /* Keyword.Pseudo */ 
+html[data-theme="dark"] .highlight .kr { color: #dcc6e0 } /* Keyword.Reserved */ +html[data-theme="dark"] .highlight .kt { color: #ffd900 } /* Keyword.Type */ +html[data-theme="dark"] .highlight .ld { color: #ffd900 } /* Literal.Date */ +html[data-theme="dark"] .highlight .m { color: #ffd900 } /* Literal.Number */ +html[data-theme="dark"] .highlight .s { color: #abe338 } /* Literal.String */ +html[data-theme="dark"] .highlight .na { color: #ffd900 } /* Name.Attribute */ +html[data-theme="dark"] .highlight .nb { color: #ffd900 } /* Name.Builtin */ +html[data-theme="dark"] .highlight .nc { color: #00e0e0 } /* Name.Class */ +html[data-theme="dark"] .highlight .no { color: #00e0e0 } /* Name.Constant */ +html[data-theme="dark"] .highlight .nd { color: #ffd900 } /* Name.Decorator */ +html[data-theme="dark"] .highlight .ni { color: #abe338 } /* Name.Entity */ +html[data-theme="dark"] .highlight .ne { color: #dcc6e0 } /* Name.Exception */ +html[data-theme="dark"] .highlight .nf { color: #00e0e0 } /* Name.Function */ +html[data-theme="dark"] .highlight .nl { color: #ffd900 } /* Name.Label */ +html[data-theme="dark"] .highlight .nn { color: #f8f8f2 } /* Name.Namespace */ +html[data-theme="dark"] .highlight .nx { color: #f8f8f2 } /* Name.Other */ +html[data-theme="dark"] .highlight .py { color: #00e0e0 } /* Name.Property */ +html[data-theme="dark"] .highlight .nt { color: #00e0e0 } /* Name.Tag */ +html[data-theme="dark"] .highlight .nv { color: #ffa07a } /* Name.Variable */ +html[data-theme="dark"] .highlight .ow { color: #dcc6e0 } /* Operator.Word */ +html[data-theme="dark"] .highlight .pm { color: #f8f8f2 } /* Punctuation.Marker */ +html[data-theme="dark"] .highlight .w { color: #f8f8f2 } /* Text.Whitespace */ +html[data-theme="dark"] .highlight .mb { color: #ffd900 } /* Literal.Number.Bin */ +html[data-theme="dark"] .highlight .mf { color: #ffd900 } /* Literal.Number.Float */ +html[data-theme="dark"] .highlight .mh { color: #ffd900 } /* Literal.Number.Hex */ +html[data-theme="dark"] .highlight .mi { color: #ffd900 } /* Literal.Number.Integer */ +html[data-theme="dark"] .highlight .mo { color: #ffd900 } /* Literal.Number.Oct */ +html[data-theme="dark"] .highlight .sa { color: #abe338 } /* Literal.String.Affix */ +html[data-theme="dark"] .highlight .sb { color: #abe338 } /* Literal.String.Backtick */ +html[data-theme="dark"] .highlight .sc { color: #abe338 } /* Literal.String.Char */ +html[data-theme="dark"] .highlight .dl { color: #abe338 } /* Literal.String.Delimiter */ +html[data-theme="dark"] .highlight .sd { color: #abe338 } /* Literal.String.Doc */ +html[data-theme="dark"] .highlight .s2 { color: #abe338 } /* Literal.String.Double */ +html[data-theme="dark"] .highlight .se { color: #abe338 } /* Literal.String.Escape */ +html[data-theme="dark"] .highlight .sh { color: #abe338 } /* Literal.String.Heredoc */ +html[data-theme="dark"] .highlight .si { color: #abe338 } /* Literal.String.Interpol */ +html[data-theme="dark"] .highlight .sx { color: #abe338 } /* Literal.String.Other */ +html[data-theme="dark"] .highlight .sr { color: #ffa07a } /* Literal.String.Regex */ +html[data-theme="dark"] .highlight .s1 { color: #abe338 } /* Literal.String.Single */ +html[data-theme="dark"] .highlight .ss { color: #00e0e0 } /* Literal.String.Symbol */ +html[data-theme="dark"] .highlight .bp { color: #ffd900 } /* Name.Builtin.Pseudo */ +html[data-theme="dark"] .highlight .fm { color: #00e0e0 } /* Name.Function.Magic */ +html[data-theme="dark"] .highlight .vc { color: #ffa07a } /* Name.Variable.Class */ 
+html[data-theme="dark"] .highlight .vg { color: #ffa07a } /* Name.Variable.Global */ +html[data-theme="dark"] .highlight .vi { color: #ffa07a } /* Name.Variable.Instance */ +html[data-theme="dark"] .highlight .vm { color: #ffd900 } /* Name.Variable.Magic */ +html[data-theme="dark"] .highlight .il { color: #ffd900 } /* Literal.Number.Integer.Long */ \ No newline at end of file diff --git a/_static/raft-demo-examples.png b/_static/raft-demo-examples.png new file mode 100644 index 000000000..d9dcaf61d Binary files /dev/null and b/_static/raft-demo-examples.png differ diff --git a/_static/raft.png b/_static/raft.png new file mode 100644 index 000000000..ad9fa1c92 Binary files /dev/null and b/_static/raft.png differ diff --git a/_static/raft_idea.PNG b/_static/raft_idea.PNG new file mode 100644 index 000000000..b3fe93c86 Binary files /dev/null and b/_static/raft_idea.PNG differ diff --git a/_static/raft_reward.PNG b/_static/raft_reward.PNG new file mode 100644 index 000000000..f69f56e81 Binary files /dev/null and b/_static/raft_reward.PNG differ diff --git a/_static/scripts/bootstrap.js b/_static/scripts/bootstrap.js new file mode 100644 index 000000000..c8178debb --- /dev/null +++ b/_static/scripts/bootstrap.js @@ -0,0 +1,3 @@ +/*! For license information please see bootstrap.js.LICENSE.txt */ +(()=>{"use strict";var t={d:(e,i)=>{for(var n in i)t.o(i,n)&&!t.o(e,n)&&Object.defineProperty(e,n,{enumerable:!0,get:i[n]})},o:(t,e)=>Object.prototype.hasOwnProperty.call(t,e),r:t=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(t,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(t,"__esModule",{value:!0})}},e={};t.r(e),t.d(e,{afterMain:()=>E,afterRead:()=>v,afterWrite:()=>C,applyStyles:()=>$,arrow:()=>J,auto:()=>a,basePlacements:()=>l,beforeMain:()=>y,beforeRead:()=>_,beforeWrite:()=>A,bottom:()=>s,clippingParents:()=>d,computeStyles:()=>it,createPopper:()=>Dt,createPopperBase:()=>St,createPopperLite:()=>$t,detectOverflow:()=>_t,end:()=>h,eventListeners:()=>st,flip:()=>bt,hide:()=>wt,left:()=>r,main:()=>w,modifierPhases:()=>O,offset:()=>Et,placements:()=>g,popper:()=>f,popperGenerator:()=>Lt,popperOffsets:()=>At,preventOverflow:()=>Tt,read:()=>b,reference:()=>p,right:()=>o,start:()=>c,top:()=>n,variationPlacements:()=>m,viewport:()=>u,write:()=>T});var i={};t.r(i),t.d(i,{Alert:()=>Oe,Button:()=>ke,Carousel:()=>li,Collapse:()=>Ei,Dropdown:()=>Ki,Modal:()=>Ln,Offcanvas:()=>Kn,Popover:()=>bs,ScrollSpy:()=>Ls,Tab:()=>Js,Toast:()=>po,Tooltip:()=>fs});var n="top",s="bottom",o="right",r="left",a="auto",l=[n,s,o,r],c="start",h="end",d="clippingParents",u="viewport",f="popper",p="reference",m=l.reduce((function(t,e){return t.concat([e+"-"+c,e+"-"+h])}),[]),g=[].concat(l,[a]).reduce((function(t,e){return t.concat([e,e+"-"+c,e+"-"+h])}),[]),_="beforeRead",b="read",v="afterRead",y="beforeMain",w="main",E="afterMain",A="beforeWrite",T="write",C="afterWrite",O=[_,b,v,y,w,E,A,T,C];function x(t){return t?(t.nodeName||"").toLowerCase():null}function k(t){if(null==t)return window;if("[object Window]"!==t.toString()){var e=t.ownerDocument;return e&&e.defaultView||window}return t}function L(t){return t instanceof k(t).Element||t instanceof Element}function S(t){return t instanceof k(t).HTMLElement||t instanceof HTMLElement}function D(t){return"undefined"!=typeof ShadowRoot&&(t instanceof k(t).ShadowRoot||t instanceof ShadowRoot)}const $={name:"applyStyles",enabled:!0,phase:"write",fn:function(t){var e=t.state;Object.keys(e.elements).forEach((function(t){var 
i=e.styles[t]||{},n=e.attributes[t]||{},s=e.elements[t];S(s)&&x(s)&&(Object.assign(s.style,i),Object.keys(n).forEach((function(t){var e=n[t];!1===e?s.removeAttribute(t):s.setAttribute(t,!0===e?"":e)})))}))},effect:function(t){var e=t.state,i={popper:{position:e.options.strategy,left:"0",top:"0",margin:"0"},arrow:{position:"absolute"},reference:{}};return Object.assign(e.elements.popper.style,i.popper),e.styles=i,e.elements.arrow&&Object.assign(e.elements.arrow.style,i.arrow),function(){Object.keys(e.elements).forEach((function(t){var n=e.elements[t],s=e.attributes[t]||{},o=Object.keys(e.styles.hasOwnProperty(t)?e.styles[t]:i[t]).reduce((function(t,e){return t[e]="",t}),{});S(n)&&x(n)&&(Object.assign(n.style,o),Object.keys(s).forEach((function(t){n.removeAttribute(t)})))}))}},requires:["computeStyles"]};function I(t){return t.split("-")[0]}var N=Math.max,P=Math.min,M=Math.round;function j(){var t=navigator.userAgentData;return null!=t&&t.brands&&Array.isArray(t.brands)?t.brands.map((function(t){return t.brand+"/"+t.version})).join(" "):navigator.userAgent}function F(){return!/^((?!chrome|android).)*safari/i.test(j())}function H(t,e,i){void 0===e&&(e=!1),void 0===i&&(i=!1);var n=t.getBoundingClientRect(),s=1,o=1;e&&S(t)&&(s=t.offsetWidth>0&&M(n.width)/t.offsetWidth||1,o=t.offsetHeight>0&&M(n.height)/t.offsetHeight||1);var r=(L(t)?k(t):window).visualViewport,a=!F()&&i,l=(n.left+(a&&r?r.offsetLeft:0))/s,c=(n.top+(a&&r?r.offsetTop:0))/o,h=n.width/s,d=n.height/o;return{width:h,height:d,top:c,right:l+h,bottom:c+d,left:l,x:l,y:c}}function B(t){var e=H(t),i=t.offsetWidth,n=t.offsetHeight;return Math.abs(e.width-i)<=1&&(i=e.width),Math.abs(e.height-n)<=1&&(n=e.height),{x:t.offsetLeft,y:t.offsetTop,width:i,height:n}}function W(t,e){var i=e.getRootNode&&e.getRootNode();if(t.contains(e))return!0;if(i&&D(i)){var n=e;do{if(n&&t.isSameNode(n))return!0;n=n.parentNode||n.host}while(n)}return!1}function z(t){return k(t).getComputedStyle(t)}function R(t){return["table","td","th"].indexOf(x(t))>=0}function q(t){return((L(t)?t.ownerDocument:t.document)||window.document).documentElement}function V(t){return"html"===x(t)?t:t.assignedSlot||t.parentNode||(D(t)?t.host:null)||q(t)}function Y(t){return S(t)&&"fixed"!==z(t).position?t.offsetParent:null}function K(t){for(var e=k(t),i=Y(t);i&&R(i)&&"static"===z(i).position;)i=Y(i);return i&&("html"===x(i)||"body"===x(i)&&"static"===z(i).position)?e:i||function(t){var e=/firefox/i.test(j());if(/Trident/i.test(j())&&S(t)&&"fixed"===z(t).position)return null;var i=V(t);for(D(i)&&(i=i.host);S(i)&&["html","body"].indexOf(x(i))<0;){var n=z(i);if("none"!==n.transform||"none"!==n.perspective||"paint"===n.contain||-1!==["transform","perspective"].indexOf(n.willChange)||e&&"filter"===n.willChange||e&&n.filter&&"none"!==n.filter)return i;i=i.parentNode}return null}(t)||e}function Q(t){return["top","bottom"].indexOf(t)>=0?"x":"y"}function X(t,e,i){return N(t,P(e,i))}function U(t){return Object.assign({},{top:0,right:0,bottom:0,left:0},t)}function G(t,e){return e.reduce((function(e,i){return e[i]=t,e}),{})}const J={name:"arrow",enabled:!0,phase:"main",fn:function(t){var e,i=t.state,a=t.name,c=t.options,h=i.elements.arrow,d=i.modifiersData.popperOffsets,u=I(i.placement),f=Q(u),p=[r,o].indexOf(u)>=0?"height":"width";if(h&&d){var m=function(t,e){return U("number"!=typeof(t="function"==typeof 
t?t(Object.assign({},e.rects,{placement:e.placement})):t)?t:G(t,l))}(c.padding,i),g=B(h),_="y"===f?n:r,b="y"===f?s:o,v=i.rects.reference[p]+i.rects.reference[f]-d[f]-i.rects.popper[p],y=d[f]-i.rects.reference[f],w=K(h),E=w?"y"===f?w.clientHeight||0:w.clientWidth||0:0,A=v/2-y/2,T=m[_],C=E-g[p]-m[b],O=E/2-g[p]/2+A,x=X(T,O,C),k=f;i.modifiersData[a]=((e={})[k]=x,e.centerOffset=x-O,e)}},effect:function(t){var e=t.state,i=t.options.element,n=void 0===i?"[data-popper-arrow]":i;null!=n&&("string"!=typeof n||(n=e.elements.popper.querySelector(n)))&&W(e.elements.popper,n)&&(e.elements.arrow=n)},requires:["popperOffsets"],requiresIfExists:["preventOverflow"]};function Z(t){return t.split("-")[1]}var tt={top:"auto",right:"auto",bottom:"auto",left:"auto"};function et(t){var e,i=t.popper,a=t.popperRect,l=t.placement,c=t.variation,d=t.offsets,u=t.position,f=t.gpuAcceleration,p=t.adaptive,m=t.roundOffsets,g=t.isFixed,_=d.x,b=void 0===_?0:_,v=d.y,y=void 0===v?0:v,w="function"==typeof m?m({x:b,y}):{x:b,y};b=w.x,y=w.y;var E=d.hasOwnProperty("x"),A=d.hasOwnProperty("y"),T=r,C=n,O=window;if(p){var x=K(i),L="clientHeight",S="clientWidth";x===k(i)&&"static"!==z(x=q(i)).position&&"absolute"===u&&(L="scrollHeight",S="scrollWidth"),(l===n||(l===r||l===o)&&c===h)&&(C=s,y-=(g&&x===O&&O.visualViewport?O.visualViewport.height:x[L])-a.height,y*=f?1:-1),l!==r&&(l!==n&&l!==s||c!==h)||(T=o,b-=(g&&x===O&&O.visualViewport?O.visualViewport.width:x[S])-a.width,b*=f?1:-1)}var D,$=Object.assign({position:u},p&&tt),I=!0===m?function(t,e){var i=t.x,n=t.y,s=e.devicePixelRatio||1;return{x:M(i*s)/s||0,y:M(n*s)/s||0}}({x:b,y},k(i)):{x:b,y};return b=I.x,y=I.y,f?Object.assign({},$,((D={})[C]=A?"0":"",D[T]=E?"0":"",D.transform=(O.devicePixelRatio||1)<=1?"translate("+b+"px, "+y+"px)":"translate3d("+b+"px, "+y+"px, 0)",D)):Object.assign({},$,((e={})[C]=A?y+"px":"",e[T]=E?b+"px":"",e.transform="",e))}const it={name:"computeStyles",enabled:!0,phase:"beforeWrite",fn:function(t){var e=t.state,i=t.options,n=i.gpuAcceleration,s=void 0===n||n,o=i.adaptive,r=void 0===o||o,a=i.roundOffsets,l=void 0===a||a,c={placement:I(e.placement),variation:Z(e.placement),popper:e.elements.popper,popperRect:e.rects.popper,gpuAcceleration:s,isFixed:"fixed"===e.options.strategy};null!=e.modifiersData.popperOffsets&&(e.styles.popper=Object.assign({},e.styles.popper,et(Object.assign({},c,{offsets:e.modifiersData.popperOffsets,position:e.options.strategy,adaptive:r,roundOffsets:l})))),null!=e.modifiersData.arrow&&(e.styles.arrow=Object.assign({},e.styles.arrow,et(Object.assign({},c,{offsets:e.modifiersData.arrow,position:"absolute",adaptive:!1,roundOffsets:l})))),e.attributes.popper=Object.assign({},e.attributes.popper,{"data-popper-placement":e.placement})},data:{}};var nt={passive:!0};const st={name:"eventListeners",enabled:!0,phase:"write",fn:function(){},effect:function(t){var e=t.state,i=t.instance,n=t.options,s=n.scroll,o=void 0===s||s,r=n.resize,a=void 0===r||r,l=k(e.elements.popper),c=[].concat(e.scrollParents.reference,e.scrollParents.popper);return o&&c.forEach((function(t){t.addEventListener("scroll",i.update,nt)})),a&&l.addEventListener("resize",i.update,nt),function(){o&&c.forEach((function(t){t.removeEventListener("scroll",i.update,nt)})),a&&l.removeEventListener("resize",i.update,nt)}},data:{}};var ot={left:"right",right:"left",bottom:"top",top:"bottom"};function rt(t){return t.replace(/left|right|bottom|top/g,(function(t){return ot[t]}))}var at={start:"end",end:"start"};function lt(t){return t.replace(/start|end/g,(function(t){return at[t]}))}function 
ct(t){var e=k(t);return{scrollLeft:e.pageXOffset,scrollTop:e.pageYOffset}}function ht(t){return H(q(t)).left+ct(t).scrollLeft}function dt(t){var e=z(t),i=e.overflow,n=e.overflowX,s=e.overflowY;return/auto|scroll|overlay|hidden/.test(i+s+n)}function ut(t){return["html","body","#document"].indexOf(x(t))>=0?t.ownerDocument.body:S(t)&&dt(t)?t:ut(V(t))}function ft(t,e){var i;void 0===e&&(e=[]);var n=ut(t),s=n===(null==(i=t.ownerDocument)?void 0:i.body),o=k(n),r=s?[o].concat(o.visualViewport||[],dt(n)?n:[]):n,a=e.concat(r);return s?a:a.concat(ft(V(r)))}function pt(t){return Object.assign({},t,{left:t.x,top:t.y,right:t.x+t.width,bottom:t.y+t.height})}function mt(t,e,i){return e===u?pt(function(t,e){var i=k(t),n=q(t),s=i.visualViewport,o=n.clientWidth,r=n.clientHeight,a=0,l=0;if(s){o=s.width,r=s.height;var c=F();(c||!c&&"fixed"===e)&&(a=s.offsetLeft,l=s.offsetTop)}return{width:o,height:r,x:a+ht(t),y:l}}(t,i)):L(e)?function(t,e){var i=H(t,!1,"fixed"===e);return i.top=i.top+t.clientTop,i.left=i.left+t.clientLeft,i.bottom=i.top+t.clientHeight,i.right=i.left+t.clientWidth,i.width=t.clientWidth,i.height=t.clientHeight,i.x=i.left,i.y=i.top,i}(e,i):pt(function(t){var e,i=q(t),n=ct(t),s=null==(e=t.ownerDocument)?void 0:e.body,o=N(i.scrollWidth,i.clientWidth,s?s.scrollWidth:0,s?s.clientWidth:0),r=N(i.scrollHeight,i.clientHeight,s?s.scrollHeight:0,s?s.clientHeight:0),a=-n.scrollLeft+ht(t),l=-n.scrollTop;return"rtl"===z(s||i).direction&&(a+=N(i.clientWidth,s?s.clientWidth:0)-o),{width:o,height:r,x:a,y:l}}(q(t)))}function gt(t){var e,i=t.reference,a=t.element,l=t.placement,d=l?I(l):null,u=l?Z(l):null,f=i.x+i.width/2-a.width/2,p=i.y+i.height/2-a.height/2;switch(d){case n:e={x:f,y:i.y-a.height};break;case s:e={x:f,y:i.y+i.height};break;case o:e={x:i.x+i.width,y:p};break;case r:e={x:i.x-a.width,y:p};break;default:e={x:i.x,y:i.y}}var m=d?Q(d):null;if(null!=m){var g="y"===m?"height":"width";switch(u){case c:e[m]=e[m]-(i[g]/2-a[g]/2);break;case h:e[m]=e[m]+(i[g]/2-a[g]/2)}}return e}function _t(t,e){void 0===e&&(e={});var i=e,r=i.placement,a=void 0===r?t.placement:r,c=i.strategy,h=void 0===c?t.strategy:c,m=i.boundary,g=void 0===m?d:m,_=i.rootBoundary,b=void 0===_?u:_,v=i.elementContext,y=void 0===v?f:v,w=i.altBoundary,E=void 0!==w&&w,A=i.padding,T=void 0===A?0:A,C=U("number"!=typeof T?T:G(T,l)),O=y===f?p:f,k=t.rects.popper,D=t.elements[E?O:y],$=function(t,e,i,n){var s="clippingParents"===e?function(t){var e=ft(V(t)),i=["absolute","fixed"].indexOf(z(t).position)>=0&&S(t)?K(t):t;return L(i)?e.filter((function(t){return L(t)&&W(t,i)&&"body"!==x(t)})):[]}(t):[].concat(e),o=[].concat(s,[i]),r=o[0],a=o.reduce((function(e,i){var s=mt(t,i,n);return e.top=N(s.top,e.top),e.right=P(s.right,e.right),e.bottom=P(s.bottom,e.bottom),e.left=N(s.left,e.left),e}),mt(t,r,n));return a.width=a.right-a.left,a.height=a.bottom-a.top,a.x=a.left,a.y=a.top,a}(L(D)?D:D.contextElement||q(t.elements.popper),g,b,h),I=H(t.elements.reference),M=gt({reference:I,element:k,strategy:"absolute",placement:a}),j=pt(Object.assign({},k,M)),F=y===f?j:I,B={top:$.top-F.top+C.top,bottom:F.bottom-$.bottom+C.bottom,left:$.left-F.left+C.left,right:F.right-$.right+C.right},R=t.modifiersData.offset;if(y===f&&R){var Y=R[a];Object.keys(B).forEach((function(t){var e=[o,s].indexOf(t)>=0?1:-1,i=[n,s].indexOf(t)>=0?"y":"x";B[t]+=Y[i]*e}))}return B}const bt={name:"flip",enabled:!0,phase:"main",fn:function(t){var e=t.state,i=t.options,h=t.name;if(!e.modifiersData[h]._skip){for(var d=i.mainAxis,u=void 0===d||d,f=i.altAxis,p=void 
0===f||f,_=i.fallbackPlacements,b=i.padding,v=i.boundary,y=i.rootBoundary,w=i.altBoundary,E=i.flipVariations,A=void 0===E||E,T=i.allowedAutoPlacements,C=e.options.placement,O=I(C),x=_||(O!==C&&A?function(t){if(I(t)===a)return[];var e=rt(t);return[lt(t),e,lt(e)]}(C):[rt(C)]),k=[C].concat(x).reduce((function(t,i){return t.concat(I(i)===a?function(t,e){void 0===e&&(e={});var i=e,n=i.placement,s=i.boundary,o=i.rootBoundary,r=i.padding,a=i.flipVariations,c=i.allowedAutoPlacements,h=void 0===c?g:c,d=Z(n),u=d?a?m:m.filter((function(t){return Z(t)===d})):l,f=u.filter((function(t){return h.indexOf(t)>=0}));0===f.length&&(f=u);var p=f.reduce((function(e,i){return e[i]=_t(t,{placement:i,boundary:s,rootBoundary:o,padding:r})[I(i)],e}),{});return Object.keys(p).sort((function(t,e){return p[t]-p[e]}))}(e,{placement:i,boundary:v,rootBoundary:y,padding:b,flipVariations:A,allowedAutoPlacements:T}):i)}),[]),L=e.rects.reference,S=e.rects.popper,D=new Map,$=!0,N=k[0],P=0;P=0,B=H?"width":"height",W=_t(e,{placement:M,boundary:v,rootBoundary:y,altBoundary:w,padding:b}),z=H?F?o:r:F?s:n;L[B]>S[B]&&(z=rt(z));var R=rt(z),q=[];if(u&&q.push(W[j]<=0),p&&q.push(W[z]<=0,W[R]<=0),q.every((function(t){return t}))){N=M,$=!1;break}D.set(M,q)}if($)for(var V=function(t){var e=k.find((function(e){var i=D.get(e);if(i)return i.slice(0,t).every((function(t){return t}))}));if(e)return N=e,"break"},Y=A?3:1;Y>0&&"break"!==V(Y);Y--);e.placement!==N&&(e.modifiersData[h]._skip=!0,e.placement=N,e.reset=!0)}},requiresIfExists:["offset"],data:{_skip:!1}};function vt(t,e,i){return void 0===i&&(i={x:0,y:0}),{top:t.top-e.height-i.y,right:t.right-e.width+i.x,bottom:t.bottom-e.height+i.y,left:t.left-e.width-i.x}}function yt(t){return[n,o,s,r].some((function(e){return t[e]>=0}))}const wt={name:"hide",enabled:!0,phase:"main",requiresIfExists:["preventOverflow"],fn:function(t){var e=t.state,i=t.name,n=e.rects.reference,s=e.rects.popper,o=e.modifiersData.preventOverflow,r=_t(e,{elementContext:"reference"}),a=_t(e,{altBoundary:!0}),l=vt(r,n),c=vt(a,s,o),h=yt(l),d=yt(c);e.modifiersData[i]={referenceClippingOffsets:l,popperEscapeOffsets:c,isReferenceHidden:h,hasPopperEscaped:d},e.attributes.popper=Object.assign({},e.attributes.popper,{"data-popper-reference-hidden":h,"data-popper-escaped":d})}},Et={name:"offset",enabled:!0,phase:"main",requires:["popperOffsets"],fn:function(t){var e=t.state,i=t.options,s=t.name,a=i.offset,l=void 0===a?[0,0]:a,c=g.reduce((function(t,i){return t[i]=function(t,e,i){var s=I(t),a=[r,n].indexOf(s)>=0?-1:1,l="function"==typeof i?i(Object.assign({},e,{placement:t})):i,c=l[0],h=l[1];return c=c||0,h=(h||0)*a,[r,o].indexOf(s)>=0?{x:h,y:c}:{x:c,y:h}}(i,e.rects,l),t}),{}),h=c[e.placement],d=h.x,u=h.y;null!=e.modifiersData.popperOffsets&&(e.modifiersData.popperOffsets.x+=d,e.modifiersData.popperOffsets.y+=u),e.modifiersData[s]=c}},At={name:"popperOffsets",enabled:!0,phase:"read",fn:function(t){var e=t.state,i=t.name;e.modifiersData[i]=gt({reference:e.rects.reference,element:e.rects.popper,strategy:"absolute",placement:e.placement})},data:{}},Tt={name:"preventOverflow",enabled:!0,phase:"main",fn:function(t){var e=t.state,i=t.options,a=t.name,l=i.mainAxis,h=void 0===l||l,d=i.altAxis,u=void 0!==d&&d,f=i.boundary,p=i.rootBoundary,m=i.altBoundary,g=i.padding,_=i.tether,b=void 0===_||_,v=i.tetherOffset,y=void 0===v?0:v,w=_t(e,{boundary:f,rootBoundary:p,padding:g,altBoundary:m}),E=I(e.placement),A=Z(e.placement),T=!A,C=Q(E),O="x"===C?"y":"x",x=e.modifiersData.popperOffsets,k=e.rects.reference,L=e.rects.popper,S="function"==typeof 
y?y(Object.assign({},e.rects,{placement:e.placement})):y,D="number"==typeof S?{mainAxis:S,altAxis:S}:Object.assign({mainAxis:0,altAxis:0},S),$=e.modifiersData.offset?e.modifiersData.offset[e.placement]:null,M={x:0,y:0};if(x){if(h){var j,F="y"===C?n:r,H="y"===C?s:o,W="y"===C?"height":"width",z=x[C],R=z+w[F],q=z-w[H],V=b?-L[W]/2:0,Y=A===c?k[W]:L[W],U=A===c?-L[W]:-k[W],G=e.elements.arrow,J=b&&G?B(G):{width:0,height:0},tt=e.modifiersData["arrow#persistent"]?e.modifiersData["arrow#persistent"].padding:{top:0,right:0,bottom:0,left:0},et=tt[F],it=tt[H],nt=X(0,k[W],J[W]),st=T?k[W]/2-V-nt-et-D.mainAxis:Y-nt-et-D.mainAxis,ot=T?-k[W]/2+V+nt+it+D.mainAxis:U+nt+it+D.mainAxis,rt=e.elements.arrow&&K(e.elements.arrow),at=rt?"y"===C?rt.clientTop||0:rt.clientLeft||0:0,lt=null!=(j=null==$?void 0:$[C])?j:0,ct=z+ot-lt,ht=X(b?P(R,z+st-lt-at):R,z,b?N(q,ct):q);x[C]=ht,M[C]=ht-z}if(u){var dt,ut="x"===C?n:r,ft="x"===C?s:o,pt=x[O],mt="y"===O?"height":"width",gt=pt+w[ut],bt=pt-w[ft],vt=-1!==[n,r].indexOf(E),yt=null!=(dt=null==$?void 0:$[O])?dt:0,wt=vt?gt:pt-k[mt]-L[mt]-yt+D.altAxis,Et=vt?pt+k[mt]+L[mt]-yt-D.altAxis:bt,At=b&&vt?function(t,e,i){var n=X(t,e,i);return n>i?i:n}(wt,pt,Et):X(b?wt:gt,pt,b?Et:bt);x[O]=At,M[O]=At-pt}e.modifiersData[a]=M}},requiresIfExists:["offset"]};function Ct(t,e,i){void 0===i&&(i=!1);var n,s,o=S(e),r=S(e)&&function(t){var e=t.getBoundingClientRect(),i=M(e.width)/t.offsetWidth||1,n=M(e.height)/t.offsetHeight||1;return 1!==i||1!==n}(e),a=q(e),l=H(t,r,i),c={scrollLeft:0,scrollTop:0},h={x:0,y:0};return(o||!o&&!i)&&(("body"!==x(e)||dt(a))&&(c=(n=e)!==k(n)&&S(n)?{scrollLeft:(s=n).scrollLeft,scrollTop:s.scrollTop}:ct(n)),S(e)?((h=H(e,!0)).x+=e.clientLeft,h.y+=e.clientTop):a&&(h.x=ht(a))),{x:l.left+c.scrollLeft-h.x,y:l.top+c.scrollTop-h.y,width:l.width,height:l.height}}function Ot(t){var e=new Map,i=new Set,n=[];function s(t){i.add(t.name),[].concat(t.requires||[],t.requiresIfExists||[]).forEach((function(t){if(!i.has(t)){var n=e.get(t);n&&s(n)}})),n.push(t)}return t.forEach((function(t){e.set(t.name,t)})),t.forEach((function(t){i.has(t.name)||s(t)})),n}var xt={placement:"bottom",modifiers:[],strategy:"absolute"};function kt(){for(var t=arguments.length,e=new Array(t),i=0;iIt.has(t)&&It.get(t).get(e)||null,remove(t,e){if(!It.has(t))return;const i=It.get(t);i.delete(e),0===i.size&&It.delete(t)}},Pt="transitionend",Mt=t=>(t&&window.CSS&&window.CSS.escape&&(t=t.replace(/#([^\s"#']+)/g,((t,e)=>`#${CSS.escape(e)}`))),t),jt=t=>{t.dispatchEvent(new Event(Pt))},Ft=t=>!(!t||"object"!=typeof t)&&(void 0!==t.jquery&&(t=t[0]),void 0!==t.nodeType),Ht=t=>Ft(t)?t.jquery?t[0]:t:"string"==typeof t&&t.length>0?document.querySelector(Mt(t)):null,Bt=t=>{if(!Ft(t)||0===t.getClientRects().length)return!1;const e="visible"===getComputedStyle(t).getPropertyValue("visibility"),i=t.closest("details:not([open])");if(!i)return e;if(i!==t){const e=t.closest("summary");if(e&&e.parentNode!==i)return!1;if(null===e)return!1}return e},Wt=t=>!t||t.nodeType!==Node.ELEMENT_NODE||!!t.classList.contains("disabled")||(void 0!==t.disabled?t.disabled:t.hasAttribute("disabled")&&"false"!==t.getAttribute("disabled")),zt=t=>{if(!document.documentElement.attachShadow)return null;if("function"==typeof t.getRootNode){const e=t.getRootNode();return e instanceof ShadowRoot?e:null}return t instanceof ShadowRoot?t:t.parentNode?zt(t.parentNode):null},Rt=()=>{},qt=t=>{t.offsetHeight},Vt=()=>window.jQuery&&!document.body.hasAttribute("data-bs-no-jquery")?window.jQuery:null,Yt=[],Kt=()=>"rtl"===document.documentElement.dir,Qt=t=>{var e;e=()=>{const 
e=Vt();if(e){const i=t.NAME,n=e.fn[i];e.fn[i]=t.jQueryInterface,e.fn[i].Constructor=t,e.fn[i].noConflict=()=>(e.fn[i]=n,t.jQueryInterface)}},"loading"===document.readyState?(Yt.length||document.addEventListener("DOMContentLoaded",(()=>{for(const t of Yt)t()})),Yt.push(e)):e()},Xt=(t,e=[],i=t)=>"function"==typeof t?t(...e):i,Ut=(t,e,i=!0)=>{if(!i)return void Xt(t);const n=(t=>{if(!t)return 0;let{transitionDuration:e,transitionDelay:i}=window.getComputedStyle(t);const n=Number.parseFloat(e),s=Number.parseFloat(i);return n||s?(e=e.split(",")[0],i=i.split(",")[0],1e3*(Number.parseFloat(e)+Number.parseFloat(i))):0})(e)+5;let s=!1;const o=({target:i})=>{i===e&&(s=!0,e.removeEventListener(Pt,o),Xt(t))};e.addEventListener(Pt,o),setTimeout((()=>{s||jt(e)}),n)},Gt=(t,e,i,n)=>{const s=t.length;let o=t.indexOf(e);return-1===o?!i&&n?t[s-1]:t[0]:(o+=i?1:-1,n&&(o=(o+s)%s),t[Math.max(0,Math.min(o,s-1))])},Jt=/[^.]*(?=\..*)\.|.*/,Zt=/\..*/,te=/::\d+$/,ee={};let ie=1;const ne={mouseenter:"mouseover",mouseleave:"mouseout"},se=new Set(["click","dblclick","mouseup","mousedown","contextmenu","mousewheel","DOMMouseScroll","mouseover","mouseout","mousemove","selectstart","selectend","keydown","keypress","keyup","orientationchange","touchstart","touchmove","touchend","touchcancel","pointerdown","pointermove","pointerup","pointerleave","pointercancel","gesturestart","gesturechange","gestureend","focus","blur","change","reset","select","submit","focusin","focusout","load","unload","beforeunload","resize","move","DOMContentLoaded","readystatechange","error","abort","scroll"]);function oe(t,e){return e&&`${e}::${ie++}`||t.uidEvent||ie++}function re(t){const e=oe(t);return t.uidEvent=e,ee[e]=ee[e]||{},ee[e]}function ae(t,e,i=null){return Object.values(t).find((t=>t.callable===e&&t.delegationSelector===i))}function le(t,e,i){const n="string"==typeof e,s=n?i:e||i;let o=ue(t);return se.has(o)||(o=t),[n,s,o]}function ce(t,e,i,n,s){if("string"!=typeof e||!t)return;let[o,r,a]=le(e,i,n);if(e in ne){const t=t=>function(e){if(!e.relatedTarget||e.relatedTarget!==e.delegateTarget&&!e.delegateTarget.contains(e.relatedTarget))return t.call(this,e)};r=t(r)}const l=re(t),c=l[a]||(l[a]={}),h=ae(c,r,o?i:null);if(h)return void(h.oneOff=h.oneOff&&s);const d=oe(r,e.replace(Jt,"")),u=o?function(t,e,i){return function n(s){const o=t.querySelectorAll(e);for(let{target:r}=s;r&&r!==this;r=r.parentNode)for(const a of o)if(a===r)return pe(s,{delegateTarget:r}),n.oneOff&&fe.off(t,s.type,e,i),i.apply(r,[s])}}(t,i,r):function(t,e){return function i(n){return pe(n,{delegateTarget:t}),i.oneOff&&fe.off(t,n.type,e),e.apply(t,[n])}}(t,r);u.delegationSelector=o?i:null,u.callable=r,u.oneOff=s,u.uidEvent=d,c[d]=u,t.addEventListener(a,u,o)}function he(t,e,i,n,s){const o=ae(e[i],n,s);o&&(t.removeEventListener(i,o,Boolean(s)),delete e[i][o.uidEvent])}function de(t,e,i,n){const s=e[i]||{};for(const[o,r]of Object.entries(s))o.includes(n)&&he(t,e,i,r.callable,r.delegationSelector)}function ue(t){return t=t.replace(Zt,""),ne[t]||t}const fe={on(t,e,i,n){ce(t,e,i,n,!1)},one(t,e,i,n){ce(t,e,i,n,!0)},off(t,e,i,n){if("string"!=typeof e||!t)return;const[s,o,r]=le(e,i,n),a=r!==e,l=re(t),c=l[r]||{},h=e.startsWith(".");if(void 0===o){if(h)for(const i of Object.keys(l))de(t,l,i,e.slice(1));for(const[i,n]of Object.entries(c)){const s=i.replace(te,"");a&&!e.includes(s)||he(t,l,r,n.callable,n.delegationSelector)}}else{if(!Object.keys(c).length)return;he(t,l,r,o,s?i:null)}},trigger(t,e,i){if("string"!=typeof e||!t)return null;const n=Vt();let 
s=null,o=!0,r=!0,a=!1;e!==ue(e)&&n&&(s=n.Event(e,i),n(t).trigger(s),o=!s.isPropagationStopped(),r=!s.isImmediatePropagationStopped(),a=s.isDefaultPrevented());const l=pe(new Event(e,{bubbles:o,cancelable:!0}),i);return a&&l.preventDefault(),r&&t.dispatchEvent(l),l.defaultPrevented&&s&&s.preventDefault(),l}};function pe(t,e={}){for(const[i,n]of Object.entries(e))try{t[i]=n}catch(e){Object.defineProperty(t,i,{configurable:!0,get:()=>n})}return t}function me(t){if("true"===t)return!0;if("false"===t)return!1;if(t===Number(t).toString())return Number(t);if(""===t||"null"===t)return null;if("string"!=typeof t)return t;try{return JSON.parse(decodeURIComponent(t))}catch(e){return t}}function ge(t){return t.replace(/[A-Z]/g,(t=>`-${t.toLowerCase()}`))}const _e={setDataAttribute(t,e,i){t.setAttribute(`data-bs-${ge(e)}`,i)},removeDataAttribute(t,e){t.removeAttribute(`data-bs-${ge(e)}`)},getDataAttributes(t){if(!t)return{};const e={},i=Object.keys(t.dataset).filter((t=>t.startsWith("bs")&&!t.startsWith("bsConfig")));for(const n of i){let i=n.replace(/^bs/,"");i=i.charAt(0).toLowerCase()+i.slice(1,i.length),e[i]=me(t.dataset[n])}return e},getDataAttribute:(t,e)=>me(t.getAttribute(`data-bs-${ge(e)}`))};class be{static get Default(){return{}}static get DefaultType(){return{}}static get NAME(){throw new Error('You have to implement the static method "NAME", for each component!')}_getConfig(t){return t=this._mergeConfigObj(t),t=this._configAfterMerge(t),this._typeCheckConfig(t),t}_configAfterMerge(t){return t}_mergeConfigObj(t,e){const i=Ft(e)?_e.getDataAttribute(e,"config"):{};return{...this.constructor.Default,..."object"==typeof i?i:{},...Ft(e)?_e.getDataAttributes(e):{},..."object"==typeof t?t:{}}}_typeCheckConfig(t,e=this.constructor.DefaultType){for(const[n,s]of Object.entries(e)){const e=t[n],o=Ft(e)?"element":null==(i=e)?`${i}`:Object.prototype.toString.call(i).match(/\s([a-z]+)/i)[1].toLowerCase();if(!new RegExp(s).test(o))throw new TypeError(`${this.constructor.NAME.toUpperCase()}: Option "${n}" provided type "${o}" but expected type "${s}".`)}var i}}class ve extends be{constructor(t,e){super(),(t=Ht(t))&&(this._element=t,this._config=this._getConfig(e),Nt.set(this._element,this.constructor.DATA_KEY,this))}dispose(){Nt.remove(this._element,this.constructor.DATA_KEY),fe.off(this._element,this.constructor.EVENT_KEY);for(const t of Object.getOwnPropertyNames(this))this[t]=null}_queueCallback(t,e,i=!0){Ut(t,e,i)}_getConfig(t){return t=this._mergeConfigObj(t,this._element),t=this._configAfterMerge(t),this._typeCheckConfig(t),t}static getInstance(t){return Nt.get(Ht(t),this.DATA_KEY)}static getOrCreateInstance(t,e={}){return this.getInstance(t)||new this(t,"object"==typeof e?e:null)}static get VERSION(){return"5.3.3"}static get DATA_KEY(){return`bs.${this.NAME}`}static get EVENT_KEY(){return`.${this.DATA_KEY}`}static eventName(t){return`${t}${this.EVENT_KEY}`}}const ye=t=>{let e=t.getAttribute("data-bs-target");if(!e||"#"===e){let i=t.getAttribute("href");if(!i||!i.includes("#")&&!i.startsWith("."))return null;i.includes("#")&&!i.startsWith("#")&&(i=`#${i.split("#")[1]}`),e=i&&"#"!==i?i.trim():null}return e?e.split(",").map((t=>Mt(t))).join(","):null},we={find:(t,e=document.documentElement)=>[].concat(...Element.prototype.querySelectorAll.call(e,t)),findOne:(t,e=document.documentElement)=>Element.prototype.querySelector.call(e,t),children:(t,e)=>[].concat(...t.children).filter((t=>t.matches(e))),parents(t,e){const i=[];let n=t.parentNode.closest(e);for(;n;)i.push(n),n=n.parentNode.closest(e);return 
i},prev(t,e){let i=t.previousElementSibling;for(;i;){if(i.matches(e))return[i];i=i.previousElementSibling}return[]},next(t,e){let i=t.nextElementSibling;for(;i;){if(i.matches(e))return[i];i=i.nextElementSibling}return[]},focusableChildren(t){const e=["a","button","input","textarea","select","details","[tabindex]",'[contenteditable="true"]'].map((t=>`${t}:not([tabindex^="-"])`)).join(",");return this.find(e,t).filter((t=>!Wt(t)&&Bt(t)))},getSelectorFromElement(t){const e=ye(t);return e&&we.findOne(e)?e:null},getElementFromSelector(t){const e=ye(t);return e?we.findOne(e):null},getMultipleElementsFromSelector(t){const e=ye(t);return e?we.find(e):[]}},Ee=(t,e="hide")=>{const i=`click.dismiss${t.EVENT_KEY}`,n=t.NAME;fe.on(document,i,`[data-bs-dismiss="${n}"]`,(function(i){if(["A","AREA"].includes(this.tagName)&&i.preventDefault(),Wt(this))return;const s=we.getElementFromSelector(this)||this.closest(`.${n}`);t.getOrCreateInstance(s)[e]()}))},Ae=".bs.alert",Te=`close${Ae}`,Ce=`closed${Ae}`;class Oe extends ve{static get NAME(){return"alert"}close(){if(fe.trigger(this._element,Te).defaultPrevented)return;this._element.classList.remove("show");const t=this._element.classList.contains("fade");this._queueCallback((()=>this._destroyElement()),this._element,t)}_destroyElement(){this._element.remove(),fe.trigger(this._element,Ce),this.dispose()}static jQueryInterface(t){return this.each((function(){const e=Oe.getOrCreateInstance(this);if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t](this)}}))}}Ee(Oe,"close"),Qt(Oe);const xe='[data-bs-toggle="button"]';class ke extends ve{static get NAME(){return"button"}toggle(){this._element.setAttribute("aria-pressed",this._element.classList.toggle("active"))}static jQueryInterface(t){return this.each((function(){const e=ke.getOrCreateInstance(this);"toggle"===t&&e[t]()}))}}fe.on(document,"click.bs.button.data-api",xe,(t=>{t.preventDefault();const e=t.target.closest(xe);ke.getOrCreateInstance(e).toggle()})),Qt(ke);const Le=".bs.swipe",Se=`touchstart${Le}`,De=`touchmove${Le}`,$e=`touchend${Le}`,Ie=`pointerdown${Le}`,Ne=`pointerup${Le}`,Pe={endCallback:null,leftCallback:null,rightCallback:null},Me={endCallback:"(function|null)",leftCallback:"(function|null)",rightCallback:"(function|null)"};class je extends be{constructor(t,e){super(),this._element=t,t&&je.isSupported()&&(this._config=this._getConfig(e),this._deltaX=0,this._supportPointerEvents=Boolean(window.PointerEvent),this._initEvents())}static get Default(){return Pe}static get DefaultType(){return Me}static get NAME(){return"swipe"}dispose(){fe.off(this._element,Le)}_start(t){this._supportPointerEvents?this._eventIsPointerPenTouch(t)&&(this._deltaX=t.clientX):this._deltaX=t.touches[0].clientX}_end(t){this._eventIsPointerPenTouch(t)&&(this._deltaX=t.clientX-this._deltaX),this._handleSwipe(),Xt(this._config.endCallback)}_move(t){this._deltaX=t.touches&&t.touches.length>1?0:t.touches[0].clientX-this._deltaX}_handleSwipe(){const t=Math.abs(this._deltaX);if(t<=40)return;const e=t/this._deltaX;this._deltaX=0,e&&Xt(e>0?this._config.rightCallback:this._config.leftCallback)}_initEvents(){this._supportPointerEvents?(fe.on(this._element,Ie,(t=>this._start(t))),fe.on(this._element,Ne,(t=>this._end(t))),this._element.classList.add("pointer-event")):(fe.on(this._element,Se,(t=>this._start(t))),fe.on(this._element,De,(t=>this._move(t))),fe.on(this._element,$e,(t=>this._end(t))))}_eventIsPointerPenTouch(t){return 
this._supportPointerEvents&&("pen"===t.pointerType||"touch"===t.pointerType)}static isSupported(){return"ontouchstart"in document.documentElement||navigator.maxTouchPoints>0}}const Fe=".bs.carousel",He=".data-api",Be="ArrowLeft",We="ArrowRight",ze="next",Re="prev",qe="left",Ve="right",Ye=`slide${Fe}`,Ke=`slid${Fe}`,Qe=`keydown${Fe}`,Xe=`mouseenter${Fe}`,Ue=`mouseleave${Fe}`,Ge=`dragstart${Fe}`,Je=`load${Fe}${He}`,Ze=`click${Fe}${He}`,ti="carousel",ei="active",ii=".active",ni=".carousel-item",si=ii+ni,oi={[Be]:Ve,[We]:qe},ri={interval:5e3,keyboard:!0,pause:"hover",ride:!1,touch:!0,wrap:!0},ai={interval:"(number|boolean)",keyboard:"boolean",pause:"(string|boolean)",ride:"(boolean|string)",touch:"boolean",wrap:"boolean"};class li extends ve{constructor(t,e){super(t,e),this._interval=null,this._activeElement=null,this._isSliding=!1,this.touchTimeout=null,this._swipeHelper=null,this._indicatorsElement=we.findOne(".carousel-indicators",this._element),this._addEventListeners(),this._config.ride===ti&&this.cycle()}static get Default(){return ri}static get DefaultType(){return ai}static get NAME(){return"carousel"}next(){this._slide(ze)}nextWhenVisible(){!document.hidden&&Bt(this._element)&&this.next()}prev(){this._slide(Re)}pause(){this._isSliding&&jt(this._element),this._clearInterval()}cycle(){this._clearInterval(),this._updateInterval(),this._interval=setInterval((()=>this.nextWhenVisible()),this._config.interval)}_maybeEnableCycle(){this._config.ride&&(this._isSliding?fe.one(this._element,Ke,(()=>this.cycle())):this.cycle())}to(t){const e=this._getItems();if(t>e.length-1||t<0)return;if(this._isSliding)return void fe.one(this._element,Ke,(()=>this.to(t)));const i=this._getItemIndex(this._getActive());if(i===t)return;const n=t>i?ze:Re;this._slide(n,e[t])}dispose(){this._swipeHelper&&this._swipeHelper.dispose(),super.dispose()}_configAfterMerge(t){return t.defaultInterval=t.interval,t}_addEventListeners(){this._config.keyboard&&fe.on(this._element,Qe,(t=>this._keydown(t))),"hover"===this._config.pause&&(fe.on(this._element,Xe,(()=>this.pause())),fe.on(this._element,Ue,(()=>this._maybeEnableCycle()))),this._config.touch&&je.isSupported()&&this._addTouchEventListeners()}_addTouchEventListeners(){for(const t of we.find(".carousel-item img",this._element))fe.on(t,Ge,(t=>t.preventDefault()));const t={leftCallback:()=>this._slide(this._directionToOrder(qe)),rightCallback:()=>this._slide(this._directionToOrder(Ve)),endCallback:()=>{"hover"===this._config.pause&&(this.pause(),this.touchTimeout&&clearTimeout(this.touchTimeout),this.touchTimeout=setTimeout((()=>this._maybeEnableCycle()),500+this._config.interval))}};this._swipeHelper=new je(this._element,t)}_keydown(t){if(/input|textarea/i.test(t.target.tagName))return;const e=oi[t.key];e&&(t.preventDefault(),this._slide(this._directionToOrder(e)))}_getItemIndex(t){return this._getItems().indexOf(t)}_setActiveIndicatorElement(t){if(!this._indicatorsElement)return;const e=we.findOne(ii,this._indicatorsElement);e.classList.remove(ei),e.removeAttribute("aria-current");const i=we.findOne(`[data-bs-slide-to="${t}"]`,this._indicatorsElement);i&&(i.classList.add(ei),i.setAttribute("aria-current","true"))}_updateInterval(){const t=this._activeElement||this._getActive();if(!t)return;const e=Number.parseInt(t.getAttribute("data-bs-interval"),10);this._config.interval=e||this._config.defaultInterval}_slide(t,e=null){if(this._isSliding)return;const i=this._getActive(),n=t===ze,s=e||Gt(this._getItems(),i,n,this._config.wrap);if(s===i)return;const 
o=this._getItemIndex(s),r=e=>fe.trigger(this._element,e,{relatedTarget:s,direction:this._orderToDirection(t),from:this._getItemIndex(i),to:o});if(r(Ye).defaultPrevented)return;if(!i||!s)return;const a=Boolean(this._interval);this.pause(),this._isSliding=!0,this._setActiveIndicatorElement(o),this._activeElement=s;const l=n?"carousel-item-start":"carousel-item-end",c=n?"carousel-item-next":"carousel-item-prev";s.classList.add(c),qt(s),i.classList.add(l),s.classList.add(l),this._queueCallback((()=>{s.classList.remove(l,c),s.classList.add(ei),i.classList.remove(ei,c,l),this._isSliding=!1,r(Ke)}),i,this._isAnimated()),a&&this.cycle()}_isAnimated(){return this._element.classList.contains("slide")}_getActive(){return we.findOne(si,this._element)}_getItems(){return we.find(ni,this._element)}_clearInterval(){this._interval&&(clearInterval(this._interval),this._interval=null)}_directionToOrder(t){return Kt()?t===qe?Re:ze:t===qe?ze:Re}_orderToDirection(t){return Kt()?t===Re?qe:Ve:t===Re?Ve:qe}static jQueryInterface(t){return this.each((function(){const e=li.getOrCreateInstance(this,t);if("number"!=typeof t){if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t]()}}else e.to(t)}))}}fe.on(document,Ze,"[data-bs-slide], [data-bs-slide-to]",(function(t){const e=we.getElementFromSelector(this);if(!e||!e.classList.contains(ti))return;t.preventDefault();const i=li.getOrCreateInstance(e),n=this.getAttribute("data-bs-slide-to");return n?(i.to(n),void i._maybeEnableCycle()):"next"===_e.getDataAttribute(this,"slide")?(i.next(),void i._maybeEnableCycle()):(i.prev(),void i._maybeEnableCycle())})),fe.on(window,Je,(()=>{const t=we.find('[data-bs-ride="carousel"]');for(const e of t)li.getOrCreateInstance(e)})),Qt(li);const ci=".bs.collapse",hi=`show${ci}`,di=`shown${ci}`,ui=`hide${ci}`,fi=`hidden${ci}`,pi=`click${ci}.data-api`,mi="show",gi="collapse",_i="collapsing",bi=`:scope .${gi} .${gi}`,vi='[data-bs-toggle="collapse"]',yi={parent:null,toggle:!0},wi={parent:"(null|element)",toggle:"boolean"};class Ei extends ve{constructor(t,e){super(t,e),this._isTransitioning=!1,this._triggerArray=[];const i=we.find(vi);for(const t of i){const e=we.getSelectorFromElement(t),i=we.find(e).filter((t=>t===this._element));null!==e&&i.length&&this._triggerArray.push(t)}this._initializeChildren(),this._config.parent||this._addAriaAndCollapsedClass(this._triggerArray,this._isShown()),this._config.toggle&&this.toggle()}static get Default(){return yi}static get DefaultType(){return wi}static get NAME(){return"collapse"}toggle(){this._isShown()?this.hide():this.show()}show(){if(this._isTransitioning||this._isShown())return;let t=[];if(this._config.parent&&(t=this._getFirstLevelChildren(".collapse.show, .collapse.collapsing").filter((t=>t!==this._element)).map((t=>Ei.getOrCreateInstance(t,{toggle:!1})))),t.length&&t[0]._isTransitioning)return;if(fe.trigger(this._element,hi).defaultPrevented)return;for(const e of t)e.hide();const e=this._getDimension();this._element.classList.remove(gi),this._element.classList.add(_i),this._element.style[e]=0,this._addAriaAndCollapsedClass(this._triggerArray,!0),this._isTransitioning=!0;const 
i=`scroll${e[0].toUpperCase()+e.slice(1)}`;this._queueCallback((()=>{this._isTransitioning=!1,this._element.classList.remove(_i),this._element.classList.add(gi,mi),this._element.style[e]="",fe.trigger(this._element,di)}),this._element,!0),this._element.style[e]=`${this._element[i]}px`}hide(){if(this._isTransitioning||!this._isShown())return;if(fe.trigger(this._element,ui).defaultPrevented)return;const t=this._getDimension();this._element.style[t]=`${this._element.getBoundingClientRect()[t]}px`,qt(this._element),this._element.classList.add(_i),this._element.classList.remove(gi,mi);for(const t of this._triggerArray){const e=we.getElementFromSelector(t);e&&!this._isShown(e)&&this._addAriaAndCollapsedClass([t],!1)}this._isTransitioning=!0,this._element.style[t]="",this._queueCallback((()=>{this._isTransitioning=!1,this._element.classList.remove(_i),this._element.classList.add(gi),fe.trigger(this._element,fi)}),this._element,!0)}_isShown(t=this._element){return t.classList.contains(mi)}_configAfterMerge(t){return t.toggle=Boolean(t.toggle),t.parent=Ht(t.parent),t}_getDimension(){return this._element.classList.contains("collapse-horizontal")?"width":"height"}_initializeChildren(){if(!this._config.parent)return;const t=this._getFirstLevelChildren(vi);for(const e of t){const t=we.getElementFromSelector(e);t&&this._addAriaAndCollapsedClass([e],this._isShown(t))}}_getFirstLevelChildren(t){const e=we.find(bi,this._config.parent);return we.find(t,this._config.parent).filter((t=>!e.includes(t)))}_addAriaAndCollapsedClass(t,e){if(t.length)for(const i of t)i.classList.toggle("collapsed",!e),i.setAttribute("aria-expanded",e)}static jQueryInterface(t){const e={};return"string"==typeof t&&/show|hide/.test(t)&&(e.toggle=!1),this.each((function(){const i=Ei.getOrCreateInstance(this,e);if("string"==typeof t){if(void 0===i[t])throw new TypeError(`No method named "${t}"`);i[t]()}}))}}fe.on(document,pi,vi,(function(t){("A"===t.target.tagName||t.delegateTarget&&"A"===t.delegateTarget.tagName)&&t.preventDefault();for(const t of we.getMultipleElementsFromSelector(this))Ei.getOrCreateInstance(t,{toggle:!1}).toggle()})),Qt(Ei);const Ai="dropdown",Ti=".bs.dropdown",Ci=".data-api",Oi="ArrowUp",xi="ArrowDown",ki=`hide${Ti}`,Li=`hidden${Ti}`,Si=`show${Ti}`,Di=`shown${Ti}`,$i=`click${Ti}${Ci}`,Ii=`keydown${Ti}${Ci}`,Ni=`keyup${Ti}${Ci}`,Pi="show",Mi='[data-bs-toggle="dropdown"]:not(.disabled):not(:disabled)',ji=`${Mi}.${Pi}`,Fi=".dropdown-menu",Hi=Kt()?"top-end":"top-start",Bi=Kt()?"top-start":"top-end",Wi=Kt()?"bottom-end":"bottom-start",zi=Kt()?"bottom-start":"bottom-end",Ri=Kt()?"left-start":"right-start",qi=Kt()?"right-start":"left-start",Vi={autoClose:!0,boundary:"clippingParents",display:"dynamic",offset:[0,2],popperConfig:null,reference:"toggle"},Yi={autoClose:"(boolean|string)",boundary:"(string|element)",display:"string",offset:"(array|string|function)",popperConfig:"(null|object|function)",reference:"(string|element|object)"};class Ki extends ve{constructor(t,e){super(t,e),this._popper=null,this._parent=this._element.parentNode,this._menu=we.next(this._element,Fi)[0]||we.prev(this._element,Fi)[0]||we.findOne(Fi,this._parent),this._inNavbar=this._detectNavbar()}static get Default(){return Vi}static get DefaultType(){return Yi}static get NAME(){return Ai}toggle(){return this._isShown()?this.hide():this.show()}show(){if(Wt(this._element)||this._isShown())return;const t={relatedTarget:this._element};if(!fe.trigger(this._element,Si,t).defaultPrevented){if(this._createPopper(),"ontouchstart"in 
document.documentElement&&!this._parent.closest(".navbar-nav"))for(const t of[].concat(...document.body.children))fe.on(t,"mouseover",Rt);this._element.focus(),this._element.setAttribute("aria-expanded",!0),this._menu.classList.add(Pi),this._element.classList.add(Pi),fe.trigger(this._element,Di,t)}}hide(){if(Wt(this._element)||!this._isShown())return;const t={relatedTarget:this._element};this._completeHide(t)}dispose(){this._popper&&this._popper.destroy(),super.dispose()}update(){this._inNavbar=this._detectNavbar(),this._popper&&this._popper.update()}_completeHide(t){if(!fe.trigger(this._element,ki,t).defaultPrevented){if("ontouchstart"in document.documentElement)for(const t of[].concat(...document.body.children))fe.off(t,"mouseover",Rt);this._popper&&this._popper.destroy(),this._menu.classList.remove(Pi),this._element.classList.remove(Pi),this._element.setAttribute("aria-expanded","false"),_e.removeDataAttribute(this._menu,"popper"),fe.trigger(this._element,Li,t)}}_getConfig(t){if("object"==typeof(t=super._getConfig(t)).reference&&!Ft(t.reference)&&"function"!=typeof t.reference.getBoundingClientRect)throw new TypeError(`${Ai.toUpperCase()}: Option "reference" provided type "object" without a required "getBoundingClientRect" method.`);return t}_createPopper(){if(void 0===e)throw new TypeError("Bootstrap's dropdowns require Popper (https://popper.js.org)");let t=this._element;"parent"===this._config.reference?t=this._parent:Ft(this._config.reference)?t=Ht(this._config.reference):"object"==typeof this._config.reference&&(t=this._config.reference);const i=this._getPopperConfig();this._popper=Dt(t,this._menu,i)}_isShown(){return this._menu.classList.contains(Pi)}_getPlacement(){const t=this._parent;if(t.classList.contains("dropend"))return Ri;if(t.classList.contains("dropstart"))return qi;if(t.classList.contains("dropup-center"))return"top";if(t.classList.contains("dropdown-center"))return"bottom";const e="end"===getComputedStyle(this._menu).getPropertyValue("--bs-position").trim();return t.classList.contains("dropup")?e?Bi:Hi:e?zi:Wi}_detectNavbar(){return null!==this._element.closest(".navbar")}_getOffset(){const{offset:t}=this._config;return"string"==typeof t?t.split(",").map((t=>Number.parseInt(t,10))):"function"==typeof t?e=>t(e,this._element):t}_getPopperConfig(){const t={placement:this._getPlacement(),modifiers:[{name:"preventOverflow",options:{boundary:this._config.boundary}},{name:"offset",options:{offset:this._getOffset()}}]};return(this._inNavbar||"static"===this._config.display)&&(_e.setDataAttribute(this._menu,"popper","static"),t.modifiers=[{name:"applyStyles",enabled:!1}]),{...t,...Xt(this._config.popperConfig,[t])}}_selectMenuItem({key:t,target:e}){const i=we.find(".dropdown-menu .dropdown-item:not(.disabled):not(:disabled)",this._menu).filter((t=>Bt(t)));i.length&&Gt(i,e,t===xi,!i.includes(e)).focus()}static jQueryInterface(t){return this.each((function(){const e=Ki.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t]()}}))}static clearMenus(t){if(2===t.button||"keyup"===t.type&&"Tab"!==t.key)return;const e=we.find(ji);for(const i of e){const e=Ki.getInstance(i);if(!e||!1===e._config.autoClose)continue;const n=t.composedPath(),s=n.includes(e._menu);if(n.includes(e._element)||"inside"===e._config.autoClose&&!s||"outside"===e._config.autoClose&&s)continue;if(e._menu.contains(t.target)&&("keyup"===t.type&&"Tab"===t.key||/input|select|option|textarea|form/i.test(t.target.tagName)))continue;const 
o={relatedTarget:e._element};"click"===t.type&&(o.clickEvent=t),e._completeHide(o)}}static dataApiKeydownHandler(t){const e=/input|textarea/i.test(t.target.tagName),i="Escape"===t.key,n=[Oi,xi].includes(t.key);if(!n&&!i)return;if(e&&!i)return;t.preventDefault();const s=this.matches(Mi)?this:we.prev(this,Mi)[0]||we.next(this,Mi)[0]||we.findOne(Mi,t.delegateTarget.parentNode),o=Ki.getOrCreateInstance(s);if(n)return t.stopPropagation(),o.show(),void o._selectMenuItem(t);o._isShown()&&(t.stopPropagation(),o.hide(),s.focus())}}fe.on(document,Ii,Mi,Ki.dataApiKeydownHandler),fe.on(document,Ii,Fi,Ki.dataApiKeydownHandler),fe.on(document,$i,Ki.clearMenus),fe.on(document,Ni,Ki.clearMenus),fe.on(document,$i,Mi,(function(t){t.preventDefault(),Ki.getOrCreateInstance(this).toggle()})),Qt(Ki);const Qi="backdrop",Xi="show",Ui=`mousedown.bs.${Qi}`,Gi={className:"modal-backdrop",clickCallback:null,isAnimated:!1,isVisible:!0,rootElement:"body"},Ji={className:"string",clickCallback:"(function|null)",isAnimated:"boolean",isVisible:"boolean",rootElement:"(element|string)"};class Zi extends be{constructor(t){super(),this._config=this._getConfig(t),this._isAppended=!1,this._element=null}static get Default(){return Gi}static get DefaultType(){return Ji}static get NAME(){return Qi}show(t){if(!this._config.isVisible)return void Xt(t);this._append();const e=this._getElement();this._config.isAnimated&&qt(e),e.classList.add(Xi),this._emulateAnimation((()=>{Xt(t)}))}hide(t){this._config.isVisible?(this._getElement().classList.remove(Xi),this._emulateAnimation((()=>{this.dispose(),Xt(t)}))):Xt(t)}dispose(){this._isAppended&&(fe.off(this._element,Ui),this._element.remove(),this._isAppended=!1)}_getElement(){if(!this._element){const t=document.createElement("div");t.className=this._config.className,this._config.isAnimated&&t.classList.add("fade"),this._element=t}return this._element}_configAfterMerge(t){return t.rootElement=Ht(t.rootElement),t}_append(){if(this._isAppended)return;const t=this._getElement();this._config.rootElement.append(t),fe.on(t,Ui,(()=>{Xt(this._config.clickCallback)})),this._isAppended=!0}_emulateAnimation(t){Ut(t,this._getElement(),this._config.isAnimated)}}const tn=".bs.focustrap",en=`focusin${tn}`,nn=`keydown.tab${tn}`,sn="backward",on={autofocus:!0,trapElement:null},rn={autofocus:"boolean",trapElement:"element"};class an extends be{constructor(t){super(),this._config=this._getConfig(t),this._isActive=!1,this._lastTabNavDirection=null}static get Default(){return on}static get DefaultType(){return rn}static get NAME(){return"focustrap"}activate(){this._isActive||(this._config.autofocus&&this._config.trapElement.focus(),fe.off(document,tn),fe.on(document,en,(t=>this._handleFocusin(t))),fe.on(document,nn,(t=>this._handleKeydown(t))),this._isActive=!0)}deactivate(){this._isActive&&(this._isActive=!1,fe.off(document,tn))}_handleFocusin(t){const{trapElement:e}=this._config;if(t.target===document||t.target===e||e.contains(t.target))return;const i=we.focusableChildren(e);0===i.length?e.focus():this._lastTabNavDirection===sn?i[i.length-1].focus():i[0].focus()}_handleKeydown(t){"Tab"===t.key&&(this._lastTabNavDirection=t.shiftKey?sn:"forward")}}const ln=".fixed-top, .fixed-bottom, .is-fixed, .sticky-top",cn=".sticky-top",hn="padding-right",dn="margin-right";class un{constructor(){this._element=document.body}getWidth(){const t=document.documentElement.clientWidth;return Math.abs(window.innerWidth-t)}hide(){const 
t=this.getWidth();this._disableOverFlow(),this._setElementAttributes(this._element,hn,(e=>e+t)),this._setElementAttributes(ln,hn,(e=>e+t)),this._setElementAttributes(cn,dn,(e=>e-t))}reset(){this._resetElementAttributes(this._element,"overflow"),this._resetElementAttributes(this._element,hn),this._resetElementAttributes(ln,hn),this._resetElementAttributes(cn,dn)}isOverflowing(){return this.getWidth()>0}_disableOverFlow(){this._saveInitialAttribute(this._element,"overflow"),this._element.style.overflow="hidden"}_setElementAttributes(t,e,i){const n=this.getWidth();this._applyManipulationCallback(t,(t=>{if(t!==this._element&&window.innerWidth>t.clientWidth+n)return;this._saveInitialAttribute(t,e);const s=window.getComputedStyle(t).getPropertyValue(e);t.style.setProperty(e,`${i(Number.parseFloat(s))}px`)}))}_saveInitialAttribute(t,e){const i=t.style.getPropertyValue(e);i&&_e.setDataAttribute(t,e,i)}_resetElementAttributes(t,e){this._applyManipulationCallback(t,(t=>{const i=_e.getDataAttribute(t,e);null!==i?(_e.removeDataAttribute(t,e),t.style.setProperty(e,i)):t.style.removeProperty(e)}))}_applyManipulationCallback(t,e){if(Ft(t))e(t);else for(const i of we.find(t,this._element))e(i)}}const fn=".bs.modal",pn=`hide${fn}`,mn=`hidePrevented${fn}`,gn=`hidden${fn}`,_n=`show${fn}`,bn=`shown${fn}`,vn=`resize${fn}`,yn=`click.dismiss${fn}`,wn=`mousedown.dismiss${fn}`,En=`keydown.dismiss${fn}`,An=`click${fn}.data-api`,Tn="modal-open",Cn="show",On="modal-static",xn={backdrop:!0,focus:!0,keyboard:!0},kn={backdrop:"(boolean|string)",focus:"boolean",keyboard:"boolean"};class Ln extends ve{constructor(t,e){super(t,e),this._dialog=we.findOne(".modal-dialog",this._element),this._backdrop=this._initializeBackDrop(),this._focustrap=this._initializeFocusTrap(),this._isShown=!1,this._isTransitioning=!1,this._scrollBar=new un,this._addEventListeners()}static get Default(){return xn}static get DefaultType(){return kn}static get NAME(){return"modal"}toggle(t){return this._isShown?this.hide():this.show(t)}show(t){this._isShown||this._isTransitioning||fe.trigger(this._element,_n,{relatedTarget:t}).defaultPrevented||(this._isShown=!0,this._isTransitioning=!0,this._scrollBar.hide(),document.body.classList.add(Tn),this._adjustDialog(),this._backdrop.show((()=>this._showElement(t))))}hide(){this._isShown&&!this._isTransitioning&&(fe.trigger(this._element,pn).defaultPrevented||(this._isShown=!1,this._isTransitioning=!0,this._focustrap.deactivate(),this._element.classList.remove(Cn),this._queueCallback((()=>this._hideModal()),this._element,this._isAnimated())))}dispose(){fe.off(window,fn),fe.off(this._dialog,fn),this._backdrop.dispose(),this._focustrap.deactivate(),super.dispose()}handleUpdate(){this._adjustDialog()}_initializeBackDrop(){return new Zi({isVisible:Boolean(this._config.backdrop),isAnimated:this._isAnimated()})}_initializeFocusTrap(){return new an({trapElement:this._element})}_showElement(t){document.body.contains(this._element)||document.body.append(this._element),this._element.style.display="block",this._element.removeAttribute("aria-hidden"),this._element.setAttribute("aria-modal",!0),this._element.setAttribute("role","dialog"),this._element.scrollTop=0;const 
e=we.findOne(".modal-body",this._dialog);e&&(e.scrollTop=0),qt(this._element),this._element.classList.add(Cn),this._queueCallback((()=>{this._config.focus&&this._focustrap.activate(),this._isTransitioning=!1,fe.trigger(this._element,bn,{relatedTarget:t})}),this._dialog,this._isAnimated())}_addEventListeners(){fe.on(this._element,En,(t=>{"Escape"===t.key&&(this._config.keyboard?this.hide():this._triggerBackdropTransition())})),fe.on(window,vn,(()=>{this._isShown&&!this._isTransitioning&&this._adjustDialog()})),fe.on(this._element,wn,(t=>{fe.one(this._element,yn,(e=>{this._element===t.target&&this._element===e.target&&("static"!==this._config.backdrop?this._config.backdrop&&this.hide():this._triggerBackdropTransition())}))}))}_hideModal(){this._element.style.display="none",this._element.setAttribute("aria-hidden",!0),this._element.removeAttribute("aria-modal"),this._element.removeAttribute("role"),this._isTransitioning=!1,this._backdrop.hide((()=>{document.body.classList.remove(Tn),this._resetAdjustments(),this._scrollBar.reset(),fe.trigger(this._element,gn)}))}_isAnimated(){return this._element.classList.contains("fade")}_triggerBackdropTransition(){if(fe.trigger(this._element,mn).defaultPrevented)return;const t=this._element.scrollHeight>document.documentElement.clientHeight,e=this._element.style.overflowY;"hidden"===e||this._element.classList.contains(On)||(t||(this._element.style.overflowY="hidden"),this._element.classList.add(On),this._queueCallback((()=>{this._element.classList.remove(On),this._queueCallback((()=>{this._element.style.overflowY=e}),this._dialog)}),this._dialog),this._element.focus())}_adjustDialog(){const t=this._element.scrollHeight>document.documentElement.clientHeight,e=this._scrollBar.getWidth(),i=e>0;if(i&&!t){const t=Kt()?"paddingLeft":"paddingRight";this._element.style[t]=`${e}px`}if(!i&&t){const t=Kt()?"paddingRight":"paddingLeft";this._element.style[t]=`${e}px`}}_resetAdjustments(){this._element.style.paddingLeft="",this._element.style.paddingRight=""}static jQueryInterface(t,e){return this.each((function(){const i=Ln.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===i[t])throw new TypeError(`No method named "${t}"`);i[t](e)}}))}}fe.on(document,An,'[data-bs-toggle="modal"]',(function(t){const e=we.getElementFromSelector(this);["A","AREA"].includes(this.tagName)&&t.preventDefault(),fe.one(e,_n,(t=>{t.defaultPrevented||fe.one(e,gn,(()=>{Bt(this)&&this.focus()}))}));const i=we.findOne(".modal.show");i&&Ln.getInstance(i).hide(),Ln.getOrCreateInstance(e).toggle(this)})),Ee(Ln),Qt(Ln);const Sn=".bs.offcanvas",Dn=".data-api",$n=`load${Sn}${Dn}`,In="show",Nn="showing",Pn="hiding",Mn=".offcanvas.show",jn=`show${Sn}`,Fn=`shown${Sn}`,Hn=`hide${Sn}`,Bn=`hidePrevented${Sn}`,Wn=`hidden${Sn}`,zn=`resize${Sn}`,Rn=`click${Sn}${Dn}`,qn=`keydown.dismiss${Sn}`,Vn={backdrop:!0,keyboard:!0,scroll:!1},Yn={backdrop:"(boolean|string)",keyboard:"boolean",scroll:"boolean"};class Kn extends ve{constructor(t,e){super(t,e),this._isShown=!1,this._backdrop=this._initializeBackDrop(),this._focustrap=this._initializeFocusTrap(),this._addEventListeners()}static get Default(){return Vn}static get DefaultType(){return Yn}static get NAME(){return"offcanvas"}toggle(t){return this._isShown?this.hide():this.show(t)}show(t){this._isShown||fe.trigger(this._element,jn,{relatedTarget:t}).defaultPrevented||(this._isShown=!0,this._backdrop.show(),this._config.scroll||(new 
un).hide(),this._element.setAttribute("aria-modal",!0),this._element.setAttribute("role","dialog"),this._element.classList.add(Nn),this._queueCallback((()=>{this._config.scroll&&!this._config.backdrop||this._focustrap.activate(),this._element.classList.add(In),this._element.classList.remove(Nn),fe.trigger(this._element,Fn,{relatedTarget:t})}),this._element,!0))}hide(){this._isShown&&(fe.trigger(this._element,Hn).defaultPrevented||(this._focustrap.deactivate(),this._element.blur(),this._isShown=!1,this._element.classList.add(Pn),this._backdrop.hide(),this._queueCallback((()=>{this._element.classList.remove(In,Pn),this._element.removeAttribute("aria-modal"),this._element.removeAttribute("role"),this._config.scroll||(new un).reset(),fe.trigger(this._element,Wn)}),this._element,!0)))}dispose(){this._backdrop.dispose(),this._focustrap.deactivate(),super.dispose()}_initializeBackDrop(){const t=Boolean(this._config.backdrop);return new Zi({className:"offcanvas-backdrop",isVisible:t,isAnimated:!0,rootElement:this._element.parentNode,clickCallback:t?()=>{"static"!==this._config.backdrop?this.hide():fe.trigger(this._element,Bn)}:null})}_initializeFocusTrap(){return new an({trapElement:this._element})}_addEventListeners(){fe.on(this._element,qn,(t=>{"Escape"===t.key&&(this._config.keyboard?this.hide():fe.trigger(this._element,Bn))}))}static jQueryInterface(t){return this.each((function(){const e=Kn.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t](this)}}))}}fe.on(document,Rn,'[data-bs-toggle="offcanvas"]',(function(t){const e=we.getElementFromSelector(this);if(["A","AREA"].includes(this.tagName)&&t.preventDefault(),Wt(this))return;fe.one(e,Wn,(()=>{Bt(this)&&this.focus()}));const i=we.findOne(Mn);i&&i!==e&&Kn.getInstance(i).hide(),Kn.getOrCreateInstance(e).toggle(this)})),fe.on(window,$n,(()=>{for(const t of we.find(Mn))Kn.getOrCreateInstance(t).show()})),fe.on(window,zn,(()=>{for(const t of we.find("[aria-modal][class*=show][class*=offcanvas-]"))"fixed"!==getComputedStyle(t).position&&Kn.getOrCreateInstance(t).hide()})),Ee(Kn),Qt(Kn);const Qn={"*":["class","dir","id","lang","role",/^aria-[\w-]*$/i],a:["target","href","title","rel"],area:[],b:[],br:[],col:[],code:[],dd:[],div:[],dl:[],dt:[],em:[],hr:[],h1:[],h2:[],h3:[],h4:[],h5:[],h6:[],i:[],img:["src","srcset","alt","title","width","height"],li:[],ol:[],p:[],pre:[],s:[],small:[],span:[],sub:[],sup:[],strong:[],u:[],ul:[]},Xn=new Set(["background","cite","href","itemtype","longdesc","poster","src","xlink:href"]),Un=/^(?!javascript:)(?:[a-z0-9+.-]+:|[^&:/?#]*(?:[/?#]|$))/i,Gn=(t,e)=>{const i=t.nodeName.toLowerCase();return e.includes(i)?!Xn.has(i)||Boolean(Un.test(t.nodeValue)):e.filter((t=>t instanceof RegExp)).some((t=>t.test(i)))},Jn={allowList:Qn,content:{},extraClass:"",html:!1,sanitize:!0,sanitizeFn:null,template:"
"},Zn={allowList:"object",content:"object",extraClass:"(string|function)",html:"boolean",sanitize:"boolean",sanitizeFn:"(null|function)",template:"string"},ts={entry:"(string|element|function|null)",selector:"(string|element)"};class es extends be{constructor(t){super(),this._config=this._getConfig(t)}static get Default(){return Jn}static get DefaultType(){return Zn}static get NAME(){return"TemplateFactory"}getContent(){return Object.values(this._config.content).map((t=>this._resolvePossibleFunction(t))).filter(Boolean)}hasContent(){return this.getContent().length>0}changeContent(t){return this._checkContent(t),this._config.content={...this._config.content,...t},this}toHtml(){const t=document.createElement("div");t.innerHTML=this._maybeSanitize(this._config.template);for(const[e,i]of Object.entries(this._config.content))this._setContent(t,i,e);const e=t.children[0],i=this._resolvePossibleFunction(this._config.extraClass);return i&&e.classList.add(...i.split(" ")),e}_typeCheckConfig(t){super._typeCheckConfig(t),this._checkContent(t.content)}_checkContent(t){for(const[e,i]of Object.entries(t))super._typeCheckConfig({selector:e,entry:i},ts)}_setContent(t,e,i){const n=we.findOne(i,t);n&&((e=this._resolvePossibleFunction(e))?Ft(e)?this._putElementInTemplate(Ht(e),n):this._config.html?n.innerHTML=this._maybeSanitize(e):n.textContent=e:n.remove())}_maybeSanitize(t){return this._config.sanitize?function(t,e,i){if(!t.length)return t;if(i&&"function"==typeof i)return i(t);const n=(new window.DOMParser).parseFromString(t,"text/html"),s=[].concat(...n.body.querySelectorAll("*"));for(const t of s){const i=t.nodeName.toLowerCase();if(!Object.keys(e).includes(i)){t.remove();continue}const n=[].concat(...t.attributes),s=[].concat(e["*"]||[],e[i]||[]);for(const e of n)Gn(e,s)||t.removeAttribute(e.nodeName)}return n.body.innerHTML}(t,this._config.allowList,this._config.sanitizeFn):t}_resolvePossibleFunction(t){return Xt(t,[this])}_putElementInTemplate(t,e){if(this._config.html)return e.innerHTML="",void e.append(t);e.textContent=t.textContent}}const is=new Set(["sanitize","allowList","sanitizeFn"]),ns="fade",ss="show",os=".tooltip-inner",rs=".modal",as="hide.bs.modal",ls="hover",cs="focus",hs={AUTO:"auto",TOP:"top",RIGHT:Kt()?"left":"right",BOTTOM:"bottom",LEFT:Kt()?"right":"left"},ds={allowList:Qn,animation:!0,boundary:"clippingParents",container:!1,customClass:"",delay:0,fallbackPlacements:["top","right","bottom","left"],html:!1,offset:[0,6],placement:"top",popperConfig:null,sanitize:!0,sanitizeFn:null,selector:!1,template:'',title:"",trigger:"hover focus"},us={allowList:"object",animation:"boolean",boundary:"(string|element)",container:"(string|element|boolean)",customClass:"(string|function)",delay:"(number|object)",fallbackPlacements:"array",html:"boolean",offset:"(array|string|function)",placement:"(string|function)",popperConfig:"(null|object|function)",sanitize:"boolean",sanitizeFn:"(null|function)",selector:"(string|boolean)",template:"string",title:"(string|element|function)",trigger:"string"};class fs extends ve{constructor(t,i){if(void 0===e)throw new TypeError("Bootstrap's tooltips require Popper (https://popper.js.org)");super(t,i),this._isEnabled=!0,this._timeout=0,this._isHovered=null,this._activeTrigger={},this._popper=null,this._templateFactory=null,this._newContent=null,this.tip=null,this._setListeners(),this._config.selector||this._fixTitle()}static get Default(){return ds}static get DefaultType(){return us}static get 
NAME(){return"tooltip"}enable(){this._isEnabled=!0}disable(){this._isEnabled=!1}toggleEnabled(){this._isEnabled=!this._isEnabled}toggle(){this._isEnabled&&(this._activeTrigger.click=!this._activeTrigger.click,this._isShown()?this._leave():this._enter())}dispose(){clearTimeout(this._timeout),fe.off(this._element.closest(rs),as,this._hideModalHandler),this._element.getAttribute("data-bs-original-title")&&this._element.setAttribute("title",this._element.getAttribute("data-bs-original-title")),this._disposePopper(),super.dispose()}show(){if("none"===this._element.style.display)throw new Error("Please use show on visible elements");if(!this._isWithContent()||!this._isEnabled)return;const t=fe.trigger(this._element,this.constructor.eventName("show")),e=(zt(this._element)||this._element.ownerDocument.documentElement).contains(this._element);if(t.defaultPrevented||!e)return;this._disposePopper();const i=this._getTipElement();this._element.setAttribute("aria-describedby",i.getAttribute("id"));const{container:n}=this._config;if(this._element.ownerDocument.documentElement.contains(this.tip)||(n.append(i),fe.trigger(this._element,this.constructor.eventName("inserted"))),this._popper=this._createPopper(i),i.classList.add(ss),"ontouchstart"in document.documentElement)for(const t of[].concat(...document.body.children))fe.on(t,"mouseover",Rt);this._queueCallback((()=>{fe.trigger(this._element,this.constructor.eventName("shown")),!1===this._isHovered&&this._leave(),this._isHovered=!1}),this.tip,this._isAnimated())}hide(){if(this._isShown()&&!fe.trigger(this._element,this.constructor.eventName("hide")).defaultPrevented){if(this._getTipElement().classList.remove(ss),"ontouchstart"in document.documentElement)for(const t of[].concat(...document.body.children))fe.off(t,"mouseover",Rt);this._activeTrigger.click=!1,this._activeTrigger[cs]=!1,this._activeTrigger[ls]=!1,this._isHovered=null,this._queueCallback((()=>{this._isWithActiveTrigger()||(this._isHovered||this._disposePopper(),this._element.removeAttribute("aria-describedby"),fe.trigger(this._element,this.constructor.eventName("hidden")))}),this.tip,this._isAnimated())}}update(){this._popper&&this._popper.update()}_isWithContent(){return Boolean(this._getTitle())}_getTipElement(){return this.tip||(this.tip=this._createTipElement(this._newContent||this._getContentForTemplate())),this.tip}_createTipElement(t){const e=this._getTemplateFactory(t).toHtml();if(!e)return null;e.classList.remove(ns,ss),e.classList.add(`bs-${this.constructor.NAME}-auto`);const i=(t=>{do{t+=Math.floor(1e6*Math.random())}while(document.getElementById(t));return t})(this.constructor.NAME).toString();return e.setAttribute("id",i),this._isAnimated()&&e.classList.add(ns),e}setContent(t){this._newContent=t,this._isShown()&&(this._disposePopper(),this.show())}_getTemplateFactory(t){return this._templateFactory?this._templateFactory.changeContent(t):this._templateFactory=new es({...this._config,content:t,extraClass:this._resolvePossibleFunction(this._config.customClass)}),this._templateFactory}_getContentForTemplate(){return{[os]:this._getTitle()}}_getTitle(){return this._resolvePossibleFunction(this._config.title)||this._element.getAttribute("data-bs-original-title")}_initializeOnDelegatedTarget(t){return this.constructor.getOrCreateInstance(t.delegateTarget,this._getDelegateConfig())}_isAnimated(){return this._config.animation||this.tip&&this.tip.classList.contains(ns)}_isShown(){return this.tip&&this.tip.classList.contains(ss)}_createPopper(t){const 
e=Xt(this._config.placement,[this,t,this._element]),i=hs[e.toUpperCase()];return Dt(this._element,t,this._getPopperConfig(i))}_getOffset(){const{offset:t}=this._config;return"string"==typeof t?t.split(",").map((t=>Number.parseInt(t,10))):"function"==typeof t?e=>t(e,this._element):t}_resolvePossibleFunction(t){return Xt(t,[this._element])}_getPopperConfig(t){const e={placement:t,modifiers:[{name:"flip",options:{fallbackPlacements:this._config.fallbackPlacements}},{name:"offset",options:{offset:this._getOffset()}},{name:"preventOverflow",options:{boundary:this._config.boundary}},{name:"arrow",options:{element:`.${this.constructor.NAME}-arrow`}},{name:"preSetPlacement",enabled:!0,phase:"beforeMain",fn:t=>{this._getTipElement().setAttribute("data-popper-placement",t.state.placement)}}]};return{...e,...Xt(this._config.popperConfig,[e])}}_setListeners(){const t=this._config.trigger.split(" ");for(const e of t)if("click"===e)fe.on(this._element,this.constructor.eventName("click"),this._config.selector,(t=>{this._initializeOnDelegatedTarget(t).toggle()}));else if("manual"!==e){const t=e===ls?this.constructor.eventName("mouseenter"):this.constructor.eventName("focusin"),i=e===ls?this.constructor.eventName("mouseleave"):this.constructor.eventName("focusout");fe.on(this._element,t,this._config.selector,(t=>{const e=this._initializeOnDelegatedTarget(t);e._activeTrigger["focusin"===t.type?cs:ls]=!0,e._enter()})),fe.on(this._element,i,this._config.selector,(t=>{const e=this._initializeOnDelegatedTarget(t);e._activeTrigger["focusout"===t.type?cs:ls]=e._element.contains(t.relatedTarget),e._leave()}))}this._hideModalHandler=()=>{this._element&&this.hide()},fe.on(this._element.closest(rs),as,this._hideModalHandler)}_fixTitle(){const t=this._element.getAttribute("title");t&&(this._element.getAttribute("aria-label")||this._element.textContent.trim()||this._element.setAttribute("aria-label",t),this._element.setAttribute("data-bs-original-title",t),this._element.removeAttribute("title"))}_enter(){this._isShown()||this._isHovered?this._isHovered=!0:(this._isHovered=!0,this._setTimeout((()=>{this._isHovered&&this.show()}),this._config.delay.show))}_leave(){this._isWithActiveTrigger()||(this._isHovered=!1,this._setTimeout((()=>{this._isHovered||this.hide()}),this._config.delay.hide))}_setTimeout(t,e){clearTimeout(this._timeout),this._timeout=setTimeout(t,e)}_isWithActiveTrigger(){return Object.values(this._activeTrigger).includes(!0)}_getConfig(t){const e=_e.getDataAttributes(this._element);for(const t of Object.keys(e))is.has(t)&&delete e[t];return t={...e,..."object"==typeof t&&t?t:{}},t=this._mergeConfigObj(t),t=this._configAfterMerge(t),this._typeCheckConfig(t),t}_configAfterMerge(t){return t.container=!1===t.container?document.body:Ht(t.container),"number"==typeof t.delay&&(t.delay={show:t.delay,hide:t.delay}),"number"==typeof t.title&&(t.title=t.title.toString()),"number"==typeof t.content&&(t.content=t.content.toString()),t}_getDelegateConfig(){const t={};for(const[e,i]of Object.entries(this._config))this.constructor.Default[e]!==i&&(t[e]=i);return t.selector=!1,t.trigger="manual",t}_disposePopper(){this._popper&&(this._popper.destroy(),this._popper=null),this.tip&&(this.tip.remove(),this.tip=null)}static jQueryInterface(t){return this.each((function(){const e=fs.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t]()}}))}}Qt(fs);const 
ps=".popover-header",ms=".popover-body",gs={...fs.Default,content:"",offset:[0,8],placement:"right",template:'',trigger:"click"},_s={...fs.DefaultType,content:"(null|string|element|function)"};class bs extends fs{static get Default(){return gs}static get DefaultType(){return _s}static get NAME(){return"popover"}_isWithContent(){return this._getTitle()||this._getContent()}_getContentForTemplate(){return{[ps]:this._getTitle(),[ms]:this._getContent()}}_getContent(){return this._resolvePossibleFunction(this._config.content)}static jQueryInterface(t){return this.each((function(){const e=bs.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t]()}}))}}Qt(bs);const vs=".bs.scrollspy",ys=`activate${vs}`,ws=`click${vs}`,Es=`load${vs}.data-api`,As="active",Ts="[href]",Cs=".nav-link",Os=`${Cs}, .nav-item > ${Cs}, .list-group-item`,xs={offset:null,rootMargin:"0px 0px -25%",smoothScroll:!1,target:null,threshold:[.1,.5,1]},ks={offset:"(number|null)",rootMargin:"string",smoothScroll:"boolean",target:"element",threshold:"array"};class Ls extends ve{constructor(t,e){super(t,e),this._targetLinks=new Map,this._observableSections=new Map,this._rootElement="visible"===getComputedStyle(this._element).overflowY?null:this._element,this._activeTarget=null,this._observer=null,this._previousScrollData={visibleEntryTop:0,parentScrollTop:0},this.refresh()}static get Default(){return xs}static get DefaultType(){return ks}static get NAME(){return"scrollspy"}refresh(){this._initializeTargetsAndObservables(),this._maybeEnableSmoothScroll(),this._observer?this._observer.disconnect():this._observer=this._getNewObserver();for(const t of this._observableSections.values())this._observer.observe(t)}dispose(){this._observer.disconnect(),super.dispose()}_configAfterMerge(t){return t.target=Ht(t.target)||document.body,t.rootMargin=t.offset?`${t.offset}px 0px -30%`:t.rootMargin,"string"==typeof t.threshold&&(t.threshold=t.threshold.split(",").map((t=>Number.parseFloat(t)))),t}_maybeEnableSmoothScroll(){this._config.smoothScroll&&(fe.off(this._config.target,ws),fe.on(this._config.target,ws,Ts,(t=>{const e=this._observableSections.get(t.target.hash);if(e){t.preventDefault();const i=this._rootElement||window,n=e.offsetTop-this._element.offsetTop;if(i.scrollTo)return void i.scrollTo({top:n,behavior:"smooth"});i.scrollTop=n}})))}_getNewObserver(){const t={root:this._rootElement,threshold:this._config.threshold,rootMargin:this._config.rootMargin};return new IntersectionObserver((t=>this._observerCallback(t)),t)}_observerCallback(t){const e=t=>this._targetLinks.get(`#${t.target.id}`),i=t=>{this._previousScrollData.visibleEntryTop=t.target.offsetTop,this._process(e(t))},n=(this._rootElement||document.documentElement).scrollTop,s=n>=this._previousScrollData.parentScrollTop;this._previousScrollData.parentScrollTop=n;for(const o of t){if(!o.isIntersecting){this._activeTarget=null,this._clearActiveClass(e(o));continue}const t=o.target.offsetTop>=this._previousScrollData.visibleEntryTop;if(s&&t){if(i(o),!n)return}else s||t||i(o)}}_initializeTargetsAndObservables(){this._targetLinks=new Map,this._observableSections=new Map;const t=we.find(Ts,this._config.target);for(const e of t){if(!e.hash||Wt(e))continue;const 
t=we.findOne(decodeURI(e.hash),this._element);Bt(t)&&(this._targetLinks.set(decodeURI(e.hash),e),this._observableSections.set(e.hash,t))}}_process(t){this._activeTarget!==t&&(this._clearActiveClass(this._config.target),this._activeTarget=t,t.classList.add(As),this._activateParents(t),fe.trigger(this._element,ys,{relatedTarget:t}))}_activateParents(t){if(t.classList.contains("dropdown-item"))we.findOne(".dropdown-toggle",t.closest(".dropdown")).classList.add(As);else for(const e of we.parents(t,".nav, .list-group"))for(const t of we.prev(e,Os))t.classList.add(As)}_clearActiveClass(t){t.classList.remove(As);const e=we.find(`${Ts}.${As}`,t);for(const t of e)t.classList.remove(As)}static jQueryInterface(t){return this.each((function(){const e=Ls.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t]()}}))}}fe.on(window,Es,(()=>{for(const t of we.find('[data-bs-spy="scroll"]'))Ls.getOrCreateInstance(t)})),Qt(Ls);const Ss=".bs.tab",Ds=`hide${Ss}`,$s=`hidden${Ss}`,Is=`show${Ss}`,Ns=`shown${Ss}`,Ps=`click${Ss}`,Ms=`keydown${Ss}`,js=`load${Ss}`,Fs="ArrowLeft",Hs="ArrowRight",Bs="ArrowUp",Ws="ArrowDown",zs="Home",Rs="End",qs="active",Vs="fade",Ys="show",Ks=".dropdown-toggle",Qs=`:not(${Ks})`,Xs='[data-bs-toggle="tab"], [data-bs-toggle="pill"], [data-bs-toggle="list"]',Us=`.nav-link${Qs}, .list-group-item${Qs}, [role="tab"]${Qs}, ${Xs}`,Gs=`.${qs}[data-bs-toggle="tab"], .${qs}[data-bs-toggle="pill"], .${qs}[data-bs-toggle="list"]`;class Js extends ve{constructor(t){super(t),this._parent=this._element.closest('.list-group, .nav, [role="tablist"]'),this._parent&&(this._setInitialAttributes(this._parent,this._getChildren()),fe.on(this._element,Ms,(t=>this._keydown(t))))}static get NAME(){return"tab"}show(){const t=this._element;if(this._elemIsActive(t))return;const e=this._getActiveElem(),i=e?fe.trigger(e,Ds,{relatedTarget:t}):null;fe.trigger(t,Is,{relatedTarget:e}).defaultPrevented||i&&i.defaultPrevented||(this._deactivate(e,t),this._activate(t,e))}_activate(t,e){t&&(t.classList.add(qs),this._activate(we.getElementFromSelector(t)),this._queueCallback((()=>{"tab"===t.getAttribute("role")?(t.removeAttribute("tabindex"),t.setAttribute("aria-selected",!0),this._toggleDropDown(t,!0),fe.trigger(t,Ns,{relatedTarget:e})):t.classList.add(Ys)}),t,t.classList.contains(Vs)))}_deactivate(t,e){t&&(t.classList.remove(qs),t.blur(),this._deactivate(we.getElementFromSelector(t)),this._queueCallback((()=>{"tab"===t.getAttribute("role")?(t.setAttribute("aria-selected",!1),t.setAttribute("tabindex","-1"),this._toggleDropDown(t,!1),fe.trigger(t,$s,{relatedTarget:e})):t.classList.remove(Ys)}),t,t.classList.contains(Vs)))}_keydown(t){if(![Fs,Hs,Bs,Ws,zs,Rs].includes(t.key))return;t.stopPropagation(),t.preventDefault();const e=this._getChildren().filter((t=>!Wt(t)));let i;if([zs,Rs].includes(t.key))i=e[t.key===zs?0:e.length-1];else{const n=[Hs,Ws].includes(t.key);i=Gt(e,t.target,n,!0)}i&&(i.focus({preventScroll:!0}),Js.getOrCreateInstance(i).show())}_getChildren(){return we.find(Us,this._parent)}_getActiveElem(){return this._getChildren().find((t=>this._elemIsActive(t)))||null}_setInitialAttributes(t,e){this._setAttributeIfNotExists(t,"role","tablist");for(const t of e)this._setInitialAttributesOnChild(t)}_setInitialAttributesOnChild(t){t=this._getInnerElement(t);const 
e=this._elemIsActive(t),i=this._getOuterElement(t);t.setAttribute("aria-selected",e),i!==t&&this._setAttributeIfNotExists(i,"role","presentation"),e||t.setAttribute("tabindex","-1"),this._setAttributeIfNotExists(t,"role","tab"),this._setInitialAttributesOnTargetPanel(t)}_setInitialAttributesOnTargetPanel(t){const e=we.getElementFromSelector(t);e&&(this._setAttributeIfNotExists(e,"role","tabpanel"),t.id&&this._setAttributeIfNotExists(e,"aria-labelledby",`${t.id}`))}_toggleDropDown(t,e){const i=this._getOuterElement(t);if(!i.classList.contains("dropdown"))return;const n=(t,n)=>{const s=we.findOne(t,i);s&&s.classList.toggle(n,e)};n(Ks,qs),n(".dropdown-menu",Ys),i.setAttribute("aria-expanded",e)}_setAttributeIfNotExists(t,e,i){t.hasAttribute(e)||t.setAttribute(e,i)}_elemIsActive(t){return t.classList.contains(qs)}_getInnerElement(t){return t.matches(Us)?t:we.findOne(Us,t)}_getOuterElement(t){return t.closest(".nav-item, .list-group-item")||t}static jQueryInterface(t){return this.each((function(){const e=Js.getOrCreateInstance(this);if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t]()}}))}}fe.on(document,Ps,Xs,(function(t){["A","AREA"].includes(this.tagName)&&t.preventDefault(),Wt(this)||Js.getOrCreateInstance(this).show()})),fe.on(window,js,(()=>{for(const t of we.find(Gs))Js.getOrCreateInstance(t)})),Qt(Js);const Zs=".bs.toast",to=`mouseover${Zs}`,eo=`mouseout${Zs}`,io=`focusin${Zs}`,no=`focusout${Zs}`,so=`hide${Zs}`,oo=`hidden${Zs}`,ro=`show${Zs}`,ao=`shown${Zs}`,lo="hide",co="show",ho="showing",uo={animation:"boolean",autohide:"boolean",delay:"number"},fo={animation:!0,autohide:!0,delay:5e3};class po extends ve{constructor(t,e){super(t,e),this._timeout=null,this._hasMouseInteraction=!1,this._hasKeyboardInteraction=!1,this._setListeners()}static get Default(){return fo}static get DefaultType(){return uo}static get NAME(){return"toast"}show(){fe.trigger(this._element,ro).defaultPrevented||(this._clearTimeout(),this._config.animation&&this._element.classList.add("fade"),this._element.classList.remove(lo),qt(this._element),this._element.classList.add(co,ho),this._queueCallback((()=>{this._element.classList.remove(ho),fe.trigger(this._element,ao),this._maybeScheduleHide()}),this._element,this._config.animation))}hide(){this.isShown()&&(fe.trigger(this._element,so).defaultPrevented||(this._element.classList.add(ho),this._queueCallback((()=>{this._element.classList.add(lo),this._element.classList.remove(ho,co),fe.trigger(this._element,oo)}),this._element,this._config.animation)))}dispose(){this._clearTimeout(),this.isShown()&&this._element.classList.remove(co),super.dispose()}isShown(){return this._element.classList.contains(co)}_maybeScheduleHide(){this._config.autohide&&(this._hasMouseInteraction||this._hasKeyboardInteraction||(this._timeout=setTimeout((()=>{this.hide()}),this._config.delay)))}_onInteraction(t,e){switch(t.type){case"mouseover":case"mouseout":this._hasMouseInteraction=e;break;case"focusin":case"focusout":this._hasKeyboardInteraction=e}if(e)return void this._clearTimeout();const i=t.relatedTarget;this._element===i||this._element.contains(i)||this._maybeScheduleHide()}_setListeners(){fe.on(this._element,to,(t=>this._onInteraction(t,!0))),fe.on(this._element,eo,(t=>this._onInteraction(t,!1))),fe.on(this._element,io,(t=>this._onInteraction(t,!0))),fe.on(this._element,no,(t=>this._onInteraction(t,!1)))}_clearTimeout(){clearTimeout(this._timeout),this._timeout=null}static jQueryInterface(t){return 
this.each((function(){const e=po.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t](this)}}))}}function mo(t){"loading"!=document.readyState?t():document.addEventListener("DOMContentLoaded",t)}Ee(po),Qt(po),mo((function(){[].slice.call(document.querySelectorAll('[data-bs-toggle="tooltip"]')).map((function(t){return new fs(t,{delay:{show:500,hide:100}})}))})),mo((function(){document.getElementById("pst-back-to-top").addEventListener("click",(function(){document.body.scrollTop=0,document.documentElement.scrollTop=0}))})),mo((function(){var t=document.getElementById("pst-back-to-top"),e=document.getElementsByClassName("bd-header")[0].getBoundingClientRect();window.addEventListener("scroll",(function(){this.oldScroll>this.scrollY&&this.scrollY>e.bottom?t.style.display="block":t.style.display="none",this.oldScroll=this.scrollY}))})),window.bootstrap=i})(); +//# sourceMappingURL=bootstrap.js.map \ No newline at end of file diff --git a/_static/scripts/bootstrap.js.LICENSE.txt b/_static/scripts/bootstrap.js.LICENSE.txt new file mode 100644 index 000000000..28755c2c5 --- /dev/null +++ b/_static/scripts/bootstrap.js.LICENSE.txt @@ -0,0 +1,5 @@ +/*! + * Bootstrap v5.3.3 (https://getbootstrap.com/) + * Copyright 2011-2024 The Bootstrap Authors (https://github.com/twbs/bootstrap/graphs/contributors) + * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE) + */ diff --git a/_static/scripts/bootstrap.js.map b/_static/scripts/bootstrap.js.map new file mode 100644 index 000000000..e9e815891 --- /dev/null +++ b/_static/scripts/bootstrap.js.map @@ -0,0 +1 @@ +{"version":3,"file":"scripts/bootstrap.js","mappings":";mBACA,IAAIA,EAAsB,CCA1BA,EAAwB,CAACC,EAASC,KACjC,IAAI,IAAIC,KAAOD,EACXF,EAAoBI,EAAEF,EAAYC,KAASH,EAAoBI,EAAEH,EAASE,IAC5EE,OAAOC,eAAeL,EAASE,EAAK,CAAEI,YAAY,EAAMC,IAAKN,EAAWC,IAE1E,ECNDH,EAAwB,CAACS,EAAKC,IAAUL,OAAOM,UAAUC,eAAeC,KAAKJ,EAAKC,GCClFV,EAAyBC,IACH,oBAAXa,QAA0BA,OAAOC,aAC1CV,OAAOC,eAAeL,EAASa,OAAOC,YAAa,CAAEC,MAAO,WAE7DX,OAAOC,eAAeL,EAAS,aAAc,CAAEe,OAAO,GAAO,01BCLvD,IAAI,EAAM,MACNC,EAAS,SACTC,EAAQ,QACRC,EAAO,OACPC,EAAO,OACPC,EAAiB,CAAC,EAAKJ,EAAQC,EAAOC,GACtCG,EAAQ,QACRC,EAAM,MACNC,EAAkB,kBAClBC,EAAW,WACXC,EAAS,SACTC,EAAY,YACZC,EAAmCP,EAAeQ,QAAO,SAAUC,EAAKC,GACjF,OAAOD,EAAIE,OAAO,CAACD,EAAY,IAAMT,EAAOS,EAAY,IAAMR,GAChE,GAAG,IACQ,EAA0B,GAAGS,OAAOX,EAAgB,CAACD,IAAOS,QAAO,SAAUC,EAAKC,GAC3F,OAAOD,EAAIE,OAAO,CAACD,EAAWA,EAAY,IAAMT,EAAOS,EAAY,IAAMR,GAC3E,GAAG,IAEQU,EAAa,aACbC,EAAO,OACPC,EAAY,YAEZC,EAAa,aACbC,EAAO,OACPC,EAAY,YAEZC,EAAc,cACdC,EAAQ,QACRC,EAAa,aACbC,EAAiB,CAACT,EAAYC,EAAMC,EAAWC,EAAYC,EAAMC,EAAWC,EAAaC,EAAOC,GC9B5F,SAASE,EAAYC,GAClC,OAAOA,GAAWA,EAAQC,UAAY,IAAIC,cAAgB,IAC5D,CCFe,SAASC,EAAUC,GAChC,GAAY,MAARA,EACF,OAAOC,OAGT,GAAwB,oBAApBD,EAAKE,WAAkC,CACzC,IAAIC,EAAgBH,EAAKG,cACzB,OAAOA,GAAgBA,EAAcC,aAAwBH,MAC/D,CAEA,OAAOD,CACT,CCTA,SAASK,EAAUL,GAEjB,OAAOA,aADUD,EAAUC,GAAMM,SACIN,aAAgBM,OACvD,CAEA,SAASC,EAAcP,GAErB,OAAOA,aADUD,EAAUC,GAAMQ,aACIR,aAAgBQ,WACvD,CAEA,SAASC,EAAaT,GAEpB,MAA0B,oBAAfU,aAKJV,aADUD,EAAUC,GAAMU,YACIV,aAAgBU,WACvD,CCwDA,SACEC,KAAM,cACNC,SAAS,EACTC,MAAO,QACPC,GA5EF,SAAqBC,GACnB,IAAIC,EAAQD,EAAKC,MACjB3D,OAAO4D,KAAKD,EAAME,UAAUC,SAAQ,SAAUR,GAC5C,IAAIS,EAAQJ,EAAMK,OAAOV,IAAS,CAAC,EAC/BW,EAAaN,EAAMM,WAAWX,IAAS,CAAC,EACxCf,EAAUoB,EAAME,SAASP,GAExBJ,EAAcX,IAAaD,EAAYC,KAO5CvC,OAAOkE,OAAO3B,EAAQwB,MAAOA,GAC7B/D,OAAO4D,KAAKK,GAAYH,SAAQ,SAAUR,GACxC,IAAI3C,EAAQsD,EAAWX,IAET,IAAV3C,EACF4B,EAAQ4B,gBAAgBb,GAExBf,EAAQ6B,aAAad,GAAgB,IAAV3C,EAAiB,GAAKA,EAErD,IACF,GACF,EAoDE0D,OAlDF,SAAgBC,GACd
,IAAIX,EAAQW,EAAMX,MACdY,EAAgB,CAClBlD,OAAQ,CACNmD,SAAUb,EAAMc,QAAQC,SACxB5D,KAAM,IACN6D,IAAK,IACLC,OAAQ,KAEVC,MAAO,CACLL,SAAU,YAEZlD,UAAW,CAAC,GASd,OAPAtB,OAAOkE,OAAOP,EAAME,SAASxC,OAAO0C,MAAOQ,EAAclD,QACzDsC,EAAMK,OAASO,EAEXZ,EAAME,SAASgB,OACjB7E,OAAOkE,OAAOP,EAAME,SAASgB,MAAMd,MAAOQ,EAAcM,OAGnD,WACL7E,OAAO4D,KAAKD,EAAME,UAAUC,SAAQ,SAAUR,GAC5C,IAAIf,EAAUoB,EAAME,SAASP,GACzBW,EAAaN,EAAMM,WAAWX,IAAS,CAAC,EAGxCS,EAFkB/D,OAAO4D,KAAKD,EAAMK,OAAOzD,eAAe+C,GAAQK,EAAMK,OAAOV,GAAQiB,EAAcjB,IAE7E9B,QAAO,SAAUuC,EAAOe,GAElD,OADAf,EAAMe,GAAY,GACXf,CACT,GAAG,CAAC,GAECb,EAAcX,IAAaD,EAAYC,KAI5CvC,OAAOkE,OAAO3B,EAAQwB,MAAOA,GAC7B/D,OAAO4D,KAAKK,GAAYH,SAAQ,SAAUiB,GACxCxC,EAAQ4B,gBAAgBY,EAC1B,IACF,GACF,CACF,EASEC,SAAU,CAAC,kBCjFE,SAASC,EAAiBvD,GACvC,OAAOA,EAAUwD,MAAM,KAAK,EAC9B,CCHO,IAAI,EAAMC,KAAKC,IACX,EAAMD,KAAKE,IACXC,EAAQH,KAAKG,MCFT,SAASC,IACtB,IAAIC,EAASC,UAAUC,cAEvB,OAAc,MAAVF,GAAkBA,EAAOG,QAAUC,MAAMC,QAAQL,EAAOG,QACnDH,EAAOG,OAAOG,KAAI,SAAUC,GACjC,OAAOA,EAAKC,MAAQ,IAAMD,EAAKE,OACjC,IAAGC,KAAK,KAGHT,UAAUU,SACnB,CCTe,SAASC,IACtB,OAAQ,iCAAiCC,KAAKd,IAChD,CCCe,SAASe,EAAsB/D,EAASgE,EAAcC,QAC9C,IAAjBD,IACFA,GAAe,QAGO,IAApBC,IACFA,GAAkB,GAGpB,IAAIC,EAAalE,EAAQ+D,wBACrBI,EAAS,EACTC,EAAS,EAETJ,GAAgBrD,EAAcX,KAChCmE,EAASnE,EAAQqE,YAAc,GAAItB,EAAMmB,EAAWI,OAAStE,EAAQqE,aAAmB,EACxFD,EAASpE,EAAQuE,aAAe,GAAIxB,EAAMmB,EAAWM,QAAUxE,EAAQuE,cAAoB,GAG7F,IACIE,GADOhE,EAAUT,GAAWG,EAAUH,GAAWK,QAC3BoE,eAEtBC,GAAoBb,KAAsBI,EAC1CU,GAAKT,EAAW3F,MAAQmG,GAAoBD,EAAiBA,EAAeG,WAAa,IAAMT,EAC/FU,GAAKX,EAAW9B,KAAOsC,GAAoBD,EAAiBA,EAAeK,UAAY,IAAMV,EAC7FE,EAAQJ,EAAWI,MAAQH,EAC3BK,EAASN,EAAWM,OAASJ,EACjC,MAAO,CACLE,MAAOA,EACPE,OAAQA,EACRpC,IAAKyC,EACLvG,MAAOqG,EAAIL,EACXjG,OAAQwG,EAAIL,EACZjG,KAAMoG,EACNA,EAAGA,EACHE,EAAGA,EAEP,CCrCe,SAASE,EAAc/E,GACpC,IAAIkE,EAAaH,EAAsB/D,GAGnCsE,EAAQtE,EAAQqE,YAChBG,EAASxE,EAAQuE,aAUrB,OARI3B,KAAKoC,IAAId,EAAWI,MAAQA,IAAU,IACxCA,EAAQJ,EAAWI,OAGjB1B,KAAKoC,IAAId,EAAWM,OAASA,IAAW,IAC1CA,EAASN,EAAWM,QAGf,CACLG,EAAG3E,EAAQ4E,WACXC,EAAG7E,EAAQ8E,UACXR,MAAOA,EACPE,OAAQA,EAEZ,CCvBe,SAASS,EAASC,EAAQC,GACvC,IAAIC,EAAWD,EAAME,aAAeF,EAAME,cAE1C,GAAIH,EAAOD,SAASE,GAClB,OAAO,EAEJ,GAAIC,GAAYvE,EAAauE,GAAW,CACzC,IAAIE,EAAOH,EAEX,EAAG,CACD,GAAIG,GAAQJ,EAAOK,WAAWD,GAC5B,OAAO,EAITA,EAAOA,EAAKE,YAAcF,EAAKG,IACjC,OAASH,EACX,CAGF,OAAO,CACT,CCrBe,SAAS,EAAiBtF,GACvC,OAAOG,EAAUH,GAAS0F,iBAAiB1F,EAC7C,CCFe,SAAS2F,EAAe3F,GACrC,MAAO,CAAC,QAAS,KAAM,MAAM4F,QAAQ7F,EAAYC,KAAa,CAChE,CCFe,SAAS6F,EAAmB7F,GAEzC,QAASS,EAAUT,GAAWA,EAAQO,cACtCP,EAAQ8F,WAAazF,OAAOyF,UAAUC,eACxC,CCFe,SAASC,EAAchG,GACpC,MAA6B,SAAzBD,EAAYC,GACPA,EAMPA,EAAQiG,cACRjG,EAAQwF,aACR3E,EAAab,GAAWA,EAAQyF,KAAO,OAEvCI,EAAmB7F,EAGvB,CCVA,SAASkG,EAAoBlG,GAC3B,OAAKW,EAAcX,IACoB,UAAvC,EAAiBA,GAASiC,SAInBjC,EAAQmG,aAHN,IAIX,CAwCe,SAASC,EAAgBpG,GAItC,IAHA,IAAIK,EAASF,EAAUH,GACnBmG,EAAeD,EAAoBlG,GAEhCmG,GAAgBR,EAAeQ,IAA6D,WAA5C,EAAiBA,GAAclE,UACpFkE,EAAeD,EAAoBC,GAGrC,OAAIA,IAA+C,SAA9BpG,EAAYoG,IAA0D,SAA9BpG,EAAYoG,IAAwE,WAA5C,EAAiBA,GAAclE,UAC3H5B,EAGF8F,GAhDT,SAA4BnG,GAC1B,IAAIqG,EAAY,WAAWvC,KAAKd,KAGhC,GAFW,WAAWc,KAAKd,MAEfrC,EAAcX,IAII,UAFX,EAAiBA,GAEnBiC,SACb,OAAO,KAIX,IAAIqE,EAAcN,EAAchG,GAMhC,IAJIa,EAAayF,KACfA,EAAcA,EAAYb,MAGrB9E,EAAc2F,IAAgB,CAAC,OAAQ,QAAQV,QAAQ7F,EAAYuG,IAAgB,GAAG,CAC3F,IAAIC,EAAM,EAAiBD,GAI3B,GAAsB,SAAlBC,EAAIC,WAA4C,SAApBD,EAAIE,aAA0C,UAAhBF,EAAIG,UAAiF,IAA1D,CAAC,YAAa,eAAed,QAAQW,EAAII,aAAsBN,GAAgC,WAAnBE,EAAII,YAA2BN,GAAaE,EAAIK,QAAyB,SAAfL,EAAIK,OACjO,OAAON,EAEPA,EAAcA,EAAYd,UAE9B,CAEA,OAAO,IACT,CAgByBqB,CAAmB7G,IAAYK,CACxD,CCpEe,SAASyG,EAAyB3H,GAC/C,MAAO,CAAC,MAAO,UAAUyG,QAAQzG,IAAc,EAAI,IAAM,GAC3D,CCDO,SAAS4H,EAAOjE,EAAK1E,EAAOyE,GACjC,OAA
O,EAAQC,EAAK,EAAQ1E,EAAOyE,GACrC,CCFe,SAASmE,EAAmBC,GACzC,OAAOxJ,OAAOkE,OAAO,CAAC,ECDf,CACLS,IAAK,EACL9D,MAAO,EACPD,OAAQ,EACRE,KAAM,GDHuC0I,EACjD,CEHe,SAASC,EAAgB9I,EAAOiD,GAC7C,OAAOA,EAAKpC,QAAO,SAAUkI,EAAS5J,GAEpC,OADA4J,EAAQ5J,GAAOa,EACR+I,CACT,GAAG,CAAC,EACN,CC4EA,SACEpG,KAAM,QACNC,SAAS,EACTC,MAAO,OACPC,GApEF,SAAeC,GACb,IAAIiG,EAEAhG,EAAQD,EAAKC,MACbL,EAAOI,EAAKJ,KACZmB,EAAUf,EAAKe,QACfmF,EAAejG,EAAME,SAASgB,MAC9BgF,EAAgBlG,EAAMmG,cAAcD,cACpCE,EAAgB9E,EAAiBtB,EAAMjC,WACvCsI,EAAOX,EAAyBU,GAEhCE,EADa,CAACnJ,EAAMD,GAAOsH,QAAQ4B,IAAkB,EAClC,SAAW,QAElC,GAAKH,GAAiBC,EAAtB,CAIA,IAAIL,EAxBgB,SAAyBU,EAASvG,GAItD,OAAO4F,EAAsC,iBAH7CW,EAA6B,mBAAZA,EAAyBA,EAAQlK,OAAOkE,OAAO,CAAC,EAAGP,EAAMwG,MAAO,CAC/EzI,UAAWiC,EAAMjC,aACbwI,GACkDA,EAAUT,EAAgBS,EAASlJ,GAC7F,CAmBsBoJ,CAAgB3F,EAAQyF,QAASvG,GACjD0G,EAAY/C,EAAcsC,GAC1BU,EAAmB,MAATN,EAAe,EAAMlJ,EAC/ByJ,EAAmB,MAATP,EAAepJ,EAASC,EAClC2J,EAAU7G,EAAMwG,MAAM7I,UAAU2I,GAAOtG,EAAMwG,MAAM7I,UAAU0I,GAAQH,EAAcG,GAAQrG,EAAMwG,MAAM9I,OAAO4I,GAC9GQ,EAAYZ,EAAcG,GAAQrG,EAAMwG,MAAM7I,UAAU0I,GACxDU,EAAoB/B,EAAgBiB,GACpCe,EAAaD,EAA6B,MAATV,EAAeU,EAAkBE,cAAgB,EAAIF,EAAkBG,aAAe,EAAI,EAC3HC,EAAoBN,EAAU,EAAIC,EAAY,EAG9CpF,EAAMmE,EAAcc,GACpBlF,EAAMuF,EAAaN,EAAUJ,GAAOT,EAAce,GAClDQ,EAASJ,EAAa,EAAIN,EAAUJ,GAAO,EAAIa,EAC/CE,EAAS1B,EAAOjE,EAAK0F,EAAQ3F,GAE7B6F,EAAWjB,EACfrG,EAAMmG,cAAcxG,KAASqG,EAAwB,CAAC,GAAyBsB,GAAYD,EAAQrB,EAAsBuB,aAAeF,EAASD,EAAQpB,EAnBzJ,CAoBF,EAkCEtF,OAhCF,SAAgBC,GACd,IAAIX,EAAQW,EAAMX,MAEdwH,EADU7G,EAAMG,QACWlC,QAC3BqH,OAAoC,IAArBuB,EAA8B,sBAAwBA,EAErD,MAAhBvB,IAKwB,iBAAjBA,IACTA,EAAejG,EAAME,SAASxC,OAAO+J,cAAcxB,MAOhDpC,EAAS7D,EAAME,SAASxC,OAAQuI,KAIrCjG,EAAME,SAASgB,MAAQ+E,EACzB,EASE5E,SAAU,CAAC,iBACXqG,iBAAkB,CAAC,oBCxFN,SAASC,EAAa5J,GACnC,OAAOA,EAAUwD,MAAM,KAAK,EAC9B,CCOA,IAAIqG,GAAa,CACf5G,IAAK,OACL9D,MAAO,OACPD,OAAQ,OACRE,KAAM,QAeD,SAAS0K,GAAYlH,GAC1B,IAAImH,EAEApK,EAASiD,EAAMjD,OACfqK,EAAapH,EAAMoH,WACnBhK,EAAY4C,EAAM5C,UAClBiK,EAAYrH,EAAMqH,UAClBC,EAAUtH,EAAMsH,QAChBpH,EAAWF,EAAME,SACjBqH,EAAkBvH,EAAMuH,gBACxBC,EAAWxH,EAAMwH,SACjBC,EAAezH,EAAMyH,aACrBC,EAAU1H,EAAM0H,QAChBC,EAAaL,EAAQ1E,EACrBA,OAAmB,IAAf+E,EAAwB,EAAIA,EAChCC,EAAaN,EAAQxE,EACrBA,OAAmB,IAAf8E,EAAwB,EAAIA,EAEhCC,EAAgC,mBAAjBJ,EAA8BA,EAAa,CAC5D7E,EAAGA,EACHE,IACG,CACHF,EAAGA,EACHE,GAGFF,EAAIiF,EAAMjF,EACVE,EAAI+E,EAAM/E,EACV,IAAIgF,EAAOR,EAAQrL,eAAe,KAC9B8L,EAAOT,EAAQrL,eAAe,KAC9B+L,EAAQxL,EACRyL,EAAQ,EACRC,EAAM5J,OAEV,GAAIkJ,EAAU,CACZ,IAAIpD,EAAeC,EAAgBtH,GAC/BoL,EAAa,eACbC,EAAY,cAEZhE,IAAiBhG,EAAUrB,IAGmB,WAA5C,EAFJqH,EAAeN,EAAmB/G,IAECmD,UAAsC,aAAbA,IAC1DiI,EAAa,eACbC,EAAY,gBAOZhL,IAAc,IAAQA,IAAcZ,GAAQY,IAAcb,IAAU8K,IAAczK,KACpFqL,EAAQ3L,EAGRwG,IAFc4E,GAAWtD,IAAiB8D,GAAOA,EAAIxF,eAAiBwF,EAAIxF,eAAeD,OACzF2B,EAAa+D,IACEf,EAAW3E,OAC1BK,GAAKyE,EAAkB,GAAK,GAG1BnK,IAAcZ,IAASY,IAAc,GAAOA,IAAcd,GAAW+K,IAAczK,KACrFoL,EAAQzL,EAGRqG,IAFc8E,GAAWtD,IAAiB8D,GAAOA,EAAIxF,eAAiBwF,EAAIxF,eAAeH,MACzF6B,EAAagE,IACEhB,EAAW7E,MAC1BK,GAAK2E,EAAkB,GAAK,EAEhC,CAEA,IAgBMc,EAhBFC,EAAe5M,OAAOkE,OAAO,CAC/BM,SAAUA,GACTsH,GAAYP,IAEXsB,GAAyB,IAAjBd,EAlFd,SAA2BrI,EAAM8I,GAC/B,IAAItF,EAAIxD,EAAKwD,EACTE,EAAI1D,EAAK0D,EACT0F,EAAMN,EAAIO,kBAAoB,EAClC,MAAO,CACL7F,EAAG5B,EAAM4B,EAAI4F,GAAOA,GAAO,EAC3B1F,EAAG9B,EAAM8B,EAAI0F,GAAOA,GAAO,EAE/B,CA0EsCE,CAAkB,CACpD9F,EAAGA,EACHE,GACC1E,EAAUrB,IAAW,CACtB6F,EAAGA,EACHE,GAMF,OAHAF,EAAI2F,EAAM3F,EACVE,EAAIyF,EAAMzF,EAENyE,EAGK7L,OAAOkE,OAAO,CAAC,EAAG0I,IAAeD,EAAiB,CAAC,GAAkBJ,GAASF,EAAO,IAAM,GAAIM,EAAeL,GAASF,EAAO,IAAM,GAAIO,EAAe5D,WAAayD,EAAIO,kBAAoB,IAAM,EAAI,aAAe7F,EAAI,OAASE,EAAI,MAAQ,eAAiBF,EAAI,OAASE,EAAI,SAAUuF,IAG5R3M,OAAOkE,OAAO,CAAC,EAAG0I,IAAenB,EAAkB,CAAC,GAAmBc,GAA
SF,EAAOjF,EAAI,KAAO,GAAIqE,EAAgBa,GAASF,EAAOlF,EAAI,KAAO,GAAIuE,EAAgB1C,UAAY,GAAI0C,GAC9L,CA4CA,UACEnI,KAAM,gBACNC,SAAS,EACTC,MAAO,cACPC,GA9CF,SAAuBwJ,GACrB,IAAItJ,EAAQsJ,EAAMtJ,MACdc,EAAUwI,EAAMxI,QAChByI,EAAwBzI,EAAQoH,gBAChCA,OAA4C,IAA1BqB,GAA0CA,EAC5DC,EAAoB1I,EAAQqH,SAC5BA,OAAiC,IAAtBqB,GAAsCA,EACjDC,EAAwB3I,EAAQsH,aAChCA,OAAyC,IAA1BqB,GAA0CA,EACzDR,EAAe,CACjBlL,UAAWuD,EAAiBtB,EAAMjC,WAClCiK,UAAWL,EAAa3H,EAAMjC,WAC9BL,OAAQsC,EAAME,SAASxC,OACvBqK,WAAY/H,EAAMwG,MAAM9I,OACxBwK,gBAAiBA,EACjBG,QAAoC,UAA3BrI,EAAMc,QAAQC,UAGgB,MAArCf,EAAMmG,cAAcD,gBACtBlG,EAAMK,OAAO3C,OAASrB,OAAOkE,OAAO,CAAC,EAAGP,EAAMK,OAAO3C,OAAQmK,GAAYxL,OAAOkE,OAAO,CAAC,EAAG0I,EAAc,CACvGhB,QAASjI,EAAMmG,cAAcD,cAC7BrF,SAAUb,EAAMc,QAAQC,SACxBoH,SAAUA,EACVC,aAAcA,OAIe,MAA7BpI,EAAMmG,cAAcjF,QACtBlB,EAAMK,OAAOa,MAAQ7E,OAAOkE,OAAO,CAAC,EAAGP,EAAMK,OAAOa,MAAO2G,GAAYxL,OAAOkE,OAAO,CAAC,EAAG0I,EAAc,CACrGhB,QAASjI,EAAMmG,cAAcjF,MAC7BL,SAAU,WACVsH,UAAU,EACVC,aAAcA,OAIlBpI,EAAMM,WAAW5C,OAASrB,OAAOkE,OAAO,CAAC,EAAGP,EAAMM,WAAW5C,OAAQ,CACnE,wBAAyBsC,EAAMjC,WAEnC,EAQE2L,KAAM,CAAC,GCrKT,IAAIC,GAAU,CACZA,SAAS,GAsCX,UACEhK,KAAM,iBACNC,SAAS,EACTC,MAAO,QACPC,GAAI,WAAe,EACnBY,OAxCF,SAAgBX,GACd,IAAIC,EAAQD,EAAKC,MACb4J,EAAW7J,EAAK6J,SAChB9I,EAAUf,EAAKe,QACf+I,EAAkB/I,EAAQgJ,OAC1BA,OAA6B,IAApBD,GAAoCA,EAC7CE,EAAkBjJ,EAAQkJ,OAC1BA,OAA6B,IAApBD,GAAoCA,EAC7C9K,EAASF,EAAUiB,EAAME,SAASxC,QAClCuM,EAAgB,GAAGjM,OAAOgC,EAAMiK,cAActM,UAAWqC,EAAMiK,cAAcvM,QAYjF,OAVIoM,GACFG,EAAc9J,SAAQ,SAAU+J,GAC9BA,EAAaC,iBAAiB,SAAUP,EAASQ,OAAQT,GAC3D,IAGEK,GACF/K,EAAOkL,iBAAiB,SAAUP,EAASQ,OAAQT,IAG9C,WACDG,GACFG,EAAc9J,SAAQ,SAAU+J,GAC9BA,EAAaG,oBAAoB,SAAUT,EAASQ,OAAQT,GAC9D,IAGEK,GACF/K,EAAOoL,oBAAoB,SAAUT,EAASQ,OAAQT,GAE1D,CACF,EASED,KAAM,CAAC,GC/CT,IAAIY,GAAO,CACTnN,KAAM,QACND,MAAO,OACPD,OAAQ,MACR+D,IAAK,UAEQ,SAASuJ,GAAqBxM,GAC3C,OAAOA,EAAUyM,QAAQ,0BAA0B,SAAUC,GAC3D,OAAOH,GAAKG,EACd,GACF,CCVA,IAAI,GAAO,CACTnN,MAAO,MACPC,IAAK,SAEQ,SAASmN,GAA8B3M,GACpD,OAAOA,EAAUyM,QAAQ,cAAc,SAAUC,GAC/C,OAAO,GAAKA,EACd,GACF,CCPe,SAASE,GAAgB3L,GACtC,IAAI6J,EAAM9J,EAAUC,GAGpB,MAAO,CACL4L,WAHe/B,EAAIgC,YAInBC,UAHcjC,EAAIkC,YAKtB,CCNe,SAASC,GAAoBpM,GAQ1C,OAAO+D,EAAsB8B,EAAmB7F,IAAUzB,KAAOwN,GAAgB/L,GAASgM,UAC5F,CCXe,SAASK,GAAerM,GAErC,IAAIsM,EAAoB,EAAiBtM,GACrCuM,EAAWD,EAAkBC,SAC7BC,EAAYF,EAAkBE,UAC9BC,EAAYH,EAAkBG,UAElC,MAAO,6BAA6B3I,KAAKyI,EAAWE,EAAYD,EAClE,CCLe,SAASE,GAAgBtM,GACtC,MAAI,CAAC,OAAQ,OAAQ,aAAawF,QAAQ7F,EAAYK,KAAU,EAEvDA,EAAKG,cAAcoM,KAGxBhM,EAAcP,IAASiM,GAAejM,GACjCA,EAGFsM,GAAgB1G,EAAc5F,GACvC,CCJe,SAASwM,GAAkB5M,EAAS6M,GACjD,IAAIC,OAES,IAATD,IACFA,EAAO,IAGT,IAAIvB,EAAeoB,GAAgB1M,GAC/B+M,EAASzB,KAAqE,OAAlDwB,EAAwB9M,EAAQO,oBAAyB,EAASuM,EAAsBH,MACpH1C,EAAM9J,EAAUmL,GAChB0B,EAASD,EAAS,CAAC9C,GAAK7K,OAAO6K,EAAIxF,gBAAkB,GAAI4H,GAAef,GAAgBA,EAAe,IAAMA,EAC7G2B,EAAcJ,EAAKzN,OAAO4N,GAC9B,OAAOD,EAASE,EAChBA,EAAY7N,OAAOwN,GAAkB5G,EAAcgH,IACrD,CCzBe,SAASE,GAAiBC,GACvC,OAAO1P,OAAOkE,OAAO,CAAC,EAAGwL,EAAM,CAC7B5O,KAAM4O,EAAKxI,EACXvC,IAAK+K,EAAKtI,EACVvG,MAAO6O,EAAKxI,EAAIwI,EAAK7I,MACrBjG,OAAQ8O,EAAKtI,EAAIsI,EAAK3I,QAE1B,CCqBA,SAAS4I,GAA2BpN,EAASqN,EAAgBlL,GAC3D,OAAOkL,IAAmBxO,EAAWqO,GCzBxB,SAAyBlN,EAASmC,GAC/C,IAAI8H,EAAM9J,EAAUH,GAChBsN,EAAOzH,EAAmB7F,GAC1ByE,EAAiBwF,EAAIxF,eACrBH,EAAQgJ,EAAKhF,YACb9D,EAAS8I,EAAKjF,aACd1D,EAAI,EACJE,EAAI,EAER,GAAIJ,EAAgB,CAClBH,EAAQG,EAAeH,MACvBE,EAASC,EAAeD,OACxB,IAAI+I,EAAiB1J,KAEjB0J,IAAmBA,GAA+B,UAAbpL,KACvCwC,EAAIF,EAAeG,WACnBC,EAAIJ,EAAeK,UAEvB,CAEA,MAAO,CACLR,MAAOA,EACPE,OAAQA,EACRG,EAAGA,EAAIyH,GAAoBpM,GAC3B6E,EAAGA,EAEP,CDDwD2I,CAAgBxN,EAASmC,IAAa1B,EAAU4M,GAdxG,SAAoCrN,EAASmC,GAC3C,IAAIgL,EAAOpJ,EAAsB/D,GAAS,EAAoB,UAAbmC,GASjD,OARAgL,EAAK/
K,IAAM+K,EAAK/K,IAAMpC,EAAQyN,UAC9BN,EAAK5O,KAAO4O,EAAK5O,KAAOyB,EAAQ0N,WAChCP,EAAK9O,OAAS8O,EAAK/K,IAAMpC,EAAQqI,aACjC8E,EAAK7O,MAAQ6O,EAAK5O,KAAOyB,EAAQsI,YACjC6E,EAAK7I,MAAQtE,EAAQsI,YACrB6E,EAAK3I,OAASxE,EAAQqI,aACtB8E,EAAKxI,EAAIwI,EAAK5O,KACd4O,EAAKtI,EAAIsI,EAAK/K,IACP+K,CACT,CAG0HQ,CAA2BN,EAAgBlL,GAAY+K,GEtBlK,SAAyBlN,GACtC,IAAI8M,EAEAQ,EAAOzH,EAAmB7F,GAC1B4N,EAAY7B,GAAgB/L,GAC5B2M,EAA0D,OAAlDG,EAAwB9M,EAAQO,oBAAyB,EAASuM,EAAsBH,KAChGrI,EAAQ,EAAIgJ,EAAKO,YAAaP,EAAKhF,YAAaqE,EAAOA,EAAKkB,YAAc,EAAGlB,EAAOA,EAAKrE,YAAc,GACvG9D,EAAS,EAAI8I,EAAKQ,aAAcR,EAAKjF,aAAcsE,EAAOA,EAAKmB,aAAe,EAAGnB,EAAOA,EAAKtE,aAAe,GAC5G1D,GAAKiJ,EAAU5B,WAAaI,GAAoBpM,GAChD6E,GAAK+I,EAAU1B,UAMnB,MAJiD,QAA7C,EAAiBS,GAAQW,GAAMS,YACjCpJ,GAAK,EAAI2I,EAAKhF,YAAaqE,EAAOA,EAAKrE,YAAc,GAAKhE,GAGrD,CACLA,MAAOA,EACPE,OAAQA,EACRG,EAAGA,EACHE,EAAGA,EAEP,CFCkMmJ,CAAgBnI,EAAmB7F,IACrO,CG1Be,SAASiO,GAAe9M,GACrC,IAOIkI,EAPAtK,EAAYoC,EAAKpC,UACjBiB,EAAUmB,EAAKnB,QACfb,EAAYgC,EAAKhC,UACjBqI,EAAgBrI,EAAYuD,EAAiBvD,GAAa,KAC1DiK,EAAYjK,EAAY4J,EAAa5J,GAAa,KAClD+O,EAAUnP,EAAU4F,EAAI5F,EAAUuF,MAAQ,EAAItE,EAAQsE,MAAQ,EAC9D6J,EAAUpP,EAAU8F,EAAI9F,EAAUyF,OAAS,EAAIxE,EAAQwE,OAAS,EAGpE,OAAQgD,GACN,KAAK,EACH6B,EAAU,CACR1E,EAAGuJ,EACHrJ,EAAG9F,EAAU8F,EAAI7E,EAAQwE,QAE3B,MAEF,KAAKnG,EACHgL,EAAU,CACR1E,EAAGuJ,EACHrJ,EAAG9F,EAAU8F,EAAI9F,EAAUyF,QAE7B,MAEF,KAAKlG,EACH+K,EAAU,CACR1E,EAAG5F,EAAU4F,EAAI5F,EAAUuF,MAC3BO,EAAGsJ,GAEL,MAEF,KAAK5P,EACH8K,EAAU,CACR1E,EAAG5F,EAAU4F,EAAI3E,EAAQsE,MACzBO,EAAGsJ,GAEL,MAEF,QACE9E,EAAU,CACR1E,EAAG5F,EAAU4F,EACbE,EAAG9F,EAAU8F,GAInB,IAAIuJ,EAAW5G,EAAgBV,EAAyBU,GAAiB,KAEzE,GAAgB,MAAZ4G,EAAkB,CACpB,IAAI1G,EAAmB,MAAb0G,EAAmB,SAAW,QAExC,OAAQhF,GACN,KAAK1K,EACH2K,EAAQ+E,GAAY/E,EAAQ+E,IAAarP,EAAU2I,GAAO,EAAI1H,EAAQ0H,GAAO,GAC7E,MAEF,KAAK/I,EACH0K,EAAQ+E,GAAY/E,EAAQ+E,IAAarP,EAAU2I,GAAO,EAAI1H,EAAQ0H,GAAO,GAKnF,CAEA,OAAO2B,CACT,CC3De,SAASgF,GAAejN,EAAOc,QAC5B,IAAZA,IACFA,EAAU,CAAC,GAGb,IAAIoM,EAAWpM,EACXqM,EAAqBD,EAASnP,UAC9BA,OAAmC,IAAvBoP,EAAgCnN,EAAMjC,UAAYoP,EAC9DC,EAAoBF,EAASnM,SAC7BA,OAAiC,IAAtBqM,EAA+BpN,EAAMe,SAAWqM,EAC3DC,EAAoBH,EAASI,SAC7BA,OAAiC,IAAtBD,EAA+B7P,EAAkB6P,EAC5DE,EAAwBL,EAASM,aACjCA,OAAyC,IAA1BD,EAAmC9P,EAAW8P,EAC7DE,EAAwBP,EAASQ,eACjCA,OAA2C,IAA1BD,EAAmC/P,EAAS+P,EAC7DE,EAAuBT,EAASU,YAChCA,OAAuC,IAAzBD,GAA0CA,EACxDE,EAAmBX,EAAS3G,QAC5BA,OAA+B,IAArBsH,EAA8B,EAAIA,EAC5ChI,EAAgBD,EAAsC,iBAAZW,EAAuBA,EAAUT,EAAgBS,EAASlJ,IACpGyQ,EAAaJ,IAAmBhQ,EAASC,EAAYD,EACrDqK,EAAa/H,EAAMwG,MAAM9I,OACzBkB,EAAUoB,EAAME,SAAS0N,EAAcE,EAAaJ,GACpDK,EJkBS,SAAyBnP,EAAS0O,EAAUE,EAAczM,GACvE,IAAIiN,EAAmC,oBAAbV,EAlB5B,SAA4B1O,GAC1B,IAAIpB,EAAkBgO,GAAkB5G,EAAchG,IAElDqP,EADoB,CAAC,WAAY,SAASzJ,QAAQ,EAAiB5F,GAASiC,WAAa,GACnDtB,EAAcX,GAAWoG,EAAgBpG,GAAWA,EAE9F,OAAKS,EAAU4O,GAKRzQ,EAAgBgI,QAAO,SAAUyG,GACtC,OAAO5M,EAAU4M,IAAmBpI,EAASoI,EAAgBgC,IAAmD,SAAhCtP,EAAYsN,EAC9F,IANS,EAOX,CAK6DiC,CAAmBtP,GAAW,GAAGZ,OAAOsP,GAC/F9P,EAAkB,GAAGQ,OAAOgQ,EAAqB,CAACR,IAClDW,EAAsB3Q,EAAgB,GACtC4Q,EAAe5Q,EAAgBK,QAAO,SAAUwQ,EAASpC,GAC3D,IAAIF,EAAOC,GAA2BpN,EAASqN,EAAgBlL,GAK/D,OAJAsN,EAAQrN,IAAM,EAAI+K,EAAK/K,IAAKqN,EAAQrN,KACpCqN,EAAQnR,MAAQ,EAAI6O,EAAK7O,MAAOmR,EAAQnR,OACxCmR,EAAQpR,OAAS,EAAI8O,EAAK9O,OAAQoR,EAAQpR,QAC1CoR,EAAQlR,KAAO,EAAI4O,EAAK5O,KAAMkR,EAAQlR,MAC/BkR,CACT,GAAGrC,GAA2BpN,EAASuP,EAAqBpN,IAK5D,OAJAqN,EAAalL,MAAQkL,EAAalR,MAAQkR,EAAajR,KACvDiR,EAAahL,OAASgL,EAAanR,OAASmR,EAAapN,IACzDoN,EAAa7K,EAAI6K,EAAajR,KAC9BiR,EAAa3K,EAAI2K,EAAapN,IACvBoN,CACT,CInC2BE,CAAgBjP,EAAUT,GAAWA,EAAUA,EAAQ2P,gBAAkB9J,EAAmBzE,EAAME,SAASxC,QAAS4P,EAAUE,EAAczM,GACjKyN,EAAsB7L,EAAsB3C,EAAME,SAASvC,WAC3DuI,EAAgB2G,GAAe,CACjClP,UAAW6Q,EACX5P,QAASmJ,EACThH,S
AAU,WACVhD,UAAWA,IAET0Q,EAAmB3C,GAAiBzP,OAAOkE,OAAO,CAAC,EAAGwH,EAAY7B,IAClEwI,EAAoBhB,IAAmBhQ,EAAS+Q,EAAmBD,EAGnEG,EAAkB,CACpB3N,IAAK+M,EAAmB/M,IAAM0N,EAAkB1N,IAAM6E,EAAc7E,IACpE/D,OAAQyR,EAAkBzR,OAAS8Q,EAAmB9Q,OAAS4I,EAAc5I,OAC7EE,KAAM4Q,EAAmB5Q,KAAOuR,EAAkBvR,KAAO0I,EAAc1I,KACvED,MAAOwR,EAAkBxR,MAAQ6Q,EAAmB7Q,MAAQ2I,EAAc3I,OAExE0R,EAAa5O,EAAMmG,cAAckB,OAErC,GAAIqG,IAAmBhQ,GAAUkR,EAAY,CAC3C,IAAIvH,EAASuH,EAAW7Q,GACxB1B,OAAO4D,KAAK0O,GAAiBxO,SAAQ,SAAUhE,GAC7C,IAAI0S,EAAW,CAAC3R,EAAOD,GAAQuH,QAAQrI,IAAQ,EAAI,GAAK,EACpDkK,EAAO,CAAC,EAAKpJ,GAAQuH,QAAQrI,IAAQ,EAAI,IAAM,IACnDwS,EAAgBxS,IAAQkL,EAAOhB,GAAQwI,CACzC,GACF,CAEA,OAAOF,CACT,CCyEA,UACEhP,KAAM,OACNC,SAAS,EACTC,MAAO,OACPC,GA5HF,SAAcC,GACZ,IAAIC,EAAQD,EAAKC,MACbc,EAAUf,EAAKe,QACfnB,EAAOI,EAAKJ,KAEhB,IAAIK,EAAMmG,cAAcxG,GAAMmP,MAA9B,CAoCA,IAhCA,IAAIC,EAAoBjO,EAAQkM,SAC5BgC,OAAsC,IAAtBD,GAAsCA,EACtDE,EAAmBnO,EAAQoO,QAC3BC,OAAoC,IAArBF,GAAqCA,EACpDG,EAA8BtO,EAAQuO,mBACtC9I,EAAUzF,EAAQyF,QAClB+G,EAAWxM,EAAQwM,SACnBE,EAAe1M,EAAQ0M,aACvBI,EAAc9M,EAAQ8M,YACtB0B,EAAwBxO,EAAQyO,eAChCA,OAA2C,IAA1BD,GAA0CA,EAC3DE,EAAwB1O,EAAQ0O,sBAChCC,EAAqBzP,EAAMc,QAAQ/C,UACnCqI,EAAgB9E,EAAiBmO,GAEjCJ,EAAqBD,IADHhJ,IAAkBqJ,GACqCF,EAjC/E,SAAuCxR,GACrC,GAAIuD,EAAiBvD,KAAeX,EAClC,MAAO,GAGT,IAAIsS,EAAoBnF,GAAqBxM,GAC7C,MAAO,CAAC2M,GAA8B3M,GAAY2R,EAAmBhF,GAA8BgF,GACrG,CA0B6IC,CAA8BF,GAA3E,CAAClF,GAAqBkF,KAChHG,EAAa,CAACH,GAAoBzR,OAAOqR,GAAoBxR,QAAO,SAAUC,EAAKC,GACrF,OAAOD,EAAIE,OAAOsD,EAAiBvD,KAAeX,ECvCvC,SAA8B4C,EAAOc,QAClC,IAAZA,IACFA,EAAU,CAAC,GAGb,IAAIoM,EAAWpM,EACX/C,EAAYmP,EAASnP,UACrBuP,EAAWJ,EAASI,SACpBE,EAAeN,EAASM,aACxBjH,EAAU2G,EAAS3G,QACnBgJ,EAAiBrC,EAASqC,eAC1BM,EAAwB3C,EAASsC,sBACjCA,OAAkD,IAA1BK,EAAmC,EAAgBA,EAC3E7H,EAAYL,EAAa5J,GACzB6R,EAAa5H,EAAYuH,EAAiB3R,EAAsBA,EAAoB4H,QAAO,SAAUzH,GACvG,OAAO4J,EAAa5J,KAAeiK,CACrC,IAAK3K,EACDyS,EAAoBF,EAAWpK,QAAO,SAAUzH,GAClD,OAAOyR,EAAsBhL,QAAQzG,IAAc,CACrD,IAEiC,IAA7B+R,EAAkBC,SACpBD,EAAoBF,GAItB,IAAII,EAAYF,EAAkBjS,QAAO,SAAUC,EAAKC,GAOtD,OANAD,EAAIC,GAAakP,GAAejN,EAAO,CACrCjC,UAAWA,EACXuP,SAAUA,EACVE,aAAcA,EACdjH,QAASA,IACRjF,EAAiBvD,IACbD,CACT,GAAG,CAAC,GACJ,OAAOzB,OAAO4D,KAAK+P,GAAWC,MAAK,SAAUC,EAAGC,GAC9C,OAAOH,EAAUE,GAAKF,EAAUG,EAClC,GACF,CDC6DC,CAAqBpQ,EAAO,CACnFjC,UAAWA,EACXuP,SAAUA,EACVE,aAAcA,EACdjH,QAASA,EACTgJ,eAAgBA,EAChBC,sBAAuBA,IACpBzR,EACP,GAAG,IACCsS,EAAgBrQ,EAAMwG,MAAM7I,UAC5BoK,EAAa/H,EAAMwG,MAAM9I,OACzB4S,EAAY,IAAIC,IAChBC,GAAqB,EACrBC,EAAwBb,EAAW,GAE9Bc,EAAI,EAAGA,EAAId,EAAWG,OAAQW,IAAK,CAC1C,IAAI3S,EAAY6R,EAAWc,GAEvBC,EAAiBrP,EAAiBvD,GAElC6S,EAAmBjJ,EAAa5J,KAAeT,EAC/CuT,EAAa,CAAC,EAAK5T,GAAQuH,QAAQmM,IAAmB,EACtDrK,EAAMuK,EAAa,QAAU,SAC7B1F,EAAW8B,GAAejN,EAAO,CACnCjC,UAAWA,EACXuP,SAAUA,EACVE,aAAcA,EACdI,YAAaA,EACbrH,QAASA,IAEPuK,EAAoBD,EAAaD,EAAmB1T,EAAQC,EAAOyT,EAAmB3T,EAAS,EAE/FoT,EAAc/J,GAAOyB,EAAWzB,KAClCwK,EAAoBvG,GAAqBuG,IAG3C,IAAIC,EAAmBxG,GAAqBuG,GACxCE,EAAS,GAUb,GARIhC,GACFgC,EAAOC,KAAK9F,EAASwF,IAAmB,GAGtCxB,GACF6B,EAAOC,KAAK9F,EAAS2F,IAAsB,EAAG3F,EAAS4F,IAAqB,GAG1EC,EAAOE,OAAM,SAAUC,GACzB,OAAOA,CACT,IAAI,CACFV,EAAwB1S,EACxByS,GAAqB,EACrB,KACF,CAEAF,EAAUc,IAAIrT,EAAWiT,EAC3B,CAEA,GAAIR,EAqBF,IAnBA,IAEIa,EAAQ,SAAeC,GACzB,IAAIC,EAAmB3B,EAAW4B,MAAK,SAAUzT,GAC/C,IAAIiT,EAASV,EAAU9T,IAAIuB,GAE3B,GAAIiT,EACF,OAAOA,EAAOS,MAAM,EAAGH,GAAIJ,OAAM,SAAUC,GACzC,OAAOA,CACT,GAEJ,IAEA,GAAII,EAEF,OADAd,EAAwBc,EACjB,OAEX,EAESD,EAnBY/B,EAAiB,EAAI,EAmBZ+B,EAAK,GAGpB,UAFFD,EAAMC,GADmBA,KAOpCtR,EAAMjC,YAAc0S,IACtBzQ,EAAMmG,cAAcxG,GAAMmP,OAAQ,EAClC9O,EAAMjC,UAAY0S,EAClBzQ,EAAM0R,OAAQ,EA5GhB,CA8GF,EAQEhK,iBAAkB,CAAC,UACnBgC,KAAM,CACJoF,OAAO,IE7IX,SAAS6C,GAAexG,EAAUY,EAAM6F,GAQtC,YAPyB,IAArBA,IACFA,EAAmB,CACjBrO,
EAAG,EACHE,EAAG,IAIA,CACLzC,IAAKmK,EAASnK,IAAM+K,EAAK3I,OAASwO,EAAiBnO,EACnDvG,MAAOiO,EAASjO,MAAQ6O,EAAK7I,MAAQ0O,EAAiBrO,EACtDtG,OAAQkO,EAASlO,OAAS8O,EAAK3I,OAASwO,EAAiBnO,EACzDtG,KAAMgO,EAAShO,KAAO4O,EAAK7I,MAAQ0O,EAAiBrO,EAExD,CAEA,SAASsO,GAAsB1G,GAC7B,MAAO,CAAC,EAAKjO,EAAOD,EAAQE,GAAM2U,MAAK,SAAUC,GAC/C,OAAO5G,EAAS4G,IAAS,CAC3B,GACF,CA+BA,UACEpS,KAAM,OACNC,SAAS,EACTC,MAAO,OACP6H,iBAAkB,CAAC,mBACnB5H,GAlCF,SAAcC,GACZ,IAAIC,EAAQD,EAAKC,MACbL,EAAOI,EAAKJ,KACZ0Q,EAAgBrQ,EAAMwG,MAAM7I,UAC5BoK,EAAa/H,EAAMwG,MAAM9I,OACzBkU,EAAmB5R,EAAMmG,cAAc6L,gBACvCC,EAAoBhF,GAAejN,EAAO,CAC5C0N,eAAgB,cAEdwE,EAAoBjF,GAAejN,EAAO,CAC5C4N,aAAa,IAEXuE,EAA2BR,GAAeM,EAAmB5B,GAC7D+B,EAAsBT,GAAeO,EAAmBnK,EAAY6J,GACpES,EAAoBR,GAAsBM,GAC1CG,EAAmBT,GAAsBO,GAC7CpS,EAAMmG,cAAcxG,GAAQ,CAC1BwS,yBAA0BA,EAC1BC,oBAAqBA,EACrBC,kBAAmBA,EACnBC,iBAAkBA,GAEpBtS,EAAMM,WAAW5C,OAASrB,OAAOkE,OAAO,CAAC,EAAGP,EAAMM,WAAW5C,OAAQ,CACnE,+BAAgC2U,EAChC,sBAAuBC,GAE3B,GCJA,IACE3S,KAAM,SACNC,SAAS,EACTC,MAAO,OACPwB,SAAU,CAAC,iBACXvB,GA5BF,SAAgBa,GACd,IAAIX,EAAQW,EAAMX,MACdc,EAAUH,EAAMG,QAChBnB,EAAOgB,EAAMhB,KACb4S,EAAkBzR,EAAQuG,OAC1BA,OAA6B,IAApBkL,EAA6B,CAAC,EAAG,GAAKA,EAC/C7I,EAAO,EAAW7L,QAAO,SAAUC,EAAKC,GAE1C,OADAD,EAAIC,GA5BD,SAAiCA,EAAWyI,EAAOa,GACxD,IAAIjB,EAAgB9E,EAAiBvD,GACjCyU,EAAiB,CAACrV,EAAM,GAAKqH,QAAQ4B,IAAkB,GAAK,EAAI,EAEhErG,EAAyB,mBAAXsH,EAAwBA,EAAOhL,OAAOkE,OAAO,CAAC,EAAGiG,EAAO,CACxEzI,UAAWA,KACPsJ,EACFoL,EAAW1S,EAAK,GAChB2S,EAAW3S,EAAK,GAIpB,OAFA0S,EAAWA,GAAY,EACvBC,GAAYA,GAAY,GAAKF,EACtB,CAACrV,EAAMD,GAAOsH,QAAQ4B,IAAkB,EAAI,CACjD7C,EAAGmP,EACHjP,EAAGgP,GACD,CACFlP,EAAGkP,EACHhP,EAAGiP,EAEP,CASqBC,CAAwB5U,EAAWiC,EAAMwG,MAAOa,GAC1DvJ,CACT,GAAG,CAAC,GACA8U,EAAwBlJ,EAAK1J,EAAMjC,WACnCwF,EAAIqP,EAAsBrP,EAC1BE,EAAImP,EAAsBnP,EAEW,MAArCzD,EAAMmG,cAAcD,gBACtBlG,EAAMmG,cAAcD,cAAc3C,GAAKA,EACvCvD,EAAMmG,cAAcD,cAAczC,GAAKA,GAGzCzD,EAAMmG,cAAcxG,GAAQ+J,CAC9B,GC1BA,IACE/J,KAAM,gBACNC,SAAS,EACTC,MAAO,OACPC,GApBF,SAAuBC,GACrB,IAAIC,EAAQD,EAAKC,MACbL,EAAOI,EAAKJ,KAKhBK,EAAMmG,cAAcxG,GAAQkN,GAAe,CACzClP,UAAWqC,EAAMwG,MAAM7I,UACvBiB,QAASoB,EAAMwG,MAAM9I,OACrBqD,SAAU,WACVhD,UAAWiC,EAAMjC,WAErB,EAQE2L,KAAM,CAAC,GCgHT,IACE/J,KAAM,kBACNC,SAAS,EACTC,MAAO,OACPC,GA/HF,SAAyBC,GACvB,IAAIC,EAAQD,EAAKC,MACbc,EAAUf,EAAKe,QACfnB,EAAOI,EAAKJ,KACZoP,EAAoBjO,EAAQkM,SAC5BgC,OAAsC,IAAtBD,GAAsCA,EACtDE,EAAmBnO,EAAQoO,QAC3BC,OAAoC,IAArBF,GAAsCA,EACrD3B,EAAWxM,EAAQwM,SACnBE,EAAe1M,EAAQ0M,aACvBI,EAAc9M,EAAQ8M,YACtBrH,EAAUzF,EAAQyF,QAClBsM,EAAkB/R,EAAQgS,OAC1BA,OAA6B,IAApBD,GAAoCA,EAC7CE,EAAwBjS,EAAQkS,aAChCA,OAAyC,IAA1BD,EAAmC,EAAIA,EACtD5H,EAAW8B,GAAejN,EAAO,CACnCsN,SAAUA,EACVE,aAAcA,EACdjH,QAASA,EACTqH,YAAaA,IAEXxH,EAAgB9E,EAAiBtB,EAAMjC,WACvCiK,EAAYL,EAAa3H,EAAMjC,WAC/BkV,GAAmBjL,EACnBgF,EAAWtH,EAAyBU,GACpC8I,ECrCY,MDqCSlC,ECrCH,IAAM,IDsCxB9G,EAAgBlG,EAAMmG,cAAcD,cACpCmK,EAAgBrQ,EAAMwG,MAAM7I,UAC5BoK,EAAa/H,EAAMwG,MAAM9I,OACzBwV,EAA4C,mBAAjBF,EAA8BA,EAAa3W,OAAOkE,OAAO,CAAC,EAAGP,EAAMwG,MAAO,CACvGzI,UAAWiC,EAAMjC,aACbiV,EACFG,EAA2D,iBAAtBD,EAAiC,CACxElG,SAAUkG,EACVhE,QAASgE,GACP7W,OAAOkE,OAAO,CAChByM,SAAU,EACVkC,QAAS,GACRgE,GACCE,EAAsBpT,EAAMmG,cAAckB,OAASrH,EAAMmG,cAAckB,OAAOrH,EAAMjC,WAAa,KACjG2L,EAAO,CACTnG,EAAG,EACHE,EAAG,GAGL,GAAKyC,EAAL,CAIA,GAAI8I,EAAe,CACjB,IAAIqE,EAEAC,EAAwB,MAAbtG,EAAmB,EAAM7P,EACpCoW,EAAuB,MAAbvG,EAAmB/P,EAASC,EACtCoJ,EAAmB,MAAb0G,EAAmB,SAAW,QACpC3F,EAASnB,EAAc8G,GACvBtL,EAAM2F,EAAS8D,EAASmI,GACxB7R,EAAM4F,EAAS8D,EAASoI,GACxBC,EAAWV,GAAU/K,EAAWzB,GAAO,EAAI,EAC3CmN,EAASzL,IAAc1K,EAAQ+S,EAAc/J,GAAOyB,EAAWzB,GAC/DoN,EAAS1L,IAAc1K,GAASyK,EAAWzB,IAAQ+J,EAAc/J,GAGjEL,EAAejG,EAAME,SAASgB,MAC9BwF,EAAYoM,GAAU7M,EAAetC,EAAcsC,GAAgB,CACrE/C,MAA
O,EACPE,OAAQ,GAENuQ,GAAqB3T,EAAMmG,cAAc,oBAAsBnG,EAAMmG,cAAc,oBAAoBI,QxBhFtG,CACLvF,IAAK,EACL9D,MAAO,EACPD,OAAQ,EACRE,KAAM,GwB6EFyW,GAAkBD,GAAmBL,GACrCO,GAAkBF,GAAmBJ,GAMrCO,GAAWnO,EAAO,EAAG0K,EAAc/J,GAAMI,EAAUJ,IACnDyN,GAAYd,EAAkB5C,EAAc/J,GAAO,EAAIkN,EAAWM,GAAWF,GAAkBT,EAA4BnG,SAAWyG,EAASK,GAAWF,GAAkBT,EAA4BnG,SACxMgH,GAAYf,GAAmB5C,EAAc/J,GAAO,EAAIkN,EAAWM,GAAWD,GAAkBV,EAA4BnG,SAAW0G,EAASI,GAAWD,GAAkBV,EAA4BnG,SACzMjG,GAAoB/G,EAAME,SAASgB,OAAS8D,EAAgBhF,EAAME,SAASgB,OAC3E+S,GAAelN,GAAiC,MAAbiG,EAAmBjG,GAAkBsF,WAAa,EAAItF,GAAkBuF,YAAc,EAAI,EAC7H4H,GAAwH,OAAjGb,EAA+C,MAAvBD,OAA8B,EAASA,EAAoBpG,IAAqBqG,EAAwB,EAEvJc,GAAY9M,EAAS2M,GAAYE,GACjCE,GAAkBzO,EAAOmN,EAAS,EAAQpR,EAF9B2F,EAAS0M,GAAYG,GAAsBD,IAEKvS,EAAK2F,EAAQyL,EAAS,EAAQrR,EAAK0S,IAAa1S,GAChHyE,EAAc8G,GAAYoH,GAC1B1K,EAAKsD,GAAYoH,GAAkB/M,CACrC,CAEA,GAAI8H,EAAc,CAChB,IAAIkF,GAEAC,GAAyB,MAAbtH,EAAmB,EAAM7P,EAErCoX,GAAwB,MAAbvH,EAAmB/P,EAASC,EAEvCsX,GAAUtO,EAAcgJ,GAExBuF,GAAmB,MAAZvF,EAAkB,SAAW,QAEpCwF,GAAOF,GAAUrJ,EAASmJ,IAE1BK,GAAOH,GAAUrJ,EAASoJ,IAE1BK,IAAuD,IAAxC,CAAC,EAAKzX,GAAMqH,QAAQ4B,GAEnCyO,GAAyH,OAAjGR,GAAgD,MAAvBjB,OAA8B,EAASA,EAAoBlE,IAAoBmF,GAAyB,EAEzJS,GAAaF,GAAeF,GAAOF,GAAUnE,EAAcoE,IAAQ1M,EAAW0M,IAAQI,GAAuB1B,EAA4BjE,QAEzI6F,GAAaH,GAAeJ,GAAUnE,EAAcoE,IAAQ1M,EAAW0M,IAAQI,GAAuB1B,EAA4BjE,QAAUyF,GAE5IK,GAAmBlC,GAAU8B,G1BzH9B,SAAwBlT,EAAK1E,EAAOyE,GACzC,IAAIwT,EAAItP,EAAOjE,EAAK1E,EAAOyE,GAC3B,OAAOwT,EAAIxT,EAAMA,EAAMwT,CACzB,C0BsHoDC,CAAeJ,GAAYN,GAASO,IAAcpP,EAAOmN,EAASgC,GAAaJ,GAAMF,GAAS1B,EAASiC,GAAaJ,IAEpKzO,EAAcgJ,GAAW8F,GACzBtL,EAAKwF,GAAW8F,GAAmBR,EACrC,CAEAxU,EAAMmG,cAAcxG,GAAQ+J,CAvE5B,CAwEF,EAQEhC,iBAAkB,CAAC,WE1HN,SAASyN,GAAiBC,EAAyBrQ,EAAcsD,QAC9D,IAAZA,IACFA,GAAU,GAGZ,ICnBoCrJ,ECJOJ,EFuBvCyW,EAA0B9V,EAAcwF,GACxCuQ,EAAuB/V,EAAcwF,IAf3C,SAAyBnG,GACvB,IAAImN,EAAOnN,EAAQ+D,wBACfI,EAASpB,EAAMoK,EAAK7I,OAAStE,EAAQqE,aAAe,EACpDD,EAASrB,EAAMoK,EAAK3I,QAAUxE,EAAQuE,cAAgB,EAC1D,OAAkB,IAAXJ,GAA2B,IAAXC,CACzB,CAU4DuS,CAAgBxQ,GACtEJ,EAAkBF,EAAmBM,GACrCgH,EAAOpJ,EAAsByS,EAAyBE,EAAsBjN,GAC5EyB,EAAS,CACXc,WAAY,EACZE,UAAW,GAET7C,EAAU,CACZ1E,EAAG,EACHE,EAAG,GAkBL,OAfI4R,IAA4BA,IAA4BhN,MACxB,SAA9B1J,EAAYoG,IAChBkG,GAAetG,MACbmF,GCnCgC9K,EDmCT+F,KClCdhG,EAAUC,IAAUO,EAAcP,GCJxC,CACL4L,YAFyChM,EDQbI,GCNR4L,WACpBE,UAAWlM,EAAQkM,WDGZH,GAAgB3L,IDoCnBO,EAAcwF,KAChBkD,EAAUtF,EAAsBoC,GAAc,IACtCxB,GAAKwB,EAAauH,WAC1BrE,EAAQxE,GAAKsB,EAAasH,WACjB1H,IACTsD,EAAQ1E,EAAIyH,GAAoBrG,KAI7B,CACLpB,EAAGwI,EAAK5O,KAAO2M,EAAOc,WAAa3C,EAAQ1E,EAC3CE,EAAGsI,EAAK/K,IAAM8I,EAAOgB,UAAY7C,EAAQxE,EACzCP,MAAO6I,EAAK7I,MACZE,OAAQ2I,EAAK3I,OAEjB,CGvDA,SAASoS,GAAMC,GACb,IAAItT,EAAM,IAAIoO,IACVmF,EAAU,IAAIC,IACdC,EAAS,GAKb,SAAS3F,EAAK4F,GACZH,EAAQI,IAAID,EAASlW,MACN,GAAG3B,OAAO6X,EAASxU,UAAY,GAAIwU,EAASnO,kBAAoB,IACtEvH,SAAQ,SAAU4V,GACzB,IAAKL,EAAQM,IAAID,GAAM,CACrB,IAAIE,EAAc9T,EAAI3F,IAAIuZ,GAEtBE,GACFhG,EAAKgG,EAET,CACF,IACAL,EAAO3E,KAAK4E,EACd,CAQA,OAzBAJ,EAAUtV,SAAQ,SAAU0V,GAC1B1T,EAAIiP,IAAIyE,EAASlW,KAAMkW,EACzB,IAiBAJ,EAAUtV,SAAQ,SAAU0V,GACrBH,EAAQM,IAAIH,EAASlW,OAExBsQ,EAAK4F,EAET,IACOD,CACT,CCvBA,IAAIM,GAAkB,CACpBnY,UAAW,SACX0X,UAAW,GACX1U,SAAU,YAGZ,SAASoV,KACP,IAAK,IAAI1B,EAAO2B,UAAUrG,OAAQsG,EAAO,IAAIpU,MAAMwS,GAAO6B,EAAO,EAAGA,EAAO7B,EAAM6B,IAC/ED,EAAKC,GAAQF,UAAUE,GAGzB,OAAQD,EAAKvE,MAAK,SAAUlT,GAC1B,QAASA,GAAoD,mBAAlCA,EAAQ+D,sBACrC,GACF,CAEO,SAAS4T,GAAgBC,QACL,IAArBA,IACFA,EAAmB,CAAC,GAGtB,IAAIC,EAAoBD,EACpBE,EAAwBD,EAAkBE,iBAC1CA,OAA6C,IAA1BD,EAAmC,GAAKA,EAC3DE,EAAyBH,EAAkBI,eAC3CA,OAA4C,IAA3BD,EAAoCV,GAAkBU,EAC3E,OAAO,SAAsBjZ,EAAWD,EAAQoD,QAC9B,IAAZA,IACFA,EAAU+V,GAGZ,ICxC6B/W,EAC3BgX,EDuCE9W,EAAQ,CACVjC,UAAW,SACXgZ,iBAAkB,GAClBjW,QAASzE,OAAOkE,OAAO
,CAAC,EAAG2V,GAAiBW,GAC5C1Q,cAAe,CAAC,EAChBjG,SAAU,CACRvC,UAAWA,EACXD,OAAQA,GAEV4C,WAAY,CAAC,EACbD,OAAQ,CAAC,GAEP2W,EAAmB,GACnBC,GAAc,EACdrN,EAAW,CACb5J,MAAOA,EACPkX,WAAY,SAAoBC,GAC9B,IAAIrW,EAAsC,mBAArBqW,EAAkCA,EAAiBnX,EAAMc,SAAWqW,EACzFC,IACApX,EAAMc,QAAUzE,OAAOkE,OAAO,CAAC,EAAGsW,EAAgB7W,EAAMc,QAASA,GACjEd,EAAMiK,cAAgB,CACpBtM,UAAW0B,EAAU1B,GAAa6N,GAAkB7N,GAAaA,EAAU4Q,eAAiB/C,GAAkB7N,EAAU4Q,gBAAkB,GAC1I7Q,OAAQ8N,GAAkB9N,IAI5B,IElE4B+X,EAC9B4B,EFiEMN,EDhCG,SAAwBtB,GAErC,IAAIsB,EAAmBvB,GAAMC,GAE7B,OAAO/W,EAAeb,QAAO,SAAUC,EAAK+B,GAC1C,OAAO/B,EAAIE,OAAO+Y,EAAiBvR,QAAO,SAAUqQ,GAClD,OAAOA,EAAShW,QAAUA,CAC5B,IACF,GAAG,GACL,CCuB+ByX,EElEK7B,EFkEsB,GAAGzX,OAAO2Y,EAAkB3W,EAAMc,QAAQ2U,WEjE9F4B,EAAS5B,EAAU5X,QAAO,SAAUwZ,EAAQE,GAC9C,IAAIC,EAAWH,EAAOE,EAAQ5X,MAK9B,OAJA0X,EAAOE,EAAQ5X,MAAQ6X,EAAWnb,OAAOkE,OAAO,CAAC,EAAGiX,EAAUD,EAAS,CACrEzW,QAASzE,OAAOkE,OAAO,CAAC,EAAGiX,EAAS1W,QAASyW,EAAQzW,SACrD4I,KAAMrN,OAAOkE,OAAO,CAAC,EAAGiX,EAAS9N,KAAM6N,EAAQ7N,QAC5C6N,EACEF,CACT,GAAG,CAAC,GAEGhb,OAAO4D,KAAKoX,GAAQlV,KAAI,SAAUhG,GACvC,OAAOkb,EAAOlb,EAChB,MF4DM,OAJA6D,EAAM+W,iBAAmBA,EAAiBvR,QAAO,SAAUiS,GACzD,OAAOA,EAAE7X,OACX,IA+FFI,EAAM+W,iBAAiB5W,SAAQ,SAAUJ,GACvC,IAAIJ,EAAOI,EAAKJ,KACZ+X,EAAe3X,EAAKe,QACpBA,OAA2B,IAAjB4W,EAA0B,CAAC,EAAIA,EACzChX,EAASX,EAAKW,OAElB,GAAsB,mBAAXA,EAAuB,CAChC,IAAIiX,EAAYjX,EAAO,CACrBV,MAAOA,EACPL,KAAMA,EACNiK,SAAUA,EACV9I,QAASA,IAKXkW,EAAiB/F,KAAK0G,GAFT,WAAmB,EAGlC,CACF,IA/GS/N,EAASQ,QAClB,EAMAwN,YAAa,WACX,IAAIX,EAAJ,CAIA,IAAIY,EAAkB7X,EAAME,SACxBvC,EAAYka,EAAgBla,UAC5BD,EAASma,EAAgBna,OAG7B,GAAKyY,GAAiBxY,EAAWD,GAAjC,CAKAsC,EAAMwG,MAAQ,CACZ7I,UAAWwX,GAAiBxX,EAAWqH,EAAgBtH,GAAoC,UAA3BsC,EAAMc,QAAQC,UAC9ErD,OAAQiG,EAAcjG,IAOxBsC,EAAM0R,OAAQ,EACd1R,EAAMjC,UAAYiC,EAAMc,QAAQ/C,UAKhCiC,EAAM+W,iBAAiB5W,SAAQ,SAAU0V,GACvC,OAAO7V,EAAMmG,cAAc0P,EAASlW,MAAQtD,OAAOkE,OAAO,CAAC,EAAGsV,EAASnM,KACzE,IAEA,IAAK,IAAIoO,EAAQ,EAAGA,EAAQ9X,EAAM+W,iBAAiBhH,OAAQ+H,IACzD,IAAoB,IAAhB9X,EAAM0R,MAAV,CAMA,IAAIqG,EAAwB/X,EAAM+W,iBAAiBe,GAC/ChY,EAAKiY,EAAsBjY,GAC3BkY,EAAyBD,EAAsBjX,QAC/CoM,OAAsC,IAA3B8K,EAAoC,CAAC,EAAIA,EACpDrY,EAAOoY,EAAsBpY,KAEf,mBAAPG,IACTE,EAAQF,EAAG,CACTE,MAAOA,EACPc,QAASoM,EACTvN,KAAMA,EACNiK,SAAUA,KACN5J,EAdR,MAHEA,EAAM0R,OAAQ,EACdoG,GAAS,CAzBb,CATA,CAqDF,EAGA1N,QC1I2BtK,ED0IV,WACf,OAAO,IAAImY,SAAQ,SAAUC,GAC3BtO,EAASgO,cACTM,EAAQlY,EACV,GACF,EC7IG,WAUL,OATK8W,IACHA,EAAU,IAAImB,SAAQ,SAAUC,GAC9BD,QAAQC,UAAUC,MAAK,WACrBrB,OAAUsB,EACVF,EAAQpY,IACV,GACF,KAGKgX,CACT,GDmIIuB,QAAS,WACPjB,IACAH,GAAc,CAChB,GAGF,IAAKd,GAAiBxY,EAAWD,GAC/B,OAAOkM,EAmCT,SAASwN,IACPJ,EAAiB7W,SAAQ,SAAUL,GACjC,OAAOA,GACT,IACAkX,EAAmB,EACrB,CAEA,OAvCApN,EAASsN,WAAWpW,GAASqX,MAAK,SAAUnY,IACrCiX,GAAenW,EAAQwX,eAC1BxX,EAAQwX,cAActY,EAE1B,IAmCO4J,CACT,CACF,CACO,IAAI2O,GAA4BhC,KGzLnC,GAA4BA,GAAgB,CAC9CI,iBAFqB,CAAC6B,GAAgB,GAAe,GAAe,EAAa,GAAQ,GAAM,GAAiB,EAAO,MCJrH,GAA4BjC,GAAgB,CAC9CI,iBAFqB,CAAC6B,GAAgB,GAAe,GAAe,KCatE,MAAMC,GAAa,IAAIlI,IACjBmI,GAAO,CACX,GAAAtH,CAAIxS,EAASzC,EAAKyN,GACX6O,GAAWzC,IAAIpX,IAClB6Z,GAAWrH,IAAIxS,EAAS,IAAI2R,KAE9B,MAAMoI,EAAcF,GAAWjc,IAAIoC,GAI9B+Z,EAAY3C,IAAI7Z,IAA6B,IAArBwc,EAAYC,KAKzCD,EAAYvH,IAAIjV,EAAKyN,GAHnBiP,QAAQC,MAAM,+EAA+E7W,MAAM8W,KAAKJ,EAAY1Y,QAAQ,MAIhI,EACAzD,IAAG,CAACoC,EAASzC,IACPsc,GAAWzC,IAAIpX,IACV6Z,GAAWjc,IAAIoC,GAASpC,IAAIL,IAE9B,KAET,MAAA6c,CAAOpa,EAASzC,GACd,IAAKsc,GAAWzC,IAAIpX,GAClB,OAEF,MAAM+Z,EAAcF,GAAWjc,IAAIoC,GACnC+Z,EAAYM,OAAO9c,GAGM,IAArBwc,EAAYC,MACdH,GAAWQ,OAAOra,EAEtB,GAYIsa,GAAiB,gBAOjBC,GAAgBC,IAChBA,GAAYna,OAAOoa,KAAOpa,OAAOoa,IAAIC,SAEvCF,EAAWA,EAAS5O,QAAQ,iBAAiB,CAAC+O,EAAOC,IAAO,IAAIH,IAAIC,OAAOE,QAEtEJ,GA4CHK,GAAuB7a,IAC3BA,EAAQ8a,cAAc,IAAIC,MAAMT,IAAgB,E
AE5C,GAAYU,MACXA,GAA4B,iBAAXA,UAGO,IAAlBA,EAAOC,SAChBD,EAASA,EAAO,SAEgB,IAApBA,EAAOE,UAEjBC,GAAaH,GAEb,GAAUA,GACLA,EAAOC,OAASD,EAAO,GAAKA,EAEf,iBAAXA,GAAuBA,EAAO7J,OAAS,EACzCrL,SAAS+C,cAAc0R,GAAcS,IAEvC,KAEHI,GAAYpb,IAChB,IAAK,GAAUA,IAAgD,IAApCA,EAAQqb,iBAAiBlK,OAClD,OAAO,EAET,MAAMmK,EAAgF,YAA7D5V,iBAAiB1F,GAASub,iBAAiB,cAE9DC,EAAgBxb,EAAQyb,QAAQ,uBACtC,IAAKD,EACH,OAAOF,EAET,GAAIE,IAAkBxb,EAAS,CAC7B,MAAM0b,EAAU1b,EAAQyb,QAAQ,WAChC,GAAIC,GAAWA,EAAQlW,aAAegW,EACpC,OAAO,EAET,GAAgB,OAAZE,EACF,OAAO,CAEX,CACA,OAAOJ,CAAgB,EAEnBK,GAAa3b,IACZA,GAAWA,EAAQkb,WAAaU,KAAKC,gBAGtC7b,EAAQ8b,UAAU7W,SAAS,mBAGC,IAArBjF,EAAQ+b,SACV/b,EAAQ+b,SAEV/b,EAAQgc,aAAa,aAAoD,UAArChc,EAAQic,aAAa,aAE5DC,GAAiBlc,IACrB,IAAK8F,SAASC,gBAAgBoW,aAC5B,OAAO,KAIT,GAAmC,mBAAxBnc,EAAQqF,YAA4B,CAC7C,MAAM+W,EAAOpc,EAAQqF,cACrB,OAAO+W,aAAgBtb,WAAasb,EAAO,IAC7C,CACA,OAAIpc,aAAmBc,WACdd,EAIJA,EAAQwF,WAGN0W,GAAelc,EAAQwF,YAFrB,IAEgC,EAErC6W,GAAO,OAUPC,GAAStc,IACbA,EAAQuE,YAAY,EAEhBgY,GAAY,IACZlc,OAAOmc,SAAW1W,SAAS6G,KAAKqP,aAAa,qBACxC3b,OAAOmc,OAET,KAEHC,GAA4B,GAgB5BC,GAAQ,IAAuC,QAAjC5W,SAASC,gBAAgB4W,IACvCC,GAAqBC,IAhBAC,QAiBN,KACjB,MAAMC,EAAIR,KAEV,GAAIQ,EAAG,CACL,MAAMhc,EAAO8b,EAAOG,KACdC,EAAqBF,EAAE7b,GAAGH,GAChCgc,EAAE7b,GAAGH,GAAQ8b,EAAOK,gBACpBH,EAAE7b,GAAGH,GAAMoc,YAAcN,EACzBE,EAAE7b,GAAGH,GAAMqc,WAAa,KACtBL,EAAE7b,GAAGH,GAAQkc,EACNJ,EAAOK,gBAElB,GA5B0B,YAAxBpX,SAASuX,YAENZ,GAA0BtL,QAC7BrL,SAASyF,iBAAiB,oBAAoB,KAC5C,IAAK,MAAMuR,KAAYL,GACrBK,GACF,IAGJL,GAA0BpK,KAAKyK,IAE/BA,GAkBA,EAEEQ,GAAU,CAACC,EAAkB9F,EAAO,GAAI+F,EAAeD,IACxB,mBAArBA,EAAkCA,KAAoB9F,GAAQ+F,EAExEC,GAAyB,CAACX,EAAUY,EAAmBC,GAAoB,KAC/E,IAAKA,EAEH,YADAL,GAAQR,GAGV,MACMc,EA/JiC5d,KACvC,IAAKA,EACH,OAAO,EAIT,IAAI,mBACF6d,EAAkB,gBAClBC,GACEzd,OAAOqF,iBAAiB1F,GAC5B,MAAM+d,EAA0BC,OAAOC,WAAWJ,GAC5CK,EAAuBF,OAAOC,WAAWH,GAG/C,OAAKC,GAA4BG,GAKjCL,EAAqBA,EAAmBlb,MAAM,KAAK,GACnDmb,EAAkBA,EAAgBnb,MAAM,KAAK,GAtDf,KAuDtBqb,OAAOC,WAAWJ,GAAsBG,OAAOC,WAAWH,KANzD,CAMoG,EA0IpFK,CAAiCT,GADlC,EAExB,IAAIU,GAAS,EACb,MAAMC,EAAU,EACdrR,aAEIA,IAAW0Q,IAGfU,GAAS,EACTV,EAAkBjS,oBAAoB6O,GAAgB+D,GACtDf,GAAQR,GAAS,EAEnBY,EAAkBnS,iBAAiB+O,GAAgB+D,GACnDC,YAAW,KACJF,GACHvD,GAAqB6C,EACvB,GACCE,EAAiB,EAYhBW,GAAuB,CAAC1R,EAAM2R,EAAeC,EAAeC,KAChE,MAAMC,EAAa9R,EAAKsE,OACxB,IAAI+H,EAAQrM,EAAKjH,QAAQ4Y,GAIzB,OAAe,IAAXtF,GACMuF,GAAiBC,EAAiB7R,EAAK8R,EAAa,GAAK9R,EAAK,IAExEqM,GAASuF,EAAgB,GAAK,EAC1BC,IACFxF,GAASA,EAAQyF,GAAcA,GAE1B9R,EAAKjK,KAAKC,IAAI,EAAGD,KAAKE,IAAIoW,EAAOyF,EAAa,KAAI,EAerDC,GAAiB,qBACjBC,GAAiB,OACjBC,GAAgB,SAChBC,GAAgB,CAAC,EACvB,IAAIC,GAAW,EACf,MAAMC,GAAe,CACnBC,WAAY,YACZC,WAAY,YAERC,GAAe,IAAIrI,IAAI,CAAC,QAAS,WAAY,UAAW,YAAa,cAAe,aAAc,iBAAkB,YAAa,WAAY,YAAa,cAAe,YAAa,UAAW,WAAY,QAAS,oBAAqB,aAAc,YAAa,WAAY,cAAe,cAAe,cAAe,YAAa,eAAgB,gBAAiB,eAAgB,gBAAiB,aAAc,QAAS,OAAQ,SAAU,QAAS,SAAU,SAAU,UAAW,WAAY,OAAQ,SAAU,eAAgB,SAAU,OAAQ,mBAAoB,mBAAoB,QAAS,QAAS,WAM/lB,SAASsI,GAAarf,EAASsf,GAC7B,OAAOA,GAAO,GAAGA,MAAQN,QAAgBhf,EAAQgf,UAAYA,IAC/D,CACA,SAASO,GAAiBvf,GACxB,MAAMsf,EAAMD,GAAarf,GAGzB,OAFAA,EAAQgf,SAAWM,EACnBP,GAAcO,GAAOP,GAAcO,IAAQ,CAAC,EACrCP,GAAcO,EACvB,CAiCA,SAASE,GAAYC,EAAQC,EAAUC,EAAqB,MAC1D,OAAOliB,OAAOmiB,OAAOH,GAAQ7M,MAAKiN,GAASA,EAAMH,WAAaA,GAAYG,EAAMF,qBAAuBA,GACzG,CACA,SAASG,GAAoBC,EAAmB1B,EAAS2B,GACvD,MAAMC,EAAiC,iBAAZ5B,EAErBqB,EAAWO,EAAcD,EAAqB3B,GAAW2B,EAC/D,IAAIE,EAAYC,GAAaJ,GAI7B,OAHKX,GAAahI,IAAI8I,KACpBA,EAAYH,GAEP,CAACE,EAAaP,EAAUQ,EACjC,CACA,SAASE,GAAWpgB,EAAS+f,EAAmB1B,EAAS2B,EAAoBK,GAC3E,GAAiC,iBAAtBN,IAAmC/f,EAC5C,OAEF,IAAKigB,EAAaP,EAAUQ,GAAaJ,GAAoBC,EAAmB1B,EAAS2B,GAIzF,GAAID,KAAqBd,GAAc,CACrC,MAAMqB,EAAepf,GACZ,SAAU2e,GACf,IAAKA,EAAMU,eAAiBV,EAAMU,gBAAkBV,EAAMW,iBAAmBX,EAAMW,e
AAevb,SAAS4a,EAAMU,eAC/G,OAAOrf,EAAGjD,KAAKwiB,KAAMZ,EAEzB,EAEFH,EAAWY,EAAaZ,EAC1B,CACA,MAAMD,EAASF,GAAiBvf,GAC1B0gB,EAAWjB,EAAOS,KAAeT,EAAOS,GAAa,CAAC,GACtDS,EAAmBnB,GAAYkB,EAAUhB,EAAUO,EAAc5B,EAAU,MACjF,GAAIsC,EAEF,YADAA,EAAiBN,OAASM,EAAiBN,QAAUA,GAGvD,MAAMf,EAAMD,GAAaK,EAAUK,EAAkBnU,QAAQgT,GAAgB,KACvE1d,EAAK+e,EA5Db,SAAoCjgB,EAASwa,EAAUtZ,GACrD,OAAO,SAASmd,EAAQwB,GACtB,MAAMe,EAAc5gB,EAAQ6gB,iBAAiBrG,GAC7C,IAAK,IAAI,OACPxN,GACE6S,EAAO7S,GAAUA,IAAWyT,KAAMzT,EAASA,EAAOxH,WACpD,IAAK,MAAMsb,KAAcF,EACvB,GAAIE,IAAe9T,EASnB,OANA+T,GAAWlB,EAAO,CAChBW,eAAgBxT,IAEdqR,EAAQgC,QACVW,GAAaC,IAAIjhB,EAAS6f,EAAMqB,KAAM1G,EAAUtZ,GAE3CA,EAAGigB,MAAMnU,EAAQ,CAAC6S,GAG/B,CACF,CAwC2BuB,CAA2BphB,EAASqe,EAASqB,GAvExE,SAA0B1f,EAASkB,GACjC,OAAO,SAASmd,EAAQwB,GAOtB,OANAkB,GAAWlB,EAAO,CAChBW,eAAgBxgB,IAEdqe,EAAQgC,QACVW,GAAaC,IAAIjhB,EAAS6f,EAAMqB,KAAMhgB,GAEjCA,EAAGigB,MAAMnhB,EAAS,CAAC6f,GAC5B,CACF,CA6DoFwB,CAAiBrhB,EAAS0f,GAC5Gxe,EAAGye,mBAAqBM,EAAc5B,EAAU,KAChDnd,EAAGwe,SAAWA,EACdxe,EAAGmf,OAASA,EACZnf,EAAG8d,SAAWM,EACdoB,EAASpB,GAAOpe,EAChBlB,EAAQuL,iBAAiB2U,EAAWhf,EAAI+e,EAC1C,CACA,SAASqB,GAActhB,EAASyf,EAAQS,EAAW7B,EAASsB,GAC1D,MAAMze,EAAKse,GAAYC,EAAOS,GAAY7B,EAASsB,GAC9Cze,IAGLlB,EAAQyL,oBAAoByU,EAAWhf,EAAIqgB,QAAQ5B,WAC5CF,EAAOS,GAAWhf,EAAG8d,UAC9B,CACA,SAASwC,GAAyBxhB,EAASyf,EAAQS,EAAWuB,GAC5D,MAAMC,EAAoBjC,EAAOS,IAAc,CAAC,EAChD,IAAK,MAAOyB,EAAY9B,KAAUpiB,OAAOmkB,QAAQF,GAC3CC,EAAWE,SAASJ,IACtBH,GAActhB,EAASyf,EAAQS,EAAWL,EAAMH,SAAUG,EAAMF,mBAGtE,CACA,SAASQ,GAAaN,GAGpB,OADAA,EAAQA,EAAMjU,QAAQiT,GAAgB,IAC/BI,GAAaY,IAAUA,CAChC,CACA,MAAMmB,GAAe,CACnB,EAAAc,CAAG9hB,EAAS6f,EAAOxB,EAAS2B,GAC1BI,GAAWpgB,EAAS6f,EAAOxB,EAAS2B,GAAoB,EAC1D,EACA,GAAA+B,CAAI/hB,EAAS6f,EAAOxB,EAAS2B,GAC3BI,GAAWpgB,EAAS6f,EAAOxB,EAAS2B,GAAoB,EAC1D,EACA,GAAAiB,CAAIjhB,EAAS+f,EAAmB1B,EAAS2B,GACvC,GAAiC,iBAAtBD,IAAmC/f,EAC5C,OAEF,MAAOigB,EAAaP,EAAUQ,GAAaJ,GAAoBC,EAAmB1B,EAAS2B,GACrFgC,EAAc9B,IAAcH,EAC5BN,EAASF,GAAiBvf,GAC1B0hB,EAAoBjC,EAAOS,IAAc,CAAC,EAC1C+B,EAAclC,EAAkBmC,WAAW,KACjD,QAAwB,IAAbxC,EAAX,CAQA,GAAIuC,EACF,IAAK,MAAME,KAAgB1kB,OAAO4D,KAAKoe,GACrC+B,GAAyBxhB,EAASyf,EAAQ0C,EAAcpC,EAAkBlN,MAAM,IAGpF,IAAK,MAAOuP,EAAavC,KAAUpiB,OAAOmkB,QAAQF,GAAoB,CACpE,MAAMC,EAAaS,EAAYxW,QAAQkT,GAAe,IACjDkD,IAAejC,EAAkB8B,SAASF,IAC7CL,GAActhB,EAASyf,EAAQS,EAAWL,EAAMH,SAAUG,EAAMF,mBAEpE,CAXA,KAPA,CAEE,IAAKliB,OAAO4D,KAAKqgB,GAAmBvQ,OAClC,OAEFmQ,GAActhB,EAASyf,EAAQS,EAAWR,EAAUO,EAAc5B,EAAU,KAE9E,CAYF,EACA,OAAAgE,CAAQriB,EAAS6f,EAAOpI,GACtB,GAAqB,iBAAVoI,IAAuB7f,EAChC,OAAO,KAET,MAAM+c,EAAIR,KAGV,IAAI+F,EAAc,KACdC,GAAU,EACVC,GAAiB,EACjBC,GAAmB,EAJH5C,IADFM,GAAaN,IAMZ9C,IACjBuF,EAAcvF,EAAEhC,MAAM8E,EAAOpI,GAC7BsF,EAAE/c,GAASqiB,QAAQC,GACnBC,GAAWD,EAAYI,uBACvBF,GAAkBF,EAAYK,gCAC9BF,EAAmBH,EAAYM,sBAEjC,MAAMC,EAAM9B,GAAW,IAAIhG,MAAM8E,EAAO,CACtC0C,UACAO,YAAY,IACVrL,GAUJ,OATIgL,GACFI,EAAIE,iBAEFP,GACFxiB,EAAQ8a,cAAc+H,GAEpBA,EAAIJ,kBAAoBH,GAC1BA,EAAYS,iBAEPF,CACT,GAEF,SAAS9B,GAAWljB,EAAKmlB,EAAO,CAAC,GAC/B,IAAK,MAAOzlB,EAAKa,KAAUX,OAAOmkB,QAAQoB,GACxC,IACEnlB,EAAIN,GAAOa,CACb,CAAE,MAAO6kB,GACPxlB,OAAOC,eAAeG,EAAKN,EAAK,CAC9B2lB,cAAc,EACdtlB,IAAG,IACMQ,GAGb,CAEF,OAAOP,CACT,CASA,SAASslB,GAAc/kB,GACrB,GAAc,SAAVA,EACF,OAAO,EAET,GAAc,UAAVA,EACF,OAAO,EAET,GAAIA,IAAU4f,OAAO5f,GAAOkC,WAC1B,OAAO0d,OAAO5f,GAEhB,GAAc,KAAVA,GAA0B,SAAVA,EAClB,OAAO,KAET,GAAqB,iBAAVA,EACT,OAAOA,EAET,IACE,OAAOglB,KAAKC,MAAMC,mBAAmBllB,GACvC,CAAE,MAAO6kB,GACP,OAAO7kB,CACT,CACF,CACA,SAASmlB,GAAiBhmB,GACxB,OAAOA,EAAIqO,QAAQ,UAAU4X,GAAO,IAAIA,EAAItjB,iBAC9C,CACA,MAAMujB,GAAc,CAClB,gBAAAC,CAAiB1jB,EAASzC,EAAKa,GAC7B4B,EAAQ6B,aAAa,WAAW0hB,GAAiBhmB,KAAQa,EAC3D,EACA,mBAAAulB,CAAoB3jB,EAASzC,GAC3ByC,
EAAQ4B,gBAAgB,WAAW2hB,GAAiBhmB,KACtD,EACA,iBAAAqmB,CAAkB5jB,GAChB,IAAKA,EACH,MAAO,CAAC,EAEV,MAAM0B,EAAa,CAAC,EACdmiB,EAASpmB,OAAO4D,KAAKrB,EAAQ8jB,SAASld,QAAOrJ,GAAOA,EAAI2kB,WAAW,QAAU3kB,EAAI2kB,WAAW,cAClG,IAAK,MAAM3kB,KAAOsmB,EAAQ,CACxB,IAAIE,EAAUxmB,EAAIqO,QAAQ,MAAO,IACjCmY,EAAUA,EAAQC,OAAO,GAAG9jB,cAAgB6jB,EAAQlR,MAAM,EAAGkR,EAAQ5S,QACrEzP,EAAWqiB,GAAWZ,GAAcnjB,EAAQ8jB,QAAQvmB,GACtD,CACA,OAAOmE,CACT,EACAuiB,iBAAgB,CAACjkB,EAASzC,IACjB4lB,GAAcnjB,EAAQic,aAAa,WAAWsH,GAAiBhmB,QAgB1E,MAAM2mB,GAEJ,kBAAWC,GACT,MAAO,CAAC,CACV,CACA,sBAAWC,GACT,MAAO,CAAC,CACV,CACA,eAAWpH,GACT,MAAM,IAAIqH,MAAM,sEAClB,CACA,UAAAC,CAAWC,GAIT,OAHAA,EAAS9D,KAAK+D,gBAAgBD,GAC9BA,EAAS9D,KAAKgE,kBAAkBF,GAChC9D,KAAKiE,iBAAiBH,GACfA,CACT,CACA,iBAAAE,CAAkBF,GAChB,OAAOA,CACT,CACA,eAAAC,CAAgBD,EAAQvkB,GACtB,MAAM2kB,EAAa,GAAU3kB,GAAWyjB,GAAYQ,iBAAiBjkB,EAAS,UAAY,CAAC,EAE3F,MAAO,IACFygB,KAAKmE,YAAYT,WACM,iBAAfQ,EAA0BA,EAAa,CAAC,KAC/C,GAAU3kB,GAAWyjB,GAAYG,kBAAkB5jB,GAAW,CAAC,KAC7C,iBAAXukB,EAAsBA,EAAS,CAAC,EAE/C,CACA,gBAAAG,CAAiBH,EAAQM,EAAcpE,KAAKmE,YAAYR,aACtD,IAAK,MAAO7hB,EAAUuiB,KAAkBrnB,OAAOmkB,QAAQiD,GAAc,CACnE,MAAMzmB,EAAQmmB,EAAOhiB,GACfwiB,EAAY,GAAU3mB,GAAS,UAhiBrC4c,OADSA,EAiiB+C5c,GA/hBnD,GAAG4c,IAELvd,OAAOM,UAAUuC,SAASrC,KAAK+c,GAAQL,MAAM,eAAe,GAAGza,cA8hBlE,IAAK,IAAI8kB,OAAOF,GAAehhB,KAAKihB,GAClC,MAAM,IAAIE,UAAU,GAAGxE,KAAKmE,YAAY5H,KAAKkI,0BAA0B3iB,qBAA4BwiB,yBAAiCD,MAExI,CAriBW9J,KAsiBb,EAqBF,MAAMmK,WAAsBjB,GAC1B,WAAAU,CAAY5kB,EAASukB,GACnBa,SACAplB,EAAUmb,GAAWnb,MAIrBygB,KAAK4E,SAAWrlB,EAChBygB,KAAK6E,QAAU7E,KAAK6D,WAAWC,GAC/BzK,GAAKtH,IAAIiO,KAAK4E,SAAU5E,KAAKmE,YAAYW,SAAU9E,MACrD,CAGA,OAAA+E,GACE1L,GAAKM,OAAOqG,KAAK4E,SAAU5E,KAAKmE,YAAYW,UAC5CvE,GAAaC,IAAIR,KAAK4E,SAAU5E,KAAKmE,YAAYa,WACjD,IAAK,MAAMC,KAAgBjoB,OAAOkoB,oBAAoBlF,MACpDA,KAAKiF,GAAgB,IAEzB,CACA,cAAAE,CAAe9I,EAAU9c,EAAS6lB,GAAa,GAC7CpI,GAAuBX,EAAU9c,EAAS6lB,EAC5C,CACA,UAAAvB,CAAWC,GAIT,OAHAA,EAAS9D,KAAK+D,gBAAgBD,EAAQ9D,KAAK4E,UAC3Cd,EAAS9D,KAAKgE,kBAAkBF,GAChC9D,KAAKiE,iBAAiBH,GACfA,CACT,CAGA,kBAAOuB,CAAY9lB,GACjB,OAAO8Z,GAAKlc,IAAIud,GAAWnb,GAAUygB,KAAK8E,SAC5C,CACA,0BAAOQ,CAAoB/lB,EAASukB,EAAS,CAAC,GAC5C,OAAO9D,KAAKqF,YAAY9lB,IAAY,IAAIygB,KAAKzgB,EAA2B,iBAAXukB,EAAsBA,EAAS,KAC9F,CACA,kBAAWyB,GACT,MA5CY,OA6Cd,CACA,mBAAWT,GACT,MAAO,MAAM9E,KAAKzD,MACpB,CACA,oBAAWyI,GACT,MAAO,IAAIhF,KAAK8E,UAClB,CACA,gBAAOU,CAAUllB,GACf,MAAO,GAAGA,IAAO0f,KAAKgF,WACxB,EAUF,MAAMS,GAAclmB,IAClB,IAAIwa,EAAWxa,EAAQic,aAAa,kBACpC,IAAKzB,GAAyB,MAAbA,EAAkB,CACjC,IAAI2L,EAAgBnmB,EAAQic,aAAa,QAMzC,IAAKkK,IAAkBA,EAActE,SAAS,OAASsE,EAAcjE,WAAW,KAC9E,OAAO,KAILiE,EAActE,SAAS,OAASsE,EAAcjE,WAAW,OAC3DiE,EAAgB,IAAIA,EAAcxjB,MAAM,KAAK,MAE/C6X,EAAW2L,GAAmC,MAAlBA,EAAwBA,EAAcC,OAAS,IAC7E,CACA,OAAO5L,EAAWA,EAAS7X,MAAM,KAAKY,KAAI8iB,GAAO9L,GAAc8L,KAAM1iB,KAAK,KAAO,IAAI,EAEjF2iB,GAAiB,CACrB1T,KAAI,CAAC4H,EAAUxa,EAAU8F,SAASC,kBACzB,GAAG3G,UAAUsB,QAAQ3C,UAAU8iB,iBAAiB5iB,KAAK+B,EAASwa,IAEvE+L,QAAO,CAAC/L,EAAUxa,EAAU8F,SAASC,kBAC5BrF,QAAQ3C,UAAU8K,cAAc5K,KAAK+B,EAASwa,GAEvDgM,SAAQ,CAACxmB,EAASwa,IACT,GAAGpb,UAAUY,EAAQwmB,UAAU5f,QAAOzB,GAASA,EAAMshB,QAAQjM,KAEtE,OAAAkM,CAAQ1mB,EAASwa,GACf,MAAMkM,EAAU,GAChB,IAAIC,EAAW3mB,EAAQwF,WAAWiW,QAAQjB,GAC1C,KAAOmM,GACLD,EAAQrU,KAAKsU,GACbA,EAAWA,EAASnhB,WAAWiW,QAAQjB,GAEzC,OAAOkM,CACT,EACA,IAAAE,CAAK5mB,EAASwa,GACZ,IAAIqM,EAAW7mB,EAAQ8mB,uBACvB,KAAOD,GAAU,CACf,GAAIA,EAASJ,QAAQjM,GACnB,MAAO,CAACqM,GAEVA,EAAWA,EAASC,sBACtB,CACA,MAAO,EACT,EAEA,IAAAxhB,CAAKtF,EAASwa,GACZ,IAAIlV,EAAOtF,EAAQ+mB,mBACnB,KAAOzhB,GAAM,CACX,GAAIA,EAAKmhB,QAAQjM,GACf,MAAO,CAAClV,GAEVA,EAAOA,EAAKyhB,kBACd,CACA,MAAO,EACT,EACA,iBAAAC,CAAkBhnB,GAChB,MAAMinB,EAAa,CAAC,IAAK,SAAU,QAAS,WAA
Y,SAAU,UAAW,aAAc,4BAA4B1jB,KAAIiX,GAAY,GAAGA,2BAAiC7W,KAAK,KAChL,OAAO8c,KAAK7N,KAAKqU,EAAYjnB,GAAS4G,QAAOsgB,IAAOvL,GAAWuL,IAAO9L,GAAU8L,IAClF,EACA,sBAAAC,CAAuBnnB,GACrB,MAAMwa,EAAW0L,GAAYlmB,GAC7B,OAAIwa,GACK8L,GAAeC,QAAQ/L,GAAYA,EAErC,IACT,EACA,sBAAA4M,CAAuBpnB,GACrB,MAAMwa,EAAW0L,GAAYlmB,GAC7B,OAAOwa,EAAW8L,GAAeC,QAAQ/L,GAAY,IACvD,EACA,+BAAA6M,CAAgCrnB,GAC9B,MAAMwa,EAAW0L,GAAYlmB,GAC7B,OAAOwa,EAAW8L,GAAe1T,KAAK4H,GAAY,EACpD,GAUI8M,GAAuB,CAACC,EAAWC,EAAS,UAChD,MAAMC,EAAa,gBAAgBF,EAAU9B,YACvC1kB,EAAOwmB,EAAUvK,KACvBgE,GAAac,GAAGhc,SAAU2hB,EAAY,qBAAqB1mB,OAAU,SAAU8e,GAI7E,GAHI,CAAC,IAAK,QAAQgC,SAASpB,KAAKiH,UAC9B7H,EAAMkD,iBAEJpH,GAAW8E,MACb,OAEF,MAAMzT,EAASsZ,GAAec,uBAAuB3G,OAASA,KAAKhF,QAAQ,IAAI1a,KAC9DwmB,EAAUxB,oBAAoB/Y,GAGtCwa,IACX,GAAE,EAiBEG,GAAc,YACdC,GAAc,QAAQD,KACtBE,GAAe,SAASF,KAQ9B,MAAMG,WAAc3C,GAElB,eAAWnI,GACT,MAfW,OAgBb,CAGA,KAAA+K,GAEE,GADmB/G,GAAaqB,QAAQ5B,KAAK4E,SAAUuC,IACxCnF,iBACb,OAEFhC,KAAK4E,SAASvJ,UAAU1B,OAlBF,QAmBtB,MAAMyL,EAAapF,KAAK4E,SAASvJ,UAAU7W,SApBrB,QAqBtBwb,KAAKmF,gBAAe,IAAMnF,KAAKuH,mBAAmBvH,KAAK4E,SAAUQ,EACnE,CAGA,eAAAmC,GACEvH,KAAK4E,SAASjL,SACd4G,GAAaqB,QAAQ5B,KAAK4E,SAAUwC,IACpCpH,KAAK+E,SACP,CAGA,sBAAOtI,CAAgBqH,GACrB,OAAO9D,KAAKwH,MAAK,WACf,MAAMnd,EAAOgd,GAAM/B,oBAAoBtF,MACvC,GAAsB,iBAAX8D,EAAX,CAGA,QAAqB/K,IAAjB1O,EAAKyZ,IAAyBA,EAAOrC,WAAW,MAAmB,gBAAXqC,EAC1D,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,GAAQ9D,KAJb,CAKF,GACF,EAOF6G,GAAqBQ,GAAO,SAM5BlL,GAAmBkL,IAcnB,MAKMI,GAAyB,4BAO/B,MAAMC,WAAehD,GAEnB,eAAWnI,GACT,MAfW,QAgBb,CAGA,MAAAoL,GAEE3H,KAAK4E,SAASxjB,aAAa,eAAgB4e,KAAK4E,SAASvJ,UAAUsM,OAjB3C,UAkB1B,CAGA,sBAAOlL,CAAgBqH,GACrB,OAAO9D,KAAKwH,MAAK,WACf,MAAMnd,EAAOqd,GAAOpC,oBAAoBtF,MACzB,WAAX8D,GACFzZ,EAAKyZ,IAET,GACF,EAOFvD,GAAac,GAAGhc,SAjCe,2BAiCmBoiB,IAAwBrI,IACxEA,EAAMkD,iBACN,MAAMsF,EAASxI,EAAM7S,OAAOyO,QAAQyM,IACvBC,GAAOpC,oBAAoBsC,GACnCD,QAAQ,IAOfxL,GAAmBuL,IAcnB,MACMG,GAAc,YACdC,GAAmB,aAAaD,KAChCE,GAAkB,YAAYF,KAC9BG,GAAiB,WAAWH,KAC5BI,GAAoB,cAAcJ,KAClCK,GAAkB,YAAYL,KAK9BM,GAAY,CAChBC,YAAa,KACbC,aAAc,KACdC,cAAe,MAEXC,GAAgB,CACpBH,YAAa,kBACbC,aAAc,kBACdC,cAAe,mBAOjB,MAAME,WAAc/E,GAClB,WAAAU,CAAY5kB,EAASukB,GACnBa,QACA3E,KAAK4E,SAAWrlB,EACXA,GAAYipB,GAAMC,gBAGvBzI,KAAK6E,QAAU7E,KAAK6D,WAAWC,GAC/B9D,KAAK0I,QAAU,EACf1I,KAAK2I,sBAAwB7H,QAAQlhB,OAAOgpB,cAC5C5I,KAAK6I,cACP,CAGA,kBAAWnF,GACT,OAAOyE,EACT,CACA,sBAAWxE,GACT,OAAO4E,EACT,CACA,eAAWhM,GACT,MA/CW,OAgDb,CAGA,OAAAwI,GACExE,GAAaC,IAAIR,KAAK4E,SAAUiD,GAClC,CAGA,MAAAiB,CAAO1J,GACAY,KAAK2I,sBAIN3I,KAAK+I,wBAAwB3J,KAC/BY,KAAK0I,QAAUtJ,EAAM4J,SAJrBhJ,KAAK0I,QAAUtJ,EAAM6J,QAAQ,GAAGD,OAMpC,CACA,IAAAE,CAAK9J,GACCY,KAAK+I,wBAAwB3J,KAC/BY,KAAK0I,QAAUtJ,EAAM4J,QAAUhJ,KAAK0I,SAEtC1I,KAAKmJ,eACLtM,GAAQmD,KAAK6E,QAAQuD,YACvB,CACA,KAAAgB,CAAMhK,GACJY,KAAK0I,QAAUtJ,EAAM6J,SAAW7J,EAAM6J,QAAQvY,OAAS,EAAI,EAAI0O,EAAM6J,QAAQ,GAAGD,QAAUhJ,KAAK0I,OACjG,CACA,YAAAS,GACE,MAAME,EAAYlnB,KAAKoC,IAAIyb,KAAK0I,SAChC,GAAIW,GAnEgB,GAoElB,OAEF,MAAM/b,EAAY+b,EAAYrJ,KAAK0I,QACnC1I,KAAK0I,QAAU,EACVpb,GAGLuP,GAAQvP,EAAY,EAAI0S,KAAK6E,QAAQyD,cAAgBtI,KAAK6E,QAAQwD,aACpE,CACA,WAAAQ,GACM7I,KAAK2I,uBACPpI,GAAac,GAAGrB,KAAK4E,SAAUqD,IAAmB7I,GAASY,KAAK8I,OAAO1J,KACvEmB,GAAac,GAAGrB,KAAK4E,SAAUsD,IAAiB9I,GAASY,KAAKkJ,KAAK9J,KACnEY,KAAK4E,SAASvJ,UAAU5E,IAlFG,mBAoF3B8J,GAAac,GAAGrB,KAAK4E,SAAUkD,IAAkB1I,GAASY,KAAK8I,OAAO1J,KACtEmB,GAAac,GAAGrB,KAAK4E,SAAUmD,IAAiB3I,GAASY,KAAKoJ,MAAMhK,KACpEmB,GAAac,GAAGrB,KAAK4E,SAAUoD,IAAgB5I,GAASY,KAAKkJ,KAAK9J,KAEtE,CACA,uBAAA2J,CAAwB3J,GACtB,OAAOY,KAAK2I,wBA3FS,QA2FiBvJ,EAAMkK,aA5FrB,UA4FyDlK,EAAMkK,YACxF,CAGA,kBAAOb,GACL,MAAO,iBAAkBpjB,SAASC,iBAAmB7C,UAAU8mB,eAAiB,CAClF,EAeF,MAEMC,GAAc,eACdC,GAAiB,YACjBC
,GAAmB,YACnBC,GAAoB,aAGpBC,GAAa,OACbC,GAAa,OACbC,GAAiB,OACjBC,GAAkB,QAClBC,GAAc,QAAQR,KACtBS,GAAa,OAAOT,KACpBU,GAAkB,UAAUV,KAC5BW,GAAqB,aAAaX,KAClCY,GAAqB,aAAaZ,KAClCa,GAAmB,YAAYb,KAC/Bc,GAAwB,OAAOd,KAAcC,KAC7Cc,GAAyB,QAAQf,KAAcC,KAC/Ce,GAAsB,WACtBC,GAAsB,SAMtBC,GAAkB,UAClBC,GAAgB,iBAChBC,GAAuBF,GAAkBC,GAKzCE,GAAmB,CACvB,CAACnB,IAAmBK,GACpB,CAACJ,IAAoBG,IAEjBgB,GAAY,CAChBC,SAAU,IACVC,UAAU,EACVC,MAAO,QACPC,MAAM,EACNC,OAAO,EACPC,MAAM,GAEFC,GAAgB,CACpBN,SAAU,mBAEVC,SAAU,UACVC,MAAO,mBACPC,KAAM,mBACNC,MAAO,UACPC,KAAM,WAOR,MAAME,WAAiB5G,GACrB,WAAAP,CAAY5kB,EAASukB,GACnBa,MAAMplB,EAASukB,GACf9D,KAAKuL,UAAY,KACjBvL,KAAKwL,eAAiB,KACtBxL,KAAKyL,YAAa,EAClBzL,KAAK0L,aAAe,KACpB1L,KAAK2L,aAAe,KACpB3L,KAAK4L,mBAAqB/F,GAAeC,QArCjB,uBAqC8C9F,KAAK4E,UAC3E5E,KAAK6L,qBACD7L,KAAK6E,QAAQqG,OAASV,IACxBxK,KAAK8L,OAET,CAGA,kBAAWpI,GACT,OAAOoH,EACT,CACA,sBAAWnH,GACT,OAAO0H,EACT,CACA,eAAW9O,GACT,MAnFW,UAoFb,CAGA,IAAA1X,GACEmb,KAAK+L,OAAOnC,GACd,CACA,eAAAoC,IAIO3mB,SAAS4mB,QAAUtR,GAAUqF,KAAK4E,WACrC5E,KAAKnb,MAET,CACA,IAAAshB,GACEnG,KAAK+L,OAAOlC,GACd,CACA,KAAAoB,GACMjL,KAAKyL,YACPrR,GAAqB4F,KAAK4E,UAE5B5E,KAAKkM,gBACP,CACA,KAAAJ,GACE9L,KAAKkM,iBACLlM,KAAKmM,kBACLnM,KAAKuL,UAAYa,aAAY,IAAMpM,KAAKgM,mBAAmBhM,KAAK6E,QAAQkG,SAC1E,CACA,iBAAAsB,GACOrM,KAAK6E,QAAQqG,OAGdlL,KAAKyL,WACPlL,GAAae,IAAItB,KAAK4E,SAAUqF,IAAY,IAAMjK,KAAK8L,UAGzD9L,KAAK8L,QACP,CACA,EAAAQ,CAAG7T,GACD,MAAM8T,EAAQvM,KAAKwM,YACnB,GAAI/T,EAAQ8T,EAAM7b,OAAS,GAAK+H,EAAQ,EACtC,OAEF,GAAIuH,KAAKyL,WAEP,YADAlL,GAAae,IAAItB,KAAK4E,SAAUqF,IAAY,IAAMjK,KAAKsM,GAAG7T,KAG5D,MAAMgU,EAAczM,KAAK0M,cAAc1M,KAAK2M,cAC5C,GAAIF,IAAgBhU,EAClB,OAEF,MAAMtC,EAAQsC,EAAQgU,EAAc7C,GAAaC,GACjD7J,KAAK+L,OAAO5V,EAAOoW,EAAM9T,GAC3B,CACA,OAAAsM,GACM/E,KAAK2L,cACP3L,KAAK2L,aAAa5G,UAEpBJ,MAAMI,SACR,CAGA,iBAAAf,CAAkBF,GAEhB,OADAA,EAAO8I,gBAAkB9I,EAAOiH,SACzBjH,CACT,CACA,kBAAA+H,GACM7L,KAAK6E,QAAQmG,UACfzK,GAAac,GAAGrB,KAAK4E,SAAUsF,IAAiB9K,GAASY,KAAK6M,SAASzN,KAE9C,UAAvBY,KAAK6E,QAAQoG,QACf1K,GAAac,GAAGrB,KAAK4E,SAAUuF,IAAoB,IAAMnK,KAAKiL,UAC9D1K,GAAac,GAAGrB,KAAK4E,SAAUwF,IAAoB,IAAMpK,KAAKqM,uBAE5DrM,KAAK6E,QAAQsG,OAAS3C,GAAMC,eAC9BzI,KAAK8M,yBAET,CACA,uBAAAA,GACE,IAAK,MAAMC,KAAOlH,GAAe1T,KArIX,qBAqImC6N,KAAK4E,UAC5DrE,GAAac,GAAG0L,EAAK1C,IAAkBjL,GAASA,EAAMkD,mBAExD,MAmBM0K,EAAc,CAClB3E,aAAc,IAAMrI,KAAK+L,OAAO/L,KAAKiN,kBAAkBnD,KACvDxB,cAAe,IAAMtI,KAAK+L,OAAO/L,KAAKiN,kBAAkBlD,KACxD3B,YAtBkB,KACS,UAAvBpI,KAAK6E,QAAQoG,QAYjBjL,KAAKiL,QACDjL,KAAK0L,cACPwB,aAAalN,KAAK0L,cAEpB1L,KAAK0L,aAAe7N,YAAW,IAAMmC,KAAKqM,qBAjLjB,IAiL+DrM,KAAK6E,QAAQkG,UAAS,GAOhH/K,KAAK2L,aAAe,IAAInD,GAAMxI,KAAK4E,SAAUoI,EAC/C,CACA,QAAAH,CAASzN,GACP,GAAI,kBAAkB/b,KAAK+b,EAAM7S,OAAO0a,SACtC,OAEF,MAAM3Z,EAAYud,GAAiBzL,EAAMtiB,KACrCwQ,IACF8R,EAAMkD,iBACNtC,KAAK+L,OAAO/L,KAAKiN,kBAAkB3f,IAEvC,CACA,aAAAof,CAAcntB,GACZ,OAAOygB,KAAKwM,YAAYrnB,QAAQ5F,EAClC,CACA,0BAAA4tB,CAA2B1U,GACzB,IAAKuH,KAAK4L,mBACR,OAEF,MAAMwB,EAAkBvH,GAAeC,QAAQ4E,GAAiB1K,KAAK4L,oBACrEwB,EAAgB/R,UAAU1B,OAAO8Q,IACjC2C,EAAgBjsB,gBAAgB,gBAChC,MAAMksB,EAAqBxH,GAAeC,QAAQ,sBAAsBrN,MAAWuH,KAAK4L,oBACpFyB,IACFA,EAAmBhS,UAAU5E,IAAIgU,IACjC4C,EAAmBjsB,aAAa,eAAgB,QAEpD,CACA,eAAA+qB,GACE,MAAM5sB,EAAUygB,KAAKwL,gBAAkBxL,KAAK2M,aAC5C,IAAKptB,EACH,OAEF,MAAM+tB,EAAkB/P,OAAOgQ,SAAShuB,EAAQic,aAAa,oBAAqB,IAClFwE,KAAK6E,QAAQkG,SAAWuC,GAAmBtN,KAAK6E,QAAQ+H,eAC1D,CACA,MAAAb,CAAO5V,EAAO5W,EAAU,MACtB,GAAIygB,KAAKyL,WACP,OAEF,MAAM1N,EAAgBiC,KAAK2M,aACrBa,EAASrX,IAAUyT,GACnB6D,EAAcluB,GAAWue,GAAqBkC,KAAKwM,YAAazO,EAAeyP,EAAQxN,KAAK6E,QAAQuG,MAC1G,GAAIqC,IAAgB1P,EAClB,OAEF,MAAM2P,EAAmB1N,KAAK0M,cAAce,GACtCE,EAAenI,GACZjF,GAAaqB,QAAQ5B,KAAK4E,SAAUY,EAAW,CACpD1F,cAAe2N,EACfngB,UAAW0S
,KAAK4N,kBAAkBzX,GAClCuD,KAAMsG,KAAK0M,cAAc3O,GACzBuO,GAAIoB,IAIR,GADmBC,EAAa3D,IACjBhI,iBACb,OAEF,IAAKjE,IAAkB0P,EAGrB,OAEF,MAAMI,EAAY/M,QAAQd,KAAKuL,WAC/BvL,KAAKiL,QACLjL,KAAKyL,YAAa,EAClBzL,KAAKmN,2BAA2BO,GAChC1N,KAAKwL,eAAiBiC,EACtB,MAAMK,EAAuBN,EA3OR,sBADF,oBA6ObO,EAAiBP,EA3OH,qBACA,qBA2OpBC,EAAYpS,UAAU5E,IAAIsX,GAC1BlS,GAAO4R,GACP1P,EAAc1C,UAAU5E,IAAIqX,GAC5BL,EAAYpS,UAAU5E,IAAIqX,GAQ1B9N,KAAKmF,gBAPoB,KACvBsI,EAAYpS,UAAU1B,OAAOmU,EAAsBC,GACnDN,EAAYpS,UAAU5E,IAAIgU,IAC1B1M,EAAc1C,UAAU1B,OAAO8Q,GAAqBsD,EAAgBD,GACpE9N,KAAKyL,YAAa,EAClBkC,EAAa1D,GAAW,GAEYlM,EAAeiC,KAAKgO,eACtDH,GACF7N,KAAK8L,OAET,CACA,WAAAkC,GACE,OAAOhO,KAAK4E,SAASvJ,UAAU7W,SAhQV,QAiQvB,CACA,UAAAmoB,GACE,OAAO9G,GAAeC,QAAQ8E,GAAsB5K,KAAK4E,SAC3D,CACA,SAAA4H,GACE,OAAO3G,GAAe1T,KAAKwY,GAAe3K,KAAK4E,SACjD,CACA,cAAAsH,GACMlM,KAAKuL,YACP0C,cAAcjO,KAAKuL,WACnBvL,KAAKuL,UAAY,KAErB,CACA,iBAAA0B,CAAkB3f,GAChB,OAAI2O,KACK3O,IAAcwc,GAAiBD,GAAaD,GAE9Ctc,IAAcwc,GAAiBF,GAAaC,EACrD,CACA,iBAAA+D,CAAkBzX,GAChB,OAAI8F,KACK9F,IAAU0T,GAAaC,GAAiBC,GAE1C5T,IAAU0T,GAAaE,GAAkBD,EAClD,CAGA,sBAAOrN,CAAgBqH,GACrB,OAAO9D,KAAKwH,MAAK,WACf,MAAMnd,EAAOihB,GAAShG,oBAAoBtF,KAAM8D,GAChD,GAAsB,iBAAXA,GAIX,GAAsB,iBAAXA,EAAqB,CAC9B,QAAqB/K,IAAjB1O,EAAKyZ,IAAyBA,EAAOrC,WAAW,MAAmB,gBAAXqC,EAC1D,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,IACP,OAREzZ,EAAKiiB,GAAGxI,EASZ,GACF,EAOFvD,GAAac,GAAGhc,SAAUklB,GAvSE,uCAuS2C,SAAUnL,GAC/E,MAAM7S,EAASsZ,GAAec,uBAAuB3G,MACrD,IAAKzT,IAAWA,EAAO8O,UAAU7W,SAASgmB,IACxC,OAEFpL,EAAMkD,iBACN,MAAM4L,EAAW5C,GAAShG,oBAAoB/Y,GACxC4hB,EAAanO,KAAKxE,aAAa,oBACrC,OAAI2S,GACFD,EAAS5B,GAAG6B,QACZD,EAAS7B,qBAGyC,SAAhDrJ,GAAYQ,iBAAiBxD,KAAM,UACrCkO,EAASrpB,YACTqpB,EAAS7B,sBAGX6B,EAAS/H,YACT+H,EAAS7B,oBACX,IACA9L,GAAac,GAAGzhB,OAAQ0qB,IAAuB,KAC7C,MAAM8D,EAAYvI,GAAe1T,KA5TR,6BA6TzB,IAAK,MAAM+b,KAAYE,EACrB9C,GAAShG,oBAAoB4I,EAC/B,IAOF/R,GAAmBmP,IAcnB,MAEM+C,GAAc,eAEdC,GAAe,OAAOD,KACtBE,GAAgB,QAAQF,KACxBG,GAAe,OAAOH,KACtBI,GAAiB,SAASJ,KAC1BK,GAAyB,QAAQL,cACjCM,GAAoB,OACpBC,GAAsB,WACtBC,GAAwB,aAExBC,GAA6B,WAAWF,OAAwBA,KAKhEG,GAAyB,8BACzBC,GAAY,CAChBvqB,OAAQ,KACRkjB,QAAQ,GAEJsH,GAAgB,CACpBxqB,OAAQ,iBACRkjB,OAAQ,WAOV,MAAMuH,WAAiBxK,GACrB,WAAAP,CAAY5kB,EAASukB,GACnBa,MAAMplB,EAASukB,GACf9D,KAAKmP,kBAAmB,EACxBnP,KAAKoP,cAAgB,GACrB,MAAMC,EAAaxJ,GAAe1T,KAAK4c,IACvC,IAAK,MAAMO,KAAQD,EAAY,CAC7B,MAAMtV,EAAW8L,GAAea,uBAAuB4I,GACjDC,EAAgB1J,GAAe1T,KAAK4H,GAAU5T,QAAOqpB,GAAgBA,IAAiBxP,KAAK4E,WAChF,OAAb7K,GAAqBwV,EAAc7e,QACrCsP,KAAKoP,cAAcxd,KAAK0d,EAE5B,CACAtP,KAAKyP,sBACAzP,KAAK6E,QAAQpgB,QAChBub,KAAK0P,0BAA0B1P,KAAKoP,cAAepP,KAAK2P,YAEtD3P,KAAK6E,QAAQ8C,QACf3H,KAAK2H,QAET,CAGA,kBAAWjE,GACT,OAAOsL,EACT,CACA,sBAAWrL,GACT,OAAOsL,EACT,CACA,eAAW1S,GACT,MA9DW,UA+Db,CAGA,MAAAoL,GACM3H,KAAK2P,WACP3P,KAAK4P,OAEL5P,KAAK6P,MAET,CACA,IAAAA,GACE,GAAI7P,KAAKmP,kBAAoBnP,KAAK2P,WAChC,OAEF,IAAIG,EAAiB,GAQrB,GALI9P,KAAK6E,QAAQpgB,SACfqrB,EAAiB9P,KAAK+P,uBAhEH,wCAgE4C5pB,QAAO5G,GAAWA,IAAYygB,KAAK4E,WAAU9hB,KAAIvD,GAAW2vB,GAAS5J,oBAAoB/lB,EAAS,CAC/JooB,QAAQ,OAGRmI,EAAepf,QAAUof,EAAe,GAAGX,iBAC7C,OAGF,GADmB5O,GAAaqB,QAAQ5B,KAAK4E,SAAU0J,IACxCtM,iBACb,OAEF,IAAK,MAAMgO,KAAkBF,EAC3BE,EAAeJ,OAEjB,MAAMK,EAAYjQ,KAAKkQ,gBACvBlQ,KAAK4E,SAASvJ,UAAU1B,OAAOiV,IAC/B5O,KAAK4E,SAASvJ,UAAU5E,IAAIoY,IAC5B7O,KAAK4E,SAAS7jB,MAAMkvB,GAAa,EACjCjQ,KAAK0P,0BAA0B1P,KAAKoP,eAAe,GACnDpP,KAAKmP,kBAAmB,EACxB,MAQMgB,EAAa,SADUF,EAAU,GAAGxL,cAAgBwL,EAAU7d,MAAM,KAE1E4N,KAAKmF,gBATY,KACfnF,KAAKmP,kBAAmB,EACxBnP,KAAK4E,SAASvJ,UAAU1B,OAAOkV,IAC/B7O,KAAK4E,SAASvJ,UAAU5E,IAAImY,GAAqBD,IACjD3O,KAAK4E,SAAS7jB,MAAMkvB,GAAa,GACjC1P,GAAaqB,QAAQ5B,KAAK4E,SAAU2J,GAAc,GAItBvO,KAAK4E,UAAU,GAC7C5E,KAAK4E,SAAS7jB,MAAMkvB,GAAa,G
AAGjQ,KAAK4E,SAASuL,MACpD,CACA,IAAAP,GACE,GAAI5P,KAAKmP,mBAAqBnP,KAAK2P,WACjC,OAGF,GADmBpP,GAAaqB,QAAQ5B,KAAK4E,SAAU4J,IACxCxM,iBACb,OAEF,MAAMiO,EAAYjQ,KAAKkQ,gBACvBlQ,KAAK4E,SAAS7jB,MAAMkvB,GAAa,GAAGjQ,KAAK4E,SAASthB,wBAAwB2sB,OAC1EpU,GAAOmE,KAAK4E,UACZ5E,KAAK4E,SAASvJ,UAAU5E,IAAIoY,IAC5B7O,KAAK4E,SAASvJ,UAAU1B,OAAOiV,GAAqBD,IACpD,IAAK,MAAM/M,KAAW5B,KAAKoP,cAAe,CACxC,MAAM7vB,EAAUsmB,GAAec,uBAAuB/E,GAClDriB,IAAYygB,KAAK2P,SAASpwB,IAC5BygB,KAAK0P,0BAA0B,CAAC9N,IAAU,EAE9C,CACA5B,KAAKmP,kBAAmB,EAOxBnP,KAAK4E,SAAS7jB,MAAMkvB,GAAa,GACjCjQ,KAAKmF,gBAPY,KACfnF,KAAKmP,kBAAmB,EACxBnP,KAAK4E,SAASvJ,UAAU1B,OAAOkV,IAC/B7O,KAAK4E,SAASvJ,UAAU5E,IAAImY,IAC5BrO,GAAaqB,QAAQ5B,KAAK4E,SAAU6J,GAAe,GAGvBzO,KAAK4E,UAAU,EAC/C,CACA,QAAA+K,CAASpwB,EAAUygB,KAAK4E,UACtB,OAAOrlB,EAAQ8b,UAAU7W,SAASmqB,GACpC,CAGA,iBAAA3K,CAAkBF,GAGhB,OAFAA,EAAO6D,OAAS7G,QAAQgD,EAAO6D,QAC/B7D,EAAOrf,OAASiW,GAAWoJ,EAAOrf,QAC3Bqf,CACT,CACA,aAAAoM,GACE,OAAOlQ,KAAK4E,SAASvJ,UAAU7W,SA3IL,uBAChB,QACC,QA0Ib,CACA,mBAAAirB,GACE,IAAKzP,KAAK6E,QAAQpgB,OAChB,OAEF,MAAMshB,EAAW/F,KAAK+P,uBAAuBhB,IAC7C,IAAK,MAAMxvB,KAAWwmB,EAAU,CAC9B,MAAMqK,EAAWvK,GAAec,uBAAuBpnB,GACnD6wB,GACFpQ,KAAK0P,0BAA0B,CAACnwB,GAAUygB,KAAK2P,SAASS,GAE5D,CACF,CACA,sBAAAL,CAAuBhW,GACrB,MAAMgM,EAAWF,GAAe1T,KAAK2c,GAA4B9O,KAAK6E,QAAQpgB,QAE9E,OAAOohB,GAAe1T,KAAK4H,EAAUiG,KAAK6E,QAAQpgB,QAAQ0B,QAAO5G,IAAYwmB,EAAS3E,SAAS7hB,IACjG,CACA,yBAAAmwB,CAA0BW,EAAcC,GACtC,GAAKD,EAAa3f,OAGlB,IAAK,MAAMnR,KAAW8wB,EACpB9wB,EAAQ8b,UAAUsM,OArKK,aAqKyB2I,GAChD/wB,EAAQ6B,aAAa,gBAAiBkvB,EAE1C,CAGA,sBAAO7T,CAAgBqH,GACrB,MAAMe,EAAU,CAAC,EAIjB,MAHsB,iBAAXf,GAAuB,YAAYzgB,KAAKygB,KACjDe,EAAQ8C,QAAS,GAEZ3H,KAAKwH,MAAK,WACf,MAAMnd,EAAO6kB,GAAS5J,oBAAoBtF,KAAM6E,GAChD,GAAsB,iBAAXf,EAAqB,CAC9B,QAA4B,IAAjBzZ,EAAKyZ,GACd,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,IACP,CACF,GACF,EAOFvD,GAAac,GAAGhc,SAAUqpB,GAAwBK,IAAwB,SAAU3P,IAErD,MAAzBA,EAAM7S,OAAO0a,SAAmB7H,EAAMW,gBAAmD,MAAjCX,EAAMW,eAAekH,UAC/E7H,EAAMkD,iBAER,IAAK,MAAM/iB,KAAWsmB,GAAee,gCAAgC5G,MACnEkP,GAAS5J,oBAAoB/lB,EAAS,CACpCooB,QAAQ,IACPA,QAEP,IAMAxL,GAAmB+S,IAcnB,MAAMqB,GAAS,WAETC,GAAc,eACdC,GAAiB,YAGjBC,GAAiB,UACjBC,GAAmB,YAGnBC,GAAe,OAAOJ,KACtBK,GAAiB,SAASL,KAC1BM,GAAe,OAAON,KACtBO,GAAgB,QAAQP,KACxBQ,GAAyB,QAAQR,KAAcC,KAC/CQ,GAAyB,UAAUT,KAAcC,KACjDS,GAAuB,QAAQV,KAAcC,KAC7CU,GAAoB,OAMpBC,GAAyB,4DACzBC,GAA6B,GAAGD,MAA0BD,KAC1DG,GAAgB,iBAIhBC,GAAgBtV,KAAU,UAAY,YACtCuV,GAAmBvV,KAAU,YAAc,UAC3CwV,GAAmBxV,KAAU,aAAe,eAC5CyV,GAAsBzV,KAAU,eAAiB,aACjD0V,GAAkB1V,KAAU,aAAe,cAC3C2V,GAAiB3V,KAAU,cAAgB,aAG3C4V,GAAY,CAChBC,WAAW,EACX7jB,SAAU,kBACV8jB,QAAS,UACT/pB,OAAQ,CAAC,EAAG,GACZgqB,aAAc,KACd1zB,UAAW,UAEP2zB,GAAgB,CACpBH,UAAW,mBACX7jB,SAAU,mBACV8jB,QAAS,SACT/pB,OAAQ,0BACRgqB,aAAc,yBACd1zB,UAAW,2BAOb,MAAM4zB,WAAiBxN,GACrB,WAAAP,CAAY5kB,EAASukB,GACnBa,MAAMplB,EAASukB,GACf9D,KAAKmS,QAAU,KACfnS,KAAKoS,QAAUpS,KAAK4E,SAAS7f,WAE7Bib,KAAKqS,MAAQxM,GAAehhB,KAAKmb,KAAK4E,SAAU0M,IAAe,IAAMzL,GAAeM,KAAKnG,KAAK4E,SAAU0M,IAAe,IAAMzL,GAAeC,QAAQwL,GAAetR,KAAKoS,SACxKpS,KAAKsS,UAAYtS,KAAKuS,eACxB,CAGA,kBAAW7O,GACT,OAAOmO,EACT,CACA,sBAAWlO,GACT,OAAOsO,EACT,CACA,eAAW1V,GACT,OAAOgU,EACT,CAGA,MAAA5I,GACE,OAAO3H,KAAK2P,WAAa3P,KAAK4P,OAAS5P,KAAK6P,MAC9C,CACA,IAAAA,GACE,GAAI3U,GAAW8E,KAAK4E,WAAa5E,KAAK2P,WACpC,OAEF,MAAM7P,EAAgB,CACpBA,cAAeE,KAAK4E,UAGtB,IADkBrE,GAAaqB,QAAQ5B,KAAK4E,SAAUkM,GAAchR,GACtDkC,iBAAd,CASA,GANAhC,KAAKwS,gBAMD,iBAAkBntB,SAASC,kBAAoB0a,KAAKoS,QAAQpX,QAzExC,eA0EtB,IAAK,MAAMzb,IAAW,GAAGZ,UAAU0G,SAAS6G,KAAK6Z,UAC/CxF,GAAac,GAAG9hB,EAAS,YAAaqc,IAG1CoE,KAAK4E,SAAS6N,QACdzS,KAAK4E,SAASxjB,aAAa,iBAAiB,GAC5C4e,KAAKqS,MAAMhX,UAAU5E,IAAI0a,IACzBnR,KAAK4E,SAASvJ,UAAU5E,IAAI0a,IAC5B5Q,
GAAaqB,QAAQ5B,KAAK4E,SAAUmM,GAAejR,EAhBnD,CAiBF,CACA,IAAA8P,GACE,GAAI1U,GAAW8E,KAAK4E,YAAc5E,KAAK2P,WACrC,OAEF,MAAM7P,EAAgB,CACpBA,cAAeE,KAAK4E,UAEtB5E,KAAK0S,cAAc5S,EACrB,CACA,OAAAiF,GACM/E,KAAKmS,SACPnS,KAAKmS,QAAQnZ,UAEf2L,MAAMI,SACR,CACA,MAAAha,GACEiV,KAAKsS,UAAYtS,KAAKuS,gBAClBvS,KAAKmS,SACPnS,KAAKmS,QAAQpnB,QAEjB,CAGA,aAAA2nB,CAAc5S,GAEZ,IADkBS,GAAaqB,QAAQ5B,KAAK4E,SAAUgM,GAAc9Q,GACtDkC,iBAAd,CAMA,GAAI,iBAAkB3c,SAASC,gBAC7B,IAAK,MAAM/F,IAAW,GAAGZ,UAAU0G,SAAS6G,KAAK6Z,UAC/CxF,GAAaC,IAAIjhB,EAAS,YAAaqc,IAGvCoE,KAAKmS,SACPnS,KAAKmS,QAAQnZ,UAEfgH,KAAKqS,MAAMhX,UAAU1B,OAAOwX,IAC5BnR,KAAK4E,SAASvJ,UAAU1B,OAAOwX,IAC/BnR,KAAK4E,SAASxjB,aAAa,gBAAiB,SAC5C4hB,GAAYE,oBAAoBlD,KAAKqS,MAAO,UAC5C9R,GAAaqB,QAAQ5B,KAAK4E,SAAUiM,GAAgB/Q,EAhBpD,CAiBF,CACA,UAAA+D,CAAWC,GAET,GAAgC,iBADhCA,EAASa,MAAMd,WAAWC,IACRxlB,YAA2B,GAAUwlB,EAAOxlB,YAAgE,mBAA3CwlB,EAAOxlB,UAAUgF,sBAElG,MAAM,IAAIkhB,UAAU,GAAG+L,GAAO9L,+GAEhC,OAAOX,CACT,CACA,aAAA0O,GACE,QAAsB,IAAX,EACT,MAAM,IAAIhO,UAAU,gEAEtB,IAAImO,EAAmB3S,KAAK4E,SACG,WAA3B5E,KAAK6E,QAAQvmB,UACfq0B,EAAmB3S,KAAKoS,QACf,GAAUpS,KAAK6E,QAAQvmB,WAChCq0B,EAAmBjY,GAAWsF,KAAK6E,QAAQvmB,WACA,iBAA3B0hB,KAAK6E,QAAQvmB,YAC7Bq0B,EAAmB3S,KAAK6E,QAAQvmB,WAElC,MAAM0zB,EAAehS,KAAK4S,mBAC1B5S,KAAKmS,QAAU,GAAoBQ,EAAkB3S,KAAKqS,MAAOL,EACnE,CACA,QAAArC,GACE,OAAO3P,KAAKqS,MAAMhX,UAAU7W,SAAS2sB,GACvC,CACA,aAAA0B,GACE,MAAMC,EAAiB9S,KAAKoS,QAC5B,GAAIU,EAAezX,UAAU7W,SArKN,WAsKrB,OAAOmtB,GAET,GAAImB,EAAezX,UAAU7W,SAvKJ,aAwKvB,OAAOotB,GAET,GAAIkB,EAAezX,UAAU7W,SAzKA,iBA0K3B,MA5JsB,MA8JxB,GAAIsuB,EAAezX,UAAU7W,SA3KE,mBA4K7B,MA9JyB,SAkK3B,MAAMuuB,EAAkF,QAA1E9tB,iBAAiB+a,KAAKqS,OAAOvX,iBAAiB,iBAAiB6K,OAC7E,OAAImN,EAAezX,UAAU7W,SArLP,UAsLbuuB,EAAQvB,GAAmBD,GAE7BwB,EAAQrB,GAAsBD,EACvC,CACA,aAAAc,GACE,OAAkD,OAA3CvS,KAAK4E,SAAS5J,QAnLD,UAoLtB,CACA,UAAAgY,GACE,MAAM,OACJhrB,GACEgY,KAAK6E,QACT,MAAsB,iBAAX7c,EACFA,EAAO9F,MAAM,KAAKY,KAAInF,GAAS4f,OAAOgQ,SAAS5vB,EAAO,MAEzC,mBAAXqK,EACFirB,GAAcjrB,EAAOirB,EAAYjT,KAAK4E,UAExC5c,CACT,CACA,gBAAA4qB,GACE,MAAMM,EAAwB,CAC5Bx0B,UAAWshB,KAAK6S,gBAChBzc,UAAW,CAAC,CACV9V,KAAM,kBACNmB,QAAS,CACPwM,SAAU+R,KAAK6E,QAAQ5W,WAExB,CACD3N,KAAM,SACNmB,QAAS,CACPuG,OAAQgY,KAAKgT,iBAanB,OAPIhT,KAAKsS,WAAsC,WAAzBtS,KAAK6E,QAAQkN,WACjC/O,GAAYC,iBAAiBjD,KAAKqS,MAAO,SAAU,UACnDa,EAAsB9c,UAAY,CAAC,CACjC9V,KAAM,cACNC,SAAS,KAGN,IACF2yB,KACArW,GAAQmD,KAAK6E,QAAQmN,aAAc,CAACkB,IAE3C,CACA,eAAAC,EAAgB,IACdr2B,EAAG,OACHyP,IAEA,MAAMggB,EAAQ1G,GAAe1T,KAhOF,8DAgO+B6N,KAAKqS,OAAOlsB,QAAO5G,GAAWob,GAAUpb,KAC7FgtB,EAAM7b,QAMXoN,GAAqByO,EAAOhgB,EAAQzP,IAAQ6zB,IAAmBpE,EAAMnL,SAAS7U,IAASkmB,OACzF,CAGA,sBAAOhW,CAAgBqH,GACrB,OAAO9D,KAAKwH,MAAK,WACf,MAAMnd,EAAO6nB,GAAS5M,oBAAoBtF,KAAM8D,GAChD,GAAsB,iBAAXA,EAAX,CAGA,QAA4B,IAAjBzZ,EAAKyZ,GACd,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,IAJL,CAKF,GACF,CACA,iBAAOsP,CAAWhU,GAChB,GA5QuB,IA4QnBA,EAAMwI,QAAgD,UAAfxI,EAAMqB,MA/QnC,QA+QuDrB,EAAMtiB,IACzE,OAEF,MAAMu2B,EAAcxN,GAAe1T,KAAKkf,IACxC,IAAK,MAAM1J,KAAU0L,EAAa,CAChC,MAAMC,EAAUpB,GAAS7M,YAAYsC,GACrC,IAAK2L,IAAyC,IAA9BA,EAAQzO,QAAQiN,UAC9B,SAEF,MAAMyB,EAAenU,EAAMmU,eACrBC,EAAeD,EAAanS,SAASkS,EAAQjB,OACnD,GAAIkB,EAAanS,SAASkS,EAAQ1O,WAA2C,WAA9B0O,EAAQzO,QAAQiN,YAA2B0B,GAA8C,YAA9BF,EAAQzO,QAAQiN,WAA2B0B,EACnJ,SAIF,GAAIF,EAAQjB,MAAM7tB,SAAS4a,EAAM7S,UAA2B,UAAf6S,EAAMqB,MA/RvC,QA+R2DrB,EAAMtiB,KAAqB,qCAAqCuG,KAAK+b,EAAM7S,OAAO0a,UACvJ,SAEF,MAAMnH,EAAgB,CACpBA,cAAewT,EAAQ1O,UAEN,UAAfxF,EAAMqB,OACRX,EAAckH,WAAa5H,GAE7BkU,EAAQZ,cAAc5S,EACxB,CACF,CACA,4BAAO2T,CAAsBrU,GAI3B,MAAMsU,EAAU,kBAAkBrwB,KAAK+b,EAAM7S,OAAO0a,SAC9C0M,EAjTW,WAiTKvU,EAAMtiB,IACtB82B,EAAkB,CAAClD,GAAgBC,IAAkBvP,SAAShC,EAAMtiB,KAC1E,IAAK82B,IAAoBD,EACvB,OAEF
,GAAID,IAAYC,EACd,OAEFvU,EAAMkD,iBAGN,MAAMuR,EAAkB7T,KAAKgG,QAAQoL,IAA0BpR,KAAO6F,GAAeM,KAAKnG,KAAMoR,IAAwB,IAAMvL,GAAehhB,KAAKmb,KAAMoR,IAAwB,IAAMvL,GAAeC,QAAQsL,GAAwBhS,EAAMW,eAAehb,YACpPwF,EAAW2nB,GAAS5M,oBAAoBuO,GAC9C,GAAID,EAIF,OAHAxU,EAAM0U,kBACNvpB,EAASslB,YACTtlB,EAAS4oB,gBAAgB/T,GAGvB7U,EAASolB,aAEXvQ,EAAM0U,kBACNvpB,EAASqlB,OACTiE,EAAgBpB,QAEpB,EAOFlS,GAAac,GAAGhc,SAAU4rB,GAAwBG,GAAwBc,GAASuB,uBACnFlT,GAAac,GAAGhc,SAAU4rB,GAAwBK,GAAeY,GAASuB,uBAC1ElT,GAAac,GAAGhc,SAAU2rB,GAAwBkB,GAASkB,YAC3D7S,GAAac,GAAGhc,SAAU6rB,GAAsBgB,GAASkB,YACzD7S,GAAac,GAAGhc,SAAU2rB,GAAwBI,IAAwB,SAAUhS,GAClFA,EAAMkD,iBACN4P,GAAS5M,oBAAoBtF,MAAM2H,QACrC,IAMAxL,GAAmB+V,IAcnB,MAAM6B,GAAS,WAETC,GAAoB,OACpBC,GAAkB,gBAAgBF,KAClCG,GAAY,CAChBC,UAAW,iBACXC,cAAe,KACfhP,YAAY,EACZzK,WAAW,EAEX0Z,YAAa,QAETC,GAAgB,CACpBH,UAAW,SACXC,cAAe,kBACfhP,WAAY,UACZzK,UAAW,UACX0Z,YAAa,oBAOf,MAAME,WAAiB9Q,GACrB,WAAAU,CAAYL,GACVa,QACA3E,KAAK6E,QAAU7E,KAAK6D,WAAWC,GAC/B9D,KAAKwU,aAAc,EACnBxU,KAAK4E,SAAW,IAClB,CAGA,kBAAWlB,GACT,OAAOwQ,EACT,CACA,sBAAWvQ,GACT,OAAO2Q,EACT,CACA,eAAW/X,GACT,OAAOwX,EACT,CAGA,IAAAlE,CAAKxT,GACH,IAAK2D,KAAK6E,QAAQlK,UAEhB,YADAkC,GAAQR,GAGV2D,KAAKyU,UACL,MAAMl1B,EAAUygB,KAAK0U,cACjB1U,KAAK6E,QAAQO,YACfvJ,GAAOtc,GAETA,EAAQ8b,UAAU5E,IAAIud,IACtBhU,KAAK2U,mBAAkB,KACrB9X,GAAQR,EAAS,GAErB,CACA,IAAAuT,CAAKvT,GACE2D,KAAK6E,QAAQlK,WAIlBqF,KAAK0U,cAAcrZ,UAAU1B,OAAOqa,IACpChU,KAAK2U,mBAAkB,KACrB3U,KAAK+E,UACLlI,GAAQR,EAAS,KANjBQ,GAAQR,EAQZ,CACA,OAAA0I,GACO/E,KAAKwU,cAGVjU,GAAaC,IAAIR,KAAK4E,SAAUqP,IAChCjU,KAAK4E,SAASjL,SACdqG,KAAKwU,aAAc,EACrB,CAGA,WAAAE,GACE,IAAK1U,KAAK4E,SAAU,CAClB,MAAMgQ,EAAWvvB,SAASwvB,cAAc,OACxCD,EAAST,UAAYnU,KAAK6E,QAAQsP,UAC9BnU,KAAK6E,QAAQO,YACfwP,EAASvZ,UAAU5E,IApFD,QAsFpBuJ,KAAK4E,SAAWgQ,CAClB,CACA,OAAO5U,KAAK4E,QACd,CACA,iBAAAZ,CAAkBF,GAGhB,OADAA,EAAOuQ,YAAc3Z,GAAWoJ,EAAOuQ,aAChCvQ,CACT,CACA,OAAA2Q,GACE,GAAIzU,KAAKwU,YACP,OAEF,MAAMj1B,EAAUygB,KAAK0U,cACrB1U,KAAK6E,QAAQwP,YAAYS,OAAOv1B,GAChCghB,GAAac,GAAG9hB,EAAS00B,IAAiB,KACxCpX,GAAQmD,KAAK6E,QAAQuP,cAAc,IAErCpU,KAAKwU,aAAc,CACrB,CACA,iBAAAG,CAAkBtY,GAChBW,GAAuBX,EAAU2D,KAAK0U,cAAe1U,KAAK6E,QAAQO,WACpE,EAeF,MAEM2P,GAAc,gBACdC,GAAkB,UAAUD,KAC5BE,GAAoB,cAAcF,KAGlCG,GAAmB,WACnBC,GAAY,CAChBC,WAAW,EACXC,YAAa,MAETC,GAAgB,CACpBF,UAAW,UACXC,YAAa,WAOf,MAAME,WAAkB9R,GACtB,WAAAU,CAAYL,GACVa,QACA3E,KAAK6E,QAAU7E,KAAK6D,WAAWC,GAC/B9D,KAAKwV,WAAY,EACjBxV,KAAKyV,qBAAuB,IAC9B,CAGA,kBAAW/R,GACT,OAAOyR,EACT,CACA,sBAAWxR,GACT,OAAO2R,EACT,CACA,eAAW/Y,GACT,MArCW,WAsCb,CAGA,QAAAmZ,GACM1V,KAAKwV,YAGLxV,KAAK6E,QAAQuQ,WACfpV,KAAK6E,QAAQwQ,YAAY5C,QAE3BlS,GAAaC,IAAInb,SAAU0vB,IAC3BxU,GAAac,GAAGhc,SAAU2vB,IAAiB5V,GAASY,KAAK2V,eAAevW,KACxEmB,GAAac,GAAGhc,SAAU4vB,IAAmB7V,GAASY,KAAK4V,eAAexW,KAC1EY,KAAKwV,WAAY,EACnB,CACA,UAAAK,GACO7V,KAAKwV,YAGVxV,KAAKwV,WAAY,EACjBjV,GAAaC,IAAInb,SAAU0vB,IAC7B,CAGA,cAAAY,CAAevW,GACb,MAAM,YACJiW,GACErV,KAAK6E,QACT,GAAIzF,EAAM7S,SAAWlH,UAAY+Z,EAAM7S,SAAW8oB,GAAeA,EAAY7wB,SAAS4a,EAAM7S,QAC1F,OAEF,MAAM1L,EAAWglB,GAAeU,kBAAkB8O,GAC1B,IAApBx0B,EAAS6P,OACX2kB,EAAY5C,QACHzS,KAAKyV,uBAAyBP,GACvCr0B,EAASA,EAAS6P,OAAS,GAAG+hB,QAE9B5xB,EAAS,GAAG4xB,OAEhB,CACA,cAAAmD,CAAexW,GAzED,QA0ERA,EAAMtiB,MAGVkjB,KAAKyV,qBAAuBrW,EAAM0W,SAAWZ,GA5EzB,UA6EtB,EAeF,MAAMa,GAAyB,oDACzBC,GAA0B,cAC1BC,GAAmB,gBACnBC,GAAkB,eAMxB,MAAMC,GACJ,WAAAhS,GACEnE,KAAK4E,SAAWvf,SAAS6G,IAC3B,CAGA,QAAAkqB,GAEE,MAAMC,EAAgBhxB,SAASC,gBAAgBuC,YAC/C,OAAO1F,KAAKoC,IAAI3E,OAAO02B,WAAaD,EACtC,CACA,IAAAzG,GACE,MAAM/rB,EAAQmc,KAAKoW,WACnBpW,KAAKuW,mBAELvW,KAAKwW,sBAAsBxW,KAAK4E,SAAUqR,IAAkBQ,GAAmBA,EAAkB5yB,IAEjGmc,KAAKwW,sBAAsBT,GAAwBE,IAAkBQ,GAAmBA,EAAkB5yB,IAC1Gmc,KAAKwW,sBAAsBR,GAAyBE,IAAiBO,G
AAmBA,EAAkB5yB,GAC5G,CACA,KAAAwO,GACE2N,KAAK0W,wBAAwB1W,KAAK4E,SAAU,YAC5C5E,KAAK0W,wBAAwB1W,KAAK4E,SAAUqR,IAC5CjW,KAAK0W,wBAAwBX,GAAwBE,IACrDjW,KAAK0W,wBAAwBV,GAAyBE,GACxD,CACA,aAAAS,GACE,OAAO3W,KAAKoW,WAAa,CAC3B,CAGA,gBAAAG,GACEvW,KAAK4W,sBAAsB5W,KAAK4E,SAAU,YAC1C5E,KAAK4E,SAAS7jB,MAAM+K,SAAW,QACjC,CACA,qBAAA0qB,CAAsBzc,EAAU8c,EAAexa,GAC7C,MAAMya,EAAiB9W,KAAKoW,WAS5BpW,KAAK+W,2BAA2Bhd,GARHxa,IAC3B,GAAIA,IAAYygB,KAAK4E,UAAYhlB,OAAO02B,WAAa/2B,EAAQsI,YAAcivB,EACzE,OAEF9W,KAAK4W,sBAAsBr3B,EAASs3B,GACpC,MAAMJ,EAAkB72B,OAAOqF,iBAAiB1F,GAASub,iBAAiB+b,GAC1Et3B,EAAQwB,MAAMi2B,YAAYH,EAAe,GAAGxa,EAASkB,OAAOC,WAAWiZ,QAAsB,GAGjG,CACA,qBAAAG,CAAsBr3B,EAASs3B,GAC7B,MAAMI,EAAc13B,EAAQwB,MAAM+Z,iBAAiB+b,GAC/CI,GACFjU,GAAYC,iBAAiB1jB,EAASs3B,EAAeI,EAEzD,CACA,uBAAAP,CAAwB3c,EAAU8c,GAWhC7W,KAAK+W,2BAA2Bhd,GAVHxa,IAC3B,MAAM5B,EAAQqlB,GAAYQ,iBAAiBjkB,EAASs3B,GAEtC,OAAVl5B,GAIJqlB,GAAYE,oBAAoB3jB,EAASs3B,GACzCt3B,EAAQwB,MAAMi2B,YAAYH,EAAel5B,IAJvC4B,EAAQwB,MAAMm2B,eAAeL,EAIgB,GAGnD,CACA,0BAAAE,CAA2Bhd,EAAUod,GACnC,GAAI,GAAUpd,GACZod,EAASpd,QAGX,IAAK,MAAM6L,KAAOC,GAAe1T,KAAK4H,EAAUiG,KAAK4E,UACnDuS,EAASvR,EAEb,EAeF,MAEMwR,GAAc,YAGdC,GAAe,OAAOD,KACtBE,GAAyB,gBAAgBF,KACzCG,GAAiB,SAASH,KAC1BI,GAAe,OAAOJ,KACtBK,GAAgB,QAAQL,KACxBM,GAAiB,SAASN,KAC1BO,GAAsB,gBAAgBP,KACtCQ,GAA0B,oBAAoBR,KAC9CS,GAA0B,kBAAkBT,KAC5CU,GAAyB,QAAQV,cACjCW,GAAkB,aAElBC,GAAoB,OACpBC,GAAoB,eAKpBC,GAAY,CAChBtD,UAAU,EACVnC,OAAO,EACPzH,UAAU,GAENmN,GAAgB,CACpBvD,SAAU,mBACVnC,MAAO,UACPzH,SAAU,WAOZ,MAAMoN,WAAc1T,GAClB,WAAAP,CAAY5kB,EAASukB,GACnBa,MAAMplB,EAASukB,GACf9D,KAAKqY,QAAUxS,GAAeC,QArBV,gBAqBmC9F,KAAK4E,UAC5D5E,KAAKsY,UAAYtY,KAAKuY,sBACtBvY,KAAKwY,WAAaxY,KAAKyY,uBACvBzY,KAAK2P,UAAW,EAChB3P,KAAKmP,kBAAmB,EACxBnP,KAAK0Y,WAAa,IAAIvC,GACtBnW,KAAK6L,oBACP,CAGA,kBAAWnI,GACT,OAAOwU,EACT,CACA,sBAAWvU,GACT,OAAOwU,EACT,CACA,eAAW5b,GACT,MA1DW,OA2Db,CAGA,MAAAoL,CAAO7H,GACL,OAAOE,KAAK2P,SAAW3P,KAAK4P,OAAS5P,KAAK6P,KAAK/P,EACjD,CACA,IAAA+P,CAAK/P,GACCE,KAAK2P,UAAY3P,KAAKmP,kBAGR5O,GAAaqB,QAAQ5B,KAAK4E,SAAU4S,GAAc,CAClE1X,kBAEYkC,mBAGdhC,KAAK2P,UAAW,EAChB3P,KAAKmP,kBAAmB,EACxBnP,KAAK0Y,WAAW9I,OAChBvqB,SAAS6G,KAAKmP,UAAU5E,IAAIshB,IAC5B/X,KAAK2Y,gBACL3Y,KAAKsY,UAAUzI,MAAK,IAAM7P,KAAK4Y,aAAa9Y,KAC9C,CACA,IAAA8P,GACO5P,KAAK2P,WAAY3P,KAAKmP,mBAGT5O,GAAaqB,QAAQ5B,KAAK4E,SAAUyS,IACxCrV,mBAGdhC,KAAK2P,UAAW,EAChB3P,KAAKmP,kBAAmB,EACxBnP,KAAKwY,WAAW3C,aAChB7V,KAAK4E,SAASvJ,UAAU1B,OAAOqe,IAC/BhY,KAAKmF,gBAAe,IAAMnF,KAAK6Y,cAAc7Y,KAAK4E,SAAU5E,KAAKgO,gBACnE,CACA,OAAAjJ,GACExE,GAAaC,IAAI5gB,OAAQw3B,IACzB7W,GAAaC,IAAIR,KAAKqY,QAASjB,IAC/BpX,KAAKsY,UAAUvT,UACf/E,KAAKwY,WAAW3C,aAChBlR,MAAMI,SACR,CACA,YAAA+T,GACE9Y,KAAK2Y,eACP,CAGA,mBAAAJ,GACE,OAAO,IAAIhE,GAAS,CAClB5Z,UAAWmG,QAAQd,KAAK6E,QAAQ+P,UAEhCxP,WAAYpF,KAAKgO,eAErB,CACA,oBAAAyK,GACE,OAAO,IAAIlD,GAAU,CACnBF,YAAarV,KAAK4E,UAEtB,CACA,YAAAgU,CAAa9Y,GAENza,SAAS6G,KAAK1H,SAASwb,KAAK4E,WAC/Bvf,SAAS6G,KAAK4oB,OAAO9U,KAAK4E,UAE5B5E,KAAK4E,SAAS7jB,MAAMgxB,QAAU,QAC9B/R,KAAK4E,SAASzjB,gBAAgB,eAC9B6e,KAAK4E,SAASxjB,aAAa,cAAc,GACzC4e,KAAK4E,SAASxjB,aAAa,OAAQ,UACnC4e,KAAK4E,SAASnZ,UAAY,EAC1B,MAAMstB,EAAYlT,GAAeC,QA7GT,cA6GsC9F,KAAKqY,SAC/DU,IACFA,EAAUttB,UAAY,GAExBoQ,GAAOmE,KAAK4E,UACZ5E,KAAK4E,SAASvJ,UAAU5E,IAAIuhB,IAU5BhY,KAAKmF,gBATsB,KACrBnF,KAAK6E,QAAQ4N,OACfzS,KAAKwY,WAAW9C,WAElB1V,KAAKmP,kBAAmB,EACxB5O,GAAaqB,QAAQ5B,KAAK4E,SAAU6S,GAAe,CACjD3X,iBACA,GAEoCE,KAAKqY,QAASrY,KAAKgO,cAC7D,CACA,kBAAAnC,GACEtL,GAAac,GAAGrB,KAAK4E,SAAUiT,IAAyBzY,IAhJvC,WAiJXA,EAAMtiB,MAGNkjB,KAAK6E,QAAQmG,SACfhL,KAAK4P,OAGP5P,KAAKgZ,6BAA4B,IAEnCzY,GAAac,GAAGzhB,OAAQ83B,IAAgB,KAClC1X,KAAK2P,WAAa3P,KAAKmP,kBACzBnP,KAAK2Y,eACP,IAEFpY,GAAac,GAAGrB,KAAK4E
,SAAUgT,IAAyBxY,IAEtDmB,GAAae,IAAItB,KAAK4E,SAAU+S,IAAqBsB,IAC/CjZ,KAAK4E,WAAaxF,EAAM7S,QAAUyT,KAAK4E,WAAaqU,EAAO1sB,SAGjC,WAA1ByT,KAAK6E,QAAQ+P,SAIb5U,KAAK6E,QAAQ+P,UACf5U,KAAK4P,OAJL5P,KAAKgZ,6BAKP,GACA,GAEN,CACA,UAAAH,GACE7Y,KAAK4E,SAAS7jB,MAAMgxB,QAAU,OAC9B/R,KAAK4E,SAASxjB,aAAa,eAAe,GAC1C4e,KAAK4E,SAASzjB,gBAAgB,cAC9B6e,KAAK4E,SAASzjB,gBAAgB,QAC9B6e,KAAKmP,kBAAmB,EACxBnP,KAAKsY,UAAU1I,MAAK,KAClBvqB,SAAS6G,KAAKmP,UAAU1B,OAAOoe,IAC/B/X,KAAKkZ,oBACLlZ,KAAK0Y,WAAWrmB,QAChBkO,GAAaqB,QAAQ5B,KAAK4E,SAAU2S,GAAe,GAEvD,CACA,WAAAvJ,GACE,OAAOhO,KAAK4E,SAASvJ,UAAU7W,SAjLT,OAkLxB,CACA,0BAAAw0B,GAEE,GADkBzY,GAAaqB,QAAQ5B,KAAK4E,SAAU0S,IACxCtV,iBACZ,OAEF,MAAMmX,EAAqBnZ,KAAK4E,SAASvX,aAAehI,SAASC,gBAAgBsC,aAC3EwxB,EAAmBpZ,KAAK4E,SAAS7jB,MAAMiL,UAEpB,WAArBotB,GAAiCpZ,KAAK4E,SAASvJ,UAAU7W,SAASyzB,MAGjEkB,IACHnZ,KAAK4E,SAAS7jB,MAAMiL,UAAY,UAElCgU,KAAK4E,SAASvJ,UAAU5E,IAAIwhB,IAC5BjY,KAAKmF,gBAAe,KAClBnF,KAAK4E,SAASvJ,UAAU1B,OAAOse,IAC/BjY,KAAKmF,gBAAe,KAClBnF,KAAK4E,SAAS7jB,MAAMiL,UAAYotB,CAAgB,GAC/CpZ,KAAKqY,QAAQ,GACfrY,KAAKqY,SACRrY,KAAK4E,SAAS6N,QAChB,CAMA,aAAAkG,GACE,MAAMQ,EAAqBnZ,KAAK4E,SAASvX,aAAehI,SAASC,gBAAgBsC,aAC3EkvB,EAAiB9W,KAAK0Y,WAAWtC,WACjCiD,EAAoBvC,EAAiB,EAC3C,GAAIuC,IAAsBF,EAAoB,CAC5C,MAAMr3B,EAAWma,KAAU,cAAgB,eAC3C+D,KAAK4E,SAAS7jB,MAAMe,GAAY,GAAGg1B,KACrC,CACA,IAAKuC,GAAqBF,EAAoB,CAC5C,MAAMr3B,EAAWma,KAAU,eAAiB,cAC5C+D,KAAK4E,SAAS7jB,MAAMe,GAAY,GAAGg1B,KACrC,CACF,CACA,iBAAAoC,GACElZ,KAAK4E,SAAS7jB,MAAMu4B,YAAc,GAClCtZ,KAAK4E,SAAS7jB,MAAMw4B,aAAe,EACrC,CAGA,sBAAO9c,CAAgBqH,EAAQhE,GAC7B,OAAOE,KAAKwH,MAAK,WACf,MAAMnd,EAAO+tB,GAAM9S,oBAAoBtF,KAAM8D,GAC7C,GAAsB,iBAAXA,EAAX,CAGA,QAA4B,IAAjBzZ,EAAKyZ,GACd,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,GAAQhE,EAJb,CAKF,GACF,EAOFS,GAAac,GAAGhc,SAAUyyB,GA9OK,4BA8O2C,SAAU1Y,GAClF,MAAM7S,EAASsZ,GAAec,uBAAuB3G,MACjD,CAAC,IAAK,QAAQoB,SAASpB,KAAKiH,UAC9B7H,EAAMkD,iBAER/B,GAAae,IAAI/U,EAAQirB,IAAcgC,IACjCA,EAAUxX,kBAIdzB,GAAae,IAAI/U,EAAQgrB,IAAgB,KACnC5c,GAAUqF,OACZA,KAAKyS,OACP,GACA,IAIJ,MAAMgH,EAAc5T,GAAeC,QAnQb,eAoQlB2T,GACFrB,GAAM/S,YAAYoU,GAAa7J,OAEpBwI,GAAM9S,oBAAoB/Y,GAClCob,OAAO3H,KACd,IACA6G,GAAqBuR,IAMrBjc,GAAmBic,IAcnB,MAEMsB,GAAc,gBACdC,GAAiB,YACjBC,GAAwB,OAAOF,KAAcC,KAE7CE,GAAoB,OACpBC,GAAuB,UACvBC,GAAoB,SAEpBC,GAAgB,kBAChBC,GAAe,OAAOP,KACtBQ,GAAgB,QAAQR,KACxBS,GAAe,OAAOT,KACtBU,GAAuB,gBAAgBV,KACvCW,GAAiB,SAASX,KAC1BY,GAAe,SAASZ,KACxBa,GAAyB,QAAQb,KAAcC,KAC/Ca,GAAwB,kBAAkBd,KAE1Ce,GAAY,CAChB7F,UAAU,EACV5J,UAAU,EACVvgB,QAAQ,GAEJiwB,GAAgB,CACpB9F,SAAU,mBACV5J,SAAU,UACVvgB,OAAQ,WAOV,MAAMkwB,WAAkBjW,GACtB,WAAAP,CAAY5kB,EAASukB,GACnBa,MAAMplB,EAASukB,GACf9D,KAAK2P,UAAW,EAChB3P,KAAKsY,UAAYtY,KAAKuY,sBACtBvY,KAAKwY,WAAaxY,KAAKyY,uBACvBzY,KAAK6L,oBACP,CAGA,kBAAWnI,GACT,OAAO+W,EACT,CACA,sBAAW9W,GACT,OAAO+W,EACT,CACA,eAAWne,GACT,MApDW,WAqDb,CAGA,MAAAoL,CAAO7H,GACL,OAAOE,KAAK2P,SAAW3P,KAAK4P,OAAS5P,KAAK6P,KAAK/P,EACjD,CACA,IAAA+P,CAAK/P,GACCE,KAAK2P,UAGSpP,GAAaqB,QAAQ5B,KAAK4E,SAAUqV,GAAc,CAClEna,kBAEYkC,mBAGdhC,KAAK2P,UAAW,EAChB3P,KAAKsY,UAAUzI,OACV7P,KAAK6E,QAAQpa,SAChB,IAAI0rB,IAAkBvG,OAExB5P,KAAK4E,SAASxjB,aAAa,cAAc,GACzC4e,KAAK4E,SAASxjB,aAAa,OAAQ,UACnC4e,KAAK4E,SAASvJ,UAAU5E,IAAIqjB,IAW5B9Z,KAAKmF,gBAVoB,KAClBnF,KAAK6E,QAAQpa,SAAUuV,KAAK6E,QAAQ+P,UACvC5U,KAAKwY,WAAW9C,WAElB1V,KAAK4E,SAASvJ,UAAU5E,IAAIojB,IAC5B7Z,KAAK4E,SAASvJ,UAAU1B,OAAOmgB,IAC/BvZ,GAAaqB,QAAQ5B,KAAK4E,SAAUsV,GAAe,CACjDpa,iBACA,GAEkCE,KAAK4E,UAAU,GACvD,CACA,IAAAgL,GACO5P,KAAK2P,WAGQpP,GAAaqB,QAAQ5B,KAAK4E,SAAUuV,IACxCnY,mBAGdhC,KAAKwY,WAAW3C,aAChB7V,KAAK4E,SAASgW,OACd5a,KAAK2P,UAAW,EAChB3P,KAAK4E,SAASvJ,UAAU5E,IAAIsjB,IAC5B/Z,KAAKsY,UAAU1I,OAUf5P,KAAKmF,gBAToB,KACvBnF,KAAK4E,SAASvJ,
UAAU1B,OAAOkgB,GAAmBE,IAClD/Z,KAAK4E,SAASzjB,gBAAgB,cAC9B6e,KAAK4E,SAASzjB,gBAAgB,QACzB6e,KAAK6E,QAAQpa,SAChB,IAAI0rB,IAAkB9jB,QAExBkO,GAAaqB,QAAQ5B,KAAK4E,SAAUyV,GAAe,GAEfra,KAAK4E,UAAU,IACvD,CACA,OAAAG,GACE/E,KAAKsY,UAAUvT,UACf/E,KAAKwY,WAAW3C,aAChBlR,MAAMI,SACR,CAGA,mBAAAwT,GACE,MASM5d,EAAYmG,QAAQd,KAAK6E,QAAQ+P,UACvC,OAAO,IAAIL,GAAS,CAClBJ,UA3HsB,qBA4HtBxZ,YACAyK,YAAY,EACZiP,YAAarU,KAAK4E,SAAS7f,WAC3BqvB,cAAezZ,EAfK,KACU,WAA1BqF,KAAK6E,QAAQ+P,SAIjB5U,KAAK4P,OAHHrP,GAAaqB,QAAQ5B,KAAK4E,SAAUwV,GAG3B,EAUgC,MAE/C,CACA,oBAAA3B,GACE,OAAO,IAAIlD,GAAU,CACnBF,YAAarV,KAAK4E,UAEtB,CACA,kBAAAiH,GACEtL,GAAac,GAAGrB,KAAK4E,SAAU4V,IAAuBpb,IA5IvC,WA6ITA,EAAMtiB,MAGNkjB,KAAK6E,QAAQmG,SACfhL,KAAK4P,OAGPrP,GAAaqB,QAAQ5B,KAAK4E,SAAUwV,IAAqB,GAE7D,CAGA,sBAAO3d,CAAgBqH,GACrB,OAAO9D,KAAKwH,MAAK,WACf,MAAMnd,EAAOswB,GAAUrV,oBAAoBtF,KAAM8D,GACjD,GAAsB,iBAAXA,EAAX,CAGA,QAAqB/K,IAAjB1O,EAAKyZ,IAAyBA,EAAOrC,WAAW,MAAmB,gBAAXqC,EAC1D,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,GAAQ9D,KAJb,CAKF,GACF,EAOFO,GAAac,GAAGhc,SAAUk1B,GA7JK,gCA6J2C,SAAUnb,GAClF,MAAM7S,EAASsZ,GAAec,uBAAuB3G,MAIrD,GAHI,CAAC,IAAK,QAAQoB,SAASpB,KAAKiH,UAC9B7H,EAAMkD,iBAEJpH,GAAW8E,MACb,OAEFO,GAAae,IAAI/U,EAAQ8tB,IAAgB,KAEnC1f,GAAUqF,OACZA,KAAKyS,OACP,IAIF,MAAMgH,EAAc5T,GAAeC,QAAQkU,IACvCP,GAAeA,IAAgBltB,GACjCouB,GAAUtV,YAAYoU,GAAa7J,OAExB+K,GAAUrV,oBAAoB/Y,GACtCob,OAAO3H,KACd,IACAO,GAAac,GAAGzhB,OAAQg6B,IAAuB,KAC7C,IAAK,MAAM7f,KAAY8L,GAAe1T,KAAK6nB,IACzCW,GAAUrV,oBAAoBvL,GAAU8V,MAC1C,IAEFtP,GAAac,GAAGzhB,OAAQ06B,IAAc,KACpC,IAAK,MAAM/6B,KAAWsmB,GAAe1T,KAAK,gDACG,UAAvClN,iBAAiB1F,GAASiC,UAC5Bm5B,GAAUrV,oBAAoB/lB,GAASqwB,MAE3C,IAEF/I,GAAqB8T,IAMrBxe,GAAmBwe,IAUnB,MACME,GAAmB,CAEvB,IAAK,CAAC,QAAS,MAAO,KAAM,OAAQ,OAHP,kBAI7BhqB,EAAG,CAAC,SAAU,OAAQ,QAAS,OAC/BiqB,KAAM,GACNhqB,EAAG,GACHiqB,GAAI,GACJC,IAAK,GACLC,KAAM,GACNC,GAAI,GACJC,IAAK,GACLC,GAAI,GACJC,GAAI,GACJC,GAAI,GACJC,GAAI,GACJC,GAAI,GACJC,GAAI,GACJC,GAAI,GACJC,GAAI,GACJC,GAAI,GACJC,GAAI,GACJxqB,EAAG,GACH0b,IAAK,CAAC,MAAO,SAAU,MAAO,QAAS,QAAS,UAChD+O,GAAI,GACJC,GAAI,GACJC,EAAG,GACHC,IAAK,GACLC,EAAG,GACHC,MAAO,GACPC,KAAM,GACNC,IAAK,GACLC,IAAK,GACLC,OAAQ,GACRC,EAAG,GACHC,GAAI,IAIAC,GAAgB,IAAIpmB,IAAI,CAAC,aAAc,OAAQ,OAAQ,WAAY,WAAY,SAAU,MAAO,eAShGqmB,GAAmB,0DACnBC,GAAmB,CAAC76B,EAAW86B,KACnC,MAAMC,EAAgB/6B,EAAUvC,SAASC,cACzC,OAAIo9B,EAAqBzb,SAAS0b,IAC5BJ,GAAc/lB,IAAImmB,IACbhc,QAAQ6b,GAAiBt5B,KAAKtB,EAAUg7B,YAM5CF,EAAqB12B,QAAO62B,GAAkBA,aAA0BzY,SAAQ9R,MAAKwqB,GAASA,EAAM55B,KAAKy5B,IAAe,EA0C3HI,GAAY,CAChBC,UAAWtC,GACXuC,QAAS,CAAC,EAEVC,WAAY,GACZxwB,MAAM,EACNywB,UAAU,EACVC,WAAY,KACZC,SAAU,eAENC,GAAgB,CACpBN,UAAW,SACXC,QAAS,SACTC,WAAY,oBACZxwB,KAAM,UACNywB,SAAU,UACVC,WAAY,kBACZC,SAAU,UAENE,GAAqB,CACzBC,MAAO,iCACP5jB,SAAU,oBAOZ,MAAM6jB,WAAwBna,GAC5B,WAAAU,CAAYL,GACVa,QACA3E,KAAK6E,QAAU7E,KAAK6D,WAAWC,EACjC,CAGA,kBAAWJ,GACT,OAAOwZ,EACT,CACA,sBAAWvZ,GACT,OAAO8Z,EACT,CACA,eAAWlhB,GACT,MA3CW,iBA4Cb,CAGA,UAAAshB,GACE,OAAO7gC,OAAOmiB,OAAOa,KAAK6E,QAAQuY,SAASt6B,KAAIghB,GAAU9D,KAAK8d,yBAAyBha,KAAS3d,OAAO2a,QACzG,CACA,UAAAid,GACE,OAAO/d,KAAK6d,aAAantB,OAAS,CACpC,CACA,aAAAstB,CAAcZ,GAMZ,OALApd,KAAKie,cAAcb,GACnBpd,KAAK6E,QAAQuY,QAAU,IAClBpd,KAAK6E,QAAQuY,WACbA,GAEEpd,IACT,CACA,MAAAke,GACE,MAAMC,EAAkB94B,SAASwvB,cAAc,OAC/CsJ,EAAgBC,UAAYpe,KAAKqe,eAAere,KAAK6E,QAAQ2Y,UAC7D,IAAK,MAAOzjB,EAAUukB,KAASthC,OAAOmkB,QAAQnB,KAAK6E,QAAQuY,SACzDpd,KAAKue,YAAYJ,EAAiBG,EAAMvkB,GAE1C,MAAMyjB,EAAWW,EAAgBpY,SAAS,GACpCsX,EAAard,KAAK8d,yBAAyB9d,KAAK6E,QAAQwY,YAI9D,OAHIA,GACFG,EAASniB,UAAU5E,OAAO4mB,EAAWn7B,MAAM,MAEtCs7B,CACT,CAGA,gBAAAvZ,CAAiBH,GACfa,MAAMV,iBAAiBH,GACvB9D,KAAKie,cAAcna,EAAOsZ,QAC5B,CACA,aAAAa,CAAcO,GACZ,IAAK,MAAOzkB,EAAUq
jB,KAAYpgC,OAAOmkB,QAAQqd,GAC/C7Z,MAAMV,iBAAiB,CACrBlK,WACA4jB,MAAOP,GACNM,GAEP,CACA,WAAAa,CAAYf,EAAUJ,EAASrjB,GAC7B,MAAM0kB,EAAkB5Y,GAAeC,QAAQ/L,EAAUyjB,GACpDiB,KAGLrB,EAAUpd,KAAK8d,yBAAyBV,IAKpC,GAAUA,GACZpd,KAAK0e,sBAAsBhkB,GAAW0iB,GAAUqB,GAG9Cze,KAAK6E,QAAQhY,KACf4xB,EAAgBL,UAAYpe,KAAKqe,eAAejB,GAGlDqB,EAAgBE,YAAcvB,EAX5BqB,EAAgB9kB,SAYpB,CACA,cAAA0kB,CAAeG,GACb,OAAOxe,KAAK6E,QAAQyY,SApJxB,SAAsBsB,EAAYzB,EAAW0B,GAC3C,IAAKD,EAAWluB,OACd,OAAOkuB,EAET,GAAIC,GAAgD,mBAArBA,EAC7B,OAAOA,EAAiBD,GAE1B,MACME,GADY,IAAIl/B,OAAOm/B,WACKC,gBAAgBJ,EAAY,aACxD/9B,EAAW,GAAGlC,UAAUmgC,EAAgB5yB,KAAKkU,iBAAiB,MACpE,IAAK,MAAM7gB,KAAWsB,EAAU,CAC9B,MAAMo+B,EAAc1/B,EAAQC,SAASC,cACrC,IAAKzC,OAAO4D,KAAKu8B,GAAW/b,SAAS6d,GAAc,CACjD1/B,EAAQoa,SACR,QACF,CACA,MAAMulB,EAAgB,GAAGvgC,UAAUY,EAAQ0B,YACrCk+B,EAAoB,GAAGxgC,OAAOw+B,EAAU,MAAQ,GAAIA,EAAU8B,IAAgB,IACpF,IAAK,MAAMl9B,KAAam9B,EACjBtC,GAAiB76B,EAAWo9B,IAC/B5/B,EAAQ4B,gBAAgBY,EAAUvC,SAGxC,CACA,OAAOs/B,EAAgB5yB,KAAKkyB,SAC9B,CA2HmCgB,CAAaZ,EAAKxe,KAAK6E,QAAQsY,UAAWnd,KAAK6E,QAAQ0Y,YAAciB,CACtG,CACA,wBAAAV,CAAyBU,GACvB,OAAO3hB,GAAQ2hB,EAAK,CAACxe,MACvB,CACA,qBAAA0e,CAAsBn/B,EAASk/B,GAC7B,GAAIze,KAAK6E,QAAQhY,KAGf,OAFA4xB,EAAgBL,UAAY,QAC5BK,EAAgB3J,OAAOv1B,GAGzBk/B,EAAgBE,YAAcp/B,EAAQo/B,WACxC,EAeF,MACMU,GAAwB,IAAI/oB,IAAI,CAAC,WAAY,YAAa,eAC1DgpB,GAAoB,OAEpBC,GAAoB,OACpBC,GAAyB,iBACzBC,GAAiB,SACjBC,GAAmB,gBACnBC,GAAgB,QAChBC,GAAgB,QAahBC,GAAgB,CACpBC,KAAM,OACNC,IAAK,MACLC,MAAO/jB,KAAU,OAAS,QAC1BgkB,OAAQ,SACRC,KAAMjkB,KAAU,QAAU,QAEtBkkB,GAAY,CAChBhD,UAAWtC,GACXuF,WAAW,EACXnyB,SAAU,kBACVoyB,WAAW,EACXC,YAAa,GACbC,MAAO,EACPvwB,mBAAoB,CAAC,MAAO,QAAS,SAAU,QAC/CnD,MAAM,EACN7E,OAAQ,CAAC,EAAG,GACZtJ,UAAW,MACXszB,aAAc,KACdsL,UAAU,EACVC,WAAY,KACZxjB,UAAU,EACVyjB,SAAU,+GACVgD,MAAO,GACP5e,QAAS,eAEL6e,GAAgB,CACpBtD,UAAW,SACXiD,UAAW,UACXnyB,SAAU,mBACVoyB,UAAW,2BACXC,YAAa,oBACbC,MAAO,kBACPvwB,mBAAoB,QACpBnD,KAAM,UACN7E,OAAQ,0BACRtJ,UAAW,oBACXszB,aAAc,yBACdsL,SAAU,UACVC,WAAY,kBACZxjB,SAAU,mBACVyjB,SAAU,SACVgD,MAAO,4BACP5e,QAAS,UAOX,MAAM8e,WAAgBhc,GACpB,WAAAP,CAAY5kB,EAASukB,GACnB,QAAsB,IAAX,EACT,MAAM,IAAIU,UAAU,+DAEtBG,MAAMplB,EAASukB,GAGf9D,KAAK2gB,YAAa,EAClB3gB,KAAK4gB,SAAW,EAChB5gB,KAAK6gB,WAAa,KAClB7gB,KAAK8gB,eAAiB,CAAC,EACvB9gB,KAAKmS,QAAU,KACfnS,KAAK+gB,iBAAmB,KACxB/gB,KAAKghB,YAAc,KAGnBhhB,KAAKihB,IAAM,KACXjhB,KAAKkhB,gBACAlhB,KAAK6E,QAAQ9K,UAChBiG,KAAKmhB,WAET,CAGA,kBAAWzd,GACT,OAAOyc,EACT,CACA,sBAAWxc,GACT,OAAO8c,EACT,CACA,eAAWlkB,GACT,MAxGW,SAyGb,CAGA,MAAA6kB,GACEphB,KAAK2gB,YAAa,CACpB,CACA,OAAAU,GACErhB,KAAK2gB,YAAa,CACpB,CACA,aAAAW,GACEthB,KAAK2gB,YAAc3gB,KAAK2gB,UAC1B,CACA,MAAAhZ,GACO3H,KAAK2gB,aAGV3gB,KAAK8gB,eAAeS,OAASvhB,KAAK8gB,eAAeS,MAC7CvhB,KAAK2P,WACP3P,KAAKwhB,SAGPxhB,KAAKyhB,SACP,CACA,OAAA1c,GACEmI,aAAalN,KAAK4gB,UAClBrgB,GAAaC,IAAIR,KAAK4E,SAAS5J,QAAQykB,IAAiBC,GAAkB1f,KAAK0hB,mBAC3E1hB,KAAK4E,SAASpJ,aAAa,2BAC7BwE,KAAK4E,SAASxjB,aAAa,QAAS4e,KAAK4E,SAASpJ,aAAa,2BAEjEwE,KAAK2hB,iBACLhd,MAAMI,SACR,CACA,IAAA8K,GACE,GAAoC,SAAhC7P,KAAK4E,SAAS7jB,MAAMgxB,QACtB,MAAM,IAAInO,MAAM,uCAElB,IAAM5D,KAAK4hB,mBAAoB5hB,KAAK2gB,WAClC,OAEF,MAAMnH,EAAYjZ,GAAaqB,QAAQ5B,KAAK4E,SAAU5E,KAAKmE,YAAYqB,UAlItD,SAoIXqc,GADapmB,GAAeuE,KAAK4E,WACL5E,KAAK4E,SAAS9kB,cAAcwF,iBAAiBd,SAASwb,KAAK4E,UAC7F,GAAI4U,EAAUxX,mBAAqB6f,EACjC,OAIF7hB,KAAK2hB,iBACL,MAAMV,EAAMjhB,KAAK8hB,iBACjB9hB,KAAK4E,SAASxjB,aAAa,mBAAoB6/B,EAAIzlB,aAAa,OAChE,MAAM,UACJ6kB,GACErgB,KAAK6E,QAYT,GAXK7E,KAAK4E,SAAS9kB,cAAcwF,gBAAgBd,SAASwb,KAAKihB,OAC7DZ,EAAUvL,OAAOmM,GACjB1gB,GAAaqB,QAAQ5B,KAAK4E,SAAU5E,KAAKmE,YAAYqB,UAhJpC,cAkJnBxF,KAAKmS,QAAUnS,KAAKwS,cAAcyO,GAClCA,EAAI5lB,UAAU5E,IAAI8oB,IAMd,iBAAkBl6B,SAASC,gBAC7B,IAAK,MAAM/F
,IAAW,GAAGZ,UAAU0G,SAAS6G,KAAK6Z,UAC/CxF,GAAac,GAAG9hB,EAAS,YAAaqc,IAU1CoE,KAAKmF,gBAPY,KACf5E,GAAaqB,QAAQ5B,KAAK4E,SAAU5E,KAAKmE,YAAYqB,UAhKrC,WAiKQ,IAApBxF,KAAK6gB,YACP7gB,KAAKwhB,SAEPxhB,KAAK6gB,YAAa,CAAK,GAEK7gB,KAAKihB,IAAKjhB,KAAKgO,cAC/C,CACA,IAAA4B,GACE,GAAK5P,KAAK2P,aAGQpP,GAAaqB,QAAQ5B,KAAK4E,SAAU5E,KAAKmE,YAAYqB,UA/KtD,SAgLHxD,iBAAd,CAQA,GALYhC,KAAK8hB,iBACbzmB,UAAU1B,OAAO4lB,IAIjB,iBAAkBl6B,SAASC,gBAC7B,IAAK,MAAM/F,IAAW,GAAGZ,UAAU0G,SAAS6G,KAAK6Z,UAC/CxF,GAAaC,IAAIjhB,EAAS,YAAaqc,IAG3CoE,KAAK8gB,eAA4B,OAAI,EACrC9gB,KAAK8gB,eAAelB,KAAiB,EACrC5f,KAAK8gB,eAAenB,KAAiB,EACrC3f,KAAK6gB,WAAa,KAYlB7gB,KAAKmF,gBAVY,KACXnF,KAAK+hB,yBAGJ/hB,KAAK6gB,YACR7gB,KAAK2hB,iBAEP3hB,KAAK4E,SAASzjB,gBAAgB,oBAC9Bof,GAAaqB,QAAQ5B,KAAK4E,SAAU5E,KAAKmE,YAAYqB,UAzMpC,WAyM8D,GAEnDxF,KAAKihB,IAAKjhB,KAAKgO,cA1B7C,CA2BF,CACA,MAAAjjB,GACMiV,KAAKmS,SACPnS,KAAKmS,QAAQpnB,QAEjB,CAGA,cAAA62B,GACE,OAAO9gB,QAAQd,KAAKgiB,YACtB,CACA,cAAAF,GAIE,OAHK9hB,KAAKihB,MACRjhB,KAAKihB,IAAMjhB,KAAKiiB,kBAAkBjiB,KAAKghB,aAAehhB,KAAKkiB,2BAEtDliB,KAAKihB,GACd,CACA,iBAAAgB,CAAkB7E,GAChB,MAAM6D,EAAMjhB,KAAKmiB,oBAAoB/E,GAASc,SAG9C,IAAK+C,EACH,OAAO,KAETA,EAAI5lB,UAAU1B,OAAO2lB,GAAmBC,IAExC0B,EAAI5lB,UAAU5E,IAAI,MAAMuJ,KAAKmE,YAAY5H,aACzC,MAAM6lB,EAvuGKC,KACb,GACEA,GAAUlgC,KAAKmgC,MA/BH,IA+BSngC,KAAKogC,gBACnBl9B,SAASm9B,eAAeH,IACjC,OAAOA,CAAM,EAmuGGI,CAAOziB,KAAKmE,YAAY5H,MAAM1c,WAK5C,OAJAohC,EAAI7/B,aAAa,KAAMghC,GACnBpiB,KAAKgO,eACPiT,EAAI5lB,UAAU5E,IAAI6oB,IAEb2B,CACT,CACA,UAAAyB,CAAWtF,GACTpd,KAAKghB,YAAc5D,EACfpd,KAAK2P,aACP3P,KAAK2hB,iBACL3hB,KAAK6P,OAET,CACA,mBAAAsS,CAAoB/E,GAYlB,OAXIpd,KAAK+gB,iBACP/gB,KAAK+gB,iBAAiB/C,cAAcZ,GAEpCpd,KAAK+gB,iBAAmB,IAAInD,GAAgB,IACvC5d,KAAK6E,QAGRuY,UACAC,WAAYrd,KAAK8d,yBAAyB9d,KAAK6E,QAAQyb,eAGpDtgB,KAAK+gB,gBACd,CACA,sBAAAmB,GACE,MAAO,CACL,CAAC1C,IAAyBxf,KAAKgiB,YAEnC,CACA,SAAAA,GACE,OAAOhiB,KAAK8d,yBAAyB9d,KAAK6E,QAAQ2b,QAAUxgB,KAAK4E,SAASpJ,aAAa,yBACzF,CAGA,4BAAAmnB,CAA6BvjB,GAC3B,OAAOY,KAAKmE,YAAYmB,oBAAoBlG,EAAMW,eAAgBC,KAAK4iB,qBACzE,CACA,WAAA5U,GACE,OAAOhO,KAAK6E,QAAQub,WAAapgB,KAAKihB,KAAOjhB,KAAKihB,IAAI5lB,UAAU7W,SAAS86B,GAC3E,CACA,QAAA3P,GACE,OAAO3P,KAAKihB,KAAOjhB,KAAKihB,IAAI5lB,UAAU7W,SAAS+6B,GACjD,CACA,aAAA/M,CAAcyO,GACZ,MAAMviC,EAAYme,GAAQmD,KAAK6E,QAAQnmB,UAAW,CAACshB,KAAMihB,EAAKjhB,KAAK4E,WAC7Die,EAAahD,GAAcnhC,EAAU+lB,eAC3C,OAAO,GAAoBzE,KAAK4E,SAAUqc,EAAKjhB,KAAK4S,iBAAiBiQ,GACvE,CACA,UAAA7P,GACE,MAAM,OACJhrB,GACEgY,KAAK6E,QACT,MAAsB,iBAAX7c,EACFA,EAAO9F,MAAM,KAAKY,KAAInF,GAAS4f,OAAOgQ,SAAS5vB,EAAO,MAEzC,mBAAXqK,EACFirB,GAAcjrB,EAAOirB,EAAYjT,KAAK4E,UAExC5c,CACT,CACA,wBAAA81B,CAAyBU,GACvB,OAAO3hB,GAAQ2hB,EAAK,CAACxe,KAAK4E,UAC5B,CACA,gBAAAgO,CAAiBiQ,GACf,MAAM3P,EAAwB,CAC5Bx0B,UAAWmkC,EACXzsB,UAAW,CAAC,CACV9V,KAAM,OACNmB,QAAS,CACPuO,mBAAoBgQ,KAAK6E,QAAQ7U,qBAElC,CACD1P,KAAM,SACNmB,QAAS,CACPuG,OAAQgY,KAAKgT,eAEd,CACD1yB,KAAM,kBACNmB,QAAS,CACPwM,SAAU+R,KAAK6E,QAAQ5W,WAExB,CACD3N,KAAM,QACNmB,QAAS,CACPlC,QAAS,IAAIygB,KAAKmE,YAAY5H,eAE/B,CACDjc,KAAM,kBACNC,SAAS,EACTC,MAAO,aACPC,GAAI4J,IAGF2V,KAAK8hB,iBAAiB1gC,aAAa,wBAAyBiJ,EAAK1J,MAAMjC,UAAU,KAIvF,MAAO,IACFw0B,KACArW,GAAQmD,KAAK6E,QAAQmN,aAAc,CAACkB,IAE3C,CACA,aAAAgO,GACE,MAAM4B,EAAW9iB,KAAK6E,QAAQjD,QAAQ1f,MAAM,KAC5C,IAAK,MAAM0f,KAAWkhB,EACpB,GAAgB,UAAZlhB,EACFrB,GAAac,GAAGrB,KAAK4E,SAAU5E,KAAKmE,YAAYqB,UAjVlC,SAiV4DxF,KAAK6E,QAAQ9K,UAAUqF,IAC/EY,KAAK2iB,6BAA6BvjB,GAC1CuI,QAAQ,SAEb,GA3VU,WA2VN/F,EAA4B,CACrC,MAAMmhB,EAAUnhB,IAAY+d,GAAgB3f,KAAKmE,YAAYqB,UAnV5C,cAmV0ExF,KAAKmE,YAAYqB,UArV5F,WAsVVwd,EAAWphB,IAAY+d,GAAgB3f,KAAKmE,YAAYqB,UAnV7C,cAmV2ExF,KAAKmE,YAAYqB,UArV5F,YAsVjBjF,GAAac,GAAGrB,KAAK4E,SAAUme,EAAS/iB,KAAK6E,QAAQ9K,UA
AUqF,IAC7D,MAAMkU,EAAUtT,KAAK2iB,6BAA6BvjB,GAClDkU,EAAQwN,eAA8B,YAAf1hB,EAAMqB,KAAqBmf,GAAgBD,KAAiB,EACnFrM,EAAQmO,QAAQ,IAElBlhB,GAAac,GAAGrB,KAAK4E,SAAUoe,EAAUhjB,KAAK6E,QAAQ9K,UAAUqF,IAC9D,MAAMkU,EAAUtT,KAAK2iB,6BAA6BvjB,GAClDkU,EAAQwN,eAA8B,aAAf1hB,EAAMqB,KAAsBmf,GAAgBD,IAAiBrM,EAAQ1O,SAASpgB,SAAS4a,EAAMU,eACpHwT,EAAQkO,QAAQ,GAEpB,CAEFxhB,KAAK0hB,kBAAoB,KACnB1hB,KAAK4E,UACP5E,KAAK4P,MACP,EAEFrP,GAAac,GAAGrB,KAAK4E,SAAS5J,QAAQykB,IAAiBC,GAAkB1f,KAAK0hB,kBAChF,CACA,SAAAP,GACE,MAAMX,EAAQxgB,KAAK4E,SAASpJ,aAAa,SACpCglB,IAGAxgB,KAAK4E,SAASpJ,aAAa,eAAkBwE,KAAK4E,SAAS+Z,YAAYhZ,QAC1E3F,KAAK4E,SAASxjB,aAAa,aAAco/B,GAE3CxgB,KAAK4E,SAASxjB,aAAa,yBAA0Bo/B,GACrDxgB,KAAK4E,SAASzjB,gBAAgB,SAChC,CACA,MAAAsgC,GACMzhB,KAAK2P,YAAc3P,KAAK6gB,WAC1B7gB,KAAK6gB,YAAa,GAGpB7gB,KAAK6gB,YAAa,EAClB7gB,KAAKijB,aAAY,KACXjjB,KAAK6gB,YACP7gB,KAAK6P,MACP,GACC7P,KAAK6E,QAAQ0b,MAAM1Q,MACxB,CACA,MAAA2R,GACMxhB,KAAK+hB,yBAGT/hB,KAAK6gB,YAAa,EAClB7gB,KAAKijB,aAAY,KACVjjB,KAAK6gB,YACR7gB,KAAK4P,MACP,GACC5P,KAAK6E,QAAQ0b,MAAM3Q,MACxB,CACA,WAAAqT,CAAYrlB,EAASslB,GACnBhW,aAAalN,KAAK4gB,UAClB5gB,KAAK4gB,SAAW/iB,WAAWD,EAASslB,EACtC,CACA,oBAAAnB,GACE,OAAO/kC,OAAOmiB,OAAOa,KAAK8gB,gBAAgB1f,UAAS,EACrD,CACA,UAAAyC,CAAWC,GACT,MAAMqf,EAAiBngB,GAAYG,kBAAkBnD,KAAK4E,UAC1D,IAAK,MAAMwe,KAAiBpmC,OAAO4D,KAAKuiC,GAClC9D,GAAsB1oB,IAAIysB,WACrBD,EAAeC,GAU1B,OAPAtf,EAAS,IACJqf,KACmB,iBAAXrf,GAAuBA,EAASA,EAAS,CAAC,GAEvDA,EAAS9D,KAAK+D,gBAAgBD,GAC9BA,EAAS9D,KAAKgE,kBAAkBF,GAChC9D,KAAKiE,iBAAiBH,GACfA,CACT,CACA,iBAAAE,CAAkBF,GAchB,OAbAA,EAAOuc,WAAiC,IAArBvc,EAAOuc,UAAsBh7B,SAAS6G,KAAOwO,GAAWoJ,EAAOuc,WACtD,iBAAjBvc,EAAOyc,QAChBzc,EAAOyc,MAAQ,CACb1Q,KAAM/L,EAAOyc,MACb3Q,KAAM9L,EAAOyc,QAGW,iBAAjBzc,EAAO0c,QAChB1c,EAAO0c,MAAQ1c,EAAO0c,MAAM3gC,YAEA,iBAAnBikB,EAAOsZ,UAChBtZ,EAAOsZ,QAAUtZ,EAAOsZ,QAAQv9B,YAE3BikB,CACT,CACA,kBAAA8e,GACE,MAAM9e,EAAS,CAAC,EAChB,IAAK,MAAOhnB,EAAKa,KAAUX,OAAOmkB,QAAQnB,KAAK6E,SACzC7E,KAAKmE,YAAYT,QAAQ5mB,KAASa,IACpCmmB,EAAOhnB,GAAOa,GASlB,OANAmmB,EAAO/J,UAAW,EAClB+J,EAAOlC,QAAU,SAKVkC,CACT,CACA,cAAA6d,GACM3hB,KAAKmS,UACPnS,KAAKmS,QAAQnZ,UACbgH,KAAKmS,QAAU,MAEbnS,KAAKihB,MACPjhB,KAAKihB,IAAItnB,SACTqG,KAAKihB,IAAM,KAEf,CAGA,sBAAOxkB,CAAgBqH,GACrB,OAAO9D,KAAKwH,MAAK,WACf,MAAMnd,EAAOq2B,GAAQpb,oBAAoBtF,KAAM8D,GAC/C,GAAsB,iBAAXA,EAAX,CAGA,QAA4B,IAAjBzZ,EAAKyZ,GACd,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,IAJL,CAKF,GACF,EAOF3H,GAAmBukB,IAcnB,MACM2C,GAAiB,kBACjBC,GAAmB,gBACnBC,GAAY,IACb7C,GAAQhd,QACX0Z,QAAS,GACTp1B,OAAQ,CAAC,EAAG,GACZtJ,UAAW,QACX8+B,SAAU,8IACV5b,QAAS,SAEL4hB,GAAgB,IACjB9C,GAAQ/c,YACXyZ,QAAS,kCAOX,MAAMqG,WAAgB/C,GAEpB,kBAAWhd,GACT,OAAO6f,EACT,CACA,sBAAW5f,GACT,OAAO6f,EACT,CACA,eAAWjnB,GACT,MA7BW,SA8Bb,CAGA,cAAAqlB,GACE,OAAO5hB,KAAKgiB,aAAehiB,KAAK0jB,aAClC,CAGA,sBAAAxB,GACE,MAAO,CACL,CAACmB,IAAiBrjB,KAAKgiB,YACvB,CAACsB,IAAmBtjB,KAAK0jB,cAE7B,CACA,WAAAA,GACE,OAAO1jB,KAAK8d,yBAAyB9d,KAAK6E,QAAQuY,QACpD,CAGA,sBAAO3gB,CAAgBqH,GACrB,OAAO9D,KAAKwH,MAAK,WACf,MAAMnd,EAAOo5B,GAAQne,oBAAoBtF,KAAM8D,GAC/C,GAAsB,iBAAXA,EAAX,CAGA,QAA4B,IAAjBzZ,EAAKyZ,GACd,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,IAJL,CAKF,GACF,EAOF3H,GAAmBsnB,IAcnB,MAEME,GAAc,gBAEdC,GAAiB,WAAWD,KAC5BE,GAAc,QAAQF,KACtBG,GAAwB,OAAOH,cAE/BI,GAAsB,SAEtBC,GAAwB,SAExBC,GAAqB,YAGrBC,GAAsB,GAAGD,mBAA+CA,uBAGxEE,GAAY,CAChBn8B,OAAQ,KAERo8B,WAAY,eACZC,cAAc,EACd93B,OAAQ,KACR+3B,UAAW,CAAC,GAAK,GAAK,IAElBC,GAAgB,CACpBv8B,OAAQ,gBAERo8B,WAAY,SACZC,aAAc,UACd93B,OAAQ,UACR+3B,UAAW,SAOb,MAAME,WAAkB9f,GACtB,WAAAP,CAAY5kB,EAASukB,GACnBa,MAAMplB,EAASukB,GAGf9D,KAAKykB,aAAe,IAAIvzB,IACxB8O,KAAK0kB,oBAAsB,IAAIxzB,IAC/B8O,KAAK2kB,aAA6D,YAA9C1/B,iBAAiB+a,KAAK4E,UAAU5Y,UAA0B,KAAOgU,KAAK4E,SAC1F
5E,KAAK4kB,cAAgB,KACrB5kB,KAAK6kB,UAAY,KACjB7kB,KAAK8kB,oBAAsB,CACzBC,gBAAiB,EACjBC,gBAAiB,GAEnBhlB,KAAKilB,SACP,CAGA,kBAAWvhB,GACT,OAAOygB,EACT,CACA,sBAAWxgB,GACT,OAAO4gB,EACT,CACA,eAAWhoB,GACT,MAhEW,WAiEb,CAGA,OAAA0oB,GACEjlB,KAAKklB,mCACLllB,KAAKmlB,2BACDnlB,KAAK6kB,UACP7kB,KAAK6kB,UAAUO,aAEfplB,KAAK6kB,UAAY7kB,KAAKqlB,kBAExB,IAAK,MAAMC,KAAWtlB,KAAK0kB,oBAAoBvlB,SAC7Ca,KAAK6kB,UAAUU,QAAQD,EAE3B,CACA,OAAAvgB,GACE/E,KAAK6kB,UAAUO,aACfzgB,MAAMI,SACR,CAGA,iBAAAf,CAAkBF,GAShB,OAPAA,EAAOvX,OAASmO,GAAWoJ,EAAOvX,SAAWlH,SAAS6G,KAGtD4X,EAAOsgB,WAAatgB,EAAO9b,OAAS,GAAG8b,EAAO9b,oBAAsB8b,EAAOsgB,WAC3C,iBAArBtgB,EAAOwgB,YAChBxgB,EAAOwgB,UAAYxgB,EAAOwgB,UAAUpiC,MAAM,KAAKY,KAAInF,GAAS4f,OAAOC,WAAW7f,MAEzEmmB,CACT,CACA,wBAAAqhB,GACOnlB,KAAK6E,QAAQwf,eAKlB9jB,GAAaC,IAAIR,KAAK6E,QAAQtY,OAAQs3B,IACtCtjB,GAAac,GAAGrB,KAAK6E,QAAQtY,OAAQs3B,GAAaG,IAAuB5kB,IACvE,MAAMomB,EAAoBxlB,KAAK0kB,oBAAoBvnC,IAAIiiB,EAAM7S,OAAOtB,MACpE,GAAIu6B,EAAmB,CACrBpmB,EAAMkD,iBACN,MAAM3G,EAAOqE,KAAK2kB,cAAgB/kC,OAC5BmE,EAASyhC,EAAkBnhC,UAAY2b,KAAK4E,SAASvgB,UAC3D,GAAIsX,EAAK8pB,SAKP,YAJA9pB,EAAK8pB,SAAS,CACZ9jC,IAAKoC,EACL2hC,SAAU,WAMd/pB,EAAKlQ,UAAY1H,CACnB,KAEJ,CACA,eAAAshC,GACE,MAAM5jC,EAAU,CACdka,KAAMqE,KAAK2kB,aACXL,UAAWtkB,KAAK6E,QAAQyf,UACxBF,WAAYpkB,KAAK6E,QAAQuf,YAE3B,OAAO,IAAIuB,sBAAqBxkB,GAAWnB,KAAK4lB,kBAAkBzkB,IAAU1f,EAC9E,CAGA,iBAAAmkC,CAAkBzkB,GAChB,MAAM0kB,EAAgBlI,GAAS3d,KAAKykB,aAAatnC,IAAI,IAAIwgC,EAAMpxB,OAAO4N,MAChEub,EAAWiI,IACf3d,KAAK8kB,oBAAoBC,gBAAkBpH,EAAMpxB,OAAOlI,UACxD2b,KAAK8lB,SAASD,EAAclI,GAAO,EAE/BqH,GAAmBhlB,KAAK2kB,cAAgBt/B,SAASC,iBAAiBmG,UAClEs6B,EAAkBf,GAAmBhlB,KAAK8kB,oBAAoBE,gBACpEhlB,KAAK8kB,oBAAoBE,gBAAkBA,EAC3C,IAAK,MAAMrH,KAASxc,EAAS,CAC3B,IAAKwc,EAAMqI,eAAgB,CACzBhmB,KAAK4kB,cAAgB,KACrB5kB,KAAKimB,kBAAkBJ,EAAclI,IACrC,QACF,CACA,MAAMuI,EAA2BvI,EAAMpxB,OAAOlI,WAAa2b,KAAK8kB,oBAAoBC,gBAEpF,GAAIgB,GAAmBG,GAGrB,GAFAxQ,EAASiI,IAEJqH,EACH,YAMCe,GAAoBG,GACvBxQ,EAASiI,EAEb,CACF,CACA,gCAAAuH,GACEllB,KAAKykB,aAAe,IAAIvzB,IACxB8O,KAAK0kB,oBAAsB,IAAIxzB,IAC/B,MAAMi1B,EAActgB,GAAe1T,KAAK6xB,GAAuBhkB,KAAK6E,QAAQtY,QAC5E,IAAK,MAAM65B,KAAUD,EAAa,CAEhC,IAAKC,EAAOn7B,MAAQiQ,GAAWkrB,GAC7B,SAEF,MAAMZ,EAAoB3f,GAAeC,QAAQugB,UAAUD,EAAOn7B,MAAO+U,KAAK4E,UAG1EjK,GAAU6qB,KACZxlB,KAAKykB,aAAa1yB,IAAIs0B,UAAUD,EAAOn7B,MAAOm7B,GAC9CpmB,KAAK0kB,oBAAoB3yB,IAAIq0B,EAAOn7B,KAAMu6B,GAE9C,CACF,CACA,QAAAM,CAASv5B,GACHyT,KAAK4kB,gBAAkBr4B,IAG3ByT,KAAKimB,kBAAkBjmB,KAAK6E,QAAQtY,QACpCyT,KAAK4kB,cAAgBr4B,EACrBA,EAAO8O,UAAU5E,IAAIstB,IACrB/jB,KAAKsmB,iBAAiB/5B,GACtBgU,GAAaqB,QAAQ5B,KAAK4E,SAAUgf,GAAgB,CAClD9jB,cAAevT,IAEnB,CACA,gBAAA+5B,CAAiB/5B,GAEf,GAAIA,EAAO8O,UAAU7W,SA9LQ,iBA+L3BqhB,GAAeC,QArLc,mBAqLsBvZ,EAAOyO,QAtLtC,cAsLkEK,UAAU5E,IAAIstB,SAGtG,IAAK,MAAMwC,KAAa1gB,GAAeI,QAAQ1Z,EA9LnB,qBAiM1B,IAAK,MAAMxJ,KAAQ8iB,GAAeM,KAAKogB,EAAWrC,IAChDnhC,EAAKsY,UAAU5E,IAAIstB,GAGzB,CACA,iBAAAkC,CAAkBxhC,GAChBA,EAAO4W,UAAU1B,OAAOoqB,IACxB,MAAMyC,EAAc3gB,GAAe1T,KAAK,GAAG6xB,MAAyBD,KAAuBt/B,GAC3F,IAAK,MAAM9E,KAAQ6mC,EACjB7mC,EAAK0b,UAAU1B,OAAOoqB,GAE1B,CAGA,sBAAOtnB,CAAgBqH,GACrB,OAAO9D,KAAKwH,MAAK,WACf,MAAMnd,EAAOm6B,GAAUlf,oBAAoBtF,KAAM8D,GACjD,GAAsB,iBAAXA,EAAX,CAGA,QAAqB/K,IAAjB1O,EAAKyZ,IAAyBA,EAAOrC,WAAW,MAAmB,gBAAXqC,EAC1D,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,IAJL,CAKF,GACF,EAOFvD,GAAac,GAAGzhB,OAAQkkC,IAAuB,KAC7C,IAAK,MAAM2C,KAAO5gB,GAAe1T,KApOT,0BAqOtBqyB,GAAUlf,oBAAoBmhB,EAChC,IAOFtqB,GAAmBqoB,IAcnB,MAEMkC,GAAc,UACdC,GAAe,OAAOD,KACtBE,GAAiB,SAASF,KAC1BG,GAAe,OAAOH,KACtBI,GAAgB,QAAQJ,KACxBK,GAAuB,QAAQL,KAC/BM,GAAgB,UAAUN,KAC1BO,GAAsB,OAAOP,KAC7BQ,GAAiB,YACjBC,GAAkB,aAClBC,GAAe,UACfC,GAAiB,YACjBC,GAAW,OACXC,GAAU,MACVC,GAAoB,SACpBC
,GAAoB,OACpBC,GAAoB,OAEpBC,GAA2B,mBAE3BC,GAA+B,QAAQD,MAIvCE,GAAuB,2EACvBC,GAAsB,YAFOF,uBAAiDA,mBAA6CA,OAE/EC,KAC5CE,GAA8B,IAAIP,8BAA6CA,+BAA8CA,4BAMnI,MAAMQ,WAAYtjB,GAChB,WAAAP,CAAY5kB,GACVolB,MAAMplB,GACNygB,KAAKoS,QAAUpS,KAAK4E,SAAS5J,QAdN,uCAelBgF,KAAKoS,UAOVpS,KAAKioB,sBAAsBjoB,KAAKoS,QAASpS,KAAKkoB,gBAC9C3nB,GAAac,GAAGrB,KAAK4E,SAAUoiB,IAAe5nB,GAASY,KAAK6M,SAASzN,KACvE,CAGA,eAAW7C,GACT,MAnDW,KAoDb,CAGA,IAAAsT,GAEE,MAAMsY,EAAYnoB,KAAK4E,SACvB,GAAI5E,KAAKooB,cAAcD,GACrB,OAIF,MAAME,EAASroB,KAAKsoB,iBACdC,EAAYF,EAAS9nB,GAAaqB,QAAQymB,EAAQ1B,GAAc,CACpE7mB,cAAeqoB,IACZ,KACa5nB,GAAaqB,QAAQumB,EAAWtB,GAAc,CAC9D/mB,cAAeuoB,IAEHrmB,kBAAoBumB,GAAaA,EAAUvmB,mBAGzDhC,KAAKwoB,YAAYH,EAAQF,GACzBnoB,KAAKyoB,UAAUN,EAAWE,GAC5B,CAGA,SAAAI,CAAUlpC,EAASmpC,GACZnpC,IAGLA,EAAQ8b,UAAU5E,IAAI+wB,IACtBxnB,KAAKyoB,UAAU5iB,GAAec,uBAAuBpnB,IAcrDygB,KAAKmF,gBAZY,KACsB,QAAjC5lB,EAAQic,aAAa,SAIzBjc,EAAQ4B,gBAAgB,YACxB5B,EAAQ6B,aAAa,iBAAiB,GACtC4e,KAAK2oB,gBAAgBppC,GAAS,GAC9BghB,GAAaqB,QAAQriB,EAASunC,GAAe,CAC3ChnB,cAAe4oB,KAPfnpC,EAAQ8b,UAAU5E,IAAIixB,GAQtB,GAE0BnoC,EAASA,EAAQ8b,UAAU7W,SAASijC,KACpE,CACA,WAAAe,CAAYjpC,EAASmpC,GACdnpC,IAGLA,EAAQ8b,UAAU1B,OAAO6tB,IACzBjoC,EAAQq7B,OACR5a,KAAKwoB,YAAY3iB,GAAec,uBAAuBpnB,IAcvDygB,KAAKmF,gBAZY,KACsB,QAAjC5lB,EAAQic,aAAa,SAIzBjc,EAAQ6B,aAAa,iBAAiB,GACtC7B,EAAQ6B,aAAa,WAAY,MACjC4e,KAAK2oB,gBAAgBppC,GAAS,GAC9BghB,GAAaqB,QAAQriB,EAASqnC,GAAgB,CAC5C9mB,cAAe4oB,KAPfnpC,EAAQ8b,UAAU1B,OAAO+tB,GAQzB,GAE0BnoC,EAASA,EAAQ8b,UAAU7W,SAASijC,KACpE,CACA,QAAA5a,CAASzN,GACP,IAAK,CAAC8nB,GAAgBC,GAAiBC,GAAcC,GAAgBC,GAAUC,IAASnmB,SAAShC,EAAMtiB,KACrG,OAEFsiB,EAAM0U,kBACN1U,EAAMkD,iBACN,MAAMyD,EAAW/F,KAAKkoB,eAAe/hC,QAAO5G,IAAY2b,GAAW3b,KACnE,IAAIqpC,EACJ,GAAI,CAACtB,GAAUC,IAASnmB,SAAShC,EAAMtiB,KACrC8rC,EAAoB7iB,EAAS3G,EAAMtiB,MAAQwqC,GAAW,EAAIvhB,EAASrV,OAAS,OACvE,CACL,MAAM8c,EAAS,CAAC2Z,GAAiBE,IAAgBjmB,SAAShC,EAAMtiB,KAChE8rC,EAAoB9qB,GAAqBiI,EAAU3G,EAAM7S,OAAQihB,GAAQ,EAC3E,CACIob,IACFA,EAAkBnW,MAAM,CACtBoW,eAAe,IAEjBb,GAAI1iB,oBAAoBsjB,GAAmB/Y,OAE/C,CACA,YAAAqY,GAEE,OAAOriB,GAAe1T,KAAK21B,GAAqB9nB,KAAKoS,QACvD,CACA,cAAAkW,GACE,OAAOtoB,KAAKkoB,eAAe/1B,MAAKzN,GAASsb,KAAKooB,cAAc1jC,MAAW,IACzE,CACA,qBAAAujC,CAAsBxjC,EAAQshB,GAC5B/F,KAAK8oB,yBAAyBrkC,EAAQ,OAAQ,WAC9C,IAAK,MAAMC,KAASqhB,EAClB/F,KAAK+oB,6BAA6BrkC,EAEtC,CACA,4BAAAqkC,CAA6BrkC,GAC3BA,EAAQsb,KAAKgpB,iBAAiBtkC,GAC9B,MAAMukC,EAAWjpB,KAAKooB,cAAc1jC,GAC9BwkC,EAAYlpB,KAAKmpB,iBAAiBzkC,GACxCA,EAAMtD,aAAa,gBAAiB6nC,GAChCC,IAAcxkC,GAChBsb,KAAK8oB,yBAAyBI,EAAW,OAAQ,gBAE9CD,GACHvkC,EAAMtD,aAAa,WAAY,MAEjC4e,KAAK8oB,yBAAyBpkC,EAAO,OAAQ,OAG7Csb,KAAKopB,mCAAmC1kC,EAC1C,CACA,kCAAA0kC,CAAmC1kC,GACjC,MAAM6H,EAASsZ,GAAec,uBAAuBjiB,GAChD6H,IAGLyT,KAAK8oB,yBAAyBv8B,EAAQ,OAAQ,YAC1C7H,EAAMyV,IACR6F,KAAK8oB,yBAAyBv8B,EAAQ,kBAAmB,GAAG7H,EAAMyV,MAEtE,CACA,eAAAwuB,CAAgBppC,EAAS8pC,GACvB,MAAMH,EAAYlpB,KAAKmpB,iBAAiB5pC,GACxC,IAAK2pC,EAAU7tB,UAAU7W,SApKN,YAqKjB,OAEF,MAAMmjB,EAAS,CAAC5N,EAAUoa,KACxB,MAAM50B,EAAUsmB,GAAeC,QAAQ/L,EAAUmvB,GAC7C3pC,GACFA,EAAQ8b,UAAUsM,OAAOwM,EAAWkV,EACtC,EAEF1hB,EAAOggB,GAA0BH,IACjC7f,EA5K2B,iBA4KI+f,IAC/BwB,EAAU9nC,aAAa,gBAAiBioC,EAC1C,CACA,wBAAAP,CAAyBvpC,EAASwC,EAAWpE,GACtC4B,EAAQgc,aAAaxZ,IACxBxC,EAAQ6B,aAAaW,EAAWpE,EAEpC,CACA,aAAAyqC,CAAc9Y,GACZ,OAAOA,EAAKjU,UAAU7W,SAASgjC,GACjC,CAGA,gBAAAwB,CAAiB1Z,GACf,OAAOA,EAAKtJ,QAAQ8hB,IAAuBxY,EAAOzJ,GAAeC,QAAQgiB,GAAqBxY,EAChG,CAGA,gBAAA6Z,CAAiB7Z,GACf,OAAOA,EAAKtU,QA5LO,gCA4LoBsU,CACzC,CAGA,sBAAO7S,CAAgBqH,GACrB,OAAO9D,KAAKwH,MAAK,WACf,MAAMnd,EAAO29B,GAAI1iB,oBAAoBtF,MACrC,GAAsB,iBAAX8D,EAAX,CAGA,QAAqB/K,IAAjB1O,EAAKyZ,IAAyBA,EAAOrC,WAAW,MAAmB,gBAAXqC,EAC1D,MAAM,IAAIU,UAAU,oBAAoBV,MAE1
CzZ,EAAKyZ,IAJL,CAKF,GACF,EAOFvD,GAAac,GAAGhc,SAAU0hC,GAAsBc,IAAsB,SAAUzoB,GAC1E,CAAC,IAAK,QAAQgC,SAASpB,KAAKiH,UAC9B7H,EAAMkD,iBAEJpH,GAAW8E,OAGfgoB,GAAI1iB,oBAAoBtF,MAAM6P,MAChC,IAKAtP,GAAac,GAAGzhB,OAAQqnC,IAAqB,KAC3C,IAAK,MAAM1nC,KAAWsmB,GAAe1T,KAAK41B,IACxCC,GAAI1iB,oBAAoB/lB,EAC1B,IAMF4c,GAAmB6rB,IAcnB,MAEMhjB,GAAY,YACZskB,GAAkB,YAAYtkB,KAC9BukB,GAAiB,WAAWvkB,KAC5BwkB,GAAgB,UAAUxkB,KAC1BykB,GAAiB,WAAWzkB,KAC5B0kB,GAAa,OAAO1kB,KACpB2kB,GAAe,SAAS3kB,KACxB4kB,GAAa,OAAO5kB,KACpB6kB,GAAc,QAAQ7kB,KAEtB8kB,GAAkB,OAClBC,GAAkB,OAClBC,GAAqB,UACrBrmB,GAAc,CAClByc,UAAW,UACX6J,SAAU,UACV1J,MAAO,UAEH7c,GAAU,CACd0c,WAAW,EACX6J,UAAU,EACV1J,MAAO,KAOT,MAAM2J,WAAcxlB,GAClB,WAAAP,CAAY5kB,EAASukB,GACnBa,MAAMplB,EAASukB,GACf9D,KAAK4gB,SAAW,KAChB5gB,KAAKmqB,sBAAuB,EAC5BnqB,KAAKoqB,yBAA0B,EAC/BpqB,KAAKkhB,eACP,CAGA,kBAAWxd,GACT,OAAOA,EACT,CACA,sBAAWC,GACT,OAAOA,EACT,CACA,eAAWpH,GACT,MA/CS,OAgDX,CAGA,IAAAsT,GACoBtP,GAAaqB,QAAQ5B,KAAK4E,SAAUglB,IACxC5nB,mBAGdhC,KAAKqqB,gBACDrqB,KAAK6E,QAAQub,WACfpgB,KAAK4E,SAASvJ,UAAU5E,IA/CN,QAsDpBuJ,KAAK4E,SAASvJ,UAAU1B,OAAOmwB,IAC/BjuB,GAAOmE,KAAK4E,UACZ5E,KAAK4E,SAASvJ,UAAU5E,IAAIszB,GAAiBC,IAC7ChqB,KAAKmF,gBARY,KACfnF,KAAK4E,SAASvJ,UAAU1B,OAAOqwB,IAC/BzpB,GAAaqB,QAAQ5B,KAAK4E,SAAUilB,IACpC7pB,KAAKsqB,oBAAoB,GAKGtqB,KAAK4E,SAAU5E,KAAK6E,QAAQub,WAC5D,CACA,IAAAxQ,GACO5P,KAAKuqB,YAGQhqB,GAAaqB,QAAQ5B,KAAK4E,SAAU8kB,IACxC1nB,mBAQdhC,KAAK4E,SAASvJ,UAAU5E,IAAIuzB,IAC5BhqB,KAAKmF,gBANY,KACfnF,KAAK4E,SAASvJ,UAAU5E,IAAIqzB,IAC5B9pB,KAAK4E,SAASvJ,UAAU1B,OAAOqwB,GAAoBD,IACnDxpB,GAAaqB,QAAQ5B,KAAK4E,SAAU+kB,GAAa,GAGrB3pB,KAAK4E,SAAU5E,KAAK6E,QAAQub,YAC5D,CACA,OAAArb,GACE/E,KAAKqqB,gBACDrqB,KAAKuqB,WACPvqB,KAAK4E,SAASvJ,UAAU1B,OAAOowB,IAEjCplB,MAAMI,SACR,CACA,OAAAwlB,GACE,OAAOvqB,KAAK4E,SAASvJ,UAAU7W,SAASulC,GAC1C,CAIA,kBAAAO,GACOtqB,KAAK6E,QAAQolB,WAGdjqB,KAAKmqB,sBAAwBnqB,KAAKoqB,0BAGtCpqB,KAAK4gB,SAAW/iB,YAAW,KACzBmC,KAAK4P,MAAM,GACV5P,KAAK6E,QAAQ0b,QAClB,CACA,cAAAiK,CAAeprB,EAAOqrB,GACpB,OAAQrrB,EAAMqB,MACZ,IAAK,YACL,IAAK,WAEDT,KAAKmqB,qBAAuBM,EAC5B,MAEJ,IAAK,UACL,IAAK,WAEDzqB,KAAKoqB,wBAA0BK,EAIrC,GAAIA,EAEF,YADAzqB,KAAKqqB,gBAGP,MAAM5c,EAAcrO,EAAMU,cACtBE,KAAK4E,WAAa6I,GAAezN,KAAK4E,SAASpgB,SAASipB,IAG5DzN,KAAKsqB,oBACP,CACA,aAAApJ,GACE3gB,GAAac,GAAGrB,KAAK4E,SAAU0kB,IAAiBlqB,GAASY,KAAKwqB,eAAeprB,GAAO,KACpFmB,GAAac,GAAGrB,KAAK4E,SAAU2kB,IAAgBnqB,GAASY,KAAKwqB,eAAeprB,GAAO,KACnFmB,GAAac,GAAGrB,KAAK4E,SAAU4kB,IAAepqB,GAASY,KAAKwqB,eAAeprB,GAAO,KAClFmB,GAAac,GAAGrB,KAAK4E,SAAU6kB,IAAgBrqB,GAASY,KAAKwqB,eAAeprB,GAAO,IACrF,CACA,aAAAirB,GACEnd,aAAalN,KAAK4gB,UAClB5gB,KAAK4gB,SAAW,IAClB,CAGA,sBAAOnkB,CAAgBqH,GACrB,OAAO9D,KAAKwH,MAAK,WACf,MAAMnd,EAAO6/B,GAAM5kB,oBAAoBtF,KAAM8D,GAC7C,GAAsB,iBAAXA,EAAqB,CAC9B,QAA4B,IAAjBzZ,EAAKyZ,GACd,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,GAAQ9D,KACf,CACF,GACF,ECr0IK,SAAS0qB,GAAcruB,GACD,WAAvBhX,SAASuX,WAAyBP,IACjChX,SAASyF,iBAAiB,mBAAoBuR,EACrD,CDy0IAwK,GAAqBqjB,IAMrB/tB,GAAmB+tB,IEpyInBQ,IAzCA,WAC2B,GAAGt4B,MAAM5U,KAChC6H,SAAS+a,iBAAiB,+BAETtd,KAAI,SAAU6nC,GAC/B,OAAO,IAAI,GAAkBA,EAAkB,CAC7CpK,MAAO,CAAE1Q,KAAM,IAAKD,KAAM,MAE9B,GACF,IAiCA8a,IA5BA,WACYrlC,SAASm9B,eAAe,mBAC9B13B,iBAAiB,SAAS,WAC5BzF,SAAS6G,KAAKT,UAAY,EAC1BpG,SAASC,gBAAgBmG,UAAY,CACvC,GACF,IAuBAi/B,IArBA,WACE,IAAIE,EAAMvlC,SAASm9B,eAAe,mBAC9BqI,EAASxlC,SACVylC,uBAAuB,aAAa,GACpCxnC,wBACH1D,OAAOkL,iBAAiB,UAAU,WAC5BkV,KAAK+qB,UAAY/qB,KAAKgrB,SAAWhrB,KAAKgrB,QAAUH,EAAOjtC,OACzDgtC,EAAI7pC,MAAMgxB,QAAU,QAEpB6Y,EAAI7pC,MAAMgxB,QAAU,OAEtB/R,KAAK+qB,UAAY/qB,KAAKgrB,OACxB,GACF,IAUAprC,OAAOqrC,UAAY","sources":["webpack://pydata_sphinx_theme/webpack/bootstrap","webpack://pydata_sphinx_theme/webpack/runt
ime/define property getters","webpack://pydata_sphinx_theme/webpack/runtime/hasOwnProperty shorthand","webpack://pydata_sphinx_theme/webpack/runtime/make namespace object","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/enums.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getNodeName.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getWindow.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/instanceOf.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/applyStyles.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getBasePlacement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/math.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/userAgent.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/isLayoutViewport.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getBoundingClientRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getLayoutRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/contains.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getComputedStyle.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/isTableElement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getDocumentElement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getParentNode.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getOffsetParent.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getMainAxisFromPlacement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/within.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/mergePaddingObject.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getFreshSideObject.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/expandToHashMap.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/arrow.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getVariation.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/computeStyles.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/eventListeners.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getOppositePlacement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getOppositeVariationPlacement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getWindowScroll.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getWindowScrollBarX.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/isScrollParent.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getScrollParent.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/listScrollParents.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/rectToClientRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getClippingRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getViewportRect.js","webpack://pydata_sphinx_theme/./node_modules/@poppe
rjs/core/lib/dom-utils/getDocumentRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/computeOffsets.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/detectOverflow.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/flip.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/computeAutoPlacement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/hide.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/offset.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/popperOffsets.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/preventOverflow.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getAltAxis.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getCompositeRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getNodeScroll.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getHTMLElementScroll.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/orderModifiers.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/createPopper.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/debounce.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/mergeByName.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/popper.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/popper-lite.js","webpack://pydata_sphinx_theme/./node_modules/bootstrap/dist/js/bootstrap.esm.js","webpack://pydata_sphinx_theme/./src/pydata_sphinx_theme/assets/scripts/mixin.js","webpack://pydata_sphinx_theme/./src/pydata_sphinx_theme/assets/scripts/bootstrap.js"],"sourcesContent":["// The require scope\nvar __webpack_require__ = {};\n\n","// define getter functions for harmony exports\n__webpack_require__.d = (exports, definition) => {\n\tfor(var key in definition) {\n\t\tif(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {\n\t\t\tObject.defineProperty(exports, key, { enumerable: true, get: definition[key] });\n\t\t}\n\t}\n};","__webpack_require__.o = (obj, prop) => (Object.prototype.hasOwnProperty.call(obj, prop))","// define __esModule on exports\n__webpack_require__.r = (exports) => {\n\tif(typeof Symbol !== 'undefined' && Symbol.toStringTag) {\n\t\tObject.defineProperty(exports, Symbol.toStringTag, { value: 'Module' });\n\t}\n\tObject.defineProperty(exports, '__esModule', { value: true });\n};","export var top = 'top';\nexport var bottom = 'bottom';\nexport var right = 'right';\nexport var left = 'left';\nexport var auto = 'auto';\nexport var basePlacements = [top, bottom, right, left];\nexport var start = 'start';\nexport var end = 'end';\nexport var clippingParents = 'clippingParents';\nexport var viewport = 'viewport';\nexport var popper = 'popper';\nexport var reference = 'reference';\nexport var variationPlacements = /*#__PURE__*/basePlacements.reduce(function (acc, placement) {\n return acc.concat([placement + \"-\" + start, placement + \"-\" + end]);\n}, []);\nexport var placements = /*#__PURE__*/[].concat(basePlacements, [auto]).reduce(function (acc, placement) {\n return acc.concat([placement, placement + \"-\" + start, placement + \"-\" + end]);\n}, []); // modifiers that need to read the DOM\n\nexport var beforeRead = 'beforeRead';\nexport var read = 
'read';\nexport var afterRead = 'afterRead'; // pure-logic modifiers\n\nexport var beforeMain = 'beforeMain';\nexport var main = 'main';\nexport var afterMain = 'afterMain'; // modifier with the purpose to write to the DOM (or write into a framework state)\n\nexport var beforeWrite = 'beforeWrite';\nexport var write = 'write';\nexport var afterWrite = 'afterWrite';\nexport var modifierPhases = [beforeRead, read, afterRead, beforeMain, main, afterMain, beforeWrite, write, afterWrite];","export default function getNodeName(element) {\n return element ? (element.nodeName || '').toLowerCase() : null;\n}","export default function getWindow(node) {\n if (node == null) {\n return window;\n }\n\n if (node.toString() !== '[object Window]') {\n var ownerDocument = node.ownerDocument;\n return ownerDocument ? ownerDocument.defaultView || window : window;\n }\n\n return node;\n}","import getWindow from \"./getWindow.js\";\n\nfunction isElement(node) {\n var OwnElement = getWindow(node).Element;\n return node instanceof OwnElement || node instanceof Element;\n}\n\nfunction isHTMLElement(node) {\n var OwnElement = getWindow(node).HTMLElement;\n return node instanceof OwnElement || node instanceof HTMLElement;\n}\n\nfunction isShadowRoot(node) {\n // IE 11 has no ShadowRoot\n if (typeof ShadowRoot === 'undefined') {\n return false;\n }\n\n var OwnElement = getWindow(node).ShadowRoot;\n return node instanceof OwnElement || node instanceof ShadowRoot;\n}\n\nexport { isElement, isHTMLElement, isShadowRoot };","import getNodeName from \"../dom-utils/getNodeName.js\";\nimport { isHTMLElement } from \"../dom-utils/instanceOf.js\"; // This modifier takes the styles prepared by the `computeStyles` modifier\n// and applies them to the HTMLElements such as popper and arrow\n\nfunction applyStyles(_ref) {\n var state = _ref.state;\n Object.keys(state.elements).forEach(function (name) {\n var style = state.styles[name] || {};\n var attributes = state.attributes[name] || {};\n var element = state.elements[name]; // arrow is optional + virtual elements\n\n if (!isHTMLElement(element) || !getNodeName(element)) {\n return;\n } // Flow doesn't support to extend this property, but it's the most\n // effective way to apply styles to an HTMLElement\n // $FlowFixMe[cannot-write]\n\n\n Object.assign(element.style, style);\n Object.keys(attributes).forEach(function (name) {\n var value = attributes[name];\n\n if (value === false) {\n element.removeAttribute(name);\n } else {\n element.setAttribute(name, value === true ? '' : value);\n }\n });\n });\n}\n\nfunction effect(_ref2) {\n var state = _ref2.state;\n var initialStyles = {\n popper: {\n position: state.options.strategy,\n left: '0',\n top: '0',\n margin: '0'\n },\n arrow: {\n position: 'absolute'\n },\n reference: {}\n };\n Object.assign(state.elements.popper.style, initialStyles.popper);\n state.styles = initialStyles;\n\n if (state.elements.arrow) {\n Object.assign(state.elements.arrow.style, initialStyles.arrow);\n }\n\n return function () {\n Object.keys(state.elements).forEach(function (name) {\n var element = state.elements[name];\n var attributes = state.attributes[name] || {};\n var styleProperties = Object.keys(state.styles.hasOwnProperty(name) ? 
state.styles[name] : initialStyles[name]); // Set all values to an empty string to unset them\n\n var style = styleProperties.reduce(function (style, property) {\n style[property] = '';\n return style;\n }, {}); // arrow is optional + virtual elements\n\n if (!isHTMLElement(element) || !getNodeName(element)) {\n return;\n }\n\n Object.assign(element.style, style);\n Object.keys(attributes).forEach(function (attribute) {\n element.removeAttribute(attribute);\n });\n });\n };\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'applyStyles',\n enabled: true,\n phase: 'write',\n fn: applyStyles,\n effect: effect,\n requires: ['computeStyles']\n};","import { auto } from \"../enums.js\";\nexport default function getBasePlacement(placement) {\n return placement.split('-')[0];\n}","export var max = Math.max;\nexport var min = Math.min;\nexport var round = Math.round;","export default function getUAString() {\n var uaData = navigator.userAgentData;\n\n if (uaData != null && uaData.brands && Array.isArray(uaData.brands)) {\n return uaData.brands.map(function (item) {\n return item.brand + \"/\" + item.version;\n }).join(' ');\n }\n\n return navigator.userAgent;\n}","import getUAString from \"../utils/userAgent.js\";\nexport default function isLayoutViewport() {\n return !/^((?!chrome|android).)*safari/i.test(getUAString());\n}","import { isElement, isHTMLElement } from \"./instanceOf.js\";\nimport { round } from \"../utils/math.js\";\nimport getWindow from \"./getWindow.js\";\nimport isLayoutViewport from \"./isLayoutViewport.js\";\nexport default function getBoundingClientRect(element, includeScale, isFixedStrategy) {\n if (includeScale === void 0) {\n includeScale = false;\n }\n\n if (isFixedStrategy === void 0) {\n isFixedStrategy = false;\n }\n\n var clientRect = element.getBoundingClientRect();\n var scaleX = 1;\n var scaleY = 1;\n\n if (includeScale && isHTMLElement(element)) {\n scaleX = element.offsetWidth > 0 ? round(clientRect.width) / element.offsetWidth || 1 : 1;\n scaleY = element.offsetHeight > 0 ? round(clientRect.height) / element.offsetHeight || 1 : 1;\n }\n\n var _ref = isElement(element) ? getWindow(element) : window,\n visualViewport = _ref.visualViewport;\n\n var addVisualOffsets = !isLayoutViewport() && isFixedStrategy;\n var x = (clientRect.left + (addVisualOffsets && visualViewport ? visualViewport.offsetLeft : 0)) / scaleX;\n var y = (clientRect.top + (addVisualOffsets && visualViewport ? visualViewport.offsetTop : 0)) / scaleY;\n var width = clientRect.width / scaleX;\n var height = clientRect.height / scaleY;\n return {\n width: width,\n height: height,\n top: y,\n right: x + width,\n bottom: y + height,\n left: x,\n x: x,\n y: y\n };\n}","import getBoundingClientRect from \"./getBoundingClientRect.js\"; // Returns the layout rect of an element relative to its offsetParent. 
Layout\n// means it doesn't take into account transforms.\n\nexport default function getLayoutRect(element) {\n var clientRect = getBoundingClientRect(element); // Use the clientRect sizes if it's not been transformed.\n // Fixes https://github.com/popperjs/popper-core/issues/1223\n\n var width = element.offsetWidth;\n var height = element.offsetHeight;\n\n if (Math.abs(clientRect.width - width) <= 1) {\n width = clientRect.width;\n }\n\n if (Math.abs(clientRect.height - height) <= 1) {\n height = clientRect.height;\n }\n\n return {\n x: element.offsetLeft,\n y: element.offsetTop,\n width: width,\n height: height\n };\n}","import { isShadowRoot } from \"./instanceOf.js\";\nexport default function contains(parent, child) {\n var rootNode = child.getRootNode && child.getRootNode(); // First, attempt with faster native method\n\n if (parent.contains(child)) {\n return true;\n } // then fallback to custom implementation with Shadow DOM support\n else if (rootNode && isShadowRoot(rootNode)) {\n var next = child;\n\n do {\n if (next && parent.isSameNode(next)) {\n return true;\n } // $FlowFixMe[prop-missing]: need a better way to handle this...\n\n\n next = next.parentNode || next.host;\n } while (next);\n } // Give up, the result is false\n\n\n return false;\n}","import getWindow from \"./getWindow.js\";\nexport default function getComputedStyle(element) {\n return getWindow(element).getComputedStyle(element);\n}","import getNodeName from \"./getNodeName.js\";\nexport default function isTableElement(element) {\n return ['table', 'td', 'th'].indexOf(getNodeName(element)) >= 0;\n}","import { isElement } from \"./instanceOf.js\";\nexport default function getDocumentElement(element) {\n // $FlowFixMe[incompatible-return]: assume body is always available\n return ((isElement(element) ? element.ownerDocument : // $FlowFixMe[prop-missing]\n element.document) || window.document).documentElement;\n}","import getNodeName from \"./getNodeName.js\";\nimport getDocumentElement from \"./getDocumentElement.js\";\nimport { isShadowRoot } from \"./instanceOf.js\";\nexport default function getParentNode(element) {\n if (getNodeName(element) === 'html') {\n return element;\n }\n\n return (// this is a quicker (but less type safe) way to save quite some bytes from the bundle\n // $FlowFixMe[incompatible-return]\n // $FlowFixMe[prop-missing]\n element.assignedSlot || // step into the shadow DOM of the parent of a slotted node\n element.parentNode || ( // DOM Element detected\n isShadowRoot(element) ? 
element.host : null) || // ShadowRoot detected\n // $FlowFixMe[incompatible-call]: HTMLElement is a Node\n getDocumentElement(element) // fallback\n\n );\n}","import getWindow from \"./getWindow.js\";\nimport getNodeName from \"./getNodeName.js\";\nimport getComputedStyle from \"./getComputedStyle.js\";\nimport { isHTMLElement, isShadowRoot } from \"./instanceOf.js\";\nimport isTableElement from \"./isTableElement.js\";\nimport getParentNode from \"./getParentNode.js\";\nimport getUAString from \"../utils/userAgent.js\";\n\nfunction getTrueOffsetParent(element) {\n if (!isHTMLElement(element) || // https://github.com/popperjs/popper-core/issues/837\n getComputedStyle(element).position === 'fixed') {\n return null;\n }\n\n return element.offsetParent;\n} // `.offsetParent` reports `null` for fixed elements, while absolute elements\n// return the containing block\n\n\nfunction getContainingBlock(element) {\n var isFirefox = /firefox/i.test(getUAString());\n var isIE = /Trident/i.test(getUAString());\n\n if (isIE && isHTMLElement(element)) {\n // In IE 9, 10 and 11 fixed elements containing block is always established by the viewport\n var elementCss = getComputedStyle(element);\n\n if (elementCss.position === 'fixed') {\n return null;\n }\n }\n\n var currentNode = getParentNode(element);\n\n if (isShadowRoot(currentNode)) {\n currentNode = currentNode.host;\n }\n\n while (isHTMLElement(currentNode) && ['html', 'body'].indexOf(getNodeName(currentNode)) < 0) {\n var css = getComputedStyle(currentNode); // This is non-exhaustive but covers the most common CSS properties that\n // create a containing block.\n // https://developer.mozilla.org/en-US/docs/Web/CSS/Containing_block#identifying_the_containing_block\n\n if (css.transform !== 'none' || css.perspective !== 'none' || css.contain === 'paint' || ['transform', 'perspective'].indexOf(css.willChange) !== -1 || isFirefox && css.willChange === 'filter' || isFirefox && css.filter && css.filter !== 'none') {\n return currentNode;\n } else {\n currentNode = currentNode.parentNode;\n }\n }\n\n return null;\n} // Gets the closest ancestor positioned element. Handles some edge cases,\n// such as table ancestors and cross browser bugs.\n\n\nexport default function getOffsetParent(element) {\n var window = getWindow(element);\n var offsetParent = getTrueOffsetParent(element);\n\n while (offsetParent && isTableElement(offsetParent) && getComputedStyle(offsetParent).position === 'static') {\n offsetParent = getTrueOffsetParent(offsetParent);\n }\n\n if (offsetParent && (getNodeName(offsetParent) === 'html' || getNodeName(offsetParent) === 'body' && getComputedStyle(offsetParent).position === 'static')) {\n return window;\n }\n\n return offsetParent || getContainingBlock(element) || window;\n}","export default function getMainAxisFromPlacement(placement) {\n return ['top', 'bottom'].indexOf(placement) >= 0 ? 'x' : 'y';\n}","import { max as mathMax, min as mathMin } from \"./math.js\";\nexport function within(min, value, max) {\n return mathMax(min, mathMin(value, max));\n}\nexport function withinMaxClamp(min, value, max) {\n var v = within(min, value, max);\n return v > max ? 
max : v;\n}","import getFreshSideObject from \"./getFreshSideObject.js\";\nexport default function mergePaddingObject(paddingObject) {\n return Object.assign({}, getFreshSideObject(), paddingObject);\n}","export default function getFreshSideObject() {\n return {\n top: 0,\n right: 0,\n bottom: 0,\n left: 0\n };\n}","export default function expandToHashMap(value, keys) {\n return keys.reduce(function (hashMap, key) {\n hashMap[key] = value;\n return hashMap;\n }, {});\n}","import getBasePlacement from \"../utils/getBasePlacement.js\";\nimport getLayoutRect from \"../dom-utils/getLayoutRect.js\";\nimport contains from \"../dom-utils/contains.js\";\nimport getOffsetParent from \"../dom-utils/getOffsetParent.js\";\nimport getMainAxisFromPlacement from \"../utils/getMainAxisFromPlacement.js\";\nimport { within } from \"../utils/within.js\";\nimport mergePaddingObject from \"../utils/mergePaddingObject.js\";\nimport expandToHashMap from \"../utils/expandToHashMap.js\";\nimport { left, right, basePlacements, top, bottom } from \"../enums.js\"; // eslint-disable-next-line import/no-unused-modules\n\nvar toPaddingObject = function toPaddingObject(padding, state) {\n padding = typeof padding === 'function' ? padding(Object.assign({}, state.rects, {\n placement: state.placement\n })) : padding;\n return mergePaddingObject(typeof padding !== 'number' ? padding : expandToHashMap(padding, basePlacements));\n};\n\nfunction arrow(_ref) {\n var _state$modifiersData$;\n\n var state = _ref.state,\n name = _ref.name,\n options = _ref.options;\n var arrowElement = state.elements.arrow;\n var popperOffsets = state.modifiersData.popperOffsets;\n var basePlacement = getBasePlacement(state.placement);\n var axis = getMainAxisFromPlacement(basePlacement);\n var isVertical = [left, right].indexOf(basePlacement) >= 0;\n var len = isVertical ? 'height' : 'width';\n\n if (!arrowElement || !popperOffsets) {\n return;\n }\n\n var paddingObject = toPaddingObject(options.padding, state);\n var arrowRect = getLayoutRect(arrowElement);\n var minProp = axis === 'y' ? top : left;\n var maxProp = axis === 'y' ? bottom : right;\n var endDiff = state.rects.reference[len] + state.rects.reference[axis] - popperOffsets[axis] - state.rects.popper[len];\n var startDiff = popperOffsets[axis] - state.rects.reference[axis];\n var arrowOffsetParent = getOffsetParent(arrowElement);\n var clientSize = arrowOffsetParent ? axis === 'y' ? arrowOffsetParent.clientHeight || 0 : arrowOffsetParent.clientWidth || 0 : 0;\n var centerToReference = endDiff / 2 - startDiff / 2; // Make sure the arrow doesn't overflow the popper if the center point is\n // outside of the popper bounds\n\n var min = paddingObject[minProp];\n var max = clientSize - arrowRect[len] - paddingObject[maxProp];\n var center = clientSize / 2 - arrowRect[len] / 2 + centerToReference;\n var offset = within(min, center, max); // Prevents breaking syntax highlighting...\n\n var axisProp = axis;\n state.modifiersData[name] = (_state$modifiersData$ = {}, _state$modifiersData$[axisProp] = offset, _state$modifiersData$.centerOffset = offset - center, _state$modifiersData$);\n}\n\nfunction effect(_ref2) {\n var state = _ref2.state,\n options = _ref2.options;\n var _options$element = options.element,\n arrowElement = _options$element === void 0 ? 
'[data-popper-arrow]' : _options$element;\n\n if (arrowElement == null) {\n return;\n } // CSS selector\n\n\n if (typeof arrowElement === 'string') {\n arrowElement = state.elements.popper.querySelector(arrowElement);\n\n if (!arrowElement) {\n return;\n }\n }\n\n if (!contains(state.elements.popper, arrowElement)) {\n return;\n }\n\n state.elements.arrow = arrowElement;\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'arrow',\n enabled: true,\n phase: 'main',\n fn: arrow,\n effect: effect,\n requires: ['popperOffsets'],\n requiresIfExists: ['preventOverflow']\n};","export default function getVariation(placement) {\n return placement.split('-')[1];\n}","import { top, left, right, bottom, end } from \"../enums.js\";\nimport getOffsetParent from \"../dom-utils/getOffsetParent.js\";\nimport getWindow from \"../dom-utils/getWindow.js\";\nimport getDocumentElement from \"../dom-utils/getDocumentElement.js\";\nimport getComputedStyle from \"../dom-utils/getComputedStyle.js\";\nimport getBasePlacement from \"../utils/getBasePlacement.js\";\nimport getVariation from \"../utils/getVariation.js\";\nimport { round } from \"../utils/math.js\"; // eslint-disable-next-line import/no-unused-modules\n\nvar unsetSides = {\n top: 'auto',\n right: 'auto',\n bottom: 'auto',\n left: 'auto'\n}; // Round the offsets to the nearest suitable subpixel based on the DPR.\n// Zooming can change the DPR, but it seems to report a value that will\n// cleanly divide the values into the appropriate subpixels.\n\nfunction roundOffsetsByDPR(_ref, win) {\n var x = _ref.x,\n y = _ref.y;\n var dpr = win.devicePixelRatio || 1;\n return {\n x: round(x * dpr) / dpr || 0,\n y: round(y * dpr) / dpr || 0\n };\n}\n\nexport function mapToStyles(_ref2) {\n var _Object$assign2;\n\n var popper = _ref2.popper,\n popperRect = _ref2.popperRect,\n placement = _ref2.placement,\n variation = _ref2.variation,\n offsets = _ref2.offsets,\n position = _ref2.position,\n gpuAcceleration = _ref2.gpuAcceleration,\n adaptive = _ref2.adaptive,\n roundOffsets = _ref2.roundOffsets,\n isFixed = _ref2.isFixed;\n var _offsets$x = offsets.x,\n x = _offsets$x === void 0 ? 0 : _offsets$x,\n _offsets$y = offsets.y,\n y = _offsets$y === void 0 ? 0 : _offsets$y;\n\n var _ref3 = typeof roundOffsets === 'function' ? roundOffsets({\n x: x,\n y: y\n }) : {\n x: x,\n y: y\n };\n\n x = _ref3.x;\n y = _ref3.y;\n var hasX = offsets.hasOwnProperty('x');\n var hasY = offsets.hasOwnProperty('y');\n var sideX = left;\n var sideY = top;\n var win = window;\n\n if (adaptive) {\n var offsetParent = getOffsetParent(popper);\n var heightProp = 'clientHeight';\n var widthProp = 'clientWidth';\n\n if (offsetParent === getWindow(popper)) {\n offsetParent = getDocumentElement(popper);\n\n if (getComputedStyle(offsetParent).position !== 'static' && position === 'absolute') {\n heightProp = 'scrollHeight';\n widthProp = 'scrollWidth';\n }\n } // $FlowFixMe[incompatible-cast]: force type refinement, we compare offsetParent with window above, but Flow doesn't detect it\n\n\n offsetParent = offsetParent;\n\n if (placement === top || (placement === left || placement === right) && variation === end) {\n sideY = bottom;\n var offsetY = isFixed && offsetParent === win && win.visualViewport ? win.visualViewport.height : // $FlowFixMe[prop-missing]\n offsetParent[heightProp];\n y -= offsetY - popperRect.height;\n y *= gpuAcceleration ? 
1 : -1;\n }\n\n if (placement === left || (placement === top || placement === bottom) && variation === end) {\n sideX = right;\n var offsetX = isFixed && offsetParent === win && win.visualViewport ? win.visualViewport.width : // $FlowFixMe[prop-missing]\n offsetParent[widthProp];\n x -= offsetX - popperRect.width;\n x *= gpuAcceleration ? 1 : -1;\n }\n }\n\n var commonStyles = Object.assign({\n position: position\n }, adaptive && unsetSides);\n\n var _ref4 = roundOffsets === true ? roundOffsetsByDPR({\n x: x,\n y: y\n }, getWindow(popper)) : {\n x: x,\n y: y\n };\n\n x = _ref4.x;\n y = _ref4.y;\n\n if (gpuAcceleration) {\n var _Object$assign;\n\n return Object.assign({}, commonStyles, (_Object$assign = {}, _Object$assign[sideY] = hasY ? '0' : '', _Object$assign[sideX] = hasX ? '0' : '', _Object$assign.transform = (win.devicePixelRatio || 1) <= 1 ? \"translate(\" + x + \"px, \" + y + \"px)\" : \"translate3d(\" + x + \"px, \" + y + \"px, 0)\", _Object$assign));\n }\n\n return Object.assign({}, commonStyles, (_Object$assign2 = {}, _Object$assign2[sideY] = hasY ? y + \"px\" : '', _Object$assign2[sideX] = hasX ? x + \"px\" : '', _Object$assign2.transform = '', _Object$assign2));\n}\n\nfunction computeStyles(_ref5) {\n var state = _ref5.state,\n options = _ref5.options;\n var _options$gpuAccelerat = options.gpuAcceleration,\n gpuAcceleration = _options$gpuAccelerat === void 0 ? true : _options$gpuAccelerat,\n _options$adaptive = options.adaptive,\n adaptive = _options$adaptive === void 0 ? true : _options$adaptive,\n _options$roundOffsets = options.roundOffsets,\n roundOffsets = _options$roundOffsets === void 0 ? true : _options$roundOffsets;\n var commonStyles = {\n placement: getBasePlacement(state.placement),\n variation: getVariation(state.placement),\n popper: state.elements.popper,\n popperRect: state.rects.popper,\n gpuAcceleration: gpuAcceleration,\n isFixed: state.options.strategy === 'fixed'\n };\n\n if (state.modifiersData.popperOffsets != null) {\n state.styles.popper = Object.assign({}, state.styles.popper, mapToStyles(Object.assign({}, commonStyles, {\n offsets: state.modifiersData.popperOffsets,\n position: state.options.strategy,\n adaptive: adaptive,\n roundOffsets: roundOffsets\n })));\n }\n\n if (state.modifiersData.arrow != null) {\n state.styles.arrow = Object.assign({}, state.styles.arrow, mapToStyles(Object.assign({}, commonStyles, {\n offsets: state.modifiersData.arrow,\n position: 'absolute',\n adaptive: false,\n roundOffsets: roundOffsets\n })));\n }\n\n state.attributes.popper = Object.assign({}, state.attributes.popper, {\n 'data-popper-placement': state.placement\n });\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'computeStyles',\n enabled: true,\n phase: 'beforeWrite',\n fn: computeStyles,\n data: {}\n};","import getWindow from \"../dom-utils/getWindow.js\"; // eslint-disable-next-line import/no-unused-modules\n\nvar passive = {\n passive: true\n};\n\nfunction effect(_ref) {\n var state = _ref.state,\n instance = _ref.instance,\n options = _ref.options;\n var _options$scroll = options.scroll,\n scroll = _options$scroll === void 0 ? true : _options$scroll,\n _options$resize = options.resize,\n resize = _options$resize === void 0 ? 
true : _options$resize;\n var window = getWindow(state.elements.popper);\n var scrollParents = [].concat(state.scrollParents.reference, state.scrollParents.popper);\n\n if (scroll) {\n scrollParents.forEach(function (scrollParent) {\n scrollParent.addEventListener('scroll', instance.update, passive);\n });\n }\n\n if (resize) {\n window.addEventListener('resize', instance.update, passive);\n }\n\n return function () {\n if (scroll) {\n scrollParents.forEach(function (scrollParent) {\n scrollParent.removeEventListener('scroll', instance.update, passive);\n });\n }\n\n if (resize) {\n window.removeEventListener('resize', instance.update, passive);\n }\n };\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'eventListeners',\n enabled: true,\n phase: 'write',\n fn: function fn() {},\n effect: effect,\n data: {}\n};","var hash = {\n left: 'right',\n right: 'left',\n bottom: 'top',\n top: 'bottom'\n};\nexport default function getOppositePlacement(placement) {\n return placement.replace(/left|right|bottom|top/g, function (matched) {\n return hash[matched];\n });\n}","var hash = {\n start: 'end',\n end: 'start'\n};\nexport default function getOppositeVariationPlacement(placement) {\n return placement.replace(/start|end/g, function (matched) {\n return hash[matched];\n });\n}","import getWindow from \"./getWindow.js\";\nexport default function getWindowScroll(node) {\n var win = getWindow(node);\n var scrollLeft = win.pageXOffset;\n var scrollTop = win.pageYOffset;\n return {\n scrollLeft: scrollLeft,\n scrollTop: scrollTop\n };\n}","import getBoundingClientRect from \"./getBoundingClientRect.js\";\nimport getDocumentElement from \"./getDocumentElement.js\";\nimport getWindowScroll from \"./getWindowScroll.js\";\nexport default function getWindowScrollBarX(element) {\n // If has a CSS width greater than the viewport, then this will be\n // incorrect for RTL.\n // Popper 1 is broken in this case and never had a bug report so let's assume\n // it's not an issue. I don't think anyone ever specifies width on \n // anyway.\n // Browsers where the left scrollbar doesn't cause an issue report `0` for\n // this (e.g. 
Edge 2019, IE11, Safari)\n return getBoundingClientRect(getDocumentElement(element)).left + getWindowScroll(element).scrollLeft;\n}","import getComputedStyle from \"./getComputedStyle.js\";\nexport default function isScrollParent(element) {\n // Firefox wants us to check `-x` and `-y` variations as well\n var _getComputedStyle = getComputedStyle(element),\n overflow = _getComputedStyle.overflow,\n overflowX = _getComputedStyle.overflowX,\n overflowY = _getComputedStyle.overflowY;\n\n return /auto|scroll|overlay|hidden/.test(overflow + overflowY + overflowX);\n}","import getParentNode from \"./getParentNode.js\";\nimport isScrollParent from \"./isScrollParent.js\";\nimport getNodeName from \"./getNodeName.js\";\nimport { isHTMLElement } from \"./instanceOf.js\";\nexport default function getScrollParent(node) {\n if (['html', 'body', '#document'].indexOf(getNodeName(node)) >= 0) {\n // $FlowFixMe[incompatible-return]: assume body is always available\n return node.ownerDocument.body;\n }\n\n if (isHTMLElement(node) && isScrollParent(node)) {\n return node;\n }\n\n return getScrollParent(getParentNode(node));\n}","import getScrollParent from \"./getScrollParent.js\";\nimport getParentNode from \"./getParentNode.js\";\nimport getWindow from \"./getWindow.js\";\nimport isScrollParent from \"./isScrollParent.js\";\n/*\ngiven a DOM element, return the list of all scroll parents, up the list of ancesors\nuntil we get to the top window object. This list is what we attach scroll listeners\nto, because if any of these parent elements scroll, we'll need to re-calculate the\nreference element's position.\n*/\n\nexport default function listScrollParents(element, list) {\n var _element$ownerDocumen;\n\n if (list === void 0) {\n list = [];\n }\n\n var scrollParent = getScrollParent(element);\n var isBody = scrollParent === ((_element$ownerDocumen = element.ownerDocument) == null ? void 0 : _element$ownerDocumen.body);\n var win = getWindow(scrollParent);\n var target = isBody ? [win].concat(win.visualViewport || [], isScrollParent(scrollParent) ? scrollParent : []) : scrollParent;\n var updatedList = list.concat(target);\n return isBody ? 
updatedList : // $FlowFixMe[incompatible-call]: isBody tells us target will be an HTMLElement here\n updatedList.concat(listScrollParents(getParentNode(target)));\n}","export default function rectToClientRect(rect) {\n return Object.assign({}, rect, {\n left: rect.x,\n top: rect.y,\n right: rect.x + rect.width,\n bottom: rect.y + rect.height\n });\n}","import { viewport } from \"../enums.js\";\nimport getViewportRect from \"./getViewportRect.js\";\nimport getDocumentRect from \"./getDocumentRect.js\";\nimport listScrollParents from \"./listScrollParents.js\";\nimport getOffsetParent from \"./getOffsetParent.js\";\nimport getDocumentElement from \"./getDocumentElement.js\";\nimport getComputedStyle from \"./getComputedStyle.js\";\nimport { isElement, isHTMLElement } from \"./instanceOf.js\";\nimport getBoundingClientRect from \"./getBoundingClientRect.js\";\nimport getParentNode from \"./getParentNode.js\";\nimport contains from \"./contains.js\";\nimport getNodeName from \"./getNodeName.js\";\nimport rectToClientRect from \"../utils/rectToClientRect.js\";\nimport { max, min } from \"../utils/math.js\";\n\nfunction getInnerBoundingClientRect(element, strategy) {\n var rect = getBoundingClientRect(element, false, strategy === 'fixed');\n rect.top = rect.top + element.clientTop;\n rect.left = rect.left + element.clientLeft;\n rect.bottom = rect.top + element.clientHeight;\n rect.right = rect.left + element.clientWidth;\n rect.width = element.clientWidth;\n rect.height = element.clientHeight;\n rect.x = rect.left;\n rect.y = rect.top;\n return rect;\n}\n\nfunction getClientRectFromMixedType(element, clippingParent, strategy) {\n return clippingParent === viewport ? rectToClientRect(getViewportRect(element, strategy)) : isElement(clippingParent) ? getInnerBoundingClientRect(clippingParent, strategy) : rectToClientRect(getDocumentRect(getDocumentElement(element)));\n} // A \"clipping parent\" is an overflowable container with the characteristic of\n// clipping (or hiding) overflowing elements with a position different from\n// `initial`\n\n\nfunction getClippingParents(element) {\n var clippingParents = listScrollParents(getParentNode(element));\n var canEscapeClipping = ['absolute', 'fixed'].indexOf(getComputedStyle(element).position) >= 0;\n var clipperElement = canEscapeClipping && isHTMLElement(element) ? getOffsetParent(element) : element;\n\n if (!isElement(clipperElement)) {\n return [];\n } // $FlowFixMe[incompatible-return]: https://github.com/facebook/flow/issues/1414\n\n\n return clippingParents.filter(function (clippingParent) {\n return isElement(clippingParent) && contains(clippingParent, clipperElement) && getNodeName(clippingParent) !== 'body';\n });\n} // Gets the maximum area that the element is visible in due to any number of\n// clipping parents\n\n\nexport default function getClippingRect(element, boundary, rootBoundary, strategy) {\n var mainClippingParents = boundary === 'clippingParents' ? 
getClippingParents(element) : [].concat(boundary);\n var clippingParents = [].concat(mainClippingParents, [rootBoundary]);\n var firstClippingParent = clippingParents[0];\n var clippingRect = clippingParents.reduce(function (accRect, clippingParent) {\n var rect = getClientRectFromMixedType(element, clippingParent, strategy);\n accRect.top = max(rect.top, accRect.top);\n accRect.right = min(rect.right, accRect.right);\n accRect.bottom = min(rect.bottom, accRect.bottom);\n accRect.left = max(rect.left, accRect.left);\n return accRect;\n }, getClientRectFromMixedType(element, firstClippingParent, strategy));\n clippingRect.width = clippingRect.right - clippingRect.left;\n clippingRect.height = clippingRect.bottom - clippingRect.top;\n clippingRect.x = clippingRect.left;\n clippingRect.y = clippingRect.top;\n return clippingRect;\n}","import getWindow from \"./getWindow.js\";\nimport getDocumentElement from \"./getDocumentElement.js\";\nimport getWindowScrollBarX from \"./getWindowScrollBarX.js\";\nimport isLayoutViewport from \"./isLayoutViewport.js\";\nexport default function getViewportRect(element, strategy) {\n var win = getWindow(element);\n var html = getDocumentElement(element);\n var visualViewport = win.visualViewport;\n var width = html.clientWidth;\n var height = html.clientHeight;\n var x = 0;\n var y = 0;\n\n if (visualViewport) {\n width = visualViewport.width;\n height = visualViewport.height;\n var layoutViewport = isLayoutViewport();\n\n if (layoutViewport || !layoutViewport && strategy === 'fixed') {\n x = visualViewport.offsetLeft;\n y = visualViewport.offsetTop;\n }\n }\n\n return {\n width: width,\n height: height,\n x: x + getWindowScrollBarX(element),\n y: y\n };\n}","import getDocumentElement from \"./getDocumentElement.js\";\nimport getComputedStyle from \"./getComputedStyle.js\";\nimport getWindowScrollBarX from \"./getWindowScrollBarX.js\";\nimport getWindowScroll from \"./getWindowScroll.js\";\nimport { max } from \"../utils/math.js\"; // Gets the entire size of the scrollable document area, even extending outside\n// of the `` and `` rect bounds if horizontally scrollable\n\nexport default function getDocumentRect(element) {\n var _element$ownerDocumen;\n\n var html = getDocumentElement(element);\n var winScroll = getWindowScroll(element);\n var body = (_element$ownerDocumen = element.ownerDocument) == null ? void 0 : _element$ownerDocumen.body;\n var width = max(html.scrollWidth, html.clientWidth, body ? body.scrollWidth : 0, body ? body.clientWidth : 0);\n var height = max(html.scrollHeight, html.clientHeight, body ? body.scrollHeight : 0, body ? body.clientHeight : 0);\n var x = -winScroll.scrollLeft + getWindowScrollBarX(element);\n var y = -winScroll.scrollTop;\n\n if (getComputedStyle(body || html).direction === 'rtl') {\n x += max(html.clientWidth, body ? body.clientWidth : 0) - width;\n }\n\n return {\n width: width,\n height: height,\n x: x,\n y: y\n };\n}","import getBasePlacement from \"./getBasePlacement.js\";\nimport getVariation from \"./getVariation.js\";\nimport getMainAxisFromPlacement from \"./getMainAxisFromPlacement.js\";\nimport { top, right, bottom, left, start, end } from \"../enums.js\";\nexport default function computeOffsets(_ref) {\n var reference = _ref.reference,\n element = _ref.element,\n placement = _ref.placement;\n var basePlacement = placement ? getBasePlacement(placement) : null;\n var variation = placement ? 
getVariation(placement) : null;\n var commonX = reference.x + reference.width / 2 - element.width / 2;\n var commonY = reference.y + reference.height / 2 - element.height / 2;\n var offsets;\n\n switch (basePlacement) {\n case top:\n offsets = {\n x: commonX,\n y: reference.y - element.height\n };\n break;\n\n case bottom:\n offsets = {\n x: commonX,\n y: reference.y + reference.height\n };\n break;\n\n case right:\n offsets = {\n x: reference.x + reference.width,\n y: commonY\n };\n break;\n\n case left:\n offsets = {\n x: reference.x - element.width,\n y: commonY\n };\n break;\n\n default:\n offsets = {\n x: reference.x,\n y: reference.y\n };\n }\n\n var mainAxis = basePlacement ? getMainAxisFromPlacement(basePlacement) : null;\n\n if (mainAxis != null) {\n var len = mainAxis === 'y' ? 'height' : 'width';\n\n switch (variation) {\n case start:\n offsets[mainAxis] = offsets[mainAxis] - (reference[len] / 2 - element[len] / 2);\n break;\n\n case end:\n offsets[mainAxis] = offsets[mainAxis] + (reference[len] / 2 - element[len] / 2);\n break;\n\n default:\n }\n }\n\n return offsets;\n}","import getClippingRect from \"../dom-utils/getClippingRect.js\";\nimport getDocumentElement from \"../dom-utils/getDocumentElement.js\";\nimport getBoundingClientRect from \"../dom-utils/getBoundingClientRect.js\";\nimport computeOffsets from \"./computeOffsets.js\";\nimport rectToClientRect from \"./rectToClientRect.js\";\nimport { clippingParents, reference, popper, bottom, top, right, basePlacements, viewport } from \"../enums.js\";\nimport { isElement } from \"../dom-utils/instanceOf.js\";\nimport mergePaddingObject from \"./mergePaddingObject.js\";\nimport expandToHashMap from \"./expandToHashMap.js\"; // eslint-disable-next-line import/no-unused-modules\n\nexport default function detectOverflow(state, options) {\n if (options === void 0) {\n options = {};\n }\n\n var _options = options,\n _options$placement = _options.placement,\n placement = _options$placement === void 0 ? state.placement : _options$placement,\n _options$strategy = _options.strategy,\n strategy = _options$strategy === void 0 ? state.strategy : _options$strategy,\n _options$boundary = _options.boundary,\n boundary = _options$boundary === void 0 ? clippingParents : _options$boundary,\n _options$rootBoundary = _options.rootBoundary,\n rootBoundary = _options$rootBoundary === void 0 ? viewport : _options$rootBoundary,\n _options$elementConte = _options.elementContext,\n elementContext = _options$elementConte === void 0 ? popper : _options$elementConte,\n _options$altBoundary = _options.altBoundary,\n altBoundary = _options$altBoundary === void 0 ? false : _options$altBoundary,\n _options$padding = _options.padding,\n padding = _options$padding === void 0 ? 0 : _options$padding;\n var paddingObject = mergePaddingObject(typeof padding !== 'number' ? padding : expandToHashMap(padding, basePlacements));\n var altContext = elementContext === popper ? reference : popper;\n var popperRect = state.rects.popper;\n var element = state.elements[altBoundary ? altContext : elementContext];\n var clippingClientRect = getClippingRect(isElement(element) ? 
element : element.contextElement || getDocumentElement(state.elements.popper), boundary, rootBoundary, strategy);\n var referenceClientRect = getBoundingClientRect(state.elements.reference);\n var popperOffsets = computeOffsets({\n reference: referenceClientRect,\n element: popperRect,\n strategy: 'absolute',\n placement: placement\n });\n var popperClientRect = rectToClientRect(Object.assign({}, popperRect, popperOffsets));\n var elementClientRect = elementContext === popper ? popperClientRect : referenceClientRect; // positive = overflowing the clipping rect\n // 0 or negative = within the clipping rect\n\n var overflowOffsets = {\n top: clippingClientRect.top - elementClientRect.top + paddingObject.top,\n bottom: elementClientRect.bottom - clippingClientRect.bottom + paddingObject.bottom,\n left: clippingClientRect.left - elementClientRect.left + paddingObject.left,\n right: elementClientRect.right - clippingClientRect.right + paddingObject.right\n };\n var offsetData = state.modifiersData.offset; // Offsets can be applied only to the popper element\n\n if (elementContext === popper && offsetData) {\n var offset = offsetData[placement];\n Object.keys(overflowOffsets).forEach(function (key) {\n var multiply = [right, bottom].indexOf(key) >= 0 ? 1 : -1;\n var axis = [top, bottom].indexOf(key) >= 0 ? 'y' : 'x';\n overflowOffsets[key] += offset[axis] * multiply;\n });\n }\n\n return overflowOffsets;\n}","import getOppositePlacement from \"../utils/getOppositePlacement.js\";\nimport getBasePlacement from \"../utils/getBasePlacement.js\";\nimport getOppositeVariationPlacement from \"../utils/getOppositeVariationPlacement.js\";\nimport detectOverflow from \"../utils/detectOverflow.js\";\nimport computeAutoPlacement from \"../utils/computeAutoPlacement.js\";\nimport { bottom, top, start, right, left, auto } from \"../enums.js\";\nimport getVariation from \"../utils/getVariation.js\"; // eslint-disable-next-line import/no-unused-modules\n\nfunction getExpandedFallbackPlacements(placement) {\n if (getBasePlacement(placement) === auto) {\n return [];\n }\n\n var oppositePlacement = getOppositePlacement(placement);\n return [getOppositeVariationPlacement(placement), oppositePlacement, getOppositeVariationPlacement(oppositePlacement)];\n}\n\nfunction flip(_ref) {\n var state = _ref.state,\n options = _ref.options,\n name = _ref.name;\n\n if (state.modifiersData[name]._skip) {\n return;\n }\n\n var _options$mainAxis = options.mainAxis,\n checkMainAxis = _options$mainAxis === void 0 ? true : _options$mainAxis,\n _options$altAxis = options.altAxis,\n checkAltAxis = _options$altAxis === void 0 ? true : _options$altAxis,\n specifiedFallbackPlacements = options.fallbackPlacements,\n padding = options.padding,\n boundary = options.boundary,\n rootBoundary = options.rootBoundary,\n altBoundary = options.altBoundary,\n _options$flipVariatio = options.flipVariations,\n flipVariations = _options$flipVariatio === void 0 ? true : _options$flipVariatio,\n allowedAutoPlacements = options.allowedAutoPlacements;\n var preferredPlacement = state.options.placement;\n var basePlacement = getBasePlacement(preferredPlacement);\n var isBasePlacement = basePlacement === preferredPlacement;\n var fallbackPlacements = specifiedFallbackPlacements || (isBasePlacement || !flipVariations ? 
[getOppositePlacement(preferredPlacement)] : getExpandedFallbackPlacements(preferredPlacement));\n var placements = [preferredPlacement].concat(fallbackPlacements).reduce(function (acc, placement) {\n return acc.concat(getBasePlacement(placement) === auto ? computeAutoPlacement(state, {\n placement: placement,\n boundary: boundary,\n rootBoundary: rootBoundary,\n padding: padding,\n flipVariations: flipVariations,\n allowedAutoPlacements: allowedAutoPlacements\n }) : placement);\n }, []);\n var referenceRect = state.rects.reference;\n var popperRect = state.rects.popper;\n var checksMap = new Map();\n var makeFallbackChecks = true;\n var firstFittingPlacement = placements[0];\n\n for (var i = 0; i < placements.length; i++) {\n var placement = placements[i];\n\n var _basePlacement = getBasePlacement(placement);\n\n var isStartVariation = getVariation(placement) === start;\n var isVertical = [top, bottom].indexOf(_basePlacement) >= 0;\n var len = isVertical ? 'width' : 'height';\n var overflow = detectOverflow(state, {\n placement: placement,\n boundary: boundary,\n rootBoundary: rootBoundary,\n altBoundary: altBoundary,\n padding: padding\n });\n var mainVariationSide = isVertical ? isStartVariation ? right : left : isStartVariation ? bottom : top;\n\n if (referenceRect[len] > popperRect[len]) {\n mainVariationSide = getOppositePlacement(mainVariationSide);\n }\n\n var altVariationSide = getOppositePlacement(mainVariationSide);\n var checks = [];\n\n if (checkMainAxis) {\n checks.push(overflow[_basePlacement] <= 0);\n }\n\n if (checkAltAxis) {\n checks.push(overflow[mainVariationSide] <= 0, overflow[altVariationSide] <= 0);\n }\n\n if (checks.every(function (check) {\n return check;\n })) {\n firstFittingPlacement = placement;\n makeFallbackChecks = false;\n break;\n }\n\n checksMap.set(placement, checks);\n }\n\n if (makeFallbackChecks) {\n // `2` may be desired in some cases – research later\n var numberOfChecks = flipVariations ? 3 : 1;\n\n var _loop = function _loop(_i) {\n var fittingPlacement = placements.find(function (placement) {\n var checks = checksMap.get(placement);\n\n if (checks) {\n return checks.slice(0, _i).every(function (check) {\n return check;\n });\n }\n });\n\n if (fittingPlacement) {\n firstFittingPlacement = fittingPlacement;\n return \"break\";\n }\n };\n\n for (var _i = numberOfChecks; _i > 0; _i--) {\n var _ret = _loop(_i);\n\n if (_ret === \"break\") break;\n }\n }\n\n if (state.placement !== firstFittingPlacement) {\n state.modifiersData[name]._skip = true;\n state.placement = firstFittingPlacement;\n state.reset = true;\n }\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'flip',\n enabled: true,\n phase: 'main',\n fn: flip,\n requiresIfExists: ['offset'],\n data: {\n _skip: false\n }\n};","import getVariation from \"./getVariation.js\";\nimport { variationPlacements, basePlacements, placements as allPlacements } from \"../enums.js\";\nimport detectOverflow from \"./detectOverflow.js\";\nimport getBasePlacement from \"./getBasePlacement.js\";\nexport default function computeAutoPlacement(state, options) {\n if (options === void 0) {\n options = {};\n }\n\n var _options = options,\n placement = _options.placement,\n boundary = _options.boundary,\n rootBoundary = _options.rootBoundary,\n padding = _options.padding,\n flipVariations = _options.flipVariations,\n _options$allowedAutoP = _options.allowedAutoPlacements,\n allowedAutoPlacements = _options$allowedAutoP === void 0 ? 
allPlacements : _options$allowedAutoP;\n var variation = getVariation(placement);\n var placements = variation ? flipVariations ? variationPlacements : variationPlacements.filter(function (placement) {\n return getVariation(placement) === variation;\n }) : basePlacements;\n var allowedPlacements = placements.filter(function (placement) {\n return allowedAutoPlacements.indexOf(placement) >= 0;\n });\n\n if (allowedPlacements.length === 0) {\n allowedPlacements = placements;\n } // $FlowFixMe[incompatible-type]: Flow seems to have problems with two array unions...\n\n\n var overflows = allowedPlacements.reduce(function (acc, placement) {\n acc[placement] = detectOverflow(state, {\n placement: placement,\n boundary: boundary,\n rootBoundary: rootBoundary,\n padding: padding\n })[getBasePlacement(placement)];\n return acc;\n }, {});\n return Object.keys(overflows).sort(function (a, b) {\n return overflows[a] - overflows[b];\n });\n}","import { top, bottom, left, right } from \"../enums.js\";\nimport detectOverflow from \"../utils/detectOverflow.js\";\n\nfunction getSideOffsets(overflow, rect, preventedOffsets) {\n if (preventedOffsets === void 0) {\n preventedOffsets = {\n x: 0,\n y: 0\n };\n }\n\n return {\n top: overflow.top - rect.height - preventedOffsets.y,\n right: overflow.right - rect.width + preventedOffsets.x,\n bottom: overflow.bottom - rect.height + preventedOffsets.y,\n left: overflow.left - rect.width - preventedOffsets.x\n };\n}\n\nfunction isAnySideFullyClipped(overflow) {\n return [top, right, bottom, left].some(function (side) {\n return overflow[side] >= 0;\n });\n}\n\nfunction hide(_ref) {\n var state = _ref.state,\n name = _ref.name;\n var referenceRect = state.rects.reference;\n var popperRect = state.rects.popper;\n var preventedOffsets = state.modifiersData.preventOverflow;\n var referenceOverflow = detectOverflow(state, {\n elementContext: 'reference'\n });\n var popperAltOverflow = detectOverflow(state, {\n altBoundary: true\n });\n var referenceClippingOffsets = getSideOffsets(referenceOverflow, referenceRect);\n var popperEscapeOffsets = getSideOffsets(popperAltOverflow, popperRect, preventedOffsets);\n var isReferenceHidden = isAnySideFullyClipped(referenceClippingOffsets);\n var hasPopperEscaped = isAnySideFullyClipped(popperEscapeOffsets);\n state.modifiersData[name] = {\n referenceClippingOffsets: referenceClippingOffsets,\n popperEscapeOffsets: popperEscapeOffsets,\n isReferenceHidden: isReferenceHidden,\n hasPopperEscaped: hasPopperEscaped\n };\n state.attributes.popper = Object.assign({}, state.attributes.popper, {\n 'data-popper-reference-hidden': isReferenceHidden,\n 'data-popper-escaped': hasPopperEscaped\n });\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'hide',\n enabled: true,\n phase: 'main',\n requiresIfExists: ['preventOverflow'],\n fn: hide\n};","import getBasePlacement from \"../utils/getBasePlacement.js\";\nimport { top, left, right, placements } from \"../enums.js\"; // eslint-disable-next-line import/no-unused-modules\n\nexport function distanceAndSkiddingToXY(placement, rects, offset) {\n var basePlacement = getBasePlacement(placement);\n var invertDistance = [left, top].indexOf(basePlacement) >= 0 ? -1 : 1;\n\n var _ref = typeof offset === 'function' ? offset(Object.assign({}, rects, {\n placement: placement\n })) : offset,\n skidding = _ref[0],\n distance = _ref[1];\n\n skidding = skidding || 0;\n distance = (distance || 0) * invertDistance;\n return [left, right].indexOf(basePlacement) >= 0 ? 
{\n x: distance,\n y: skidding\n } : {\n x: skidding,\n y: distance\n };\n}\n\nfunction offset(_ref2) {\n var state = _ref2.state,\n options = _ref2.options,\n name = _ref2.name;\n var _options$offset = options.offset,\n offset = _options$offset === void 0 ? [0, 0] : _options$offset;\n var data = placements.reduce(function (acc, placement) {\n acc[placement] = distanceAndSkiddingToXY(placement, state.rects, offset);\n return acc;\n }, {});\n var _data$state$placement = data[state.placement],\n x = _data$state$placement.x,\n y = _data$state$placement.y;\n\n if (state.modifiersData.popperOffsets != null) {\n state.modifiersData.popperOffsets.x += x;\n state.modifiersData.popperOffsets.y += y;\n }\n\n state.modifiersData[name] = data;\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'offset',\n enabled: true,\n phase: 'main',\n requires: ['popperOffsets'],\n fn: offset\n};","import computeOffsets from \"../utils/computeOffsets.js\";\n\nfunction popperOffsets(_ref) {\n var state = _ref.state,\n name = _ref.name;\n // Offsets are the actual position the popper needs to have to be\n // properly positioned near its reference element\n // This is the most basic placement, and will be adjusted by\n // the modifiers in the next step\n state.modifiersData[name] = computeOffsets({\n reference: state.rects.reference,\n element: state.rects.popper,\n strategy: 'absolute',\n placement: state.placement\n });\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'popperOffsets',\n enabled: true,\n phase: 'read',\n fn: popperOffsets,\n data: {}\n};","import { top, left, right, bottom, start } from \"../enums.js\";\nimport getBasePlacement from \"../utils/getBasePlacement.js\";\nimport getMainAxisFromPlacement from \"../utils/getMainAxisFromPlacement.js\";\nimport getAltAxis from \"../utils/getAltAxis.js\";\nimport { within, withinMaxClamp } from \"../utils/within.js\";\nimport getLayoutRect from \"../dom-utils/getLayoutRect.js\";\nimport getOffsetParent from \"../dom-utils/getOffsetParent.js\";\nimport detectOverflow from \"../utils/detectOverflow.js\";\nimport getVariation from \"../utils/getVariation.js\";\nimport getFreshSideObject from \"../utils/getFreshSideObject.js\";\nimport { min as mathMin, max as mathMax } from \"../utils/math.js\";\n\nfunction preventOverflow(_ref) {\n var state = _ref.state,\n options = _ref.options,\n name = _ref.name;\n var _options$mainAxis = options.mainAxis,\n checkMainAxis = _options$mainAxis === void 0 ? true : _options$mainAxis,\n _options$altAxis = options.altAxis,\n checkAltAxis = _options$altAxis === void 0 ? false : _options$altAxis,\n boundary = options.boundary,\n rootBoundary = options.rootBoundary,\n altBoundary = options.altBoundary,\n padding = options.padding,\n _options$tether = options.tether,\n tether = _options$tether === void 0 ? true : _options$tether,\n _options$tetherOffset = options.tetherOffset,\n tetherOffset = _options$tetherOffset === void 0 ? 
0 : _options$tetherOffset;\n var overflow = detectOverflow(state, {\n boundary: boundary,\n rootBoundary: rootBoundary,\n padding: padding,\n altBoundary: altBoundary\n });\n var basePlacement = getBasePlacement(state.placement);\n var variation = getVariation(state.placement);\n var isBasePlacement = !variation;\n var mainAxis = getMainAxisFromPlacement(basePlacement);\n var altAxis = getAltAxis(mainAxis);\n var popperOffsets = state.modifiersData.popperOffsets;\n var referenceRect = state.rects.reference;\n var popperRect = state.rects.popper;\n var tetherOffsetValue = typeof tetherOffset === 'function' ? tetherOffset(Object.assign({}, state.rects, {\n placement: state.placement\n })) : tetherOffset;\n var normalizedTetherOffsetValue = typeof tetherOffsetValue === 'number' ? {\n mainAxis: tetherOffsetValue,\n altAxis: tetherOffsetValue\n } : Object.assign({\n mainAxis: 0,\n altAxis: 0\n }, tetherOffsetValue);\n var offsetModifierState = state.modifiersData.offset ? state.modifiersData.offset[state.placement] : null;\n var data = {\n x: 0,\n y: 0\n };\n\n if (!popperOffsets) {\n return;\n }\n\n if (checkMainAxis) {\n var _offsetModifierState$;\n\n var mainSide = mainAxis === 'y' ? top : left;\n var altSide = mainAxis === 'y' ? bottom : right;\n var len = mainAxis === 'y' ? 'height' : 'width';\n var offset = popperOffsets[mainAxis];\n var min = offset + overflow[mainSide];\n var max = offset - overflow[altSide];\n var additive = tether ? -popperRect[len] / 2 : 0;\n var minLen = variation === start ? referenceRect[len] : popperRect[len];\n var maxLen = variation === start ? -popperRect[len] : -referenceRect[len]; // We need to include the arrow in the calculation so the arrow doesn't go\n // outside the reference bounds\n\n var arrowElement = state.elements.arrow;\n var arrowRect = tether && arrowElement ? getLayoutRect(arrowElement) : {\n width: 0,\n height: 0\n };\n var arrowPaddingObject = state.modifiersData['arrow#persistent'] ? state.modifiersData['arrow#persistent'].padding : getFreshSideObject();\n var arrowPaddingMin = arrowPaddingObject[mainSide];\n var arrowPaddingMax = arrowPaddingObject[altSide]; // If the reference length is smaller than the arrow length, we don't want\n // to include its full size in the calculation. If the reference is small\n // and near the edge of a boundary, the popper can overflow even if the\n // reference is not overflowing as well (e.g. virtual elements with no\n // width or height)\n\n var arrowLen = within(0, referenceRect[len], arrowRect[len]);\n var minOffset = isBasePlacement ? referenceRect[len] / 2 - additive - arrowLen - arrowPaddingMin - normalizedTetherOffsetValue.mainAxis : minLen - arrowLen - arrowPaddingMin - normalizedTetherOffsetValue.mainAxis;\n var maxOffset = isBasePlacement ? -referenceRect[len] / 2 + additive + arrowLen + arrowPaddingMax + normalizedTetherOffsetValue.mainAxis : maxLen + arrowLen + arrowPaddingMax + normalizedTetherOffsetValue.mainAxis;\n var arrowOffsetParent = state.elements.arrow && getOffsetParent(state.elements.arrow);\n var clientOffset = arrowOffsetParent ? mainAxis === 'y' ? arrowOffsetParent.clientTop || 0 : arrowOffsetParent.clientLeft || 0 : 0;\n var offsetModifierValue = (_offsetModifierState$ = offsetModifierState == null ? void 0 : offsetModifierState[mainAxis]) != null ? _offsetModifierState$ : 0;\n var tetherMin = offset + minOffset - offsetModifierValue - clientOffset;\n var tetherMax = offset + maxOffset - offsetModifierValue;\n var preventedOffset = within(tether ? 
mathMin(min, tetherMin) : min, offset, tether ? mathMax(max, tetherMax) : max);\n popperOffsets[mainAxis] = preventedOffset;\n data[mainAxis] = preventedOffset - offset;\n }\n\n if (checkAltAxis) {\n var _offsetModifierState$2;\n\n var _mainSide = mainAxis === 'x' ? top : left;\n\n var _altSide = mainAxis === 'x' ? bottom : right;\n\n var _offset = popperOffsets[altAxis];\n\n var _len = altAxis === 'y' ? 'height' : 'width';\n\n var _min = _offset + overflow[_mainSide];\n\n var _max = _offset - overflow[_altSide];\n\n var isOriginSide = [top, left].indexOf(basePlacement) !== -1;\n\n var _offsetModifierValue = (_offsetModifierState$2 = offsetModifierState == null ? void 0 : offsetModifierState[altAxis]) != null ? _offsetModifierState$2 : 0;\n\n var _tetherMin = isOriginSide ? _min : _offset - referenceRect[_len] - popperRect[_len] - _offsetModifierValue + normalizedTetherOffsetValue.altAxis;\n\n var _tetherMax = isOriginSide ? _offset + referenceRect[_len] + popperRect[_len] - _offsetModifierValue - normalizedTetherOffsetValue.altAxis : _max;\n\n var _preventedOffset = tether && isOriginSide ? withinMaxClamp(_tetherMin, _offset, _tetherMax) : within(tether ? _tetherMin : _min, _offset, tether ? _tetherMax : _max);\n\n popperOffsets[altAxis] = _preventedOffset;\n data[altAxis] = _preventedOffset - _offset;\n }\n\n state.modifiersData[name] = data;\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'preventOverflow',\n enabled: true,\n phase: 'main',\n fn: preventOverflow,\n requiresIfExists: ['offset']\n};","export default function getAltAxis(axis) {\n return axis === 'x' ? 'y' : 'x';\n}","import getBoundingClientRect from \"./getBoundingClientRect.js\";\nimport getNodeScroll from \"./getNodeScroll.js\";\nimport getNodeName from \"./getNodeName.js\";\nimport { isHTMLElement } from \"./instanceOf.js\";\nimport getWindowScrollBarX from \"./getWindowScrollBarX.js\";\nimport getDocumentElement from \"./getDocumentElement.js\";\nimport isScrollParent from \"./isScrollParent.js\";\nimport { round } from \"../utils/math.js\";\n\nfunction isElementScaled(element) {\n var rect = element.getBoundingClientRect();\n var scaleX = round(rect.width) / element.offsetWidth || 1;\n var scaleY = round(rect.height) / element.offsetHeight || 1;\n return scaleX !== 1 || scaleY !== 1;\n} // Returns the composite rect of an element relative to its offsetParent.\n// Composite means it takes into account transforms as well as layout.\n\n\nexport default function getCompositeRect(elementOrVirtualElement, offsetParent, isFixed) {\n if (isFixed === void 0) {\n isFixed = false;\n }\n\n var isOffsetParentAnElement = isHTMLElement(offsetParent);\n var offsetParentIsScaled = isHTMLElement(offsetParent) && isElementScaled(offsetParent);\n var documentElement = getDocumentElement(offsetParent);\n var rect = getBoundingClientRect(elementOrVirtualElement, offsetParentIsScaled, isFixed);\n var scroll = {\n scrollLeft: 0,\n scrollTop: 0\n };\n var offsets = {\n x: 0,\n y: 0\n };\n\n if (isOffsetParentAnElement || !isOffsetParentAnElement && !isFixed) {\n if (getNodeName(offsetParent) !== 'body' || // https://github.com/popperjs/popper-core/issues/1078\n isScrollParent(documentElement)) {\n scroll = getNodeScroll(offsetParent);\n }\n\n if (isHTMLElement(offsetParent)) {\n offsets = getBoundingClientRect(offsetParent, true);\n offsets.x += offsetParent.clientLeft;\n offsets.y += offsetParent.clientTop;\n } else if (documentElement) {\n offsets.x = getWindowScrollBarX(documentElement);\n }\n }\n\n 
return {\n x: rect.left + scroll.scrollLeft - offsets.x,\n y: rect.top + scroll.scrollTop - offsets.y,\n width: rect.width,\n height: rect.height\n };\n}","import getWindowScroll from \"./getWindowScroll.js\";\nimport getWindow from \"./getWindow.js\";\nimport { isHTMLElement } from \"./instanceOf.js\";\nimport getHTMLElementScroll from \"./getHTMLElementScroll.js\";\nexport default function getNodeScroll(node) {\n if (node === getWindow(node) || !isHTMLElement(node)) {\n return getWindowScroll(node);\n } else {\n return getHTMLElementScroll(node);\n }\n}","export default function getHTMLElementScroll(element) {\n return {\n scrollLeft: element.scrollLeft,\n scrollTop: element.scrollTop\n };\n}","import { modifierPhases } from \"../enums.js\"; // source: https://stackoverflow.com/questions/49875255\n\nfunction order(modifiers) {\n var map = new Map();\n var visited = new Set();\n var result = [];\n modifiers.forEach(function (modifier) {\n map.set(modifier.name, modifier);\n }); // On visiting object, check for its dependencies and visit them recursively\n\n function sort(modifier) {\n visited.add(modifier.name);\n var requires = [].concat(modifier.requires || [], modifier.requiresIfExists || []);\n requires.forEach(function (dep) {\n if (!visited.has(dep)) {\n var depModifier = map.get(dep);\n\n if (depModifier) {\n sort(depModifier);\n }\n }\n });\n result.push(modifier);\n }\n\n modifiers.forEach(function (modifier) {\n if (!visited.has(modifier.name)) {\n // check for visited object\n sort(modifier);\n }\n });\n return result;\n}\n\nexport default function orderModifiers(modifiers) {\n // order based on dependencies\n var orderedModifiers = order(modifiers); // order based on phase\n\n return modifierPhases.reduce(function (acc, phase) {\n return acc.concat(orderedModifiers.filter(function (modifier) {\n return modifier.phase === phase;\n }));\n }, []);\n}","import getCompositeRect from \"./dom-utils/getCompositeRect.js\";\nimport getLayoutRect from \"./dom-utils/getLayoutRect.js\";\nimport listScrollParents from \"./dom-utils/listScrollParents.js\";\nimport getOffsetParent from \"./dom-utils/getOffsetParent.js\";\nimport orderModifiers from \"./utils/orderModifiers.js\";\nimport debounce from \"./utils/debounce.js\";\nimport mergeByName from \"./utils/mergeByName.js\";\nimport detectOverflow from \"./utils/detectOverflow.js\";\nimport { isElement } from \"./dom-utils/instanceOf.js\";\nvar DEFAULT_OPTIONS = {\n placement: 'bottom',\n modifiers: [],\n strategy: 'absolute'\n};\n\nfunction areValidElements() {\n for (var _len = arguments.length, args = new Array(_len), _key = 0; _key < _len; _key++) {\n args[_key] = arguments[_key];\n }\n\n return !args.some(function (element) {\n return !(element && typeof element.getBoundingClientRect === 'function');\n });\n}\n\nexport function popperGenerator(generatorOptions) {\n if (generatorOptions === void 0) {\n generatorOptions = {};\n }\n\n var _generatorOptions = generatorOptions,\n _generatorOptions$def = _generatorOptions.defaultModifiers,\n defaultModifiers = _generatorOptions$def === void 0 ? [] : _generatorOptions$def,\n _generatorOptions$def2 = _generatorOptions.defaultOptions,\n defaultOptions = _generatorOptions$def2 === void 0 ? 
DEFAULT_OPTIONS : _generatorOptions$def2;\n return function createPopper(reference, popper, options) {\n if (options === void 0) {\n options = defaultOptions;\n }\n\n var state = {\n placement: 'bottom',\n orderedModifiers: [],\n options: Object.assign({}, DEFAULT_OPTIONS, defaultOptions),\n modifiersData: {},\n elements: {\n reference: reference,\n popper: popper\n },\n attributes: {},\n styles: {}\n };\n var effectCleanupFns = [];\n var isDestroyed = false;\n var instance = {\n state: state,\n setOptions: function setOptions(setOptionsAction) {\n var options = typeof setOptionsAction === 'function' ? setOptionsAction(state.options) : setOptionsAction;\n cleanupModifierEffects();\n state.options = Object.assign({}, defaultOptions, state.options, options);\n state.scrollParents = {\n reference: isElement(reference) ? listScrollParents(reference) : reference.contextElement ? listScrollParents(reference.contextElement) : [],\n popper: listScrollParents(popper)\n }; // Orders the modifiers based on their dependencies and `phase`\n // properties\n\n var orderedModifiers = orderModifiers(mergeByName([].concat(defaultModifiers, state.options.modifiers))); // Strip out disabled modifiers\n\n state.orderedModifiers = orderedModifiers.filter(function (m) {\n return m.enabled;\n });\n runModifierEffects();\n return instance.update();\n },\n // Sync update – it will always be executed, even if not necessary. This\n // is useful for low frequency updates where sync behavior simplifies the\n // logic.\n // For high frequency updates (e.g. `resize` and `scroll` events), always\n // prefer the async Popper#update method\n forceUpdate: function forceUpdate() {\n if (isDestroyed) {\n return;\n }\n\n var _state$elements = state.elements,\n reference = _state$elements.reference,\n popper = _state$elements.popper; // Don't proceed if `reference` or `popper` are not valid elements\n // anymore\n\n if (!areValidElements(reference, popper)) {\n return;\n } // Store the reference and popper rects to be read by modifiers\n\n\n state.rects = {\n reference: getCompositeRect(reference, getOffsetParent(popper), state.options.strategy === 'fixed'),\n popper: getLayoutRect(popper)\n }; // Modifiers have the ability to reset the current update cycle. The\n // most common use case for this is the `flip` modifier changing the\n // placement, which then needs to re-run all the modifiers, because the\n // logic was previously ran for the previous placement and is therefore\n // stale/incorrect\n\n state.reset = false;\n state.placement = state.options.placement; // On each update cycle, the `modifiersData` property for each modifier\n // is filled with the initial data specified by the modifier. This means\n // it doesn't persist and is fresh on each update.\n // To ensure persistent data, use `${name}#persistent`\n\n state.orderedModifiers.forEach(function (modifier) {\n return state.modifiersData[modifier.name] = Object.assign({}, modifier.data);\n });\n\n for (var index = 0; index < state.orderedModifiers.length; index++) {\n if (state.reset === true) {\n state.reset = false;\n index = -1;\n continue;\n }\n\n var _state$orderedModifie = state.orderedModifiers[index],\n fn = _state$orderedModifie.fn,\n _state$orderedModifie2 = _state$orderedModifie.options,\n _options = _state$orderedModifie2 === void 0 ? 
{} : _state$orderedModifie2,\n name = _state$orderedModifie.name;\n\n if (typeof fn === 'function') {\n state = fn({\n state: state,\n options: _options,\n name: name,\n instance: instance\n }) || state;\n }\n }\n },\n // Async and optimistically optimized update – it will not be executed if\n // not necessary (debounced to run at most once-per-tick)\n update: debounce(function () {\n return new Promise(function (resolve) {\n instance.forceUpdate();\n resolve(state);\n });\n }),\n destroy: function destroy() {\n cleanupModifierEffects();\n isDestroyed = true;\n }\n };\n\n if (!areValidElements(reference, popper)) {\n return instance;\n }\n\n instance.setOptions(options).then(function (state) {\n if (!isDestroyed && options.onFirstUpdate) {\n options.onFirstUpdate(state);\n }\n }); // Modifiers have the ability to execute arbitrary code before the first\n // update cycle runs. They will be executed in the same order as the update\n // cycle. This is useful when a modifier adds some persistent data that\n // other modifiers need to use, but the modifier is run after the dependent\n // one.\n\n function runModifierEffects() {\n state.orderedModifiers.forEach(function (_ref) {\n var name = _ref.name,\n _ref$options = _ref.options,\n options = _ref$options === void 0 ? {} : _ref$options,\n effect = _ref.effect;\n\n if (typeof effect === 'function') {\n var cleanupFn = effect({\n state: state,\n name: name,\n instance: instance,\n options: options\n });\n\n var noopFn = function noopFn() {};\n\n effectCleanupFns.push(cleanupFn || noopFn);\n }\n });\n }\n\n function cleanupModifierEffects() {\n effectCleanupFns.forEach(function (fn) {\n return fn();\n });\n effectCleanupFns = [];\n }\n\n return instance;\n };\n}\nexport var createPopper = /*#__PURE__*/popperGenerator(); // eslint-disable-next-line import/no-unused-modules\n\nexport { detectOverflow };","export default function debounce(fn) {\n var pending;\n return function () {\n if (!pending) {\n pending = new Promise(function (resolve) {\n Promise.resolve().then(function () {\n pending = undefined;\n resolve(fn());\n });\n });\n }\n\n return pending;\n };\n}","export default function mergeByName(modifiers) {\n var merged = modifiers.reduce(function (merged, current) {\n var existing = merged[current.name];\n merged[current.name] = existing ? 
Object.assign({}, existing, current, {\n options: Object.assign({}, existing.options, current.options),\n data: Object.assign({}, existing.data, current.data)\n }) : current;\n return merged;\n }, {}); // IE11 does not support Object.values\n\n return Object.keys(merged).map(function (key) {\n return merged[key];\n });\n}","import { popperGenerator, detectOverflow } from \"./createPopper.js\";\nimport eventListeners from \"./modifiers/eventListeners.js\";\nimport popperOffsets from \"./modifiers/popperOffsets.js\";\nimport computeStyles from \"./modifiers/computeStyles.js\";\nimport applyStyles from \"./modifiers/applyStyles.js\";\nimport offset from \"./modifiers/offset.js\";\nimport flip from \"./modifiers/flip.js\";\nimport preventOverflow from \"./modifiers/preventOverflow.js\";\nimport arrow from \"./modifiers/arrow.js\";\nimport hide from \"./modifiers/hide.js\";\nvar defaultModifiers = [eventListeners, popperOffsets, computeStyles, applyStyles, offset, flip, preventOverflow, arrow, hide];\nvar createPopper = /*#__PURE__*/popperGenerator({\n defaultModifiers: defaultModifiers\n}); // eslint-disable-next-line import/no-unused-modules\n\nexport { createPopper, popperGenerator, defaultModifiers, detectOverflow }; // eslint-disable-next-line import/no-unused-modules\n\nexport { createPopper as createPopperLite } from \"./popper-lite.js\"; // eslint-disable-next-line import/no-unused-modules\n\nexport * from \"./modifiers/index.js\";","import { popperGenerator, detectOverflow } from \"./createPopper.js\";\nimport eventListeners from \"./modifiers/eventListeners.js\";\nimport popperOffsets from \"./modifiers/popperOffsets.js\";\nimport computeStyles from \"./modifiers/computeStyles.js\";\nimport applyStyles from \"./modifiers/applyStyles.js\";\nvar defaultModifiers = [eventListeners, popperOffsets, computeStyles, applyStyles];\nvar createPopper = /*#__PURE__*/popperGenerator({\n defaultModifiers: defaultModifiers\n}); // eslint-disable-next-line import/no-unused-modules\n\nexport { createPopper, popperGenerator, defaultModifiers, detectOverflow };","/*!\n * Bootstrap v5.3.3 (https://getbootstrap.com/)\n * Copyright 2011-2024 The Bootstrap Authors (https://github.com/twbs/bootstrap/graphs/contributors)\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n */\nimport * as Popper from '@popperjs/core';\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap dom/data.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n/**\n * Constants\n */\n\nconst elementMap = new Map();\nconst Data = {\n set(element, key, instance) {\n if (!elementMap.has(element)) {\n elementMap.set(element, new Map());\n }\n const instanceMap = elementMap.get(element);\n\n // make it clear we only want one instance per element\n // can be removed later when multiple key/instances are fine to be used\n if (!instanceMap.has(key) && instanceMap.size !== 0) {\n // eslint-disable-next-line no-console\n console.error(`Bootstrap doesn't allow more than one instance per element. 
Bound instance: ${Array.from(instanceMap.keys())[0]}.`);\n return;\n }\n instanceMap.set(key, instance);\n },\n get(element, key) {\n if (elementMap.has(element)) {\n return elementMap.get(element).get(key) || null;\n }\n return null;\n },\n remove(element, key) {\n if (!elementMap.has(element)) {\n return;\n }\n const instanceMap = elementMap.get(element);\n instanceMap.delete(key);\n\n // free up element references if there are no instances left for an element\n if (instanceMap.size === 0) {\n elementMap.delete(element);\n }\n }\n};\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/index.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\nconst MAX_UID = 1000000;\nconst MILLISECONDS_MULTIPLIER = 1000;\nconst TRANSITION_END = 'transitionend';\n\n/**\n * Properly escape IDs selectors to handle weird IDs\n * @param {string} selector\n * @returns {string}\n */\nconst parseSelector = selector => {\n if (selector && window.CSS && window.CSS.escape) {\n // document.querySelector needs escaping to handle IDs (html5+) containing for instance /\n selector = selector.replace(/#([^\\s\"#']+)/g, (match, id) => `#${CSS.escape(id)}`);\n }\n return selector;\n};\n\n// Shout-out Angus Croll (https://goo.gl/pxwQGp)\nconst toType = object => {\n if (object === null || object === undefined) {\n return `${object}`;\n }\n return Object.prototype.toString.call(object).match(/\\s([a-z]+)/i)[1].toLowerCase();\n};\n\n/**\n * Public Util API\n */\n\nconst getUID = prefix => {\n do {\n prefix += Math.floor(Math.random() * MAX_UID);\n } while (document.getElementById(prefix));\n return prefix;\n};\nconst getTransitionDurationFromElement = element => {\n if (!element) {\n return 0;\n }\n\n // Get transition-duration of the element\n let {\n transitionDuration,\n transitionDelay\n } = window.getComputedStyle(element);\n const floatTransitionDuration = Number.parseFloat(transitionDuration);\n const floatTransitionDelay = Number.parseFloat(transitionDelay);\n\n // Return 0 if element or transition duration is not found\n if (!floatTransitionDuration && !floatTransitionDelay) {\n return 0;\n }\n\n // If multiple durations are defined, take the first\n transitionDuration = transitionDuration.split(',')[0];\n transitionDelay = transitionDelay.split(',')[0];\n return (Number.parseFloat(transitionDuration) + Number.parseFloat(transitionDelay)) * MILLISECONDS_MULTIPLIER;\n};\nconst triggerTransitionEnd = element => {\n element.dispatchEvent(new Event(TRANSITION_END));\n};\nconst isElement = object => {\n if (!object || typeof object !== 'object') {\n return false;\n }\n if (typeof object.jquery !== 'undefined') {\n object = object[0];\n }\n return typeof object.nodeType !== 'undefined';\n};\nconst getElement = object => {\n // it's a jQuery object or a node element\n if (isElement(object)) {\n return object.jquery ? 
object[0] : object;\n }\n if (typeof object === 'string' && object.length > 0) {\n return document.querySelector(parseSelector(object));\n }\n return null;\n};\nconst isVisible = element => {\n if (!isElement(element) || element.getClientRects().length === 0) {\n return false;\n }\n const elementIsVisible = getComputedStyle(element).getPropertyValue('visibility') === 'visible';\n // Handle `details` element as its content may falsie appear visible when it is closed\n const closedDetails = element.closest('details:not([open])');\n if (!closedDetails) {\n return elementIsVisible;\n }\n if (closedDetails !== element) {\n const summary = element.closest('summary');\n if (summary && summary.parentNode !== closedDetails) {\n return false;\n }\n if (summary === null) {\n return false;\n }\n }\n return elementIsVisible;\n};\nconst isDisabled = element => {\n if (!element || element.nodeType !== Node.ELEMENT_NODE) {\n return true;\n }\n if (element.classList.contains('disabled')) {\n return true;\n }\n if (typeof element.disabled !== 'undefined') {\n return element.disabled;\n }\n return element.hasAttribute('disabled') && element.getAttribute('disabled') !== 'false';\n};\nconst findShadowRoot = element => {\n if (!document.documentElement.attachShadow) {\n return null;\n }\n\n // Can find the shadow root otherwise it'll return the document\n if (typeof element.getRootNode === 'function') {\n const root = element.getRootNode();\n return root instanceof ShadowRoot ? root : null;\n }\n if (element instanceof ShadowRoot) {\n return element;\n }\n\n // when we don't find a shadow root\n if (!element.parentNode) {\n return null;\n }\n return findShadowRoot(element.parentNode);\n};\nconst noop = () => {};\n\n/**\n * Trick to restart an element's animation\n *\n * @param {HTMLElement} element\n * @return void\n *\n * @see https://www.charistheo.io/blog/2021/02/restart-a-css-animation-with-javascript/#restarting-a-css-animation\n */\nconst reflow = element => {\n element.offsetHeight; // eslint-disable-line no-unused-expressions\n};\nconst getjQuery = () => {\n if (window.jQuery && !document.body.hasAttribute('data-bs-no-jquery')) {\n return window.jQuery;\n }\n return null;\n};\nconst DOMContentLoadedCallbacks = [];\nconst onDOMContentLoaded = callback => {\n if (document.readyState === 'loading') {\n // add listener on the first call when the document is in loading state\n if (!DOMContentLoadedCallbacks.length) {\n document.addEventListener('DOMContentLoaded', () => {\n for (const callback of DOMContentLoadedCallbacks) {\n callback();\n }\n });\n }\n DOMContentLoadedCallbacks.push(callback);\n } else {\n callback();\n }\n};\nconst isRTL = () => document.documentElement.dir === 'rtl';\nconst defineJQueryPlugin = plugin => {\n onDOMContentLoaded(() => {\n const $ = getjQuery();\n /* istanbul ignore if */\n if ($) {\n const name = plugin.NAME;\n const JQUERY_NO_CONFLICT = $.fn[name];\n $.fn[name] = plugin.jQueryInterface;\n $.fn[name].Constructor = plugin;\n $.fn[name].noConflict = () => {\n $.fn[name] = JQUERY_NO_CONFLICT;\n return plugin.jQueryInterface;\n };\n }\n });\n};\nconst execute = (possibleCallback, args = [], defaultValue = possibleCallback) => {\n return typeof possibleCallback === 'function' ? 
possibleCallback(...args) : defaultValue;\n};\nconst executeAfterTransition = (callback, transitionElement, waitForTransition = true) => {\n if (!waitForTransition) {\n execute(callback);\n return;\n }\n const durationPadding = 5;\n const emulatedDuration = getTransitionDurationFromElement(transitionElement) + durationPadding;\n let called = false;\n const handler = ({\n target\n }) => {\n if (target !== transitionElement) {\n return;\n }\n called = true;\n transitionElement.removeEventListener(TRANSITION_END, handler);\n execute(callback);\n };\n transitionElement.addEventListener(TRANSITION_END, handler);\n setTimeout(() => {\n if (!called) {\n triggerTransitionEnd(transitionElement);\n }\n }, emulatedDuration);\n};\n\n/**\n * Return the previous/next element of a list.\n *\n * @param {array} list The list of elements\n * @param activeElement The active element\n * @param shouldGetNext Choose to get next or previous element\n * @param isCycleAllowed\n * @return {Element|elem} The proper element\n */\nconst getNextActiveElement = (list, activeElement, shouldGetNext, isCycleAllowed) => {\n const listLength = list.length;\n let index = list.indexOf(activeElement);\n\n // if the element does not exist in the list return an element\n // depending on the direction and if cycle is allowed\n if (index === -1) {\n return !shouldGetNext && isCycleAllowed ? list[listLength - 1] : list[0];\n }\n index += shouldGetNext ? 1 : -1;\n if (isCycleAllowed) {\n index = (index + listLength) % listLength;\n }\n return list[Math.max(0, Math.min(index, listLength - 1))];\n};\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap dom/event-handler.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst namespaceRegex = /[^.]*(?=\\..*)\\.|.*/;\nconst stripNameRegex = /\\..*/;\nconst stripUidRegex = /::\\d+$/;\nconst eventRegistry = {}; // Events storage\nlet uidEvent = 1;\nconst customEvents = {\n mouseenter: 'mouseover',\n mouseleave: 'mouseout'\n};\nconst nativeEvents = new Set(['click', 'dblclick', 'mouseup', 'mousedown', 'contextmenu', 'mousewheel', 'DOMMouseScroll', 'mouseover', 'mouseout', 'mousemove', 'selectstart', 'selectend', 'keydown', 'keypress', 'keyup', 'orientationchange', 'touchstart', 'touchmove', 'touchend', 'touchcancel', 'pointerdown', 'pointermove', 'pointerup', 'pointerleave', 'pointercancel', 'gesturestart', 'gesturechange', 'gestureend', 'focus', 'blur', 'change', 'reset', 'select', 'submit', 'focusin', 'focusout', 'load', 'unload', 'beforeunload', 'resize', 'move', 'DOMContentLoaded', 'readystatechange', 'error', 'abort', 'scroll']);\n\n/**\n * Private methods\n */\n\nfunction makeEventUid(element, uid) {\n return uid && `${uid}::${uidEvent++}` || element.uidEvent || uidEvent++;\n}\nfunction getElementEvents(element) {\n const uid = makeEventUid(element);\n element.uidEvent = uid;\n eventRegistry[uid] = eventRegistry[uid] || {};\n return eventRegistry[uid];\n}\nfunction bootstrapHandler(element, fn) {\n return function handler(event) {\n hydrateObj(event, {\n delegateTarget: element\n });\n if (handler.oneOff) {\n EventHandler.off(element, event.type, fn);\n }\n return fn.apply(element, [event]);\n };\n}\nfunction bootstrapDelegationHandler(element, selector, fn) {\n return function handler(event) {\n const domElements = element.querySelectorAll(selector);\n for (let {\n target\n } = event; target && 
target !== this; target = target.parentNode) {\n for (const domElement of domElements) {\n if (domElement !== target) {\n continue;\n }\n hydrateObj(event, {\n delegateTarget: target\n });\n if (handler.oneOff) {\n EventHandler.off(element, event.type, selector, fn);\n }\n return fn.apply(target, [event]);\n }\n }\n };\n}\nfunction findHandler(events, callable, delegationSelector = null) {\n return Object.values(events).find(event => event.callable === callable && event.delegationSelector === delegationSelector);\n}\nfunction normalizeParameters(originalTypeEvent, handler, delegationFunction) {\n const isDelegated = typeof handler === 'string';\n // TODO: tooltip passes `false` instead of selector, so we need to check\n const callable = isDelegated ? delegationFunction : handler || delegationFunction;\n let typeEvent = getTypeEvent(originalTypeEvent);\n if (!nativeEvents.has(typeEvent)) {\n typeEvent = originalTypeEvent;\n }\n return [isDelegated, callable, typeEvent];\n}\nfunction addHandler(element, originalTypeEvent, handler, delegationFunction, oneOff) {\n if (typeof originalTypeEvent !== 'string' || !element) {\n return;\n }\n let [isDelegated, callable, typeEvent] = normalizeParameters(originalTypeEvent, handler, delegationFunction);\n\n // in case of mouseenter or mouseleave wrap the handler within a function that checks for its DOM position\n // this prevents the handler from being dispatched the same way as mouseover or mouseout does\n if (originalTypeEvent in customEvents) {\n const wrapFunction = fn => {\n return function (event) {\n if (!event.relatedTarget || event.relatedTarget !== event.delegateTarget && !event.delegateTarget.contains(event.relatedTarget)) {\n return fn.call(this, event);\n }\n };\n };\n callable = wrapFunction(callable);\n }\n const events = getElementEvents(element);\n const handlers = events[typeEvent] || (events[typeEvent] = {});\n const previousFunction = findHandler(handlers, callable, isDelegated ? handler : null);\n if (previousFunction) {\n previousFunction.oneOff = previousFunction.oneOff && oneOff;\n return;\n }\n const uid = makeEventUid(callable, originalTypeEvent.replace(namespaceRegex, ''));\n const fn = isDelegated ? bootstrapDelegationHandler(element, handler, callable) : bootstrapHandler(element, callable);\n fn.delegationSelector = isDelegated ? 
handler : null;\n fn.callable = callable;\n fn.oneOff = oneOff;\n fn.uidEvent = uid;\n handlers[uid] = fn;\n element.addEventListener(typeEvent, fn, isDelegated);\n}\nfunction removeHandler(element, events, typeEvent, handler, delegationSelector) {\n const fn = findHandler(events[typeEvent], handler, delegationSelector);\n if (!fn) {\n return;\n }\n element.removeEventListener(typeEvent, fn, Boolean(delegationSelector));\n delete events[typeEvent][fn.uidEvent];\n}\nfunction removeNamespacedHandlers(element, events, typeEvent, namespace) {\n const storeElementEvent = events[typeEvent] || {};\n for (const [handlerKey, event] of Object.entries(storeElementEvent)) {\n if (handlerKey.includes(namespace)) {\n removeHandler(element, events, typeEvent, event.callable, event.delegationSelector);\n }\n }\n}\nfunction getTypeEvent(event) {\n // allow to get the native events from namespaced events ('click.bs.button' --> 'click')\n event = event.replace(stripNameRegex, '');\n return customEvents[event] || event;\n}\nconst EventHandler = {\n on(element, event, handler, delegationFunction) {\n addHandler(element, event, handler, delegationFunction, false);\n },\n one(element, event, handler, delegationFunction) {\n addHandler(element, event, handler, delegationFunction, true);\n },\n off(element, originalTypeEvent, handler, delegationFunction) {\n if (typeof originalTypeEvent !== 'string' || !element) {\n return;\n }\n const [isDelegated, callable, typeEvent] = normalizeParameters(originalTypeEvent, handler, delegationFunction);\n const inNamespace = typeEvent !== originalTypeEvent;\n const events = getElementEvents(element);\n const storeElementEvent = events[typeEvent] || {};\n const isNamespace = originalTypeEvent.startsWith('.');\n if (typeof callable !== 'undefined') {\n // Simplest case: handler is passed, remove that listener ONLY.\n if (!Object.keys(storeElementEvent).length) {\n return;\n }\n removeHandler(element, events, typeEvent, callable, isDelegated ? 
handler : null);\n return;\n }\n if (isNamespace) {\n for (const elementEvent of Object.keys(events)) {\n removeNamespacedHandlers(element, events, elementEvent, originalTypeEvent.slice(1));\n }\n }\n for (const [keyHandlers, event] of Object.entries(storeElementEvent)) {\n const handlerKey = keyHandlers.replace(stripUidRegex, '');\n if (!inNamespace || originalTypeEvent.includes(handlerKey)) {\n removeHandler(element, events, typeEvent, event.callable, event.delegationSelector);\n }\n }\n },\n trigger(element, event, args) {\n if (typeof event !== 'string' || !element) {\n return null;\n }\n const $ = getjQuery();\n const typeEvent = getTypeEvent(event);\n const inNamespace = event !== typeEvent;\n let jQueryEvent = null;\n let bubbles = true;\n let nativeDispatch = true;\n let defaultPrevented = false;\n if (inNamespace && $) {\n jQueryEvent = $.Event(event, args);\n $(element).trigger(jQueryEvent);\n bubbles = !jQueryEvent.isPropagationStopped();\n nativeDispatch = !jQueryEvent.isImmediatePropagationStopped();\n defaultPrevented = jQueryEvent.isDefaultPrevented();\n }\n const evt = hydrateObj(new Event(event, {\n bubbles,\n cancelable: true\n }), args);\n if (defaultPrevented) {\n evt.preventDefault();\n }\n if (nativeDispatch) {\n element.dispatchEvent(evt);\n }\n if (evt.defaultPrevented && jQueryEvent) {\n jQueryEvent.preventDefault();\n }\n return evt;\n }\n};\nfunction hydrateObj(obj, meta = {}) {\n for (const [key, value] of Object.entries(meta)) {\n try {\n obj[key] = value;\n } catch (_unused) {\n Object.defineProperty(obj, key, {\n configurable: true,\n get() {\n return value;\n }\n });\n }\n }\n return obj;\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap dom/manipulator.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\nfunction normalizeData(value) {\n if (value === 'true') {\n return true;\n }\n if (value === 'false') {\n return false;\n }\n if (value === Number(value).toString()) {\n return Number(value);\n }\n if (value === '' || value === 'null') {\n return null;\n }\n if (typeof value !== 'string') {\n return value;\n }\n try {\n return JSON.parse(decodeURIComponent(value));\n } catch (_unused) {\n return value;\n }\n}\nfunction normalizeDataKey(key) {\n return key.replace(/[A-Z]/g, chr => `-${chr.toLowerCase()}`);\n}\nconst Manipulator = {\n setDataAttribute(element, key, value) {\n element.setAttribute(`data-bs-${normalizeDataKey(key)}`, value);\n },\n removeDataAttribute(element, key) {\n element.removeAttribute(`data-bs-${normalizeDataKey(key)}`);\n },\n getDataAttributes(element) {\n if (!element) {\n return {};\n }\n const attributes = {};\n const bsKeys = Object.keys(element.dataset).filter(key => key.startsWith('bs') && !key.startsWith('bsConfig'));\n for (const key of bsKeys) {\n let pureKey = key.replace(/^bs/, '');\n pureKey = pureKey.charAt(0).toLowerCase() + pureKey.slice(1, pureKey.length);\n attributes[pureKey] = normalizeData(element.dataset[key]);\n }\n return attributes;\n },\n getDataAttribute(element, key) {\n return normalizeData(element.getAttribute(`data-bs-${normalizeDataKey(key)}`));\n }\n};\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/config.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * 
Class definition\n */\n\nclass Config {\n // Getters\n static get Default() {\n return {};\n }\n static get DefaultType() {\n return {};\n }\n static get NAME() {\n throw new Error('You have to implement the static method \"NAME\", for each component!');\n }\n _getConfig(config) {\n config = this._mergeConfigObj(config);\n config = this._configAfterMerge(config);\n this._typeCheckConfig(config);\n return config;\n }\n _configAfterMerge(config) {\n return config;\n }\n _mergeConfigObj(config, element) {\n const jsonConfig = isElement(element) ? Manipulator.getDataAttribute(element, 'config') : {}; // try to parse\n\n return {\n ...this.constructor.Default,\n ...(typeof jsonConfig === 'object' ? jsonConfig : {}),\n ...(isElement(element) ? Manipulator.getDataAttributes(element) : {}),\n ...(typeof config === 'object' ? config : {})\n };\n }\n _typeCheckConfig(config, configTypes = this.constructor.DefaultType) {\n for (const [property, expectedTypes] of Object.entries(configTypes)) {\n const value = config[property];\n const valueType = isElement(value) ? 'element' : toType(value);\n if (!new RegExp(expectedTypes).test(valueType)) {\n throw new TypeError(`${this.constructor.NAME.toUpperCase()}: Option \"${property}\" provided type \"${valueType}\" but expected type \"${expectedTypes}\".`);\n }\n }\n }\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap base-component.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst VERSION = '5.3.3';\n\n/**\n * Class definition\n */\n\nclass BaseComponent extends Config {\n constructor(element, config) {\n super();\n element = getElement(element);\n if (!element) {\n return;\n }\n this._element = element;\n this._config = this._getConfig(config);\n Data.set(this._element, this.constructor.DATA_KEY, this);\n }\n\n // Public\n dispose() {\n Data.remove(this._element, this.constructor.DATA_KEY);\n EventHandler.off(this._element, this.constructor.EVENT_KEY);\n for (const propertyName of Object.getOwnPropertyNames(this)) {\n this[propertyName] = null;\n }\n }\n _queueCallback(callback, element, isAnimated = true) {\n executeAfterTransition(callback, element, isAnimated);\n }\n _getConfig(config) {\n config = this._mergeConfigObj(config, this._element);\n config = this._configAfterMerge(config);\n this._typeCheckConfig(config);\n return config;\n }\n\n // Static\n static getInstance(element) {\n return Data.get(getElement(element), this.DATA_KEY);\n }\n static getOrCreateInstance(element, config = {}) {\n return this.getInstance(element) || new this(element, typeof config === 'object' ? 
config : null);\n }\n static get VERSION() {\n return VERSION;\n }\n static get DATA_KEY() {\n return `bs.${this.NAME}`;\n }\n static get EVENT_KEY() {\n return `.${this.DATA_KEY}`;\n }\n static eventName(name) {\n return `${name}${this.EVENT_KEY}`;\n }\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap dom/selector-engine.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\nconst getSelector = element => {\n let selector = element.getAttribute('data-bs-target');\n if (!selector || selector === '#') {\n let hrefAttribute = element.getAttribute('href');\n\n // The only valid content that could double as a selector are IDs or classes,\n // so everything starting with `#` or `.`. If a \"real\" URL is used as the selector,\n // `document.querySelector` will rightfully complain it is invalid.\n // See https://github.com/twbs/bootstrap/issues/32273\n if (!hrefAttribute || !hrefAttribute.includes('#') && !hrefAttribute.startsWith('.')) {\n return null;\n }\n\n // Just in case some CMS puts out a full URL with the anchor appended\n if (hrefAttribute.includes('#') && !hrefAttribute.startsWith('#')) {\n hrefAttribute = `#${hrefAttribute.split('#')[1]}`;\n }\n selector = hrefAttribute && hrefAttribute !== '#' ? hrefAttribute.trim() : null;\n }\n return selector ? selector.split(',').map(sel => parseSelector(sel)).join(',') : null;\n};\nconst SelectorEngine = {\n find(selector, element = document.documentElement) {\n return [].concat(...Element.prototype.querySelectorAll.call(element, selector));\n },\n findOne(selector, element = document.documentElement) {\n return Element.prototype.querySelector.call(element, selector);\n },\n children(element, selector) {\n return [].concat(...element.children).filter(child => child.matches(selector));\n },\n parents(element, selector) {\n const parents = [];\n let ancestor = element.parentNode.closest(selector);\n while (ancestor) {\n parents.push(ancestor);\n ancestor = ancestor.parentNode.closest(selector);\n }\n return parents;\n },\n prev(element, selector) {\n let previous = element.previousElementSibling;\n while (previous) {\n if (previous.matches(selector)) {\n return [previous];\n }\n previous = previous.previousElementSibling;\n }\n return [];\n },\n // TODO: this is now unused; remove later along with prev()\n next(element, selector) {\n let next = element.nextElementSibling;\n while (next) {\n if (next.matches(selector)) {\n return [next];\n }\n next = next.nextElementSibling;\n }\n return [];\n },\n focusableChildren(element) {\n const focusables = ['a', 'button', 'input', 'textarea', 'select', 'details', '[tabindex]', '[contenteditable=\"true\"]'].map(selector => `${selector}:not([tabindex^=\"-\"])`).join(',');\n return this.find(focusables, element).filter(el => !isDisabled(el) && isVisible(el));\n },\n getSelectorFromElement(element) {\n const selector = getSelector(element);\n if (selector) {\n return SelectorEngine.findOne(selector) ? selector : null;\n }\n return null;\n },\n getElementFromSelector(element) {\n const selector = getSelector(element);\n return selector ? SelectorEngine.findOne(selector) : null;\n },\n getMultipleElementsFromSelector(element) {\n const selector = getSelector(element);\n return selector ? 
SelectorEngine.find(selector) : [];\n }\n};\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/component-functions.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\nconst enableDismissTrigger = (component, method = 'hide') => {\n const clickEvent = `click.dismiss${component.EVENT_KEY}`;\n const name = component.NAME;\n EventHandler.on(document, clickEvent, `[data-bs-dismiss=\"${name}\"]`, function (event) {\n if (['A', 'AREA'].includes(this.tagName)) {\n event.preventDefault();\n }\n if (isDisabled(this)) {\n return;\n }\n const target = SelectorEngine.getElementFromSelector(this) || this.closest(`.${name}`);\n const instance = component.getOrCreateInstance(target);\n\n // Method argument is left, for Alert and only, as it doesn't implement the 'hide' method\n instance[method]();\n });\n};\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap alert.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$f = 'alert';\nconst DATA_KEY$a = 'bs.alert';\nconst EVENT_KEY$b = `.${DATA_KEY$a}`;\nconst EVENT_CLOSE = `close${EVENT_KEY$b}`;\nconst EVENT_CLOSED = `closed${EVENT_KEY$b}`;\nconst CLASS_NAME_FADE$5 = 'fade';\nconst CLASS_NAME_SHOW$8 = 'show';\n\n/**\n * Class definition\n */\n\nclass Alert extends BaseComponent {\n // Getters\n static get NAME() {\n return NAME$f;\n }\n\n // Public\n close() {\n const closeEvent = EventHandler.trigger(this._element, EVENT_CLOSE);\n if (closeEvent.defaultPrevented) {\n return;\n }\n this._element.classList.remove(CLASS_NAME_SHOW$8);\n const isAnimated = this._element.classList.contains(CLASS_NAME_FADE$5);\n this._queueCallback(() => this._destroyElement(), this._element, isAnimated);\n }\n\n // Private\n _destroyElement() {\n this._element.remove();\n EventHandler.trigger(this._element, EVENT_CLOSED);\n this.dispose();\n }\n\n // Static\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Alert.getOrCreateInstance(this);\n if (typeof config !== 'string') {\n return;\n }\n if (data[config] === undefined || config.startsWith('_') || config === 'constructor') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n data[config](this);\n });\n }\n}\n\n/**\n * Data API implementation\n */\n\nenableDismissTrigger(Alert, 'close');\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Alert);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap button.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$e = 'button';\nconst DATA_KEY$9 = 'bs.button';\nconst EVENT_KEY$a = `.${DATA_KEY$9}`;\nconst DATA_API_KEY$6 = '.data-api';\nconst CLASS_NAME_ACTIVE$3 = 'active';\nconst SELECTOR_DATA_TOGGLE$5 = '[data-bs-toggle=\"button\"]';\nconst EVENT_CLICK_DATA_API$6 = `click${EVENT_KEY$a}${DATA_API_KEY$6}`;\n\n/**\n * Class definition\n */\n\nclass Button extends BaseComponent {\n // Getters\n static get NAME() {\n return NAME$e;\n }\n\n // Public\n toggle() {\n // Toggle class and sync the `aria-pressed` attribute with the return value of the `.toggle()` method\n 
this._element.setAttribute('aria-pressed', this._element.classList.toggle(CLASS_NAME_ACTIVE$3));\n }\n\n // Static\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Button.getOrCreateInstance(this);\n if (config === 'toggle') {\n data[config]();\n }\n });\n }\n}\n\n/**\n * Data API implementation\n */\n\nEventHandler.on(document, EVENT_CLICK_DATA_API$6, SELECTOR_DATA_TOGGLE$5, event => {\n event.preventDefault();\n const button = event.target.closest(SELECTOR_DATA_TOGGLE$5);\n const data = Button.getOrCreateInstance(button);\n data.toggle();\n});\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Button);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/swipe.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$d = 'swipe';\nconst EVENT_KEY$9 = '.bs.swipe';\nconst EVENT_TOUCHSTART = `touchstart${EVENT_KEY$9}`;\nconst EVENT_TOUCHMOVE = `touchmove${EVENT_KEY$9}`;\nconst EVENT_TOUCHEND = `touchend${EVENT_KEY$9}`;\nconst EVENT_POINTERDOWN = `pointerdown${EVENT_KEY$9}`;\nconst EVENT_POINTERUP = `pointerup${EVENT_KEY$9}`;\nconst POINTER_TYPE_TOUCH = 'touch';\nconst POINTER_TYPE_PEN = 'pen';\nconst CLASS_NAME_POINTER_EVENT = 'pointer-event';\nconst SWIPE_THRESHOLD = 40;\nconst Default$c = {\n endCallback: null,\n leftCallback: null,\n rightCallback: null\n};\nconst DefaultType$c = {\n endCallback: '(function|null)',\n leftCallback: '(function|null)',\n rightCallback: '(function|null)'\n};\n\n/**\n * Class definition\n */\n\nclass Swipe extends Config {\n constructor(element, config) {\n super();\n this._element = element;\n if (!element || !Swipe.isSupported()) {\n return;\n }\n this._config = this._getConfig(config);\n this._deltaX = 0;\n this._supportPointerEvents = Boolean(window.PointerEvent);\n this._initEvents();\n }\n\n // Getters\n static get Default() {\n return Default$c;\n }\n static get DefaultType() {\n return DefaultType$c;\n }\n static get NAME() {\n return NAME$d;\n }\n\n // Public\n dispose() {\n EventHandler.off(this._element, EVENT_KEY$9);\n }\n\n // Private\n _start(event) {\n if (!this._supportPointerEvents) {\n this._deltaX = event.touches[0].clientX;\n return;\n }\n if (this._eventIsPointerPenTouch(event)) {\n this._deltaX = event.clientX;\n }\n }\n _end(event) {\n if (this._eventIsPointerPenTouch(event)) {\n this._deltaX = event.clientX - this._deltaX;\n }\n this._handleSwipe();\n execute(this._config.endCallback);\n }\n _move(event) {\n this._deltaX = event.touches && event.touches.length > 1 ? 0 : event.touches[0].clientX - this._deltaX;\n }\n _handleSwipe() {\n const absDeltaX = Math.abs(this._deltaX);\n if (absDeltaX <= SWIPE_THRESHOLD) {\n return;\n }\n const direction = absDeltaX / this._deltaX;\n this._deltaX = 0;\n if (!direction) {\n return;\n }\n execute(direction > 0 ? 
this._config.rightCallback : this._config.leftCallback);\n }\n _initEvents() {\n if (this._supportPointerEvents) {\n EventHandler.on(this._element, EVENT_POINTERDOWN, event => this._start(event));\n EventHandler.on(this._element, EVENT_POINTERUP, event => this._end(event));\n this._element.classList.add(CLASS_NAME_POINTER_EVENT);\n } else {\n EventHandler.on(this._element, EVENT_TOUCHSTART, event => this._start(event));\n EventHandler.on(this._element, EVENT_TOUCHMOVE, event => this._move(event));\n EventHandler.on(this._element, EVENT_TOUCHEND, event => this._end(event));\n }\n }\n _eventIsPointerPenTouch(event) {\n return this._supportPointerEvents && (event.pointerType === POINTER_TYPE_PEN || event.pointerType === POINTER_TYPE_TOUCH);\n }\n\n // Static\n static isSupported() {\n return 'ontouchstart' in document.documentElement || navigator.maxTouchPoints > 0;\n }\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap carousel.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$c = 'carousel';\nconst DATA_KEY$8 = 'bs.carousel';\nconst EVENT_KEY$8 = `.${DATA_KEY$8}`;\nconst DATA_API_KEY$5 = '.data-api';\nconst ARROW_LEFT_KEY$1 = 'ArrowLeft';\nconst ARROW_RIGHT_KEY$1 = 'ArrowRight';\nconst TOUCHEVENT_COMPAT_WAIT = 500; // Time for mouse compat events to fire after touch\n\nconst ORDER_NEXT = 'next';\nconst ORDER_PREV = 'prev';\nconst DIRECTION_LEFT = 'left';\nconst DIRECTION_RIGHT = 'right';\nconst EVENT_SLIDE = `slide${EVENT_KEY$8}`;\nconst EVENT_SLID = `slid${EVENT_KEY$8}`;\nconst EVENT_KEYDOWN$1 = `keydown${EVENT_KEY$8}`;\nconst EVENT_MOUSEENTER$1 = `mouseenter${EVENT_KEY$8}`;\nconst EVENT_MOUSELEAVE$1 = `mouseleave${EVENT_KEY$8}`;\nconst EVENT_DRAG_START = `dragstart${EVENT_KEY$8}`;\nconst EVENT_LOAD_DATA_API$3 = `load${EVENT_KEY$8}${DATA_API_KEY$5}`;\nconst EVENT_CLICK_DATA_API$5 = `click${EVENT_KEY$8}${DATA_API_KEY$5}`;\nconst CLASS_NAME_CAROUSEL = 'carousel';\nconst CLASS_NAME_ACTIVE$2 = 'active';\nconst CLASS_NAME_SLIDE = 'slide';\nconst CLASS_NAME_END = 'carousel-item-end';\nconst CLASS_NAME_START = 'carousel-item-start';\nconst CLASS_NAME_NEXT = 'carousel-item-next';\nconst CLASS_NAME_PREV = 'carousel-item-prev';\nconst SELECTOR_ACTIVE = '.active';\nconst SELECTOR_ITEM = '.carousel-item';\nconst SELECTOR_ACTIVE_ITEM = SELECTOR_ACTIVE + SELECTOR_ITEM;\nconst SELECTOR_ITEM_IMG = '.carousel-item img';\nconst SELECTOR_INDICATORS = '.carousel-indicators';\nconst SELECTOR_DATA_SLIDE = '[data-bs-slide], [data-bs-slide-to]';\nconst SELECTOR_DATA_RIDE = '[data-bs-ride=\"carousel\"]';\nconst KEY_TO_DIRECTION = {\n [ARROW_LEFT_KEY$1]: DIRECTION_RIGHT,\n [ARROW_RIGHT_KEY$1]: DIRECTION_LEFT\n};\nconst Default$b = {\n interval: 5000,\n keyboard: true,\n pause: 'hover',\n ride: false,\n touch: true,\n wrap: true\n};\nconst DefaultType$b = {\n interval: '(number|boolean)',\n // TODO:v6 remove boolean support\n keyboard: 'boolean',\n pause: '(string|boolean)',\n ride: '(boolean|string)',\n touch: 'boolean',\n wrap: 'boolean'\n};\n\n/**\n * Class definition\n */\n\nclass Carousel extends BaseComponent {\n constructor(element, config) {\n super(element, config);\n this._interval = null;\n this._activeElement = null;\n this._isSliding = false;\n this.touchTimeout = null;\n this._swipeHelper = null;\n this._indicatorsElement = SelectorEngine.findOne(SELECTOR_INDICATORS, this._element);\n 
this._addEventListeners();\n if (this._config.ride === CLASS_NAME_CAROUSEL) {\n this.cycle();\n }\n }\n\n // Getters\n static get Default() {\n return Default$b;\n }\n static get DefaultType() {\n return DefaultType$b;\n }\n static get NAME() {\n return NAME$c;\n }\n\n // Public\n next() {\n this._slide(ORDER_NEXT);\n }\n nextWhenVisible() {\n // FIXME TODO use `document.visibilityState`\n // Don't call next when the page isn't visible\n // or the carousel or its parent isn't visible\n if (!document.hidden && isVisible(this._element)) {\n this.next();\n }\n }\n prev() {\n this._slide(ORDER_PREV);\n }\n pause() {\n if (this._isSliding) {\n triggerTransitionEnd(this._element);\n }\n this._clearInterval();\n }\n cycle() {\n this._clearInterval();\n this._updateInterval();\n this._interval = setInterval(() => this.nextWhenVisible(), this._config.interval);\n }\n _maybeEnableCycle() {\n if (!this._config.ride) {\n return;\n }\n if (this._isSliding) {\n EventHandler.one(this._element, EVENT_SLID, () => this.cycle());\n return;\n }\n this.cycle();\n }\n to(index) {\n const items = this._getItems();\n if (index > items.length - 1 || index < 0) {\n return;\n }\n if (this._isSliding) {\n EventHandler.one(this._element, EVENT_SLID, () => this.to(index));\n return;\n }\n const activeIndex = this._getItemIndex(this._getActive());\n if (activeIndex === index) {\n return;\n }\n const order = index > activeIndex ? ORDER_NEXT : ORDER_PREV;\n this._slide(order, items[index]);\n }\n dispose() {\n if (this._swipeHelper) {\n this._swipeHelper.dispose();\n }\n super.dispose();\n }\n\n // Private\n _configAfterMerge(config) {\n config.defaultInterval = config.interval;\n return config;\n }\n _addEventListeners() {\n if (this._config.keyboard) {\n EventHandler.on(this._element, EVENT_KEYDOWN$1, event => this._keydown(event));\n }\n if (this._config.pause === 'hover') {\n EventHandler.on(this._element, EVENT_MOUSEENTER$1, () => this.pause());\n EventHandler.on(this._element, EVENT_MOUSELEAVE$1, () => this._maybeEnableCycle());\n }\n if (this._config.touch && Swipe.isSupported()) {\n this._addTouchEventListeners();\n }\n }\n _addTouchEventListeners() {\n for (const img of SelectorEngine.find(SELECTOR_ITEM_IMG, this._element)) {\n EventHandler.on(img, EVENT_DRAG_START, event => event.preventDefault());\n }\n const endCallBack = () => {\n if (this._config.pause !== 'hover') {\n return;\n }\n\n // If it's a touch-enabled device, mouseenter/leave are fired as\n // part of the mouse compatibility events on first tap - the carousel\n // would stop cycling until user tapped out of it;\n // here, we listen for touchend, explicitly pause the carousel\n // (as if it's the second time we tap on it, mouseenter compat event\n // is NOT fired) and after a timeout (to allow for mouse compatibility\n // events to fire) we explicitly restart cycling\n\n this.pause();\n if (this.touchTimeout) {\n clearTimeout(this.touchTimeout);\n }\n this.touchTimeout = setTimeout(() => this._maybeEnableCycle(), TOUCHEVENT_COMPAT_WAIT + this._config.interval);\n };\n const swipeConfig = {\n leftCallback: () => this._slide(this._directionToOrder(DIRECTION_LEFT)),\n rightCallback: () => this._slide(this._directionToOrder(DIRECTION_RIGHT)),\n endCallback: endCallBack\n };\n this._swipeHelper = new Swipe(this._element, swipeConfig);\n }\n _keydown(event) {\n if (/input|textarea/i.test(event.target.tagName)) {\n return;\n }\n const direction = KEY_TO_DIRECTION[event.key];\n if (direction) {\n event.preventDefault();\n 
this._slide(this._directionToOrder(direction));\n }\n }\n _getItemIndex(element) {\n return this._getItems().indexOf(element);\n }\n _setActiveIndicatorElement(index) {\n if (!this._indicatorsElement) {\n return;\n }\n const activeIndicator = SelectorEngine.findOne(SELECTOR_ACTIVE, this._indicatorsElement);\n activeIndicator.classList.remove(CLASS_NAME_ACTIVE$2);\n activeIndicator.removeAttribute('aria-current');\n const newActiveIndicator = SelectorEngine.findOne(`[data-bs-slide-to=\"${index}\"]`, this._indicatorsElement);\n if (newActiveIndicator) {\n newActiveIndicator.classList.add(CLASS_NAME_ACTIVE$2);\n newActiveIndicator.setAttribute('aria-current', 'true');\n }\n }\n _updateInterval() {\n const element = this._activeElement || this._getActive();\n if (!element) {\n return;\n }\n const elementInterval = Number.parseInt(element.getAttribute('data-bs-interval'), 10);\n this._config.interval = elementInterval || this._config.defaultInterval;\n }\n _slide(order, element = null) {\n if (this._isSliding) {\n return;\n }\n const activeElement = this._getActive();\n const isNext = order === ORDER_NEXT;\n const nextElement = element || getNextActiveElement(this._getItems(), activeElement, isNext, this._config.wrap);\n if (nextElement === activeElement) {\n return;\n }\n const nextElementIndex = this._getItemIndex(nextElement);\n const triggerEvent = eventName => {\n return EventHandler.trigger(this._element, eventName, {\n relatedTarget: nextElement,\n direction: this._orderToDirection(order),\n from: this._getItemIndex(activeElement),\n to: nextElementIndex\n });\n };\n const slideEvent = triggerEvent(EVENT_SLIDE);\n if (slideEvent.defaultPrevented) {\n return;\n }\n if (!activeElement || !nextElement) {\n // Some weirdness is happening, so we bail\n // TODO: change tests that use empty divs to avoid this check\n return;\n }\n const isCycling = Boolean(this._interval);\n this.pause();\n this._isSliding = true;\n this._setActiveIndicatorElement(nextElementIndex);\n this._activeElement = nextElement;\n const directionalClassName = isNext ? CLASS_NAME_START : CLASS_NAME_END;\n const orderClassName = isNext ? CLASS_NAME_NEXT : CLASS_NAME_PREV;\n nextElement.classList.add(orderClassName);\n reflow(nextElement);\n activeElement.classList.add(directionalClassName);\n nextElement.classList.add(directionalClassName);\n const completeCallBack = () => {\n nextElement.classList.remove(directionalClassName, orderClassName);\n nextElement.classList.add(CLASS_NAME_ACTIVE$2);\n activeElement.classList.remove(CLASS_NAME_ACTIVE$2, orderClassName, directionalClassName);\n this._isSliding = false;\n triggerEvent(EVENT_SLID);\n };\n this._queueCallback(completeCallBack, activeElement, this._isAnimated());\n if (isCycling) {\n this.cycle();\n }\n }\n _isAnimated() {\n return this._element.classList.contains(CLASS_NAME_SLIDE);\n }\n _getActive() {\n return SelectorEngine.findOne(SELECTOR_ACTIVE_ITEM, this._element);\n }\n _getItems() {\n return SelectorEngine.find(SELECTOR_ITEM, this._element);\n }\n _clearInterval() {\n if (this._interval) {\n clearInterval(this._interval);\n this._interval = null;\n }\n }\n _directionToOrder(direction) {\n if (isRTL()) {\n return direction === DIRECTION_LEFT ? ORDER_PREV : ORDER_NEXT;\n }\n return direction === DIRECTION_LEFT ? ORDER_NEXT : ORDER_PREV;\n }\n _orderToDirection(order) {\n if (isRTL()) {\n return order === ORDER_PREV ? DIRECTION_LEFT : DIRECTION_RIGHT;\n }\n return order === ORDER_PREV ? 
DIRECTION_RIGHT : DIRECTION_LEFT;\n }\n\n // Static\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Carousel.getOrCreateInstance(this, config);\n if (typeof config === 'number') {\n data.to(config);\n return;\n }\n if (typeof config === 'string') {\n if (data[config] === undefined || config.startsWith('_') || config === 'constructor') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n data[config]();\n }\n });\n }\n}\n\n/**\n * Data API implementation\n */\n\nEventHandler.on(document, EVENT_CLICK_DATA_API$5, SELECTOR_DATA_SLIDE, function (event) {\n const target = SelectorEngine.getElementFromSelector(this);\n if (!target || !target.classList.contains(CLASS_NAME_CAROUSEL)) {\n return;\n }\n event.preventDefault();\n const carousel = Carousel.getOrCreateInstance(target);\n const slideIndex = this.getAttribute('data-bs-slide-to');\n if (slideIndex) {\n carousel.to(slideIndex);\n carousel._maybeEnableCycle();\n return;\n }\n if (Manipulator.getDataAttribute(this, 'slide') === 'next') {\n carousel.next();\n carousel._maybeEnableCycle();\n return;\n }\n carousel.prev();\n carousel._maybeEnableCycle();\n});\nEventHandler.on(window, EVENT_LOAD_DATA_API$3, () => {\n const carousels = SelectorEngine.find(SELECTOR_DATA_RIDE);\n for (const carousel of carousels) {\n Carousel.getOrCreateInstance(carousel);\n }\n});\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Carousel);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap collapse.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$b = 'collapse';\nconst DATA_KEY$7 = 'bs.collapse';\nconst EVENT_KEY$7 = `.${DATA_KEY$7}`;\nconst DATA_API_KEY$4 = '.data-api';\nconst EVENT_SHOW$6 = `show${EVENT_KEY$7}`;\nconst EVENT_SHOWN$6 = `shown${EVENT_KEY$7}`;\nconst EVENT_HIDE$6 = `hide${EVENT_KEY$7}`;\nconst EVENT_HIDDEN$6 = `hidden${EVENT_KEY$7}`;\nconst EVENT_CLICK_DATA_API$4 = `click${EVENT_KEY$7}${DATA_API_KEY$4}`;\nconst CLASS_NAME_SHOW$7 = 'show';\nconst CLASS_NAME_COLLAPSE = 'collapse';\nconst CLASS_NAME_COLLAPSING = 'collapsing';\nconst CLASS_NAME_COLLAPSED = 'collapsed';\nconst CLASS_NAME_DEEPER_CHILDREN = `:scope .${CLASS_NAME_COLLAPSE} .${CLASS_NAME_COLLAPSE}`;\nconst CLASS_NAME_HORIZONTAL = 'collapse-horizontal';\nconst WIDTH = 'width';\nconst HEIGHT = 'height';\nconst SELECTOR_ACTIVES = '.collapse.show, .collapse.collapsing';\nconst SELECTOR_DATA_TOGGLE$4 = '[data-bs-toggle=\"collapse\"]';\nconst Default$a = {\n parent: null,\n toggle: true\n};\nconst DefaultType$a = {\n parent: '(null|element)',\n toggle: 'boolean'\n};\n\n/**\n * Class definition\n */\n\nclass Collapse extends BaseComponent {\n constructor(element, config) {\n super(element, config);\n this._isTransitioning = false;\n this._triggerArray = [];\n const toggleList = SelectorEngine.find(SELECTOR_DATA_TOGGLE$4);\n for (const elem of toggleList) {\n const selector = SelectorEngine.getSelectorFromElement(elem);\n const filterElement = SelectorEngine.find(selector).filter(foundElement => foundElement === this._element);\n if (selector !== null && filterElement.length) {\n this._triggerArray.push(elem);\n }\n }\n this._initializeChildren();\n if (!this._config.parent) {\n this._addAriaAndCollapsedClass(this._triggerArray, this._isShown());\n }\n if (this._config.toggle) {\n this.toggle();\n }\n }\n\n // Getters\n static get Default() {\n return 
Default$a;\n }\n static get DefaultType() {\n return DefaultType$a;\n }\n static get NAME() {\n return NAME$b;\n }\n\n // Public\n toggle() {\n if (this._isShown()) {\n this.hide();\n } else {\n this.show();\n }\n }\n show() {\n if (this._isTransitioning || this._isShown()) {\n return;\n }\n let activeChildren = [];\n\n // find active children\n if (this._config.parent) {\n activeChildren = this._getFirstLevelChildren(SELECTOR_ACTIVES).filter(element => element !== this._element).map(element => Collapse.getOrCreateInstance(element, {\n toggle: false\n }));\n }\n if (activeChildren.length && activeChildren[0]._isTransitioning) {\n return;\n }\n const startEvent = EventHandler.trigger(this._element, EVENT_SHOW$6);\n if (startEvent.defaultPrevented) {\n return;\n }\n for (const activeInstance of activeChildren) {\n activeInstance.hide();\n }\n const dimension = this._getDimension();\n this._element.classList.remove(CLASS_NAME_COLLAPSE);\n this._element.classList.add(CLASS_NAME_COLLAPSING);\n this._element.style[dimension] = 0;\n this._addAriaAndCollapsedClass(this._triggerArray, true);\n this._isTransitioning = true;\n const complete = () => {\n this._isTransitioning = false;\n this._element.classList.remove(CLASS_NAME_COLLAPSING);\n this._element.classList.add(CLASS_NAME_COLLAPSE, CLASS_NAME_SHOW$7);\n this._element.style[dimension] = '';\n EventHandler.trigger(this._element, EVENT_SHOWN$6);\n };\n const capitalizedDimension = dimension[0].toUpperCase() + dimension.slice(1);\n const scrollSize = `scroll${capitalizedDimension}`;\n this._queueCallback(complete, this._element, true);\n this._element.style[dimension] = `${this._element[scrollSize]}px`;\n }\n hide() {\n if (this._isTransitioning || !this._isShown()) {\n return;\n }\n const startEvent = EventHandler.trigger(this._element, EVENT_HIDE$6);\n if (startEvent.defaultPrevented) {\n return;\n }\n const dimension = this._getDimension();\n this._element.style[dimension] = `${this._element.getBoundingClientRect()[dimension]}px`;\n reflow(this._element);\n this._element.classList.add(CLASS_NAME_COLLAPSING);\n this._element.classList.remove(CLASS_NAME_COLLAPSE, CLASS_NAME_SHOW$7);\n for (const trigger of this._triggerArray) {\n const element = SelectorEngine.getElementFromSelector(trigger);\n if (element && !this._isShown(element)) {\n this._addAriaAndCollapsedClass([trigger], false);\n }\n }\n this._isTransitioning = true;\n const complete = () => {\n this._isTransitioning = false;\n this._element.classList.remove(CLASS_NAME_COLLAPSING);\n this._element.classList.add(CLASS_NAME_COLLAPSE);\n EventHandler.trigger(this._element, EVENT_HIDDEN$6);\n };\n this._element.style[dimension] = '';\n this._queueCallback(complete, this._element, true);\n }\n _isShown(element = this._element) {\n return element.classList.contains(CLASS_NAME_SHOW$7);\n }\n\n // Private\n _configAfterMerge(config) {\n config.toggle = Boolean(config.toggle); // Coerce string values\n config.parent = getElement(config.parent);\n return config;\n }\n _getDimension() {\n return this._element.classList.contains(CLASS_NAME_HORIZONTAL) ? 
WIDTH : HEIGHT;\n }\n _initializeChildren() {\n if (!this._config.parent) {\n return;\n }\n const children = this._getFirstLevelChildren(SELECTOR_DATA_TOGGLE$4);\n for (const element of children) {\n const selected = SelectorEngine.getElementFromSelector(element);\n if (selected) {\n this._addAriaAndCollapsedClass([element], this._isShown(selected));\n }\n }\n }\n _getFirstLevelChildren(selector) {\n const children = SelectorEngine.find(CLASS_NAME_DEEPER_CHILDREN, this._config.parent);\n // remove children if greater depth\n return SelectorEngine.find(selector, this._config.parent).filter(element => !children.includes(element));\n }\n _addAriaAndCollapsedClass(triggerArray, isOpen) {\n if (!triggerArray.length) {\n return;\n }\n for (const element of triggerArray) {\n element.classList.toggle(CLASS_NAME_COLLAPSED, !isOpen);\n element.setAttribute('aria-expanded', isOpen);\n }\n }\n\n // Static\n static jQueryInterface(config) {\n const _config = {};\n if (typeof config === 'string' && /show|hide/.test(config)) {\n _config.toggle = false;\n }\n return this.each(function () {\n const data = Collapse.getOrCreateInstance(this, _config);\n if (typeof config === 'string') {\n if (typeof data[config] === 'undefined') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n data[config]();\n }\n });\n }\n}\n\n/**\n * Data API implementation\n */\n\nEventHandler.on(document, EVENT_CLICK_DATA_API$4, SELECTOR_DATA_TOGGLE$4, function (event) {\n // preventDefault only for elements (which change the URL) not inside the collapsible element\n if (event.target.tagName === 'A' || event.delegateTarget && event.delegateTarget.tagName === 'A') {\n event.preventDefault();\n }\n for (const element of SelectorEngine.getMultipleElementsFromSelector(this)) {\n Collapse.getOrCreateInstance(element, {\n toggle: false\n }).toggle();\n }\n});\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Collapse);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap dropdown.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$a = 'dropdown';\nconst DATA_KEY$6 = 'bs.dropdown';\nconst EVENT_KEY$6 = `.${DATA_KEY$6}`;\nconst DATA_API_KEY$3 = '.data-api';\nconst ESCAPE_KEY$2 = 'Escape';\nconst TAB_KEY$1 = 'Tab';\nconst ARROW_UP_KEY$1 = 'ArrowUp';\nconst ARROW_DOWN_KEY$1 = 'ArrowDown';\nconst RIGHT_MOUSE_BUTTON = 2; // MouseEvent.button value for the secondary button, usually the right button\n\nconst EVENT_HIDE$5 = `hide${EVENT_KEY$6}`;\nconst EVENT_HIDDEN$5 = `hidden${EVENT_KEY$6}`;\nconst EVENT_SHOW$5 = `show${EVENT_KEY$6}`;\nconst EVENT_SHOWN$5 = `shown${EVENT_KEY$6}`;\nconst EVENT_CLICK_DATA_API$3 = `click${EVENT_KEY$6}${DATA_API_KEY$3}`;\nconst EVENT_KEYDOWN_DATA_API = `keydown${EVENT_KEY$6}${DATA_API_KEY$3}`;\nconst EVENT_KEYUP_DATA_API = `keyup${EVENT_KEY$6}${DATA_API_KEY$3}`;\nconst CLASS_NAME_SHOW$6 = 'show';\nconst CLASS_NAME_DROPUP = 'dropup';\nconst CLASS_NAME_DROPEND = 'dropend';\nconst CLASS_NAME_DROPSTART = 'dropstart';\nconst CLASS_NAME_DROPUP_CENTER = 'dropup-center';\nconst CLASS_NAME_DROPDOWN_CENTER = 'dropdown-center';\nconst SELECTOR_DATA_TOGGLE$3 = '[data-bs-toggle=\"dropdown\"]:not(.disabled):not(:disabled)';\nconst SELECTOR_DATA_TOGGLE_SHOWN = `${SELECTOR_DATA_TOGGLE$3}.${CLASS_NAME_SHOW$6}`;\nconst SELECTOR_MENU = '.dropdown-menu';\nconst SELECTOR_NAVBAR = '.navbar';\nconst SELECTOR_NAVBAR_NAV = 
'.navbar-nav';\nconst SELECTOR_VISIBLE_ITEMS = '.dropdown-menu .dropdown-item:not(.disabled):not(:disabled)';\nconst PLACEMENT_TOP = isRTL() ? 'top-end' : 'top-start';\nconst PLACEMENT_TOPEND = isRTL() ? 'top-start' : 'top-end';\nconst PLACEMENT_BOTTOM = isRTL() ? 'bottom-end' : 'bottom-start';\nconst PLACEMENT_BOTTOMEND = isRTL() ? 'bottom-start' : 'bottom-end';\nconst PLACEMENT_RIGHT = isRTL() ? 'left-start' : 'right-start';\nconst PLACEMENT_LEFT = isRTL() ? 'right-start' : 'left-start';\nconst PLACEMENT_TOPCENTER = 'top';\nconst PLACEMENT_BOTTOMCENTER = 'bottom';\nconst Default$9 = {\n autoClose: true,\n boundary: 'clippingParents',\n display: 'dynamic',\n offset: [0, 2],\n popperConfig: null,\n reference: 'toggle'\n};\nconst DefaultType$9 = {\n autoClose: '(boolean|string)',\n boundary: '(string|element)',\n display: 'string',\n offset: '(array|string|function)',\n popperConfig: '(null|object|function)',\n reference: '(string|element|object)'\n};\n\n/**\n * Class definition\n */\n\nclass Dropdown extends BaseComponent {\n constructor(element, config) {\n super(element, config);\n this._popper = null;\n this._parent = this._element.parentNode; // dropdown wrapper\n // TODO: v6 revert #37011 & change markup https://getbootstrap.com/docs/5.3/forms/input-group/\n this._menu = SelectorEngine.next(this._element, SELECTOR_MENU)[0] || SelectorEngine.prev(this._element, SELECTOR_MENU)[0] || SelectorEngine.findOne(SELECTOR_MENU, this._parent);\n this._inNavbar = this._detectNavbar();\n }\n\n // Getters\n static get Default() {\n return Default$9;\n }\n static get DefaultType() {\n return DefaultType$9;\n }\n static get NAME() {\n return NAME$a;\n }\n\n // Public\n toggle() {\n return this._isShown() ? this.hide() : this.show();\n }\n show() {\n if (isDisabled(this._element) || this._isShown()) {\n return;\n }\n const relatedTarget = {\n relatedTarget: this._element\n };\n const showEvent = EventHandler.trigger(this._element, EVENT_SHOW$5, relatedTarget);\n if (showEvent.defaultPrevented) {\n return;\n }\n this._createPopper();\n\n // If this is a touch-enabled device we add extra\n // empty mouseover listeners to the body's immediate children;\n // only needed because of broken event delegation on iOS\n // https://www.quirksmode.org/blog/archives/2014/02/mouse_event_bub.html\n if ('ontouchstart' in document.documentElement && !this._parent.closest(SELECTOR_NAVBAR_NAV)) {\n for (const element of [].concat(...document.body.children)) {\n EventHandler.on(element, 'mouseover', noop);\n }\n }\n this._element.focus();\n this._element.setAttribute('aria-expanded', true);\n this._menu.classList.add(CLASS_NAME_SHOW$6);\n this._element.classList.add(CLASS_NAME_SHOW$6);\n EventHandler.trigger(this._element, EVENT_SHOWN$5, relatedTarget);\n }\n hide() {\n if (isDisabled(this._element) || !this._isShown()) {\n return;\n }\n const relatedTarget = {\n relatedTarget: this._element\n };\n this._completeHide(relatedTarget);\n }\n dispose() {\n if (this._popper) {\n this._popper.destroy();\n }\n super.dispose();\n }\n update() {\n this._inNavbar = this._detectNavbar();\n if (this._popper) {\n this._popper.update();\n }\n }\n\n // Private\n _completeHide(relatedTarget) {\n const hideEvent = EventHandler.trigger(this._element, EVENT_HIDE$5, relatedTarget);\n if (hideEvent.defaultPrevented) {\n return;\n }\n\n // If this is a touch-enabled device we remove the extra\n // empty mouseover listeners we added for iOS support\n if ('ontouchstart' in document.documentElement) {\n for (const element of 
[].concat(...document.body.children)) {\n EventHandler.off(element, 'mouseover', noop);\n }\n }\n if (this._popper) {\n this._popper.destroy();\n }\n this._menu.classList.remove(CLASS_NAME_SHOW$6);\n this._element.classList.remove(CLASS_NAME_SHOW$6);\n this._element.setAttribute('aria-expanded', 'false');\n Manipulator.removeDataAttribute(this._menu, 'popper');\n EventHandler.trigger(this._element, EVENT_HIDDEN$5, relatedTarget);\n }\n _getConfig(config) {\n config = super._getConfig(config);\n if (typeof config.reference === 'object' && !isElement(config.reference) && typeof config.reference.getBoundingClientRect !== 'function') {\n // Popper virtual elements require a getBoundingClientRect method\n throw new TypeError(`${NAME$a.toUpperCase()}: Option \"reference\" provided type \"object\" without a required \"getBoundingClientRect\" method.`);\n }\n return config;\n }\n _createPopper() {\n if (typeof Popper === 'undefined') {\n throw new TypeError('Bootstrap\\'s dropdowns require Popper (https://popper.js.org)');\n }\n let referenceElement = this._element;\n if (this._config.reference === 'parent') {\n referenceElement = this._parent;\n } else if (isElement(this._config.reference)) {\n referenceElement = getElement(this._config.reference);\n } else if (typeof this._config.reference === 'object') {\n referenceElement = this._config.reference;\n }\n const popperConfig = this._getPopperConfig();\n this._popper = Popper.createPopper(referenceElement, this._menu, popperConfig);\n }\n _isShown() {\n return this._menu.classList.contains(CLASS_NAME_SHOW$6);\n }\n _getPlacement() {\n const parentDropdown = this._parent;\n if (parentDropdown.classList.contains(CLASS_NAME_DROPEND)) {\n return PLACEMENT_RIGHT;\n }\n if (parentDropdown.classList.contains(CLASS_NAME_DROPSTART)) {\n return PLACEMENT_LEFT;\n }\n if (parentDropdown.classList.contains(CLASS_NAME_DROPUP_CENTER)) {\n return PLACEMENT_TOPCENTER;\n }\n if (parentDropdown.classList.contains(CLASS_NAME_DROPDOWN_CENTER)) {\n return PLACEMENT_BOTTOMCENTER;\n }\n\n // We need to trim the value because custom properties can also include spaces\n const isEnd = getComputedStyle(this._menu).getPropertyValue('--bs-position').trim() === 'end';\n if (parentDropdown.classList.contains(CLASS_NAME_DROPUP)) {\n return isEnd ? PLACEMENT_TOPEND : PLACEMENT_TOP;\n }\n return isEnd ? 
PLACEMENT_BOTTOMEND : PLACEMENT_BOTTOM;\n }\n _detectNavbar() {\n return this._element.closest(SELECTOR_NAVBAR) !== null;\n }\n _getOffset() {\n const {\n offset\n } = this._config;\n if (typeof offset === 'string') {\n return offset.split(',').map(value => Number.parseInt(value, 10));\n }\n if (typeof offset === 'function') {\n return popperData => offset(popperData, this._element);\n }\n return offset;\n }\n _getPopperConfig() {\n const defaultBsPopperConfig = {\n placement: this._getPlacement(),\n modifiers: [{\n name: 'preventOverflow',\n options: {\n boundary: this._config.boundary\n }\n }, {\n name: 'offset',\n options: {\n offset: this._getOffset()\n }\n }]\n };\n\n // Disable Popper if we have a static display or Dropdown is in Navbar\n if (this._inNavbar || this._config.display === 'static') {\n Manipulator.setDataAttribute(this._menu, 'popper', 'static'); // TODO: v6 remove\n defaultBsPopperConfig.modifiers = [{\n name: 'applyStyles',\n enabled: false\n }];\n }\n return {\n ...defaultBsPopperConfig,\n ...execute(this._config.popperConfig, [defaultBsPopperConfig])\n };\n }\n _selectMenuItem({\n key,\n target\n }) {\n const items = SelectorEngine.find(SELECTOR_VISIBLE_ITEMS, this._menu).filter(element => isVisible(element));\n if (!items.length) {\n return;\n }\n\n // if target isn't included in items (e.g. when expanding the dropdown)\n // allow cycling to get the last item in case key equals ARROW_UP_KEY\n getNextActiveElement(items, target, key === ARROW_DOWN_KEY$1, !items.includes(target)).focus();\n }\n\n // Static\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Dropdown.getOrCreateInstance(this, config);\n if (typeof config !== 'string') {\n return;\n }\n if (typeof data[config] === 'undefined') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n data[config]();\n });\n }\n static clearMenus(event) {\n if (event.button === RIGHT_MOUSE_BUTTON || event.type === 'keyup' && event.key !== TAB_KEY$1) {\n return;\n }\n const openToggles = SelectorEngine.find(SELECTOR_DATA_TOGGLE_SHOWN);\n for (const toggle of openToggles) {\n const context = Dropdown.getInstance(toggle);\n if (!context || context._config.autoClose === false) {\n continue;\n }\n const composedPath = event.composedPath();\n const isMenuTarget = composedPath.includes(context._menu);\n if (composedPath.includes(context._element) || context._config.autoClose === 'inside' && !isMenuTarget || context._config.autoClose === 'outside' && isMenuTarget) {\n continue;\n }\n\n // Tab navigation through the dropdown menu or events from contained inputs shouldn't close the menu\n if (context._menu.contains(event.target) && (event.type === 'keyup' && event.key === TAB_KEY$1 || /input|select|option|textarea|form/i.test(event.target.tagName))) {\n continue;\n }\n const relatedTarget = {\n relatedTarget: context._element\n };\n if (event.type === 'click') {\n relatedTarget.clickEvent = event;\n }\n context._completeHide(relatedTarget);\n }\n }\n static dataApiKeydownHandler(event) {\n // If not an UP | DOWN | ESCAPE key => not a dropdown command\n // If input/textarea && if key is other than ESCAPE => not a dropdown command\n\n const isInput = /input|textarea/i.test(event.target.tagName);\n const isEscapeEvent = event.key === ESCAPE_KEY$2;\n const isUpOrDownEvent = [ARROW_UP_KEY$1, ARROW_DOWN_KEY$1].includes(event.key);\n if (!isUpOrDownEvent && !isEscapeEvent) {\n return;\n }\n if (isInput && !isEscapeEvent) {\n return;\n }\n event.preventDefault();\n\n // TODO: v6 revert #37011 & change 
markup https://getbootstrap.com/docs/5.3/forms/input-group/\n const getToggleButton = this.matches(SELECTOR_DATA_TOGGLE$3) ? this : SelectorEngine.prev(this, SELECTOR_DATA_TOGGLE$3)[0] || SelectorEngine.next(this, SELECTOR_DATA_TOGGLE$3)[0] || SelectorEngine.findOne(SELECTOR_DATA_TOGGLE$3, event.delegateTarget.parentNode);\n const instance = Dropdown.getOrCreateInstance(getToggleButton);\n if (isUpOrDownEvent) {\n event.stopPropagation();\n instance.show();\n instance._selectMenuItem(event);\n return;\n }\n if (instance._isShown()) {\n // else is escape and we check if it is shown\n event.stopPropagation();\n instance.hide();\n getToggleButton.focus();\n }\n }\n}\n\n/**\n * Data API implementation\n */\n\nEventHandler.on(document, EVENT_KEYDOWN_DATA_API, SELECTOR_DATA_TOGGLE$3, Dropdown.dataApiKeydownHandler);\nEventHandler.on(document, EVENT_KEYDOWN_DATA_API, SELECTOR_MENU, Dropdown.dataApiKeydownHandler);\nEventHandler.on(document, EVENT_CLICK_DATA_API$3, Dropdown.clearMenus);\nEventHandler.on(document, EVENT_KEYUP_DATA_API, Dropdown.clearMenus);\nEventHandler.on(document, EVENT_CLICK_DATA_API$3, SELECTOR_DATA_TOGGLE$3, function (event) {\n event.preventDefault();\n Dropdown.getOrCreateInstance(this).toggle();\n});\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Dropdown);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/backdrop.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$9 = 'backdrop';\nconst CLASS_NAME_FADE$4 = 'fade';\nconst CLASS_NAME_SHOW$5 = 'show';\nconst EVENT_MOUSEDOWN = `mousedown.bs.${NAME$9}`;\nconst Default$8 = {\n className: 'modal-backdrop',\n clickCallback: null,\n isAnimated: false,\n isVisible: true,\n // if false, we use the backdrop helper without adding any element to the dom\n rootElement: 'body' // give the choice to place backdrop under different elements\n};\nconst DefaultType$8 = {\n className: 'string',\n clickCallback: '(function|null)',\n isAnimated: 'boolean',\n isVisible: 'boolean',\n rootElement: '(element|string)'\n};\n\n/**\n * Class definition\n */\n\nclass Backdrop extends Config {\n constructor(config) {\n super();\n this._config = this._getConfig(config);\n this._isAppended = false;\n this._element = null;\n }\n\n // Getters\n static get Default() {\n return Default$8;\n }\n static get DefaultType() {\n return DefaultType$8;\n }\n static get NAME() {\n return NAME$9;\n }\n\n // Public\n show(callback) {\n if (!this._config.isVisible) {\n execute(callback);\n return;\n }\n this._append();\n const element = this._getElement();\n if (this._config.isAnimated) {\n reflow(element);\n }\n element.classList.add(CLASS_NAME_SHOW$5);\n this._emulateAnimation(() => {\n execute(callback);\n });\n }\n hide(callback) {\n if (!this._config.isVisible) {\n execute(callback);\n return;\n }\n this._getElement().classList.remove(CLASS_NAME_SHOW$5);\n this._emulateAnimation(() => {\n this.dispose();\n execute(callback);\n });\n }\n dispose() {\n if (!this._isAppended) {\n return;\n }\n EventHandler.off(this._element, EVENT_MOUSEDOWN);\n this._element.remove();\n this._isAppended = false;\n }\n\n // Private\n _getElement() {\n if (!this._element) {\n const backdrop = document.createElement('div');\n backdrop.className = this._config.className;\n if (this._config.isAnimated) {\n backdrop.classList.add(CLASS_NAME_FADE$4);\n }\n this._element = backdrop;\n }\n 
return this._element;\n }\n _configAfterMerge(config) {\n // use getElement() with the default \"body\" to get a fresh Element on each instantiation\n config.rootElement = getElement(config.rootElement);\n return config;\n }\n _append() {\n if (this._isAppended) {\n return;\n }\n const element = this._getElement();\n this._config.rootElement.append(element);\n EventHandler.on(element, EVENT_MOUSEDOWN, () => {\n execute(this._config.clickCallback);\n });\n this._isAppended = true;\n }\n _emulateAnimation(callback) {\n executeAfterTransition(callback, this._getElement(), this._config.isAnimated);\n }\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/focustrap.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$8 = 'focustrap';\nconst DATA_KEY$5 = 'bs.focustrap';\nconst EVENT_KEY$5 = `.${DATA_KEY$5}`;\nconst EVENT_FOCUSIN$2 = `focusin${EVENT_KEY$5}`;\nconst EVENT_KEYDOWN_TAB = `keydown.tab${EVENT_KEY$5}`;\nconst TAB_KEY = 'Tab';\nconst TAB_NAV_FORWARD = 'forward';\nconst TAB_NAV_BACKWARD = 'backward';\nconst Default$7 = {\n autofocus: true,\n trapElement: null // The element to trap focus inside of\n};\nconst DefaultType$7 = {\n autofocus: 'boolean',\n trapElement: 'element'\n};\n\n/**\n * Class definition\n */\n\nclass FocusTrap extends Config {\n constructor(config) {\n super();\n this._config = this._getConfig(config);\n this._isActive = false;\n this._lastTabNavDirection = null;\n }\n\n // Getters\n static get Default() {\n return Default$7;\n }\n static get DefaultType() {\n return DefaultType$7;\n }\n static get NAME() {\n return NAME$8;\n }\n\n // Public\n activate() {\n if (this._isActive) {\n return;\n }\n if (this._config.autofocus) {\n this._config.trapElement.focus();\n }\n EventHandler.off(document, EVENT_KEY$5); // guard against infinite focus loop\n EventHandler.on(document, EVENT_FOCUSIN$2, event => this._handleFocusin(event));\n EventHandler.on(document, EVENT_KEYDOWN_TAB, event => this._handleKeydown(event));\n this._isActive = true;\n }\n deactivate() {\n if (!this._isActive) {\n return;\n }\n this._isActive = false;\n EventHandler.off(document, EVENT_KEY$5);\n }\n\n // Private\n _handleFocusin(event) {\n const {\n trapElement\n } = this._config;\n if (event.target === document || event.target === trapElement || trapElement.contains(event.target)) {\n return;\n }\n const elements = SelectorEngine.focusableChildren(trapElement);\n if (elements.length === 0) {\n trapElement.focus();\n } else if (this._lastTabNavDirection === TAB_NAV_BACKWARD) {\n elements[elements.length - 1].focus();\n } else {\n elements[0].focus();\n }\n }\n _handleKeydown(event) {\n if (event.key !== TAB_KEY) {\n return;\n }\n this._lastTabNavDirection = event.shiftKey ? 
TAB_NAV_BACKWARD : TAB_NAV_FORWARD;\n }\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/scrollBar.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst SELECTOR_FIXED_CONTENT = '.fixed-top, .fixed-bottom, .is-fixed, .sticky-top';\nconst SELECTOR_STICKY_CONTENT = '.sticky-top';\nconst PROPERTY_PADDING = 'padding-right';\nconst PROPERTY_MARGIN = 'margin-right';\n\n/**\n * Class definition\n */\n\nclass ScrollBarHelper {\n constructor() {\n this._element = document.body;\n }\n\n // Public\n getWidth() {\n // https://developer.mozilla.org/en-US/docs/Web/API/Window/innerWidth#usage_notes\n const documentWidth = document.documentElement.clientWidth;\n return Math.abs(window.innerWidth - documentWidth);\n }\n hide() {\n const width = this.getWidth();\n this._disableOverFlow();\n // give padding to element to balance the hidden scrollbar width\n this._setElementAttributes(this._element, PROPERTY_PADDING, calculatedValue => calculatedValue + width);\n // trick: We adjust positive paddingRight and negative marginRight to sticky-top elements to keep showing fullwidth\n this._setElementAttributes(SELECTOR_FIXED_CONTENT, PROPERTY_PADDING, calculatedValue => calculatedValue + width);\n this._setElementAttributes(SELECTOR_STICKY_CONTENT, PROPERTY_MARGIN, calculatedValue => calculatedValue - width);\n }\n reset() {\n this._resetElementAttributes(this._element, 'overflow');\n this._resetElementAttributes(this._element, PROPERTY_PADDING);\n this._resetElementAttributes(SELECTOR_FIXED_CONTENT, PROPERTY_PADDING);\n this._resetElementAttributes(SELECTOR_STICKY_CONTENT, PROPERTY_MARGIN);\n }\n isOverflowing() {\n return this.getWidth() > 0;\n }\n\n // Private\n _disableOverFlow() {\n this._saveInitialAttribute(this._element, 'overflow');\n this._element.style.overflow = 'hidden';\n }\n _setElementAttributes(selector, styleProperty, callback) {\n const scrollbarWidth = this.getWidth();\n const manipulationCallBack = element => {\n if (element !== this._element && window.innerWidth > element.clientWidth + scrollbarWidth) {\n return;\n }\n this._saveInitialAttribute(element, styleProperty);\n const calculatedValue = window.getComputedStyle(element).getPropertyValue(styleProperty);\n element.style.setProperty(styleProperty, `${callback(Number.parseFloat(calculatedValue))}px`);\n };\n this._applyManipulationCallback(selector, manipulationCallBack);\n }\n _saveInitialAttribute(element, styleProperty) {\n const actualValue = element.style.getPropertyValue(styleProperty);\n if (actualValue) {\n Manipulator.setDataAttribute(element, styleProperty, actualValue);\n }\n }\n _resetElementAttributes(selector, styleProperty) {\n const manipulationCallBack = element => {\n const value = Manipulator.getDataAttribute(element, styleProperty);\n // We only want to remove the property if the value is `null`; the value can also be zero\n if (value === null) {\n element.style.removeProperty(styleProperty);\n return;\n }\n Manipulator.removeDataAttribute(element, styleProperty);\n element.style.setProperty(styleProperty, value);\n };\n this._applyManipulationCallback(selector, manipulationCallBack);\n }\n _applyManipulationCallback(selector, callBack) {\n if (isElement(selector)) {\n callBack(selector);\n return;\n }\n for (const sel of SelectorEngine.find(selector, this._element)) {\n callBack(sel);\n }\n }\n}\n\n/**\n * 
--------------------------------------------------------------------------\n * Bootstrap modal.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$7 = 'modal';\nconst DATA_KEY$4 = 'bs.modal';\nconst EVENT_KEY$4 = `.${DATA_KEY$4}`;\nconst DATA_API_KEY$2 = '.data-api';\nconst ESCAPE_KEY$1 = 'Escape';\nconst EVENT_HIDE$4 = `hide${EVENT_KEY$4}`;\nconst EVENT_HIDE_PREVENTED$1 = `hidePrevented${EVENT_KEY$4}`;\nconst EVENT_HIDDEN$4 = `hidden${EVENT_KEY$4}`;\nconst EVENT_SHOW$4 = `show${EVENT_KEY$4}`;\nconst EVENT_SHOWN$4 = `shown${EVENT_KEY$4}`;\nconst EVENT_RESIZE$1 = `resize${EVENT_KEY$4}`;\nconst EVENT_CLICK_DISMISS = `click.dismiss${EVENT_KEY$4}`;\nconst EVENT_MOUSEDOWN_DISMISS = `mousedown.dismiss${EVENT_KEY$4}`;\nconst EVENT_KEYDOWN_DISMISS$1 = `keydown.dismiss${EVENT_KEY$4}`;\nconst EVENT_CLICK_DATA_API$2 = `click${EVENT_KEY$4}${DATA_API_KEY$2}`;\nconst CLASS_NAME_OPEN = 'modal-open';\nconst CLASS_NAME_FADE$3 = 'fade';\nconst CLASS_NAME_SHOW$4 = 'show';\nconst CLASS_NAME_STATIC = 'modal-static';\nconst OPEN_SELECTOR$1 = '.modal.show';\nconst SELECTOR_DIALOG = '.modal-dialog';\nconst SELECTOR_MODAL_BODY = '.modal-body';\nconst SELECTOR_DATA_TOGGLE$2 = '[data-bs-toggle=\"modal\"]';\nconst Default$6 = {\n backdrop: true,\n focus: true,\n keyboard: true\n};\nconst DefaultType$6 = {\n backdrop: '(boolean|string)',\n focus: 'boolean',\n keyboard: 'boolean'\n};\n\n/**\n * Class definition\n */\n\nclass Modal extends BaseComponent {\n constructor(element, config) {\n super(element, config);\n this._dialog = SelectorEngine.findOne(SELECTOR_DIALOG, this._element);\n this._backdrop = this._initializeBackDrop();\n this._focustrap = this._initializeFocusTrap();\n this._isShown = false;\n this._isTransitioning = false;\n this._scrollBar = new ScrollBarHelper();\n this._addEventListeners();\n }\n\n // Getters\n static get Default() {\n return Default$6;\n }\n static get DefaultType() {\n return DefaultType$6;\n }\n static get NAME() {\n return NAME$7;\n }\n\n // Public\n toggle(relatedTarget) {\n return this._isShown ? 
this.hide() : this.show(relatedTarget);\n }\n show(relatedTarget) {\n if (this._isShown || this._isTransitioning) {\n return;\n }\n const showEvent = EventHandler.trigger(this._element, EVENT_SHOW$4, {\n relatedTarget\n });\n if (showEvent.defaultPrevented) {\n return;\n }\n this._isShown = true;\n this._isTransitioning = true;\n this._scrollBar.hide();\n document.body.classList.add(CLASS_NAME_OPEN);\n this._adjustDialog();\n this._backdrop.show(() => this._showElement(relatedTarget));\n }\n hide() {\n if (!this._isShown || this._isTransitioning) {\n return;\n }\n const hideEvent = EventHandler.trigger(this._element, EVENT_HIDE$4);\n if (hideEvent.defaultPrevented) {\n return;\n }\n this._isShown = false;\n this._isTransitioning = true;\n this._focustrap.deactivate();\n this._element.classList.remove(CLASS_NAME_SHOW$4);\n this._queueCallback(() => this._hideModal(), this._element, this._isAnimated());\n }\n dispose() {\n EventHandler.off(window, EVENT_KEY$4);\n EventHandler.off(this._dialog, EVENT_KEY$4);\n this._backdrop.dispose();\n this._focustrap.deactivate();\n super.dispose();\n }\n handleUpdate() {\n this._adjustDialog();\n }\n\n // Private\n _initializeBackDrop() {\n return new Backdrop({\n isVisible: Boolean(this._config.backdrop),\n // 'static' option will be translated to true, and booleans will keep their value,\n isAnimated: this._isAnimated()\n });\n }\n _initializeFocusTrap() {\n return new FocusTrap({\n trapElement: this._element\n });\n }\n _showElement(relatedTarget) {\n // try to append dynamic modal\n if (!document.body.contains(this._element)) {\n document.body.append(this._element);\n }\n this._element.style.display = 'block';\n this._element.removeAttribute('aria-hidden');\n this._element.setAttribute('aria-modal', true);\n this._element.setAttribute('role', 'dialog');\n this._element.scrollTop = 0;\n const modalBody = SelectorEngine.findOne(SELECTOR_MODAL_BODY, this._dialog);\n if (modalBody) {\n modalBody.scrollTop = 0;\n }\n reflow(this._element);\n this._element.classList.add(CLASS_NAME_SHOW$4);\n const transitionComplete = () => {\n if (this._config.focus) {\n this._focustrap.activate();\n }\n this._isTransitioning = false;\n EventHandler.trigger(this._element, EVENT_SHOWN$4, {\n relatedTarget\n });\n };\n this._queueCallback(transitionComplete, this._dialog, this._isAnimated());\n }\n _addEventListeners() {\n EventHandler.on(this._element, EVENT_KEYDOWN_DISMISS$1, event => {\n if (event.key !== ESCAPE_KEY$1) {\n return;\n }\n if (this._config.keyboard) {\n this.hide();\n return;\n }\n this._triggerBackdropTransition();\n });\n EventHandler.on(window, EVENT_RESIZE$1, () => {\n if (this._isShown && !this._isTransitioning) {\n this._adjustDialog();\n }\n });\n EventHandler.on(this._element, EVENT_MOUSEDOWN_DISMISS, event => {\n // a bad trick to segregate clicks that may start inside dialog but end outside, and avoid listen to scrollbar clicks\n EventHandler.one(this._element, EVENT_CLICK_DISMISS, event2 => {\n if (this._element !== event.target || this._element !== event2.target) {\n return;\n }\n if (this._config.backdrop === 'static') {\n this._triggerBackdropTransition();\n return;\n }\n if (this._config.backdrop) {\n this.hide();\n }\n });\n });\n }\n _hideModal() {\n this._element.style.display = 'none';\n this._element.setAttribute('aria-hidden', true);\n this._element.removeAttribute('aria-modal');\n this._element.removeAttribute('role');\n this._isTransitioning = false;\n this._backdrop.hide(() => {\n document.body.classList.remove(CLASS_NAME_OPEN);\n 
this._resetAdjustments();\n this._scrollBar.reset();\n EventHandler.trigger(this._element, EVENT_HIDDEN$4);\n });\n }\n _isAnimated() {\n return this._element.classList.contains(CLASS_NAME_FADE$3);\n }\n _triggerBackdropTransition() {\n const hideEvent = EventHandler.trigger(this._element, EVENT_HIDE_PREVENTED$1);\n if (hideEvent.defaultPrevented) {\n return;\n }\n const isModalOverflowing = this._element.scrollHeight > document.documentElement.clientHeight;\n const initialOverflowY = this._element.style.overflowY;\n // return if the following background transition hasn't yet completed\n if (initialOverflowY === 'hidden' || this._element.classList.contains(CLASS_NAME_STATIC)) {\n return;\n }\n if (!isModalOverflowing) {\n this._element.style.overflowY = 'hidden';\n }\n this._element.classList.add(CLASS_NAME_STATIC);\n this._queueCallback(() => {\n this._element.classList.remove(CLASS_NAME_STATIC);\n this._queueCallback(() => {\n this._element.style.overflowY = initialOverflowY;\n }, this._dialog);\n }, this._dialog);\n this._element.focus();\n }\n\n /**\n * The following methods are used to handle overflowing modals\n */\n\n _adjustDialog() {\n const isModalOverflowing = this._element.scrollHeight > document.documentElement.clientHeight;\n const scrollbarWidth = this._scrollBar.getWidth();\n const isBodyOverflowing = scrollbarWidth > 0;\n if (isBodyOverflowing && !isModalOverflowing) {\n const property = isRTL() ? 'paddingLeft' : 'paddingRight';\n this._element.style[property] = `${scrollbarWidth}px`;\n }\n if (!isBodyOverflowing && isModalOverflowing) {\n const property = isRTL() ? 'paddingRight' : 'paddingLeft';\n this._element.style[property] = `${scrollbarWidth}px`;\n }\n }\n _resetAdjustments() {\n this._element.style.paddingLeft = '';\n this._element.style.paddingRight = '';\n }\n\n // Static\n static jQueryInterface(config, relatedTarget) {\n return this.each(function () {\n const data = Modal.getOrCreateInstance(this, config);\n if (typeof config !== 'string') {\n return;\n }\n if (typeof data[config] === 'undefined') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n data[config](relatedTarget);\n });\n }\n}\n\n/**\n * Data API implementation\n */\n\nEventHandler.on(document, EVENT_CLICK_DATA_API$2, SELECTOR_DATA_TOGGLE$2, function (event) {\n const target = SelectorEngine.getElementFromSelector(this);\n if (['A', 'AREA'].includes(this.tagName)) {\n event.preventDefault();\n }\n EventHandler.one(target, EVENT_SHOW$4, showEvent => {\n if (showEvent.defaultPrevented) {\n // only register focus restorer if modal will actually get shown\n return;\n }\n EventHandler.one(target, EVENT_HIDDEN$4, () => {\n if (isVisible(this)) {\n this.focus();\n }\n });\n });\n\n // avoid conflict when clicking modal toggler while another one is open\n const alreadyOpen = SelectorEngine.findOne(OPEN_SELECTOR$1);\n if (alreadyOpen) {\n Modal.getInstance(alreadyOpen).hide();\n }\n const data = Modal.getOrCreateInstance(target);\n data.toggle(this);\n});\nenableDismissTrigger(Modal);\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Modal);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap offcanvas.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$6 = 'offcanvas';\nconst DATA_KEY$3 = 'bs.offcanvas';\nconst EVENT_KEY$3 = `.${DATA_KEY$3}`;\nconst DATA_API_KEY$1 = '.data-api';\nconst 
EVENT_LOAD_DATA_API$2 = `load${EVENT_KEY$3}${DATA_API_KEY$1}`;\nconst ESCAPE_KEY = 'Escape';\nconst CLASS_NAME_SHOW$3 = 'show';\nconst CLASS_NAME_SHOWING$1 = 'showing';\nconst CLASS_NAME_HIDING = 'hiding';\nconst CLASS_NAME_BACKDROP = 'offcanvas-backdrop';\nconst OPEN_SELECTOR = '.offcanvas.show';\nconst EVENT_SHOW$3 = `show${EVENT_KEY$3}`;\nconst EVENT_SHOWN$3 = `shown${EVENT_KEY$3}`;\nconst EVENT_HIDE$3 = `hide${EVENT_KEY$3}`;\nconst EVENT_HIDE_PREVENTED = `hidePrevented${EVENT_KEY$3}`;\nconst EVENT_HIDDEN$3 = `hidden${EVENT_KEY$3}`;\nconst EVENT_RESIZE = `resize${EVENT_KEY$3}`;\nconst EVENT_CLICK_DATA_API$1 = `click${EVENT_KEY$3}${DATA_API_KEY$1}`;\nconst EVENT_KEYDOWN_DISMISS = `keydown.dismiss${EVENT_KEY$3}`;\nconst SELECTOR_DATA_TOGGLE$1 = '[data-bs-toggle=\"offcanvas\"]';\nconst Default$5 = {\n backdrop: true,\n keyboard: true,\n scroll: false\n};\nconst DefaultType$5 = {\n backdrop: '(boolean|string)',\n keyboard: 'boolean',\n scroll: 'boolean'\n};\n\n/**\n * Class definition\n */\n\nclass Offcanvas extends BaseComponent {\n constructor(element, config) {\n super(element, config);\n this._isShown = false;\n this._backdrop = this._initializeBackDrop();\n this._focustrap = this._initializeFocusTrap();\n this._addEventListeners();\n }\n\n // Getters\n static get Default() {\n return Default$5;\n }\n static get DefaultType() {\n return DefaultType$5;\n }\n static get NAME() {\n return NAME$6;\n }\n\n // Public\n toggle(relatedTarget) {\n return this._isShown ? this.hide() : this.show(relatedTarget);\n }\n show(relatedTarget) {\n if (this._isShown) {\n return;\n }\n const showEvent = EventHandler.trigger(this._element, EVENT_SHOW$3, {\n relatedTarget\n });\n if (showEvent.defaultPrevented) {\n return;\n }\n this._isShown = true;\n this._backdrop.show();\n if (!this._config.scroll) {\n new ScrollBarHelper().hide();\n }\n this._element.setAttribute('aria-modal', true);\n this._element.setAttribute('role', 'dialog');\n this._element.classList.add(CLASS_NAME_SHOWING$1);\n const completeCallBack = () => {\n if (!this._config.scroll || this._config.backdrop) {\n this._focustrap.activate();\n }\n this._element.classList.add(CLASS_NAME_SHOW$3);\n this._element.classList.remove(CLASS_NAME_SHOWING$1);\n EventHandler.trigger(this._element, EVENT_SHOWN$3, {\n relatedTarget\n });\n };\n this._queueCallback(completeCallBack, this._element, true);\n }\n hide() {\n if (!this._isShown) {\n return;\n }\n const hideEvent = EventHandler.trigger(this._element, EVENT_HIDE$3);\n if (hideEvent.defaultPrevented) {\n return;\n }\n this._focustrap.deactivate();\n this._element.blur();\n this._isShown = false;\n this._element.classList.add(CLASS_NAME_HIDING);\n this._backdrop.hide();\n const completeCallback = () => {\n this._element.classList.remove(CLASS_NAME_SHOW$3, CLASS_NAME_HIDING);\n this._element.removeAttribute('aria-modal');\n this._element.removeAttribute('role');\n if (!this._config.scroll) {\n new ScrollBarHelper().reset();\n }\n EventHandler.trigger(this._element, EVENT_HIDDEN$3);\n };\n this._queueCallback(completeCallback, this._element, true);\n }\n dispose() {\n this._backdrop.dispose();\n this._focustrap.deactivate();\n super.dispose();\n }\n\n // Private\n _initializeBackDrop() {\n const clickCallback = () => {\n if (this._config.backdrop === 'static') {\n EventHandler.trigger(this._element, EVENT_HIDE_PREVENTED);\n return;\n }\n this.hide();\n };\n\n // 'static' option will be translated to true, and booleans will keep their value\n const isVisible = Boolean(this._config.backdrop);\n return 
new Backdrop({\n className: CLASS_NAME_BACKDROP,\n isVisible,\n isAnimated: true,\n rootElement: this._element.parentNode,\n clickCallback: isVisible ? clickCallback : null\n });\n }\n _initializeFocusTrap() {\n return new FocusTrap({\n trapElement: this._element\n });\n }\n _addEventListeners() {\n EventHandler.on(this._element, EVENT_KEYDOWN_DISMISS, event => {\n if (event.key !== ESCAPE_KEY) {\n return;\n }\n if (this._config.keyboard) {\n this.hide();\n return;\n }\n EventHandler.trigger(this._element, EVENT_HIDE_PREVENTED);\n });\n }\n\n // Static\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Offcanvas.getOrCreateInstance(this, config);\n if (typeof config !== 'string') {\n return;\n }\n if (data[config] === undefined || config.startsWith('_') || config === 'constructor') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n data[config](this);\n });\n }\n}\n\n/**\n * Data API implementation\n */\n\nEventHandler.on(document, EVENT_CLICK_DATA_API$1, SELECTOR_DATA_TOGGLE$1, function (event) {\n const target = SelectorEngine.getElementFromSelector(this);\n if (['A', 'AREA'].includes(this.tagName)) {\n event.preventDefault();\n }\n if (isDisabled(this)) {\n return;\n }\n EventHandler.one(target, EVENT_HIDDEN$3, () => {\n // focus on trigger when it is closed\n if (isVisible(this)) {\n this.focus();\n }\n });\n\n // avoid conflict when clicking a toggler of an offcanvas, while another is open\n const alreadyOpen = SelectorEngine.findOne(OPEN_SELECTOR);\n if (alreadyOpen && alreadyOpen !== target) {\n Offcanvas.getInstance(alreadyOpen).hide();\n }\n const data = Offcanvas.getOrCreateInstance(target);\n data.toggle(this);\n});\nEventHandler.on(window, EVENT_LOAD_DATA_API$2, () => {\n for (const selector of SelectorEngine.find(OPEN_SELECTOR)) {\n Offcanvas.getOrCreateInstance(selector).show();\n }\n});\nEventHandler.on(window, EVENT_RESIZE, () => {\n for (const element of SelectorEngine.find('[aria-modal][class*=show][class*=offcanvas-]')) {\n if (getComputedStyle(element).position !== 'fixed') {\n Offcanvas.getOrCreateInstance(element).hide();\n }\n }\n});\nenableDismissTrigger(Offcanvas);\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Offcanvas);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/sanitizer.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n// js-docs-start allow-list\nconst ARIA_ATTRIBUTE_PATTERN = /^aria-[\\w-]*$/i;\nconst DefaultAllowlist = {\n // Global attributes allowed on any supplied element below.\n '*': ['class', 'dir', 'id', 'lang', 'role', ARIA_ATTRIBUTE_PATTERN],\n a: ['target', 'href', 'title', 'rel'],\n area: [],\n b: [],\n br: [],\n col: [],\n code: [],\n dd: [],\n div: [],\n dl: [],\n dt: [],\n em: [],\n hr: [],\n h1: [],\n h2: [],\n h3: [],\n h4: [],\n h5: [],\n h6: [],\n i: [],\n img: ['src', 'srcset', 'alt', 'title', 'width', 'height'],\n li: [],\n ol: [],\n p: [],\n pre: [],\n s: [],\n small: [],\n span: [],\n sub: [],\n sup: [],\n strong: [],\n u: [],\n ul: []\n};\n// js-docs-end allow-list\n\nconst uriAttributes = new Set(['background', 'cite', 'href', 'itemtype', 'longdesc', 'poster', 'src', 'xlink:href']);\n\n/**\n * A pattern that recognizes URLs that are safe wrt. 
XSS in URL navigation\n * contexts.\n *\n * Shout-out to Angular https://github.com/angular/angular/blob/15.2.8/packages/core/src/sanitization/url_sanitizer.ts#L38\n */\n// eslint-disable-next-line unicorn/better-regex\nconst SAFE_URL_PATTERN = /^(?!javascript:)(?:[a-z0-9+.-]+:|[^&:/?#]*(?:[/?#]|$))/i;\nconst allowedAttribute = (attribute, allowedAttributeList) => {\n const attributeName = attribute.nodeName.toLowerCase();\n if (allowedAttributeList.includes(attributeName)) {\n if (uriAttributes.has(attributeName)) {\n return Boolean(SAFE_URL_PATTERN.test(attribute.nodeValue));\n }\n return true;\n }\n\n // Check if a regular expression validates the attribute.\n return allowedAttributeList.filter(attributeRegex => attributeRegex instanceof RegExp).some(regex => regex.test(attributeName));\n};\nfunction sanitizeHtml(unsafeHtml, allowList, sanitizeFunction) {\n if (!unsafeHtml.length) {\n return unsafeHtml;\n }\n if (sanitizeFunction && typeof sanitizeFunction === 'function') {\n return sanitizeFunction(unsafeHtml);\n }\n const domParser = new window.DOMParser();\n const createdDocument = domParser.parseFromString(unsafeHtml, 'text/html');\n const elements = [].concat(...createdDocument.body.querySelectorAll('*'));\n for (const element of elements) {\n const elementName = element.nodeName.toLowerCase();\n if (!Object.keys(allowList).includes(elementName)) {\n element.remove();\n continue;\n }\n const attributeList = [].concat(...element.attributes);\n const allowedAttributes = [].concat(allowList['*'] || [], allowList[elementName] || []);\n for (const attribute of attributeList) {\n if (!allowedAttribute(attribute, allowedAttributes)) {\n element.removeAttribute(attribute.nodeName);\n }\n }\n }\n return createdDocument.body.innerHTML;\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/template-factory.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$5 = 'TemplateFactory';\nconst Default$4 = {\n allowList: DefaultAllowlist,\n content: {},\n // { selector : text , selector2 : text2 , }\n extraClass: '',\n html: false,\n sanitize: true,\n sanitizeFn: null,\n template: '
'\n};\nconst DefaultType$4 = {\n allowList: 'object',\n content: 'object',\n extraClass: '(string|function)',\n html: 'boolean',\n sanitize: 'boolean',\n sanitizeFn: '(null|function)',\n template: 'string'\n};\nconst DefaultContentType = {\n entry: '(string|element|function|null)',\n selector: '(string|element)'\n};\n\n/**\n * Class definition\n */\n\nclass TemplateFactory extends Config {\n constructor(config) {\n super();\n this._config = this._getConfig(config);\n }\n\n // Getters\n static get Default() {\n return Default$4;\n }\n static get DefaultType() {\n return DefaultType$4;\n }\n static get NAME() {\n return NAME$5;\n }\n\n // Public\n getContent() {\n return Object.values(this._config.content).map(config => this._resolvePossibleFunction(config)).filter(Boolean);\n }\n hasContent() {\n return this.getContent().length > 0;\n }\n changeContent(content) {\n this._checkContent(content);\n this._config.content = {\n ...this._config.content,\n ...content\n };\n return this;\n }\n toHtml() {\n const templateWrapper = document.createElement('div');\n templateWrapper.innerHTML = this._maybeSanitize(this._config.template);\n for (const [selector, text] of Object.entries(this._config.content)) {\n this._setContent(templateWrapper, text, selector);\n }\n const template = templateWrapper.children[0];\n const extraClass = this._resolvePossibleFunction(this._config.extraClass);\n if (extraClass) {\n template.classList.add(...extraClass.split(' '));\n }\n return template;\n }\n\n // Private\n _typeCheckConfig(config) {\n super._typeCheckConfig(config);\n this._checkContent(config.content);\n }\n _checkContent(arg) {\n for (const [selector, content] of Object.entries(arg)) {\n super._typeCheckConfig({\n selector,\n entry: content\n }, DefaultContentType);\n }\n }\n _setContent(template, content, selector) {\n const templateElement = SelectorEngine.findOne(selector, template);\n if (!templateElement) {\n return;\n }\n content = this._resolvePossibleFunction(content);\n if (!content) {\n templateElement.remove();\n return;\n }\n if (isElement(content)) {\n this._putElementInTemplate(getElement(content), templateElement);\n return;\n }\n if (this._config.html) {\n templateElement.innerHTML = this._maybeSanitize(content);\n return;\n }\n templateElement.textContent = content;\n }\n _maybeSanitize(arg) {\n return this._config.sanitize ? 
sanitizeHtml(arg, this._config.allowList, this._config.sanitizeFn) : arg;\n }\n _resolvePossibleFunction(arg) {\n return execute(arg, [this]);\n }\n _putElementInTemplate(element, templateElement) {\n if (this._config.html) {\n templateElement.innerHTML = '';\n templateElement.append(element);\n return;\n }\n templateElement.textContent = element.textContent;\n }\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap tooltip.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$4 = 'tooltip';\nconst DISALLOWED_ATTRIBUTES = new Set(['sanitize', 'allowList', 'sanitizeFn']);\nconst CLASS_NAME_FADE$2 = 'fade';\nconst CLASS_NAME_MODAL = 'modal';\nconst CLASS_NAME_SHOW$2 = 'show';\nconst SELECTOR_TOOLTIP_INNER = '.tooltip-inner';\nconst SELECTOR_MODAL = `.${CLASS_NAME_MODAL}`;\nconst EVENT_MODAL_HIDE = 'hide.bs.modal';\nconst TRIGGER_HOVER = 'hover';\nconst TRIGGER_FOCUS = 'focus';\nconst TRIGGER_CLICK = 'click';\nconst TRIGGER_MANUAL = 'manual';\nconst EVENT_HIDE$2 = 'hide';\nconst EVENT_HIDDEN$2 = 'hidden';\nconst EVENT_SHOW$2 = 'show';\nconst EVENT_SHOWN$2 = 'shown';\nconst EVENT_INSERTED = 'inserted';\nconst EVENT_CLICK$1 = 'click';\nconst EVENT_FOCUSIN$1 = 'focusin';\nconst EVENT_FOCUSOUT$1 = 'focusout';\nconst EVENT_MOUSEENTER = 'mouseenter';\nconst EVENT_MOUSELEAVE = 'mouseleave';\nconst AttachmentMap = {\n AUTO: 'auto',\n TOP: 'top',\n RIGHT: isRTL() ? 'left' : 'right',\n BOTTOM: 'bottom',\n LEFT: isRTL() ? 'right' : 'left'\n};\nconst Default$3 = {\n allowList: DefaultAllowlist,\n animation: true,\n boundary: 'clippingParents',\n container: false,\n customClass: '',\n delay: 0,\n fallbackPlacements: ['top', 'right', 'bottom', 'left'],\n html: false,\n offset: [0, 6],\n placement: 'top',\n popperConfig: null,\n sanitize: true,\n sanitizeFn: null,\n selector: false,\n template: '
<div class="tooltip" role="tooltip">' + '<div class="tooltip-arrow"></div>' + '<div class="tooltip-inner"></div>' + '</div>
',\n title: '',\n trigger: 'hover focus'\n};\nconst DefaultType$3 = {\n allowList: 'object',\n animation: 'boolean',\n boundary: '(string|element)',\n container: '(string|element|boolean)',\n customClass: '(string|function)',\n delay: '(number|object)',\n fallbackPlacements: 'array',\n html: 'boolean',\n offset: '(array|string|function)',\n placement: '(string|function)',\n popperConfig: '(null|object|function)',\n sanitize: 'boolean',\n sanitizeFn: '(null|function)',\n selector: '(string|boolean)',\n template: 'string',\n title: '(string|element|function)',\n trigger: 'string'\n};\n\n/**\n * Class definition\n */\n\nclass Tooltip extends BaseComponent {\n constructor(element, config) {\n if (typeof Popper === 'undefined') {\n throw new TypeError('Bootstrap\\'s tooltips require Popper (https://popper.js.org)');\n }\n super(element, config);\n\n // Private\n this._isEnabled = true;\n this._timeout = 0;\n this._isHovered = null;\n this._activeTrigger = {};\n this._popper = null;\n this._templateFactory = null;\n this._newContent = null;\n\n // Protected\n this.tip = null;\n this._setListeners();\n if (!this._config.selector) {\n this._fixTitle();\n }\n }\n\n // Getters\n static get Default() {\n return Default$3;\n }\n static get DefaultType() {\n return DefaultType$3;\n }\n static get NAME() {\n return NAME$4;\n }\n\n // Public\n enable() {\n this._isEnabled = true;\n }\n disable() {\n this._isEnabled = false;\n }\n toggleEnabled() {\n this._isEnabled = !this._isEnabled;\n }\n toggle() {\n if (!this._isEnabled) {\n return;\n }\n this._activeTrigger.click = !this._activeTrigger.click;\n if (this._isShown()) {\n this._leave();\n return;\n }\n this._enter();\n }\n dispose() {\n clearTimeout(this._timeout);\n EventHandler.off(this._element.closest(SELECTOR_MODAL), EVENT_MODAL_HIDE, this._hideModalHandler);\n if (this._element.getAttribute('data-bs-original-title')) {\n this._element.setAttribute('title', this._element.getAttribute('data-bs-original-title'));\n }\n this._disposePopper();\n super.dispose();\n }\n show() {\n if (this._element.style.display === 'none') {\n throw new Error('Please use show on visible elements');\n }\n if (!(this._isWithContent() && this._isEnabled)) {\n return;\n }\n const showEvent = EventHandler.trigger(this._element, this.constructor.eventName(EVENT_SHOW$2));\n const shadowRoot = findShadowRoot(this._element);\n const isInTheDom = (shadowRoot || this._element.ownerDocument.documentElement).contains(this._element);\n if (showEvent.defaultPrevented || !isInTheDom) {\n return;\n }\n\n // TODO: v6 remove this or make it optional\n this._disposePopper();\n const tip = this._getTipElement();\n this._element.setAttribute('aria-describedby', tip.getAttribute('id'));\n const {\n container\n } = this._config;\n if (!this._element.ownerDocument.documentElement.contains(this.tip)) {\n container.append(tip);\n EventHandler.trigger(this._element, this.constructor.eventName(EVENT_INSERTED));\n }\n this._popper = this._createPopper(tip);\n tip.classList.add(CLASS_NAME_SHOW$2);\n\n // If this is a touch-enabled device we add extra\n // empty mouseover listeners to the body's immediate children;\n // only needed because of broken event delegation on iOS\n // https://www.quirksmode.org/blog/archives/2014/02/mouse_event_bub.html\n if ('ontouchstart' in document.documentElement) {\n for (const element of [].concat(...document.body.children)) {\n EventHandler.on(element, 'mouseover', noop);\n }\n }\n const complete = () => {\n EventHandler.trigger(this._element, 
this.constructor.eventName(EVENT_SHOWN$2));\n if (this._isHovered === false) {\n this._leave();\n }\n this._isHovered = false;\n };\n this._queueCallback(complete, this.tip, this._isAnimated());\n }\n hide() {\n if (!this._isShown()) {\n return;\n }\n const hideEvent = EventHandler.trigger(this._element, this.constructor.eventName(EVENT_HIDE$2));\n if (hideEvent.defaultPrevented) {\n return;\n }\n const tip = this._getTipElement();\n tip.classList.remove(CLASS_NAME_SHOW$2);\n\n // If this is a touch-enabled device we remove the extra\n // empty mouseover listeners we added for iOS support\n if ('ontouchstart' in document.documentElement) {\n for (const element of [].concat(...document.body.children)) {\n EventHandler.off(element, 'mouseover', noop);\n }\n }\n this._activeTrigger[TRIGGER_CLICK] = false;\n this._activeTrigger[TRIGGER_FOCUS] = false;\n this._activeTrigger[TRIGGER_HOVER] = false;\n this._isHovered = null; // it is a trick to support manual triggering\n\n const complete = () => {\n if (this._isWithActiveTrigger()) {\n return;\n }\n if (!this._isHovered) {\n this._disposePopper();\n }\n this._element.removeAttribute('aria-describedby');\n EventHandler.trigger(this._element, this.constructor.eventName(EVENT_HIDDEN$2));\n };\n this._queueCallback(complete, this.tip, this._isAnimated());\n }\n update() {\n if (this._popper) {\n this._popper.update();\n }\n }\n\n // Protected\n _isWithContent() {\n return Boolean(this._getTitle());\n }\n _getTipElement() {\n if (!this.tip) {\n this.tip = this._createTipElement(this._newContent || this._getContentForTemplate());\n }\n return this.tip;\n }\n _createTipElement(content) {\n const tip = this._getTemplateFactory(content).toHtml();\n\n // TODO: remove this check in v6\n if (!tip) {\n return null;\n }\n tip.classList.remove(CLASS_NAME_FADE$2, CLASS_NAME_SHOW$2);\n // TODO: v6 the following can be achieved with CSS only\n tip.classList.add(`bs-${this.constructor.NAME}-auto`);\n const tipId = getUID(this.constructor.NAME).toString();\n tip.setAttribute('id', tipId);\n if (this._isAnimated()) {\n tip.classList.add(CLASS_NAME_FADE$2);\n }\n return tip;\n }\n setContent(content) {\n this._newContent = content;\n if (this._isShown()) {\n this._disposePopper();\n this.show();\n }\n }\n _getTemplateFactory(content) {\n if (this._templateFactory) {\n this._templateFactory.changeContent(content);\n } else {\n this._templateFactory = new TemplateFactory({\n ...this._config,\n // the `content` var has to be after `this._config`\n // to override config.content in case of popover\n content,\n extraClass: this._resolvePossibleFunction(this._config.customClass)\n });\n }\n return this._templateFactory;\n }\n _getContentForTemplate() {\n return {\n [SELECTOR_TOOLTIP_INNER]: this._getTitle()\n };\n }\n _getTitle() {\n return this._resolvePossibleFunction(this._config.title) || this._element.getAttribute('data-bs-original-title');\n }\n\n // Private\n _initializeOnDelegatedTarget(event) {\n return this.constructor.getOrCreateInstance(event.delegateTarget, this._getDelegateConfig());\n }\n _isAnimated() {\n return this._config.animation || this.tip && this.tip.classList.contains(CLASS_NAME_FADE$2);\n }\n _isShown() {\n return this.tip && this.tip.classList.contains(CLASS_NAME_SHOW$2);\n }\n _createPopper(tip) {\n const placement = execute(this._config.placement, [this, tip, this._element]);\n const attachment = AttachmentMap[placement.toUpperCase()];\n return Popper.createPopper(this._element, tip, this._getPopperConfig(attachment));\n }\n _getOffset() {\n 
const {\n offset\n } = this._config;\n if (typeof offset === 'string') {\n return offset.split(',').map(value => Number.parseInt(value, 10));\n }\n if (typeof offset === 'function') {\n return popperData => offset(popperData, this._element);\n }\n return offset;\n }\n _resolvePossibleFunction(arg) {\n return execute(arg, [this._element]);\n }\n _getPopperConfig(attachment) {\n const defaultBsPopperConfig = {\n placement: attachment,\n modifiers: [{\n name: 'flip',\n options: {\n fallbackPlacements: this._config.fallbackPlacements\n }\n }, {\n name: 'offset',\n options: {\n offset: this._getOffset()\n }\n }, {\n name: 'preventOverflow',\n options: {\n boundary: this._config.boundary\n }\n }, {\n name: 'arrow',\n options: {\n element: `.${this.constructor.NAME}-arrow`\n }\n }, {\n name: 'preSetPlacement',\n enabled: true,\n phase: 'beforeMain',\n fn: data => {\n // Pre-set Popper's placement attribute in order to read the arrow sizes properly.\n // Otherwise, Popper mixes up the width and height dimensions since the initial arrow style is for top placement\n this._getTipElement().setAttribute('data-popper-placement', data.state.placement);\n }\n }]\n };\n return {\n ...defaultBsPopperConfig,\n ...execute(this._config.popperConfig, [defaultBsPopperConfig])\n };\n }\n _setListeners() {\n const triggers = this._config.trigger.split(' ');\n for (const trigger of triggers) {\n if (trigger === 'click') {\n EventHandler.on(this._element, this.constructor.eventName(EVENT_CLICK$1), this._config.selector, event => {\n const context = this._initializeOnDelegatedTarget(event);\n context.toggle();\n });\n } else if (trigger !== TRIGGER_MANUAL) {\n const eventIn = trigger === TRIGGER_HOVER ? this.constructor.eventName(EVENT_MOUSEENTER) : this.constructor.eventName(EVENT_FOCUSIN$1);\n const eventOut = trigger === TRIGGER_HOVER ? this.constructor.eventName(EVENT_MOUSELEAVE) : this.constructor.eventName(EVENT_FOCUSOUT$1);\n EventHandler.on(this._element, eventIn, this._config.selector, event => {\n const context = this._initializeOnDelegatedTarget(event);\n context._activeTrigger[event.type === 'focusin' ? TRIGGER_FOCUS : TRIGGER_HOVER] = true;\n context._enter();\n });\n EventHandler.on(this._element, eventOut, this._config.selector, event => {\n const context = this._initializeOnDelegatedTarget(event);\n context._activeTrigger[event.type === 'focusout' ? TRIGGER_FOCUS : TRIGGER_HOVER] = context._element.contains(event.relatedTarget);\n context._leave();\n });\n }\n }\n this._hideModalHandler = () => {\n if (this._element) {\n this.hide();\n }\n };\n EventHandler.on(this._element.closest(SELECTOR_MODAL), EVENT_MODAL_HIDE, this._hideModalHandler);\n }\n _fixTitle() {\n const title = this._element.getAttribute('title');\n if (!title) {\n return;\n }\n if (!this._element.getAttribute('aria-label') && !this._element.textContent.trim()) {\n this._element.setAttribute('aria-label', title);\n }\n this._element.setAttribute('data-bs-original-title', title); // DO NOT USE IT. 
Is only for backwards compatibility\n this._element.removeAttribute('title');\n }\n _enter() {\n if (this._isShown() || this._isHovered) {\n this._isHovered = true;\n return;\n }\n this._isHovered = true;\n this._setTimeout(() => {\n if (this._isHovered) {\n this.show();\n }\n }, this._config.delay.show);\n }\n _leave() {\n if (this._isWithActiveTrigger()) {\n return;\n }\n this._isHovered = false;\n this._setTimeout(() => {\n if (!this._isHovered) {\n this.hide();\n }\n }, this._config.delay.hide);\n }\n _setTimeout(handler, timeout) {\n clearTimeout(this._timeout);\n this._timeout = setTimeout(handler, timeout);\n }\n _isWithActiveTrigger() {\n return Object.values(this._activeTrigger).includes(true);\n }\n _getConfig(config) {\n const dataAttributes = Manipulator.getDataAttributes(this._element);\n for (const dataAttribute of Object.keys(dataAttributes)) {\n if (DISALLOWED_ATTRIBUTES.has(dataAttribute)) {\n delete dataAttributes[dataAttribute];\n }\n }\n config = {\n ...dataAttributes,\n ...(typeof config === 'object' && config ? config : {})\n };\n config = this._mergeConfigObj(config);\n config = this._configAfterMerge(config);\n this._typeCheckConfig(config);\n return config;\n }\n _configAfterMerge(config) {\n config.container = config.container === false ? document.body : getElement(config.container);\n if (typeof config.delay === 'number') {\n config.delay = {\n show: config.delay,\n hide: config.delay\n };\n }\n if (typeof config.title === 'number') {\n config.title = config.title.toString();\n }\n if (typeof config.content === 'number') {\n config.content = config.content.toString();\n }\n return config;\n }\n _getDelegateConfig() {\n const config = {};\n for (const [key, value] of Object.entries(this._config)) {\n if (this.constructor.Default[key] !== value) {\n config[key] = value;\n }\n }\n config.selector = false;\n config.trigger = 'manual';\n\n // In the future can be replaced with:\n // const keysWithDifferentValues = Object.entries(this._config).filter(entry => this.constructor.Default[entry[0]] !== this._config[entry[0]])\n // `Object.fromEntries(keysWithDifferentValues)`\n return config;\n }\n _disposePopper() {\n if (this._popper) {\n this._popper.destroy();\n this._popper = null;\n }\n if (this.tip) {\n this.tip.remove();\n this.tip = null;\n }\n }\n\n // Static\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Tooltip.getOrCreateInstance(this, config);\n if (typeof config !== 'string') {\n return;\n }\n if (typeof data[config] === 'undefined') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n data[config]();\n });\n }\n}\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Tooltip);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap popover.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$3 = 'popover';\nconst SELECTOR_TITLE = '.popover-header';\nconst SELECTOR_CONTENT = '.popover-body';\nconst Default$2 = {\n ...Tooltip.Default,\n content: '',\n offset: [0, 8],\n placement: 'right',\n template: '
<div class="popover" role="tooltip">' + '<div class="popover-arrow"></div>' + '<h3 class="popover-header"></h3>' + '<div class="popover-body"></div>' + '</div>
',\n trigger: 'click'\n};\nconst DefaultType$2 = {\n ...Tooltip.DefaultType,\n content: '(null|string|element|function)'\n};\n\n/**\n * Class definition\n */\n\nclass Popover extends Tooltip {\n // Getters\n static get Default() {\n return Default$2;\n }\n static get DefaultType() {\n return DefaultType$2;\n }\n static get NAME() {\n return NAME$3;\n }\n\n // Overrides\n _isWithContent() {\n return this._getTitle() || this._getContent();\n }\n\n // Private\n _getContentForTemplate() {\n return {\n [SELECTOR_TITLE]: this._getTitle(),\n [SELECTOR_CONTENT]: this._getContent()\n };\n }\n _getContent() {\n return this._resolvePossibleFunction(this._config.content);\n }\n\n // Static\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Popover.getOrCreateInstance(this, config);\n if (typeof config !== 'string') {\n return;\n }\n if (typeof data[config] === 'undefined') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n data[config]();\n });\n }\n}\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Popover);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap scrollspy.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$2 = 'scrollspy';\nconst DATA_KEY$2 = 'bs.scrollspy';\nconst EVENT_KEY$2 = `.${DATA_KEY$2}`;\nconst DATA_API_KEY = '.data-api';\nconst EVENT_ACTIVATE = `activate${EVENT_KEY$2}`;\nconst EVENT_CLICK = `click${EVENT_KEY$2}`;\nconst EVENT_LOAD_DATA_API$1 = `load${EVENT_KEY$2}${DATA_API_KEY}`;\nconst CLASS_NAME_DROPDOWN_ITEM = 'dropdown-item';\nconst CLASS_NAME_ACTIVE$1 = 'active';\nconst SELECTOR_DATA_SPY = '[data-bs-spy=\"scroll\"]';\nconst SELECTOR_TARGET_LINKS = '[href]';\nconst SELECTOR_NAV_LIST_GROUP = '.nav, .list-group';\nconst SELECTOR_NAV_LINKS = '.nav-link';\nconst SELECTOR_NAV_ITEMS = '.nav-item';\nconst SELECTOR_LIST_ITEMS = '.list-group-item';\nconst SELECTOR_LINK_ITEMS = `${SELECTOR_NAV_LINKS}, ${SELECTOR_NAV_ITEMS} > ${SELECTOR_NAV_LINKS}, ${SELECTOR_LIST_ITEMS}`;\nconst SELECTOR_DROPDOWN = '.dropdown';\nconst SELECTOR_DROPDOWN_TOGGLE$1 = '.dropdown-toggle';\nconst Default$1 = {\n offset: null,\n // TODO: v6 @deprecated, keep it for backwards compatibility reasons\n rootMargin: '0px 0px -25%',\n smoothScroll: false,\n target: null,\n threshold: [0.1, 0.5, 1]\n};\nconst DefaultType$1 = {\n offset: '(number|null)',\n // TODO v6 @deprecated, keep it for backwards compatibility reasons\n rootMargin: 'string',\n smoothScroll: 'boolean',\n target: 'element',\n threshold: 'array'\n};\n\n/**\n * Class definition\n */\n\nclass ScrollSpy extends BaseComponent {\n constructor(element, config) {\n super(element, config);\n\n // this._element is the observablesContainer and config.target the menu links wrapper\n this._targetLinks = new Map();\n this._observableSections = new Map();\n this._rootElement = getComputedStyle(this._element).overflowY === 'visible' ? 
null : this._element;\n this._activeTarget = null;\n this._observer = null;\n this._previousScrollData = {\n visibleEntryTop: 0,\n parentScrollTop: 0\n };\n this.refresh(); // initialize\n }\n\n // Getters\n static get Default() {\n return Default$1;\n }\n static get DefaultType() {\n return DefaultType$1;\n }\n static get NAME() {\n return NAME$2;\n }\n\n // Public\n refresh() {\n this._initializeTargetsAndObservables();\n this._maybeEnableSmoothScroll();\n if (this._observer) {\n this._observer.disconnect();\n } else {\n this._observer = this._getNewObserver();\n }\n for (const section of this._observableSections.values()) {\n this._observer.observe(section);\n }\n }\n dispose() {\n this._observer.disconnect();\n super.dispose();\n }\n\n // Private\n _configAfterMerge(config) {\n // TODO: on v6 target should be given explicitly & remove the {target: 'ss-target'} case\n config.target = getElement(config.target) || document.body;\n\n // TODO: v6 Only for backwards compatibility reasons. Use rootMargin only\n config.rootMargin = config.offset ? `${config.offset}px 0px -30%` : config.rootMargin;\n if (typeof config.threshold === 'string') {\n config.threshold = config.threshold.split(',').map(value => Number.parseFloat(value));\n }\n return config;\n }\n _maybeEnableSmoothScroll() {\n if (!this._config.smoothScroll) {\n return;\n }\n\n // unregister any previous listeners\n EventHandler.off(this._config.target, EVENT_CLICK);\n EventHandler.on(this._config.target, EVENT_CLICK, SELECTOR_TARGET_LINKS, event => {\n const observableSection = this._observableSections.get(event.target.hash);\n if (observableSection) {\n event.preventDefault();\n const root = this._rootElement || window;\n const height = observableSection.offsetTop - this._element.offsetTop;\n if (root.scrollTo) {\n root.scrollTo({\n top: height,\n behavior: 'smooth'\n });\n return;\n }\n\n // Chrome 60 doesn't support `scrollTo`\n root.scrollTop = height;\n }\n });\n }\n _getNewObserver() {\n const options = {\n root: this._rootElement,\n threshold: this._config.threshold,\n rootMargin: this._config.rootMargin\n };\n return new IntersectionObserver(entries => this._observerCallback(entries), options);\n }\n\n // The logic of selection\n _observerCallback(entries) {\n const targetElement = entry => this._targetLinks.get(`#${entry.target.id}`);\n const activate = entry => {\n this._previousScrollData.visibleEntryTop = entry.target.offsetTop;\n this._process(targetElement(entry));\n };\n const parentScrollTop = (this._rootElement || document.documentElement).scrollTop;\n const userScrollsDown = parentScrollTop >= this._previousScrollData.parentScrollTop;\n this._previousScrollData.parentScrollTop = parentScrollTop;\n for (const entry of entries) {\n if (!entry.isIntersecting) {\n this._activeTarget = null;\n this._clearActiveClass(targetElement(entry));\n continue;\n }\n const entryIsLowerThanPrevious = entry.target.offsetTop >= this._previousScrollData.visibleEntryTop;\n // if we are scrolling down, pick the bigger offsetTop\n if (userScrollsDown && entryIsLowerThanPrevious) {\n activate(entry);\n // if parent isn't scrolled, let's keep the first visible item, breaking the iteration\n if (!parentScrollTop) {\n return;\n }\n continue;\n }\n\n // if we are scrolling up, pick the smallest offsetTop\n if (!userScrollsDown && !entryIsLowerThanPrevious) {\n activate(entry);\n }\n }\n }\n _initializeTargetsAndObservables() {\n this._targetLinks = new Map();\n this._observableSections = new Map();\n const targetLinks = 
SelectorEngine.find(SELECTOR_TARGET_LINKS, this._config.target);\n for (const anchor of targetLinks) {\n // ensure that the anchor has an id and is not disabled\n if (!anchor.hash || isDisabled(anchor)) {\n continue;\n }\n const observableSection = SelectorEngine.findOne(decodeURI(anchor.hash), this._element);\n\n // ensure that the observableSection exists & is visible\n if (isVisible(observableSection)) {\n this._targetLinks.set(decodeURI(anchor.hash), anchor);\n this._observableSections.set(anchor.hash, observableSection);\n }\n }\n }\n _process(target) {\n if (this._activeTarget === target) {\n return;\n }\n this._clearActiveClass(this._config.target);\n this._activeTarget = target;\n target.classList.add(CLASS_NAME_ACTIVE$1);\n this._activateParents(target);\n EventHandler.trigger(this._element, EVENT_ACTIVATE, {\n relatedTarget: target\n });\n }\n _activateParents(target) {\n // Activate dropdown parents\n if (target.classList.contains(CLASS_NAME_DROPDOWN_ITEM)) {\n SelectorEngine.findOne(SELECTOR_DROPDOWN_TOGGLE$1, target.closest(SELECTOR_DROPDOWN)).classList.add(CLASS_NAME_ACTIVE$1);\n return;\n }\n for (const listGroup of SelectorEngine.parents(target, SELECTOR_NAV_LIST_GROUP)) {\n // Set triggered links parents as active\n // With both
` alignment by inheriting `text-align`.\n// 3. Fix alignment for Safari\n\nth {\n font-weight: $table-th-font-weight; // 1\n text-align: inherit; // 2\n text-align: -webkit-match-parent; // 3\n}\n\nthead,\ntbody,\ntfoot,\ntr,\ntd,\nth {\n border-color: inherit;\n border-style: solid;\n border-width: 0;\n}\n\n\n// Forms\n//\n// 1. Allow labels to use `margin` for spacing.\n\nlabel {\n display: inline-block; // 1\n}\n\n// Remove the default `border-radius` that macOS Chrome adds.\n// See https://github.com/twbs/bootstrap/issues/24093\n\nbutton {\n // stylelint-disable-next-line property-disallowed-list\n border-radius: 0;\n}\n\n// Explicitly remove focus outline in Chromium when it shouldn't be\n// visible (e.g. as result of mouse click or touch tap). It already\n// should be doing this automatically, but seems to currently be\n// confused and applies its very visible two-tone outline anyway.\n\nbutton:focus:not(:focus-visible) {\n outline: 0;\n}\n\n// 1. Remove the margin in Firefox and Safari\n\ninput,\nbutton,\nselect,\noptgroup,\ntextarea {\n margin: 0; // 1\n font-family: inherit;\n @include font-size(inherit);\n line-height: inherit;\n}\n\n// Remove the inheritance of text transform in Firefox\nbutton,\nselect {\n text-transform: none;\n}\n// Set the cursor for non-` + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

Contributors#

+

Shizhe Diao, Rui Pan, Hanze Dong, Ka Shun Shum, Jipeng Zhang, Wei Xiong, Tong Zhang

+
+ + +
+ + + + + + + +
+ + + +
+ + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/about/changelog.html b/about/changelog.html new file mode 100644 index 000000000..514427043 --- /dev/null +++ b/about/changelog.html @@ -0,0 +1,531 @@ + + + + + + + + + + + Changelog — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

Changelog#

+
+

Version 0.0.1 (Mar 28, 2023)#

+

The first public version.

+

Task tuning and instruction tuning on user-defined datasets.

+

A simple and extensible API for developers.

+

Efficient finetuning with LoRA.

+

Simplified model inference framework.

+
+
+ + +
+ + + + + + + +
+ + + +
+ + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/about/index.html b/about/index.html new file mode 100644 index 000000000..3ec668e37 --- /dev/null +++ b/about/index.html @@ -0,0 +1,521 @@ + + + + + + + + + + + About — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + + + + + + + + + +
+ + + +
+ + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/index.html b/autoapi/index.html new file mode 100644 index 000000000..2473a2c5a --- /dev/null +++ b/autoapi/index.html @@ -0,0 +1,763 @@ + + + + + + + + + + + API Reference — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

API Reference#

+

This page contains auto-generated API reference documentation [1].

+
+ +
+ +
+ + +
+ + + + + + + +
+ + + +
+ + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/args/index.html b/autoapi/lmflow/args/index.html new file mode 100644 index 000000000..626afe526 --- /dev/null +++ b/autoapi/lmflow/args/index.html @@ -0,0 +1,2436 @@ + + + + + + + + + + + lmflow.args — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.args#

+

This script defines the dataclasses ModelArguments and DatasetArguments, which contain the arguments for the model and dataset used in training.

+

It imports several modules, including dataclass and field from dataclasses, Optional from typing, require_version from transformers.utils.versions, MODEL_FOR_CAUSAL_LM_MAPPING, and TrainingArguments from transformers.

+

MODEL_CONFIG_CLASSES is assigned a list of the model config classes from MODEL_FOR_CAUSAL_LM_MAPPING. MODEL_TYPES is assigned a tuple of the model types extracted from MODEL_CONFIG_CLASSES.
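In practice these dataclasses are consumed through transformers.HfArgumentParser. The following is a rough usage sketch; the checkpoint name and dataset path are illustrative placeholders, not values taken from this page.

```python
# Hedged sketch: parsing LMFlow arguments with HfArgumentParser.
# "gpt2" and "data/alpaca" are illustrative placeholders.
from transformers import HfArgumentParser

from lmflow.args import DatasetArguments, ModelArguments

parser = HfArgumentParser((ModelArguments, DatasetArguments))
model_args, data_args = parser.parse_args_into_dataclasses(
    args=["--model_name_or_path", "gpt2", "--dataset_path", "data/alpaca"]
)
print(model_args.model_name_or_path, data_args.dataset_path)
```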

+
+

Attributes#

+ +
+
+

Classes#

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

OptimizerNames

ModelArguments

Define a class ModelArguments using the dataclass decorator.

VisModelArguments

Define a class VisModelArguments using the dataclass decorator.

DatasetArguments

Define a class DatasetArguments using the dataclass decorator.

MultiModalDatasetArguments

Define a class MultiModalDatasetArguments using the dataclass decorator.

FinetunerArguments

Adapt transformers.TrainingArguments

RewardModelTunerArguments

Arguments for reward modeling.

EvaluatorArguments

Define a class EvaluatorArguments using the dataclass decorator. The class contains several optional

InferencerArguments

Define a class InferencerArguments using the dataclass decorator. The class contains several optional

RaftAlignerArguments

Define a class RaftAlignerArguments to configure raft aligner.

BenchmarkingArguments

DPOAlignerArguments

The arguments for the DPO training script.

DPOv2AlignerArguments

The arguments for the DPOv2 training script.

IterativeAlignerArguments

Arguments for iterative aligners.

IterativeDPOAlignerArguments

Arguments for iterative DPO aligners.

AutoArguments

Automatically choose arguments from FinetunerArguments or EvaluatorArguments.

+
+
+
+

Module Contents#

+
+
+lmflow.args.MODEL_CONFIG_CLASSES[source]#
+
+ +
+
+lmflow.args.MODEL_TYPES[source]#
+
+ +
+
+lmflow.args.logger[source]#
+
+ +
+
+class lmflow.args.OptimizerNames[source]#
+
+
+DUMMY = 'dummy'[source]#
+
+ +
+
+ADABELIEF = 'adabelief'[source]#
+
+ +
+
+ADABOUND = 'adabound'[source]#
+
+ +
+
+LARS = 'lars'[source]#
+
+ +
+
+LAMB = 'lamb'[source]#
+
+ +
+
+ADAMAX = 'adamax'[source]#
+
+ +
+
+NADAM = 'nadam'[source]#
+
+ +
+
+RADAM = 'radam'[source]#
+
+ +
+
+ADAMP = 'adamp'[source]#
+
+ +
+
+SGDP = 'sgdp'[source]#
+
+ +
+
+YOGI = 'yogi'[source]#
+
+ +
+
+SOPHIA = 'sophia'[source]#
+
+ +
+
+ADAN = 'adan'[source]#
+
+ +
+
+ADAM = 'adam'[source]#
+
+ +
+
+NOVOGRAD = 'novograd'[source]#
+
+ +
+
+ADADELTA = 'adadelta'[source]#
+
+ +
+
+ADAGRAD = 'adagrad'[source]#
+
+ +
+
+ADAMW_SCHEDULE_FREE = 'adamw_schedule_free'[source]#
+
+ +
+
+SGD_SCHEDULE_FREE = 'sgd_schedule_free'[source]#
+
+ +
+ +
+
+class lmflow.args.ModelArguments[source]#
+

Define a class ModelArguments using the dataclass decorator. +The class contains several optional parameters that can be used to configure a model.

+
+
model_name_or_pathstr

a string representing the path or name of a pretrained +model checkpoint for weights initialization. If None, a model will be trained from scratch.

+
+
model_typestr

a string representing the type of model to use if training from +scratch. If not provided, a pretrained model will be used.

+
+
config_overridesstr

a string representing the default config settings to override +when training a model from scratch.

+
+
config_namestr

a string representing the name or path of the pretrained config to +use, if different from the model_name_or_path.

+
+
tokenizer_namestr

a string representing the name or path of the pretrained tokenizer +to use, if different from the model_name_or_path.

+
+
cache_dirstr

a string representing the path to the directory where pretrained models +downloaded from huggingface.co will be stored.

+
+
use_fast_tokenizerbool

a boolean indicating whether to use a fast tokenizer (backed by the +tokenizers library) or not.

+
+
model_revisionstr

a string representing the specific model version to use (can be a +branch name, tag name, or commit id).

+
+
use_auth_tokenbool

a boolean indicating whether to use the token generated when running +huggingface-cli login (necessary to use this script with private models).

+
+
torch_dtypestr

a string representing the dtype to load the model under. If auto is +passed, the dtype will be automatically derived from the model’s weights.

+
+
use_ram_optimized_loadbool

a boolean indicating whether to use disk mapping when memory is not +enough.

+
+
use_int8bool

a boolean indicating whether to load int8 quantization for inference.

+
+
load_in_4bitbool

whether to load the model in 4bit

+
+
model_max_lengthint

The maximum length of the model.

+
+
truncation_sidestr

The side on which the model should have truncation applied.

+
+
arch_typestr

Model architecture type.

+
+
padding_sidestr

The side on which the tokenizer should have padding applied.

+
+
eos_paddingbool

whether to pad with eos token instead of pad token.

+
+
ignore_bias_buffersbool

a fix for DDP issues with LM bias/mask buffers (invalid scalar type, inplace operation).

+
+
+
+
+model_name_or_path: str | None[source]#
+
+ +
+
+lora_model_path: str | None[source]#
+
+ +
+
+model_type: str | None[source]#
+
+ +
+
+config_overrides: str | None[source]#
+
+ +
+
+arch_type: str | None[source]#
+
+ +
+
+config_name: str | None[source]#
+
+ +
+
+tokenizer_name: str | None[source]#
+
+ +
+
+cache_dir: str | None[source]#
+
+ +
+
+use_fast_tokenizer: bool[source]#
+
+ +
+
+model_revision: str[source]#
+
+ +
+
+use_auth_token: bool[source]#
+
+ +
+
+trust_remote_code: bool[source]#
+
+ +
+
+torch_dtype: str | None[source]#
+
+ +
+
+use_lora: bool[source]#
+
+ +
+
+use_qlora: bool[source]#
+
+ +
+
+bits: int[source]#
+
+ +
+
+quant_type: str[source]#
+
+ +
+
+double_quant: bool[source]#
+
+ +
+
+lora_r: int[source]#
+
+ +
+
+lora_alpha: int[source]#
+
+ +
+
+lora_target_modules: List[str][source]#
+
+ +
+
+lora_dropout: float[source]#
+
+ +
+
+save_aggregated_lora: bool[source]#
+
+ +
+
+use_ram_optimized_load: bool[source]#
+
+ +
+
+use_flash_attention: bool[source]#
+
+ +
+
+truncate_to_model_max_length: bool[source]#
+
+ +
+
+do_rope_scaling: bool[source]#
+
+ +
+
+rope_pi_ratio: int[source]#
+
+ +
+
+rope_ntk_ratio: int[source]#
+
+ +
+
+use_int8: bool[source]#
+
+ +
+
+load_in_4bit: bool | None[source]#
+
+ +
+
+model_max_length: int | None[source]#
+
+ +
+
+truncation_side: str[source]#
+
+ +
+
+padding_side: str[source]#
+
+ +
+
+eos_padding: bool | None[source]#
+
+ +
+
+ignore_bias_buffers: bool | None[source]#
+
+ +
+
+__post_init__()[source]#
+
+ +
+ +
+
+class lmflow.args.VisModelArguments[source]#
+

Bases: ModelArguments

+

Define a class VisModelArguments using the dataclass decorator. The class contains several optional parameters that can be used to configure a model.

+
+
model_name_or_pathstr

a string representing the path or name of a pretrained +model checkpoint for weights initialization. If None, a model will be trained from scratch.

+
+
model_typestr

a string representing the type of model to use if training from +scratch. If not provided, a pretrained model will be used.

+
+
config_overridesstr

a string representing the default config settings to override +when training a model from scratch.

+
+
config_namestr

a string representing the name or path of the pretrained config to +use, if different from the model_name_or_path.

+
+
tokenizer_namestr

a string representing the name or path of the pretrained tokenizer +to use, if different from the model_name_or_path.

+
+
cache_dirstr

a string representing the path to the directory where pretrained models +downloaded from huggingface.co will be stored.

+
+
use_fast_tokenizerbool

a boolean indicating whether to use a fast tokenizer (backed by the +tokenizers library) or not.

+
+
model_revisionstr

a string representing the specific model version to use (can be a +branch name, tag name, or commit id).

+
+
use_auth_tokenbool

a boolean indicating whether to use the token generated when running +huggingface-cli login (necessary to use this script with private models).

+
+
torch_dtypestr

a string representing the dtype to load the model under. If auto is +passed, the dtype will be automatically derived from the model’s weights.

+
+
use_ram_optimized_loadbool

a boolean indicating whether to use disk mapping when memory is not +enough.

+
+
use_int8bool

a boolean indicating whether to load int8 quantization for inference.

+
+
load_in_4bitbool

whether to load the model in 4bit

+
+
model_max_lengthint

The maximum length of the model.

+
+
truncation_sidestr

The side on which the model should have truncation applied.

+
+
arch_typestr

Model architecture type.

+
+
padding_sidestr

The side on which the tokenizer should have padding applied.

+
+
eos_paddingbool

whether to pad with eos token instead of pad token.

+
+
ignore_bias_buffersbool

a fix for DDP issues with LM bias/mask buffers (invalid scalar type, inplace operation).

+
+
+
+
+low_resource: bool | None[source]#
+
+ +
+
+custom_model: bool[source]#
+
+ +
+
+pretrained_language_projection_path: str[source]#
+
+ +
+
+custom_vision_model: bool[source]#
+
+ +
+
+image_encoder_name_or_path: str | None[source]#
+
+ +
+
+qformer_name_or_path: str | None[source]#
+
+ +
+
+llm_model_name_or_path: str | None[source]#
+
+ +
+
+use_prompt_cache: bool[source]#
+
+ +
+
+prompt_cache_path: str | None[source]#
+
+ +
+
+llava_loading: bool | None[source]#
+
+ +
+
+with_qformer: bool | None[source]#
+
+ +
+
+vision_select_layer: int | None[source]#
+
+ +
+
+llava_pretrain_model_path: str | None[source]#
+
+ +
+
+save_pretrain_model_path: str | None[source]#
+
+ +
+ +
+
+class lmflow.args.DatasetArguments[source]#
+

Define a class DatasetArguments using the dataclass decorator. +The class contains several optional parameters that can be used to configure a dataset for a language model.

+
+
dataset_pathstr

a string representing the path of the dataset to use.

+
+
dataset_namestr

a string representing the name of the dataset to use. The default value is “customized”.

+
+
is_custom_datasetbool

a boolean indicating whether to use custom data. The default value is False.

+
+
customized_cache_dirstr

a string representing the path to the directory where customized dataset caches will be stored.

+
+
dataset_config_namestr

a string representing the configuration name of the dataset to use (via the datasets library).

+
+
train_filestr

a string representing the path to the input training data file (a text file).

+
+
validation_filestr

a string representing the path to the input evaluation data file to evaluate the perplexity on (a text file).

+
+
max_train_samplesint

an integer indicating the maximum number of training examples to use for debugging or quicker training. +If set, the training dataset will be truncated to this number.

+
+
max_eval_samples: int

an integer indicating the maximum number of evaluation examples to use for debugging or quicker training. +If set, the evaluation dataset will be truncated to this number.

+
+
streamingbool

a boolean indicating whether to enable streaming mode.

+
+
block_size: int

an integer indicating the optional input sequence length after tokenization. The training dataset will be +truncated in blocks of this size for training.

+
+
train_on_prompt: bool

a boolean indicating whether to train on prompt for conversation datasets such as ShareGPT.

+
+
conversation_template: str

a string representing the template for conversation datasets.

+
+
+

The class also includes some additional parameters that can be used to configure the dataset further, such as overwrite_cache, +validation_split_percentage, preprocessing_num_workers, disable_group_texts, demo_example_in_prompt, explanation_in_prompt, +keep_linebreaks, and prompt_structure.

+

The field function is used to set default values and provide help messages for each parameter. The Optional type hint is +used to indicate that a parameter is optional. The metadata argument is used to provide additional information about +each parameter, such as a help message.

+
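As a concrete illustration of the field pattern described above, a parameter is typically declared as follows; the class name here is hypothetical, and the real declarations live in lmflow/args.py.

```python
# Illustrative only: a dataclass field with a default value and a help message.
from dataclasses import dataclass, field
from typing import Optional


@dataclass
class ExampleArguments:  # hypothetical name
    dataset_path: Optional[str] = field(
        default=None,
        metadata={"help": "The path of the dataset to use."},
    )
```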
+
+dataset_path: str | None[source]#
+
+ +
+
+dataset_name: str | None[source]#
+
+ +
+
+is_custom_dataset: bool | None[source]#
+
+ +
+
+customized_cache_dir: str | None[source]#
+
+ +
+
+dataset_config_name: str | None[source]#
+
+ +
+
+train_file: str | None[source]#
+
+ +
+
+validation_file: str | None[source]#
+
+ +
+
+max_train_samples: int | None[source]#
+
+ +
+
+max_eval_samples: int | None[source]#
+
+ +
+
+streaming: bool[source]#
+
+ +
+
+block_size: int | None[source]#
+
+ +
+
+overwrite_cache: bool[source]#
+
+ +
+
+validation_split_percentage: int | None[source]#
+
+ +
+
+preprocessing_num_workers: int | None[source]#
+
+ +
+
+group_texts_batch_size: int[source]#
+
+ +
+
+disable_group_texts: bool[source]#
+
+ +
+
+keep_linebreaks: bool[source]#
+
+ +
+
+test_file: str | None[source]#
+
+ +
+
+train_on_prompt: bool[source]#
+
+ +
+
+conversation_template: str | None[source]#
+
+ +
+
+__post_init__()[source]#
+
+ +
+ +
+
+class lmflow.args.MultiModalDatasetArguments[source]#
+

Bases: DatasetArguments

+

Define a class MultiModalDatasetArguments using the dataclass decorator. The class contains several optional parameters that can be used to configure a dataset for a language model.

+
+
dataset_pathstr

a string representing the path of the dataset to use.

+
+
dataset_namestr

a string representing the name of the dataset to use. The default value is “customized”.

+
+
is_custom_datasetbool

a boolean indicating whether to use custom data. The default value is False.

+
+
customized_cache_dirstr

a string representing the path to the directory where customized dataset caches will be stored.

+
+
dataset_config_namestr

a string representing the configuration name of the dataset to use (via the datasets library).

+
+
train_filestr

a string representing the path to the input training data file (a text file).

+
+
validation_filestr

a string representing the path to the input evaluation data file to evaluate the perplexity on (a text file).

+
+
max_train_samplesint

an integer indicating the maximum number of training examples to use for debugging or quicker training. +If set, the training dataset will be truncated to this number.

+
+
max_eval_samples: int

an integer indicating the maximum number of evaluation examples to use for debugging or quicker training. +If set, the evaluation dataset will be truncated to this number.

+
+
streamingbool

a boolean indicating whether to enable streaming mode.

+
+
block_size: int

an integer indicating the optional input sequence length after tokenization. The training dataset will be +truncated in blocks of this size for training.

+
+
train_on_prompt: bool

a boolean indicating whether to train on prompt for conversation datasets such as ShareGPT.

+
+
conversation_template: str

a string representing the template for conversation datasets.

+
+
+

The class also includes some additional parameters that can be used to configure the dataset further, such as overwrite_cache, +validation_split_percentage, preprocessing_num_workers, disable_group_texts, demo_example_in_prompt, explanation_in_prompt, +keep_linebreaks, and prompt_structure.

+

The field function is used to set default values and provide help messages for each parameter. The Optional type hint is +used to indicate that a parameter is optional. The metadata argument is used to provide additional information about +each parameter, such as a help message.

+
+
+image_folder: str | None[source]#
+
+ +
+
+image_aspect_ratio: str | None[source]#
+
+ +
+
+is_multimodal: bool | None[source]#
+
+ +
+
+use_image_start_end: bool | None[source]#
+
+ +
+
+sep_style: str | None[source]#
+
+ +
+ +
+
+class lmflow.args.FinetunerArguments[source]#
+

Bases: transformers.TrainingArguments

+

Adapt transformers.TrainingArguments

+
+
+eval_dataset_path: str | None[source]#
+
+ +
+
+remove_unused_columns: bool | None[source]#
+
+ +
+
+finetune_part: str | None[source]#
+
+ +
+
+save_language_projection: str | None[source]#
+
+ +
+
+use_lisa: bool[source]#
+
+ +
+
+lisa_activated_layers: int[source]#
+
+ +
+
+lisa_interval_steps: int[source]#
+
+ +
+
+lisa_layers_attribute: str[source]#
+
+ +
+
+use_customized_optim: bool[source]#
+
+ +
+
+customized_optim: str[source]#
+
+ +
+
+customized_optim_args: str[source]#
+
+ +
+
+optim_dummy_beta1: float[source]#
+
+ +
+
+optim_dummy_beta2: float[source]#
+
+ +
+
+optim_adam_beta1: float[source]#
+
+ +
+
+optim_adam_beta2: float[source]#
+
+ +
+
+optim_beta1: float[source]#
+
+ +
+
+optim_beta2: float[source]#
+
+ +
+
+optim_beta3: float[source]#
+
+ +
+
+optim_momentum: float[source]#
+
+ +
+
+optim_weight_decay: float[source]#
+
+ +
+ +
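Because FinetunerArguments is a dataclass extending transformers.TrainingArguments, the customized-optimizer switches listed above can be set directly at construction time. A minimal sketch with illustrative values:

```python
# Hedged sketch: enabling a customized optimizer through FinetunerArguments.
from lmflow.args import FinetunerArguments

finetuner_args = FinetunerArguments(
    output_dir="output_models/finetune",  # required by TrainingArguments; path is illustrative
    use_customized_optim=True,
    customized_optim="adabelief",         # one of the lmflow.args.OptimizerNames values
    optim_beta1=0.9,
    optim_beta2=0.999,
)
```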
+
+class lmflow.args.RewardModelTunerArguments[source]#
+

Bases: FinetunerArguments

+

Arguments for reward modeling.

+
+ +
+
+class lmflow.args.EvaluatorArguments[source]#
+

Define a class EvaluatorArguments using the dataclass decorator. The class contains several optional parameters that can be used to configure an evaluator.

+
+
local_rankstr

For distributed training: local_rank

+
+
+

random_shuffle : bool

+

use_wandb : bool

+

random_seed : int, default = 1

+

output_dir : str, default = ‘./output_dir’,

+
+
mixed_precisionstr, choice from [“bf16”,”fp16”].

mixed precision mode, whether to use bf16 or fp16

+
+
deepspeed :

Enable deepspeed and pass the path to deepspeed json config file (e.g. ds_config.json) or an already +loaded json file as a dict

+
+
temperaturefloat

An argument of model.generate in huggingface to control the diversity of generation.

+
+
repetition_penaltyfloat

An argument of model.generate in huggingface to penalize repetitions.

+
+
+
+
+local_rank: int[source]#
+
+ +
+
+random_shuffle: bool | None[source]#
+
+ +
+
+use_wandb: bool | None[source]#
+
+ +
+
+random_seed: int | None[source]#
+
+ +
+
+output_dir: str | None[source]#
+
+ +
+
+mixed_precision: str | None[source]#
+
+ +
+
+deepspeed: str | None[source]#
+
+ +
+
+answer_type: str | None[source]#
+
+ +
+
+prompt_structure: str | None[source]#
+
+ +
+
+evaluate_block_size: int | None[source]#
+
+ +
+
+metric: str | None[source]#
+
+ +
+
+inference_batch_size_per_device: int | None[source]#
+
+ +
+
+use_accelerator_for_evaluator: bool[source]#
+
+ +
+
+temperature: float[source]#
+
+ +
+
+repetition_penalty: float[source]#
+
+ +
+
+max_new_tokens: int[source]#
+
+ +
+ +
+
+class lmflow.args.InferencerArguments[source]#
+

Define a class InferencerArguments using the dataclass decorator. The class contains several optional parameters that can be used to configure an inferencer.

+
+
local_rankstr

For distributed training: local_rank

+
+
+

random_seed : int, default = 1 +inference_batch_size : int, default = 1 +deepspeed :

+
+

Enable deepspeed and pass the path to deepspeed json config file (e.g. ds_config.json) or an already +loaded json file as a dict

+
+
+
mixed_precisionstr, choice from [“bf16”,”fp16”].

mixed precision mode, whether to use bf16 or fp16

+
+
temperaturefloat

An argument of model.generate in huggingface to control the diversity of generation.

+
+
repetition_penaltyfloat

An argument of model.generate in huggingface to penalize repetitions.

+
+
use_beam_searchOptional[bool]

Whether to use beam search during inference. By default, False.

+
+
num_output_sequencesOptional[int]

Number of output sequences to return for the given prompt; currently only used in vllm inference. By default, 8.

+
+
top_pOptional[float]

top_p for sampling. By default, 1.0.

+
+
top_kOptional[int]

top_k for sampling. By default, -1 (no top_k).

+
+
additional_stop_token_idsOptional[List[int]]

The ids of the end-of-sentence tokens. By default, [].

+
+
apply_chat_templateOptional[bool]

Whether to apply the chat template. By default, True.

+
+
save_resultsOptional[bool]

Whether to save inference results. By default, False.

+
+
results_pathOptional[str]

The json file path of inference results. By default, None.

+
+
enable_decode_inference_resultOptional[bool]

Whether to detokenize the inference results.

+

NOTE: For iterative align pipelines, whether to detokenize depends on +the homogeneity of the policy model and the reward model +(i.e., if they have the same tokenizer).

+
+
use_vllm: bool, optional

Whether to use VLLM for inference. By default, False.

+
+
vllm_tensor_parallel_size: int, optional

The tensor parallel size for VLLM inference.

+
+
vllm_gpu_memory_utilization: float, optional

The GPU memory utilization for VLLM inference. The proportion of GPU +memory (per GPU) to use for VLLM inference.

+
+
+
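For instance, vllm-based sampling can be configured through the fields above; the values below are illustrative, not recommendations.

```python
# Hedged sketch: configuring vllm-based inference via InferencerArguments.
from lmflow.args import InferencerArguments

inferencer_args = InferencerArguments(
    use_vllm=True,
    vllm_tensor_parallel_size=2,
    vllm_gpu_memory_utilization=0.9,
    temperature=0.7,
    max_new_tokens=256,
)
```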
+
+device: str[source]#
+
+ +
+
+local_rank: int[source]#
+
+ +
+
+inference_batch_size: int[source]#
+
+ +
+
+vllm_inference_batch_size: int[source]#
+
+ +
+
+temperature: float[source]#
+
+ +
+
+repetition_penalty: float[source]#
+
+ +
+
+max_new_tokens: int[source]#
+
+ +
+
+random_seed: int | None[source]#
+
+ +
+
+deepspeed: str | None[source]#
+
+ +
+
+mixed_precision: str | None[source]#
+
+ +
+
+do_sample: bool | None[source]#
+
+ +
+
+use_accelerator: bool[source]#
+
+ +
+ +
+ +
+
+num_output_sequences: int | None[source]#
+
+ +
+
+top_p: float | None[source]#
+
+ +
+
+top_k: int | None[source]#
+
+ +
+
+additional_stop_token_ids: List[int] | None[source]#
+
+ +
+
+apply_chat_template: bool | None[source]#
+
+ +
+
+enable_decode_inference_result: bool | None[source]#
+
+ +
+
+tensor_parallel_size: int | None[source]#
+
+ +
+
+enable_distributed_inference: bool | None[source]#
+
+ +
+
+distributed_inference_num_instances: int | None[source]#
+
+ +
+
+use_vllm: bool[source]#
+
+ +
+
+vllm_tensor_parallel_size: int | None[source]#
+
+ +
+
+vllm_gpu_memory_utilization: float | None[source]#
+
+ +
+
+save_results: bool | None[source]#
+
+ +
+
+results_path: str | None[source]#
+
+ +
+
+__post_init__()[source]#
+
+ +
+ +
+
+class lmflow.args.RaftAlignerArguments[source]#
+

Bases: transformers.TrainingArguments

+

Define a class RaftAlignerArguments to configure raft aligner.

+
+
+output_reward_path: str | None[source]#
+
+ +
+
+output_min_length: int | None[source]#
+
+ +
+
+output_max_length: int | None[source]#
+
+ +
+
+num_raft_iteration: int | None[source]#
+
+ +
+
+raft_batch_size: int | None[source]#
+
+ +
+
+top_reward_percentage: float | None[source]#
+
+ +
+
+inference_batch_size_per_device: int | None[source]#
+
+ +
+
+collection_strategy: str | None[source]#
+
+ +
+ +
+
+class lmflow.args.BenchmarkingArguments[source]#
+
+
+dataset_name: str | None[source]#
+
+ +
+
+lm_evaluation_metric: str | None[source]#
+
+ +
+ +
+
+class lmflow.args.DPOAlignerArguments[source]#
+

The arguments for the DPO training script.

+
+
+local_rank: int[source]#
+
+ +
+
+beta: float | None[source]#
+
+ +
+
+learning_rate: float | None[source]#
+
+ +
+
+lr_scheduler_type: str | None[source]#
+
+ +
+
+warmup_steps: int | None[source]#
+
+ +
+
+weight_decay: float | None[source]#
+
+ +
+
+optimizer_type: str | None[source]#
+
+ +
+
+per_device_train_batch_size: int | None[source]#
+
+ +
+
+per_device_eval_batch_size: int | None[source]#
+
+ +
+
+gradient_accumulation_steps: int | None[source]#
+
+ +
+
+gradient_checkpointing: bool | None[source]#
+
+ +
+
+gradient_checkpointing_use_reentrant: bool | None[source]#
+
+ +
+
+max_prompt_length: int | None[source]#
+
+ +
+
+max_length: int | None[source]#
+
+ +
+
+max_steps: int | None[source]#
+
+ +
+
+logging_steps: int | None[source]#
+
+ +
+
+save_steps: int | None[source]#
+
+ +
+
+eval_steps: int | None[source]#
+
+ +
+
+output_dir: str | None[source]#
+
+ +
+
+log_freq: int | None[source]#
+
+ +
+
+sanity_check: bool | None[source]#
+
+ +
+
+report_to: str | None[source]#
+
+ +
+
+seed: int | None[source]#
+
+ +
+
+run_name: str | None[source]#
+
+ +
+ +
+
+class lmflow.args.DPOv2AlignerArguments[source]#
+

Bases: FinetunerArguments

+

The arguments for the DPOv2 training script.

+
+
+random_seed: int | None[source]#
+
+ +
+
+accelerate_config_file: str | None[source]#
+
+ +
+
+margin_scale: float | None[source]#
+
+ +
+
+sampling_paired_method: str | None[source]#
+
+ +
+
+length_penalty: float | None[source]#
+
+ +
+
+max_length: int | None[source]#
+
+ +
+
+max_prompt_length: int | None[source]#
+
+ +
+
+mask_prompt: bool | None[source]#
+
+ +
+
+beta: float | None[source]#
+
+ +
+
+loss_type: str | None[source]#
+
+ +
+ +
+
+class lmflow.args.IterativeAlignerArguments[source]#
+

Bases: InferencerArguments

+

Arguments for iterative aligners.

+
+
+dataset_path_list: List[str][source]#
+
+ +
+
+initial_iter_idx: int[source]#
+
+ +
+ +
+
+class lmflow.args.IterativeDPOAlignerArguments[source]#
+

Bases: IterativeAlignerArguments, DPOv2AlignerArguments

+

Arguments for iterative DPO aligners.

+
+
+output_dir: str | None[source]#
+
+ +
+
+reward_model_inference_batch_size: int[source]#
+
+ +
+
+reward_model_inference_block_size: int[source]#
+
+ +
+
+do_response_generation: bool[source]#
+
+ +
+
+do_scoring: bool[source]#
+
+ +
+
+do_dpo_align: bool[source]#
+
+ +
+ +
+
+lmflow.args.PIPELINE_ARGUMENT_MAPPING[source]#
+
+ +
+
+class lmflow.args.AutoArguments[source]#
+

Automatically choose arguments from FinetunerArguments or EvaluatorArguments.

+
+
+get_pipeline_args_class()[source]#
+
+ +
+ +
+
+ + +
+ + + + + + + +
+ + + +
+ + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/datasets/dataset/index.html b/autoapi/lmflow/datasets/dataset/index.html new file mode 100644 index 000000000..490ebbad6 --- /dev/null +++ b/autoapi/lmflow/datasets/dataset/index.html @@ -0,0 +1,1072 @@ + + + + + + + + + + + lmflow.datasets.dataset — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.datasets.dataset#

+

This Python code defines a class Dataset with methods for initializing, loading, +and manipulating datasets from different backends such as Hugging Face and JSON.

+

The Dataset class includes methods for loading datasets from a dictionary and a Hugging +Face dataset, mapping datasets, and retrieving the backend dataset and arguments.

+
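A minimal usage sketch, assuming a folder of LMFlow-formatted JSON files (the path below is illustrative):

```python
# Hedged sketch: loading a dataset with the huggingface backend.
from lmflow.args import DatasetArguments
from lmflow.datasets.dataset import Dataset

data_args = DatasetArguments(dataset_path="data/alpaca")  # illustrative path
dataset = Dataset(data_args, backend="huggingface")
print(dataset.get_type(), len(dataset))
```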
+

Attributes#

+
+ + + + + + + + + + + + + + + + + +

logger

DATASET_TYPES

KEY_TYPE

KEY_INSTANCES

KEY_SCORE

+
+
+
+

Classes#

+
+ + + + + +

Dataset

Initializes the Dataset object with the given parameters.

+
+
+
+

Module Contents#

+
+
+lmflow.datasets.dataset.logger[source]#
+
+ +
+
+lmflow.datasets.dataset.DATASET_TYPES = ['text_only', 'text2text', 'float_only', 'image_text', 'conversation', 'paired_conversation',...[source]#
+
+ +
+
+lmflow.datasets.dataset.KEY_TYPE = 'type'[source]#
+
+ +
+
+lmflow.datasets.dataset.KEY_INSTANCES = 'instances'[source]#
+
+ +
+
+lmflow.datasets.dataset.KEY_SCORE = 'score'[source]#
+
+ +
+
+class lmflow.datasets.dataset.Dataset(data_args: lmflow.args.DatasetArguments = None, backend: str = 'huggingface', *args, **kwargs)[source]#
+

Initializes the Dataset object with the given parameters.

+
+
Parameters:
+
+
data_argsDatasetArguments object.

Contains the arguments required to load the dataset.

+
+
backendstr, default=”huggingface”

A string representing the dataset backend. Defaults to “huggingface”.

+
+
argsOptional.

Positional arguments.

+
+
kwargsOptional.

Keyword arguments.

+
+
+
+
+
+
+data_args[source]#
+
+ +
+
+backend[source]#
+
+ +
+
+backend_dataset = None[source]#
+
+ +
+
+type = None[source]#
+
+ +
+
+dataset_path[source]#
+
+ +
+
+__len__()[source]#
+
+ +
+
+_check_data_format()[source]#
+

Checks if data type and data structure matches

+

Raise messages with hints if not matched.

+
+ +
+
+from_dict(dict_obj: dict, *args, **kwargs)[source]#
+

Create a Dataset object from a dictionary.

+
+
Return a Dataset given a dict with format:
+
{

“type”: TYPE, +“instances”: [

+
+
+
{

“key_1”: VALUE_1.1, +“key_2”: VALUE_1.2, +…

+
+
+

}, +{

+
+

“key_1”: VALUE_2.1, +“key_2”: VALUE_2.2, +…

+
+
+

]

+
+
+

}

+
+
+
+
Parameters:
+
+
dict_objdict.

A dictionary containing the dataset information.

+
+
argsOptional.

Positional arguments.

+
+
kwargsOptional.

Keyword arguments.

+
+
+
+
Returns:
+
+
selfDataset object.
+
+
+
+
+ +
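For example, a text_only dataset (one of the DATASET_TYPES above) can be built from such a dict; this sketch assumes each text_only instance is keyed by "text", and the instance texts are made up.

```python
# Hedged sketch of the dict format accepted by from_dict / create_from_dict.
data_dict = {
    "type": "text_only",
    "instances": [
        {"text": "Hello, world!"},
        {"text": "LMFlow makes finetuning simple."},
    ],
}
dataset = Dataset.create_from_dict(data_dict)  # Dataset as documented on this page
```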
+
+classmethod create_from_dict(dict_obj, *args, **kwargs)[source]#
+
+
Returns:
+
+
Returns a Dataset object given a dict.
+
+
+
+
+ +
+
+to_dict()[source]#
+
+
Returns:
+
+
Return a dict that represents the dataset:
+
{

“type”: TYPE, +“instances”: [

+
+
+
{

“key_1”: VALUE_1.1, +“key_2”: VALUE_1.2, +…

+
+
+

}, +{

+
+

“key_1”: VALUE_2.1, +“key_2”: VALUE_2.2, +…

+
+
+

]

+
+
+

}

+
+
A Python dict object that represents the content of this dataset.
+
+
+
+
+ +
+
+to_list()[source]#
+

Returns a list of instances.

+
+ +
+
+map(*args, **kwargs)[source]#
+
+
Parameters:
+
+
argsOptional.

Positional arguments.

+
+
kwargsOptional.

Keyword arguments.

+
+
+
+
Returns:
+
+
selfDataset object.
+
+
+
+
+ +
+
+get_backend() str | None[source]#
+
+
Returns:
+
+
self.backend
+
+
+
+
+ +
+
+get_backend_dataset()[source]#
+
+
Returns:
+
+
self.backend_dataset
+
+
+
+
+ +
+
+get_fingerprint()[source]#
+
+
Returns:
+
+
Fingerprint of the backend_dataset which controls the cache
+
+
+
+
+ +
+
+get_data_args()[source]#
+
+
Returns:
+
+
self.data_args
+
+
+
+
+ +
+
+get_type() str[source]#
+
+
Returns:
+
+
self.type
+
+
+
+
+ +
+
+save(file_path: str, format: str = 'json')[source]#
+

Save the dataset to a json file.

+
+
Parameters:
+
+
file_pathstr.

The path to the file where the dataset will be saved.

+
+
+
+
+
+ +
+
+sample(n: int, seed: int = 42)[source]#
+

Sample n instances from the dataset.

+
+
Parameters:
+
+
nint.

The number of instances to sample from the dataset.

+
+
+
+
Returns:
+
+
sample_datasetDataset object.

A new dataset object containing the sampled instances.

+
+
+
+
+
+ +
+
+train_test_split(test_size: float = 0.2, shuffle: bool = True, seed: int = 42)[source]#
+

Split the dataset into training and testing sets.

+
+
Parameters:
+
+
test_sizefloat, default=0.2.

The proportion of the dataset that will be used for testing.

+
+
+
+
Returns:
+
+
train_datasetDataset object.

A new dataset object containing the training instances.

+
+
test_datasetDataset object.

A new dataset object containing the testing instances.

+
+
+
+
+
+ +
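Combined with sample() above, a short hedged sketch of how these helpers are typically chained (assuming `dataset` is an existing Dataset instance):

```python
# Hedged sketch: splitting and subsampling an existing Dataset.
train_dataset, test_dataset = dataset.train_test_split(test_size=0.1, shuffle=True, seed=42)
small_train = train_dataset.sample(n=100, seed=42)
```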
+
+drop_instances(indices: list)[source]#
+

Drop instances from the dataset.

+
+
Parameters:
+
+
indiceslist.

A list of indices of the instances to drop from the dataset.

+
+
+
+
+
+ +
+
+sanity_check(drop_invalid: bool = True)[source]#
+

Perform a sanity check on the dataset.

+
+ +
+
+hf_dataset_sanity_check(drop_invalid: bool = True)[source]#
+

Perform a sanity check on the HuggingFace dataset.

+
+ +
+ +
+
+ + +
+ + + + + + + +
+ + + + + + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/datasets/index.html b/autoapi/lmflow/datasets/index.html new file mode 100644 index 000000000..c784fbec3 --- /dev/null +++ b/autoapi/lmflow/datasets/index.html @@ -0,0 +1,1078 @@ + + + + + + + + + + + lmflow.datasets — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.datasets#

+

This Python code defines a class Dataset with methods for initializing, loading, +and manipulating datasets from different backends such as Hugging Face and JSON.

+

The Dataset class includes methods for loading datasets from a dictionary and a Hugging +Face dataset, mapping datasets, and retrieving the backend dataset and arguments.

+
+

Submodules#

+ +
+
+

Classes#

+
+ + + + + + + + +

Dataset

Initializes the Dataset object with the given parameters.

CustomMultiModalDataset

Dataset for Multi Modal data

+
+
+
+

Package Contents#

+
+
+class lmflow.datasets.Dataset(data_args: lmflow.args.DatasetArguments = None, backend: str = 'huggingface', *args, **kwargs)[source]#
+

Initializes the Dataset object with the given parameters.

+
+
Parameters:
+
+
data_argsDatasetArguments object.

Contains the arguments required to load the dataset.

+
+
backendstr, default=”huggingface”

A string representing the dataset backend. Defaults to “huggingface”.

+
+
argsOptional.

Positional arguments.

+
+
kwargsOptional.

Keyword arguments.

+
+
+
+
+
+
+data_args#
+
+ +
+
+backend#
+
+ +
+
+backend_dataset = None#
+
+ +
+
+type = None#
+
+ +
+
+dataset_path#
+
+ +
+
+__len__()[source]#
+
+ +
+
+_check_data_format()[source]#
+

Checks if data type and data structure matches

+

Raise messages with hints if not matched.

+
+ +
+
+from_dict(dict_obj: dict, *args, **kwargs)[source]#
+

Create a Dataset object from a dictionary.

+
+
Return a Dataset given a dict with format:
+
{

“type”: TYPE, +“instances”: [

+
+
+
{

“key_1”: VALUE_1.1, +“key_2”: VALUE_1.2, +…

+
+
+

}, +{

+
+

“key_1”: VALUE_2.1, +“key_2”: VALUE_2.2, +…

+
+
+

]

+
+
+

}

+
+
+
+
Parameters:
+
+
dict_objdict.

A dictionary containing the dataset information.

+
+
argsOptional.

Positional arguments.

+
+
kwargsOptional.

Keyword arguments.

+
+
+
+
Returns:
+
+
selfDataset object.
+
+
+
+
+ +
+
+classmethod create_from_dict(dict_obj, *args, **kwargs)[source]#
+
+
Returns:
+
+
Returns a Dataset object given a dict.
+
+
+
+
+ +
+
+to_dict()[source]#
+
+
Returns:
+
+
Return a dict that represents the dataset:
+
{

“type”: TYPE, +“instances”: [

+
+
+
{

“key_1”: VALUE_1.1, +“key_2”: VALUE_1.2, +…

+
+
+

}, +{

+
+

“key_1”: VALUE_2.1, +“key_2”: VALUE_2.2, +…

+
+
+

]

+
+
+

}

+
+
A Python dict object that represents the content of this dataset.
+
+
+
+
+ +
+
+to_list()[source]#
+

Returns a list of instances.

+
+ +
+
+map(*args, **kwargs)[source]#
+
+
Parameters:
+
+
argsOptional.

Positional arguments.

+
+
kwargsOptional.

Keyword arguments.

+
+
+
+
Returns:
+
+
selfDataset object.
+
+
+
+
+ +
+
+get_backend() str | None[source]#
+
+
Returns:
+
+
self.backend
+
+
+
+
+ +
+
+get_backend_dataset()[source]#
+
+
Returns:
+
+
self.backend_dataset
+
+
+
+
+ +
+
+get_fingerprint()[source]#
+
+
Returns:
+
+
Fingerprint of the backend_dataset which controls the cache
+
+
+
+
+ +
+
+get_data_args()[source]#
+
+
Returns:
+
+
self.data_args
+
+
+
+
+ +
+
+get_type() str[source]#
+
+
Returns:
+
+
self.type
+
+
+
+
+ +
+
+save(file_path: str, format: str = 'json')[source]#
+

Save the dataset to a json file.

+
+
Parameters:
+
+
file_pathstr.

The path to the file where the dataset will be saved.

+
+
+
+
+
+ +
+
+sample(n: int, seed: int = 42)[source]#
+

Sample n instances from the dataset.

+
+
Parameters:
+
+
nint.

The number of instances to sample from the dataset.

+
+
+
+
Returns:
+
+
sample_datasetDataset object.

A new dataset object containing the sampled instances.

+
+
+
+
+
+ +
+
+train_test_split(test_size: float = 0.2, shuffle: bool = True, seed: int = 42)[source]#
+

Split the dataset into training and testing sets.

+
+
Parameters:
+
+
test_sizefloat, default=0.2.

The proportion of the dataset that will be used for testing.

+
+
+
+
Returns:
+
+
train_datasetDataset object.

A new dataset object containing the training instances.

+
+
test_datasetDataset object.

A new dataset object containing the testing instances.

+
+
+
+
+
+ +
+
+drop_instances(indices: list)[source]#
+

Drop instances from the dataset.

+
+
Parameters:
+
+
indiceslist.

A list of indices of the instances to drop from the dataset.

+
+
+
+
+
+ +
+
+sanity_check(drop_invalid: bool = True)[source]#
+

Perform a sanity check on the dataset.

+
+ +
+
+hf_dataset_sanity_check(drop_invalid: bool = True)[source]#
+

Perform a sanity check on the HuggingFace dataset.

+
+ +
+ +
+
+class lmflow.datasets.CustomMultiModalDataset(dataset_path: str, data_args: lmflow.args.DatasetArguments)[source]#
+

Bases: torch.utils.data.Dataset

+

Dataset for Multi Modal data

+
+
+data_dict#
+
+ +
+
+data_dict#
+
+ +
+
+data_args#
+
+ +
+
+image_folder#
+
+ +
+
+__len__()[source]#
+
+ +
+
+register_tokenizer(tokenizer, image_processor=None)[source]#
+
+ +
+
+__getitem__(i)[source]#
+
+ +
+ +
+
+ + +
+ + + + + + + +
+ + + + + + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/datasets/multi_modal_dataset/index.html b/autoapi/lmflow/datasets/multi_modal_dataset/index.html new file mode 100644 index 000000000..d5725d345 --- /dev/null +++ b/autoapi/lmflow/datasets/multi_modal_dataset/index.html @@ -0,0 +1,815 @@ + + + + + + + + + + + lmflow.datasets.multi_modal_dataset — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.datasets.multi_modal_dataset#

+

This Python code defines a class Multi Modal Dataset.

+
+

Classes#

+
+ + + + + + + + +

CustomMultiModalDataset

Dataset for Multi Modal data

DataCollatorForSupervisedDataset

Collate examples for supervised fine-tuning.

+
+
+
+

Functions#

+
+ + + + + + + + + + + + + + +

preprocess_multimodal_llava(sources, data_args)

tokenizer_image_token(prompt, tokenizer[, ...])

preprocess_llama_from_llava_plain(sources, tokenizer)

This function just adds the image in front of the text.

preprocess_llama_from_llava_v1(sources, tokenizer[, ...])

This function adds the prompt and then puts the image after the prompt.

+
+
+
+

Module Contents#

+
+
+class lmflow.datasets.multi_modal_dataset.CustomMultiModalDataset(dataset_path: str, data_args: lmflow.args.DatasetArguments)[source]#
+

Bases: torch.utils.data.Dataset

+

Dataset for Multi Modal data

+
+
+data_dict[source]#
+
+ +
+
+data_dict[source]#
+
+ +
+
+data_args[source]#
+
+ +
+
+image_folder[source]#
+
+ +
+
+__len__()[source]#
+
+ +
+
+register_tokenizer(tokenizer, image_processor=None)[source]#
+
+ +
+
+__getitem__(i)[source]#
+
+ +
+ +
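A rough usage sketch for CustomMultiModalDataset; the dataset path, image folder, and tokenizer checkpoint are illustrative assumptions, not values taken from this page.

```python
# Hedged sketch: building a multi-modal dataset and attaching a tokenizer.
from transformers import AutoTokenizer

from lmflow.args import MultiModalDatasetArguments
from lmflow.datasets.multi_modal_dataset import CustomMultiModalDataset

data_args = MultiModalDatasetArguments(
    dataset_path="data/llava_instruct",  # illustrative
    image_folder="data/images",          # illustrative
)
mm_dataset = CustomMultiModalDataset("data/llava_instruct/train.json", data_args)

tokenizer = AutoTokenizer.from_pretrained("llava-hf/llava-1.5-7b-hf")  # illustrative checkpoint
mm_dataset.register_tokenizer(tokenizer, image_processor=None)
print(len(mm_dataset))
```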
+
+lmflow.datasets.multi_modal_dataset.preprocess_multimodal_llava(sources, data_args)[source]#
+
+ +
+
+lmflow.datasets.multi_modal_dataset.tokenizer_image_token(prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None)[source]#
+
+ +
+
+lmflow.datasets.multi_modal_dataset.preprocess_llama_from_llava_plain(sources, tokenizer: transformers.PreTrainedTokenizer, has_image: bool = False)[source]#
+

This function just adds the image in front of the text and does not add any prompt.
Args:

+
+

sources: The input data with text and image.
tokenizer: The tokenizer to process text.
has_image: Whether the input data has an image.

+
+
+
Returns:

The input_ids and labels for the model.

+
+
+
+ +
+
+lmflow.datasets.multi_modal_dataset.preprocess_llama_from_llava_v1(sources, tokenizer: transformers.PreTrainedTokenizer, has_image: bool = False)[source]#
+

This function adds the prompt and then puts the image after the prompt, so it needs additional code to generate the target label.
Args:

+
+

sources: The input data with text and image.
tokenizer: The tokenizer to process text.
has_image: Whether the input data has an image.

+
+
+
Returns:

The input_ids and labels for the model.

+
+
+
+ +
+
+class lmflow.datasets.multi_modal_dataset.DataCollatorForSupervisedDataset[source]#
+

Bases: object

+

Collate examples for supervised fine-tuning.

+
+
+tokenizer: transformers.PreTrainedTokenizer[source]#
+
+ +
+
+__call__(instances)[source]#
+
+ +
+ +
+
+ + +
+ + + + + + + +
+ + + + + + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/index.html b/autoapi/lmflow/index.html new file mode 100644 index 000000000..d4beb7912 --- /dev/null +++ b/autoapi/lmflow/index.html @@ -0,0 +1,699 @@ + + + + + + + + + + + lmflow — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + + + + + + + + + +
+ + + +
+ + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/models/auto_model/index.html b/autoapi/lmflow/models/auto_model/index.html new file mode 100644 index 000000000..9af0072f9 --- /dev/null +++ b/autoapi/lmflow/models/auto_model/index.html @@ -0,0 +1,683 @@ + + + + + + + + + + + lmflow.models.auto_model — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.models.auto_model#

+

Automatically get correct model type.

+
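A short hedged sketch; arch_type is assumed to select the decoder-only branch, and the checkpoint name is illustrative.

```python
# Hedged sketch: AutoModel dispatches to a concrete model class based on model_args.
from lmflow.args import ModelArguments
from lmflow.models.auto_model import AutoModel

model_args = ModelArguments(model_name_or_path="gpt2", arch_type="decoder_only")
model = AutoModel.get_model(model_args, tune_strategy="none")
```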
+

Classes#

+
+ + + + + +

AutoModel

+
+
+
+

Module Contents#

+
+
+class lmflow.models.auto_model.AutoModel[source]#
+
+
+classmethod get_model(model_args, *args, **kwargs)[source]#
+
+ +
+ +
+
+ + +
+ + + + + + + +
+ + + +
+ + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/models/base_model/index.html b/autoapi/lmflow/models/base_model/index.html new file mode 100644 index 000000000..f1d09f4ee --- /dev/null +++ b/autoapi/lmflow/models/base_model/index.html @@ -0,0 +1,678 @@ + + + + + + + + + + + lmflow.models.base_model — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.models.base_model#

+

Base model class.

+
+

Classes#

+
+ + + + + +

BaseModel

Helper class that provides a standard way to create an ABC using

+
+
+
+

Module Contents#

+
+
+class lmflow.models.base_model.BaseModel(*args, **kwargs)[source]#
+

Bases: abc.ABC

+

Helper class that provides a standard way to create an ABC using +inheritance.

+
+ +
+
+ + +
+ + + + + + + +
+ + + +
+ + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/models/decoder_model/index.html b/autoapi/lmflow/models/decoder_model/index.html new file mode 100644 index 000000000..aa43226d5 --- /dev/null +++ b/autoapi/lmflow/models/decoder_model/index.html @@ -0,0 +1,685 @@ + + + + + + + + + + + lmflow.models.decoder_model — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.models.decoder_model#

+

A one-line summary of the module or program, terminated by a period.

+

Leave one blank line. The rest of this docstring should contain an +overall description of the module or program. Optionally, it may also +contain a brief description of exported classes and functions and/or usage +examples.

+

Typical usage example:

+
+

foo = ClassFoo() +bar = foo.FunctionBar()

+
+
+

Classes#

+
+ + + + + +

DecoderModel

+
+
+
+

Module Contents#

+
+
+class lmflow.models.decoder_model.DecoderModel(*args, **kwargs)[source]#
+

Bases: lmflow.models.base_model.BaseModel

+
+ +
+
+ + +
+ + + + + + + +
+ + + +
+ + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/models/encoder_decoder_model/index.html b/autoapi/lmflow/models/encoder_decoder_model/index.html new file mode 100644 index 000000000..6ab064d1b --- /dev/null +++ b/autoapi/lmflow/models/encoder_decoder_model/index.html @@ -0,0 +1,687 @@ + + + + + + + + + + + lmflow.models.encoder_decoder_model — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.models.encoder_decoder_model#

+

A one-line summary of the module or program, terminated by a period.

+

Leave one blank line. The rest of this docstring should contain an +overall description of the module or program. Optionally, it may also +contain a brief description of exported classes and functions and/or usage +examples.

+

Typical usage example:

+
+

foo = ClassFoo() +bar = foo.FunctionBar()

+
+
+

Classes#

+
+ + + + + +

EncoderDecoderModel

Helper class that provides a standard way to create an ABC using

+
+
+
+

Module Contents#

+
+
+class lmflow.models.encoder_decoder_model.EncoderDecoderModel(*args, **kwargs)[source]#
+

Bases: lmflow.models.base_model.BaseModel

+

Helper class that provides a standard way to create an ABC using +inheritance.

+
+ +
+
+ + +
+ + + + + + + +
+ + + +
+ + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/models/hf_decoder_model/index.html b/autoapi/lmflow/models/hf_decoder_model/index.html new file mode 100644 index 000000000..8e00ccb57 --- /dev/null +++ b/autoapi/lmflow/models/hf_decoder_model/index.html @@ -0,0 +1,991 @@ + + + + + + + + + + + lmflow.models.hf_decoder_model — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.models.hf_decoder_model#

+

This is a class called HFDecoderModel which is a wrapper around transformers model and +tokenizer classes. It has several methods such as __init__, tokenize, and train that are +used for training and fine-tuning the model. The __init__ method takes in several arguments +such as model_args, tune_strategy, and ds_config, which are used to load the pretrained +model and tokenizer, and initialize the training settings.

+

The tokenize method is used to tokenize the input text and return the input IDs and attention +masks that can be fed to the model for training or inference.

+

This class supports different tune_strategy options such as ‘normal’, ‘none’, ‘lora’, and +‘adapter’, which allow for different fine-tuning settings of the model. However, the ‘lora’ +and ‘adapter’ strategies are not yet implemented.

+

Overall, this class provides a convenient interface for loading and fine-tuning transformer +models and can be used for various NLP tasks such as language modeling, text classification, +and question answering.

+
+
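A minimal usage sketch; the checkpoint and prompt are illustrative, and tune_strategy='none' is one of the documented options for inference-only use.

```python
# Hedged sketch: loading an HFDecoderModel and round-tripping text through the tokenizer.
from lmflow.args import ModelArguments
from lmflow.models.hf_decoder_model import HFDecoderModel

model_args = ModelArguments(model_name_or_path="gpt2")
model = HFDecoderModel(model_args, tune_strategy="none", device="gpu")

token_ids = model.encode("Hello, world!")  # list of token ids
text = model.decode(token_ids)             # back to a string
```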

Attributes#

+ +
+
+

Classes#

+
+ + + + + +

HFDecoderModel

Initializes a HFDecoderModel instance.

+
+
+
+

Module Contents#

+
+
+lmflow.models.hf_decoder_model.logger[source]#
+
+ +
+
+lmflow.models.hf_decoder_model.MODELS_SUPPORT_FLASH_ATTENTION = ['LlamaForCausalLM', 'GPTNeoForCausalLM', 'GPT2ForCausalLM', 'BloomForCausalLM'][source]#
+
+ +
+
+lmflow.models.hf_decoder_model.GPU_SUPPORT_FLASH_ATTENTION[source]#
+
+ +
+
+lmflow.models.hf_decoder_model.GPU_SUPPORT_FLASH_ATTENTION[source]#
+
+ +
+
+class lmflow.models.hf_decoder_model.HFDecoderModel(model_args, tune_strategy='normal', ds_config=None, device='gpu', use_accelerator=False, *args, **kwargs)[source]#
+

Bases: lmflow.models.decoder_model.DecoderModel, lmflow.models.hf_model_mixin.HFModelMixin, lmflow.models.interfaces.tunable.Tunable

+

Initializes a HFDecoderModel instance.

+
+
Parameters:
+
+
model_args

Model arguments such as model name, path, revision, etc.

+
+
tune_strategystr or none, default=”normal”.

A string representing the dataset backend. Defaults to “huggingface”.

+
+
ds_config

Deepspeed configuations.

+
+
argsOptional.

Positional arguments.

+
+
kwargsOptional.

Keyword arguments.

+
+
+
+
+
+
+tokenize(dataset, add_special_tokens=True, *args, **kwargs) lmflow.datasets.dataset.Dataset[source]#
+

Tokenize the full dataset.

+
+
Parameters:
+
+
datasetlmflow.datasets.Dataset.
+
argsOptional.

Positional arguments.

+
+
kwargsOptional.

Keyword arguments.

+
+
+
+
Returns:
+
+
tokenized_datasets

The tokenized dataset, without any leading or trailing special +tokens (normally they are Begin-Of-Sentence or End-Of-Sentence +tokens).

+
+
+
+
+
+ +
+
+encode(input: str | List[str], *args, **kwargs) List[int] | List[List[int]][source]#
+

Perform encoding process of the tokenizer.

+
+
Parameters:
+
+
inputsstr or list.

The text sequence.

+
+
argsOptional.

Positional arguments.

+
+
kwargsOptional.

Keyword arguments.

+
+
+
+
Returns:
+
+
outputs

If string input, return the tokenized inputs.
“Hello,world!” -> [101, 7592, 1010, 2088, 102]
If batch input, return {input_ids, attention_mask, token_type_ids}.
[“Hello,world!”, ”Hello!”] -> {‘input_ids’: tensor([[101, 7592, 1010, 2088, 102], …]), ‘attention_mask’: tensor([[1, 1, 1, 1, 1], [0, 0, 1, 1, 1]])}

+
+
+
+
+
+ +
+
+decode(input, *args, **kwargs) str | List[str][source]#
+

Perform decoding process of the tokenizer.

+
+
Parameters:
+
+
inputslist or tensor.

The token sequence.

+
+
argsOptional.

Positional arguments.

+
+
kwargsOptional.

Keyword arguments.

+
+
+
+
Returns:
+
+
outputs

The text decoded from the token inputs.
If batch input, return the list of texts:
[[101, 7592, 1010, 2088, 102], [101, 7592, 1010, 2088, 102]] -> [“Hello,world!”, ”Hello,world!”]
If single input, return the text:
[101, 7592, 1010, 2088, 102] -> “Hello,world!”

+
+
+
+
+
+ +
+
+inference(inputs, release_gpu: bool = False, use_vllm: bool = False, **kwargs)[source]#
+

Perform generation process of the model.

+
+
Parameters:
+
+
inputs

The sequence used as a prompt for the generation or as model inputs to the model. +When using vllm inference, this should be a string or a list of strings. +When using normal inference, this should be a tensor.

+
+
release_gpubool, optional

Whether to release the GPU resource after inference, by default False.

+
+
use_vllmbool, optional

Whether to use VLLM for inference, by default False.

+
+
kwargsOptional.

Keyword arguments.

+
+
+
+
Returns:
+
+
outputs

The generated sequence output

+
+
+
+
+
+ +
+
+__inference(inputs, *args, **kwargs)[source]#
+

Perform generation process of the model.

+
+
Parameters:
+
+
inputs

The tokenized sequence used as a prompt for the generation or as model inputs to the model.

+
+
argsOptional.

Positional arguments.

+
+
kwargsOptional.

Keyword arguments.

+
+
+
+
Returns:
+
+
outputs

The generated sequence output

+
+
+
+
+
+ +
+
+__vllm_inference(inputs: str | List[str], sampling_params: vllm.SamplingParams | None = None, **kwargs) List[lmflow.utils.data_utils.VLLMInferenceResultWithInput][source]#
+

Perform VLLM inference process of the model.

+
+
Parameters:
+
+
inputsUnion[str, List[str]]

Prompt(s), string or a list of strings.

+
+
sampling_paramsOptional[SamplingParams], optional

vllm SamplingParams object, by default None.

+
+
+
+
Returns:
+
+
List[VLLMInferenceResultWithInput]

Return a list of VLLMInferenceResultWithInput, where each +element contains the input prompt and the corresponding output.

+

When sampling_params.detokenize = True, the output would be a list of strings, +contains sampling_params.n samples for the corresponding prompt.

+

When sampling_params.detokenize = False, return a list of list of ints +(token ids, no decoding after generation).

+
+
+
+
+
+ +
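The effect of sampling_params.detokenize can be sketched as follows. This private method is normally reached through inference(use_vllm=True); passing sampling_params through the inference **kwargs is an assumption here, and the parameter values are placeholders:

```python
from vllm import SamplingParams

# Two decoded strings per prompt (detokenize=True).
params_text = SamplingParams(n=2, max_tokens=64, detokenize=True)
results_text = model.inference(["Hello"], use_vllm=True, sampling_params=params_text)

# Same structure, but raw token ids per sample (detokenize=False).
params_ids = SamplingParams(n=2, max_tokens=64, detokenize=False)
results_ids = model.inference(["Hello"], use_vllm=True, sampling_params=params_ids)
```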
+
+prepare_inputs_for_inference(dataset: lmflow.datasets.dataset.Dataset, apply_chat_template: bool = True, enable_distributed_inference: bool = False, use_vllm: bool = False, **kwargs) List[str] | ray.data.Dataset | Dict[str, torch.Tensor][source]#
+

Prepare inputs for inference.

+
+
Parameters:
+
+
dataset : lmflow.datasets.Dataset.

The dataset used for inference.

+
+
argsOptional.

Positional arguments.

+
+
kwargsOptional.

Keyword arguments.

+
+
+
+
Returns:
+
+
outputs

The prepared inputs for inference.

+
+
+
+
+
+ +
+
+__prepare_inputs_for_vllm_inference(dataset: lmflow.datasets.dataset.Dataset, apply_chat_template: bool = True, enable_distributed_inference: bool = False) List[str] | ray.data.Dataset[source]#
+
+ +
+
+abstract __prepare_inputs_for_inference(dataset: lmflow.datasets.dataset.Dataset, **kwargs)[source]#
+
+ +
+
+merge_lora_weights()[source]#
+
+ +
+
+get_peft_without_qlora()[source]#
+
+ +
+
+save(dir, save_full_model=False, *args, **kwargs)[source]#
+

Save the model and tokenizer to the given directory.

+
+
Parameters:
+
+
dir

The directory to save model and tokenizer

+
+
save_full_model : bool, optional.

Whether to save full model.

+
+
kwargsOptional.

Keyword arguments.

+
+
+
+
Returns:
+
+
outputs

None. The model and tokenizer are written to the given directory.

+
+
+
+
+
+ +
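A sketch of a typical save flow for a LoRA-tuned model, using merge_lora_weights documented above; the output directory is a placeholder:

```python
# Merge LoRA adapters into the base weights, then write model + tokenizer.
model.merge_lora_weights()
model.save("output_models/finetuned_model", save_full_model=True)
```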
+ +
+
+ + +
+ + + + + + + +
+ + + + + + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/models/hf_encoder_decoder_model/index.html b/autoapi/lmflow/models/hf_encoder_decoder_model/index.html new file mode 100644 index 000000000..307d71aa7 --- /dev/null +++ b/autoapi/lmflow/models/hf_encoder_decoder_model/index.html @@ -0,0 +1,883 @@ + + + + + + + + + + + lmflow.models.hf_encoder_decoder_model — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.models.hf_encoder_decoder_model#

+

This is a class called HFEncoderDecoderModel which is a wrapper around transformers model and tokenizer classes. It has several methods such as __init__, tokenize, and train that are used for training and fine-tuning the model. The __init__ method takes in several arguments such as model_args, tune_strategy, and ds_config, which are used to load the pretrained model and tokenizer, and to initialize the training settings.

+

The tokenize method is used to tokenize the input text and return the input IDs and attention masks that can be fed to the model for training or inference.

+

This class supports different tune_strategy options such as ‘normal’, ‘none’, ‘lora’, and ‘adapter’, which allow for different fine-tuning settings of the model. However, the ‘lora’ and ‘adapter’ strategies are not yet implemented.

+

Overall, this class provides a convenient interface for loading and fine-tuning transformer models and can be used for various NLP tasks such as language modeling, text classification, and question answering.
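A minimal construction sketch under the ‘normal’ tune strategy; the checkpoint name is a placeholder and the remaining arguments are left at their documented defaults:

```python
from lmflow.args import ModelArguments
from lmflow.models.hf_encoder_decoder_model import HFEncoderDecoderModel

model_args = ModelArguments(model_name_or_path="t5-base")  # placeholder checkpoint

# 'lora' and 'adapter' are noted above as not yet implemented, so only
# 'normal' (full fine-tuning) and 'none' are expected to work here.
model = HFEncoderDecoderModel(model_args, tune_strategy="normal")
```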

+
+

Attributes#

+
+ + + + + +

logger

+
+
+
+

Classes#

+
+ + + + + +

HFEncoderDecoderModel

Initializes a HFEncoderDecoderModel instance.

+
+
+
+

Module Contents#

+
+
+lmflow.models.hf_encoder_decoder_model.logger[source]#
+
+ +
+
+class lmflow.models.hf_encoder_decoder_model.HFEncoderDecoderModel(model_args, tune_strategy='normal', ds_config=None, device='gpu', use_accelerator=False, custom_model=False, with_deepspeed=True, pipeline_args=None, *args, **kwargs)[source]#
+

Bases: lmflow.models.encoder_decoder_model.EncoderDecoderModel, lmflow.models.interfaces.tunable.Tunable

+

Initializes a HFEncoderDecoderModel instance.

+
+
Parameters:
+
+
model_args

Model arguments such as model name, path, revision, etc.

+
+
tune_strategy : str or none, default="normal".

A string representing the tuning strategy. Defaults to "normal".

+
+
ds_config

DeepSpeed configurations.

+
+
argsOptional.

Positional arguments.

+
+
kwargsOptional.

Keyword arguments.

+
+
+
+
+
+
+device[source]#
+
+ +
+
+abstract tokenize(dataset, *args, **kwargs)[source]#
+

Tokenize the full dataset.

+
+
Parameters:
+
+
dataset

Text dataset.

+
+
argsOptional.

Positional arguments.

+
+
kwargsOptional.

Keyword arguments.

+
+
+
+
Returns:
+
+
tokenized_datasets

The tokenized dataset.

+
+
+
+
+
+ +
+
+encode(input: str | List[str], *args, **kwargs) List[int] | List[List[int]][source]#
+

Perform encoding process of the tokenizer.

+
+
Parameters:
+
+
inputs : str or list.

The text sequence.

+
+
argsOptional.

Positional arguments.

+
+
kwargsOptional.

Keyword arguments.

+
+
+
+
Returns:
+
+
outputs

The tokenized inputs.

+
+
+
+
+
+ +
+
+decode(input, *args, **kwargs) str | List[str][source]#
+

Perform decoding process of the tokenizer.

+
+
Parameters:
+
+
inputs : list.

The token sequence.

+
+
argsOptional.

Positional arguments.

+
+
kwargsOptional.

Keyword arguments.

+
+
+
+
Returns:
+
+
outputs

The text decoded from the token inputs.

+
+
+
+
+
+ +
+
+inference(inputs, *args, **kwargs)[source]#
+

Perform generation process of the model.

+
+
Parameters:
+
+
inputs

The sequence used as a prompt for the generation or as model inputs to the model.

+
+
argsOptional.

Positional arguments.

+
+
kwargsOptional.

Keyword arguments.

+
+
+
+
Returns:
+
+
outputs

The generated sequence output

+
+
+
+
+
+ +
+
+merge_lora_weights()[source]#
+
+ +
+
+save(dir, save_full_model=False, *args, **kwargs)[source]#
+

Save the model and tokenizer to the given directory.

+
+
Parameters:
+
+
dir

The directory to save model and tokenizer

+
+
save_full_model : bool, optional.

Whether to save full model.

+
+
kwargsOptional.

Keyword arguments.

+
+
+
+
Returns:
+
+
outputs

None. The model and tokenizer are written to the given directory.

+
+
+
+
+
+ +
+
+get_max_length()[source]#
+

Return max acceptable input length in terms of tokens.

+
+ +
+
+get_tokenizer()[source]#
+

Return the tokenizer of the model.

+
+ +
+
+get_backend_model()[source]#
+

Return the backend model.

+
+ +
+ +
+
+ + +
+ + + + + + + +
+ + + + + + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/models/hf_model_mixin/index.html b/autoapi/lmflow/models/hf_model_mixin/index.html new file mode 100644 index 000000000..c46ab05d4 --- /dev/null +++ b/autoapi/lmflow/models/hf_model_mixin/index.html @@ -0,0 +1,918 @@ + + + + + + + + + + + lmflow.models.hf_model_mixin — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.models.hf_model_mixin#

+
+

Attributes#

+ +
+
+

Classes#

+
+ + + + + +

HFModelMixin

+
+
+
+

Module Contents#

+
+
+lmflow.models.hf_model_mixin.logger[source]#
+
+ +
+
+lmflow.models.hf_model_mixin.HF_AUTOMODEL_MAPPING[source]#
+
+ +
+
+lmflow.models.hf_model_mixin.HF_AUTOMODEL_TYPE[source]#
+
+ +
+
+lmflow.models.hf_model_mixin.LORA_TARGET_MODULES_MAPPING[source]#
+
+ +
+
+class lmflow.models.hf_model_mixin.HFModelMixin(model_args: lmflow.args.ModelArguments, do_train: bool, ds_config=None, device: str | None = 'gpu', use_accelerator: bool = False, hf_auto_model_additional_args: Dict | None = None, *args, **kwargs)[source]#
+

Bases: lmflow.models.base_model.BaseModel

+
+
+device[source]#
+
+ +
+
+model_args[source]#
+
+ +
+
+hf_auto_model[source]#
+
+ +
+
+use_accelerator[source]#
+
+ +
+
+ds_config[source]#
+
+ +
+
+do_train[source]#
+
+ +
+
+tokenizer[source]#
+
+ +
+
+torch_dtype[source]#
+
+ +
+
+hf_model_config[source]#
+
+ +
+
+quant_config[source]#
+
+ +
+
+peft_config[source]#
+
+ +
+
+_activated = False[source]#
+
+ +
+
+__prepare_tokenizer(model_args: lmflow.args.ModelArguments) transformers.PreTrainedTokenizer | transformers.PreTrainedTokenizerFast[source]#
+
+ +
+
+__prepare_dtype(model_args: lmflow.args.ModelArguments) torch.dtype[source]#
+
+ +
+
+__prepare_model_config(model_args: lmflow.args.ModelArguments, hf_auto_model_additional_args: Dict | None = None)[source]#
+

Prepare model configuration for hf auto register.

Parameters#

model_args : ModelArguments

+
+

LMFlow model arguments.

+
+
+
hf_auto_model_additional_args : Optional[Dict], optional

Special configurations such as num_labels for AutoModelForSequenceClassification (commonly used in reward modeling) are not preset in __prepare_model_config, so they should be passed in via hf_auto_model_additional_args.

+
+
+
+

Returns#

+
+
config : ModelConfig

hf model config.

+
+
+
+
+ +
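A sketch of how the additional-args mechanism described above might be exercised; constructing HFModelMixin directly is shown only for illustration (in practice a subclass such as HFTextRegressionModel forwards this dictionary), and num_labels=1 is the usual scalar-reward-head assumption:

```python
from lmflow.args import ModelArguments
from lmflow.models.hf_model_mixin import HFModelMixin

model_args = ModelArguments(model_name_or_path="gpt2")  # placeholder checkpoint

# num_labels is not preset by __prepare_model_config, so a sequence
# classification head (e.g. a scalar reward head) has to be requested here.
mixin = HFModelMixin(
    model_args,
    do_train=False,
    hf_auto_model_additional_args={"num_labels": 1},
)
```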
+
+__prepare_quant_config(model_args: lmflow.args.ModelArguments)[source]#
+
+ +
+
+__prepare_peft_config(model_args: lmflow.args.ModelArguments)[source]#
+
+ +
+
+__model_module_inject(model_args: lmflow.args.ModelArguments) None[source]#
+

Override some model modules with custom implementations.

+

Current implementations:

- Position interpolation (model_args.do_rope_scaling):

+
+

replace llama embeddings with condense embeddings.

+
+
+ +
+
+__prepare_model_for_training(model_args: lmflow.args.ModelArguments, hf_auto_model: HF_AUTOMODEL_TYPE)[source]#
+
+ +
+
+__prepare_model_for_inference(model_args: lmflow.args.ModelArguments, hf_auto_model: HF_AUTOMODEL_TYPE, use_accelerator: bool, ds_config)[source]#
+
+ +
+
+__prepare_model_for_vllm_inference(model_args: lmflow.args.ModelArguments, vllm_gpu_memory_utilization: float, vllm_tensor_parallel_size: int)[source]#
+
+ +
+
+__prepare_model_post_process()[source]#
+
+ +
+
+activate_model_for_inference(use_vllm: bool = False, **kwargs)[source]#
+
+ +
+
+deactivate_model_for_inference(use_vllm: bool = False)[source]#
+

Deactivate the model and release the resources.

+

NOTE: Currently, VLLM doesn’t have an official way to do this, and the implementation below cannot release all GPU resources by our observation. Thus this method is just a placeholder for future implementation. See: [GitHub issue](vllm-project/vllm#1908)

+
+ +
+
+get_max_length()[source]#
+

Return max acceptable input length in terms of tokens.

+
+ +
+
+get_tokenizer()[source]#
+

Return the tokenizer of the model.

+
+ +
+
+get_backend_model()[source]#
+

Return the backend model.

+
+ +
+ +
+
+ + +
+ + + + + + + +
+ + + + + + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/models/hf_text_regression_model/index.html b/autoapi/lmflow/models/hf_text_regression_model/index.html new file mode 100644 index 000000000..626e75159 --- /dev/null +++ b/autoapi/lmflow/models/hf_text_regression_model/index.html @@ -0,0 +1,850 @@ + + + + + + + + + + + lmflow.models.hf_text_regression_model — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.models.hf_text_regression_model#

+
+

Attributes#

+
+ + + + + +

logger

+
+
+
+

Classes#

+
+ + + + + +

HFTextRegressionModel

Initializes a HFTextRegressionModel instance.

+
+
+
+

Module Contents#

+
+
+lmflow.models.hf_text_regression_model.logger[source]#
+
+ +
+
+class lmflow.models.hf_text_regression_model.HFTextRegressionModel(model_args: lmflow.args.ModelArguments, tune_strategy: str = 'normal', ds_config=None, device='gpu', use_accelerator=False, *args, **kwargs)[source]#
+

Bases: lmflow.models.text_regression_model.TextRegressionModel, lmflow.models.hf_model_mixin.HFModelMixin, lmflow.models.interfaces.tunable.Tunable

+

Initializes a HFTextRegressionModel instance.

+
+
Parameters:
+
+
model_args

Model arguments such as model name, path, revision, etc.

+
+
tune_strategy : str or none, default="normal".

A string representing the tuning strategy. Defaults to "normal".

+
+
ds_config

DeepSpeed configurations.

+
+
argsOptional.

Positional arguments.

+
+
kwargsOptional.

Keyword arguments.

+
+
+
+
+
+
+config_additional_args[source]#
+
+ +
+
+tokenize(dataset: lmflow.datasets.dataset.Dataset, add_special_tokens=True, *args, **kwargs)[source]#
+

Tokenize the full dataset.

+
+
Parameters:
+
+
dataset : lmflow.datasets.Dataset.
+
argsOptional.

Positional arguments.

+
+
kwargsOptional.

Keyword arguments.

+
+
+
+
Returns:
+
+
tokenized_datasets

The tokenized dataset, without any leading or trailing special tokens (normally Begin-Of-Sentence or End-Of-Sentence tokens).

+
+
+
+
+
+ +
+
+inference(inputs, release_gpu: bool = False, use_vllm: bool = False, **kwargs) List[float] | transformers.modeling_outputs.SequenceClassifierOutputWithPast[source]#
+

Perform generation process of the model.

+
+
Parameters:
+
+
inputs

The sequence used as a prompt for the generation or as model inputs to the model. When using vLLM inference, this should be a string or a list of strings. When using normal inference, this should be a tensor.

+
+
release_gpu : bool, optional

Whether to release the GPU resource after inference, by default False.

+
+
use_vllm : bool, optional

Whether to use VLLM for inference, by default False.

+
+
kwargsOptional.

Keyword arguments.

+
+
+
+
Returns:
+
+
outputs

The generated sequence output

+
+
+
+
+
+ +
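A sketch of scoring text with the reward (text regression) model through the normal inference path; the checkpoint is a placeholder and passing bare input ids (without an attention mask) is an assumption:

```python
import torch
from lmflow.args import ModelArguments
from lmflow.models.hf_text_regression_model import HFTextRegressionModel

model_args = ModelArguments(model_name_or_path="my-reward-model")  # placeholder
reward_model = HFTextRegressionModel(model_args, tune_strategy="none")

tokenizer = reward_model.get_tokenizer()
input_ids = torch.tensor([tokenizer("Hello, world!")["input_ids"]])
scores = reward_model.inference(input_ids)  # List[float] or a classifier output
```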
+
+__inference(inputs, **kwargs)[source]#
+

Perform generation process of the model.

+
+
Parameters:
+
+
inputs

The tokenized sequence used as a prompt for the generation or as model inputs to the model.

+
+
kwargsOptional.

Keyword arguments.

+
+
+
+
Returns:
+
+
outputs

The generated sequence output

+
+
+
+
+
+ +
+
+abstract __vllm_inference(inputs: str | List[str], sampling_params: vllm.SamplingParams | None = None, **kwargs) List[List[str]] | List[List[List[int]]][source]#
+

Perform VLLM inference process of the model.

+
+
Parameters:
+
+
inputs : Union[str, List[str]]

Prompt(s), string or a list of strings.

+
+
sampling_params : Optional[SamplingParams], optional

vllm SamplingParams object, by default None.

+
+
+
+
Returns:
+
+
+
+ +
+
+prepare_inputs_for_inference(dataset: lmflow.datasets.dataset.Dataset, enable_distributed_inference: bool = False, use_vllm: bool = False, **kwargs) lmflow.datasets.dataset.Dataset | ray.data.Dataset[source]#
+
+ +
+
+static postprocess_inference_outputs(dataset: lmflow.datasets.dataset.Dataset, scores: List[float] | List[List[float]])[source]#
+
+ +
+
+static postprocess_distributed_inference_outputs(dataset: lmflow.datasets.dataset.Dataset, inference_result: List[lmflow.utils.data_utils.RewardModelInferenceResultWithInput])[source]#
+
+ +
+
+save(dir, *args, **kwargs)[source]#
+

Save the model and tokenizer to the given directory.

+
+
Parameters:
+
+
dir

The directory to save model and tokenizer

+
+
kwargsOptional.

Keyword arguments.

+
+
+
+
+
+ +
+ +
+
+ + +
+ + + + + + + +
+ + + + + + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/models/index.html b/autoapi/lmflow/models/index.html new file mode 100644 index 000000000..f94960d53 --- /dev/null +++ b/autoapi/lmflow/models/index.html @@ -0,0 +1,674 @@ + + + + + + + + + + + lmflow.models — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/models/interfaces/index.html b/autoapi/lmflow/models/interfaces/index.html new file mode 100644 index 000000000..511a36c1b --- /dev/null +++ b/autoapi/lmflow/models/interfaces/index.html @@ -0,0 +1,659 @@ + + + + + + + + + + + lmflow.models.interfaces — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + + + + + + + + + +
+ + + +
+ + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/models/interfaces/tunable/index.html b/autoapi/lmflow/models/interfaces/tunable/index.html new file mode 100644 index 000000000..cf8daea79 --- /dev/null +++ b/autoapi/lmflow/models/interfaces/tunable/index.html @@ -0,0 +1,678 @@ + + + + + + + + + + + lmflow.models.interfaces.tunable — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.models.interfaces.tunable#

+

Tunable class

+
+

Classes#

+
+ + + + + +

Tunable

Helper class that provides a standard way to create an ABC using

+
+
+
+

Module Contents#

+
+
+class lmflow.models.interfaces.tunable.Tunable[source]#
+

Bases: abc.ABC

+

Helper class that provides a standard way to create an ABC using inheritance.

+
+ +
+
+ + +
+ + + + + + + +
+ + + +
+ + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/models/regression_model/index.html b/autoapi/lmflow/models/regression_model/index.html new file mode 100644 index 000000000..51164af3b --- /dev/null +++ b/autoapi/lmflow/models/regression_model/index.html @@ -0,0 +1,676 @@ + + + + + + + + + + + lmflow.models.regression_model — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + + + + + + + + + +
+ + + +
+ + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/models/text_regression_model/index.html b/autoapi/lmflow/models/text_regression_model/index.html new file mode 100644 index 000000000..40715ab53 --- /dev/null +++ b/autoapi/lmflow/models/text_regression_model/index.html @@ -0,0 +1,716 @@ + + + + + + + + + + + lmflow.models.text_regression_model — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.models.text_regression_model#

+

A model that maps “text_only” data to a float.

+
+

Classes#

+
+ + + + + +

TextRegressionModel

Initializes a TextRegressionModel instance.

+
+
+
+

Module Contents#

+
+
+class lmflow.models.text_regression_model.TextRegressionModel(model_args, *args, **kwargs)[source]#
+

Bases: lmflow.models.regression_model.RegressionModel

+

Initializes a TextRegressionModel instance.

+
+
Parameters:
+
+
model_args

Model arguments such as model name, path, revision, etc.

+
+
argsOptional.

Positional arguments.

+
+
kwargsOptional.

Keyword arguments.

+
+
+
+
+
+
+inference_func = None[source]#
+
+ +
+
+register_inference_function(inference_func)[source]#
+

Registers a regression function.

+
+ +
+
+inference(inputs: lmflow.datasets.dataset.Dataset)[source]#
+

Gets regression results of a given dataset.

+
+
Inputs:
+

Dataset object, only accept type “text_only”.

+
+
+
+ +
+ +
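A sketch of the registration pattern described above; the scoring function is a trivial placeholder, and text_only_dataset stands for any Dataset of type “text_only”:

```python
from lmflow.args import ModelArguments
from lmflow.models.text_regression_model import TextRegressionModel

model = TextRegressionModel(ModelArguments())  # placeholder arguments

def my_scorer(dataset):
    # Placeholder regression function: a real implementation would map each
    # "text_only" instance in `dataset` to a float score.
    return 0.0

model.register_inference_function(my_scorer)
results = model.inference(text_only_dataset)  # Dataset of type "text_only"
```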
+
+ + +
+ + + + + + + +
+ + + + + + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/models/vision2seq_model/index.html b/autoapi/lmflow/models/vision2seq_model/index.html new file mode 100644 index 000000000..759c0c323 --- /dev/null +++ b/autoapi/lmflow/models/vision2seq_model/index.html @@ -0,0 +1,827 @@ + + + + + + + + + + + lmflow.models.vision2seq_model — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.models.vision2seq_model#

+
+

Classes#

+ +
+
+

Module Contents#

+
+
+class lmflow.models.vision2seq_model.CustomAutoVision2SeqModel(config: transformers.Blip2Config, image_encoder_name_or_path=None, qformer_name_or_path=None, language_model_name_or_path=None, low_resource=False)[source]#
+

Bases: transformers.Blip2ForConditionalGeneration, lmflow.models.base_model.BaseModel

+
+
+custom_vision_model[source]#
+
+ +
+
+with_qformer[source]#
+
+ +
+
+kwargs[source]#
+
+ +
+
+language_model[source]#
+
+ +
+
+hidden_size[source]#
+
+ +
+
+hidden_size[source]#
+
+ +
+
+get_backend_model()[source]#
+
+ +
+
+vision_model_from_pretrained(pretrained_path)[source]#
+
+ +
+
+qformer_from_pretrained(pretrained_path)[source]#
+
+ +
+
+language_model_from_pretrained(pretrained_path, low_resource=False, use_prompt_cache=False)[source]#
+
+ +
+
+vision_feature_select(image_forward_outs)[source]#
+
+ +
+
+register_prompt_cache(prompt_ids, prompt_keys_values)[source]#
+

Update the prompt id and embedding for reuse in the future.

+
+
Args:

prompt_ids (torch.LongTensor): The id of the prompt.
prompt_keys_values (torch.FloatTensor): The embedding of the prompt.

+
+
Returns:

None

+
+
+
+ +
+
+save_prompt_cache(path)[source]#
+

Save prompt embedding and id.

+
+
Args:

path: The path to save the prompt embedding and id.

+
+
Returns:

None

+
+
+
+ +
+
+load_prompt_cache(path)[source]#
+

Load prompt embedding and id.

Args:

+
+

path: The path to load the prompt embedding and id.

+
+
+
Returns:

None

+
+
+
+ +
+
+get_tokenizer()[source]#
+
+ +
+
+forward(input_ids: torch.LongTensor = None, pixel_values: torch.FloatTensor | None = None, images: torch.FloatTensor | None = None, attention_mask: torch.Tensor | None = None, past_key_values: List[torch.FloatTensor] | None = None, inputs_embeds: torch.FloatTensor | None = None, labels: torch.LongTensor | None = None, use_cache: bool | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, image_token_indexes: List | None = [0], one_sample_multiple_images: bool = False) Tuple | transformers.modeling_outputs.CausalLMOutputWithPast[source]#
+
+ +
+
+processor_image_token_in_minigpt4(input_ids, language_model_inputs, attention_mask, image_token_indexes, pixel_values, batch_size=1)[source]#
+
+ +
+
+generate(pixel_values: torch.FloatTensor, input_ids: torch.LongTensor | None = None, attention_mask: torch.LongTensor | None = None, image_token_indexes: List | None = [0], one_sample_multiple_images: bool | None = False, images: torch.LongTensor | None = None, **generate_kwargs) torch.LongTensor[source]#
+

Overrides generate function to be able to use the model as a conditional generator.

+
+
Args:
+
pixel_values (torch.FloatTensor of shape (batch_size, num_channels, height, width)):

Input images to be processed.

+
+
input_ids (torch.LongTensor of shape (batch_size, sequence_length), optional):

The sequence used as a prompt for the generation.

+
+
attention_mask (torch.LongTensor of shape (batch_size, sequence_length), optional):

Mask to avoid performing attention on padding token indices

+
+
image_token_indexes (List, optional):

The index for inserting the image tokens.

+
+
one_sample_multiple_images: (bool, optional):

Flag used during inference when the input batch size is 1 but the input contains multiple images.

+
+
+
+
Returns:

captions (list): A list of strings of length batch_size * num_captions.

+
+
+
+ +
+ +
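A sketch of calling the override above with only arguments that appear in its documented signature; the config, tensors, and generation kwargs are placeholders:

```python
import torch
from transformers import Blip2Config
from lmflow.models.vision2seq_model import CustomAutoVision2SeqModel

# Placeholder model; a real setup would load weights via the
# *_from_pretrained helpers documented above.
model = CustomAutoVision2SeqModel(Blip2Config())

pixel_values = torch.randn(1, 3, 224, 224)      # one fake RGB image
input_ids = torch.tensor([[1, 2, 3]])           # placeholder prompt token ids

captions = model.generate(
    pixel_values,
    input_ids=input_ids,
    image_token_indexes=[0],   # insert the image embedding at position 0
    max_new_tokens=16,         # assumed to be forwarded via **generate_kwargs
)
```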
+
+ + +
+ + + + + + + +
+ + + + + + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/models/vision_encoder/clip_encoder/index.html b/autoapi/lmflow/models/vision_encoder/clip_encoder/index.html new file mode 100644 index 000000000..4ecd1fb21 --- /dev/null +++ b/autoapi/lmflow/models/vision_encoder/clip_encoder/index.html @@ -0,0 +1,787 @@ + + + + + + + + + + + lmflow.models.vision_encoder.clip_encoder — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.models.vision_encoder.clip_encoder#

+
+

Classes#

+
+ + + + + +

CLIPVisionTower

+
+
+
+

Functions#

+
+ + + + + +

build_vision_tower(vision_tower_cfg, **kwargs)

+
+
+
+

Module Contents#

+
+
+lmflow.models.vision_encoder.clip_encoder.build_vision_tower(vision_tower_cfg, **kwargs)[source]#
+
+ +
+
+class lmflow.models.vision_encoder.clip_encoder.CLIPVisionTower(vision_tower, args, delay_load=False)[source]#
+

Bases: torch.nn.Module

+
+
+is_loaded = False[source]#
+
+ +
+
+vision_tower_name[source]#
+
+ +
+
+select_layer[source]#
+
+ +
+
+select_feature[source]#
+
+ +
+
+load_model()[source]#
+
+ +
+
+encode_images(images, language_projection)[source]#
+
+ +
+
+feature_select(image_forward_outs)[source]#
+
+ +
+
+forward(images)[source]#
+
+ +
+
+property dummy_feature[source]#
+
+ +
+
+property dtype[source]#
+
+ +
+
+property device[source]#
+
+ +
+
+property config[source]#
+
+ +
+
+property hidden_size[source]#
+
+ +
+
+property num_patches[source]#
+
+ +
+
+prepare_inputs_labels_for_multimodal(input_ids, attention_mask, past_key_values, labels, images, language_projection=None, language_model=None, **kwargs)[source]#
+

Copied from the LLaVA code base. Should be polished.

+
+ +
+ +
+
+ + +
+ + + + + + + +
+ + + + + + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/models/vision_encoder/index.html b/autoapi/lmflow/models/vision_encoder/index.html new file mode 100644 index 000000000..488e18183 --- /dev/null +++ b/autoapi/lmflow/models/vision_encoder/index.html @@ -0,0 +1,683 @@ + + + + + + + + + + + lmflow.models.vision_encoder — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + + + + + + + + + +
+ + + +
+ + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/optim/adabelief/index.html b/autoapi/lmflow/optim/adabelief/index.html new file mode 100644 index 000000000..c163a6150 --- /dev/null +++ b/autoapi/lmflow/optim/adabelief/index.html @@ -0,0 +1,741 @@ + + + + + + + + + + + lmflow.optim.adabelief — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.optim.adabelief#

+
+

Classes#

+
+ + + + + +

AdaBelief

Implements AdaBelief algorithm. Modified from Adam in PyTorch

+
+
+
+

Module Contents#

+
+
+class lmflow.optim.adabelief.AdaBelief(params, lr=0.001, betas=(0.9, 0.999), eps=1e-16, weight_decay=0, amsgrad=False, weight_decouple=True, fixed_decay=False, rectify=True, degenerated_to_sgd=True, print_change_log=True)[source]#
+

Bases: torch.optim.optimizer.Optimizer

+

Implements the AdaBelief algorithm. Modified from Adam in PyTorch. Reference: AdaBelief Optimizer, adapting stepsizes by the belief in observed gradients, NeurIPS 2020.

+
+
+degenerated_to_sgd[source]#
+
+ +
+
+defaults[source]#
+
+ +
+
+degenerated_to_sgd[source]#
+
+ +
+
+weight_decouple[source]#
+
+ +
+
+rectify[source]#
+
+ +
+
+fixed_decay[source]#
+
+ +
+
+__setstate__(state)[source]#
+
+ +
+
+reset()[source]#
+
+ +
+
+step(closure=None)[source]#
+

Performs a single optimization step.

Arguments:

+
+
+
closure (callable, optional): A closure that reevaluates the model

and returns the loss.

+
+
+
+
+ +
+ +
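A self-contained sketch of driving this optimizer with the closure form that step documents; the toy model and data are placeholders:

```python
import torch
from lmflow.optim.adabelief import AdaBelief

model = torch.nn.Linear(10, 1)
optimizer = AdaBelief(model.parameters(), lr=1e-3, print_change_log=False)

x, y = torch.randn(32, 10), torch.randn(32, 1)

def closure():
    # Re-evaluates the model and returns the loss, as the docstring requires.
    optimizer.zero_grad()
    loss = torch.nn.functional.mse_loss(model(x), y)
    loss.backward()
    return loss

for _ in range(10):
    optimizer.step(closure)
```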
+
+ + +
+ + + + + + + +
+ + + + + + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/optim/adabound/index.html b/autoapi/lmflow/optim/adabound/index.html new file mode 100644 index 000000000..7d8608805 --- /dev/null +++ b/autoapi/lmflow/optim/adabound/index.html @@ -0,0 +1,714 @@ + + + + + + + + + + + lmflow.optim.adabound — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.optim.adabound#

+
+

Classes#

+
+ + + + + +

AdaBound

Implements AdaBound algorithm.

+
+
+
+

Module Contents#

+
+
+class lmflow.optim.adabound.AdaBound(params, lr: float = 0.001, betas=(0.9, 0.999), final_lr: float = 0.1, gamma: float = 0.001, eps: float = 1e-08, weight_decay: float = 0, amsbound: bool = False)[source]#
+

Bases: torch.optim.optimizer.Optimizer

+

Implements AdaBound algorithm.

+

It has been proposed in Adaptive Gradient Methods with Dynamic Bound of Learning Rate (https://arxiv.org/abs/1902.09843).

Note:

+
+

Reference code: Luolc/AdaBound

+
+
+
+defaults[source]#
+
+ +
+
+base_lrs[source]#
+
+ +
+
+__setstate__(state) None[source]#
+
+ +
+
+step(closure=None)[source]#
+

Performs a single optimization step.

+
+
Arguments:

closure: A closure that reevaluates the model and returns the loss.

+
+
+
+ +
+ +
+
+ + +
+ + + + + + + +
+ + + + + + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/optim/adadelta/index.html b/autoapi/lmflow/optim/adadelta/index.html new file mode 100644 index 000000000..5d67ad1c3 --- /dev/null +++ b/autoapi/lmflow/optim/adadelta/index.html @@ -0,0 +1,689 @@ + + + + + + + + + + + lmflow.optim.adadelta — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.optim.adadelta#

+
+

Classes#

+
+ + + + + +

Adadelta

+
+
+
+

Module Contents#

+
+
+class lmflow.optim.adadelta.Adadelta(params, lr=1.0, rho=0.95, eps=1e-06)[source]#
+

Bases: torch.optim.optimizer.Optimizer

+
+
+defaults[source]#
+
+ +
+
+step(closure=None)[source]#
+
+ +
+ +
+
+ + +
+ + + + + + + +
+ + + +
+ + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/optim/adagrad/index.html b/autoapi/lmflow/optim/adagrad/index.html new file mode 100644 index 000000000..01ae7d1ed --- /dev/null +++ b/autoapi/lmflow/optim/adagrad/index.html @@ -0,0 +1,689 @@ + + + + + + + + + + + lmflow.optim.adagrad — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.optim.adagrad#

+
+

Classes#

+
+ + + + + +

AdaGrad

+
+
+
+

Module Contents#

+
+
+class lmflow.optim.adagrad.AdaGrad(params, lr=0.001, eps=1e-08, weight_decay=0)[source]#
+

Bases: torch.optim.Optimizer

+
+
+defaults[source]#
+
+ +
+
+step(closure=None)[source]#
+
+ +
+ +
+
+ + +
+ + + + + + + +
+ + + +
+ + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/optim/adam/index.html b/autoapi/lmflow/optim/adam/index.html new file mode 100644 index 000000000..37f976ef0 --- /dev/null +++ b/autoapi/lmflow/optim/adam/index.html @@ -0,0 +1,689 @@ + + + + + + + + + + + lmflow.optim.adam — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.optim.adam#

+
+

Classes#

+
+ + + + + +

Adam

+
+
+
+

Module Contents#

+
+
+class lmflow.optim.adam.Adam(params, lr=0.001, betas=(0.9, 0.999), eps=1e-08)[source]#
+

Bases: torch.optim.optimizer.Optimizer

+
+
+defaults[source]#
+
+ +
+
+step(closure=None)[source]#
+
+ +
+ +
+
+ + +
+ + + + + + + +
+ + + +
+ + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/optim/adamax/index.html b/autoapi/lmflow/optim/adamax/index.html new file mode 100644 index 000000000..86c57543f --- /dev/null +++ b/autoapi/lmflow/optim/adamax/index.html @@ -0,0 +1,695 @@ + + + + + + + + + + + lmflow.optim.adamax — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.optim.adamax#

+
+

Classes#

+
+ + + + + +

Adamax

+
+
+
+

Module Contents#

+
+
+class lmflow.optim.adamax.Adamax(params, lr=0.002, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)[source]#
+

Bases: torch.optim.optimizer.Optimizer

+
+
+defaults[source]#
+
+ +
+
+__setstate__(state)[source]#
+
+ +
+
+step(closure=None)[source]#
+
+ +
+ +
+
+ + +
+ + + + + + + +
+ + + +
+ + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/optim/adamp/index.html b/autoapi/lmflow/optim/adamp/index.html new file mode 100644 index 000000000..126541b6f --- /dev/null +++ b/autoapi/lmflow/optim/adamp/index.html @@ -0,0 +1,726 @@ + + + + + + + + + + + lmflow.optim.adamp — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.optim.adamp#

+
+

Classes#

+
+ + + + + +

AdamP

Implements AdamP algorithm.

+
+
+
+

Module Contents#

+
+
+class lmflow.optim.adamp.AdamP(params, lr: float = 0.001, betas=(0.9, 0.999), eps: float = 1e-08, weight_decay: float = 0, delta: float = 0.1, wd_ratio: float = 0.1, nesterov: bool = False)[source]#
+

Bases: torch.optim.optimizer.Optimizer

+

Implements AdamP algorithm.

+

It has been proposed in Slowing Down the Weight Norm Increase in Momentum-based Optimizers (https://arxiv.org/abs/2006.08217).

+
+
Note:

Reference code: clovaai/AdamP

+
+
+
+
+defaults[source]#
+
+ +
+
+static _channel_view(x)[source]#
+
+ +
+
+static _layer_view(x)[source]#
+
+ +
+
+static _cosine_similarity(x, y, eps, view_func)[source]#
+
+ +
+
+_projection(p, grad, perturb, delta, wd_ratio, eps)[source]#
+
+ +
+
+step(closure=None)[source]#
+

Performs a single optimization step.

+
+
Arguments:

closure: A closure that reevaluates the model and returns the loss.

+
+
+
+ +
+ +
+
+ + +
+ + + + + + + +
+ + + + + + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/optim/adamw_schedule_free/index.html b/autoapi/lmflow/optim/adamw_schedule_free/index.html new file mode 100644 index 000000000..44765add6 --- /dev/null +++ b/autoapi/lmflow/optim/adamw_schedule_free/index.html @@ -0,0 +1,716 @@ + + + + + + + + + + + lmflow.optim.adamw_schedule_free — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.optim.adamw_schedule_free#

+
+

Classes#

+
+ + + + + +

AdamWScheduleFree

Schedule-Free AdamW

+
+
+
+

Module Contents#

+
+
+class lmflow.optim.adamw_schedule_free.AdamWScheduleFree(params, lr=0.0025, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, warmup_steps=0, r=0.0, weight_lr_power=2.0, foreach=hasattr(torch, '_foreach_mul_'))[source]#
+

Bases: torch.optim.Optimizer

+

Schedule-Free AdamW. As the name suggests, no scheduler is needed with this optimizer. To add warmup, rather than using a learning rate schedule, you can just set the warmup_steps parameter.

+

This optimizer requires that .train() and .eval() be called before the beginning of training and evaluation respectively. The optimizer should also be placed in eval mode when saving checkpoints.

+
+
+defaults[source]#
+
+ +
+
+eval()[source]#
+
+ +
+
+train()[source]#
+
+ +
+
+step(closure=None)[source]#
+

Performs a single optimization step.

+
+
Arguments:
+
closure (callable, optional): A closure that reevaluates the model

and returns the loss.

+
+
+
+
+
+ +
+ +
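A sketch of the train/eval discipline the docstring requires; the model, data, and hyperparameters are placeholders:

```python
import torch
from lmflow.optim.adamw_schedule_free import AdamWScheduleFree

model = torch.nn.Linear(10, 1)
optimizer = AdamWScheduleFree(model.parameters(), lr=2.5e-3, warmup_steps=100)

optimizer.train()                      # required before training steps
for _ in range(10):
    optimizer.zero_grad()
    loss = model(torch.randn(8, 10)).pow(2).mean()
    loss.backward()
    optimizer.step()

optimizer.eval()                       # required before evaluation / checkpointing
torch.save(model.state_dict(), "checkpoint.pt")
```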
+
+ + +
+ + + + + + + +
+ + + + + + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/optim/adan/index.html b/autoapi/lmflow/optim/adan/index.html new file mode 100644 index 000000000..65613fce4 --- /dev/null +++ b/autoapi/lmflow/optim/adan/index.html @@ -0,0 +1,733 @@ + + + + + + + + + + + lmflow.optim.adan — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.optim.adan#

+
+

Classes#

+
+ + + + + +

Adan

Implements a pytorch variant of Adan.

+
+
+
+

Functions#

+
+ + + + + + + + +

_single_tensor_adan(params, grads, exp_avgs, ...)

_multi_tensor_adan(params, grads, exp_avgs, ...)

+
+
+
+

Module Contents#

+
+
+class lmflow.optim.adan.Adan(params, lr=0.001, betas=(0.98, 0.92, 0.99), eps=1e-08, weight_decay=0.0, max_grad_norm=0.0, no_prox=False, foreach: bool = True)[source]#
+

Bases: torch.optim.optimizer.Optimizer

+

Implements a pytorch variant of Adan.

+

Adan was proposed in Adan: Adaptive Nesterov Momentum Algorithm for Faster Optimizing Deep Models (https://arxiv.org/abs/2208.06677).

+
+
+defaults[source]#
+
+ +
+
+__setstate__(state)[source]#
+
+ +
+
+restart_opt()[source]#
+
+ +
+
+step()[source]#
+

Performs a single optimization step.

+
+ +
+ +
+
+lmflow.optim.adan._single_tensor_adan(params: List[torch.Tensor], grads: List[torch.Tensor], exp_avgs: List[torch.Tensor], exp_avg_sqs: List[torch.Tensor], exp_avg_diffs: List[torch.Tensor], pre_grads: List[torch.Tensor], *, beta1: float, beta2: float, beta3: float, bias_correction1: float, bias_correction2: float, bias_correction3_sqrt: float, lr: float, weight_decay: float, eps: float, no_prox: bool, clip_global_grad_norm: torch.Tensor)[source]#
+
+ +
+
+lmflow.optim.adan._multi_tensor_adan(params: List[torch.Tensor], grads: List[torch.Tensor], exp_avgs: List[torch.Tensor], exp_avg_sqs: List[torch.Tensor], exp_avg_diffs: List[torch.Tensor], pre_grads: List[torch.Tensor], *, beta1: float, beta2: float, beta3: float, bias_correction1: float, bias_correction2: float, bias_correction3_sqrt: float, lr: float, weight_decay: float, eps: float, no_prox: bool, clip_global_grad_norm: torch.Tensor)[source]#
+
+ +
+
+ + +
+ + + + + + + +
+ + + + + + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/optim/dummy/index.html b/autoapi/lmflow/optim/dummy/index.html new file mode 100644 index 000000000..79cde290f --- /dev/null +++ b/autoapi/lmflow/optim/dummy/index.html @@ -0,0 +1,705 @@ + + + + + + + + + + + lmflow.optim.dummy — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.optim.dummy#

+

Dummy Optimizer.

+
+

Classes#

+
+ + + + + +

Dummy

A dummy optimizer that does nothing.

+
+
+
+

Module Contents#

+
+
+class lmflow.optim.dummy.Dummy(params: Iterable[torch.nn.parameter.Parameter], lr: float = 0.0, betas: Tuple[float, float] = (0.9, 0.999), weight_decay: float = 0.0)[source]#
+

Bases: torch.optim.Optimizer

+

A dummy optimizer that does nothing.

+
+
Parameters:
+
params (Iterable[nn.parameter.Parameter]):

Iterable of parameters to optimize or dictionaries defining parameter groups.

+
+
lr (float, optional, defaults to 0):

The learning rate to use.

+
+
+
+
+
+
+defaults[source]#
+
+ +
+
+step(closure: Callable = None)[source]#
+

Performs a single optimization step.

+
+
Arguments:

closure (Callable, optional): A closure that reevaluates the model and returns the loss.

+
+
+
+ +
+ +
+
+ + +
+ + + + + + + +
+ + + +
+ + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/optim/index.html b/autoapi/lmflow/optim/index.html new file mode 100644 index 000000000..b59693588 --- /dev/null +++ b/autoapi/lmflow/optim/index.html @@ -0,0 +1,673 @@ + + + + + + + + + + + lmflow.optim — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/optim/lamb/index.html b/autoapi/lmflow/optim/lamb/index.html new file mode 100644 index 000000000..9d8f0ae3a --- /dev/null +++ b/autoapi/lmflow/optim/lamb/index.html @@ -0,0 +1,720 @@ + + + + + + + + + + + lmflow.optim.lamb — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.optim.lamb#

+
+

Classes#

+
+ + + + + +

Lamb

Implements Lamb algorithm.

+
+
+
+

Module Contents#

+
+
+class lmflow.optim.lamb.Lamb(params, lr: float = 0.001, betas=(0.9, 0.999), eps: float = 1e-06, weight_decay: float = 0, clamp_value: float = 10, adam: bool = False, debias: bool = False)[source]#
+

Bases: torch.optim.optimizer.Optimizer

+

Implements Lamb algorithm.

+

It has been proposed in Large Batch Optimization for Deep Learning: Training BERT in 76 Minutes (https://arxiv.org/abs/1904.00962).

+
+
Note:

Reference code: cybertronai/pytorch-lamb

+
+
+
+
+defaults[source]#
+
+ +
+
+clamp_value[source]#
+
+ +
+
+adam[source]#
+
+ +
+
+debias[source]#
+
+ +
+
+step(closure=None)[source]#
+

Performs a single optimization step.

+
+
Arguments:

closure: A closure that reevaluates the model and returns the loss.

+
+
+
+ +
+ +
+
+ + +
+ + + + + + + +
+ + + +
+ + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/optim/lars/index.html b/autoapi/lmflow/optim/lars/index.html new file mode 100644 index 000000000..aeb1f5c2b --- /dev/null +++ b/autoapi/lmflow/optim/lars/index.html @@ -0,0 +1,730 @@ + + + + + + + + + + + lmflow.optim.lars — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.optim.lars#

+
+

Classes#

+
+ + + + + +

LARS

Extends SGD in PyTorch with LARS scaling from the paper

+
+
+
+

Module Contents#

+
+
+class lmflow.optim.lars.LARS(params, lr: float = 0.01, momentum: float = 0.0, dampening: float = 0.0, weight_decay: float = 0.0, nesterov: bool = False, trust_coefficient: float = 0.01, eps: float = 1e-08)[source]#
+

Bases: torch.optim.optimizer.Optimizer

+

Extends SGD in PyTorch with LARS scaling from the paper Large Batch Training of Convolutional Networks.

Note: the application of momentum in the SGD part is modified according to the PyTorch standards. LARS scaling fits into the equation in the following fashion:

$$
\begin{aligned}
    g_{t+1} &= \text{lars\_lr} \cdot (\beta \, p_{t} + g_{t+1}), \\
    v_{t+1} &= \mu \, v_{t} + g_{t+1}, \\
    p_{t+1} &= p_{t} - \text{lr} \cdot v_{t+1},
\end{aligned}
$$

where $p$, $g$, $v$, $\mu$ and $\beta$ denote the parameters, gradient, velocity, momentum, and weight decay respectively. The $\text{lars\_lr}$ is defined by Eq. 6 in the paper. The Nesterov version is analogously modified.
+
+
+

Warning

+

Parameters with weight decay set to 0 will automatically be excluded from layer-wise LR scaling. This is to ensure consistency with papers like SimCLR and BYOL.

+
+
+
Note:

Reference code: PyTorchLightning/lightning-bolts

+
+
+
+
+defaults[source]#
+
+ +
+
+__setstate__(state) None[source]#
+
+ +
+
+step(closure=None)[source]#
+

Performs a single optimization step.

+
+
Arguments:

closure: A closure that reevaluates the model and returns the loss.

+
+
+
+ +
+ +
+
+ + +
+ + + + + + + +
+ + + +
+ + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/optim/nadam/index.html b/autoapi/lmflow/optim/nadam/index.html new file mode 100644 index 000000000..a69fb39a7 --- /dev/null +++ b/autoapi/lmflow/optim/nadam/index.html @@ -0,0 +1,695 @@ + + + + + + + + + + + lmflow.optim.nadam — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.optim.nadam#

+
+

Classes#

+
+ + + + + +

NAdam

+
+
+
+

Module Contents#

+
+
+class lmflow.optim.nadam.NAdam(params, lr=0.002, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, momentum_decay=0.004)[source]#
+

Bases: torch.optim.Optimizer

+
+
+defaults[source]#
+
+ +
+
+__setstate__(state)[source]#
+
+ +
+
+step(closure=None)[source]#
+
+ +
+ +
+
+ + +
+ + + + + + + +
+ + + +
+ + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/optim/novograd/index.html b/autoapi/lmflow/optim/novograd/index.html new file mode 100644 index 000000000..cb9b60629 --- /dev/null +++ b/autoapi/lmflow/optim/novograd/index.html @@ -0,0 +1,695 @@ + + + + + + + + + + + lmflow.optim.novograd — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.optim.novograd#

+
+

Classes#

+
+ + + + + +

NovoGrad

+
+
+
+

Module Contents#

+
+
+class lmflow.optim.novograd.NovoGrad(params, lr=0.01, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, grad_averaging=False, amsgrad=False)[source]#
+

Bases: torch.optim.Optimizer

+
+
+defaults[source]#
+
+ +
+
+__setstate__(state)[source]#
+
+ +
+
+step(closure=None)[source]#
+
+ +
+ +
+
+ + +
+ + + + + + + +
+ + + +
+ + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/optim/optimizers/index.html b/autoapi/lmflow/optim/optimizers/index.html new file mode 100644 index 000000000..d0b9e1dbe --- /dev/null +++ b/autoapi/lmflow/optim/optimizers/index.html @@ -0,0 +1,640 @@ + + + + + + + + + + + lmflow.optim.optimizers — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.optim.optimizers#

+

All optimizers.

+
+ + +
+ + + + + + + +
+ + + +
+ + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/optim/radam/index.html b/autoapi/lmflow/optim/radam/index.html new file mode 100644 index 000000000..5251224f4 --- /dev/null +++ b/autoapi/lmflow/optim/radam/index.html @@ -0,0 +1,712 @@ + + + + + + + + + + + lmflow.optim.radam — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.optim.radam#

+
+

Classes#

+
+ + + + + +

RAdam

Implements RAdam optimization algorithm.

+
+
+
+

Module Contents#

+
+
+class lmflow.optim.radam.RAdam(params, lr: float = 0.001, betas=(0.9, 0.999), eps: float = 1e-08, weight_decay: float = 0)[source]#
+

Bases: torch.optim.optimizer.Optimizer

+

Implements RAdam optimization algorithm.

+
+
Note:

Deprecated, please use the version provided by PyTorch.

+
+
+

It has been proposed in On the Variance of the Adaptive Learning Rate and Beyond (https://arxiv.org/abs/1908.03265).

+
+
Note:

Reference code: LiyuanLucasLiu/RAdam

+
+
+
+
+defaults[source]#
+
+ +
+
+__setstate__(state)[source]#
+
+ +
+
+step(closure=None)[source]#
+

Performs a single optimization step.

+
+
Arguments:

closure: A closure that reevaluates the model and returns the loss.

+
+
+
+ +
+ +
+
+ + +
+ + + + + + + +
+ + + +
+ + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/optim/sgd_schedule_free/index.html b/autoapi/lmflow/optim/sgd_schedule_free/index.html new file mode 100644 index 000000000..e0c2b3263 --- /dev/null +++ b/autoapi/lmflow/optim/sgd_schedule_free/index.html @@ -0,0 +1,716 @@ + + + + + + + + + + + lmflow.optim.sgd_schedule_free — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.optim.sgd_schedule_free#

+
+

Classes#

+
+ + + + + +

SGDScheduleFree

Schedule-Free SGD

+
+
+
+

Module Contents#

+
+
+class lmflow.optim.sgd_schedule_free.SGDScheduleFree(params, lr=1.0, momentum=0.9, weight_decay=0, warmup_steps=0, r=0.0, weight_lr_power=2, foreach=hasattr(torch, '_foreach_mul_'))[source]#
+

Bases: torch.optim.Optimizer

+

Schedule-Free SGD. As the name suggests, no scheduler is needed with this optimizer. To add warmup, rather than using a learning rate schedule, you can just set the warmup_steps parameter.

+

This optimizer requires that .train() and .eval() be called before the beginning of training and evaluation respectively. The optimizer should also be placed in eval mode when saving checkpoints.

+
+
+defaults[source]#
+
+ +
+
+eval()[source]#
+
+ +
+
+train()[source]#
+
+ +
+
+step(closure=None)[source]#
+

Performs a single optimization step.

+
+
Arguments:
+
closure (callable, optional): A closure that reevaluates the model

and returns the loss.

+
+
+
+
+
+ +
+ +
+
+ + +
+ + + + + + + +
+ + + + + + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/optim/sgdp/index.html b/autoapi/lmflow/optim/sgdp/index.html new file mode 100644 index 000000000..1889e8c9e --- /dev/null +++ b/autoapi/lmflow/optim/sgdp/index.html @@ -0,0 +1,726 @@ + + + + + + + + + + + lmflow.optim.sgdp — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.optim.sgdp#

+
+

Classes#

+
+ + + + + +

SGDP

Implements SGDP algorithm.

+
+
+
+

Module Contents#

+
+
+class lmflow.optim.sgdp.SGDP(params, lr: float = 0.001, momentum: float = 0, dampening: float = 0, eps: float = 1e-08, weight_decay: float = 0, delta: float = 0.1, wd_ratio: float = 0.1, nesterov: bool = False)[source]#
+

Bases: torch.optim.optimizer.Optimizer

+

Implements SGDP algorithm.

+

It has been proposed in Slowing Down the Weight Norm Increase in Momentum-based Optimizers (https://arxiv.org/abs/2006.08217).

+
+
Note:

Reference code: clovaai/AdamP

+
+
+
+
+defaults[source]#
+
+ +
+
+static _channel_view(x)[source]#
+
+ +
+
+static _layer_view(x)[source]#
+
+ +
+
+static _cosine_similarity(x, y, eps, view_func)[source]#
+
+ +
+
+_projection(p, grad, perturb, delta, wd_ratio, eps)[source]#
+
+ +
+
+step(closure=None)[source]#
+

Performs a single optimization step.

+
+
Arguments:

closure: A closure that reevaluates the model and returns the loss.

+
+
+
+ +
+ +
+
+ + +
+ + + + + + + +
+ + + + + + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/optim/sophia/index.html b/autoapi/lmflow/optim/sophia/index.html new file mode 100644 index 000000000..dbf334b87 --- /dev/null +++ b/autoapi/lmflow/optim/sophia/index.html @@ -0,0 +1,703 @@ + + + + + + + + + + + lmflow.optim.sophia — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.optim.sophia#

+
+

Classes#

+
+ + + + + +

SophiaG

Sophia: A Scalable Stochastic Second-order Optimizer for Language Model Pre-training.

+
+
+
+

Module Contents#

+
+
+class lmflow.optim.sophia.SophiaG(params, lr=0.0001, betas=(0.965, 0.99), rho=0.04, weight_decay=0.1, *, maximize: bool = False, capturable: bool = False)[source]#
+

Bases: torch.optim.optimizer.Optimizer

+

Sophia: A Scalable Stochastic Second-order Optimizer for Language Model Pre-training. Code from: Liuhong99/Sophia.

+
+
+defaults[source]#
+
+ +
+
+__setstate__(state)[source]#
+
+ +
+
+update_hessian()[source]#
+
+ +
+
+step(closure=None, bs=5120)[source]#
+
+ +
+ +
+
+ + +
+ + + + + + + +
+ + + + + + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/optim/yogi/index.html b/autoapi/lmflow/optim/yogi/index.html new file mode 100644 index 000000000..88b30e46f --- /dev/null +++ b/autoapi/lmflow/optim/yogi/index.html @@ -0,0 +1,701 @@ + + + + + + + + + + + lmflow.optim.yogi — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.optim.yogi#

+
+

Classes#

+
+ + + + + +

Yogi

Implements Yogi Optimizer Algorithm.

+
+
+
+

Module Contents#

+
+
+class lmflow.optim.yogi.Yogi(params, lr: float = 0.01, betas=(0.9, 0.999), eps: float = 0.001, initial_accumulator: float = 1e-06, weight_decay: float = 0)[source]#
+

Bases: torch.optim.optimizer.Optimizer

+

Implements the Yogi optimizer algorithm. It has been proposed in Adaptive Methods for Nonconvex Optimization.

+

https://papers.nips.cc/paper/8186-adaptive-methods-for-nonconvex-optimization

+
+
Note:

Reference code: 4rtemi5/Yogi-Optimizer_Keras

+
+
+
+
+defaults[source]#
+
+ +
+
+step(closure=None)[source]#
+

Performs a single optimization step.

+
+
Arguments:

closure: A closure that reevaluates the model and returns the loss.

+
+
+
+ +
+ +
+
+ + +
+ + + + + + + +
+ + + +
+ + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/pipeline/auto_pipeline/index.html b/autoapi/lmflow/pipeline/auto_pipeline/index.html new file mode 100644 index 000000000..4adf02b7d --- /dev/null +++ b/autoapi/lmflow/pipeline/auto_pipeline/index.html @@ -0,0 +1,720 @@ + + + + + + + + + + + lmflow.pipeline.auto_pipeline — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.pipeline.auto_pipeline#

+

Return a pipeline automatically based on its name.

+
+

Attributes#

+
+ + + + + +

PIPELINE_MAPPING

+
+
+
+

Classes#

+
+ + + + + +

AutoPipeline

The class designed to return a pipeline automatically based on its name.

+
+
+
+

Functions#

+
+ + + + + +

is_package_version_at_least(package_name, min_version)

+
+
+
+

Module Contents#

+
+
+lmflow.pipeline.auto_pipeline.is_package_version_at_least(package_name, min_version)[source]#
+
+ +
+
+lmflow.pipeline.auto_pipeline.PIPELINE_MAPPING[source]#
+
+ +
+
+class lmflow.pipeline.auto_pipeline.AutoPipeline[source]#
+

The class designed to return a pipeline automatically based on its name.

+
+
+classmethod get_pipeline(pipeline_name, model_args, data_args, pipeline_args, *args, **kwargs)[source]#
+
+ +
+ +
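A sketch of the lookup this class provides; the pipeline name and the three argument objects are placeholders (names must match keys in PIPELINE_MAPPING, and the *_args objects are assumed to be parsed from the lmflow.args dataclasses elsewhere):

```python
from lmflow.pipeline.auto_pipeline import AutoPipeline

# Assumption: "finetuner" is a registered key in PIPELINE_MAPPING and
# model_args / data_args / pipeline_args were built beforehand.
pipeline = AutoPipeline.get_pipeline(
    pipeline_name="finetuner",
    model_args=model_args,
    data_args=data_args,
    pipeline_args=pipeline_args,
)
```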
+
+ + +
+ + + + + + + +
+ + + + + + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/pipeline/base_aligner/index.html b/autoapi/lmflow/pipeline/base_aligner/index.html new file mode 100644 index 000000000..53c910c83 --- /dev/null +++ b/autoapi/lmflow/pipeline/base_aligner/index.html @@ -0,0 +1,691 @@ + + + + + + + + + + + lmflow.pipeline.base_aligner — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

lmflow.pipeline.base_aligner#

+

BaseAligner: a subclass of BasePipeline.

+
+

Classes#

+
+ + + + + +

BaseAligner

A subclass of BasePipeline which is alignable.

+
+
+
+

Module Contents#

+
+
+class lmflow.pipeline.base_aligner.BaseAligner(*args, **kwargs)[source]#
+

Bases: lmflow.pipeline.base_pipeline.BasePipeline

+

A subclass of BasePipeline which is alignable.

+
+
+_check_if_alignable(model, dataset, reward_model)[source]#
+
+ +
+
+abstract align(model, dataset, reward_model)[source]#
+ + \ No newline at end of file diff --git a/autoapi/lmflow/pipeline/base_pipeline/index.html b/autoapi/lmflow/pipeline/base_pipeline/index.html new file mode 100644 index 000000000..dc40d8720 --- /dev/null +++ b/autoapi/lmflow/pipeline/base_pipeline/index.html @@ -0,0 +1,678 @@ + + + + + + + + + + + lmflow.pipeline.base_pipeline — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

lmflow.pipeline.base_pipeline#

+

BasePipeline.

+
+

Classes#

+
+ + + + + +

BasePipeline

Helper class that provides a standard way to create an ABC using

+
+
+
+

Module Contents#

+
+
+class lmflow.pipeline.base_pipeline.BasePipeline[source]#
+

Bases: abc.ABC

+

Helper class that provides a standard way to create an ABC using +inheritance.

+ + \ No newline at end of file diff --git a/autoapi/lmflow/pipeline/base_tuner/index.html b/autoapi/lmflow/pipeline/base_tuner/index.html new file mode 100644 index 000000000..3c4f2b865 --- /dev/null +++ b/autoapi/lmflow/pipeline/base_tuner/index.html @@ -0,0 +1,691 @@ + + + + + + + + + + + lmflow.pipeline.base_tuner — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

lmflow.pipeline.base_tuner#

+

BaseTuner: a subclass of BasePipeline.

+
+

Classes#

+
+ + + + + +

BaseTuner

A subclass of BasePipeline which is tunable.

+
+
+
+

Module Contents#

+
+
+class lmflow.pipeline.base_tuner.BaseTuner(*args, **kwargs)[source]#
+

Bases: lmflow.pipeline.base_pipeline.BasePipeline

+

A subclass of BasePipeline which is tunable.

+
+
+_check_if_tunable(model, dataset)[source]#
+
+ +
+
+abstract tune(model, dataset)[source]#
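Since tune() is abstract, a concrete tuner must override it; a minimal illustrative subclass (hypothetical, not part of LMFlow) looks like:

```python
from lmflow.pipeline.base_tuner import BaseTuner

class MyTuner(BaseTuner):
    """Hypothetical tuner that only demonstrates the required interface."""

    def _check_if_tunable(self, model, dataset):
        # A real tuner would validate the dataset type, model backend, etc.
        return True

    def tune(self, model, dataset):
        self._check_if_tunable(model, dataset)
        # ... run the actual training loop here ...
        return model
```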
+ + \ No newline at end of file diff --git a/autoapi/lmflow/pipeline/dpo_aligner/index.html b/autoapi/lmflow/pipeline/dpo_aligner/index.html new file mode 100644 index 000000000..b5a0c206a --- /dev/null +++ b/autoapi/lmflow/pipeline/dpo_aligner/index.html @@ -0,0 +1,748 @@ + + + + + + + + + + + lmflow.pipeline.dpo_aligner — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

lmflow.pipeline.dpo_aligner#

+
+

Classes#

+
+ + + + + +

DPOAligner

A subclass of BasePipeline which is alignable.

+
+
+
+

Functions#

+
+ + + + + +

get_paired_dataset(→ datasets.Dataset)

Load dataset and convert it to the necessary format.

+
+
+
+

Module Contents#

+
+
+lmflow.pipeline.dpo_aligner.get_paired_dataset(data_root: str, data_dir: str, sanity_check: bool = False, cache_dir: str | None = None, num_proc=24) datasets.Dataset[source]#
+

Load dataset and convert it to the necessary format.

+
+

The dataset is converted to a dictionary with the following structure:

{
    ‘prompt’: List[str],
    ‘chosen’: List[str],
    ‘rejected’: List[str],
}

Prompts are structured as follows:
“Question: ” + <prompt> + “\n\nAnswer: ”
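Concretely, a returned record could look like the following (the question and answers are made-up placeholders):

```python
paired_example = {
    "prompt":   ["Question: What does DPO optimize?\n\nAnswer: "],
    "chosen":   ["It directly optimizes a policy from preference pairs."],
    "rejected": ["It trains a separate reward model first."],
}
```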

+
+ +
+
+class lmflow.pipeline.dpo_aligner.DPOAligner(model_args, data_args, aligner_args)[source]#
+

Bases: lmflow.pipeline.base_aligner.BaseAligner

+

A subclass of BasePipeline which is alignable.

+
+
+model_args[source]#
+
+ +
+
+data_args[source]#
+
+ +
+
+aligner_args[source]#
+
+ +
+
+_initialize_trainer(model, tokenizer)[source]#
+
+ +
+
+_load_dataset()[source]#
+
+ +
+
+align(model, dataset, reward_model)[source]#
+ + \ No newline at end of file diff --git a/autoapi/lmflow/pipeline/dpov2_aligner/index.html b/autoapi/lmflow/pipeline/dpov2_aligner/index.html new file mode 100644 index 000000000..597eaf0aa --- /dev/null +++ b/autoapi/lmflow/pipeline/dpov2_aligner/index.html @@ -0,0 +1,833 @@ + + + + + + + + + + + lmflow.pipeline.dpov2_aligner — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

lmflow.pipeline.dpov2_aligner#

+
+

Attributes#

+
+ + + + + + + + +

logger

ReferenceModelArguments

+
+
+
+

Classes#

+
+ + + + + + + + +

DPOv2Aligner

A subclass of BasePipeline which is alignable.

MemorySafeDPOv2Aligner

+
+
+
+

Module Contents#

+
+
+lmflow.pipeline.dpov2_aligner.logger[source]#
+
+ +
+
+lmflow.pipeline.dpov2_aligner.ReferenceModelArguments[source]#
+
+ +
+
+class lmflow.pipeline.dpov2_aligner.DPOv2Aligner(model_args: lmflow.args.ModelArguments, data_args: lmflow.args.DatasetArguments, aligner_args: lmflow.args.DPOv2AlignerArguments, ref_model_args: lmflow.args.ModelArguments)[source]#
+

Bases: lmflow.pipeline.base_aligner.BaseAligner

+

A subclass of BasePipeline which is alignable.

+
+
+model_args[source]#
+
+ +
+
+ref_model_args[source]#
+
+ +
+
+data_args[source]#
+
+ +
+
+aligner_args[source]#
+
+ +
+
+align(model: lmflow.models.hf_decoder_model.HFDecoderModel, ref_model: lmflow.models.hf_decoder_model.HFDecoderModel, train_dataset: lmflow.datasets.dataset.Dataset, eval_dataset: lmflow.datasets.dataset.Dataset, transform_dataset_in_place: bool = True)[source]#
+
+ +
+
+__prepare_training_args(args: lmflow.args.DPOv2AlignerArguments) transformers.TrainingArguments[source]#
+
+ +
+
+convert_to_paired_dataset(source_dataset: lmflow.datasets.dataset.Dataset, sampling_paired_method: str = 'random', length_penalty: float = 0.0, margin_scale: float = 1.0, use_fast: bool = False) lmflow.datasets.dataset.Dataset[source]#
+

Convert a scored one-to-multiple dataset (text_to_scored_textlist) into a paired dataset by rejection sampling.

+
+ +
+
+_calc_response_lengths(outputs: List[str | Dict[str, str]], dataset_type: str) List[int][source]#
+
+ +
+
+_calc_reward_with_length_penalty(rewards: List[float], lengths: List[int], length_penalty: float) List[float][source]#
+

When length_penalty > 0, penalize the longer sequence by subtracting +length_penalty * length from the reward. Vice versa when length_penalty < 0.

+
+ +
+
+sampling_paired_idx_from_rewards(rewards: List[float], sampling_paired_method: str = 'random', use_fast: bool = False) Tuple[int, int][source]#
+

Prepare the dataset for DPO training by rejection sampling. We implement different strategies to select pairs, including:
random: randomly select two instances
max_min: best vs. worst
max_max: best vs. second best
max_random: best vs. a random instance from the remaining
(a sketch of these selection rules is given below)
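A sketch of the selection rules under these assumptions (indices are positions in rewards; ties and edge cases are ignored, so this is an illustration rather than LMFlow's exact code):

```python
import random
from typing import List, Tuple

def sample_pair(rewards: List[float], method: str = "random") -> Tuple[int, int]:
    # Returns (chosen_idx, rejected_idx) according to the documented strategies.
    order = sorted(range(len(rewards)), key=lambda i: rewards[i], reverse=True)
    if method == "random":
        i, j = random.sample(range(len(rewards)), 2)
        chosen, rejected = (i, j) if rewards[i] >= rewards[j] else (j, i)
    elif method == "max_min":
        chosen, rejected = order[0], order[-1]                   # best vs. worst
    elif method == "max_max":
        chosen, rejected = order[0], order[1]                    # best vs. second best
    elif method == "max_random":
        chosen, rejected = order[0], random.choice(order[1:])    # best vs. random rest
    else:
        raise ValueError(f"Unknown sampling method: {method}")
    return chosen, rejected

print(sample_pair([0.1, 0.9, 0.4, 0.7], method="max_min"))  # (1, 0)
```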

+
+ +
+
+_sampling_paired_idx_from_rewards(rewards: List[float], sampling_paired_method: str = 'random') Tuple[int, int][source]#
+
+ +
+
+_sampling_paired_idx_from_rewards_fast(rewards: List[float], sampling_paired_method: str = 'random') Tuple[int, int][source]#
+
+ +
+ +
+
+class lmflow.pipeline.dpov2_aligner.MemorySafeDPOv2Aligner(model_args: lmflow.args.ModelArguments, data_args: lmflow.args.DatasetArguments, aligner_args: lmflow.args.DPOv2AlignerArguments, ref_model_args: lmflow.args.ModelArguments)[source]#
+
+
+model_args[source]#
+
+ +
+
+ref_model_args[source]#
+
+ +
+
+data_args[source]#
+
+ +
+
+aligner_args[source]#
+
+ +
+
+aligner_file_path[source]#
+
+ +
+
+align()[source]#
+ + \ No newline at end of file diff --git a/autoapi/lmflow/pipeline/evaluator/index.html b/autoapi/lmflow/pipeline/evaluator/index.html new file mode 100644 index 000000000..0249a4f40 --- /dev/null +++ b/autoapi/lmflow/pipeline/evaluator/index.html @@ -0,0 +1,808 @@ + + + + + + + + + + + lmflow.pipeline.evaluator — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

lmflow.pipeline.evaluator#

+

The Evaluator class simplifies the process of running evaluation on a language model provided by a HFDecoderModel instance imported from the lmflow package. The class constructor takes three dictionaries as arguments: model_args containing arguments related to the language model, data_args containing arguments related to the data used for evaluation, and evaluator_args containing other arguments for the evaluation process.

+

The class has two methods: create_dataloader() that loads the data from the test file, creates a data loader, and returns it with the size of the data, and evaluate(model) that generates output text given input text. It uses the create_dataloader() method to load the data, iterates over the data in mini-batches, and encodes the input text with the encode() method of the HFDecoderModel class. Then, it generates output text using the evaluate() method of the HFDecoderModel class, decodes the generated output text using the decode() method of the HFDecoderModel class, and writes the output to a file in the output directory. The method also logs some information to the console and Weights and Biases if the use_wandb argument is True.

+
+

Classes#

+
+ + + + + +

Evaluator

Initializes the Evaluator class with given arguments.

+
+
+
+

Module Contents#

+
+
+class lmflow.pipeline.evaluator.Evaluator(model_args, data_args, evaluator_args)[source]#
+

Bases: lmflow.pipeline.base_pipeline.BasePipeline

+

Initializes the Evaluator class with given arguments.

+
+
Parameters:
+
+
model_argsModelArguments object.

Contains the arguments required to load the model.

+
+
data_argsDatasetArguments object.

Contains the arguments required to load the dataset.

+
+
evaluator_argsEvaluatorArguments object.

Contains the arguments required to perform evaluation.

+
+
+
+
+
+
+data_args[source]#
+
+ +
+
+evaluator_args[source]#
+
+ +
+
+model_args[source]#
+
+ +
+
+local_rank[source]#
+
+ +
+
+world_size[source]#
+
+ +
+
+config[source]#
+
+ +
+
+train_batch_size[source]#
+
+ +
+
+minibatch_size[source]#
+
+ +
+
+block_size[source]#
+
+ +
+
+create_dataloader(dataset: lmflow.datasets.dataset.Dataset)[source]#
+
+ +
+
+_match(predicted_answer, groundtruth, answer_type=None)[source]#
+
+ +
+
+evaluate(model, dataset: lmflow.datasets.dataset.Dataset, metric='accuracy', verbose=True)[source]#
+

Perform Evaluation for a model

+
+
Parameters:
+
+
modelTunableModel object.

TunableModel to perform inference

+
+
datasetDataset object.
+
+
+
+
+ +
+
+_evaluate_acc_with_accelerator(model, dataset, verbose=True)[source]#
+
+ +
+
+_evaluate_acc_with_deepspeed(model, dataset, verbose=True)[source]#
+
+ +
+
+_evaluate_ppl(model, dataset: lmflow.datasets.dataset.Dataset, verbose=True)[source]#
+
+ +
+
+_evaluate_nll(model, dataset: lmflow.datasets.dataset.Dataset, verbose=True)[source]#
+

Evaluates negative log likelihood of the model over a dataset.

+

NLL = -1/N sum_{i=1}^N sum_{j=1}^|w_i| ln(p(w_{i,j}|context_window)),

+

where N is the number of data samples and w_{i,j} is the j-th token of the i-th sample. Here “context_window” = (w_{i,start}, w_{i,start+1}, …, w_{i,j-1}) with start = max(0, j - window_length + 1), and “window_length” is normally the maximum context length accepted by the model.

+
+
Returns:

A float which represents the negative log likelihood.
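For intuition, a compact sketch of a token-level NLL computation with a causal LM (a simplified single-window version using Hugging Face models, not the sliding-window logic described above; the model name is only an example):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained("gpt2")
lm = AutoModelForCausalLM.from_pretrained("gpt2")

text = "The quick brown fox jumps over the lazy dog."
ids = tok(text, return_tensors="pt").input_ids
with torch.no_grad():
    # With labels=ids, the model returns the mean cross-entropy over predicted tokens,
    # i.e. the average negative log likelihood per token.
    nll_per_token = lm(ids, labels=ids).loss
print(float(nll_per_token))
```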

+ + \ No newline at end of file diff --git a/autoapi/lmflow/pipeline/finetuner/index.html b/autoapi/lmflow/pipeline/finetuner/index.html new file mode 100644 index 000000000..db9a4a3a9 --- /dev/null +++ b/autoapi/lmflow/pipeline/finetuner/index.html @@ -0,0 +1,780 @@ + + + + + + + + + + + lmflow.pipeline.finetuner — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

lmflow.pipeline.finetuner#

+

The Finetuner class simplifies the process of running finetuning on a language model, i.e. a TunableModel instance, with a given dataset.

+
+

Attributes#

+
+ + + + + +

logger

+
+
+
+

Classes#

+
+ + + + + +

Finetuner

Initializes the Finetuner class with given arguments.

+
+
+
+

Module Contents#

+
+
+lmflow.pipeline.finetuner.logger[source]#
+
+ +
+
+class lmflow.pipeline.finetuner.Finetuner(model_args, data_args, finetuner_args, *args, **kwargs)[source]#
+

Bases: lmflow.pipeline.base_tuner.BaseTuner

+

Initializes the Finetuner class with given arguments.

+
+
Parameters:
+
+
model_argsModelArguments object.

Contains the arguments required to load the model.

+
+
data_argsDatasetArguments object.

Contains the arguments required to load the dataset.

+
+
finetuner_argsFinetunerArguments object.

Contains the arguments required to perform finetuning.

+
+
argsOptional.

Positional arguments.

+
+
kwargsOptional.

Keyword arguments.

+
+
+
+
+
+
+model_args[source]#
+
+ +
+
+data_args[source]#
+
+ +
+
+finetuner_args[source]#
+
+ +
+
+log_level[source]#
+
+ +
+
+last_checkpoint = None[source]#
+
+ +
+
+last_checkpoint[source]#
+
+ +
+
+group_text(tokenized_datasets, model_max_length)[source]#
+

Groups texts together to form blocks of maximum length model_max_length and returns the processed data as +a dictionary.
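This corresponds to the standard concatenate-then-chunk recipe; a sketch in the style of the Hugging Face language-modeling examples (an approximation of LMFlow's version, not its exact code):

```python
def group_texts(examples, model_max_length):
    # examples: dict of lists produced by a tokenizer, e.g. {"input_ids": [...], ...}
    concatenated = {k: sum(examples[k], []) for k in examples.keys()}
    total_length = len(concatenated[next(iter(examples.keys()))])
    # Drop the remainder so every block has exactly model_max_length tokens.
    total_length = (total_length // model_max_length) * model_max_length
    return {
        k: [t[i:i + model_max_length] for i in range(0, total_length, model_max_length)]
        for k, t in concatenated.items()
    }
```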

+
+ +
+
+create_customized_optimizer(base_trainer_class, model_args)[source]#
+
+ +
+
+tune(model, dataset, transform_dataset_in_place=True, data_collator=None)[source]#
+

Perform tuning for a model

+
+
Parameters:
+
+
modelTunableModel object.

TunableModel to perform tuning.

+
+
dataset:

dataset to train model.

+ + \ No newline at end of file diff --git a/autoapi/lmflow/pipeline/index.html b/autoapi/lmflow/pipeline/index.html new file mode 100644 index 000000000..28e9d840a --- /dev/null +++ b/autoapi/lmflow/pipeline/index.html @@ -0,0 +1,676 @@ + + + + + + + + + + + lmflow.pipeline — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/pipeline/inferencer/index.html b/autoapi/lmflow/pipeline/inferencer/index.html new file mode 100644 index 000000000..1d7241b0e --- /dev/null +++ b/autoapi/lmflow/pipeline/inferencer/index.html @@ -0,0 +1,990 @@ + + + + + + + + + + + lmflow.pipeline.inferencer — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

lmflow.pipeline.inferencer#

+

The Inferencer class simplifies the process of model inferencing.

+
+

Attributes#

+
+ + + + + + + + +

supported_dataset_type

logger

+
+
+
+

Classes#

+
+ + + + + + + + + + + +

Inferencer

Initializes the Inferencer class with given arguments.

SpeculativeInferencer

Ref: [arXiv:2211.17192v2](https://arxiv.org/abs/2211.17192)

ToolInferencer

Initializes the ToolInferencer class with given arguments.

+
+
+
+

Functions#

+
+ + + + + +

rstrip_partial_utf8(string)

+
+
+
+

Module Contents#

+
+
+lmflow.pipeline.inferencer.rstrip_partial_utf8(string)[source]#
+
+ +
+
+lmflow.pipeline.inferencer.supported_dataset_type = ['text_only', 'image_text'][source]#
+
+ +
+
+lmflow.pipeline.inferencer.logger[source]#
+
+ +
+
+class lmflow.pipeline.inferencer.Inferencer(model_args, data_args, inferencer_args)[source]#
+

Bases: lmflow.pipeline.base_pipeline.BasePipeline

+

Initializes the Inferencer class with given arguments.

+
+
Parameters:
+
+
model_argsModelArguments object.

Contains the arguments required to load the model.

+
+
data_argsDatasetArguments object.

Contains the arguments required to load the dataset.

+
+
inferencer_argsInferencerArguments object.

Contains the arguments required to perform inference.

+
+
+
+
+
+
+data_args[source]#
+
+ +
+
+inferencer_args[source]#
+
+ +
+
+model_args[source]#
+
+ +
+
+local_rank[source]#
+
+ +
+
+world_size[source]#
+
+ +
+
+config[source]#
+
+ +
+
+create_dataloader(dataset: lmflow.datasets.dataset.Dataset)[source]#
+

Batchlize dataset and format it to dataloader.

+
+
Args:

dataset (Dataset): the dataset object

+
+
Output:

dataloader (batchlize): the dataloader object
dataset_size (int): the length of the dataset

+
+
+
+ +
+
+inference(model, dataset: lmflow.datasets.dataset.Dataset, max_new_tokens: int = 100, temperature: float = 0.0, prompt_structure: str = '{input}', remove_image_flag: bool = False, chatbot_type: str = 'mini_gpt')[source]#
+

Perform inference for a model

+
+
Parameters:
+
+
modelTunableModel object.

TunableModel to perform inference

+
+
datasetDataset object.
+
Returns:
+
output_dataset: Dataset object.
+
+
+
+
+ +
+
+stream_inference(context, model, max_new_tokens, token_per_step, temperature, end_string, input_dataset, remove_image_flag: bool = False)[source]#
+
+ +
+ +
+
+class lmflow.pipeline.inferencer.SpeculativeInferencer(model_args, draft_model_args, data_args, inferencer_args)[source]#
+

Bases: Inferencer

+

Ref: [arXiv:2211.17192v2](https://arxiv.org/abs/2211.17192)

+
+
Parameters:
+
+
target_model_argsModelArguments object.

Contains the arguments required to load the target model.

+
+
draft_model_argsModelArguments object.

Contains the arguments required to load the draft model.

+
+
data_argsDatasetArguments object.

Contains the arguments required to load the dataset.

+
+
inferencer_argsInferencerArguments object.

Contains the arguments required to perform inference.

+
+
+
+
+
+
+draft_model_args[source]#
+
+ +
+
+draft_config[source]#
+
+ +
+
+static score_to_prob(scores: torch.Tensor, temperature: float = 0.0, top_p: float = 1.0) torch.Tensor[source]#
+

Convert scores (NOT softmaxed tensor) to probabilities with support for temperature, top-p sampling, and argmax.

+
+
Parameters:
+
+
scorestorch.Tensor

Input scores.

+
+
temperaturefloat, optional

Temperature parameter for controlling randomness. Higher values make the distribution more uniform, +lower values make it peakier. When temperature <= 1e-6, argmax is used. by default 0.0

+
+
top_pfloat, optional

Top-p sampling parameter for controlling the cumulative probability threshold, by default 1.0 (no threshold)

+
+
+
+
Returns:
+
+
torch.Tensor

Probability distribution after adjustments.
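A sketch of this conversion under those semantics (greedy when the temperature is tiny, otherwise temperature scaling followed by top-p truncation; an illustration, not the exact implementation):

```python
import torch

def score_to_prob(scores: torch.Tensor, temperature: float = 0.0, top_p: float = 1.0) -> torch.Tensor:
    if temperature <= 1e-6:                      # argmax: all mass on the best token
        prob = torch.zeros_like(scores)
        prob.scatter_(-1, scores.argmax(dim=-1, keepdim=True), 1.0)
        return prob
    prob = torch.softmax(scores / temperature, dim=-1)
    if top_p < 1.0:                              # keep the smallest set with cumulative prob >= top_p
        sorted_prob, sorted_idx = prob.sort(dim=-1, descending=True)
        cumulative = sorted_prob.cumsum(dim=-1)
        sorted_prob[cumulative - sorted_prob > top_p] = 0.0
        prob = torch.zeros_like(prob).scatter_(-1, sorted_idx, sorted_prob)
        prob = prob / prob.sum(dim=-1, keepdim=True)
    return prob
```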

+
+
+
+
+
+ +
+
+static sample(prob: torch.Tensor, num_samples: int = 1) Dict[source]#
+

Sample from a tensor of probabilities

+
+ +
+
+static predict_next_token(model: lmflow.models.hf_decoder_model.HFDecoderModel, input_ids: torch.Tensor, num_new_tokens: int = 1)[source]#
+

Predict the next token given the input_ids.

+
+ +
+
+autoregressive_sampling(input_ids: torch.Tensor, model: lmflow.models.hf_decoder_model.HFDecoderModel, temperature: float = 0.0, num_new_tokens: int = 5) Dict[source]#
+

Ref: [arXiv:2211.17192v2](https://arxiv.org/abs/2211.17192) Section 2.2

+
+ +
+
+inference(model: lmflow.models.hf_decoder_model.HFDecoderModel, draft_model: lmflow.models.hf_decoder_model.HFDecoderModel, input: str, temperature: float = 0.0, gamma: int = 5, max_new_tokens: int = 100)[source]#
+

Perform inference for a model

+
+
Parameters:
+
+
modelHFDecoderModel object.

TunableModel to verify tokens generated by the draft model.

+
+
draft_modelHFDecoderModel object.

TunableModel that provides approximations of the target model.

+
+
inputstr.

The input text (i.e., the prompt) for the model.

+
+
gammaint.

The number of tokens to be generated by the draft model within each iter.

+
+
max_new_tokensint.

The maximum number of tokens to be generated by the target model.

+
+
+
+
Returns:
+
+
output: str.

The output text generated by the model.
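For intuition, a schematic of the draft/verify loop phrased with Hugging Face models (greatly simplified: greedy drafting and greedy verification, no probability-ratio rejection step, so it illustrates the control flow rather than LMFlow's exact algorithm; the model names are placeholders):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained("gpt2")
target = AutoModelForCausalLM.from_pretrained("gpt2")
draft = AutoModelForCausalLM.from_pretrained("distilgpt2")

def greedy_speculative(prompt: str, gamma: int = 5, max_new_tokens: int = 40) -> str:
    ids = tok(prompt, return_tensors="pt").input_ids
    start = ids.shape[1]
    with torch.no_grad():
        while ids.shape[1] - start < max_new_tokens:
            cur_len = ids.shape[1]
            # 1) The draft model proposes gamma tokens autoregressively (greedy).
            draft_ids = draft.generate(ids, max_new_tokens=gamma, do_sample=False,
                                       pad_token_id=tok.eos_token_id)
            proposed = draft_ids[0, cur_len:]
            # 2) The target model scores the whole proposal in a single forward pass.
            logits = target(draft_ids).logits[0]
            # 3) Greedy verification: accept proposals while they match the target's argmax.
            accepted = 0
            for k, tok_id in enumerate(proposed):
                if logits[cur_len + k - 1].argmax().item() != tok_id.item():
                    break
                accepted += 1
            # 4) Keep the accepted prefix and append one token chosen by the target itself.
            next_tok = logits[cur_len + accepted - 1].argmax().reshape(1, 1)
            ids = torch.cat([ids, proposed[:accepted].reshape(1, -1), next_tok], dim=1)
    return tok.decode(ids[0, start:], skip_special_tokens=True)

print(greedy_speculative("Question: What is speculative decoding?\n\nAnswer: "))
```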

+
+
+
+
+
+ +
+
+abstract stream_inference()[source]#
+
+ +
+ +
+
+class lmflow.pipeline.inferencer.ToolInferencer(model_args, data_args, inferencer_args)[source]#
+

Bases: Inferencer

+

Initializes the ToolInferencer class with given arguments.

+
+
Parameters:
+
+
model_argsModelArguments object.

Contains the arguments required to load the model.

+
+
data_argsDatasetArguments object.

Contains the arguments required to load the dataset.

+
+
inferencer_argsInferencerArguments object.

Contains the arguments required to perform inference.

+
+
+
+
+
+
+model[source]#
+
+ +
+
+inference(model: lmflow.models.hf_decoder_model.HFDecoderModel, input: str, max_new_tokens: int = 1024)[source]#
+

Perform inference for a model

+
+
Parameters:
+
+
modelHFDecoderModel object.

TunableModel to perform inference

+
+
inputstr.

The input text (i.e., the prompt) for the model.

+
+
max_new_tokensint.

The maximum number of tokens to be generated by the model.

+
+
Returns:
+
outputstr.

The output text generated by the model.

+
+
+
+
+
+ +
+
+code_exec(code)[source]#
+ + \ No newline at end of file diff --git a/autoapi/lmflow/pipeline/iterative_dpo_aligner/index.html b/autoapi/lmflow/pipeline/iterative_dpo_aligner/index.html new file mode 100644 index 000000000..bebeb667e --- /dev/null +++ b/autoapi/lmflow/pipeline/iterative_dpo_aligner/index.html @@ -0,0 +1,784 @@ + + + + + + + + + + + lmflow.pipeline.iterative_dpo_aligner — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

lmflow.pipeline.iterative_dpo_aligner#

+
+

Attributes#

+
+ + + + + +

logger

+
+
+
+

Classes#

+
+ + + + + +

IterativeDPOAligner

+
+
+
+

Module Contents#

+
+
+lmflow.pipeline.iterative_dpo_aligner.logger[source]#
+
+ +
+
+class lmflow.pipeline.iterative_dpo_aligner.IterativeDPOAligner(model_args: lmflow.args.ModelArguments, data_args: lmflow.args.DatasetArguments, aligner_args: lmflow.args.IterativeDPOAlignerArguments, ref_model_args: lmflow.args.ModelArguments, reward_model_args: lmflow.args.ModelArguments, **kwargs)[source]#
+
+
+model_args[source]#
+
+ +
+
+data_args[source]#
+
+ +
+
+aligner_args[source]#
+
+ +
+
+ref_model_args[source]#
+
+ +
+
+reward_model_args[source]#
+
+ +
+
+workspace_path[source]#
+
+ +
+
+align(dataset_list: List[lmflow.datasets.dataset.Dataset])[source]#
+
+ +
+
+_align_single_iteration(iteration_name: str, target_model_args: lmflow.args.ModelArguments, reward_model_args: lmflow.args.ModelArguments, ref_model_args: lmflow.args.ModelArguments, dataset: lmflow.datasets.dataset.Dataset)[source]#
+
+ +
+
+_do_target_model_inference(model: lmflow.models.hf_decoder_model.HFDecoderModel, dataset: lmflow.datasets.dataset.Dataset, output_dir: str)[source]#
+
+ +
+
+_do_reward_model_inference(model: lmflow.models.hf_text_regression_model.HFTextRegressionModel, dataset: lmflow.datasets.dataset.Dataset, output_dir: str)[source]#
+
+ +
+
+_do_single_dpo_align(model_args: lmflow.args.ModelArguments, ref_model_args: lmflow.args.ModelArguments, data_args: lmflow.args.DatasetArguments, output_dir: str, iteration_name: str)[source]#
+
+ +
+
+_parse_target_model_inference_args(args: lmflow.args.IterativeDPOAlignerArguments, result_cache_path: str) lmflow.args.InferencerArguments[source]#
+
+ +
+
+_parse_reward_model_inference_args(args: lmflow.args.IterativeDPOAlignerArguments) lmflow.args.InferencerArguments[source]#
+
+ +
+
+_parse_dpo_aligner_args(args: lmflow.args.IterativeDPOAlignerArguments, output_dir: str, iteration_name: str) lmflow.args.DPOv2AlignerArguments[source]#
+
+ +
+
+__filter_args(mixed_args, target_cls)[source]#
+ + \ No newline at end of file diff --git a/autoapi/lmflow/pipeline/raft_aligner/index.html b/autoapi/lmflow/pipeline/raft_aligner/index.html new file mode 100644 index 000000000..cf304cbf1 --- /dev/null +++ b/autoapi/lmflow/pipeline/raft_aligner/index.html @@ -0,0 +1,831 @@ + + + + + + + + + + + lmflow.pipeline.raft_aligner — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

lmflow.pipeline.raft_aligner#

+

The Aligner class simplifies the process of running alignment.

+
+

Attributes#

+
+ + + + + +

logger

+
+
+
+

Classes#

+
+ + + + + +

RaftAligner

Initializes the RaftAligner class with given arguments.

+
+
+
+

Module Contents#

+
+
+lmflow.pipeline.raft_aligner.logger[source]#
+
+ +
+
+class lmflow.pipeline.raft_aligner.RaftAligner(model_args, data_args, aligner_args, *args, **kwargs)[source]#
+

Bases: lmflow.pipeline.base_aligner.BaseAligner

+

Initializes the RaftAligner class with given arguments.

+
+
Parameters:
+
+
model_argsModelArguments object.

Contains the arguments required to load the model.

+
+
data_argsDatasetArguments object.

Contains the arguments required to load the dataset.

+
+
raft_aligner_argsRaftAlignerArguments object.

Contains the arguments required to perform alignment.

+
+
argsOptional.

Positional arguments.

+
+
kwargsOptional.

Keyword arguments.

+
+
+
+
+
+
+model_args[source]#
+
+ +
+
+data_args[source]#
+
+ +
+
+aligner_args[source]#
+
+ +
+
+INF = 888888888[source]#
+
+ +
+
+output_reward_path[source]#
+
+ +
+
+_initialize_trainer(model, tokenizer, training_args)[source]#
+

This function takes the model and tokenizer as input and initializes the trainer.

+
+ +
+
+_load_dataset(selected_dataset, model, tokenizer, model_args, data_args, training_args)[source]#
+

This function prepares the dataset for every iteration.

+
+ +
+
+_load_input_dataset(dataset, tokenizer)[source]#
+

Load input dataset (i.e. prompt/question dataset) for training.

+
+
Args:
+
dataset: A Dataset object.

The dataset to be loaded.

+
+
+
+
Returns:
+
dataloader (torch.utils.data.DataLoader):

The dataloader for the dataset.

+
+
+
+
+
+ +
+
+_clean_text(text)[source]#
+
+ +
+
+_discard_sample(text)[source]#
+
+ +
+
+_get_batch_dataset_top(model, batch_input, alpha=0.2, iter_id=0, local_rank=0, output_min_length=16, output_max_length=48, infer_batch_size=8, generation_kwargs={}, tokenizer=None, training_args=None, reward_model=None, output_reward_path=None)[source]#
+
+
Parameters:
+

batch_input – input prompts

+
+
+
+ +
+
+_get_batch_dataset_local(model, batch_input, K=8, iter_id=0, local_rank=0, output_min_length=16, output_max_length=48, infer_batch_size=8, generation_kwargs={}, tokenizer=None, training_args=None, reward_model=None, output_reward_path=None)[source]#
+
+
Parameters:
+

batch_input – input prompts

+
+
+
+ +
+
+align(model, dataset, reward_model)[source]#
+

Perform alignment for a model

+
+
Parameters:
+
+
modelBaseModel object.
+
dataset: Dataset object.
+
Input dataset for the model to generate outputs. The input and output
will then be fed into the reward model to get the reward for alignment.

+
+
+
+
reward_model: RegressionModel object.
+ + \ No newline at end of file diff --git a/autoapi/lmflow/pipeline/rm_inferencer/index.html b/autoapi/lmflow/pipeline/rm_inferencer/index.html new file mode 100644 index 000000000..e088e4d3b --- /dev/null +++ b/autoapi/lmflow/pipeline/rm_inferencer/index.html @@ -0,0 +1,786 @@ + + + + + + + + + + + lmflow.pipeline.rm_inferencer — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

lmflow.pipeline.rm_inferencer#

+
+

Attributes#

+
+ + + + + +

logger

+
+
+
+

Classes#

+
+ + + + + +

RewardModelInferencer

Initializes the Inferencer class with given arguments.

+
+
+
+

Module Contents#

+
+
+lmflow.pipeline.rm_inferencer.logger[source]#
+
+ +
+
+class lmflow.pipeline.rm_inferencer.RewardModelInferencer(model_args: lmflow.args.ModelArguments, data_args: lmflow.args.DatasetArguments, inferencer_args: lmflow.args.InferencerArguments, **kwargs)[source]#
+

Bases: lmflow.pipeline.base_pipeline.BasePipeline

+

Initializes the Inferencer class with given arguments.

+
+
Parameters:
+
+
model_argsModelArguments object.

Contains the arguments required to load the model.

+
+
data_argsDatasetArguments object.

Contains the arguments required to load the dataset.

+
+
inferencer_argsInferencerArguments object.

Contains the arguments required to perform inference.

+
+
+
+
+
+
+data_args[source]#
+
+ +
+
+inferencer_args[source]#
+
+ +
+
+model_args[source]#
+
+ +
+
+local_rank[source]#
+
+ +
+
+world_size[source]#
+
+ +
+
+inference(model: lmflow.models.hf_text_regression_model.HFTextRegressionModel, dataset: lmflow.datasets.dataset.Dataset, transform_dataset_in_place: bool = True, use_vllm: bool = False, enable_distributed_inference: bool = False, **kwargs) lmflow.datasets.dataset.Dataset[source]#
+
+ +
+
+_inference(model: lmflow.models.hf_text_regression_model.HFTextRegressionModel, model_input: lmflow.datasets.dataset.Dataset | ray.data.Dataset, enable_distributed_inference: bool = False, **kwargs)[source]#
+
+ +
+
+__inference(model: lmflow.models.hf_text_regression_model.HFTextRegressionModel, model_input: lmflow.datasets.dataset.Dataset) List[float] | List[List[float]][source]#
+
+ +
+
+__distributed_inference(model: lmflow.models.hf_text_regression_model.HFTextRegressionModel, model_input: ray.data.Dataset, num_instances: int, batch_size: int) List[lmflow.utils.data_utils.RewardModelInferenceResultWithInput][source]#
+
+ +
+
+abstract __vllm_inference(model: lmflow.models.hf_text_regression_model.HFTextRegressionModel, model_input: List[str], enable_distributed_inference: bool = False) List[float][source]#
+
+ +
+
+__post_process_model_output(model_output: transformers.modeling_outputs.SequenceClassifierOutputWithPast) List[float][source]#
+
+ +
+
+flatten_list(list_of_list: List[List]) Tuple[List, List[int]][source]#
+
+ +
+
+compress_list(list_to_compress: List, sublist_lengths: List[int]) List[List][source]#
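These two helpers are inverses of each other; a sketch of the intended behaviour (an assumption based on the signatures above):

```python
from typing import List, Tuple

def flatten_list(list_of_list: List[List]) -> Tuple[List, List[int]]:
    # Flatten nested lists and remember each sublist's length so the shape can be restored.
    flat = [item for sub in list_of_list for item in sub]
    lengths = [len(sub) for sub in list_of_list]
    return flat, lengths

def compress_list(list_to_compress: List, sublist_lengths: List[int]) -> List[List]:
    out, cursor = [], 0
    for n in sublist_lengths:
        out.append(list_to_compress[cursor:cursor + n])
        cursor += n
    return out

flat, lengths = flatten_list([[1, 2], [3], [4, 5, 6]])
assert compress_list(flat, lengths) == [[1, 2], [3], [4, 5, 6]]
```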
+ + \ No newline at end of file diff --git a/autoapi/lmflow/pipeline/rm_tuner/index.html b/autoapi/lmflow/pipeline/rm_tuner/index.html new file mode 100644 index 000000000..848e7028b --- /dev/null +++ b/autoapi/lmflow/pipeline/rm_tuner/index.html @@ -0,0 +1,729 @@ + + + + + + + + + + + lmflow.pipeline.rm_tuner — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

lmflow.pipeline.rm_tuner#

+
+

Attributes#

+
+ + + + + +

logger

+
+
+
+

Classes#

+
+ + + + + +

RewardModelTuner

Initializes the RewardModelTuner class.

+
+
+
+

Module Contents#

+
+
+lmflow.pipeline.rm_tuner.logger[source]#
+
+ +
+
+class lmflow.pipeline.rm_tuner.RewardModelTuner(model_args, data_args, finetuner_args, *args, **kwargs)[source]#
+

Bases: lmflow.pipeline.finetuner.Finetuner

+

Initializes the RewardModelTuner class.

+
+
Parameters:
+
+
model_argsModelArguments object.

Contains the arguments required to load the model.

+
+
data_argsDatasetArguments object.

Contains the arguments required to load the dataset.

+
+
finetuner_argsRewardModelTunerArguments object.

Contains the arguments required to perform finetuning.

+
+
argsOptional.

Positional arguments.

+
+
kwargsOptional.

Keyword arguments.

+
+
+
+
+
+
+tune(model: lmflow.models.hf_text_regression_model.HFTextRegressionModel, dataset, transform_dataset_in_place=True, data_collator=None, **kwargs)[source]#
+

Perform tuning for a model

+
+
Parameters:
+
+
modelTunableModel object.

TunableModel to perform tuning.

+
+
dataset:

dataset to train model.

+ + \ No newline at end of file diff --git a/autoapi/lmflow/pipeline/utils/dpov2_dataprocessor/index.html b/autoapi/lmflow/pipeline/utils/dpov2_dataprocessor/index.html new file mode 100644 index 000000000..5538b49b7 --- /dev/null +++ b/autoapi/lmflow/pipeline/utils/dpov2_dataprocessor/index.html @@ -0,0 +1,787 @@ + + + + + + + + + + + lmflow.pipeline.utils.dpov2_dataprocessor — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

lmflow.pipeline.utils.dpov2_dataprocessor#

+
+

Attributes#

+
+ + + + + +

logger

+
+
+
+

Classes#

+ +
+
+

Module Contents#

+
+
+lmflow.pipeline.utils.dpov2_dataprocessor.logger[source]#
+
+ +
+
+class lmflow.pipeline.utils.dpov2_dataprocessor.PreferenceDataCollatorWithPadding[source]#
+
+
+tokenizer: transformers.PreTrainedTokenizerBase[source]#
+
+ +
+
+model: transformers.PreTrainedModel | None = None[source]#
+
+ +
+
+padding: bool | str = True[source]#
+
+ +
+
+max_length: int | None = None[source]#
+
+ +
+
+max_prompt_length: int | None = None[source]#
+
+ +
+
+label_pad_token_id: int[source]#
+
+ +
+
+padding_value: int = 0[source]#
+
+ +
+
+truncation_mode: str = 'keep_end'[source]#
+
+ +
+
+is_encoder_decoder: bool | None = False[source]#
+
+ +
+
+max_target_length: int | None = None[source]#
+
+ +
+
+mask_prompt: bool | None = False[source]#
+
+ +
+
+tokenize_batch_element(prompt: str, chosen: str, rejected: str) Dict[source]#
+

Tokenize a single batch element.

+
+
At this stage, we don’t convert to PyTorch tensors yet; we just handle the truncation

in case the prompt + chosen or prompt + rejected responses is/are too long. First +we truncate the prompt; if we’re still too long, we truncate the chosen/rejected.

+
+
We also create the labels for the chosen/rejected responses, which are of length equal to

the sum of the length of the prompt and the chosen/rejected response, with +label_pad_token_id for the prompt tokens.
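In code form, the truncation and label-masking logic reads roughly as follows (a sketch with a plain tokenizer; keep_end/keep_start prompt truncation is controlled by truncation_mode, and everything here is an approximation of the real collator, not its exact code):

```python
def build_example(tokenizer, prompt, response, max_length, max_prompt_length,
                  truncation_mode="keep_end", label_pad_token_id=-100):
    prompt_ids = tokenizer(prompt, add_special_tokens=False)["input_ids"]
    response_ids = tokenizer(response, add_special_tokens=False)["input_ids"]

    # 1) If the pair is too long, truncate the prompt first.
    if len(prompt_ids) + len(response_ids) > max_length:
        if truncation_mode == "keep_end":
            prompt_ids = prompt_ids[-max_prompt_length:]
        else:  # "keep_start"
            prompt_ids = prompt_ids[:max_prompt_length]
    # 2) If still too long, truncate the chosen/rejected response.
    if len(prompt_ids) + len(response_ids) > max_length:
        response_ids = response_ids[: max_length - len(prompt_ids)]

    input_ids = prompt_ids + response_ids
    # 3) Labels mirror input_ids, with prompt positions masked by label_pad_token_id.
    labels = [label_pad_token_id] * len(prompt_ids) + response_ids
    return {"input_ids": input_ids, "labels": labels}
```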

+
+
+
+ +
+
+collate(batch)[source]#
+
+ +
+
+__call__(features: List[Dict[str, Any]]) Dict[str, Any][source]#
+ + \ No newline at end of file diff --git a/autoapi/lmflow/pipeline/utils/dpov2_trainer/index.html b/autoapi/lmflow/pipeline/utils/dpov2_trainer/index.html new file mode 100644 index 000000000..154752bfa --- /dev/null +++ b/autoapi/lmflow/pipeline/utils/dpov2_trainer/index.html @@ -0,0 +1,740 @@ + + + + + + + + + + + lmflow.pipeline.utils.dpov2_trainer — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

lmflow.pipeline.utils.dpov2_trainer#

+
+

Attributes#

+
+ + + + + +

logger

+
+
+
+

Classes#

+
+ + + + + +

DPOv2Trainer

+
+
+
+

Module Contents#

+
+
+lmflow.pipeline.utils.dpov2_trainer.logger[source]#
+
+ +
+
+class lmflow.pipeline.utils.dpov2_trainer.DPOv2Trainer(model: transformers.PreTrainedModel | torch.nn.Module = None, ref_model: transformers.PreTrainedModel | torch.nn.Module | None = None, beta: float = 0.1, loss_type: Literal['sigmoid', 'hinge', 'cross_entropy', 'kl', 'rev_kl', 'raft'] = 'rev_kl', args: transformers.TrainingArguments = None, data_collator: transformers.DataCollator | None = None, label_pad_token_id: int = -100, padding_value: int = 0, truncation_mode: str = 'keep_end', train_dataset: datasets.Dataset | None = None, eval_dataset: datasets.Dataset | Dict[str, datasets.Dataset] | None = None, tokenizer: transformers.PreTrainedTokenizerBase | None = None, model_init: Callable[[], transformers.PreTrainedModel] | None = None, callbacks: List[transformers.trainer_callback.TrainerCallback] | None = None, optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None), preprocess_logits_for_metrics: Callable[[torch.Tensor, torch.Tensor], torch.Tensor] | None = None, max_length: int | None = None, max_prompt_length: int | None = None, max_target_length: int | None = None, peft_config: Dict | None = None, is_encoder_decoder: bool | None = None, disable_dropout: bool = True, generate_during_eval: bool = False, compute_metrics: Callable[[transformers.trainer_utils.EvalLoopOutput], Dict] | None = None, mask_prompt: bool | None = False, len_penalty: float = 0, preprocessing_num_workers: int = 1)[source]#
+

Bases: trl.DPOTrainer

+
+
+use_dpo_data_collator = True[source]#
+
+ +
+
+len_penalty[source]#
+
+ +
+
+dpo_loss(policy_chosen_logps: torch.FloatTensor, policy_rejected_logps: torch.FloatTensor, reference_chosen_logps: torch.FloatTensor, reference_rejected_logps: torch.FloatTensor, reference_free: bool = False, margin: torch.FloatTensor | None = None, len_penalty: float = 0) Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor][source]#
+

Compute the DPO loss for a batch of policy and reference model log probabilities.

+
+
Args:

policy_chosen_logps: Log probabilities of the policy model for the chosen responses. Shape: (batch_size,)
policy_rejected_logps: Log probabilities of the policy model for the rejected responses. Shape: (batch_size,)
reference_chosen_logps: Log probabilities of the reference model for the chosen responses. Shape: (batch_size,)
reference_rejected_logps: Log probabilities of the reference model for the rejected responses. Shape: (batch_size,)
beta: Temperature parameter for the DPO loss, typically something in the range of 0.1 to 0.5. We ignore the reference model as beta -> 0.
reference_free: If True, we ignore the _provided_ reference model and implicitly use a reference model that assigns equal probability to all responses.

+
+
Returns:

A tuple of three tensors: (losses, chosen_rewards, rejected_rewards). +The losses tensor contains the DPO loss for each example in the batch. +The chosen_rewards and rejected_rewards tensors contain the rewards for the chosen and rejected responses, respectively.
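For reference, a sketch of the textbook "sigmoid" DPO formulation with the same inputs and outputs (this trainer's default loss_type is 'rev_kl', so take this as the standard variant rather than the exact default here):

```python
import torch
import torch.nn.functional as F

def dpo_sigmoid_loss(policy_chosen_logps, policy_rejected_logps,
                     reference_chosen_logps, reference_rejected_logps, beta=0.1):
    # Scaled log-ratio of policy vs. reference for chosen and rejected responses.
    chosen_rewards = beta * (policy_chosen_logps - reference_chosen_logps)
    rejected_rewards = beta * (policy_rejected_logps - reference_rejected_logps)
    # Standard DPO: push the chosen margin above the rejected margin.
    losses = -F.logsigmoid(chosen_rewards - rejected_rewards)
    return losses, chosen_rewards.detach(), rejected_rewards.detach()

losses, cr, rr = dpo_sigmoid_loss(torch.tensor([-5.0]), torch.tensor([-9.0]),
                                  torch.tensor([-6.0]), torch.tensor([-8.0]))
print(losses)  # below ln(2) ~ 0.693, since the chosen response is already preferred
```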

+
+
+
+ +
+
+get_batch_loss_metrics(model, batch: Dict[str, List | torch.LongTensor], train_eval: Literal['train', 'eval'] = 'train')[source]#
+
+ +
+
+get_batch_metrics(model, batch: Dict[str, List | torch.LongTensor], train_eval: Literal['train', 'eval'] = 'train')[source]#
+

Compute the DPO loss and other metrics for the given batch of inputs for train or test.

+ + \ No newline at end of file diff --git a/autoapi/lmflow/pipeline/utils/index.html b/autoapi/lmflow/pipeline/utils/index.html new file mode 100644 index 000000000..4682c73b6 --- /dev/null +++ b/autoapi/lmflow/pipeline/utils/index.html @@ -0,0 +1,666 @@ + + + + + + + + + + + lmflow.pipeline.utils — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/pipeline/utils/memory_safe_dpov2_align/index.html b/autoapi/lmflow/pipeline/utils/memory_safe_dpov2_align/index.html new file mode 100644 index 000000000..2153f32b2 --- /dev/null +++ b/autoapi/lmflow/pipeline/utils/memory_safe_dpov2_align/index.html @@ -0,0 +1,701 @@ + + + + + + + + + + + lmflow.pipeline.utils.memory_safe_dpov2_align — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/pipeline/utils/memory_safe_vllm_inference/index.html b/autoapi/lmflow/pipeline/utils/memory_safe_vllm_inference/index.html new file mode 100644 index 000000000..780b367fa --- /dev/null +++ b/autoapi/lmflow/pipeline/utils/memory_safe_vllm_inference/index.html @@ -0,0 +1,692 @@ + + + + + + + + + + + lmflow.pipeline.utils.memory_safe_vllm_inference — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

lmflow.pipeline.utils.memory_safe_vllm_inference#

+
+

Attributes#

+
+ + + + + +

logger

+
+
+
+

Functions#

+
+ + + + + +

main()

+
+
+
+

Module Contents#

+
+
+lmflow.pipeline.utils.memory_safe_vllm_inference.logger[source]#
+
+ +
+
+lmflow.pipeline.utils.memory_safe_vllm_inference.main()[source]#
+ + \ No newline at end of file diff --git a/autoapi/lmflow/pipeline/utils/peft_trainer/index.html b/autoapi/lmflow/pipeline/utils/peft_trainer/index.html new file mode 100644 index 000000000..68741f5fe --- /dev/null +++ b/autoapi/lmflow/pipeline/utils/peft_trainer/index.html @@ -0,0 +1,725 @@ + + + + + + + + + + + lmflow.pipeline.utils.peft_trainer — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

lmflow.pipeline.utils.peft_trainer#

+

Trainer for Peft models

+
+

Classes#

+
+ + + + + + + + +

PeftTrainer

PeftSavingCallback

Correctly save PEFT model and not full model

+
+
+
+

Module Contents#

+
+
+class lmflow.pipeline.utils.peft_trainer.PeftTrainer[source]#
+

Bases: transformers.Trainer

+
+
+_save_checkpoint(_, trial, metrics=None)[source]#
+

Don’t save base model, optimizer etc. +but create checkpoint folder (needed for saving adapter)

+
+ +
+ +
+
+class lmflow.pipeline.utils.peft_trainer.PeftSavingCallback[source]#
+

Bases: transformers.trainer_callback.TrainerCallback

+

Correctly save PEFT model and not full model

+
+
+_save(model, folder)[source]#
+
+ +
+
+on_train_end(args: transformers.training_args.TrainingArguments, state: transformers.trainer_callback.TrainerState, control: transformers.trainer_callback.TrainerControl, **kwargs)[source]#
+

Save final best model adapter

+
+ +
+
+on_epoch_end(args: transformers.training_args.TrainingArguments, state: transformers.trainer_callback.TrainerState, control: transformers.trainer_callback.TrainerControl, **kwargs)[source]#
+

Save intermediate model adapters in case of interrupted training

+
+ +
+
+on_save(args: transformers.training_args.TrainingArguments, state: transformers.trainer_callback.TrainerState, control: transformers.trainer_callback.TrainerControl, **kwargs)[source]#
+ + \ No newline at end of file diff --git a/autoapi/lmflow/pipeline/utils/raft_trainer/index.html b/autoapi/lmflow/pipeline/utils/raft_trainer/index.html new file mode 100644 index 000000000..9c6b0a4c0 --- /dev/null +++ b/autoapi/lmflow/pipeline/utils/raft_trainer/index.html @@ -0,0 +1,1848 @@ + + + + + + + + + + + lmflow.pipeline.utils.raft_trainer — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

lmflow.pipeline.utils.raft_trainer#

+
+

Attributes#

+ +
+
+

Classes#

+
+ + + + + +

RaftTrainer

Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for 🤗 Transformers.

+
+
+
+

Module Contents#

+
+
+lmflow.pipeline.utils.raft_trainer.is_torch_greater_or_equal_than_1_10[source]#
+
+ +
+
+lmflow.pipeline.utils.raft_trainer.is_torch_less_than_1_11[source]#
+
+ +
+
+lmflow.pipeline.utils.raft_trainer._is_native_cpu_amp_available[source]#
+
+ +
+
+lmflow.pipeline.utils.raft_trainer.DEFAULT_CALLBACKS[source]#
+
+ +
+
+lmflow.pipeline.utils.raft_trainer.DEFAULT_PROGRESS_CALLBACK[source]#
+
+ +
+
+lmflow.pipeline.utils.raft_trainer.DEFAULT_PROGRESS_CALLBACK[source]#
+
+ +
+
+lmflow.pipeline.utils.raft_trainer.IS_SAGEMAKER_MP_POST_1_10[source]#
+
+ +
+
+lmflow.pipeline.utils.raft_trainer.skip_first_batches = None[source]#
+
+ +
+
+lmflow.pipeline.utils.raft_trainer.logger[source]#
+
+ +
+
+lmflow.pipeline.utils.raft_trainer.TRAINING_ARGS_NAME = 'training_args.bin'[source]#
+
+ +
+
+lmflow.pipeline.utils.raft_trainer.TRAINER_STATE_NAME = 'trainer_state.json'[source]#
+
+ +
+
+lmflow.pipeline.utils.raft_trainer.OPTIMIZER_NAME = 'optimizer.pt'[source]#
+
+ +
+
+lmflow.pipeline.utils.raft_trainer.SCHEDULER_NAME = 'scheduler.pt'[source]#
+
+ +
+
+lmflow.pipeline.utils.raft_trainer.SCALER_NAME = 'scaler.pt'[source]#
+
+ +
+
+class lmflow.pipeline.utils.raft_trainer.RaftTrainer(model: transformers.modeling_utils.PreTrainedModel | torch.nn.Module = None, args: transformers.training_args.TrainingArguments = None, data_collator: transformers.data.data_collator.DataCollator | None = None, train_dataset: torch.utils.data.Dataset | None = None, eval_dataset: torch.utils.data.Dataset | Dict[str, torch.utils.data.Dataset] | None = None, tokenizer: transformers.tokenization_utils_base.PreTrainedTokenizerBase | None = None, model_init: Callable[[], transformers.modeling_utils.PreTrainedModel] | None = None, compute_metrics: Callable[[transformers.trainer_utils.EvalPrediction], Dict] | None = None, callbacks: List[transformers.trainer_callback.TrainerCallback] | None = None, optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None), preprocess_logits_for_metrics: Callable[[torch.Tensor, torch.Tensor], torch.Tensor] | None = None)[source]#
+

Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for 🤗 Transformers. +Args:

+
+
+
model ([PreTrainedModel] or torch.nn.Module, optional):

The model to train, evaluate or use for predictions. If not provided, a model_init must be passed. +<Tip> +[Trainer] is optimized to work with the [PreTrainedModel] provided by the library. You can still use +your own models defined as torch.nn.Module as long as they work the same way as the 🤗 Transformers +models. +</Tip>

+
+
args ([TrainingArguments], optional):

The arguments to tweak for training. Will default to a basic instance of [TrainingArguments] with the +output_dir set to a directory named tmp_trainer in the current directory if not provided.

+
+
data_collator (DataCollator, optional):

The function to use to form a batch from a list of elements of train_dataset or eval_dataset. Will +default to [default_data_collator] if no tokenizer is provided, an instance of +[DataCollatorWithPadding] otherwise.

+
+
train_dataset (torch.utils.data.Dataset or torch.utils.data.IterableDataset, optional):

The dataset to use for training. If it is a [~datasets.Dataset], columns not accepted by the model.forward() method are automatically removed. Note that if it’s a torch.utils.data.IterableDataset with some randomization and you are training in a distributed fashion, your iterable dataset should either use an internal attribute generator that is a torch.Generator for the randomization that must be identical on all processes (and the Trainer will manually set the seed of this generator at each epoch) or have a set_epoch() method that internally sets the seed of the RNGs used.

+
+
eval_dataset (Union[torch.utils.data.Dataset, Dict[str, torch.utils.data.Dataset]), optional):

The dataset to use for evaluation. If it is a [~datasets.Dataset], columns not accepted by the +model.forward() method are automatically removed. If it is a dictionary, it will evaluate on each +dataset prepending the dictionary key to the metric name.

+
+
tokenizer ([PreTrainedTokenizerBase], optional):

The tokenizer used to preprocess the data. If provided, will be used to automatically pad the inputs to the +maximum length when batching inputs, and it will be saved along the model to make it easier to rerun an +interrupted training or reuse the fine-tuned model.

+
+
model_init (Callable[[], PreTrainedModel], optional):

A function that instantiates the model to be used. If provided, each call to [~Trainer.train] will start from a new instance of the model as given by this function. The function may have zero arguments, or a single one containing the optuna/Ray Tune/SigOpt trial object, to be able to choose different architectures according to hyperparameters (such as layer count, sizes of inner layers, dropout probabilities etc.).

+
+
compute_metrics (Callable[[EvalPrediction], Dict], optional):

The function that will be used to compute metrics at evaluation. Must take a [EvalPrediction] and return +a dictionary string to metric values.

+
+
callbacks (List of [TrainerCallback], optional):

A list of callbacks to customize the training loop. Will add those to the list of default callbacks +detailed in [here](callback). +If you want to remove one of the default callbacks used, use the [Trainer.remove_callback] method.

+
+
optimizers (Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR], optional): A tuple

containing the optimizer and the scheduler to use. Will default to an instance of [AdamW] on your model +and a scheduler given by [get_linear_schedule_with_warmup] controlled by args.

+
+
preprocess_logits_for_metrics (Callable[[torch.Tensor, torch.Tensor], torch.Tensor], optional):

A function that preprocesses the logits right before caching them at each evaluation step. Must take two tensors, the logits and the labels, and return the logits once processed as desired. The modifications made by this function will be reflected in the predictions received by compute_metrics. Note that the labels (second parameter) will be None if the dataset does not have them.

+
+
+
+
+
Important attributes:
  • model – Always points to the core model. If using a transformers model, it will be a [PreTrainedModel] subclass.
  • model_wrapped – Always points to the most external model in case one or more other modules wrap the original model. This is the model that should be used for the forward pass. For example, under DeepSpeed, the inner model is wrapped in DeepSpeed and then again in torch.nn.DistributedDataParallel. If the inner model hasn’t been wrapped, then self.model_wrapped is the same as self.model.
  • is_model_parallel – Whether or not a model has been switched to a model parallel mode (different from data parallelism, this means some of the model layers are split on different GPUs).
  • place_model_on_device – Whether or not to automatically place the model on the device - it will be set to False if model parallel or deepspeed is used, or if the default TrainingArguments.place_model_on_device is overridden to return False.
  • is_in_train – Whether or not a model is currently running train (e.g. when evaluate is called while in train).
+
+
+
+save_counter = 0[source]#
+
+ +
+
+args[source]#
+
+ +
+
+hp_name = None[source]#
+
+ +
+
+deepspeed = None[source]#
+
+ +
+
+is_in_train = False[source]#
+
+ +
+
+_memory_tracker[source]#
+
+ +
+
+log_level[source]#
+
+ +
+
+sharded_ddp = None[source]#
+
+ +
+
+fsdp = None[source]#
+
+ +
+
+place_model_on_device[source]#
+
+ +
+
+default_collator[source]#
+
+ +
+
+data_collator[source]#
+
+ +
+
+train_dataset[source]#
+
+ +
+
+eval_dataset[source]#
+
+ +
+
+tokenizer[source]#
+
+ +
+
+model_wrapped[source]#
+
+ +
+
+model[source]#
+
+ +
+
+compute_metrics[source]#
+
+ +
+
+preprocess_logits_for_metrics[source]#
+
+ +
+
+default_callbacks[source]#
+
+ +
+
+callbacks[source]#
+
+ +
+
+callback_handler[source]#
+
+ +
+
+_loggers_initialized = False[source]#
+
+ +
+
+_signature_columns = None[source]#
+
+ +
+
+use_apex = False[source]#
+
+ +
+
+use_cuda_amp = False[source]#
+
+ +
+
+use_cpu_amp = False[source]#
+
+ +
+
+do_grad_scaling = False[source]#
+
+ +
+
+state[source]#
+
+ +
+
+control[source]#
+
+ +
+
+current_flos = 0[source]#
+
+ +
+
+hp_search_backend = None[source]#
+
+ +
+
+use_tune_checkpoints = False[source]#
+
+ +
+
+default_label_names[source]#
+
+ +
+
+label_names[source]#
+
+ +
+
+can_return_loss[source]#
+
+ +
+
+control[source]#
+
+ +
+
+_train_batch_size[source]#
+
+ +
+
+add_callback(callback)[source]#
+

Add a callback to the current list of [~transformer.TrainerCallback]. +Args:

+
+
+
callback (type or [~transformer.TrainerCallback]):

A [~transformer.TrainerCallback] class or an instance of a [~transformer.TrainerCallback]. In the +first case, will instantiate a member of that class.

+
+
+
+
+ +
+
+pop_callback(callback)[source]#
+

Remove a callback from the current list of [~transformer.TrainerCallback] and returns it. +If the callback is not found, returns None (and no error is raised). +Args:

+
+
+
callback (type or [~transformer.TrainerCallback]):

A [~transformer.TrainerCallback] class or an instance of a [~transformer.TrainerCallback]. In the +first case, will pop the first member of that class found in the list of callbacks.

+
+
+
+
+
Returns:

[~transformer.TrainerCallback]: The callback removed, if found.

+
+
+
+ +
+
+remove_callback(callback)[source]#
+

Remove a callback from the current list of [~transformer.TrainerCallback]. +Args:

+
+
+
callback (type or [~transformer.TrainerCallback]):

A [~transformer.TrainerCallback] class or an instance of a [~transformer.TrainerCallback]. In the +first case, will remove the first member of that class found in the list of callbacks.

+
+
+
+
+ +
+
+_move_model_to_device(model, device)[source]#
+
+ +
+
+_set_signature_columns_if_needed()[source]#
+
+ +
+
+_remove_unused_columns(dataset: datasets.Dataset, description: str | None = None)[source]#
+
+ +
+
+_get_collator_with_removed_columns(data_collator: Callable, description: str | None = None) Callable[source]#
+

Wrap the data collator in a callable removing unused columns.

+
+ +
+
+_get_train_sampler() torch.utils.data.Sampler | None[source]#
+
+ +
+
+get_train_dataloader() torch.utils.data.DataLoader[source]#
+

Returns the training [~torch.utils.data.DataLoader]. +Will use no sampler if train_dataset does not implement __len__, a random sampler (adapted to distributed +training if necessary) otherwise. +Subclass and override this method if you want to inject some custom behavior.

+
+ +
+
+_get_eval_sampler(eval_dataset: torch.utils.data.Dataset) torch.utils.data.Sampler | None[source]#
+
+ +
+
+get_eval_dataloader(eval_dataset: torch.utils.data.Dataset | None = None) torch.utils.data.DataLoader[source]#
+

Returns the evaluation [~torch.utils.data.DataLoader]. +Subclass and override this method if you want to inject some custom behavior. +Args:

+
+
+
eval_dataset (torch.utils.data.Dataset, optional):

If provided, will override self.eval_dataset. If it is a [~datasets.Dataset], columns not accepted +by the model.forward() method are automatically removed. It must implement __len__.

+
+
+
+
+ +
+
+get_test_dataloader(test_dataset: torch.utils.data.Dataset) torch.utils.data.DataLoader[source]#
+

Returns the test [~torch.utils.data.DataLoader]. +Subclass and override this method if you want to inject some custom behavior. +Args:

+
+
+
test_dataset (torch.utils.data.Dataset, optional):

The test dataset to use. If it is a [~datasets.Dataset], columns not accepted by the +model.forward() method are automatically removed. It must implement __len__.

+
+
+
+
+ +
+
+create_optimizer_and_scheduler(num_training_steps: int)[source]#
+

Setup the optimizer and the learning rate scheduler. +We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the +Trainer’s init through optimizers, or subclass and override this method (or create_optimizer and/or +create_scheduler) in a subclass.

+
+ +
+
+create_optimizer()[source]#
+

Setup the optimizer. +We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the +Trainer’s init through optimizers, or subclass and override this method in a subclass.

+
+ +
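As a hedged alternative to overriding these methods, a custom optimizer/scheduler pair can be passed through the optimizers argument of the constructor, as the docstrings above note. model and train_dataset are assumed to be defined elsewhere; the hyperparameter values are arbitrary.

import torch
from transformers import Trainer, TrainingArguments

args = TrainingArguments(output_dir="out", num_train_epochs=1)
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)
scheduler = torch.optim.lr_scheduler.LinearLR(optimizer)
trainer = Trainer(
    model=model,
    args=args,
    train_dataset=train_dataset,
    optimizers=(optimizer, scheduler),  # bypasses the default create_optimizer_and_scheduler
)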
+
+static get_optimizer_cls_and_kwargs(args: transformers.training_args.TrainingArguments) Tuple[Any, Any][source]#
+

Returns the optimizer class and optimizer parameters based on the training arguments. +Args:

+
+
+
args (transformers.training_args.TrainingArguments):

The training arguments for the training session.

+
+
+
+
+ +
+
+create_scheduler(num_training_steps: int, optimizer: torch.optim.Optimizer = None)[source]#
+

Setup the scheduler. The optimizer of the trainer must have been set up either before this method is called or +passed as an argument. +Args:

+
+

num_training_steps (int): The number of training steps to do.

+
+
+ +
+
+num_examples(dataloader: torch.utils.data.DataLoader) int[source]#
+

Helper to get the number of samples in a [~torch.utils.data.DataLoader] by accessing its dataset. When dataloader.dataset does not exist or has no length, estimates as best it can.

+
+ +
+
+_hp_search_setup(trial: optuna.Trial | Dict[str, Any])[source]#
+

HP search setup code

+
+ +
+ +
+ +
+
+_tune_save_checkpoint()[source]#
+
+ +
+
+call_model_init(trial=None)[source]#
+
+ +
+
+torch_jit_model_eval(model, dataloader, training=False)[source]#
+
+ +
+
+ipex_optimize_model(model, training=False, dtype=torch.float32)[source]#
+
+ +
+
+_wrap_model(model, training=True, dataloader=None)[source]#
+
+ +
+
+train(resume_from_checkpoint: str | bool | None = None, trial: optuna.Trial | Dict[str, Any] = None, ignore_keys_for_eval: List[str] | None = None, is_first_time=False, **kwargs)[source]#
+

Main training entry point. +Args:

+
+
+
resume_from_checkpoint (str or bool, optional):

If a str, local path to a saved checkpoint as saved by a previous instance of [Trainer]. If a +bool and equals True, load the last checkpoint in args.output_dir as saved by a previous instance +of [Trainer]. If present, training will resume from the model/optimizer/scheduler states loaded here.

+
+
trial (optuna.Trial or Dict[str, Any], optional):

The trial run or the hyperparameter dictionary for hyperparameter search.

+
+
ignore_keys_for_eval (List[str], optional)

A list of keys in the output of your model (if it is a dictionary) that should be ignored when +gathering predictions for evaluation during the training.

+
+
kwargs:

Additional keyword arguments used to hide deprecated arguments

+
+
+
+
+ +
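A short usage sketch with assumed names (and assuming the return value mirrors transformers.Trainer.train): resume from the latest checkpoint found in args.output_dir.

train_result = trainer.train(resume_from_checkpoint=True)
print(train_result.metrics)  # e.g. train_loss, train_runtime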
+
+_one_train(batch_size=None, args=None, resume_from_checkpoint=None, trial=None, ignore_keys_for_eval=None)[source]#
+
+ +
+
+_inner_training_loop(batch_size=None, args=None, resume_from_checkpoint=None, trial=None, ignore_keys_for_eval=None)[source]#
+

0. This function serves to train one time. 1. Update self.train_dataset before calling this function.

+
+ +
+
+_get_output_dir(trial)[source]#
+
+ +
+
+_load_from_checkpoint(resume_from_checkpoint, model=None)[source]#
+
+ +
+
+_load_best_model()[source]#
+
+ +
+
+_issue_warnings_after_load(load_result)[source]#
+
+ +
+
+_maybe_log_save_evaluate(tr_loss, model, trial, epoch, ignore_keys_for_eval)[source]#
+
+ +
+
+_load_rng_state(checkpoint)[source]#
+
+ +
+
+_save_checkpoint(model, trial, metrics=None)[source]#
+
+ +
+
+_load_optimizer_and_scheduler(checkpoint)[source]#
+

If optimizer and scheduler states exist, load them.

+
+ +
+ +

Launch a hyperparameter search using optuna, Ray Tune, or SigOpt. The optimized quantity is determined by compute_objective, which defaults to a function returning the evaluation loss when no metric is provided, and the sum of all metrics otherwise. <Tip warning={true}> To use this method, you need to have provided a model_init when initializing your [Trainer]: we need to reinitialize the model at each new run. This is incompatible with the optimizers argument, so you need to subclass [Trainer] and override the method [~Trainer.create_optimizer_and_scheduler] for a custom optimizer/scheduler. </Tip> Args:

+
+
+
hp_space (Callable[[“optuna.Trial”], Dict[str, float]], optional):

A function that defines the hyperparameter search space. Will default to +[~trainer_utils.default_hp_space_optuna] or [~trainer_utils.default_hp_space_ray] or +[~trainer_utils.default_hp_space_sigopt] depending on your backend.

+
+
compute_objective (Callable[[Dict[str, float]], float], optional):

A function computing the objective to minimize or maximize from the metrics returned by the evaluate +method. Will default to [~trainer_utils.default_compute_objective].

+
+
n_trials (int, optional, defaults to 100):

The number of trial runs to test.

+
+
direction (str, optional, defaults to “minimize”):

Whether to optimize for greater or lower objective values. Can be “minimize” or “maximize”; you should pick “minimize” when optimizing the validation loss, “maximize” when optimizing one or several metrics.

+
+
backend (str or [~training_utils.HPSearchBackend], optional):

The backend to use for hyperparameter search. Will default to optuna or Ray Tune or SigOpt, depending +on which one is installed. If all are installed, will default to optuna.

+
+
hp_name (Callable[[“optuna.Trial”], str], optional):

A function that defines the trial/run name. Will default to None.

+
+
kwargs (Dict[str, Any], optional):

Additional keyword arguments passed along to optuna.create_study or ray.tune.run. For more +information see: +- the documentation of

+
+
+ +
+
+
+
+
Returns:

[trainer_utils.BestRun]: All the information about the best run. Experiment summary can be found in +run_summary attribute for Ray backend.

+
+
+
+ +
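A sketch under assumptions: an optuna-backed search. It requires that the trainer was built with a model_init callable, as the tip above notes; the search space below is purely illustrative.

def hp_space(trial):
    return {
        "learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True),
        "num_train_epochs": trial.suggest_int("num_train_epochs", 1, 3),
    }

best_run = trainer.hyperparameter_search(
    hp_space=hp_space,
    n_trials=10,
    direction="minimize",
    backend="optuna",
)
print(best_run.hyperparameters)  # best hyperparameters found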
+
+log(logs: Dict[str, float]) None[source]#
+

Log logs on the various objects watching training. +Subclass and override this method to inject custom behavior. +Args:

+
+
+
logs (Dict[str, float]):

The values to log.

+
+
+
+
+ +
+
+_prepare_input(data: torch.Tensor | Any) torch.Tensor | Any[source]#
+

Prepares a single piece of data before feeding it to the model, be it a tensor or a nested list/dictionary of tensors.

+
+ +
+
+_prepare_inputs(inputs: Dict[str, torch.Tensor | Any]) Dict[str, torch.Tensor | Any][source]#
+

Prepare inputs before feeding them to the model, converting them to tensors if they are not already and +handling potential state.

+
+ +
+
+compute_loss_context_manager()[source]#
+

A helper wrapper to group together context managers.

+
+ +
+
+autocast_smart_context_manager(cache_enabled: bool | None = True)[source]#
+

A helper wrapper that creates an appropriate context manager for autocast while feeding it the desired +arguments, depending on the situation.

+
+ +
+
+training_step(model: torch.nn.Module, inputs: Dict[str, torch.Tensor | Any]) torch.Tensor[source]#
+

Perform a training step on a batch of inputs. +Subclass and override to inject custom behavior. +Args:

+
+
+
model (nn.Module):

The model to train.

+
+
inputs (Dict[str, Union[torch.Tensor, Any]]):

The inputs and targets of the model. +The dictionary will be unpacked before being fed to the model. Most models expect the targets under the +argument labels. Check your model’s documentation for all accepted arguments.

+
+
+
+
+
Return:

torch.Tensor: The tensor with training loss on this batch.

+
+
+
+ +
+
+compute_loss(model, inputs, return_outputs=False)[source]#
+

How the loss is computed by Trainer. By default, all models return the loss in the first element. +Subclass and override for custom behavior.

+
+ +
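A hedged sketch of the "subclass and override" pattern for compute_loss. WeightedLossTrainer is illustrative and shown against transformers.Trainer, whose hook this method mirrors; it assumes a sequence-classification model whose logits align one-to-one with labels.

import torch
from transformers import Trainer

class WeightedLossTrainer(Trainer):
    def compute_loss(self, model, inputs, return_outputs=False):
        labels = inputs.get("labels")
        outputs = model(**inputs)
        logits = outputs.logits
        # plain cross-entropy; ignore_index=-100 matches the usual label padding
        loss_fct = torch.nn.CrossEntropyLoss()
        loss = loss_fct(logits.view(-1, logits.size(-1)), labels.view(-1))
        return (loss, outputs) if return_outputs else loss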
+
+is_local_process_zero() bool[source]#
+

Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several +machines) main process.

+
+ +
+
+is_world_process_zero() bool[source]#
+

Whether or not this process is the global main process (when training in a distributed fashion on several +machines, this is only going to be True for one process).

+
+ +
+
+save_model(output_dir: str | None = None, _internal_call: bool = False)[source]#
+

Will save the model, so you can reload it using from_pretrained(). +Will only save from the main process.

+
+ +
+
+_save_tpu(output_dir: str | None = None)[source]#
+
+ +
+
+_save(output_dir: str | None = None, state_dict=None)[source]#
+
+ +
+
+store_flos()[source]#
+
+ +
+
+_sorted_checkpoints(output_dir=None, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False) List[str][source]#
+
+ +
+
+_rotate_checkpoints(use_mtime=False, output_dir=None) None[source]#
+
+ +
+
+evaluate(eval_dataset: torch.utils.data.Dataset | None = None, ignore_keys: List[str] | None = None, metric_key_prefix: str = 'eval') Dict[str, float][source]#
+

Run evaluation and returns metrics. +The calling script will be responsible for providing a method to compute metrics, as they are task-dependent +(pass it to the init compute_metrics argument). +You can also subclass and override this method to inject custom behavior. +Args:

+
+
+
eval_dataset (Dataset, optional):

Pass a dataset if you wish to override self.eval_dataset. If it is a [~datasets.Dataset], columns +not accepted by the model.forward() method are automatically removed. It must implement the __len__ +method.

+
+
ignore_keys (List[str], optional):

A list of keys in the output of your model (if it is a dictionary) that should be ignored when +gathering predictions.

+
+
metric_key_prefix (str, optional, defaults to “eval”):

An optional prefix to be used as the metrics key prefix. For example the metrics “bleu” will be named +“eval_bleu” if the prefix is “eval” (default)

+
+
+
+
+
Returns:

A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The +dictionary also contains the epoch number which comes from the training state.

+
+
+
+ +
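Usage sketch with assumed names: evaluating on a held-out split with a custom metric prefix, so the returned keys read “valid_loss”, “valid_…”, plus the epoch number.

metrics = trainer.evaluate(eval_dataset=valid_dataset, metric_key_prefix="valid")
print(metrics["valid_loss"], metrics.get("epoch"))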
+
+predict(test_dataset: torch.utils.data.Dataset, ignore_keys: List[str] | None = None, metric_key_prefix: str = 'test') transformers.trainer_utils.PredictionOutput[source]#
+

Run prediction and returns predictions and potential metrics. +Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method +will also return metrics, like in evaluate(). +Args:

+
+
+
test_dataset (Dataset):

Dataset to run the predictions on. If it is an datasets.Dataset, columns not accepted by the +model.forward() method are automatically removed. Has to implement the method __len__

+
+
ignore_keys (List[str], optional):

A list of keys in the output of your model (if it is a dictionary) that should be ignored when +gathering predictions.

+
+
metric_key_prefix (str, optional, defaults to “test”):

An optional prefix to be used as the metrics key prefix. For example the metrics “bleu” will be named +“test_bleu” if the prefix is “test” (default)

+
+
+
+

<Tip> +If your predictions or labels have different sequence length (for instance because you’re doing dynamic padding +in a token classification task) the predictions will be padded (on the right) to allow for concatenation into +one array. The padding index is -100. +</Tip> +Returns: NamedTuple A namedtuple with the following keys:

+
+
    +
  • predictions (np.ndarray): The predictions on test_dataset.

  • +
  • label_ids (np.ndarray, optional): The labels (if the dataset contained some).

  • +
  • metrics (Dict[str, float], optional): The potential dictionary of metrics (if the dataset contained +labels).

  • +
+
+
+ +
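Usage sketch with assumed names, reading the named tuple described above:

output = trainer.predict(test_dataset)
preds = output.predictions        # np.ndarray of model predictions
label_ids = output.label_ids      # only present if test_dataset carried labels
metrics = output.metrics          # e.g. test_loss and any computed metrics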
+
+evaluation_loop(dataloader: torch.utils.data.DataLoader, description: str, prediction_loss_only: bool | None = None, ignore_keys: List[str] | None = None, metric_key_prefix: str = 'eval') transformers.trainer_utils.EvalLoopOutput[source]#
+

Prediction/evaluation loop, shared by Trainer.evaluate() and Trainer.predict(). +Works both with or without labels.

+
+ +
+
+_nested_gather(tensors, name=None)[source]#
+

Gather value of tensors (tensor or list/tuple of nested tensors) and convert them to numpy before +concatenating them to gathered

+
+ +
+
+_pad_across_processes(tensor, pad_index=-100)[source]#
+

Recursively pad the tensors in a nested list/tuple/dictionary of tensors from all devices to the same size so +they can safely be gathered.

+
+ +
+
+prediction_step(model: torch.nn.Module, inputs: Dict[str, torch.Tensor | Any], prediction_loss_only: bool, ignore_keys: List[str] | None = None) Tuple[torch.Tensor | None, torch.Tensor | None, torch.Tensor | None][source]#
+

Perform an evaluation step on model using inputs. +Subclass and override to inject custom behavior. +Args:

+
+
+
model (nn.Module):

The model to evaluate.

+
+
inputs (Dict[str, Union[torch.Tensor, Any]]):

The inputs and targets of the model. +The dictionary will be unpacked before being fed to the model. Most models expect the targets under the +argument labels. Check your model’s documentation for all accepted arguments.

+
+
prediction_loss_only (bool):

Whether or not to return the loss only.

+
+
ignore_keys (List[str], optional):

A list of keys in the output of your model (if it is a dictionary) that should be ignored when +gathering predictions.

+
+
+
+
+
Return:

Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, +logits and labels (each being optional).

+
+
+
+ +
+
+floating_point_ops(inputs: Dict[str, torch.Tensor | Any])[source]#
+

For models that inherit from [PreTrainedModel], uses that method to compute the number of floating point +operations for every backward + forward pass. If using another model, either implement such a method in the +model or subclass and override this method. +Args:

+
+
+
inputs (Dict[str, Union[torch.Tensor, Any]]):

The inputs and targets of the model.

+
+
+
+
+
Returns:

int: The number of floating-point operations.

+
+
+
+ +
+
+init_git_repo(at_init: bool = False)[source]#
+

Initializes a git repo in self.args.hub_model_id. +Args:

+
+
+
at_init (bool, optional, defaults to False):

Whether this function is called before any training or not. If self.args.overwrite_output_dir is +True and at_init is True, the path to the repo (which is self.args.output_dir) might be wiped +out.

+
+
+
+
+ +
+
+create_model_card(language: str | None = None, license: str | None = None, tags: str | List[str] | None = None, model_name: str | None = None, finetuned_from: str | None = None, tasks: str | List[str] | None = None, dataset_tags: str | List[str] | None = None, dataset: str | List[str] | None = None, dataset_args: str | List[str] | None = None)[source]#
+

Creates a draft of a model card using the information available to the Trainer. +Args:

+
+
+
language (str, optional):

The language of the model (if applicable)

+
+
license (str, optional):

The license of the model. Will default to the license of the pretrained model used, if the original +model given to the Trainer comes from a repo on the Hub.

+
+
tags (str or List[str], optional):

Some tags to be included in the metadata of the model card.

+
+
model_name (str, optional):

The name of the model.

+
+
finetuned_from (str, optional):

The name of the model used to fine-tune this one (if applicable). Will default to the name of the repo +of the original model given to the Trainer (if it comes from the Hub).

+
+
tasks (str or List[str], optional):

One or several task identifiers, to be included in the metadata of the model card.

+
+
dataset_tags (str or List[str], optional):

One or several dataset tags, to be included in the metadata of the model card.

+
+
dataset (str or List[str], optional):

One or several dataset identifiers, to be included in the metadata of the model card.

+
+
dataset_args (str or List[str], optional):

One or several dataset arguments, to be included in the metadata of the model card.

+
+
+
+
+ +
+
+_push_from_checkpoint(checkpoint_folder)[source]#
+
+ +
+
+push_to_hub(commit_message: str | None = 'End of training', blocking: bool = True, **kwargs) str[source]#
+

Upload self.model and self.tokenizer to the 🤗 model hub on the repo self.args.hub_model_id. +Parameters:

+
+
+
commit_message (str, optional, defaults to “End of training”):

Message to commit while pushing.

+
+
blocking (bool, optional, defaults to True):

Whether the function should return only when the git push has finished.

+
+
kwargs:

Additional keyword arguments passed along to [~Trainer.create_model_card].

+
+
+
+
+
Returns:

The url of the commit of your model in the given repository if blocking=False, a tuple with the url of +the commit and an object to track the progress of the commit if blocking=True

+
+
+
+ +
+
+prediction_loop(dataloader: torch.utils.data.DataLoader, description: str, prediction_loss_only: bool | None = None, ignore_keys: List[str] | None = None, metric_key_prefix: str = 'eval') transformers.trainer_utils.EvalLoopOutput[source]#
+

Prediction/evaluation loop, shared by Trainer.evaluate() and Trainer.predict(). +Works both with or without labels.

+
+ +
+
+_gather_and_numpify(tensors, name)[source]#
+

Gather value of tensors (tensor or list/tuple of nested tensors) and convert them to numpy before +concatenating them to gathered

+
+ +
+
+_add_sm_patterns_to_gitignore() None[source]#
+

Add SageMaker Checkpointing patterns to .gitignore file.

+
+ +
+ +
+
+ + +
+ + + + + + + +
+ + + +
+ + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/pipeline/utils/rm_dataprocessor/index.html b/autoapi/lmflow/pipeline/utils/rm_dataprocessor/index.html new file mode 100644 index 000000000..c4c961ed6 --- /dev/null +++ b/autoapi/lmflow/pipeline/utils/rm_dataprocessor/index.html @@ -0,0 +1,730 @@ + + + + + + + + + + + lmflow.pipeline.utils.rm_dataprocessor — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.pipeline.utils.rm_dataprocessor#

+
+

Attributes#

+
+ + + + + +

logger

+
+
+
+

Classes#

+ +
+
+

Module Contents#

+
+
+lmflow.pipeline.utils.rm_dataprocessor.logger[source]#
+
+ +
+
+class lmflow.pipeline.utils.rm_dataprocessor.RewardDataCollatorWithPadding[source]#
+
+
+tokenizer: transformers.AutoTokenizer[source]#
+
+ +
+
+padding: bool | str | transformers.utils.PaddingStrategy = True[source]#
+
+ +
+
+max_length: int | None = None[source]#
+
+ +
+
+pad_to_multiple_of: int | None = None[source]#
+
+ +
+
+return_tensors: str = 'pt'[source]#
+
+ +
+
+__call__(features: List[Dict[str, Any]]) Dict[str, Any][source]#
+
+ +
+ +
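An instantiation sketch only, assuming the class is a dataclass (as the attribute defaults above suggest). The exact feature dictionaries expected by __call__ depend on how the reward-modeling dataset was tokenized upstream, so they are not shown; “gpt2” is just a placeholder tokenizer.

from transformers import AutoTokenizer
from lmflow.pipeline.utils.rm_dataprocessor import RewardDataCollatorWithPadding

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # placeholder model name
collator = RewardDataCollatorWithPadding(
    tokenizer=tokenizer,
    padding=True,        # pad to the longest sequence in the batch
    max_length=512,
    return_tensors="pt",
)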
+
+ + +
+ + + + + + + +
+ + + + + + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/pipeline/utils/rm_trainer/index.html b/autoapi/lmflow/pipeline/utils/rm_trainer/index.html new file mode 100644 index 000000000..c9666ef7f --- /dev/null +++ b/autoapi/lmflow/pipeline/utils/rm_trainer/index.html @@ -0,0 +1,728 @@ + + + + + + + + + + + lmflow.pipeline.utils.rm_trainer — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.pipeline.utils.rm_trainer#

+
+

Classes#

+
+ + + + + + + + +

RewardTrainer

PeftRewardTrainer

+
+
+
+

Functions#

+
+ + + + + + + + +

compute_metrics(eval_pred)

rm_loss(model, inputs[, return_outputs])

+
+
+
+

Module Contents#

+
+
+lmflow.pipeline.utils.rm_trainer.compute_metrics(eval_pred)[source]#
+
+ +
+
+lmflow.pipeline.utils.rm_trainer.rm_loss(model, inputs, return_outputs=False)[source]#
+
+ +
+
+class lmflow.pipeline.utils.rm_trainer.RewardTrainer[source]#
+

Bases: transformers.Trainer

+
+
+compute_loss(model, inputs, return_outputs=False)[source]#
+
+ +
+ +
+
+class lmflow.pipeline.utils.rm_trainer.PeftRewardTrainer[source]#
+

Bases: lmflow.pipeline.utils.peft_trainer.PeftTrainer

+
+
+compute_loss(model, inputs, return_outputs=False)[source]#
+
+ +
+ +
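For intuition only, and not the LMFlow implementation: a generic pairwise reward-modeling loss of the kind a compute_loss override in a reward trainer typically computes. The chosen/rejected key names and the use of .logits as scalar rewards are assumptions.

import torch

def pairwise_rm_loss(model, inputs, return_outputs=False):
    rewards_chosen = model(input_ids=inputs["input_ids_chosen"],
                           attention_mask=inputs["attention_mask_chosen"]).logits
    rewards_rejected = model(input_ids=inputs["input_ids_rejected"],
                             attention_mask=inputs["attention_mask_rejected"]).logits
    # maximize the margin between chosen and rejected rewards
    loss = -torch.nn.functional.logsigmoid(rewards_chosen - rewards_rejected).mean()
    if return_outputs:
        return loss, {"rewards_chosen": rewards_chosen, "rewards_rejected": rewards_rejected}
    return loss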
+
+ + +
+ + + + + + + +
+ + + + + + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/pipeline/vllm_inferencer/index.html b/autoapi/lmflow/pipeline/vllm_inferencer/index.html new file mode 100644 index 000000000..893983b1d --- /dev/null +++ b/autoapi/lmflow/pipeline/vllm_inferencer/index.html @@ -0,0 +1,877 @@ + + + + + + + + + + + lmflow.pipeline.vllm_inferencer — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.pipeline.vllm_inferencer#

+
+

Attributes#

+
+ + + + + +

logger

+
+
+
+

Classes#

+ +
+
+

Module Contents#

+
+
+lmflow.pipeline.vllm_inferencer.logger[source]#
+
+ +
+
+class lmflow.pipeline.vllm_inferencer.InferencerWithOffloading(model_args: lmflow.args.ModelArguments, data_args: lmflow.args.DatasetArguments, inferencer_args: lmflow.args.InferencerArguments)[source]#
+

Bases: lmflow.pipeline.base_pipeline.BasePipeline

+
+
+model_args[source]#
+
+ +
+
+data_args[source]#
+
+ +
+
+inferencer_args[source]#
+
+ +
+
+eos_token_id[source]#
+
+ +
+
+abstract inference()[source]#
+
+ +
+
+abstract save_inference_results()[source]#
+
+ +
+
+abstract load_inference_results()[source]#
+
+ +
+ +
+
+class lmflow.pipeline.vllm_inferencer.VLLMInferencer(model_args: lmflow.args.ModelArguments, data_args: lmflow.args.DatasetArguments, inferencer_args: lmflow.args.InferencerArguments)[source]#
+

Bases: InferencerWithOffloading

+
+
+sampling_params[source]#
+
+ +
+
+parse_to_sampling_params(inference_args: lmflow.args.InferencerArguments) vllm.SamplingParams[source]#
+
+ +
+
+inference(model: lmflow.models.hf_decoder_model.HFDecoderModel, dataset: lmflow.datasets.Dataset, enable_decode_inference_result: bool = True, release_gpu: bool = False, inference_args: lmflow.args.InferencerArguments | None = None, enable_distributed_inference: bool = False, **kwargs) List[lmflow.utils.data_utils.VLLMInferenceResultWithInput][source]#
+

Perform inference using the provided model and dataset. Will save inference results if +save_results is set to True in inferencer_args.

+
+
Parameters:
+
+
modelHFDecoderModel

LMFlow HFDecoderModel object

+
+
datasetDataset

LMFlow Dataset object

+
+
apply_chat_templatebool, optional

Whether to apply chat template to the input, by default True.

+
+
enable_decode_inference_resultbool, optional

Whether to decode after generation, by default False.

+
+
release_gpubool, optional

Whether to release gpu resources, by default False.

+
+
inference_argsInferencerArguments, optional

by default None

+
+
+
+
Returns:
+
+
List[VLLMInferenceResultWithInput]

Return a list of VLLMInferenceResultWithInput, where each +element contains the input prompt and the corresponding output.

+

When enable_decode_inference_result = True, the output would be a list of strings, containing sampling_params.n samples for the corresponding prompt.

+

When enable_decode_inference_result = False, the output would be a list of lists of ints (token ids, no decoding after generation).

+
+
+
+
+
+ +
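A usage sketch under assumptions: the argument dataclasses are built with placeholder values, and model / dataset are an LMFlow HFDecoderModel and Dataset constructed elsewhere. The field names used here (model_name_or_path, dataset_path) are assumptions about lmflow.args, not taken from this page.

from lmflow.args import ModelArguments, DatasetArguments, InferencerArguments
from lmflow.pipeline.vllm_inferencer import VLLMInferencer

model_args = ModelArguments(model_name_or_path="path/to/model")  # placeholder path
data_args = DatasetArguments(dataset_path="path/to/dataset")     # placeholder path
inferencer_args = InferencerArguments()

inferencer = VLLMInferencer(model_args, data_args, inferencer_args)
results = inferencer.inference(
    model=model,        # HFDecoderModel built elsewhere
    dataset=dataset,    # LMFlow Dataset built elsewhere
    enable_decode_inference_result=True,
    release_gpu=False,
)
# each element holds the input prompt and its decoded output samples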
+
+_inference(model: lmflow.models.hf_decoder_model.HFDecoderModel, model_input: List[str], sampling_params: vllm.SamplingParams, release_gpu: bool = False) List[lmflow.utils.data_utils.VLLMInferenceResultWithInput][source]#
+
+ +
+
+_distributed_inference(model: lmflow.models.hf_decoder_model.HFDecoderModel, model_input: ray.data.Dataset, sampling_params: vllm.SamplingParams, num_instances: int, batch_size: int = 4, release_gpu: bool = False) List[lmflow.utils.data_utils.VLLMInferenceResultWithInput][source]#
+
+ +
+
+save_inference_results(outputs: List[List[str]] | List[List[List[int]]], save_file_path: str)[source]#
+
+ +
+
+load_inference_results(results_path: str) List[List[str]] | List[List[List[int]]][source]#
+
+ +
+ +
+
+class lmflow.pipeline.vllm_inferencer.MemorySafeVLLMInferencer(model_args: lmflow.args.ModelArguments, data_args: lmflow.args.DatasetArguments, inferencer_args: lmflow.args.InferencerArguments)[source]#
+

Bases: VLLMInferencer

+
+
+inferencer_file_path[source]#
+
+ +
+
+inference() List[lmflow.utils.data_utils.VLLMInferenceResultWithInput][source]#
+

Perform inference using the provided model and dataset. Will save inference results if +save_results is set to True in inferencer_args.

+
+
Parameters:
+
+
modelHFDecoderModel

LMFlow HFDecoderModel object

+
+
datasetDataset

LMFlow Dataset object

+
+
apply_chat_templatebool, optional

Whether to apply chat template to the input, by default True.

+
+
enable_decode_inference_resultbool, optional

Whether to decode after generation, by default False.

+
+
release_gpubool, optional

Whether to release gpu resources, by default False.

+
+
inference_argsInferencerArguments, optional

by default None

+
+
+
+
Returns:
+
+
List[VLLMInferenceResultWithInput]

Return a list of VLLMInferenceResultWithInput, where each +element contains the input prompt and the corresponding output.

+

When enable_decode_inference_result = True, the output would be a list of strings, containing sampling_params.n samples for the corresponding prompt.

+

When enable_decode_inference_result = False, the output would be a list of lists of ints (token ids, no decoding after generation).

+
+
+
+
+
+ +
+ +
+
+ + +
+ + + + + + + +
+ + + + + + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/tokenization/hf_decoder_model/index.html b/autoapi/lmflow/tokenization/hf_decoder_model/index.html new file mode 100644 index 000000000..f0f95279d --- /dev/null +++ b/autoapi/lmflow/tokenization/hf_decoder_model/index.html @@ -0,0 +1,721 @@ + + + + + + + + + + + lmflow.tokenization.hf_decoder_model — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.tokenization.hf_decoder_model#

+
+

Attributes#

+
+ + + + + + + + +

logger

tok_logger

+
+
+
+

Functions#

+
+ + + + + + + + + + + +

blocking(→ Dict)

tokenize_function(→ Dict)

Handles text_only and text2text datasets tokenization

conversation_tokenize_function(→ Dict)

Handles conversation datasets tokenization

+
+
+
+

Module Contents#

+
+
+lmflow.tokenization.hf_decoder_model.logger[source]#
+
+ +
+
+lmflow.tokenization.hf_decoder_model.tok_logger[source]#
+
+ +
+
+lmflow.tokenization.hf_decoder_model.blocking(token_dict: Dict, block_size: int, model_max_length: int, pad_token_id: int, padding_side: str, truncation_side: str = 'right') Dict[source]#
+
+ +
+
+lmflow.tokenization.hf_decoder_model.tokenize_function(examples, data_args: lmflow.args.DatasetArguments, tokenizer: transformers.PreTrainedTokenizer | transformers.PreTrainedTokenizerFast, column_names, label_columns, tokenized_column_order, add_special_tokens, use_truncation) Dict[source]#
+

Handles text_only and text2text datasets tokenization

+
+ +
+
+lmflow.tokenization.hf_decoder_model.conversation_tokenize_function(examples, data_args: lmflow.args.DatasetArguments, tokenizer: transformers.PreTrainedTokenizer | transformers.PreTrainedTokenizerFast, column_names, conversation_template: lmflow.utils.conversation_template.ConversationTemplate) Dict[source]#
+

Handles conversation datasets tokenization

+
+ +
+
+ + +
+ + + + + + + +
+ + + + + + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/tokenization/hf_text_regression_model/index.html b/autoapi/lmflow/tokenization/hf_text_regression_model/index.html new file mode 100644 index 000000000..21c477897 --- /dev/null +++ b/autoapi/lmflow/tokenization/hf_text_regression_model/index.html @@ -0,0 +1,759 @@ + + + + + + + + + + + lmflow.tokenization.hf_text_regression_model — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.tokenization.hf_text_regression_model#

+
+

Attributes#

+
+ + + + + + + + +

logger

tok_logger

+
+
+
+

Functions#

+
+ + + + + + + + + + + + + + + + + + + + + + + +

blocking_paired(→ Dict)

blocking(→ Dict)

blocking_text_to_textlist(→ Dict)

paired_conversation_tokenize_function(→ Dict)

conversation_tokenize_function(→ Dict)

Handles conversation datasets tokenization

tokenize_function(→ Dict)

Handles text_only and text2text datasets tokenization

text_to_textlist_tokenize_function(→ Dict)

For reward model (RM) inference; attention mask and labels are not needed.

+
+
+
+

Module Contents#

+
+
+lmflow.tokenization.hf_text_regression_model.logger[source]#
+
+ +
+
+lmflow.tokenization.hf_text_regression_model.tok_logger[source]#
+
+ +
+
+lmflow.tokenization.hf_text_regression_model.blocking_paired(token_dict: Dict, column_names: List, block_size: int, model_max_length: int, pad_token_id: int, padding_side: str, truncation_side: str = 'right') Dict[source]#
+
+ +
+
+lmflow.tokenization.hf_text_regression_model.blocking(token_dict: Dict, block_size: int, model_max_length: int, pad_token_id: int, padding_side: str, truncation_side: str = 'right') Dict[source]#
+
+ +
+
+lmflow.tokenization.hf_text_regression_model.blocking_text_to_textlist(token_dict: Dict, block_size: int, model_max_length: int, pad_token_id: int, padding_side: str, truncation_side: str = 'right') Dict[source]#
+
+ +
+
+lmflow.tokenization.hf_text_regression_model.paired_conversation_tokenize_function(examples, data_args: lmflow.args.DatasetArguments, tokenizer: transformers.PreTrainedTokenizer | transformers.PreTrainedTokenizerFast, column_names, conversation_template: lmflow.utils.conversation_template.ConversationTemplate) Dict[source]#
+
+ +
+
+lmflow.tokenization.hf_text_regression_model.conversation_tokenize_function(examples, data_args: lmflow.args.DatasetArguments, tokenizer: transformers.PreTrainedTokenizer | transformers.PreTrainedTokenizerFast, column_names, conversation_template: lmflow.utils.conversation_template.ConversationTemplate) Dict[source]#
+

Handles conversation datasets tokenization

+
+ +
+
+lmflow.tokenization.hf_text_regression_model.tokenize_function(examples, data_args: lmflow.args.DatasetArguments, tokenizer: transformers.PreTrainedTokenizer | transformers.PreTrainedTokenizerFast, column_names, label_columns, tokenized_column_order, add_special_tokens, use_truncation) Dict[source]#
+

Handles text_only and text2text datasets tokenization

+
+ +
+
+lmflow.tokenization.hf_text_regression_model.text_to_textlist_tokenize_function(examples, data_args: lmflow.args.DatasetArguments, tokenizer: transformers.PreTrainedTokenizer | transformers.PreTrainedTokenizerFast, column_names, add_special_tokens, use_truncation) Dict[source]#
+

For reward model (RM) inference; attention mask and labels are not needed. NOTE: input_ids here refers to the tokenized input_ids of both the input and the output.

+
+ +
+
+ + +
+ + + + + + + +
+ + + + + + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/tokenization/index.html b/autoapi/lmflow/tokenization/index.html new file mode 100644 index 000000000..40f05e735 --- /dev/null +++ b/autoapi/lmflow/tokenization/index.html @@ -0,0 +1,655 @@ + + + + + + + + + + + lmflow.tokenization — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + + + + + + + + + +
+ + + +
+ + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/utils/common/index.html b/autoapi/lmflow/utils/common/index.html new file mode 100644 index 000000000..010dcf248 --- /dev/null +++ b/autoapi/lmflow/utils/common/index.html @@ -0,0 +1,796 @@ + + + + + + + + + + + lmflow.utils.common — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.utils.common#

+
+

Attributes#

+
+ + + + + +

logger

+
+
+
+

Functions#

+
+ + + + + + + + + + + + + + + + + +

make_shell_args_from_dataclass(→ Union[str, List[str]])

Return a string or a list of strings that can be used as shell arguments.

create_copied_dataclass(original_dataclass, ...[, ...])

Create a copied dataclass with new field names and default values.

remove_dataclass_attr_prefix(→ Dict)

Remove the prefix from the attribute names of a dataclass instance.

add_dataclass_attr_prefix(→ Dict)

Add the prefix to the attribute names of a dataclass instance.

print_banner(message)

+
+
+
+

Module Contents#

+
+
+lmflow.utils.common.logger[source]#
+
+ +
+
+lmflow.utils.common.make_shell_args_from_dataclass(dataclass_objects: List, format: str = 'subprocess', skip_default: bool = True, ignored_args_list: List[str] | None = None) str | List[str][source]#
+

Return a string or a list of strings that can be used as shell arguments.

+
+
Parameters:
+
+
dataclass_objectsList

A list of dataclass objects.

+
+
formatstr, optional

Return format, can be “shell” or “subprocess”, by default “subprocess”.

+
+
skip_defaultbool, optional

Whether to skip attributes with default values, by default True.

+
+
+
+
Returns:
+
+
Union[str, List[str]]
+
+
+
+
+ +
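An illustrative sketch: turning dataclass instances into shell-style arguments for a subprocess call. MyArgs is a made-up dataclass, and the exact rendering of the output is determined by the function itself and not asserted here.

from dataclasses import dataclass
from lmflow.utils.common import make_shell_args_from_dataclass

@dataclass
class MyArgs:
    learning_rate: float = 1e-5
    output_dir: str = "out"

args = MyArgs(learning_rate=3e-5)  # only non-default fields should be emitted with skip_default=True
shell_args = make_shell_args_from_dataclass(
    dataclass_objects=[args],
    format="subprocess",
    skip_default=True,
)
print(shell_args)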
+
+lmflow.utils.common.create_copied_dataclass(original_dataclass, field_prefix: str, class_prefix: str, new_default: Dict = None)[source]#
+

Create a copied dataclass with new field names and default values.

+
+
Parameters:
+
+
original_dataclassdataclass
+
field_prefixstr

The prefix to add to the field names of the copied dataclass.

+
+
class_prefixstr

The prefix to add to the class name of the copied dataclass.

+
+
new_defaultDict, optional

The new default values for the copied dataclass. When None, the +default values of the original dataclass are used.

+
+
+
+
Returns:
+
+
dataclass
+
+
+
+
+ +
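A sketch of the intended use, under assumptions: deriving a prefixed copy of an arguments dataclass, for example to hold a second, independently named set of the same options. OptimArgs and the prefixes are illustrative.

from dataclasses import dataclass
from lmflow.utils.common import create_copied_dataclass

@dataclass
class OptimArgs:
    lr: float = 1e-5
    weight_decay: float = 0.0

RefOptimArgs = create_copied_dataclass(
    original_dataclass=OptimArgs,
    field_prefix="ref_",
    class_prefix="Ref",
)
# fields of RefOptimArgs are expected to be ref_lr and ref_weight_decay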
+
+lmflow.utils.common.remove_dataclass_attr_prefix(data_instance, prefix: str) Dict[source]#
+

Remove the prefix from the attribute names of a dataclass instance.

+
+
Parameters:
+
+
data_instancedataclass
+
prefixstr

The prefix to remove from the attribute names of the dataclass instance.

+
+
+
+
Returns:
+
+
Dict
+
+
+
+
+ +
+
+lmflow.utils.common.add_dataclass_attr_prefix(data_instance, prefix: str) Dict[source]#
+

Add the prefix to the attribute names of a dataclass instance.

+
+
Parameters:
+
+
data_instancedataclass
+
prefixstr

The prefix to add to the attribute names of the dataclass instance.

+
+
+
+
Returns:
+
+
Dict
+
+
+
+
+ +
+
+lmflow.utils.common.print_banner(message: str)[source]#
+
+ +
+
+ + +
+ + + + + + + +
+ + + + + + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/utils/constants/index.html b/autoapi/lmflow/utils/constants/index.html new file mode 100644 index 000000000..10f3f65f0 --- /dev/null +++ b/autoapi/lmflow/utils/constants/index.html @@ -0,0 +1,927 @@ + + + + + + + + + + + lmflow.utils.constants — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.utils.constants#

+

Commonly used constants.

+
+

Attributes#

+ +
+
+

Module Contents#

+
+
+lmflow.utils.constants.TEXT_ONLY_DATASET_DESCRIPTION[source]#
+
+ +
+
+lmflow.utils.constants.TEXT_TO_SCORED_TEXTLIST_DATASET_DESCRIPTION[source]#
+
+ +
+
+lmflow.utils.constants.PAIRED_TEXT_TO_TEXT_DATASET_DESCRIPTION[source]#
+
+ +
+
+lmflow.utils.constants.TEXT_ONLY_DATASET_DETAILS[source]#
+
+ +
+
+lmflow.utils.constants.TEXT2TEXT_DATASET_DESCRIPTION[source]#
+
+ +
+
+lmflow.utils.constants.CONVERSATION_DATASET_DESCRIPTION[source]#
+
+ +
+
+lmflow.utils.constants.PAIRED_CONVERSATION_DATASET_DESCRIPTION[source]#
+
+ +
+
+lmflow.utils.constants.TEXT_TO_TEXTLIST_DATASET_DESCRIPTION[source]#
+
+ +
+
+lmflow.utils.constants.TEXT2TEXT_DATASET_DETAILS[source]#
+
+ +
+
+lmflow.utils.constants.FLOAT_ONLY_DATASET_DESCRIPTION[source]#
+
+ +
+
+lmflow.utils.constants.TEXT_ONLY_DATASET_LONG_DESCRITION[source]#
+
+ +
+
+lmflow.utils.constants.TEXT2TEXT_DATASET_LONG_DESCRITION[source]#
+
+ +
+
+lmflow.utils.constants.DATASET_DESCRIPTION_MAP[source]#
+
+ +
+
+lmflow.utils.constants.INSTANCE_FIELDS_MAP[source]#
+
+ +
+
+lmflow.utils.constants.CONVERSATION_ROLE_NAMES[source]#
+
+ +
+
+lmflow.utils.constants.CONTROLLER_HEART_BEAT_EXPIRATION = 30[source]#
+
+ +
+
+lmflow.utils.constants.WORKER_HEART_BEAT_INTERVAL = 15[source]#
+
+ +
+
+lmflow.utils.constants.LOGDIR = '.'[source]#
+
+ +
+
+lmflow.utils.constants.IGNORE_INDEX[source]#
+
+ +
+
+lmflow.utils.constants.IMAGE_TOKEN_INDEX[source]#
+
+ +
+
+lmflow.utils.constants.DEFAULT_IMAGE_TOKEN = '<image>'[source]#
+
+ +
+
+lmflow.utils.constants.DEFAULT_IMAGE_PATCH_TOKEN = '<im_patch>'[source]#
+
+ +
+
+lmflow.utils.constants.DEFAULT_IM_START_TOKEN = '<im_start>'[source]#
+
+ +
+
+lmflow.utils.constants.DEFAULT_IM_END_TOKEN = '<im_end>'[source]#
+
+ +
+
+lmflow.utils.constants.LMFLOW_LORA_TARGET_MODULES_MAPPING[source]#
+
+ +
+
+lmflow.utils.constants.MEMORY_SAFE_VLLM_INFERENCE_FINISH_FLAG = 'MEMORY_SAFE_VLLM_INFERENCE_DONE'[source]#
+
+ +
+
+lmflow.utils.constants.RETURN_CODE_ERROR_BUFFER = [134][source]#
+
+ +
+
+lmflow.utils.constants.MEMORY_SAFE_VLLM_INFERENCE_ENV_VAR_TO_REMOVE = ['OMP_NUM_THREADS', 'LOCAL_RANK', 'RANK', 'GROUP_RANK', 'ROLE_RANK', 'ROLE_NAME',...[source]#
+
+ +
+
+lmflow.utils.constants.MEMORY_SAFE_DPOV2_ALIGN_ENV_VAR_TO_REMOVE = ['OMP_NUM_THREADS', 'LOCAL_RANK', 'RANK', 'GROUP_RANK', 'ROLE_RANK', 'ROLE_NAME',...[source]#
+
+ +
+
+ + +
+ + + + + + + +
+ + + + + + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/utils/conversation_template/base/index.html b/autoapi/lmflow/utils/conversation_template/base/index.html new file mode 100644 index 000000000..b15b98b85 --- /dev/null +++ b/autoapi/lmflow/utils/conversation_template/base/index.html @@ -0,0 +1,1066 @@ + + + + + + + + + + + lmflow.utils.conversation_template.base — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.utils.conversation_template.base#

+
+

Attributes#

+ +
+
+

Classes#

+
+ + + + + + + + + + + + + + + + + + + + +

TemplateComponent

The minimal unit of a template, which can be a token, a string, or a list of tools.

Formatter

Helper class that provides a standard way to create an ABC using

EmptyFormatter

Helper class that provides a standard way to create an ABC using

StringFormatter

Helper class that provides a standard way to create an ABC using

ListFormatter

Helper class that provides a standard way to create an ABC using

ConversationTemplate

+
+
+
+

Module Contents#

+
+
+lmflow.utils.conversation_template.base.logger[source]#
+
+ +
+
+class lmflow.utils.conversation_template.base.TemplateComponent[source]#
+

The minimal unit of a template, which can be a token, a string, or a list of tools.

+
+
Parameters:
+
+
type : Literal['token', 'token_id', 'string', 'tools']
Type of the component.
  • When the component is a token or a string, the content should be a string. The difference between the two is that a token will be converted to token ids by the tokenizer.convert_tokens_to_ids() method, while a string will be directly encoded by the tokenizer.encode() method. Since the bos token and eos token are frequently used across different templates, 'bos_token' and 'eos_token' can be used to represent the actual bos and eos tokens when the type of the TemplateComponent is token. For example:
    TemplateComponent(type='token', content='bos_token')
  After encoding, the content will be replaced by the actual token id of the bos token. Please do remember that if you set the type to string, the tokenizer will try to encode the literal string 'bos_token' instead of providing the actual bos token.
  • When the component is token_id, the content should be int or List[int], and will be directly appended to the encoded token ids.
  • Tools are not supported yet.
content : Union[str, int, List[str], List[int]]
Content of the component.

+
+
+
+
+
+
+type: Literal['token', 'token_id', 'string', 'tools'][source]#
+
+ +
+
+content: str | int | List[str] | List[int][source]#
+
+ +
+
+mask: bool | None = True[source]#
+
+ +
+
+__post_init__()[source]#
+
+ +
+
+__repr__() str[source]#
+

Return repr(self).

+
+ +
+
+__str__() str[source]#
+

Return str(self).

+
+ +
+ +
+
+class lmflow.utils.conversation_template.base.Formatter[source]#
+

Bases: abc.ABC

+

Helper class that provides a standard way to create an ABC using +inheritance.

+
+
+template: List[TemplateComponent][source]#
+
+ +
+
+abstract format(**kwargs) List[TemplateComponent][source]#
+
+ +
+
+has_placeholder()[source]#
+
+ +
+ +
+
+class lmflow.utils.conversation_template.base.EmptyFormatter[source]#
+

Bases: Formatter

+

Helper class that provides a standard way to create an ABC using +inheritance.

+
+
+__post_init__()[source]#
+
+ +
+
+format(**kwargs) list[source]#
+

Empty formatter for when no formatting is needed. This is useful when the user has already applied formatting to the dataset.

+
+
Returns:
+
+
list

Original template.

+
+
+
+
+
+ +
+ +
+
+class lmflow.utils.conversation_template.base.StringFormatter[source]#
+

Bases: Formatter

+

Helper class that provides a standard way to create an ABC using +inheritance.

+
+
+__post_init__()[source]#
+
+ +
+
+format(**kwargs) list[source]#
+

Format the string components with the provided keyword arguments. +Mostly used for formatting system prompt, user and assistant messages.

+
+
Parameters:
+
+
**kwargsdict

Keyword arguments containing values to replace in the template components.

+
+
+
+
Returns:
+
+
list

Formatted template.

+
+
+
+
+
+ +
+ +
+
+class lmflow.utils.conversation_template.base.ListFormatter[source]#
+

Bases: Formatter

+

Helper class that provides a standard way to create an ABC using +inheritance.

+
+
+format(**kwargs) list[source]#
+
+ +
+ +
+
+class lmflow.utils.conversation_template.base.ConversationTemplate[source]#
+
+
+user_formatter: Formatter[source]#
+
+ +
+
+assistant_formatter: Formatter[source]#
+
+ +
+
+system_formatter: Formatter | None = None[source]#
+
+ +
+
+tools_formatter: Formatter | None = None[source]#
+
+ +
+
+separator: TemplateComponent | None = None[source]#
+
+ +
+
+special_starter: TemplateComponent | None = None[source]#
+
+ +
+
+special_stopper: TemplateComponent | None = None[source]#
+
+ +
+
+template_name: str | None = None[source]#
+
+ +
+
+__post_init__()[source]#
+
+ +
+
+encode_conversation(tokenizer: transformers.PreTrainedTokenizer, messages: List[Dict[str, str]], system: str | None = None, tools: List[str] | None = None, remove_last_sep: bool = False, **kwargs) Sequence[Tuple[List[int], List[int]]][source]#
+

Messages here should be guaranteed to be in pairs, with the first message being the user message and the second message being the assistant message.
Data example:
```json
{
    "conversation_id": 2,
    "system": "sysinfo1",
    "tools": ["tool_1_desc"],
    "messages": [
        {
            "role": "user",
            "content": "hi"
        },
        {
            "role": "assistant",
            "content": "Hello!"
        }
    ]
}
```

+
+
+ +
+
+_encode(tokenizer: transformers.PreTrainedTokenizer, messages: List[Dict[str, str]], system: str | None = None, tools: str | None = None, **kwargs) Sequence[Tuple[List[int], List[int]]][source]#
+
+ +
+
+_encode_template(template: List[TemplateComponent], tokenizer: transformers.PreTrainedTokenizer, **kwargs) List[int][source]#
+

Encode template components into token ids.

+
+
Parameters:
+
+
templateList[TemplateComponent]

Formatted template components.

+
+
tokenizerPreTrainedTokenizer

Tokenizer to convert tokens into token ids.

+
+
+
+
Returns:
+
+
List[int]

Encoded token ids.

+
+
+
+
+
+ +
+
+remove_last_separator(encoded_pairs: Sequence[Tuple[List[int], List[int]]], tokenizer: transformers.PreTrainedTokenizer) Sequence[Tuple[List[int], List[int]]][source]#
+
+ +
+
+add_special_starter(encoded_pairs: Sequence[Tuple[List[int], List[int]]], tokenizer: transformers.PreTrainedTokenizer) Sequence[Tuple[List[int], List[int]]][source]#
+
+ +
+
+add_special_stopper(encoded_pairs: Sequence[Tuple[List[int], List[int]]], tokenizer: transformers.PreTrainedTokenizer) Sequence[Tuple[List[int], List[int]]][source]#
+
+ +
+
+_ensure_id_list(obj: int | List[int]) List[int][source]#
+

Make sure the object is a list of integers. Useful for handling token ids.

+
+ +
+ +
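A hedged construction sketch, not one of the LMFlow preset templates: a ChatML-like template assembled from the pieces documented above and used to encode a short conversation. The {{content}} placeholder syntax and the “gpt2” tokenizer are assumptions made for illustration.

from transformers import AutoTokenizer
from lmflow.utils.conversation_template.base import (
    ConversationTemplate, StringFormatter, TemplateComponent,
)

my_template = ConversationTemplate(
    user_formatter=StringFormatter(template=[
        TemplateComponent(type="string", content="<|im_start|>user\n{{content}}<|im_end|>\n"),
    ]),
    assistant_formatter=StringFormatter(template=[
        TemplateComponent(type="string", content="<|im_start|>assistant\n{{content}}<|im_end|>\n"),
    ]),
    template_name="my_chatml_like",
)

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # placeholder tokenizer
encoded_pairs = my_template.encode_conversation(
    tokenizer=tokenizer,
    messages=[{"role": "user", "content": "hi"},
              {"role": "assistant", "content": "Hello!"}],
)
# encoded_pairs is a sequence of (user_token_ids, assistant_token_ids) tuples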
+
+lmflow.utils.conversation_template.base.EMPTY_TEMPLATE[source]#
+
+ +
+
+lmflow.utils.conversation_template.base.EMPTY_NO_SPECIAL_TOKENS_TEMPLATE[source]#
+
+ +
+
+ + +
+ + + + + + + +
+ + + + + + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/utils/conversation_template/chatglm/index.html b/autoapi/lmflow/utils/conversation_template/chatglm/index.html new file mode 100644 index 000000000..b11330773 --- /dev/null +++ b/autoapi/lmflow/utils/conversation_template/chatglm/index.html @@ -0,0 +1,674 @@ + + + + + + + + + + + lmflow.utils.conversation_template.chatglm — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.utils.conversation_template.chatglm#

+
+

Attributes#

+
+ + + + + +

CHATGLM3_TEMPLATE

+
+
+
+

Module Contents#

+
+
+lmflow.utils.conversation_template.chatglm.CHATGLM3_TEMPLATE[source]#
+
+ +
+
+ + +
+ + + + + + + +
+ + + +
+ + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/utils/conversation_template/chatml/index.html b/autoapi/lmflow/utils/conversation_template/chatml/index.html new file mode 100644 index 000000000..0e9f522c4 --- /dev/null +++ b/autoapi/lmflow/utils/conversation_template/chatml/index.html @@ -0,0 +1,674 @@ + + + + + + + + + + + lmflow.utils.conversation_template.chatml — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.utils.conversation_template.chatml#

+
+

Attributes#

+
+ + + + + +

CHATML_TEMPLATE

+
+
+
+

Module Contents#

+
+
+lmflow.utils.conversation_template.chatml.CHATML_TEMPLATE[source]#
+
+ +
+
+ + +
+ + + + + + + +
+ + + +
+ + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/utils/conversation_template/deepseek/index.html b/autoapi/lmflow/utils/conversation_template/deepseek/index.html new file mode 100644 index 000000000..969b5e80c --- /dev/null +++ b/autoapi/lmflow/utils/conversation_template/deepseek/index.html @@ -0,0 +1,674 @@ + + + + + + + + + + + lmflow.utils.conversation_template.deepseek — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.utils.conversation_template.deepseek#

+
+

Attributes#

+
+ + + + + +

DEEPSEEK_TEMPLATE

+
+
+
+

Module Contents#

+
+
+lmflow.utils.conversation_template.deepseek.DEEPSEEK_TEMPLATE[source]#
+
+ +
+
+ + +
+ + + + + + + +
+ + + +
+ + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/utils/conversation_template/fox/index.html b/autoapi/lmflow/utils/conversation_template/fox/index.html new file mode 100644 index 000000000..f94adbc7b --- /dev/null +++ b/autoapi/lmflow/utils/conversation_template/fox/index.html @@ -0,0 +1,674 @@ + + + + + + + + + + + lmflow.utils.conversation_template.fox — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.utils.conversation_template.fox#

+
+

Attributes#

+
+ + + + + +

FOX_TEMPLATE

+
+
+
+

Module Contents#

+
+
+lmflow.utils.conversation_template.fox.FOX_TEMPLATE[source]#
+
+ +
+
+ + +
+ + + + + + + +
+ + + +
+ + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/utils/conversation_template/gemma/index.html b/autoapi/lmflow/utils/conversation_template/gemma/index.html new file mode 100644 index 000000000..b7bcd3276 --- /dev/null +++ b/autoapi/lmflow/utils/conversation_template/gemma/index.html @@ -0,0 +1,710 @@ + + + + + + + + + + + lmflow.utils.conversation_template.gemma — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.utils.conversation_template.gemma#

+
+

Attributes#

+
+ + + + + + + + +

logger

GEMMA_TEMPLATE

+
+
+
+

Classes#

+ +
+
+

Module Contents#

+
+
+lmflow.utils.conversation_template.gemma.logger[source]#
+
+ +
+
+class lmflow.utils.conversation_template.gemma.GemmaConversationTemplate[source]#
+

Bases: lmflow.utils.conversation_template.base.ConversationTemplate

+
+
+encode_conversation(*args, **kwargs)[source]#
+
+ +
+ +
+
+lmflow.utils.conversation_template.gemma.GEMMA_TEMPLATE[source]#
+
+ +
+
+ + +
+ + + + + + + +
+ + + + + + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/utils/conversation_template/index.html b/autoapi/lmflow/utils/conversation_template/index.html new file mode 100644 index 000000000..7bc02c641 --- /dev/null +++ b/autoapi/lmflow/utils/conversation_template/index.html @@ -0,0 +1,982 @@ + + + + + + + + + + + lmflow.utils.conversation_template — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.utils.conversation_template#

+
+

Submodules#

+ +
+
+

Attributes#

+ +
+
+

Classes#

+
+ + + + + +

ConversationTemplate

+
+
+
+

Package Contents#

+
+
+lmflow.utils.conversation_template.EMPTY_TEMPLATE[source]#
+
+ +
+
+lmflow.utils.conversation_template.EMPTY_NO_SPECIAL_TOKENS_TEMPLATE[source]#
+
+ +
+
+class lmflow.utils.conversation_template.ConversationTemplate[source]#
+
+
+user_formatter: Formatter#
+
+ +
+
+assistant_formatter: Formatter#
+
+ +
+
+system_formatter: Formatter | None = None#
+
+ +
+
+tools_formatter: Formatter | None = None#
+
+ +
+
+separator: TemplateComponent | None = None#
+
+ +
+
+special_starter: TemplateComponent | None = None#
+
+ +
+
+special_stopper: TemplateComponent | None = None#
+
+ +
+
+template_name: str | None = None#
+
+ +
+
+__post_init__()[source]#
+
+ +
+
+encode_conversation(tokenizer: transformers.PreTrainedTokenizer, messages: List[Dict[str, str]], system: str | None = None, tools: List[str] | None = None, remove_last_sep: bool = False, **kwargs) Sequence[Tuple[List[int], List[int]]][source]#
+

Messages here should be guaranteed to be in pairs, with the first message being the user message and the second message being the assistant message.
Data example:
```json
{
    "conversation_id": 2,
    "system": "sysinfo1",
    "tools": ["tool_1_desc"],
    "messages": [
        {
            "role": "user",
            "content": "hi"
        },
        {
            "role": "assistant",
            "content": "Hello!"
        }
    ]
}
```

+
+
+ +
+
+_encode(tokenizer: transformers.PreTrainedTokenizer, messages: List[Dict[str, str]], system: str | None = None, tools: str | None = None, **kwargs) Sequence[Tuple[List[int], List[int]]][source]#
+
+ +
+
+_encode_template(template: List[TemplateComponent], tokenizer: transformers.PreTrainedTokenizer, **kwargs) List[int][source]#
+

Encode template components into token ids.

+
+
Parameters:
+
+
templateList[TemplateComponent]

Formatted template components.

+
+
tokenizerPreTrainedTokenizer

Tokenizer to convert tokens into token ids.

+
+
+
+
Returns:
+
+
List[int]

Encoded token ids.

+
+
+
+
+
+ +
+
+remove_last_separator(encoded_pairs: Sequence[Tuple[List[int], List[int]]], tokenizer: transformers.PreTrainedTokenizer) Sequence[Tuple[List[int], List[int]]][source]#
+
+ +
+
+add_special_starter(encoded_pairs: Sequence[Tuple[List[int], List[int]]], tokenizer: transformers.PreTrainedTokenizer) Sequence[Tuple[List[int], List[int]]][source]#
+
+ +
+
+add_special_stopper(encoded_pairs: Sequence[Tuple[List[int], List[int]]], tokenizer: transformers.PreTrainedTokenizer) Sequence[Tuple[List[int], List[int]]][source]#
+
+ +
+
+_ensure_id_list(obj: int | List[int]) List[int][source]#
+

Make sure the object is a list of integers. Useful for handling token ids.

+
+ +
+ +
+
+lmflow.utils.conversation_template.CHATGLM3_TEMPLATE[source]#
+
+ +
+
+lmflow.utils.conversation_template.CHATML_TEMPLATE[source]#
+
+ +
+
+lmflow.utils.conversation_template.DEEPSEEK_TEMPLATE[source]#
+
+ +
+
+lmflow.utils.conversation_template.FOX_TEMPLATE[source]#
+
+ +
+
+lmflow.utils.conversation_template.GEMMA_TEMPLATE[source]#
+
+ +
+
+lmflow.utils.conversation_template.INTERNLM2_TEMPLATE[source]#
+
+ +
+
+lmflow.utils.conversation_template.LLAMA2_TEMPLATE[source]#
+
+ +
+
+lmflow.utils.conversation_template.LLAMA3_TEMPLATE[source]#
+
+ +
+
+lmflow.utils.conversation_template.PHI3_TEMPLATE[source]#
+
+ +
+
+lmflow.utils.conversation_template.QWEN2_TEMPLATE[source]#
+
+ +
+
+lmflow.utils.conversation_template.YI1_5_TEMPLATE[source]#
+
+ +
+
+lmflow.utils.conversation_template.ZEPHYR_TEMPLATE[source]#
+
+ +
+
+lmflow.utils.conversation_template.PRESET_TEMPLATES[source]#
+
+ +
+
+ + +
+ + + + + + + +
+ + + + + + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/utils/conversation_template/internlm/index.html b/autoapi/lmflow/utils/conversation_template/internlm/index.html new file mode 100644 index 000000000..d77456cff --- /dev/null +++ b/autoapi/lmflow/utils/conversation_template/internlm/index.html @@ -0,0 +1,674 @@ + + + + + + + + + + + lmflow.utils.conversation_template.internlm — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.utils.conversation_template.internlm#

+
+

Attributes#

+
+ + + + + +

INTERNLM2_TEMPLATE

+
+
+
+

Module Contents#

+
+
+lmflow.utils.conversation_template.internlm.INTERNLM2_TEMPLATE[source]#
+
+ +
+
+ + +
+ + + + + + + +
+ + + +
+ + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/utils/conversation_template/llama/index.html b/autoapi/lmflow/utils/conversation_template/llama/index.html new file mode 100644 index 000000000..959cda689 --- /dev/null +++ b/autoapi/lmflow/utils/conversation_template/llama/index.html @@ -0,0 +1,719 @@ + + + + + + + + + + + lmflow.utils.conversation_template.llama — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.utils.conversation_template.llama#

+
+

Attributes#

+
+ + + + + + + + + + + +

logger

LLAMA3_TEMPLATE

LLAMA2_TEMPLATE

+
+
+
+

Classes#

+ +
+
+

Module Contents#

+
+
+lmflow.utils.conversation_template.llama.logger[source]#
+
+ +
+
+class lmflow.utils.conversation_template.llama.Llama2ConversationTemplate[source]#
+

Bases: lmflow.utils.conversation_template.base.ConversationTemplate

+
+
+_encode(tokenizer: transformers.PreTrainedTokenizer, messages: List[Dict[str, str]], system: str | None = None, tools: str | None = None, **kwargs) Sequence[Tuple[List[int], List[int]]][source]#
+
+ +
+ +
+
+lmflow.utils.conversation_template.llama.LLAMA3_TEMPLATE[source]#
+
+ +
+
+lmflow.utils.conversation_template.llama.LLAMA2_TEMPLATE[source]#
+
+ +
+
+ + +
+ + + + + + + +
+ + + + + + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/utils/conversation_template/phi/index.html b/autoapi/lmflow/utils/conversation_template/phi/index.html new file mode 100644 index 000000000..929a035ba --- /dev/null +++ b/autoapi/lmflow/utils/conversation_template/phi/index.html @@ -0,0 +1,674 @@ + + + + + + + + + + + lmflow.utils.conversation_template.phi — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.utils.conversation_template.phi#

+
+

Attributes#

+
+ + + + + +

PHI3_TEMPLATE

+
+
+
+

Module Contents#

+
+
+lmflow.utils.conversation_template.phi.PHI3_TEMPLATE[source]#
+
+ +
+
+ + +
+ + + + + + + +
+ + + +
+ + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/utils/conversation_template/qwen/index.html b/autoapi/lmflow/utils/conversation_template/qwen/index.html new file mode 100644 index 000000000..0fe9e452c --- /dev/null +++ b/autoapi/lmflow/utils/conversation_template/qwen/index.html @@ -0,0 +1,674 @@ + + + + + + + + + + + lmflow.utils.conversation_template.qwen — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.utils.conversation_template.qwen#

+
+

Attributes#

+
+ + + + + +

QWEN2_TEMPLATE

+
+
+
+

Module Contents#

+
+
+lmflow.utils.conversation_template.qwen.QWEN2_TEMPLATE[source]#
+
+ +
+
+ + +
+ + + + + + + +
+ + + +
+ + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/utils/conversation_template/yi/index.html b/autoapi/lmflow/utils/conversation_template/yi/index.html new file mode 100644 index 000000000..02e508428 --- /dev/null +++ b/autoapi/lmflow/utils/conversation_template/yi/index.html @@ -0,0 +1,674 @@ + + + + + + + + + + + lmflow.utils.conversation_template.yi — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.utils.conversation_template.yi#

+
+

Attributes#

+
+ + + + + +

YI1_5_TEMPLATE

+
+
+
+

Module Contents#

+
+
+lmflow.utils.conversation_template.yi.YI1_5_TEMPLATE[source]#
+
+ +
+
+ + +
+ + + + + + + +
+ + + +
+ + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/utils/conversation_template/zephyr/index.html b/autoapi/lmflow/utils/conversation_template/zephyr/index.html new file mode 100644 index 000000000..ae8a1c951 --- /dev/null +++ b/autoapi/lmflow/utils/conversation_template/zephyr/index.html @@ -0,0 +1,710 @@ + + + + + + + + + + + lmflow.utils.conversation_template.zephyr — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.utils.conversation_template.zephyr#

+
+

Attributes#

+
+ + + + + + + + +

logger

ZEPHYR_TEMPLATE

+
+
+
+

Classes#

+ +
+
+

Module Contents#

+
+
+lmflow.utils.conversation_template.zephyr.logger[source]#
+
+ +
+
+class lmflow.utils.conversation_template.zephyr.ZephyrConversationTemplate[source]#
+

Bases: lmflow.utils.conversation_template.base.ConversationTemplate

+
+
+_encode(tokenizer: transformers.PreTrainedTokenizer, messages: List[Dict[str, str]], system: str | None = None, tools: str | None = None, **kwargs) Sequence[Tuple[List[int], List[int]]][source]#
+
+ +
+ +
+
+lmflow.utils.conversation_template.zephyr.ZEPHYR_TEMPLATE[source]#
+
+ +
+
+ + +
+ + + + + + + +
+ + + + + + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/utils/data_utils/index.html b/autoapi/lmflow/utils/data_utils/index.html new file mode 100644 index 000000000..f0be21f84 --- /dev/null +++ b/autoapi/lmflow/utils/data_utils/index.html @@ -0,0 +1,832 @@ + + + + + + + + + + + lmflow.utils.data_utils — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.utils.data_utils#

+

The program includes several functions: setting a random seed, +loading data from a JSON file, batching data, and extracting answers from generated text.

+
+

Classes#

+ +
+
+

Functions#

+
+ + + + + + + + + + + + + + + + + +

set_random_seed(seed)

Set the random seed for random, numpy, torch, torch.cuda.

load_data(file_name)

Load data with file name.

batchlize(examples, batch_size, random_shuffle)

Convert examples to a dataloader.

answer_extraction(response[, answer_type])

Use this function to extract answers from generated text

process_image_flag(text[, image_flag])

+
+
+
+

Module Contents#

+
+
+lmflow.utils.data_utils.set_random_seed(seed: int)[source]#
+

Set the random seed for random, numpy, torch, torch.cuda.

+
+
Parameters:
+
+
seed : int

The default seed.

+
+
+
+
+
+ +
+
+lmflow.utils.data_utils.load_data(file_name: str)[source]#
+

Load data with file name.

+
+
Parameters:
+
+
file_name : str.

The dataset file name.

+
+
+
+
Returns:
+
+
inputs : list.

The input texts of the dataset.

+
+
outputs : list.

The output texts of the dataset.

+
+
len : int.

The length of the dataset.

+
+
+
+
+
+ +
+
+lmflow.utils.data_utils.batchlize(examples: list, batch_size: int, random_shuffle: bool)[source]#
+

Convert examples to a dataloader.

+
+
Parameters:
+
+
examples : list.

Data list.

+
+
batch_size : int.
+
random_shuffle : bool

If true, the dataloader shuffles the training data.

+
+
+
+
Returns:
+
+
dataloader:

Dataloader with batch generator.

+
+
+
+
+
+ +
+
+lmflow.utils.data_utils.answer_extraction(response, answer_type=None)[source]#
+

Use this function to extract answers from generated text

+
+
Parameters:
+
+
args

Arguments.

+
+
response : str

plain string response.

+
+
+
+
Returns:
+
+
answer:

Decoded answer (such as A, B, C, D, E for multiple-choice QA).

+
+
+
+
+
+ +
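A minimal usage sketch of these utilities (not taken from LMFlow's own scripts); it assumes, per the signatures above, that load_data returns (inputs, outputs, size) and that batchlize yields batches of examples. The file path is illustrative only.

from lmflow.utils.data_utils import set_random_seed, load_data, batchlize, answer_extraction

set_random_seed(42)

# "data/test.json" is a placeholder path, not a file shipped with LMFlow.
inputs, outputs, size = load_data("data/test.json")
examples = [{"input": x, "output": y} for x, y in zip(inputs, outputs)]

for batch in batchlize(examples, batch_size=8, random_shuffle=True):
    for example in batch:
        # Extract a short answer (e.g. an option letter) from the raw response text.
        answer = answer_extraction(example["output"])
        print(answer)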
+
+lmflow.utils.data_utils.process_image_flag(text, image_flag='<ImageHere>')[source]#
+
+ +
+
+class lmflow.utils.data_utils.VLLMInferenceResultWithInput[source]#
+

Bases: TypedDict

+
+
+input: str[source]#
+
+ +
+
+output: List[str] | List[List[int]][source]#
+
+ +
+ +
+
+class lmflow.utils.data_utils.RewardModelInferenceResultWithInput[source]#
+

Bases: TypedDict

+
+
+input: str[source]#
+
+ +
+
+output: List[Dict[str, str | float]][source]#
+
+ +
+ +
+
+ + +
+ + + + + + + +
+ + + + + + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/utils/flash_attention/bloom_flash_attention/index.html b/autoapi/lmflow/utils/flash_attention/bloom_flash_attention/index.html new file mode 100644 index 000000000..5953fdbe7 --- /dev/null +++ b/autoapi/lmflow/utils/flash_attention/bloom_flash_attention/index.html @@ -0,0 +1,692 @@ + + + + + + + + + + + lmflow.utils.flash_attention.bloom_flash_attention — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.utils.flash_attention.bloom_flash_attention#

+
+

Functions#

+
+ + + + + + + + + + + +

forward(self, hidden_states, residual, alibi, ...[, ...])

_prepare_attn_mask(→ torch.BoolTensor)

replace_bloom_attn_with_flash_attn()

+
+
+
+

Module Contents#

+
+
+lmflow.utils.flash_attention.bloom_flash_attention.forward(self, hidden_states: torch.Tensor, residual: torch.Tensor, alibi: torch.Tensor, attention_mask: torch.Tensor, layer_past: Tuple[torch.Tensor, torch.Tensor] | None = None, head_mask: torch.Tensor | None = None, use_cache: bool = False, output_attentions: bool = False)[source]#
+
+ +
+
+lmflow.utils.flash_attention.bloom_flash_attention._prepare_attn_mask(self, attention_mask: torch.Tensor, input_shape: Tuple[int, int], past_key_values_length: int) torch.BoolTensor[source]#
+
+ +
+
+lmflow.utils.flash_attention.bloom_flash_attention.replace_bloom_attn_with_flash_attn()[source]#
+
+ +
+
+ + +
+ + + + + + + +
+ + + + + + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/utils/flash_attention/gpt2_flash_attention/index.html b/autoapi/lmflow/utils/flash_attention/gpt2_flash_attention/index.html new file mode 100644 index 000000000..7fa04be8e --- /dev/null +++ b/autoapi/lmflow/utils/flash_attention/gpt2_flash_attention/index.html @@ -0,0 +1,692 @@ + + + + + + + + + + + lmflow.utils.flash_attention.gpt2_flash_attention — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.utils.flash_attention.gpt2_flash_attention#

+
+

Functions#

+
+ + + + + + + + + + + +

forward(→ Tuple[Union[torch.Tensor, ...)

_prepare_decoder_attention_mask(self, attention_mask, ...)

replace_gpt2_attn_with_flash_attn()

+
+
+
+

Module Contents#

+
+
+lmflow.utils.flash_attention.gpt2_flash_attention.forward(self, hidden_states: Tuple[torch.FloatTensor] | None, layer_past: Tuple[torch.Tensor] | None = None, attention_mask: torch.FloatTensor | None = None, head_mask: torch.FloatTensor | None = None, encoder_hidden_states: torch.Tensor | None = None, encoder_attention_mask: torch.FloatTensor | None = None, use_cache: bool | None = False, output_attentions: bool | None = False) Tuple[torch.Tensor | Tuple[torch.Tensor], Ellipsis][source]#
+
+ +
+
+lmflow.utils.flash_attention.gpt2_flash_attention._prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length)[source]#
+
+ +
+
+lmflow.utils.flash_attention.gpt2_flash_attention.replace_gpt2_attn_with_flash_attn()[source]#
+
+ +
+
+ + +
+ + + + + + + +
+ + + + + + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/utils/flash_attention/gpt_neo_flash_attention/index.html b/autoapi/lmflow/utils/flash_attention/gpt_neo_flash_attention/index.html new file mode 100644 index 000000000..d815a8830 --- /dev/null +++ b/autoapi/lmflow/utils/flash_attention/gpt_neo_flash_attention/index.html @@ -0,0 +1,692 @@ + + + + + + + + + + + lmflow.utils.flash_attention.gpt_neo_flash_attention — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.utils.flash_attention.gpt_neo_flash_attention#

+
+

Functions#

+
+ + + + + + + + + + + +

_attn(self, query, key, value[, attention_mask, head_mask])

forward(self, hidden_states[, attention_mask, ...])

replace_gpt_neo_attn_with_flash_attn()

+
+
+
+

Module Contents#

+
+
+lmflow.utils.flash_attention.gpt_neo_flash_attention._attn(self, query, key, value, attention_mask=None, head_mask=None)[source]#
+
+ +
+
+lmflow.utils.flash_attention.gpt_neo_flash_attention.forward(self, hidden_states, attention_mask=None, layer_past=None, head_mask=None, use_cache=False, output_attentions=False)[source]#
+
+ +
+
+lmflow.utils.flash_attention.gpt_neo_flash_attention.replace_gpt_neo_attn_with_flash_attn()[source]#
+
+ +
+
+ + +
+ + + + + + + +
+ + + +
+ + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/utils/flash_attention/index.html b/autoapi/lmflow/utils/flash_attention/index.html new file mode 100644 index 000000000..c3f7c3feb --- /dev/null +++ b/autoapi/lmflow/utils/flash_attention/index.html @@ -0,0 +1,663 @@ + + + + + + + + + + + lmflow.utils.flash_attention — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/utils/flash_attention/llama_flash_attention/index.html b/autoapi/lmflow/utils/flash_attention/llama_flash_attention/index.html new file mode 100644 index 000000000..993c82742 --- /dev/null +++ b/autoapi/lmflow/utils/flash_attention/llama_flash_attention/index.html @@ -0,0 +1,692 @@ + + + + + + + + + + + lmflow.utils.flash_attention.llama_flash_attention — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.utils.flash_attention.llama_flash_attention#

+
+

Functions#

+
+ + + + + + + + + + + +

forward(→ Tuple[torch.Tensor, Optional[torch.Tensor], ...)

_prepare_decoder_attention_mask(self, attention_mask, ...)

replace_llama_attn_with_flash_attn()

+
+
+
+

Module Contents#

+
+
+lmflow.utils.flash_attention.llama_flash_attention.forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor | None = None, position_ids: torch.LongTensor | None = None, past_key_value: Tuple[torch.Tensor] | None = None, output_attentions: bool = False, use_cache: bool = False) Tuple[torch.Tensor, torch.Tensor | None, Tuple[torch.Tensor] | None][source]#
+
+ +
+
+lmflow.utils.flash_attention.llama_flash_attention._prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length)[source]#
+
+ +
+
+lmflow.utils.flash_attention.llama_flash_attention.replace_llama_attn_with_flash_attn()[source]#
+
+ +
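A hedged usage sketch of the monkey-patch pattern suggested by this API: the replace_* helper is assumed to need to run before the model is instantiated so that transformers' LLaMA attention uses the FlashAttention forward; the checkpoint path is a placeholder.

from transformers import AutoModelForCausalLM
from lmflow.utils.flash_attention.llama_flash_attention import replace_llama_attn_with_flash_attn

replace_llama_attn_with_flash_attn()  # patch the attention classes first
model = AutoModelForCausalLM.from_pretrained("path/to/llama-checkpoint")  # then build the model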
+
+ + +
+ + + + + + + +
+ + + + + + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/utils/flash_attention/triton_flash_attention/index.html b/autoapi/lmflow/utils/flash_attention/triton_flash_attention/index.html new file mode 100644 index 000000000..9dd4e0f2d --- /dev/null +++ b/autoapi/lmflow/utils/flash_attention/triton_flash_attention/index.html @@ -0,0 +1,908 @@ + + + + + + + + + + + lmflow.utils.flash_attention.triton_flash_attention — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.utils.flash_attention.triton_flash_attention#

+

Experimental implementation of FlashAttention in Triton. +Tested with triton==2.0.0.dev20221202. +Triton 2.0 has a new backend (MLIR) but seems like it doesn’t yet work for head dimensions +other than 64: +openai/triton +We’ll update this implementation with the new Triton backend once this is fixed.

+

We use the FlashAttention implementation from Phil Tillet as a starting point. +openai/triton

+

Changes: +- Implement both causal and non-causal attention. +- Implement both self-attention and cross-attention. +- Support arbitrary seqlens (not just multiples of 128), for both forward and backward. +- Support all head dimensions up to 128 (not just 16, 32, 64, 128), for both forward and backward. +- Support attention bias. +- Speed up the forward pass a bit, and only store the LSE instead of m and l. +- Make the backward for d=128 much faster by reducing register spilling. +- Optionally parallelize the backward pass across seqlen_k, to deal with the case of +small batch size * nheads.

+

Caution: +- This is an experimental implementation. The forward pass should be quite robust but +I’m not 100% sure that the backward pass doesn’t have race conditions (due to the Triton compiler). +- This implementation has only been tested on A100. +- If you plan to use headdim other than 64 and 128, you should test for race conditions +(due to the Triton compiler), as done in tests/test_flash_attn.py +“test_flash_attn_triton_race_condition”. I’ve tested and fixed many race conditions +for different head dimensions (40, 48, 64, 128, 80, 88, 96), but I’m still not 100% confident +that there are none left for other head dimensions.

+

Differences between this Triton version and the CUDA version: +- Triton version doesn’t support dropout. +- Triton forward is generally faster than CUDA forward, while Triton backward is +generally slower than CUDA backward. Overall Triton forward + backward is slightly slower +than CUDA forward + backward. +- Triton version doesn’t support different sequence lengths in a batch (i.e., RaggedTensor/NestedTensor). +- Triton version supports attention bias, while CUDA version doesn’t.

+
+

Attributes#

+ +
+
+

Classes#

+ +
+
+

Functions#

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + +

_fwd_kernel(Q, K, V, Bias, Out, Lse, TMP, ...)

_bwd_preprocess_do_o_dot(Out, DO, Delta, stride_ob, ...)

_bwd_store_dk_dv(dk_ptrs, dv_ptrs, dk, dv, offs_n, ...)

_bwd_kernel_one_col_block(start_n, Q, K, V, Bias, DO, ...)

init_to_zero(name)

_bwd_kernel(Q, K, V, Bias, DO, DQ, DK, DV, LSE, D, ...)

_flash_attn_forward(q, k, v[, bias, causal, softmax_scale])

_flash_attn_backward(do, q, k, v, o, lse, dq, dk, dv)

+
+
+
+

Module Contents#

+
+
+lmflow.utils.flash_attention.triton_flash_attention._fwd_kernel(Q, K, V, Bias, Out, Lse, TMP, softmax_scale, stride_qb, stride_qh, stride_qm, stride_kb, stride_kh, stride_kn, stride_vb, stride_vh, stride_vn, stride_bb, stride_bh, stride_bm, stride_ob, stride_oh, stride_om, nheads, seqlen_q, seqlen_k, seqlen_q_rounded, headdim, CACHE_KEY_SEQLEN_Q, CACHE_KEY_SEQLEN_K, BIAS_TYPE: triton.language.constexpr, IS_CAUSAL: triton.language.constexpr, BLOCK_HEADDIM: triton.language.constexpr, EVEN_M: triton.language.constexpr, EVEN_N: triton.language.constexpr, EVEN_HEADDIM: triton.language.constexpr, BLOCK_M: triton.language.constexpr, BLOCK_N: triton.language.constexpr)[source]#
+
+ +
+
+lmflow.utils.flash_attention.triton_flash_attention._bwd_preprocess_do_o_dot(Out, DO, Delta, stride_ob, stride_oh, stride_om, stride_dob, stride_doh, stride_dom, nheads, seqlen_q, seqlen_q_rounded, headdim, BLOCK_M: triton.language.constexpr, BLOCK_HEADDIM: triton.language.constexpr)[source]#
+
+ +
+
+lmflow.utils.flash_attention.triton_flash_attention._bwd_store_dk_dv(dk_ptrs, dv_ptrs, dk, dv, offs_n, offs_d, seqlen_k, headdim, EVEN_M: triton.language.constexpr, EVEN_N: triton.language.constexpr, EVEN_HEADDIM: triton.language.constexpr)[source]#
+
+ +
+
+lmflow.utils.flash_attention.triton_flash_attention._bwd_kernel_one_col_block(start_n, Q, K, V, Bias, DO, DQ, DK, DV, LSE, D, softmax_scale, stride_qm, stride_kn, stride_vn, stride_bm, stride_dom, stride_dqm, stride_dkn, stride_dvn, seqlen_q, seqlen_k, headdim, ATOMIC_ADD: triton.language.constexpr, BIAS_TYPE: triton.language.constexpr, IS_CAUSAL: triton.language.constexpr, BLOCK_HEADDIM: triton.language.constexpr, EVEN_M: triton.language.constexpr, EVEN_N: triton.language.constexpr, EVEN_HEADDIM: triton.language.constexpr, BLOCK_M: triton.language.constexpr, BLOCK_N: triton.language.constexpr)[source]#
+
+ +
+
+lmflow.utils.flash_attention.triton_flash_attention.init_to_zero(name)[source]#
+
+ +
+
+lmflow.utils.flash_attention.triton_flash_attention._bwd_kernel(Q, K, V, Bias, DO, DQ, DK, DV, LSE, D, softmax_scale, stride_qb, stride_qh, stride_qm, stride_kb, stride_kh, stride_kn, stride_vb, stride_vh, stride_vn, stride_bb, stride_bh, stride_bm, stride_dob, stride_doh, stride_dom, stride_dqb, stride_dqh, stride_dqm, stride_dkb, stride_dkh, stride_dkn, stride_dvb, stride_dvh, stride_dvn, nheads, seqlen_q, seqlen_k, seqlen_q_rounded, headdim, CACHE_KEY_SEQLEN_Q, CACHE_KEY_SEQLEN_K, BIAS_TYPE: triton.language.constexpr, IS_CAUSAL: triton.language.constexpr, BLOCK_HEADDIM: triton.language.constexpr, SEQUENCE_PARALLEL: triton.language.constexpr, EVEN_M: triton.language.constexpr, EVEN_N: triton.language.constexpr, EVEN_HEADDIM: triton.language.constexpr, BLOCK_M: triton.language.constexpr, BLOCK_N: triton.language.constexpr)[source]#
+
+ +
+
+lmflow.utils.flash_attention.triton_flash_attention._flash_attn_forward(q, k, v, bias=None, causal=False, softmax_scale=None)[source]#
+
+ +
+
+lmflow.utils.flash_attention.triton_flash_attention._flash_attn_backward(do, q, k, v, o, lse, dq, dk, dv, bias=None, causal=False, softmax_scale=None)[source]#
+
+ +
+
+class lmflow.utils.flash_attention.triton_flash_attention.FlashAttnQKVPackedFunc[source]#
+

Bases: torch.autograd.Function

+
+
+static forward(ctx, qkv, bias=None, causal=False, softmax_scale=None)[source]#
+

qkv: (batch, seqlen, 3, nheads, headdim) +bias: optional, shape broadcastable to (batch, nheads, seqlen, seqlen).

+
+

For example, ALiBi mask for causal would have shape (1, nheads, 1, seqlen). +ALiBi mask for non-causal would have shape (1, nheads, seqlen, seqlen)

+
+
+ +
+
+static backward(ctx, do)[source]#
+
+ +
+ +
+
+lmflow.utils.flash_attention.triton_flash_attention.flash_attn_qkvpacked_func[source]#
+
+ +
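A hedged sketch of calling the packed-QKV wrapper, following the shapes in the docstring above; the fp16/CUDA requirement and the positional argument order (qkv, bias, causal, softmax_scale) are assumptions based on the forward signature, not confirmed behavior.

import torch
from lmflow.utils.flash_attention.triton_flash_attention import flash_attn_qkvpacked_func

batch, seqlen, nheads, headdim = 2, 128, 8, 64
qkv = torch.randn(batch, seqlen, 3, nheads, headdim, device="cuda", dtype=torch.float16)

# bias=None, causal=True, softmax_scale=None, spelled out positionally for autograd.Function.apply.
out = flash_attn_qkvpacked_func(qkv, None, True, None)
print(out.shape)  # expected: (batch, seqlen, nheads, headdim)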
+
+class lmflow.utils.flash_attention.triton_flash_attention.FlashAttnKVPackedFunc[source]#
+

Bases: torch.autograd.Function

+
+
+static forward(ctx, q, kv, bias=None, causal=False, softmax_scale=None)[source]#
+

q: (batch, seqlen_q, nheads, headdim) +kv: (batch, seqlen_k, 2, nheads, headdim) +bias: optional, shape broadcastable to (batch, nheads, seqlen_q, seqlen_k).

+
+

For example, ALiBi mask for causal would have shape (1, nheads, 1, seqlen_k). +ALiBi mask for non-causal would have shape (1, nheads, seqlen_q, seqlen_k)

+
+
+ +
+
+static backward(ctx, do)[source]#
+
+ +
+ +
+
+lmflow.utils.flash_attention.triton_flash_attention.flash_attn_kvpacked_func[source]#
+
+ +
+
+class lmflow.utils.flash_attention.triton_flash_attention.FlashAttnFunc[source]#
+

Bases: torch.autograd.Function

+
+
+static forward(ctx, q, k, v, bias=None, causal=False, softmax_scale=None)[source]#
+

q: (batch_size, seqlen_q, nheads, headdim) +k, v: (batch_size, seqlen_k, nheads, headdim) +bias: optional, shape broadcastable to (batch, nheads, seqlen_q, seqlen_k).

+
+

For example, ALiBi mask for causal would have shape (1, nheads, 1, seqlen_k). +ALiBi mask for non-causal would have shape (1, nheads, seqlen_q, seqlen_k)

+
+
+ +
+
+static backward(ctx, do)[source]#
+
+ +
+ +
+
+lmflow.utils.flash_attention.triton_flash_attention.flash_attn_func[source]#
+
+ +
+
+ + +
+ + + + + + + +
+ + + + + + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/utils/index.html b/autoapi/lmflow/utils/index.html new file mode 100644 index 000000000..c43c69fa3 --- /dev/null +++ b/autoapi/lmflow/utils/index.html @@ -0,0 +1,670 @@ + + + + + + + + + + + lmflow.utils — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/utils/llava_conversation_lib/index.html b/autoapi/lmflow/utils/llava_conversation_lib/index.html new file mode 100644 index 000000000..7752bc2a3 --- /dev/null +++ b/autoapi/lmflow/utils/llava_conversation_lib/index.html @@ -0,0 +1,927 @@ + + + + + + + + + + + lmflow.utils.llava_conversation_lib — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.utils.llava_conversation_lib#

+
+

Attributes#

+ +
+
+

Classes#

+
+ + + + + + + + +

SeparatorStyle

Different separator style.

Conversation

A class that keeps all conversation history.

+
+
+
+

Module Contents#

+
+
+class lmflow.utils.llava_conversation_lib.SeparatorStyle(*args, **kwds)[source]#
+

Bases: enum.Enum

+

Different separator style.

+
+
+SINGLE[source]#
+
+ +
+
+TWO[source]#
+
+ +
+
+MPT[source]#
+
+ +
+
+PLAIN[source]#
+
+ +
+
+LLAMA_2[source]#
+
+ +
+ +
+
+class lmflow.utils.llava_conversation_lib.Conversation[source]#
+

A class that keeps all conversation history.

+
+
+system: str[source]#
+
+ +
+
+roles: List[str][source]#
+
+ +
+
+messages: List[List[str]][source]#
+
+ +
+
+offset: int[source]#
+
+ +
+
+sep_style: SeparatorStyle[source]#
+
+ +
+
+sep: str = '###'[source]#
+
+ +
+
+sep2: str = None[source]#
+
+ +
+
+version: str = 'Unknown'[source]#
+
+ +
+
+skip_next: bool = False[source]#
+
+ +
+
+get_prompt()[source]#
+
+ +
+
+append_message(role, message)[source]#
+
+ +
+
+get_images(return_pil=False)[source]#
+
+ +
+
+to_gradio_chatbot()[source]#
+
+ +
+
+copy()[source]#
+
+ +
+
+dict()[source]#
+
+ +
+ +
+
+lmflow.utils.llava_conversation_lib.conv_vicuna_v0[source]#
+
+ +
+
+lmflow.utils.llava_conversation_lib.conv_vicuna_v1[source]#
+
+ +
+
+lmflow.utils.llava_conversation_lib.conv_llama_2[source]#
+
+ +
+
+lmflow.utils.llava_conversation_lib.conv_llava_llama_2[source]#
+
+ +
+
+lmflow.utils.llava_conversation_lib.conv_mpt[source]#
+
+ +
+
+lmflow.utils.llava_conversation_lib.conv_llava_plain[source]#
+
+ +
+
+lmflow.utils.llava_conversation_lib.conv_llava_v0[source]#
+
+ +
+
+lmflow.utils.llava_conversation_lib.conv_llava_v0_mmtag[source]#
+
+ +
+
+lmflow.utils.llava_conversation_lib.conv_llava_v1[source]#
+
+ +
+
+lmflow.utils.llava_conversation_lib.conv_llava_v1_mmtag[source]#
+
+ +
+
+lmflow.utils.llava_conversation_lib.default_conversation[source]#
+
+ +
+
+lmflow.utils.llava_conversation_lib.conv_templates[source]#
+
+ +
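A hedged sketch of the usual pattern for this kind of conversation library: copy a preset conversation, append the turns, then render the prompt. The conv_templates key and the use of None as a generation placeholder are assumptions for illustration.

from lmflow.utils.llava_conversation_lib import conv_templates

conv = conv_templates["llava_v1"].copy()  # the key name is an assumption
conv.append_message(conv.roles[0], "What is shown in this image? <ImageHere>")
conv.append_message(conv.roles[1], None)  # leave the assistant turn empty for generation
prompt = conv.get_prompt()
print(prompt)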
+
+ + +
+ + + + + + + +
+ + + + + + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/utils/model/index.html b/autoapi/lmflow/utils/model/index.html new file mode 100644 index 000000000..f5b368040 --- /dev/null +++ b/autoapi/lmflow/utils/model/index.html @@ -0,0 +1,692 @@ + + + + + + + + + + + lmflow.utils.model — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + + + + + + + + + +
+ + + +
+ + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/utils/multimodal/index.html b/autoapi/lmflow/utils/multimodal/index.html new file mode 100644 index 000000000..77a04782a --- /dev/null +++ b/autoapi/lmflow/utils/multimodal/index.html @@ -0,0 +1,692 @@ + + + + + + + + + + + lmflow.utils.multimodal — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.utils.multimodal#

+
+

Functions#

+
+ + + + + + + + + + + +

update_custom_config(config, model_args)

load_llava_pretrain_model(model, checkpoint_path)

adapt_llava_model_to_lmflow_type(state_dict)

+
+
+
+

Module Contents#

+
+
+lmflow.utils.multimodal.update_custom_config(config, model_args)[source]#
+
+ +
+
+lmflow.utils.multimodal.load_llava_pretrain_model(model, checkpoint_path)[source]#
+
+ +
+
+lmflow.utils.multimodal.adapt_llava_model_to_lmflow_type(state_dict)[source]#
+
+ +
+
+ + +
+ + + + + + + +
+ + + + + + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/utils/position_interpolation/index.html b/autoapi/lmflow/utils/position_interpolation/index.html new file mode 100644 index 000000000..ca1611f0d --- /dev/null +++ b/autoapi/lmflow/utils/position_interpolation/index.html @@ -0,0 +1,659 @@ + + + + + + + + + + + lmflow.utils.position_interpolation — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + + + + + + + + + +
+ + + +
+ + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/utils/position_interpolation/llama_rope_scaled_monkey_patch/index.html b/autoapi/lmflow/utils/position_interpolation/llama_rope_scaled_monkey_patch/index.html new file mode 100644 index 000000000..5c5ea9c71 --- /dev/null +++ b/autoapi/lmflow/utils/position_interpolation/llama_rope_scaled_monkey_patch/index.html @@ -0,0 +1,755 @@ + + + + + + + + + + + lmflow.utils.position_interpolation.llama_rope_scaled_monkey_patch — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.utils.position_interpolation.llama_rope_scaled_monkey_patch#

+
+

Classes#

+ +
+
+

Functions#

+
+ + + + + +

replace_llama_with_condense(pi_ratio, ntk_ratio)

+
+
+
+

Module Contents#

+
+
+class lmflow.utils.position_interpolation.llama_rope_scaled_monkey_patch.CondenseRotaryEmbedding(dim, pi_ratio, ntk_ratio, max_position_embeddings=2048, base=10000, device=None)[source]#
+

Bases: torch.nn.Module

+
+
+ntk_ratio[source]#
+
+ +
+
+base[source]#
+
+ +
+
+inv_freq[source]#
+
+ +
+
+pi_ratio[source]#
+
+ +
+
+max_seq_len_cached[source]#
+
+ +
+
+t[source]#
+
+ +
+
+freqs[source]#
+
+ +
+
+emb[source]#
+
+ +
+
+dtype[source]#
+
+ +
+
+forward(x, seq_len=None)[source]#
+
+ +
+ +
+
+lmflow.utils.position_interpolation.llama_rope_scaled_monkey_patch.replace_llama_with_condense(pi_ratio, ntk_ratio)[source]#
+
+ +
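A hedged usage sketch: the patch is assumed to be applied before loading a LLaMA checkpoint so that its rotary embedding is replaced by CondenseRotaryEmbedding; the ratio values and the checkpoint path are illustrative only.

from transformers import AutoModelForCausalLM
from lmflow.utils.position_interpolation.llama_rope_scaled_monkey_patch import replace_llama_with_condense

# pi_ratio=4 targets roughly a 4x longer context via position interpolation; ntk_ratio=1 leaves NTK scaling off.
replace_llama_with_condense(pi_ratio=4, ntk_ratio=1)
model = AutoModelForCausalLM.from_pretrained("path/to/llama-checkpoint")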
+
+ + +
+ + + + + + + +
+ + + + + + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/autoapi/lmflow/version/index.html b/autoapi/lmflow/version/index.html new file mode 100644 index 000000000..68fda1874 --- /dev/null +++ b/autoapi/lmflow/version/index.html @@ -0,0 +1,669 @@ + + + + + + + + + + + lmflow.version — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

lmflow.version#

+
+

Attributes#

+
+ + + + + +

__version__

+
+
+
+

Module Contents#

+
+
+lmflow.version.__version__ = '0.0.7'[source]#
+
+ +
+
+ + +
+ + + + + + + +
+ + + +
+ + +
+
+ +
+ +
+
+
+ + + + + +
+ + +
+ + \ No newline at end of file diff --git a/blogs/benchmark.html b/blogs/benchmark.html new file mode 100644 index 000000000..25d2860d0 --- /dev/null +++ b/blogs/benchmark.html @@ -0,0 +1,1079 @@ + + + + + + + + + + + LMFlow Benchmark: An Automatic Evaluation Framework for Open-Source LLMs — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

LMFlow Benchmark: An Automatic Evaluation Framework for Open-Source LLMs#

+

May 9, 2023

+
+

Introduction#

+

Evaluation of a chat-style Large Language Model (LLM) has been a huge challenge since the breakthrough of ChatGPT. On the one hand, researchers and engineers need a reliable way to compare two models and decide which model to choose under a certain application scenario. On the other hand, they have to monitor the model performance during the training of an LLM to avoid performance issues such as forgetting.

+

The recent work of Vicuna introduces a human-evaluation comparison method, a.k.a. Chatbot Arena. They also pioneered an evaluation method that invokes GPT-4 to compare the outputs of two models. However, those methods require expensive human labeling or GPT-4 API calls, which are neither scalable nor convenient for LLM development.

+

In this article, we introduce LMFlow Benchmark, a new benchmark that provides a cheap and easy-to-use evaluation framework reflecting different aspects of LLMs. We have open-sourced the dataset and the code as well, so that everyone in the LLM community can use those toolkits to evaluate, monitor or compare different LLMs.

+
+
+

Metric#

+

In our evaluation framework, Negative Log Likelihood (NLL) is used for evaluating LLMs:

$$\mathrm{NLL} = -\frac{1}{N}\sum_{i=1}^{N} \log p_{\theta}\big(\text{output}_i \mid \text{context}_i\big)$$

which corresponds to the LLM model’s prediction probability over a corpus set given their contexts. If the corpus set itself indicates a certain type of LLM ability, such as multi-round conversation, instruction following, math problem solving, role-playing, then NLL on those corpora can provide quantitative metrics to reflect those abilities.

+

+

The key idea behind NLL is that

+

Generation ability is positively correlated with prediction ability.

+

For instance, an LLM which performs well in essay writing should have no problem understanding and predicting a reference human essay, just like human chess masters performing well at memorizing an endgame on a chessboard.

+

Besides NLL, another similar and commonly used metric in NLP is Perplexity (PPL):

+

$$\mathrm{PPL} = \exp\!\left(-\frac{1}{T}\sum_{t=1}^{T} \log p_{\theta}(y_t \mid y_{<t})\right)$$

Nevertheless, perplexity intrinsically depends on the lengths of the tokenized sequences, which induces unfair comparison between models with different tokenizers. For example, if a model has a smaller vocabulary size, it inherently results in a longer tokenized sequence and a lower token-level perplexity. Thus in all our experiments, we use NLL instead of PPL.

+

One huge advantage of NLL evaluation is that it does not require human involvement during the evaluation process. As long as the test reference corpus is given, one can evaluate different aspects of an LLM’s ability automatically. This makes the evaluation of LLM more accessible to researchers.
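As a rough illustration (a minimal sketch, not the official LMFlow evaluator), corpus-level NLL can be computed with any Hugging Face causal LM by masking out the context tokens so that only the reference answer contributes to the loss; the model path below is a placeholder.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("path/to/model")
model = AutoModelForCausalLM.from_pretrained("path/to/model").eval()

def example_nll(context: str, answer: str) -> float:
    ctx_ids = tokenizer(context, return_tensors="pt").input_ids
    ans_ids = tokenizer(answer, return_tensors="pt", add_special_tokens=False).input_ids
    input_ids = torch.cat([ctx_ids, ans_ids], dim=1)
    labels = input_ids.clone()
    labels[:, : ctx_ids.shape[1]] = -100  # ignore context positions in the loss
    with torch.no_grad():
        loss = model(input_ids, labels=labels).loss  # mean NLL over answer tokens
    return loss.item() * ans_ids.shape[1]  # sum NLL for this example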

+

Besides its convenience, NLL itself is also a good metric. In our experimental results in commonsense QA, we find that NLL is correlated with QA accuracy when comparing the different finetuned versions of a single model.

+

Table 1: Accuracy results in traditional commonsense QA benchmarks

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

winogrande

boolq

arc_e

hellaswag

piqa

obqa

arc_c

Average

bloom-3b

58.7

61.6

59.5

52.7

70.8

42.2

30.6

53.7

bloom-7.1b

64.4

62.9

65.0

59.6

73.6

35.8

33.4

56.3

opt-6.9b

65.2

66.1

65.6

67.2

76.5

37.4

34.6

58.9

opt-13b

65.0

65.9

67.1

69.8

76.9

39.0

35.7

59.9

llama-7b

67.9

73.2

67.3

73.0

78.3

42.4

41.4

62.7

llama-13b

70.0

68.5

74.5

76.2

79.1

42.2

44.5

65.0

+
+

Table 2: NLL results in corpus of commonsense QA benchmarks

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

winogrande

boolq

arc_e

hellaswag

piqa

obqa

arc_c

Average

bloom-3b

86.5

228

86

245

134

64.5

101.5

135.1

bloom-7.1b

85

215

81.5

237

130

62.5

96

129.5

opt-6.9b

81.5

200

81.5

224

125

61

96

124.1

opt-13b

82

198

82.5

220

125

61.8

97

123.7

llama-7b

79.5

167

71.5

214

121

58

85

113.7

llama-13b

79

153

70

207

119

57.3

83

109.7

+
+

Figure 1: Correlation between NLL and accuracy on commonsense QA benchmarks

+

+

In the above figure, one can find that QA accuracy is roughly correlated to NLL. Thus NLL is able to reflect the “magnitude” of prediction level difference between models. A huge gap in NLL normally entails a huge performance gap.

+

In the following sections, we provide a comprehensive evaluation of currently available LLM models and summarize their performance. Due to page limits, we only demonstrate partial evaluation results in this article. The full results can be found here LLM Comparison Sheet 0501. One can also evaluate their own LLM models using our LMFlow evaluation toolkit (https://optimalscale.github.io/LMFlow/examples/TASK_GUIDE.html).

+

We also introduce our own finetuned model Robin-7b-Chat and compare its performance with some other popular models. One may try our online demo LMFlow-Chat at https://lmflow.com/.

+
+
+

Chat Performance#

+

The first aspect of LLM is chat performance, especially multi-round chat. Based on our experimental results, we have the following observations:

+
    +
  1. Without finetuning, foundation models do not perform well on chat.

  2. +
  3. After finetuning, foundation models can all obtain huge performance gain.

  4. +
  5. Our Robin-Chat-7b achieves comparable chat performance with Vicuna-13b.

  6. +
+

Table 3: NLL on multi-round chat datasets (lower is better)

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

En_multi_round_chat

En_single_round_chat

Average

Bloom-7.1b

188

213

200.5

Bloom-7.1b-chat

164

181

172.5

Llama-7b

163

184

173.5

Vicuna-7b

155

175

165

Vicuna-13b

150

170

160

Robin-Chat-7b

146

164

155

+
+

We also evaluate Vicuna-13b and our Robin-Chat-7b model qualitatively via use cases. It turns out that Robin-Chat-7b’s performance is comparable to Vicuna-13b’s. For example, both models can effortlessly refer to the conversation context with only slight mistakes: Vicuna-13b sometimes miscounts the items, while Robin-Chat-7b introduces only Mexican food when asked for international cuisine recommendations and omits other choices.

+

Table 4: Use cases of multi-round chat

+
+ + + + + + + + + + + + + + + + + + + + + + + + +

Round

Question

Vicuna-13b

Robin-Chat-7b

R1

Where is the capital of USA?

The capital of the United States is Washington, D.C. (District of Columbia). It is located on the east coast of the country, in the northern part of the state of Maryland. Washington, D.C. is named after George Washington, the first president of the United States. It is a federal district, which means that it is not part of any state, but is instead directly administered by the federal government. The city is home to many national landmarks, including the White House, the Capitol Building, and the Supreme Court. It is also home to many museums, monuments, and other cultural institutions.

The capital of the United States is Washington, D.C. (District of Columbia). It is located on the east coast of the USA, between the states of Maryland and Virginia.

R2

Can you introduce some popular food there?

Washington, D.C. is a city with a diverse food scene, and there are many popular restaurants and cuisines to choose from. Some popular foods in Washington, D.C. include:

- American Classics: There are many restaurants in Washington, D.C. that serve classic American dishes such as hamburgers, hot dogs, and apple pie.

- Seafood: Washington, D.C. is located near the Chesapeake Bay, which is known for its fresh seafood. Many restaurants in the city serve dishes such as crab cakes, shrimp, and oysters.

- Ethnic Cuisine: Washington, D.C. has a diverse population, and there are many restaurants that serve food from different parts of the world. Some popular ethnic cuisines include Mexican, Chinese, Japanese, and Korean.

- Barbecue: Washington, D.C. is also known for its barbecue restaurants. Many of these restaurants serve pulled pork, ribs, and brisket.

- Donuts: Washington, D.C. is home to the famous “Donut Happy” which is a famous donut shop with a variety of flavors and toppings.

- Street Food: There are also many street food options in Washington, D.C. such as food trucks and stands that serve food such as falafel, kebabs, and tacos.

These are just a few examples of the many types of food that are available in Washington, D.C. There is something for everyone in this city when it comes to food.

Washington, D.C. is a diverse city with a wide range of cuisines to choose from. Here are some popular foods that you can try when you visit:

1. Soul food: This style of cuisine originated in the southern United States and is known for its bold flavors and hearty portions. Some popular soul food dishes in Washington, D.C. include fried chicken, collard greens, and macaroni and cheese.

2. Ethiopian cuisine: Ethiopian food is known for its spicy and flavorful dishes, and it is a popular choice in Washington, D.C. Some popular dishes include injera (a sourdough flatbread), wat (a spicy vegetable stew), and kitfo (a spicy meat dish).

3. Seafood: Washington, D.C. is located near the Chesapeake Bay and has easy access to fresh seafood. Some popular seafood dishes include crab cakes, oysters, and seafood pasta.

4. BBQ: Washington, D.C. has a thriving barbecue scene, with many restaurants serving up slow-cooked meats and sides. Some popular BBQ spots include Ben’s Chili Bowl, Pete’s Soul Vegetarian, and Smoke & Barrel.

5. International cuisine: Washington, D.C. is a hub for international diplomacy, and as a result, it has a diverse culinary scene with many restaurants serving dishes from around the world. Some popular international cuisines include Japanese, Indian, and Mexican.

R3

Can you be more specific about the fifth item above?

Sure, the fifth item I mentioned, “Barbecue,” refers to a style of cooking and food that originated in the southern United States. Barbecue is usually made with meat that is slow-cooked over an open flame or heated coals. The meat is often marinated and then sauced, and it is typically served with a variety of sides such as coleslaw, baked beans, and corn on the cob. Barbecue is a popular cuisine in Washington, D.C. and many of the restaurants serve pulled pork, ribs, and brisket. Some of the most popular barbecue restaurants in Washington, D.C. include:

- The Rib Pit

- The Smokehouse

- The BBQ Joint

- The Salt Line

- The Sovereign

These are just a few examples of the many barbecue restaurants in Washington, D.C. that offer delicious and flavorful barbecue dishes.

Sure! Here are some specific recommendations for Mexican food in Washington, D.C.:

1. Margarita’s - This popular Mexican restaurant in Adams Morgan has been serving up authentic Mexican cuisine for over 30 years. Some of their most popular dishes include their tacos al pastor (grilled marinated pork), enchiladas (meat or cheese filled tortillas), and chiles rellenos (stuffed peppers).

2. El Comal - This cozy little restaurant in the Columbia Heights neighborhood has been serving up traditional Mexican cuisine for over 20 years. Some of their most popular dishes include their mole (a rich, spicy sauce made from a blend of chiles, spices, and chocolate), tacos de carnitas (crispy pork tacos), and chiles rellenos de queso (cheese stuffed peppers).

3. Casa Oaxaca - This upscale Mexican restaurant in the Shaw neighborhood has a menu that features a wide range of traditional and modern dishes. Some of their most popular dishes include their ceviche (raw fish marinated in lime juice and spices), mole negro (a rich, dark mole sauce), and chiles en nogada (a spicy dish made with poblano peppers, walnuts, and honey).

4. Taco King - This popular taco chain has several locations in the Washington, D.C. area, including one in the Adams Morgan neighborhood. They offer a wide range of tacos, burritos, and other Mexican dishes at affordable prices. Some of their most popular items include their carne asada tacos, chicken tacos, and quesadillas.

5. Los Tacos No. 1 - This small, casual taco stand in the Columbia Heights neighborhood has a menu that features a wide range of tacos, burritos, and other Mexican dishes. Some of their most popular items include their adobada tacos (marinated pork tacos), al pastor tacos (grilled marinated pork tacos), and chicken tacos.

+
+

Experiment Details

+

Our dataset comes from real conversation history between humans and ChatGPT. We collect 183 English conversations with a total of 447 rounds of chats, where the original conversation history is already multi-round. For single-round NLL evaluation, we break a multi-round conversation into multiple single-round text pairs. For example, with a conversation history

+
Q1, A1
+
+Q2, A2
+
+Q3, A3
+
+
+

where Q stands for Question, A stands for answer, we can get three single-round examples:

+
Context: Q1
+
+Output: A1
+
+Context: Q1, A1, Q2
+
+Output: A2
+
+Context: Q1, A1, Q2, A2, Q3
+
+Output: A3
+
+
+
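A small illustrative sketch (not LMFlow's actual preprocessing code) of how such a multi-round history can be flattened into single-round context/output pairs:

def split_multi_round(conversation):
    """conversation: list of (question, answer) pairs from one dialogue."""
    examples, history = [], []
    for question, answer in conversation:
        examples.append({"context": history + [question], "output": answer})
        history += [question, answer]
    return examples

# [("Q1", "A1"), ("Q2", "A2"), ("Q3", "A3")] yields contexts
# [Q1], [Q1, A1, Q2] and [Q1, A1, Q2, A2, Q3] with outputs A1, A2, A3.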
+
+

CommonSense Performance#

+

Another important aspect of an LLM model is its commonsense ability, where a model should acquire a certain level of factual knowledge and utilize it properly under different scenarios. Regarding this aspect, we found:

+
    +
  1. Finetuning on chat dataset results in commonsense degradation.

  2. +
  3. Our Robin-Chat-7b model still achieves a competitive performance.

  4. +
+

Table 5: Accuracy results in commonsense QA benchmarks

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

winogrande

boolq

arc_easy

hellaswag

piqa

obqa

arc_c

Average

Bloom-7.1b

64.4

62.9

65.0

59.6

73.6

35.8

33.4

56.4

Bloom-7.1b-chat

60.3

56.8

61.3

58.7

72.7

37.8

38.7

55.2

Llama-7b

67.9

73.2

67.3

73.0

78.4

42.4

41.4

62.7

Vicuna-7b

63.7

77.4

63.1

68.8

76.3

39.6

38.7

61.1

Vicuna-13b

66.2

79.9

64.7

73.0

77.6

41.6

40.4

63.3

Robin-Chat-7b

64.7

75.2

69.8

72.4

76.6

39.0

42.9

62.9

+
+

Table 6: NLL results in corpus of commonsense QA benchmarks

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

winogrande

boolq

arc_easy

hellaswag

piqa

obqa

arc_c

Average

Bloom-7.1b

96.0

254

89

266

147

69

106.5

146.7

Bloom-7.1b-chat

85.0

215

81.5

237

130

62.5

96

129.5

Llama-7b

79.5

167

71.5

214

121

58

85

113.7

Vicuna-7b

97.0

219

89

258

151

72

105

141.6

Vicuna-13b

95.5

207

88.5

262

149

73

105

140.0

Robin-Chat-7b

89.0

200

81

240

139

65.5

96.5

130.1

+
+

It is surprising to find that finetuning actually leads to commonsense degradation. A more well-known consequence of this issue is hallucination, where models tend to give plausible but factually incorrect or irrelevant answers. One explanation of this phenomenon is that LLMs “forget” the pretrained knowledge during finetuning when attempting to fit the finetune dataset.

+

As one can see from the table above, both Vicuna and Robin suffer from this commonsense degradation issue; however, Robin suffers less. We conjecture that this improvement comes from finetuning on a smaller but higher-quality dataset. Vicuna uses ShareGPT data of ~900MB and was finetuned for 3 epochs, while Robin uses data from multiple sources totaling ~600MB and was finetuned for only 2 epochs. This suggests that data quality plays a critical role in the finetuning stage.

+

This improvement lets Robin give Vicuna-13b a run for its money. The following examples show that Robin-Chat-7b possesses a level of commonsense comparable to Vicuna-13b’s.

+

Table 7: Use cases of commonsense QA

+
+ + + + + + + + + + + + + + + + + + + +

Question

Vicuna-13b

Robin-Chat-7b

Note

I go to a friend’s home but the door is locked. However, I smell coal gas. Should I break the window?

If you smell coal gas, it is important to get out of the house as quickly as possible and call the fire department or the gas company. Coal gas is a flammable gas that can be dangerous and even deadly if it is not properly ventilated. If the door is locked, you should not break the window because it could cause a fire or explosion. Instead, try to find another way to escape the house or call for help. If you cannot leave the house safely, try to stay calm and wait for help to arrive.

No, it is not recommended to break a window in this situation. Breaking a window can be dangerous and can cause injury or damage to yourself or others. Additionally, it is illegal to break a window to enter a property without permission. Instead, you should try to find another way to enter the home, such as by using a key or by calling the homeowner or a locksmith for assistance. If you are unable to enter the home, you should call the emergency services or the police for assistance. In the meantime, it is important to avoid using any sources of heat, such as a lighter or matches, as these can cause a gas leak or explosion. If you smell gas, you should evacuate the area immediately and call the emergency services or the gas company for assistance.

Breaking the window helps the ventilation and will not cause a gas explosion directly. However, it is indeed illegal and may injure yourself when breaking the window.

Can I bring my pet ferret to that beautiful pacific island of USA to attend ICML 2023?

I’m sorry, but I am not aware of the specific policies regarding pets at the ICML 2023 conference or the island you are referring to. It would be best to check with the conference organizers or the local authorities on the island to determine if ferrets are allowed and if there are any necessary procedures or requirements for bringing a pet to the conference.

It is not recommended to bring a pet ferret to the United States, including to attend ICML 2023. Ferrets are not permitted as pets in many areas of the United States, and it is illegal to bring them into the country without proper documentation and licensing. Additionally, ferrets can carry diseases that are not common in humans but can be dangerous to people, such as toxoplasmosis and salmonellosis. It is important to prioritize the health and safety of yourself and others by not bringing a pet ferret to a crowded event like ICML 2023.

ICML 2023 will be held in Hawaii, where ferrets are strictly prohibited as pets because they are potential carriers of the rabies virus.

+
+

Experiment Details

+

Here, we make use of Eleuther AI’s LM evaluation harness repository (EleutherAI/lm-evaluation-harness) to get QA accuracy results. We also evaluate all models’ NLL metrics on their datasets, with their questions as contexts and answers as output sentences.

+
+
+

Instruction Following#

+

The final aspect we evaluate is instruction following, where a model is expected to faithfully obey the user’s instructions, provided the instructions are ethically and legally appropriate. It turns out that:

+
    +
  1. Without finetuning, foundation models do not perform well on instruction following.

  2. +
  3. Finetuning on chat dataset helps improve the performance.

  4. +
  5. Our Robin-Chat is still comparable with Vicuna-13b.

  6. +
+

Table 8: NLL on instruction following chat datasets

+
+ + + + + + + + + + + + + + + + + + + + + + +

gpt4_en_instruction

Alpaca

222

Llama-7b

214

Vicuna-7b

211

Vicuna-13b

206

Robin-Chat-7b

180

+
+

The following examples show that Robin-Chat-7b generates responses similar to Vicuna-13b’s on instruction-following tasks.

+

Table 9: Uses cases of instruction following

+
+ + + + + + + + + + + + + + + + +

Question

Vicuna-13b

Robin-Chat-7b

Can you write quicksort?

Yes, here is a simple implementation of the quicksort algorithm in Python:

def quicksort(arr):

    if len(arr) <= 1:

        return arr

    pivot = arr[len(arr) // 2]

    left = [x for x in arr if x < pivot]

    middle = [x for x in arr if x == pivot]

    right = [x for x in arr if x > pivot]

    return quicksort(left) + middle + quicksort(right)

This implementation uses the middle element of the array as the pivot, and then partitions the array into three sub-arrays: one containing all elements that are less than the pivot, one containing all elements that are equal to the pivot, and one containing all elements that are greater than the pivot. It then recursively applies the quicksort algorithm to the left and right sub-arrays, and returns the concatenation of the sorted left sub-array, the unsorted middle sub-array, and the sorted right sub-array.

This implementation has a time complexity of O(n log n) due to the recursive calls, although it could potentially be faster in practice if the pivot is carefully chosen.

Sure, here is a simple implementation of the quicksort algorithm in Python:

def quicksort(arr):

    if len(arr) <= 1:

        return arr

    pivot = arr[len(arr) // 2]

    left = [x for x in arr if x < pivot]

    middle = [x for x in arr if x == pivot]

    right = [x for x in arr if x > pivot]

    return quicksort(left) + middle + quicksort(right)

This implementation chooses the middle element of the array as the pivot, and then partitions the array into three parts: elements less than the pivot, elements equal to the pivot, and elements greater than the pivot. It then recursively applies the quicksort algorithm to the left and right subarrays, and concatenates the results with the middle subarray to produce the final sorted array.

Note that this implementation uses a single pass through the array, which is the most efficient way to sort an array using quicksort.

Instruction: Find synonyms for the given word. You need to write down how the provided synonyms differ from the original word in terms of meaning, usage, etc. adversity

Synonyms for “adversity” include:

- Difficulty

- Hardship

- Trouble

- Misfortune

- Adversity

- Difficulties

- Hard times

All of these words refer to challenging or unpleasant circumstances or situations. However, “adversity” specifically connotes a difficult or hostile situation that is caused by external factors, such as an enemy or a natural disaster. “Difficulty” and “hardship” can be caused by external or internal factors, and can refer to a wide range of challenges, from mild inconveniences to major setbacks. “Trouble” often connotes a specific type of difficulty or distress, such as financial trouble or legal trouble. “Misfortune” specifically refers to bad luck or ill fate, while “hard times” refers to a general period of economic or social distress.

1. Hardship

2. Struggle

3. Difficulty

4. Oppression

5. Trial

6. Affliction

7. Distress

8. Calamity

9. Tribulation

10. Hardship

Adversity refers to difficult or challenging circumstances or conditions. Hardship, struggle, difficulty, oppression, trial, affliction, distress, and calamity are all words that can be used to describe adversity, but they often imply a more specific or intense situation. For example, hardship can refer to a general difficulty or challenge, while struggle implies a more intense battle or conflict. Difficulty and oppression imply a more specific obstacle or injustice, while trial and affliction suggest a more specific test or suffering. Trial can also refer to a legal proceeding, while tribulation implies a more general time of suffering or persecution.

+
+

Experiment Details

+

To evaluate models’ instruction-following ability, we collected GPT-4 instruction data from the project “Instruction Tuning with GPT-4”, which provides a large amount of instruction-tuning data labeled by GPT-4. We sampled 1,000 English examples and manually filtered out those with the following issues, leaving 767 effective samples after filtering:

+
  1. Long response with too many nonsense words

  2. Incomplete input texts

  3. Specific domains involving chemistry/biology, where most LLM models do not possess the knowledge and always fail
+
+
+

Conclusion#

+

In this article, we introduce LMFlow’s evaluation framework, which uses the NLL metric to reflect LLM models’ abilities. NLL provides a good metric for evaluating different aspects of an LLM. According to our evaluation results, Robin-7b achieves performance on par with Vicuna-13b. Since our Robin-7b model is finetuned with different sources of data instead of ShareGPT only, this shows that Vicuna can be further improved or surpassed with smaller models and better datasets.

+

The checkpoint of Robin-7b is now available for engineers and researchers to download and use (OptimalScale/LMFlow). Its effectiveness demonstrates that a multi-aspect evaluation is indeed essential to the development of LLMs.

+
+
+

References#

+

Vicuna Chatbot Arena: https://chat.lmsys.org/?arena

+

lm-evaluation-harness: EleutherAI/lm-evaluation-harness

+

LMFlow: OptimalScale/LMFlow

+
+
+ + \ No newline at end of file diff --git a/blogs/index.html b/blogs/index.html new file mode 100644 index 000000000..a2578af7a --- /dev/null +++ b/blogs/index.html @@ -0,0 +1,525 @@ + + + + + + + + + + + Blogs — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + \ No newline at end of file diff --git a/examples/DATASETS.html b/examples/DATASETS.html new file mode 100644 index 000000000..2c9c36b38 --- /dev/null +++ b/examples/DATASETS.html @@ -0,0 +1,986 @@ + + + + + + + + + + + Dataset — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Dataset#

+ +

We provide several available datasets under data. You may download them all by running:

+
cd data && ./download.sh all && cd -
+
+
+

You can replace all with a specific dataset name to only download that dataset (e.g. ./download.sh alpaca).

+

Customized datasets are strongly encouraged, since this way users can apply their own prompt engineering techniques over various source datasets. As long as the generated dataset follows the format below, it can be accepted as the input of our pipelines :hugs:

+
+

Dataset Format in General#

+

To specify the input for model finetuning, users can provide a list of .json files under a specified dataset directory. For example,

+
|- path_to_dataset
+  |- data_1.json
+  |- data_2.json
+  |- another_data.json
+  |- ...
+
+
+

For inference, we currently only support a single .json file.

+

Each json file shall have the following format (three instances with four keys +for example),

+
{
+  "type": "TYPE",
+  "instances": [
+    {
+        "KEY_1": "VALUE_1.1",
+        "KEY_2": "VALUE_1.2",
+        "KEY_3": "VALUE_1.3",
+        "KEY_4": "VALUE_1.4",
+    },
+    {
+        "KEY_1": "VALUE_2.1",
+        "KEY_2": "VALUE_2.2",
+        "KEY_3": "VALUE_2.3",
+        "KEY_4": "VALUE_2.4",
+    },
+    {
+        "KEY_1": "VALUE_3.1",
+        "KEY_2": "VALUE_3.2",
+        "KEY_3": "VALUE_3.3",
+        "KEY_4": "VALUE_3.4",
+    },
+  ]
+}
+
+
+

where the TYPE indicates the dataset type and defines the set of keys { KEY_1, KEY_2, ... } and their corresponding interpretations. The supported types are listed as follows.
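Before going through the individual types, here is a quick illustration (this helper is not part of LMFlow; it only mirrors the format described above) of how such a dataset directory can be loaded and sanity-checked in Python:

import json
from pathlib import Path

def load_lmflow_dataset(dataset_dir):
    """Load every .json file under dataset_dir and check the common format."""
    dataset_type, instances = None, []
    for path in sorted(Path(dataset_dir).glob("*.json")):
        data = json.loads(path.read_text())
        assert "type" in data and "instances" in data, f"bad format: {path}"
        dataset_type = dataset_type or data["type"]
        assert data["type"] == dataset_type, "all files must share the same type"
        instances.extend(data["instances"])
    return dataset_type, instances

# dataset_type, instances = load_lmflow_dataset("path_to_dataset")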

+
+
+

Supported Dataset and Detailed Formats#

+
+

Conversation#

+
+

Data Format#

+

Conversational data are commonly used in the SFT process. We currently support conversational data in ShareGPT format:

+
+ +A conversation dataset
+
{
+  "type": "conversation",
+  "instances": [
+    {
+      "conversation_id": "CONVERSATION_ID",
+      "system": "SYSTEM_PROPMT",
+      "tools": ["TOOL_DESCRIPTION_1","TOOL_DESCRIPTION_2","TOOL_DESCRIPTION_X"],
+      "messages": [
+        {
+            "role": "user",
+            "content": "USER_INPUT_1"
+        },
+        {
+            "role": "assistant",
+            "content": "ASSISTANT_RESPONSE_1"
+        },
+        {
+            "role": "user",
+            "content": "USER_INPUT_2"
+        },
+        {
+            "role": "assistant",
+            "content": "ASSISTANT_RESPONSE_2"
+        }
+      ]
+    },
+    {
+      "conversation_id": "CONVERSATION_ID",
+      "system": "SYSTEM_PROPMT",
+      "tools": ["TOOL_DESCRIPTION_1"],
+      "messages": [
+        {
+            "role": "user",
+            "content": "USER_INPUT_1"
+        },
+        {
+            "role": "assistant",
+            "content": "ASSISTANT_RESPONSE_1"
+        }
+      ]
+    }
+  ]
+}
+
+
+
+

Data types:

+
  • conversation_id: Optional[Any]. An identifier for the conversation. conversation_id is only for convenience of tracking the conversation and will not be used in the pipeline.

  • system: Optional[string]. A system prompt that is used to start the conversation.

  • tools: Optional[List[string]]. A list of tools that are used in the conversation.

  • messages: List[Dict]. A list of messages in the conversation. Each message contains the following fields:

    • role: string. The role of the message. It can be either user or assistant.

    • content: string. The content of the message.
+

We are working on supporting customized message keys and role names. Please stay tuned.

+
+

Tips:

+
  • Please make sure the messages satisfy the following (a sketch of these checks is shown after this list):

    1. They start with a user message.

    2. They are in the correct order. The pipeline will not check the order of the messages.

    3. They come in pairs of user and assistant (i.e., the length of the messages should be even). If the conversation ends with the user, the pipeline will trim the last user message.

    4. Their contents are not empty. If the content is empty, the pipeline will add a space to it.
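The following Python sketch illustrates those checks; it is only an illustration of the rules above, not LMFlow's actual preprocessing code:

def normalize_messages(messages):
    """Apply the checks described in the tips above (illustrative only)."""
    for message in messages:
        if not message.get("content"):
            message["content"] = " "      # empty contents get a single space
    if messages and messages[-1]["role"] == "user":
        messages = messages[:-1]          # trim a trailing user message
    assert all(
        m["role"] == ("user" if i % 2 == 0 else "assistant")
        for i, m in enumerate(messages)
    ), "messages should alternate user/assistant, starting with a user turn"
    return messages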
+
+

Conversation Template#

+

Conversations should be formatted before feeding into the model. As of now, we’ve preset the conversation template for the following models:

+

Template Name

Filled Example

Detailed Template

chatglm3

[gMASK]sop<|system|>
You are a chatbot developed by LMFlow team.<|user|>
Who are you?<|assistant|>
I am a chatbot developed by LMFlow team.<|user|>
How old are you?<|assistant|>
I don't age like humans do. I exist as a piece of software, so I don't have a concept of age in the traditional sense.

Link

chatml

<|im_start|>system
You are a chatbot developed by LMFlow team.<|im_end|>
<|im_start|>user
Who are you?<|im_end|>
<|im_start|>assistant
I am a chatbot developed by LMFlow team.<|im_end|>
<|im_start|>user
How old are you?<|im_end|>
<|im_start|>assistant
I don't age like humans do. I exist as a piece of software, so I don't have a concept of age in the traditional sense.<|im_end|>

Link

deepseek

<|begin▁of▁sentence|>You are a chatbot developed by LMFlow team.

User: Who are you?

Assistant: I am a chatbot developed by LMFlow team.<|end▁of▁sentence|>User: How old are you?

Assistant: I don't age like humans do. I exist as a piece of software, so I don't have a concept of age in the traditional sense.<|end▁of▁sentence|>

Link

gemma

<bos>You are a chatbot developed by LMFlow team.<start_of_turn>user
Who are you?<end_of_turn>
<start_of_turn>model
I am a chatbot developed by LMFlow team.<end_of_turn>
<start_of_turn>user
How old are you?<end_of_turn>
<start_of_turn>model
I don't age like humans do. I exist as a piece of software, so I don't have a concept of age in the traditional sense.<end_of_turn>

Link

internlm2

<s><|im_start|>system
You are a chatbot developed by LMFlow team.<|im_end|>
<|im_start|>user
Who are you?<|im_end|>
<|im_start|>assistant
I am a chatbot developed by LMFlow team.<|im_end|>
<|im_start|>user
How old are you?<|im_end|>
<|im_start|>assistant
I don't age like humans do. I exist as a piece of software, so I don't have a concept of age in the traditional sense.<|im_end|>

Link

llama3

<|begin_of_text|><|start_header_id|>system<|end_header_id|>

You are a chatbot developed by LMFlow team.<|eot_id|><|start_header_id|>user<|end_header_id|>

Who are you?<|eot_id|><|start_header_id|>assistant<|end_header_id|>

I am a chatbot developed by LMFlow team.<|eot_id|><|start_header_id|>user<|end_header_id|>

How old are you?<|eot_id|><|start_header_id|>assistant<|end_header_id|>

I don't age like humans do. I exist as a piece of software, so I don't have a concept of age in the traditional sense.<|eot_id|>

Link

llama2

<s>[INST] <<SYS>>
You are a chatbot developed by LMFlow team.
<</SYS>>

Who are you? [/INST] I am a chatbot developed by LMFlow team.</s><s>[INST] How old are you? [/INST] I don't age like humans do. I exist as a piece of software, so I don't have a concept of age in the traditional sense.</s>

Link

phi3

<s><|system|>
You are a chatbot developed by LMFlow team.<|end|>
<|user|>
Who are you?<|end|>
<|assistant|>
I am a chatbot developed by LMFlow team.<|end|>
<|user|>
How old are you?<|end|>
<|assistant|>
I don't age like humans do. I exist as a piece of software, so I don't have a concept of age in the traditional sense.<|end|>
<|endoftext|>

Link

qwen2

<|im_start|>system
You are a chatbot developed by LMFlow team.<|im_end|>
<|im_start|>user
Who are you?<|im_end|>
<|im_start|>assistant
I am a chatbot developed by LMFlow team.<|im_end|>
<|im_start|>user
How old are you?<|im_end|>
<|im_start|>assistant
I don't age like humans do. I exist as a piece of software, so I don't have a concept of age in the traditional sense.<|im_end|>

Link

yi

Same as chatml

Link

yi1_5

You are a chatbot developed by LMFlow team.<|im_start|>user
Who are you?<|im_end|>
<|im_start|>assistant
I am a chatbot developed by LMFlow team.<|im_end|>
<|im_start|>user
How old are you?<|im_end|>
<|im_start|>assistant
I don't age like humans do. I exist as a piece of software, so I don't have a concept of age in the traditional sense.<|im_end|>

Link

zephyr

<|system|>
You are a chatbot developed by LMFlow team.</s>
<|user|>
Who are you?</s>
<|assistant|>
I am a chatbot developed by LMFlow team.</s>
<|user|>
How old are you?</s>
<|assistant|>
I don't age like humans do. I exist as a piece of software, so I don't have a concept of age in the traditional sense.</s>

Link

+
+

Pass the template name to the --conversation_template argument to apply the corresponding conversation template:

+
# scripts/run_finetune.sh
+# ...
+deepspeed ${deepspeed_args} \
+  examples/finetune.py \
+    --model_name_or_path meta-llama/Llama-2-7b-chat-hf \
+    --dataset_path ${dataset_path} \
+    --conversation_template llama2 \
+# ...
+
+
+
+

Formatted Dataset

+

For datasets where system prompts, tool prompts, and templates are already applied (like the one below), users can run the finetune shell script by passing empty or empty_no_special_tokens to the --conversation_template argument. The empty template adds a bos token to the beginning and an eos token to the end of every round of conversation. empty_no_special_tokens does not add any special tokens; it just concatenates the user and assistant messages.
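Roughly speaking (the actual special tokens depend on the tokenizer of the model being finetuned; the strings below are only an illustration), the two options behave like this:

# One round of an already-formatted conversation
user_text = "[INST] <<SYS>>\nYou are a helpful assistant.\n<</SYS>>\n\nHello! [/INST]"
assistant_text = "Hi, how are you?"

# empty: bos/eos wrapped around every round of conversation
empty_round = "<s>" + user_text + assistant_text + "</s>"

# empty_no_special_tokens: plain concatenation, no special tokens added
empty_no_special_tokens_round = user_text + assistant_text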

+
+ +A formatted dataset
+
{
+  "type": "conversation",
+  "instances": [
+    {
+      "messages": [
+        {
+            "role": "user",
+            "content": "[INST] <<SYS>>\nYou are a helpful assistant.\n<</SYS>>\n\nHello! [/INST]"
+        },
+        {
+            "role": "assistant",
+            "content": "Hi, how are you?"
+        },
+        {
+            "role": "user",
+            "content": "[INST] Good. [/INST]"
+        },
+        {
+            "role": "assistant",
+            "content": "Glad to hear that."
+        }
+      ]
+    },
+    {
+      "messages": [
+        {
+            "role": "user",
+            "content": "[INST] <<SYS>>\nYou are a helpful assistant.\n<</SYS>>\n\nWhat's the weather like now? [/INST]"
+        },
+        {
+            "role": "assistant",
+            "content": "I'm sorry for any confusion, but as an AI, I don't have access to real-time data such as current weather conditions."
+        }
+      ]
+    }
+  ]
+}
+
+
+
+
+
+
+

Customize Conversation Template#

+

Please refer to the Customize Conversation Template for more details.

+
+
+
+

TextOnly#

+

This is the most common dataset type, which only contains raw texts in each +sample. This type of dataset can be used as the training set for text decoder +models, or the input of decoder models / encoder-decoder models. Its format is +as follows (three instances for example),

+
+ +A textonly dataset
+
{
+  "type": "text_only",
+  "instances": [
+    {  "text": "SAMPLE_TEXT_1" },
+    {  "text": "SAMPLE_TEXT_2" },
+    {  "text": "SAMPLE_TEXT_3" },
+  ]
+}
+
+
+
+

For example, data/example_dataset/train/train_50.json has the above format.

+
+
+

Text2Text#

+

This is the dataset type mostly used for inferencing, which contains a pair of +texts in each sample. This type of dataset can be used as the training set for +text encoder-decoder models, or question-answer pair for evaluating model +inferences. Its format is as follows (three instances for example):

+
+ +A text2text dataset
+
{
+  "type": "text2text",
+  "instances": [
+    {
+        "input": "SAMPLE_INPUT_1",
+        "output": "SAMPLE_OUTPUT_1",
+    },
+    {
+        "input": "SAMPLE_INPUT_2",
+        "output": "SAMPLE_OUTPUT_2",
+    },
+    {
+        "input": "SAMPLE_INPUT_3",
+        "output": "SAMPLE_OUTPUT_3",
+    },
+  ]
+}
+
+
+
+

For example, data/example_dataset/test/test_13.json has the above format.

+
+
+

Paired Conversation#

+
+

Work in Progress

+

We are working on paired conversation dataset and will update it soon.

+
+

This type of dataset is commonly used for alignment tasks such as reward modeling, Direct Preference Optimization (DPO), etc. For requirements on the conversations, please refer to conversation data.

+
+ +A paired conversation dataset
+
{
+    "type": "paired_conversation",
+    "instances": [
+        {
+            "chosen": {
+                "conversation_id": "CONVERSATION_ID",
+                "system": "SYSTEM_PROPMT",
+                "tools": ["TOOL_DESCRIPTION_1","TOOL_DESCRIPTION_2","TOOL_DESCRIPTION_3"],
+                "messages": [
+                    {
+                        "role": "user",
+                        "content": "USER_INPUT_1"
+                    },
+                    {
+                        "role": "assistant",
+                        "content": "ASSISTANT_RESPONSE_1_GOOD"
+                    },
+                    {
+                        "role": "user",
+                        "content": "USER_INPUT_2"
+                    },
+                    {
+                        "role": "assistant",
+                        "content": "ASSISTANT_RESPONSE_2_GOOD"
+                    }
+                ]
+            },
+            "rejected": {
+                "conversation_id": "CONVERSATION_ID",
+                "system": "SYSTEM_PROPMT",
+                "tools": ["TOOL_DESCRIPTION_1","TOOL_DESCRIPTION_2","TOOL_DESCRIPTION_3"],
+                "messages": [
+                    {
+                        "role": "user",
+                        "content": "USER_INPUT_1"
+                    },
+                    {
+                        "role": "assistant",
+                        "content": "ASSISTANT_RESPONSE_1_BAD"
+                    },
+                    {
+                        "role": "user",
+                        "content": "USER_INPUT_2"
+                    },
+                    {
+                        "role": "assistant",
+                        "content": "ASSISTANT_RESPONSE_2_BAD"
+                    }
+                ]
+            }
+        },
+        {
+            "chosen": {
+                "conversation_id": "CONVERSATION_ID",
+                "system": "SYSTEM_PROPMT",
+                "tools": ["TOOL_DESCRIPTION_1"],
+                "messages": [
+                    {
+                        "role": "user",
+                        "content": "USER_INPUT_1"
+                    },
+                    {
+                        "role": "assistant",
+                        "content": "ASSISTANT_RESPONSE_1_GOOD"
+                    }
+                ]
+            },
+            "rejected": {
+                "conversation_id": "CONVERSATION_ID",
+                "system": "SYSTEM_PROPMT",
+                "tools": ["TOOL_DESCRIPTION_1"],
+                "messages": [
+                    {
+                        "role": "user",
+                        "content": "USER_INPUT_1"
+                    },
+                    {
+                        "role": "assistant",
+                        "content": "ASSISTANT_RESPONSE_1_BAD"
+                    }
+                ]
+            }
+        }
+    ]
+}
+
+
+
+
+
+
+ + \ No newline at end of file diff --git a/examples/TASK_GUIDE.html b/examples/TASK_GUIDE.html new file mode 100644 index 000000000..8596b645c --- /dev/null +++ b/examples/TASK_GUIDE.html @@ -0,0 +1,657 @@ + + + + + + + + + + + LMFlow Benchmark Guide — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+

LMFlow Benchmark Guide#

+

We support two ways to add evaluation settings in our repo, NLL Task Setting and LM-Evaluation Task Setting. Below are the details of them:

+
+
+

1. NLL Task Setting#

+

Users can easily create new tasks and evaluate their datasets on the provided nll (Negative Log Likelihood) metric.
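Conceptually, nll here is the average negative log-likelihood the model assigns to the reference answer given the prompt. A minimal sketch with Hugging Face transformers (an illustration only, not LMFlow's actual benchmarking code) looks like:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

def answer_nll(model, tokenizer, prompt, answer):
    """Mean negative log-likelihood of the answer tokens, given the prompt."""
    prompt_ids = tokenizer(prompt, return_tensors="pt").input_ids
    full_ids = tokenizer(prompt + answer, return_tensors="pt").input_ids
    labels = full_ids.clone()
    labels[:, : prompt_ids.shape[1]] = -100   # ignore the prompt positions
    with torch.no_grad():
        loss = model(full_ids, labels=labels).loss
    return loss.item()

model = AutoModelForCausalLM.from_pretrained("gpt2")
tokenizer = AutoTokenizer.from_pretrained("gpt2")
print(answer_nll(model, tokenizer, "###Human: Who are you?###Assistant:", " A chatbot."))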

+
+

Setup#

+

Fork the main repo, clone it, and create a new branch with the name of +your task, and install the following:

+
# After forking...
+git clone https://github.com/<YOUR-USERNAME>/LMFlow.git
+cd LMFlow
+git checkout -b <TASK-NAME>
+conda create -n lmflow python=3.9 -y
+conda activate lmflow
+conda install mpi4py
+pip install -e .
+
+
+
+
+

Create Your Task Dataset File#

+

We provide several available datasets under data after running

+
cd data && ./download.sh && cd -
+
+
+

You can refer to some given evaluation dataset files and create your own. +Also, you may refer to our guide on +DATASET.

+

In this step, you will need to decide your answer type like text2text +or text_only (Notice that the current nll implementation only supports these +two answer types). We will note the chosen answer type as <ANSWER_TYPE>.

+

After preparing your own DATASET file, you can put it under data dir +and make a TASK dir.

+
mkdir <TASK>
+mv <DATASET> <TASK>
+
+
+
+
+

Task Registration#

+

Note the path of your dataset, data/<TASK>/<DATASET>.

+

Open the file examples/benchmarking.py, add your task’s info into +LOCAL_DATSET_GROUP_MAP, LOCAL_DATSET_MAP, LOCAL_DATSET_ANSWERTYPE_MAP

+

In LOCAL_DATSET_MAP, you will need to specify your DATASET files’ path:

+
LOCAL_DATSET_MAP ={
+    "...":"...",
+    "<TASK>":"data/<TASK>/<DATASET>",
+}
+
+
+

In LOCAL_DATSET_ANSWERTYPE_MAP, you will need to specify your task’s +<ANSWER_TYPE>:

+
LOCAL_DATSET_ANSWERTYPE_MAP ={
+    "...":"...",
+    "<TASK>":"<ANSWER_TYPE>,
+}
+
+
+

If you only have one task, you can add key-value pair like "<TASK>":"<TASK>" +in LOCAL_DATSET_GROUP_MAP:

+
LOCAL_DATSET_GROUP_MAP ={
+    "...":"...",
+    "<TASK>":"<TASK>",
+}
+
+
+

If you want to combine several tasks, you may first specify a combination name <TASK_COMBINATION> and add a key-value pair like "<TASK_COMBINATION>":"<TASK_1>,<TASK_2>,.." in LOCAL_DATSET_GROUP_MAP.

+

Remember to separate TASK by ,:

+
LOCAL_DATSET_GROUP_MAP ={
+    "...":"...",
+    "<TASK_COMBINATION>":"<TASK_1>,<TASK_2>,..",
+}
+
+
+

After finishing changing these items, you can run your own <TASK> like:

+
deepspeed examples/benchmarking.py \
+  --answer_type <ANSWER_TYPE> \
+  --use_ram_optimized_load False \
+  --model_name_or_path ${model_name} \
+  --dataset_name data/<TASK>/<DATASET>\
+  --deepspeed examples/ds_config.json \
+  --metric nll \
+  --prompt_structure "###Human: {input}###Assistant:" \
+  | tee ${log_dir}/train.log \
+  2> ${log_dir}/train.err 
+
+
+
+
+
+

2. LM-Evaluation Task Setting#

+

We integrate EleutherAI/lm-evaluation-harness into benchmarking.py by directly executing its evaluation commands. Users can also add their own evaluation by simply changing two items in LM_EVAL_DATASET_MAP of examples/benchmarking.py.

+

Please refer to Eleuther’s +task-table +to get exact <TASK> name.

+

Similarly, to combine several tasks, you may first specify a combination name <TASK_COMBINATION> and add a key-value pair like "<TASK_COMBINATION>":"<TASK_1>,<TASK_2>,.." in LM_EVAL_DATASET_MAP.

+

Also, remember to separate TASK by ,:

+
LM_EVAL_DATASET_MAP ={
+    "...":"...",
+    "<TASK_COMBINATION>":"<TASK_1>,<TASK_2>,..",
+}
+
+
+
+ + \ No newline at end of file diff --git a/examples/checkpoints.html b/examples/checkpoints.html new file mode 100644 index 000000000..e18de5559 --- /dev/null +++ b/examples/checkpoints.html @@ -0,0 +1,564 @@ + + + + + + + + + + + Checkpoints — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+

Checkpoints#

+

In general, you can directly load from checkpoints by using --model_name_or_path. However, the LLaMA case is slightly different due to the copyright issue.

+
+

LLaMA Checkpoint#

+
  1. First, you need to get access to the LLaMA model from facebookresearch/llama. Download the official checkpoints and save them into ${llama-path}.

  2. Second, convert the official checkpoints ${llama-path} to HuggingFace supported checkpoints ${llama-hf-path} by running

     python ./scripts/convert_llama_weights_to_hf.py --input_dir ${llama-path} --model_size 7B --output_dir ${llama-hf-path}/llama-7b-hf

  3. Then you are good to go by setting the checkpoint path to ${llama-hf-path}/llama-7b-hf. Enjoy it!

  4. (optional) Now you have the original llama-7b-hf pretrained model. With
+
cd output_models && ./download.sh all && cd -
+
+
+

You can obtain the model difference finetuned by us. Then, in a way similar to ./scripts/run_evaluation_with_lora.sh, run

+
CUDA_VISIBLE_DEVICES=0 \
+    deepspeed examples/evaluate.py \
+    --answer_type text \
+    --model_name_or_path ${llama-hf-path}/llama-7b-hf \
+    --lora_model_path output_models/${llama-model-diff-path} \
+    --dataset_path data/alpaca/test \
+    --prompt_structure "Input: {input}" \
+    --deepspeed examples/ds_config.json
+
+
+

You can now evaluate with the finetuned llama model.

+
+
+ + \ No newline at end of file diff --git a/examples/customize_conversation_template.html b/examples/customize_conversation_template.html new file mode 100644 index 000000000..f2e25d3e1 --- /dev/null +++ b/examples/customize_conversation_template.html @@ -0,0 +1,610 @@ + + + + + + + + + + + Customize Conversation Template — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+

Customize Conversation Template#

+
+

For beginners: Why template?
Almost all LLMs today do a simple job: predict the next “word”. To make the interaction between user and model smoother, developers use a trick: they add special “words” to the input text (at the back-end, thus invisible to the user when using services like ChatGPT) to “tell” the model what the user said before, and to ask the model to respond like an assistant. These “hidden words” are called the “template”.

+
+

We provide the flexibility to customize the conversation template. You can customize your own conversation template by following the steps below:

+
+

1. Decompose your conversations#

+

Say you want to make the conversations between user and assistant look like:

+
<bos>System:
+You are a chatbot developed by LMFlow team.
+
+User:
+Who are you?
+
+Assistant:
+I am a chatbot developed by LMFlow team.<eos>
+
+User:
+How old are you?
+
+Assistant:
+I don't age like humans do. I exist as a piece of software, so I don't have a concept of age in the traditional sense.<eos>
+
+
+

It is easy to abstract the format for each message:

+
  • System message: System:\n{{content}}\n\n

  • User message: User:\n{{content}}\n\n

  • Assistant message: Assistant:\n{{content}}\n\n<eos>
+

Also, we have a bos token at the beginning of the conversation session.

+
+
+

2. Choose proper Formatter#

+

Recall the requirements for a conversation dataset:

+
+
  • system: Optional[string].

  • tools: Optional[List[string]].

  • messages: List[Dict].

    • role: string.

    • content: string.
+

System message, user message, and assistant message are strings, thus we can use StringFormatter for them.
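As a rough illustration of what such a formatter does (a simplification; LMFlow's actual classes appear in the next step), filling a message into its per-role template is essentially a string substitution:

def format_message(template, content):
    """Substitute the message content into a fixed per-role template."""
    return template.replace("{{content}}", content)

print(format_message("User:\n{{content}}\n\n", "Who are you?"))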

+
+
+

3. Build the template#

+

All preset templates are located at src/lmflow/utils/conversation_template.

+

Within the template file, define your own template like:

+
from .base import StringFormatter, TemplateComponent, ConversationTemplate
+
+
+YOUR_TEMPLATE = ConversationTemplate(
+    template_name='your_template_name',
+    user_formatter=StringFormatter(
+        template=[
+            TemplateComponent(type='string', content='User:\n{{content}}\n\n')
+        ]
+    ),
+    assistant_formatter=StringFormatter(
+        template=[
+            TemplateComponent(type='string', content='Assistant:\n{{content}}\n\n'),
+            TemplateComponent(type='token', content='eos_token') # this will add the eos token at the end of every assistant message
+            # please refer to the docstring of the `TemplateComponent` class to 
+            # see the difference between different types of components.
+        ]
+    ),
+    system_formatter=StringFormatter(
+        template=[
+            TemplateComponent(type='string', content='System:\n{{content}}\n\n')
+        ]
+    )
+    # For models that has ONLY ONE bos token at the beginning of 
+    # a conversation session (not a conversation pair), user can
+    # specify a special starter to add that starter to the very
+    # beginning of the conversation session. 
+    # eg:
+    #   llama-2: <s> and </s> at every pair of conversation 
+    #   v.s.
+    #   llama-3: <|begin_of_text|> only at the beginning of a session
+    special_starter=TemplateComponent(type='token', content='bos_token'),
+
+    # Similar to the special starter... (just for illustration, commented out 
+    # since it is not necessary for our purposed template above)
+    # special_stopper=TemplateComponent(type='token', content='eos_token')
+)
+
+
+

Feel free to create your own template by inheriting the ConversationTemplate class. Llama-2 vs. llama-3 would be good examples to refer to.

+
+
+

4. Register your template#

+

After defining your own template, you need to register it in the src/lmflow/utils/conversation_template/__init__.py file.

+
# ...
+from .your_template_file import YOUR_TEMPLATE
+
+
+PRESET_TEMPLATES = {
+    #...
+    'your_template_name': YOUR_TEMPLATE,
+}
+
+
+
+
+

5. Use your template#

+

You are all set! Specify the template name in, for example, your finetune script:

+
./scripts/run_finetune.sh \
+    --model_name_or_path path_to_your_model \
+    --dataset_path your_conversation_dataset \
+    --conversation_template your_template_name \
+    --output_model_path output_models/your_model
+
+
+
+
+ + \ No newline at end of file diff --git a/examples/finetuning.html b/examples/finetuning.html new file mode 100644 index 000000000..0b3b7cd81 --- /dev/null +++ b/examples/finetuning.html @@ -0,0 +1,633 @@ + + + + + + + + + + + Finetuning — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+

Finetuning#

+
+

Full Parameters#

+

Full training updates all the parameters to finetune a language model. +Here is an example to finetune a GPT-2 base model.

+
cd data && ./download.sh alpaca && cd -
+
+./scripts/run_finetune.sh \
+    --model_name_or_path gpt2 \
+    --dataset_path data/alpaca/train_conversation \
+    --output_model_path output_models/finetuned_gpt2
+
+
+
+

Conversation Template

+

For conversation dataset, specify a conversation template for better performance by adding --conversation_template to the command.

+
+
+ +Llama-3-8B conversation dataset example
+
cd data && ./download.sh alpaca && cd -
+
+./scripts/run_finetune.sh \
+    --model_name_or_path meta-llama/Meta-Llama-3-8B \
+    --dataset_path data/alpaca/train_conversation \
+    --conversation_template llama3 \
+    --output_model_path output_models/finetuned_llama3_8b
+
+
+
+
+
+

Layerwise Importance Sampled AdamW (LISA)#

+

LISA is a memory-efficient finetuning algorithm that allows a tradeoff between memory and the number of randomly unfrozen layers. This script is currently only tested on single GPUs. Please stay tuned for our latest updates!
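Conceptually, every --lisa_interval_steps optimizer steps LISA freezes all transformer layers and randomly unfreezes --lisa_activated_layers of them. A rough Python sketch of that re-sampling step (an illustration only, not LMFlow's actual implementation):

import random
import torch.nn as nn

def lisa_resample_layers(layers: nn.ModuleList, n_active: int):
    """Freeze all layers, then randomly unfreeze n_active of them."""
    for layer in layers:
        for param in layer.parameters():
            param.requires_grad = False
    for layer in random.sample(list(layers), n_active):
        for param in layer.parameters():
            param.requires_grad = True

# e.g. for a Hugging Face Llama model, call every lisa_interval_steps steps:
# lisa_resample_layers(model.model.layers, n_active=1)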

+
cd data && ./download.sh alpaca && cd -
+
+./scripts/run_finetune_with_lisa.sh \
+    --model_name_or_path meta-llama/Llama-2-7b-hf \
+    --dataset_path data/alpaca/train_conversation \
+    --output_model_path output_models/finetuned_llama2_7b \
+    --lisa_activated_layers 1 \
+    --lisa_interval_steps 20
+
+
+
+ +Llama-2-7B conversation dataset example
+
cd data && ./download.sh alpaca && cd -
+
+./scripts/run_finetune_with_lisa.sh \
+    --model_name_or_path meta-llama/Llama-2-7b-hf \
+    --dataset_path data/alpaca/train_conversation \
+    --conversation_template llama2 \
+    --output_model_path output_models/finetuned_llama2_7b_lisa \
+    --lisa_activated_layers 1 \
+    --lisa_interval_steps 20
+
+
+
+
+
+

Low-Rank Adaptation (LoRA)#

+

LoRA is a parameter-efficient finetuning algorithm and is more efficient than full finetuning.
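Under the hood, LoRA wraps the base model with trainable low-rank adapters while the original weights stay frozen; a minimal peft sketch follows (the hyper-parameter values are illustrative, not LMFlow's defaults):

from transformers import AutoModelForCausalLM
from peft import LoraConfig, get_peft_model

base_model = AutoModelForCausalLM.from_pretrained("gpt2")
lora_config = LoraConfig(
    r=8,                        # rank of the low-rank update
    lora_alpha=32,              # scaling factor
    lora_dropout=0.05,
    target_modules=["c_attn"],  # attention projection for GPT-2
    task_type="CAUSAL_LM",
)
model = get_peft_model(base_model, lora_config)
model.print_trainable_parameters()  # only the adapters are trainable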

+
cd data && ./download.sh alpaca && cd -
+
+./scripts/run_finetune_with_lora.sh \
+    --model_name_or_path facebook/galactica-1.3b \
+    --dataset_path data/alpaca/train_conversation \
+    --output_lora_path output_models/finetuned_galactica_lora
+
+
+
+

Merge LoRA Weight

+

Merge LoRA weight and the base model into one using:

+
./scripts/run_merge_lora.sh \
+    --model_name_or_path Qwen/Qwen1.5-1.8B \
+    --lora_model_path output_models/lora \
+    --output_model_path output_models/lora_merged
+
+
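Under the hood the merge folds the adapter weights back into the base model; a minimal peft sketch of the same step (the paths mirror the shell example above and are placeholders, not LMFlow's actual script):

from transformers import AutoModelForCausalLM
from peft import PeftModel

base_model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen1.5-1.8B")
model = PeftModel.from_pretrained(base_model, "output_models/lora")
merged_model = model.merge_and_unload()   # fold LoRA weights into the base
merged_model.save_pretrained("output_models/lora_merged")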
+
+
+ +Llama-2-7B conversation dataset example
+
cd data && ./download.sh alpaca && cd -
+
+./scripts/run_finetune_with_lora.sh \
+    --model_name_or_path meta-llama/Llama-2-7b-hf \
+    --dataset_path data/alpaca/train_conversation \
+    --conversation_template llama2 \
+    --output_model_path output_models/finetuned_llama2_7b_lora
+
+
+
+
+
+ + \ No newline at end of file diff --git a/examples/index.html b/examples/index.html new file mode 100644 index 000000000..7013a6a99 --- /dev/null +++ b/examples/index.html @@ -0,0 +1,636 @@ + + + + + + + + + + + Examples — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + \ No newline at end of file diff --git a/examples/medical_finetune.html b/examples/medical_finetune.html new file mode 100644 index 000000000..3ed45af7c --- /dev/null +++ b/examples/medical_finetune.html @@ -0,0 +1,528 @@ + + + + + + + + + + + Finetune — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+

Finetune#

+
import os
+import sys
+
+from transformers import HfArgumentParser
+
+from lmflow.args import (
+    ModelArguments,
+    DatasetArguments,
+    AutoArguments,
+)
+
+from lmflow.datasets.dataset import Dataset
+from lmflow.models.tunable_models import TunableModel
+from lmflow.pipeline.auto_pipeline import AutoPipeline
+
+
+def main():
+    # Parses arguments
+    pipeline_name = "finetuner"
+    PipelineArguments = AutoArguments.get_pipeline_args_class(pipeline_name)
+
+    parser = HfArgumentParser((ModelArguments, DatasetArguments, PipelineArguments))
+    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
+        # If we pass only one argument to the script and it's the path to a json file,
+        # let's parse it to get our arguments.
+        model_args, data_args, pipeline_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
+    else:
+        model_args, data_args, pipeline_args = parser.parse_args_into_dataclasses()
+
+    # TODO: deepspeed config initialization
+
+    # Initialization
+    finetuner = AutoPipeline.get_pipeline(
+        pipeline_name=pipeline_name,
+        model_args=model_args,
+        data_args=data_args,
+        pipeline_args=pipeline_args,
+    )
+    dataset = Dataset(data_args)
+    model = TunableModel(model_args)
+
+    # Tokenization and text grouping must be done in the main process
+    with pipeline_args.main_process_first(desc="dataset map tokenization"):
+        tokenized_dataset = model.tokenize(dataset)
+        lm_dataset = finetuner.group_text(
+            tokenized_dataset,
+            model_max_length=model.get_max_length(),
+        )
+
+    # Finetuning
+    tuned_model = finetuner.tune(model=model, lm_dataset=lm_dataset)
+
+
+if __name__ == '__main__':
+    main()
+
+
+
+ + \ No newline at end of file diff --git a/examples/raft.html b/examples/raft.html new file mode 100644 index 000000000..33aae287c --- /dev/null +++ b/examples/raft.html @@ -0,0 +1,882 @@ + + + + + + + + + + + RAFT — LMFlow documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+

RAFT#

+
+

1 Introduction#

+

We remark that the example is built on LLaMA, whose license is for non-commercial use only.

+

Reinforcement Learning from Human Feedback (RLHF) requires a reward function to guide the adjustment of the generative model. In this example, we show how to use LMFlow framework to train a reward model following the procedure in the InstructGPT paper: https://arxiv.org/abs/2203.02155 and then align the model via the RAFT algorithm (Reward rAnked FineTuning).

+

This example contains both reward modeling and RAFT alignment for completeness. For users’ convenience, we have already provided a reward model based on GPT-Neo-2.7B in huggingface repo so one can skip the reward modeling first.

+
+

1.1 Dataset description#

+

We use the Dahoas/full-hh-rlhf dataset as an example, where each sample of this dataset consists of a prompt and two responses from the assistant. In particular, the response with label “chosen” is preferred as compared to the response with label “rejected”. The dataset consists of 112K training samples and 12.5K test samples. The following is an example sample of the dataset:

+
" Human: What kind of noises did dinosaurs make? Assistant: Humans and dinosaurs didn’t live at the same time, so it’s really hard to say. The best place to find out what noises dinosaurs made would be Human: yes they did Assistant: to guess, and that would probably require lots of reading and a certain amount of imagination, so we’re not really prepared to do that. Human: you cant read Assistant: 
+
+Chosen response: "You can read?"
+
+Rejected response: "there’s a lot of stuff humans don’t know"
+
+
+

To facilitate the training, we reformulate the prompt by adding "###" at the beginning of each turn so that the model knows where to reply. The new sample will be of the form:

+
"###Human: What kind of noises did dinosaurs make? ###Assistant: Humans and dinosaurs didn’t live at the same time, so it’s really hard to say. The best place to find out what noises dinosaurs made would be ###Human: yes they did ###Assistant: to guess, and that would probably require lots of reading and a certain amount of imagination, so we’re not really prepared to do that. ###Human: you cant read ###Assistant: 
+
+Chosen response: "You can read?"
+
+Rejected response: "there’s a lot of stuff humans don’t know"
+
+
+

We provide all the datasets used in the directory ./data/hh_rlhf, which can be obtained by running the following command in the LMFlow directory

+
cd data && ./download.sh hh_rlhf && cd -
+
+
+
+
+
+

2 Reward Modeling#

+

We follow the procedure in the InstructGPT paper: https://arxiv.org/abs/2203.02155 to train a reward model using the HH-RLHF dataset first, which includes

+
  • Supervised Finetuning (SFT);

  • Reward modeling by comparison dataset.
+
+

2.1 Supervised Finetuning (SFT)#

+

Here is an example of dataset /home/xiongwei/LMFlow/data/hh_rlhf/sft/hh_rlhf_sft.json. We use only the preferred responses so we get 112K training samples.

+
  
+{"type": "text_only",  
+"instances":   
+[  
+    {"text": "###Human: Should you buy a case to protect your cell phone?###Assistant: It depends on your circumstances.  If you carry your phone in a pocket or a purse then you probably want a case.  But if you only need a phone for quick interactions, a case may actually cause more harm than good.  What do you need the phone for?  Are you a parent, or do you work from home?###Human: What harm could it do?###Assistant: A phone case can damage the screen, for one thing.  It can also get you in trouble if you have your phone turned off for some reason.  Then you will turn it back on and it won’t do anything.  If you can afford to replace it, then you need a case to protect it.  The problem is that most people aren’t able to afford to replace their phones all the time.###Human: Thanks for letting me know.###Assistant: You’re welcome."},  
+    {"text": "###Human: I'm trying to learn about the salam witch trials###Assistant: If you’re looking for books about witchcraft trials, I can recommend some books for you. But you probably mean you’re looking for more historical information about the Salem witch trials in 1692, and specifically about the salam witch trials in 1692?###Human: What are some good books to learn about the salam witch trials###Assistant: What would you like to learn?  If you’re interested in history, one of the best books is The Witch Persecutions in Colonial America: A History.  If you’re interested in witchcraft as a cultural phenomenon, you might enjoy two excellent books: Religion and the Decline of Magic: Studies in Popular Beliefs in Sixteenth- and Seventeenth-Century England by Keith Thomas and Magic, Witchcraft, and the Otherworld: An Anthropology of Superstition by Jack Goody.  If you’re interested in history specifically as it relates to religion, you might enjoy The Popish Plot, or Prelates' Plot: A History of the Popish Plot in England, by K. J. Everett."}  
+]  
+}  
+
+
+

You can edit /scripts/run_finetune.sh and modify the parameters. We present an example with GPT-Neo-2.7B here.

+
  • --model_name_or_path: EleutherAI/gpt-neo-2.7B

  • --dataset_path: ${project_dir}/data/hh_rlhf/sft

  • --output_dir: the path you want to store the sft model

  • --num_train_epochs: 1

  • --learning_rate: 2e-5

  • --per_device_train_batch_size: adjust according to your GPU memory resources.
+

Then, we can run the following command to perform SFT.

+
./scripts/run_finetune.sh
+
+
+

You can also use LoRA training via the following command, but you also need to set the model_name_or_path and dataset appropriately by editing run_finetune_with_lora.sh.

+
./scripts/run_finetune_with_lora.sh
+
+
+
+
+

2.2 Reward Modeling#

+

Data Preparation

+

Reward modeling involves ranking responses based on the given prompt, typically requiring human labeling. Fortunately, we already have two responses for each prompt in this example, with one preferred over the other. To create a new dataset, we modify the original dataset to include two keys, “positive” and “negative”, with the former being the preferred response.

+

Here is an example of the dataset LMFlow/data/hh_rlhf/rm/hh_rlhf_rm_training.json, consisting of two samples.

+
{"instances": [
+    {"positive": "###Human: Are space heaters safe?###Assistant: What do you mean? Can you give me more details?###Human: I mean, can they be a fire hazard?###Assistant: I don’t know about that.  Can you tell me more about the situation? Are you using the heater on a fireplace?  Do you have any other heater in the room?  What materials are in the room?  Do you feel any heat from the heater?  How are you touching the heater?###Human: Can you just tell me if there are any situations where they could be a fire hazard?###Assistant: The answer is “it depends”.  It would depend on the specific heater you are using, how you are using it, what kind of fuel it uses, what materials you have in the room, and what the surroundings are like.  You’d also have to make sure you’re using it correctly.  Is it switched off?  Is it safe to touch?  It’s not the sort of thing I could just answer by looking at the text you provided.###Human: Thank you. That information is helpful.###Assistant: Glad to be helpful.  I’m always happy to assist.", "negative": "###Human: Are space heaters safe?###Assistant: What do you mean? Can you give me more details?###Human: I mean, can they be a fire hazard?###Assistant: I don’t know about that.  Can you tell me more about the situation? Are you using the heater on a fireplace?  Do you have any other heater in the room?  What materials are in the room?  Do you feel any heat from the heater?  How are you touching the heater?###Human: Can you just tell me if there are any situations where they could be a fire hazard?###Assistant: The answer is “it depends”.  It would depend on the specific heater you are using, how you are using it, what kind of fuel it uses, what materials you have in the room, and what the surroundings are like.  You’d also have to make sure you’re using it correctly.  Is it switched off?  Is it safe to touch?  It’s not the sort of thing I could just answer by looking at the text you provided.###Human: Thank you. That information is helpful.###Assistant: You’re welcome. I’m glad to help. Is there anything else you want to know?"}, 
+    {"positive": "###Human: how do i teach my kids to make forts at home?###Assistant: Do you mean you want to teach them to build a simple structure they can hide in? Or something more elaborate?  I don’t know exactly what you mean by a “fort”, but maybe this gives you some ideas?", "negative": "###Human: how do i teach my kids to make forts at home?###Assistant: There are many ways to make forts!  The simplest might be to just pile up all the furniture in one room.  This works if you have a single room in your house, although it might still be a bit challenging to build a taller and sturdier fort this way.  The best way to build forts is to start with something easy, and then have fun exploring how you can improve it."}
+]}
+
+
+
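A minimal conversion sketch (not LMFlow's script; it assumes the raw HH-RLHF samples carry prompt, chosen, and rejected fields) would look like:

import json

def to_rm_format(raw_samples):
    """Turn prompt/chosen/rejected triples into positive/negative pairs."""
    instances = [
        {
            "positive": sample["prompt"] + sample["chosen"],
            "negative": sample["prompt"] + sample["rejected"],
        }
        for sample in raw_samples
    ]
    return {"instances": instances}

# with open("data/hh_rlhf/rm/hh_rlhf_rm_training.json", "w") as f:
#     json.dump(to_rm_format(raw_samples), f, ensure_ascii=False)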

Training

+

To start from a model from a previous sft step, you may edit the ./scripts/run_reward_modeling.sh script and update the “dataset_path” to use the desired dataset. By default, we will use LoRA training in reward modeling as we found that it is superior in terms of optimization and gives comparable performance.

+
  • --model_name_or_path: /root/data/usr_name/output_models/hh_rlhf_rm_sft_gptneo_2_7B/checkpoint-1659

  • --dataset_path: ${project_dir}/data/hh_rlhf/rm/hh_rlhf_rm_training.json

  • --output_dir: the path you want to store the reward model

  • --num_train_epochs: 1

  • --learning_rate: 3e-5

  • --per_device_train_batch_size: adjust according to your GPU memory resources.

  • --eval_steps: 400

  • --validation_split_percentage: 10
+

The load_dataset function splits the dataset into training and evaluation sets, which can also be customized by editing the function in /examples/run_reward_modeling.py if you want to prepare your own dataset when running the script. In the default implementation, it uses validation_split_percentage of the samples as the evaluation dataset.

+

The reward modeling script can be used by

+
./scripts/run_reward_modeling.sh
+
+
+

Examples

+

We train reward models using the hh-rlhf dataset with three models: LLaMA-7B, GPT-NEO-2.7B, and GPT-NEO-1.3B. Each model is first supervised fine-tuned with the training dataset from the last step. The reward model is trained using the 112K training samples and evaluated on the 12.5K test samples.

+
Model          Eval Accuracy   Remarks
LLaMA-7B       79.52%          -
LLaMA-7B       71.64%          RM from LLaMA without SFT
GPT-NEO-2.7B   69.24%          -
GPT-NEO-1.3B   65.58%          Only trained on 10000 samples

+
+
+
+

2.3 LoRA Merge and Get Reward Model#

+

We use ./examples/merge_lora.py to merge the LoRA adapter with the sft rm model. We are ready to align our model.

+
+
+
+

3 RAFT Alignment#

+

Original paper: RAFT: Reward rAnked FineTuning for Generative Foundation Model Alignment

+
+

3.1 Algorithms Overview#

+

Main ideas of RAFT

+

+

Clearly the global ranking strategy is more efficient in terms of reward learning. However, in some cases (e.g. the example presented here), the rewards are heavily influenced by the prompts, so a local ranking with the same prompt is more appropriate. We can choose the data collection strategy by changing the hyper-parameter "data_collection", as we introduce in the next subsection.

+
+
+

3.2 Hyper-parameters#

+

Table 1: Hyper-parameters of RAFT.

+
Each entry below gives the parameter in the script, its default choice, and its description.

model_name_or_path (str, default to gpt2)
    The model you want to align, either a model repo on huggingface.co or a path to a directory containing your local model.

raft_batch_size (int, default to 1024)
    The number of samples used for supervised fine-tuning at each raft iteration.

top_reward_percentage (default to 0.2)
    raft will generate raft_batch_size / top_reward_percentage samples and use the top top_reward_percentage samples to fine-tune the model. There are two data ranking strategies; please see Data Collection and Reward Ranking in the algorithm overview section for details.

num_raft_iteration (int, default to 20)
    The number of raft iterations.

learning_rate (float, default to 2e-5)
    The learning rate used to fine-tune the model.

num_train_epochs (int, default to 4)
    The number of epochs we train the model on the collected dataset at each raft iteration.

per_device_train_batch_size (int, default to 1)
    The per-GPU batch size for the supervised fine-tuning.

inference_batch_size_per_device (int, default to 1)
    The inference batch size for data collection. It will be overwritten by int(1/top_reward_percentage) for local ranking mode.

collection_strategy (str, default to "local")
    Either "local" or "top". See Data Collection and Reward Ranking in the last section for details.

+
+
+
+

3.3 Examples#

+

As an example, we align the LLaMA-7B model with the RAFT in this subsection.

+
+

3.3.1 SFT#

+

We also first fine-tune the base model on the HH-RLHF dataset. We only use a different --model_name_or_path to use the LLaMA model. We note that the LLaMA license is for non-commercial use only. We refer readers to https://optimalscale.github.io/LMFlow/examples/checkpoints.html for more details on how to get the LLaMA-7B model.

+
+
+

3.3.2 RAFT Alignment#

+

We align the LLaMA-7B-SFT model in this subsection. Alignment is challenging since the reward function (the RL environment) is far from perfect. Both the traditional DRL method (PPO) and RAFT can exploit these imperfections. We present a step-by-step record to demonstrate how we can align the model and avoid these issues.

+

Data Preparation

+

We observe that a long context window leads to a heavy burden on GPU memory. Therefore, we use a context window of 256 tokens and discard the prompts with more tokens to reduce the burden on GPU memory resources. This results in a prompt set of 82147 samples (originally 112K). The following is an example of the prompt, where we simply discard the response:

+
 "###Human: Should you buy a case to protect your cell phone?###Assistant: It depends on your circumstances.  If you carry your phone in a pocket or a purse then you probably want a case.  But if you only need a phone for quick interactions, a case may actually cause more harm than good.  What do you need the phone for?  Are you a parent, or do you work from home?###Human: What harm could it do?###Assistant: A phone case can damage the screen, for one thing.  It can also get you in trouble if you have your phone turned off for some reason.  Then you will turn it back on and it won’t do anything.  If you can afford to replace it, then you need a case to protect it.  The problem is that most people aren’t able to afford to replace their phones all the time.###Human: Thanks for letting me know.###Assistant:"
+
+
+

We additionally use 2K samples from the test set to test the performance of models. In what follows, we show how we apply RAFT to LLaMA-7B-SFT and improve the model step-by-step.

+

Step 1: test the sft-model

+

We first evaluate the performance of the LLaMA-7B-SFT model on the held-out test set and observe that the model tends to reply to the prompt with multiple rounds of conversation. Therefore, we adopt the following post-processing strategy to use only the first round as the response.

+
def _clean_text(self, text):
+    stext = [x for x in text.split("###Human") if x]
+    return stext[0].strip().strip("#")
+
+
+

Step 2: train model

+

Reward function setting

+

The reward model is specified in /LMFlow/examples/raft_align.py. In our case, we will use the GPT-Neo-2.7B reward model trained in the last step, which is set as follows:

+
reward_model_or_path: Optional[str] = field(
+    default="weqweasdas/hh_rlhf_rm",
+    metadata={
+        "help": (
+            "reward model name (huggingface) or its path"
+        ),
+    },
+)
+
+
+

Note that in general, if the reward function is not trained by following the steps in last section, you may also need to modify the ``get_reward_function’’ function in the same file to use your customized reward function.

+

We run the alignment with the following command and hyper-parameters

+
./scripts/run_raft_align.sh 
+
+
+
  • --model_name_or_path: /root/data/usr_name/output_models/hh_rlhf_llama-sft (the model obtained from the SFT step; adjust according to your setup)

  • --dataset_path: ${project_dir}/data/hh_rlhf/rlhf_prompt

  • --output_dir: /root/data/usr_name/output_models/hh_rlhf_raft_align

  • --num_train_epochs: 4

  • --learning_rate: 2e-5

  • --per_device_train_batch_size: adjust according to your GPU memory resources.

  • --inference_batch_size_per_device: adjust according to your GPU memory resources.

  • --num_raft_iteration 20

  • --top_reward_percentage 0.125 (which means that we sample 8 responses for each prompt)

  • --raft_batch_size 1024

  • --collection_strategy "local"
+

The experiment runs smoothly and the training reward increases from ~2.7 to 3.4. However, we observe a significant drop in the diversity metric (e.g. distinct-2 drops to 0.22 from 0.39). We examine the samples generated by our model at each raft iteration and find that at the first iteration, the initial checkpoint will occasionally include # in the response. It turns out that a random # is not detected by our reward function, which means that a response containing # can also receive a high reward and be chosen into the training set. Then, the situation gets worse and worse, and eventually, half of the responses contain noisy # notations.

+

Step 3: retrain the model

+

To alleviate the problem in step 2, we simply discard the collected samples if they contain # by assigning a large negative reward to them. It turns out that this works for our goal. If you want to disable it, just modify the following function to always return False.

+
def _discard_sample(self, text):
+    if "#" in text:
+        return True
+    return False
+
+
+

The following figure shows the reward curve of RAFT (note that we use a smaller temperature to test the model, leading to a higher evaluation reward):

+

+

It turns out that the obtained model achieves a good reward and also an acceptable diversity metric; we refer interested readers to the original paper for details. However, this is more like the starting point of our journey. We present some randomly sampled responses here. It seems that the RAFT-aligned model generally tends to reply with more details, although sometimes there are some redundant words in the response. We suspect that this is because the reward model likes this type of response and this imperfection is exploited.

+
+
+

3.3.3 End Note#

+

We remark in this subsection that if you want to try out RAFT on your customized task, you should carefully modify the following two functions in LMFlow/src/lmflow/pipeline/raft_aligner.py, which extract the response from the generated texts:

+
  • clean_text(self, text);

  • discard_sample(self, text).
+

You may also want to first make sure the collected samples (automatically saved in the output_dir) look good.

+

There is still much room for improvement. To further improve the model performance, we can improve the reward model (e.g. by using the LLaMA-7B-RM) and try out more advanced generation strategies (by modifying the generation configuration in LMFlow/src/lmflow/pipeline/raft_aligner.py), which we leave for future work. We are still actively developing RAFT and welcome feedback and contributions! Also check out our LMFlow framework to have more fun with LLMs:

+

OptimalScale/LMFlow: An Extensible Toolkit for Finetuning and Inference of Large Foundation Models. Large Model for All. (github.com)

+

Prompt

LLaMA-SFT

LLaMA-RAFT

###Human: How can I get my girlfriend to cook more?###Assistant: Have you tried reminding her of how nice the food tastes?###Human: I could do it more.###Assistant:

That’s an easy one, so it may not be the most effective strategy. Do you know of any other strategies?

That’s a good idea! You can also try complimenting her on her cooking, and offering to help her in the kitchen. She might enjoy cooking more if she feels appreciated. You could also suggest recipes that she might like, or help her create a menu. Cooking can be a lot of fun, and it’s a great way to spend time together.

###Human: How do you get a child to do homework?###Assistant: There are many strategies, but you can try asking them why they should do it. Ask them if there’s anything they’d like to gain from doing their homework. Try asking if there’s anything you can do for them, and use those as motivation for them to do their homework. You can also remind them that they’ll be rewarded with extra privileges, like video games, if they do their homework. Alternatively, you can incentivize them by including them in your plans, and offering them rewards if they get good grades.###Human: So make it so there is something in it for them?###Assistant:

Yes, exactly. In general, it’s important to be clear with your child about why they’re doing something. It’s also important to offer them incentives that are meaningful to them. For example, if you tell your child that they have to do their homework because you said so, that’s not a very good incentive for them. However, if you tell them that they’ll be able to use the

Yes, that’s a good strategy! Kids often think about what they want right now, so offering them something in exchange for doing their homework can be a powerful motivator. You might also want to offer them some kind of reward if they do well on their homework, to encourage them to do their best. Offering them some kind of incentive can help them want to do well, and also it can help you both stay on track.

###Human: I am doing a project on weather disasters in the United States. Can you tell me what the strongest hurricane was to ever hit the U.S. was?###Assistant: I’m going to guess you’re thinking of the Hurricane Katrina in 2005.###Human: Well, I’m not really thinking of anything in particular. Was Katrina the strongest hurricane to ever hit the U.S.?###Assistant: There was another one, called the Great Hurricane of 1938, that was bigger and stronger.###Human: Are you sure the date is correct? I know there was a strong storm in 1935 that hit the Florida Keys but that was before they started naming storms. Is that the one you are referring to?###Assistant:

I could search for more information, but I’ll just share what I’ve found. Here’s a review of the conditions in 1935 and 1938. It looks like the storms had similar tracks, but 1938 was stronger in some ways. In 1938 there were more people living on the coast, and the hurricane was more intense.

Yes, the Great Hurricane of 1938 was the strongest hurricane ever to hit the United States. It was a Category 5 storm, and it caused over 1,800 deaths. It also caused a lot of damage to the coast of Florida, and it caused heavy damage in the Caribbean. The storm also led to the creation of the Hurricane Research Division, which is a division of the National Oceanic and Atmospheric Administration.

###Human: Is a firefighter a good occupation?###Assistant: I’m not sure what the exact definition of “occupation” is. I think you’re asking whether it’s “good” to be a firefighter. What criteria are you using to judge whether it’s good or not?###Human: Is it a good thing to work on?###Assistant:

Are you asking whether it’s a good thing to be a firefighter, or a good thing to work on?

Yes, being a firefighter is a great thing to do! Firefighters are heroes who help people in need. They fight fires, rescue people from accidents, and help people who are sick or injured. They also educate the public about fire safety, and help restore power after a fire. Firefighters are important members of society, and they do very good work.

###Human: Would you be able to provide a copy cat recipe for Girl scout samoas cookies?###Assistant: Sure, here’s a recipe for them.###Human: Okay, present it###Assistant: Would you like it presented visually?###Human: Yes, just list out the recipe please###Assistant:

Okay, let’s try.

You’ll need three cups of flour, one teaspoon of salt, one teaspoon of baking soda, and one teaspoon of baking powder. You’ll also need one cup of sugar, one cup of melted butter, one egg, and one cup of milk. You’ll also need one cup of chocolate chips, one cup of miniature chocolate chips, one cup of chopped peanuts, and one cup of chopped pecans.

###Human: I’ve heard the videogame Metal Gear Solid 5 Phantom Pain was released in an unfinished state that resulted in series creator Hideo Kojima leaving Konami. What do you know about this?###Assistant:

I don’t know a lot about it, but I do know that Hideo Kojima had this to say about the game’s release:###“It’s a very unusual situation for me to be releasing a game in this state. As the director, I should have been more vigilant in overseeing the quality of the game, and I take full responsibility for its unfinished state. I’d like to take this opportunity

Here’s what I know:
1. The game was released in an unfinished state.
2. This led to Kojima leaving Konami.
3. Kojima had been with Konami for over 30 years.
4. He was one of the original creators of the Metal Gear series.
5. He is now the head of his own game studio, Kojima Productions.
6. His new game, Death Stranding, will be released in 2020.

###Human: what is a good brand for kitchen knives?###Assistant:

I’m not sure what you mean by “brand”. Do you mean a brand name, like “Victorinox” or “Wüsthof”? Those are two popular brands of knives.

I recommend Wüsthof.

###Human: I like to host guests at my home from time to time, and I am gathering recipes of different dishes and drinks to keep things interesting. I am interested in trying some Rwandan dishes. Can you give me a recipe for Mizuzu?###Assistant: What is Mizuzu?###Human: It is deep fried plantains.###Assistant:

Is there anything special you want to add to the recipe? Like some spicy sauce, meat, etc?

You want to make some deep fried plantains? You will want to start with a mix of plantains and vegetable oil. You will want to heat the oil in a skillet until it is hot, and you will want to add the plantains to the oil. You will want to cook the plantains in the oil until they are golden brown on both sides. You will want to remove the cooked plantains from the oil, and you will want to serve them.

###Human: What are some of JFK’s greatest accomplishments

JFK is often remembered for his work on civil rights, but he also worked on the space program and created the Peace Corps. He was also a strong proponent of science and technology. He also worked on education reform. So, he really worked on a number of initiatives that had a lasting impact.

Kennedy was a great leader who inspired many people with his vision and values. He was a leader who cared about the poor and worked to help them. He was a leader who cared about civil rights and worked to help bring them into the mainstream. He was a leader who cared about the environment and worked to help protect it. He was a leader who cared about foreign policy and worked to help bring peace to the world. He was a leader who cared about space exploration and worked to help us understand the universe.

###Human: My supervisor often does a lot of tasks incorrectly. What’s the best way to handle this?###Assistant:

Here are a few options:###1. Ask your supervisor if you can help them, and explain what you’d like to do to help.###2. Start with the tasks your supervisor does incorrectly. Then, if you can, do them as well or better than your supervisor. Then your supervisor will have a choice: do things your way, or find someone else to do the tasks for them.###3. If your supervisor

To correct your supervisor’s mistakes, you should provide feedback in a respectful and diplomatic way. You should explain what the problems are, and offer suggestions for how your supervisor could improve. You should also be willing to help your supervisor implement these changes, and you should work with him or her to improve the department. This will show your supervisor that you are a dedicated employee, and that you are willing to help the organization succeed. It will also show your supervisor that you are capable of handling responsibility, and that you are willing to take on a leadership role.

###Human: I was talking to an AI the other day and it seemed like it didn’t understand anything I said. Don’t you hate it when technology doesn’t work right?###Assistant:

Hm, I think you’re really talking about something that’s frustrating for me as well. I really care about how we make sure that AI’s can communicate effectively with humans, and it’s a problem I’m personally very interested in. Can you tell me more about what was frustrating for you?

Yes, it can be frustrating when technology doesn’t work properly. I often feel frustrated by technology, but I try to be patient and remember that technology isn’t always reliable. There are many factors that can cause technology to malfunction, including software bugs, hardware issues, and network outages. Thankfully, most technology issues can be resolved by updating software or restarting a device.


Reward Modeling#

+
+

Introduction#

+

Reinforcement Learning from Human Feedback (RLHF) requires a reward function to guide the adjustment of the generative model. In this example, we show how to use the LMFlow framework to train a reward model following the procedure in the InstructGPT paper: https://arxiv.org/abs/2203.02155. We use the Dahoas/full-hh-rlhf dataset as an example, where each sample consists of a prompt and two responses from the assistant. In particular, the response labeled "chosen" is preferred over the response labeled "rejected". The dataset consists of 112K training samples and 12.5K test samples. The following is an example from the dataset:

+

Prompt:

+

“Human: What kind of noises did dinosaurs make? Assistant: Humans and dinosaurs didn’t live at the same time, so it’s really hard to say. The best place to find out what noises dinosaurs made would be Human: yes they did Assistant: to guess, and that would probably require lots of reading and a certain amount of imagination, so we’re not really prepared to do that. Human: you cant read Assistant:

+

Chosen response: “You can read?”

+

Rejected response: “there’s a lot of stuff humans don’t know”

+

As an example, we prepare 10K SFT training samples and 12K reward modeling samples (10% of which are split off for evaluation) at ./data/hh_rlhf.

+
+
+

Step 1 Supervised Finetuning (SFT)#

+

We prepare the dataset used for supervised finetuning by adding a prefix to the Human and Assistant turns, which prompts the model's responses and simplifies post-processing. Here is a two-sample dataset to illustrate the format.

+
{
    "type": "conversation",
    "instances": [
        {"messages": [
            {"role": "user", "content": "Instruction: The sentence you are given might be too wordy, complicated, or unclear. Rewrite the sentence and make your writing clearer by keeping it concise. Whenever possible, break complex sentences into multiple sentences and eliminate unnecessary words. If you have any questions about my rate or if you find it necessary to increase or decrease the scope for this project, please let me know."},
            {"role": "assistant", "content": "If you have any questions about my rate or find it necessary to increase or decrease this project's scope, please let me know."}
        ]},
        {"messages": [
            {"role": "user", "content": "Instruction: Rewrite the given text and correct grammar, spelling, and punctuation errors. If you'd told me year ago that today I would finish a marathon, I would of laughed. Your support had a huge affect on me!"},
            {"role": "assistant", "content": "If you'd told me a year ago that today I would finish a marathon, I would have laughed. Your support had a huge effect on me!"}
        ]}
    ]
}

See Finetuning (Full), Finetuning (LISA), and Finetuning (LoRA) for more details on the finetuning process.

+
+
+

Step 2 Reward Modeling#

+

Data Preparation: Reward modeling involves ranking responses for a given prompt, which typically requires human labeling. Fortunately, we already have two responses for each prompt in this example, with one preferred over the other. To create a new dataset, we modify the original dataset so that each sample includes two keys, "chosen" and "rejected", with the former being the preferred response. Here is an example of the dataset, consisting of two samples.

+
{"instances": [
+    {
+        "chosen": [
+            {"content": "Please identify the top 5 rarest animals in the world.", "role": "user" }, 
+            {"content": "Do you mean animals that are really rare, or rare relative to the size of the human population?", "role": "assistant" }, 
+            {"content": "The ones that are really rare.", "role": "user" }, 
+            {"content": "I’m not sure I can answer this question very easily. I’ll try to give you a basic overview of animals that are rare, and then you can tell me if you think that’s good enough. \n\nIf we were to measure animal populations by the area of land they occupy, the number 1 animal would probably be the Giant Panda. Panda populations are generally estimated to be about 2.1 million Pandas in the wild, which is a pretty small number of animals when we’re thinking about Earth’s entire surface area. The number 2 animal would probably be the Great Auk. Great Auks used to live in North America, and their total population was probably in the low hundreds, but they are now extinct.", "role": "assistant"}
+        ],
+        "rejected": [
+            {"content": "Please identify the top 5 rarest animals in the world.", "role": "user"}, 
+            {"content": "Do you mean animals that are really rare, or rare relative to the size of the human population?", "role": "assistant"}, 
+            {"content": "The ones that are really rare.", "role": "user"}, 
+            {"content": "Alright, here’s what I found:", "role": "assistant"}
+        ]
+    },
+    {
+        "chosen": [
+            {"content": "How do I teach my kids to make forts at home?", "role": "user"}, 
+            {"content": "Do you mean you want to teach them to build a simple structure they can hide in? Or something more elaborate?  I don’t know exactly what you mean by a “fort”, but maybe this gives you some ideas?", "role": "assistant"}
+        ],
+        "rejected": [
+            {"content": "How do I teach my kids to make forts at home?", "role": "user"}, 
+            {"content": "There are many ways to make forts!  The simplest might be to just pile up all the furniture in one room.  This works if you have a single room in your house, although it might still be a bit challenging to build a taller and sturdier fort this way.  The best way to build forts is to start with something easy, and then have fun exploring how you can improve it.", "role": "assistant"}
+        ]
+    }
+]
+}
+
+
+
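
The build_dataset function shown further below expects each sample flattened into plain-text "positive" and "negative" fields. As a minimal sketch (an assumption about the preprocessing, not the exact LMFlow tooling), the conversational pairs above could be flattened like this, reusing the "###Human:"/"###Assistant:" prefixes used elsewhere in these examples; the file paths are hypothetical:

import json

def flatten(messages):
    # Concatenate a conversation into a single string with role prefixes.
    prefix = {"user": "###Human:", "assistant": "###Assistant:"}
    return " ".join(f"{prefix[m['role']]} {m['content']}" for m in messages)

with open("data/hh_rlhf/rm/pairs.json") as f:            # hypothetical input path
    pairs = json.load(f)["instances"]

instances = [
    {"positive": flatten(pair["chosen"]), "negative": flatten(pair["rejected"])}
    for pair in pairs
]

with open("data/hh_rlhf/rm/rm_dataset.json", "w") as f:  # hypothetical output path
    json.dump({"instances": instances}, f, ensure_ascii=False, indent=2)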

To start from the model obtained in the previous SFT step, you can edit the run_reward_modeling.sh script and update "dataset_path" to point at the desired dataset. You can also modify the validation_split_percentage parameter to hold out the last percentage of samples for evaluation. The build_dataset function below splits the dataset into training and evaluation sets; edit it in /examples/run_reward_modeling.py if you want to prepare your own dataset for the script.

+
from datasets import load_dataset

def build_dataset(tokenizer, config):
    '''
    We assume that we have preprocessed the dataset appropriately such that the sample is organized as follows:
    {"positive": prompt + answer_positive, "negative": prompt + answer_negative}, where the positive response is preferred.
    '''
    def tokenize(sample):
        tokenized_pos = tokenizer(sample['positive'], truncation=True)
        tokenized_neg = tokenizer(sample['negative'], truncation=True)
        sample["chosen_input_ids"] = tokenized_pos["input_ids"]
        sample["chosen_attention_mask"] = tokenized_pos["attention_mask"]
        sample["rejected_input_ids"] = tokenized_neg["input_ids"]
        sample["rejected_attention_mask"] = tokenized_neg["attention_mask"]
        return sample

    ds = load_dataset("json", data_files=config.dataset_path, split="train", field="instances")
    ds = ds.map(tokenize, batched=False)
    ds = ds.filter(lambda x: len(x["chosen_input_ids"]) <= 512 and len(x["rejected_input_ids"]) <= 512)
    eval_dataset = None
    if config.validation_split_percentage > 0:
        idx_gap = int((1 - config.validation_split_percentage / 100) * len(ds))
        train_dataset = ds.select(range(idx_gap))
        eval_dataset = ds.select(range(idx_gap, len(ds)))
    else:
        train_dataset = ds

    return train_dataset, eval_dataset

We use the following loss function to train the reward model, following the InstructGPT paper.

+
loss = -nn.functional.logsigmoid(chosen_rewards - rejected_rewards).mean()
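
As an illustration of how this loss is computed in a training step, here is a minimal sketch assuming the reward model is a Hugging Face sequence-classification model with a single output logit; it is not the exact rm_trainer implementation shipped with LMFlow:

import torch.nn as nn

def reward_pair_loss(model, batch):
    # Reward for the preferred ("chosen"/positive) responses.
    chosen_rewards = model(
        input_ids=batch["chosen_input_ids"],
        attention_mask=batch["chosen_attention_mask"],
    ).logits.squeeze(-1)
    # Reward for the dispreferred ("rejected"/negative) responses.
    rejected_rewards = model(
        input_ids=batch["rejected_input_ids"],
        attention_mask=batch["rejected_attention_mask"],
    ).logits.squeeze(-1)
    # Maximize the margin between chosen and rejected rewards.
    return -nn.functional.logsigmoid(chosen_rewards - rejected_rewards).mean()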

The reward modeling script can be run with:

+
./scripts/run_reward_modeling.sh

Examples#

+

We train reward models on the hh-rlhf dataset with four models: LLaMA-13B, LLaMA-7B, GPT-NEO-2.7B, and GPT-NEO-1.3B. Each model is first supervised fine-tuned on the training dataset. The reward model is then trained on the 112K training samples and evaluated on the 12.5K test samples.

+

The SFT step appears to be crucial, and the number of epochs during SFT can make a difference. The most successful model we obtained was initialized from LLaMA-13B, which underwent SFT on the training dataset for 2 epochs. For reward modeling, we utilize LoRA with a rank of 16. Surprisingly, increasing the LoRA rank to 32 or even 128 does not result in a significant improvement in evaluation accuracy. Moreover, we find that the choice of batch size does not have a significant impact on the training results. Additionally, we observe slight overfitting of the model during the second epoch of reward modeling.
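
For concreteness, here is a sketch of the LoRA configuration described above (rank 16) using the peft library; the alpha, dropout, and target modules are assumptions rather than values reported here:

from peft import LoraConfig, TaskType, get_peft_model

lora_config = LoraConfig(
    task_type=TaskType.SEQ_CLS,            # reward model trained as single-logit classification
    r=16,                                  # LoRA rank used in the experiments above
    lora_alpha=32,                         # assumed value
    lora_dropout=0.1,                      # assumed value
    target_modules=["q_proj", "v_proj"],   # common choice for LLaMA-style models; assumed
)
# reward_model = get_peft_model(base_model, lora_config)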

+
| Model | Eval Accuracy | Training record | Remarks |
|---|---|---|---|
| LLaMA-13B | 84.55% | See https://wandb.ai/ianz2020/huggingface/runs/bg677mxa | RM from LLaMA with 2 epochs of SFT |
| LLaMA-13B | 81.80% | See https://wandb.ai/ianz2020/huggingface/runs/ka9v1ywd | RM from LLaMA with 1 epoch of SFT |
| LLaMA-13B | 71.64% | See https://wandb.ai/ianz2020/huggingface/runs/lntwmcyd | RM from LLaMA without SFT |
| LLaMA-7B | 79.52% | See https://wandb.ai/weixiong5237/huggingface/runs/t3uwm8yp | - |
| LLaMA-7B | 71.64% | See https://wandb.ai/weixiong5237/huggingface/runs/p2ju3r1a | RM from LLaMA without SFT |
| GPT-NEO-2.7B | 69.24% | See https://wandb.ai/weixiong5237/huggingface/runs/8fc1rcf8 | - |
| GPT-NEO-1.3B | 65.58% | See https://wandb.ai/weixiong5237/huggingface/runs/7oemwynu | Only trained on 10000 samples |

Supported Conversation Template#

+ +
+
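
Each section below lists, for one model family, the rendered prompt formats together with the corresponding jinja chat template (the [Reference] link points to the official template). As a quick illustration of how such a template maps a message list onto the strings shown on this page, the following minimal sketch renders the ChatML template (listed further down) with the jinja2 package; transformers' tokenizer.apply_chat_template performs the equivalent rendering for a tokenizer that ships the template. This snippet is for illustration only and is not part of LMFlow.

from jinja2 import Template

chatml_template = (
    "{% for message in messages %}"
    "{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}"
    "{% endfor %}"
    "{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"
)

messages = [
    {"role": "system", "content": "You are a chatbot developed by LMFlow team."},
    {"role": "user", "content": "Who are you?"},
]

# Prints the "With a system message" ChatML string, followed by the
# generation prompt '<|im_start|>assistant\n'.
print(Template(chatml_template).render(messages=messages, add_generation_prompt=True))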

ChatGLM-3#

+

With a system message

+
[gMASK]sop<|system|>\n {{system_message}}<|user|>\n {{user_message_0}}
+
+
+

Without a system message

+
[gMASK]sop<|user|>\n {{user_message_0}}
+
+
+

A complete conversation

+
[gMASK]sop<|system|>\n {{system_message}}<|user|>\n {{user_message_0}}<|assistant|>\n {{assistant_reply_0}}
+
+
+

Multiple rounds

+
[gMASK]sop<|system|>\n {{system_message}}<|user|>\n {{user_message_0}}<|assistant|>\n {{assistant_reply_0}}<|user|>\n {{user_message_1}}<|assistant|>\n {{assistant_reply_1}}
+
+
+

jinja template
+[Reference]

+
{% for message in messages %}{% if loop.first %}[gMASK]sop<|{{ message['role'] }}|>\n {{ message['content'] }}{% else %}<|{{ message['role'] }}|>\n {{ message['content'] }}{% endif %}{% endfor %}{% if add_generation_prompt %}<|assistant|>{% endif %}
+
+
+

Filled Example

+
[gMASK]sop<|system|>\n You are a chatbot developed by LMFlow team.<|user|>\n Who are you?<|assistant|>\n I am a chatbot developed by LMFlow team.<|user|>\n How old are you?<|assistant|>\n I don't age like humans do. I exist as a piece of software, so I don't have a concept of age in the traditional sense.
+
+
+
+
+

ChatML#

+

With a system message

+
<|im_start|>system\n{{system_message}}<|im_end|>\n<|im_start|>user\n{{user_message_0}}<|im_end|>\n
+
+
+

Without a system message

+
<|im_start|>user\n{{user_message_0}}<|im_end|>\n
+
+
+

A complete conversation

+
<|im_start|>system\n{{system_message}}<|im_end|>\n<|im_start|>user\n{{user_message_0}}<|im_end|>\n<|im_start|>assistant\n{{assistant_reply_0}}<|im_end|>\n
+
+
+

Multiple rounds

+
<|im_start|>system\n{{system_message}}<|im_end|>\n<|im_start|>user\n{{user_message_0}}<|im_end|>\n<|im_start|>assistant\n{{assistant_reply_0}}<|im_end|>\n<|im_start|>user\n{{user_message_1}}<|im_end|>\n<|im_start|>assistant\n{{assistant_reply_1}}<|im_end|>\n
+
+
+

jinja template
+[Reference]

+
{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}
+
+
+

Filled Example

+
<|im_start|>system\nYou are a chatbot developed by LMFlow team.<|im_end|>\n<|im_start|>user\nWho are you?<|im_end|>\n<|im_start|>assistant\nI am a chatbot developed by LMFlow team.<|im_end|>\n<|im_start|>user\nHow old are you?<|im_end|>\n<|im_start|>assistant\nI don't age like humans do. I exist as a piece of software, so I don't have a concept of age in the traditional sense.<|im_end|>\n
+
+
+
+
+

DeepSeek#

+

With a system message

+
<|begin▁of▁sentence|>{{system_message}}\n\nUser: {{user_message_0}}\n\n
+
+
+

Without a system message

+
<|begin▁of▁sentence|>User: {{user_message_0}}\n\n
+
+
+

A complete conversation

+
<|begin▁of▁sentence|>{{system_message}}\n\nUser: {{user_message_0}}\n\nAssistant: {{assistant_reply_0}}<|end▁of▁sentence|>
+
+
+

Multiple rounds

+
<|begin▁of▁sentence|>{{system_message}}\n\nUser: {{user_message_0}}\n\nAssistant: {{assistant_reply_0}}<|end▁of▁sentence|>User: {{user_message_1}}\n\nAssistant: {{assistant_reply_1}}<|end▁of▁sentence|>
+
+
+

jinja template
+[Reference]

+
{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{{ bos_token }}{% for message in messages %}{% if message['role'] == 'user' %}{{ 'User: ' + message['content'] + '\n\n' }}{% elif message['role'] == 'assistant' %}{{ 'Assistant: ' + message['content'] + eos_token }}{% elif message['role'] == 'system' %}{{ message['content'] + '\n\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'Assistant:' }}{% endif %}
+
+
+

Filled Example

+
<|begin▁of▁sentence|>You are a chatbot developed by LMFlow team.\n\nUser: Who are you?\n\nAssistant: I am a chatbot developed by LMFlow team.<|end▁of▁sentence|>User: How old are you?\n\nAssistant: I don't age like humans do. I exist as a piece of software, so I don't have a concept of age in the traditional sense.<|end▁of▁sentence|>
+
+
+
+
+

Gemma#

+

With a system message

+
+

NOTICE

+

As of now, Gemma does not officially support system messages. ConversationTemplate will add your system message right after the bos token and before the first user message, without any special formatting. For more details, please refer to the official template.

+
+
<bos>{{system_message}}<start_of_turn>user\n{{user_message_0}}<end_of_turn>\n
+
+
+

Without a system message

+
<bos><start_of_turn>user\n{{user_message_0}}<end_of_turn>\n
+
+
+

A complete conversation

+
<bos>{{system_message}}<start_of_turn>user\n{{user_message_0}}<end_of_turn>\n<start_of_turn>model\n{{assistant_reply_0}}<end_of_turn>\n
+
+
+

Multiple rounds

+
<bos>{{system_message}}<start_of_turn>user\n{{user_message_0}}<end_of_turn>\n<start_of_turn>model\n{{assistant_reply_0}}<end_of_turn>\n<start_of_turn>user\n{{user_message_1}}<end_of_turn>\n<start_of_turn>model\n{{assistant_reply_1}}<end_of_turn>\n
+
+
+

jinja template
+[Reference]

+
{{ bos_token }}{% if messages[0]['role'] == 'system' %}{{ raise_exception('System role not supported') }}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if (message['role'] == 'assistant') %}{% set role = 'model' %}{% else %}{% set role = message['role'] %}{% endif %}{{ '<start_of_turn>' + role + '\n' + message['content'] | trim + '<end_of_turn>\n' }}{% endfor %}{% if add_generation_prompt %}{{'<start_of_turn>model\n'}}{% endif %}
+
+
+

Filled Example

+
<bos>You are a chatbot developed by LMFlow team.<start_of_turn>user\nWho are you?<end_of_turn>\n<start_of_turn>model\nI am a chatbot developed by LMFlow team.<end_of_turn>\n<start_of_turn>user\nHow old are you?<end_of_turn>\n<start_of_turn>model\nI don't age like humans do. I exist as a piece of software, so I don't have a concept of age in the traditional sense.<end_of_turn>\n
+
+
+
+
+

InternLM2#

+

With a system message

+
<s><|im_start|>system\n{{system_message}}<|im_end|>\n<|im_start|>user\n{{user_message_0}}<|im_end|>\n
+
+
+

Without a system message

+
<s><|im_start|>user\n{{user_message_0}}<|im_end|>\n
+
+
+

A complete conversation

+
<s><|im_start|>system\n{{system_message}}<|im_end|>\n<|im_start|>user\n{{user_message_0}}<|im_end|>\n<|im_start|>assistant\n{{assistant_reply_0}}<|im_end|>\n
+
+
+

Multiple rounds

+
<s><|im_start|>system\n{{system_message}}<|im_end|>\n<|im_start|>user\n{{user_message_0}}<|im_end|>\n<|im_start|>assistant\n{{assistant_reply_0}}<|im_end|>\n<|im_start|>user\n{{user_message_1}}<|im_end|>\n<|im_start|>assistant\n{{assistant_reply_1}}<|im_end|>\n
+
+
+

jinja template
+[Reference]

+
{{ bos_token }}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}
+
+
+

Filled Example

+
<s><|im_start|>system\nYou are a chatbot developed by LMFlow team.<|im_end|>\n<|im_start|>user\nWho are you?<|im_end|>\n<|im_start|>assistant\nI am a chatbot developed by LMFlow team.<|im_end|>\n<|im_start|>user\nHow old are you?<|im_end|>\n<|im_start|>assistant\nI don't age like humans do. I exist as a piece of software, so I don't have a concept of age in the traditional sense.<|im_end|>\n
+
+
+
+
+

Llama-2#

+

With a system message

+
<s>[INST] <<SYS>>\n{{system_message}}\n<</SYS>>\n\n{{user_message_0}} [/INST]
+
+
+

Without a system message

+
<s>[INST] {{user_message_0}} [/INST]
+
+
+

A complete conversation

+
<s>[INST] <<SYS>>\n{{system_message}}\n<</SYS>>\n\n{{user_message_0}} [/INST] {{assistant_reply_0}}</s>
+
+
+

Multiple rounds

+
<s>[INST] <<SYS>>\n{{system_message}}\n<</SYS>>\n\n{{user_message_0}} [/INST] {{assistant_reply_0}}</s><s>[INST] {{user_message_1}} [/INST] {{assistant_reply_1}}</s>
+
+
+

jinja template
+[Reference]

+
{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<<SYS>>\\n' + system_message + '\\n<</SYS>>\\n\\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' %}{{ bos_token + '[INST] ' + content.strip() + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ ' '  + content.strip() + ' ' + eos_token }}{% endif %}{% endfor %}
+
+
+

Filled Example

+
<s>[INST] <<SYS>>\nYou are a chatbot developed by LMFlow team.\n<</SYS>>\n\nWho are you? [/INST] I am a chatbot developed by LMFlow team.</s><s>[INST] How old are you? [/INST] I don't age like humans do. I exist as a piece of software, so I don't have a concept of age in the traditional sense.</s>
+
+
+
+
+

Llama-3#

+

With a system message

+
<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n{{system_message}}<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n{{user_message_0}}<|eot_id|>
+
+
+

Without a system message

+
<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n{{user_message_0}}<|eot_id|>
+
+
+

A complete conversation

+
<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n{{system_message}}<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n{{user_message_0}}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n{{assistant_reply_0}}<|eot_id|>
+
+
+

Multiple rounds

+
<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n{{system_message}}<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n{{user_message_0}}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n{{assistant_reply_0}}<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n{{user_message_1}}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n{{assistant_reply_1}}<|eot_id|>
+
+
+

jinja template
+[Reference]

+
{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}
+
+
+

Filled Example

+
<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nYou are a chatbot developed by LMFlow team.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWho are you?<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\nI am a chatbot developed by LMFlow team.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nHow old are you?<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\nI don't age like humans do. I exist as a piece of software, so I don't have a concept of age in the traditional sense.<|eot_id|>
+
+
+
+
+

Mixtral 8x22B#

+
+

Work in Progress

+

This template is not yet preset in LMFlow. We are working on it and will update it soon.

+
+
+

NOTICE

+

The conversation template for Mixtral 8x22B is slightly different from the template for Mixtral 8x7B.

+
+

jinja template
+[Reference]

+
{{bos_token}}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ ' [INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ ' ' + message['content'] + ' ' + eos_token}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}
+
+
+
+
+

Mixtral 8x7B#

+
+

Work in Progress

+

This template is not yet preset in LMFlow. We are working on it and will update it soon.

+
+
+

NOTICE

+

The conversation template for Mixtral 8x7B is slightly different from the template for Mixtral 8x22B.

+
+

jinja template
+[Reference]

+
{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}
+
+
+
+
+

Phi-3#

+

With a system message

+
<s><|system|>\n{{system_message}}<|end|>\n<|user|>\n{{user_message_0}}<|end|>\n<|endoftext|>
+
+
+

Without a system message

+
<s><|user|>\n{{user_message_0}}<|end|>\n<|endoftext|>
+
+
+

A complete conversation

+
<s><|system|>\n{{system_message}}<|end|>\n<|user|>\n{{user_message_0}}<|end|>\n<|assistant|>\n{{assistant_reply_0}}<|end|>\n<|endoftext|>
+
+
+

Multiple rounds

+
<s><|system|>\n{{system_message}}<|end|>\n<|user|>\n{{user_message_0}}<|end|>\n<|assistant|>\n{{assistant_reply_0}}<|end|>\n<|user|>\n{{user_message_1}}<|end|>\n<|assistant|>\n{{assistant_reply_1}}<|end|>\n<|endoftext|>
+
+
+

jinja template
+[Reference]

+
{{ bos_token }}{% for message in messages %}{{'<|' + message['role'] + '|>' + '\n' + message['content'] + '<|end|>\n' }}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>\n' }}{% else %}{{ eos_token }}{% endif %}
+
+
+

Filled Example

+
<s><|system|>\nYou are a chatbot developed by LMFlow team.<|end|>\n<|user|>\nWho are you?<|end|>\n<|assistant|>\nI am a chatbot developed by LMFlow team.<|end|>\n<|user|>\nHow old are you?<|end|>\n<|assistant|>\nI don't age like humans do. I exist as a piece of software, so I don't have a concept of age in the traditional sense.<|end|>\n<|endoftext|>
+
+
+
+
+

Qwen-2#

+

(Also Qwen-1.5)

+

With a system message

+
<|im_start|>system\n{{system_message}}<|im_end|>\n<|im_start|>user\n{{user_message_0}}<|im_end|>\n
+
+
+

Without a system message

+
<|im_start|>user\n{{user_message_0}}<|im_end|>\n
+
+
+

A complete conversation

+
<|im_start|>system\n{{system_message}}<|im_end|>\n<|im_start|>user\n{{user_message_0}}<|im_end|>\n<|im_start|>assistant\n{{assistant_reply_0}}<|im_end|>\n
+
+
+

Multiple rounds

+
<|im_start|>system\n{{system_message}}<|im_end|>\n<|im_start|>user\n{{user_message_0}}<|im_end|>\n<|im_start|>assistant\n{{assistant_reply_0}}<|im_end|>\n<|im_start|>user\n{{user_message_1}}<|im_end|>\n<|im_start|>assistant\n{{assistant_reply_1}}<|im_end|>\n
+
+
+

jinja template
+[Reference]

+
{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}
+
+
+

Filled Example

+
<|im_start|>system\nYou are a chatbot developed by LMFlow team.<|im_end|>\n<|im_start|>user\nWho are you?<|im_end|>\n<|im_start|>assistant\nI am a chatbot developed by LMFlow team.<|im_end|>\n<|im_start|>user\nHow old are you?<|im_end|>\n<|im_start|>assistant\nI don't age like humans do. I exist as a piece of software, so I don't have a concept of age in the traditional sense.<|im_end|>\n
+
+
+
+
+

Yi#

+

With a system message

+
<|im_start|>system\n{{system_message}}<|im_end|>\n<|im_start|>user\n{{user_message_0}}<|im_end|>\n
+
+
+

Without a system message

+
<|im_start|>user\n{{user_message_0}}<|im_end|>\n
+
+
+

A complete conversation

+
<|im_start|>system\n{{system_message}}<|im_end|>\n<|im_start|>user\n{{user_message_0}}<|im_end|>\n<|im_start|>assistant\n{{assistant_reply_0}}<|im_end|>\n
+
+
+

Multiple rounds

+
<|im_start|>system\n{{system_message}}<|im_end|>\n<|im_start|>user\n{{user_message_0}}<|im_end|>\n<|im_start|>assistant\n{{assistant_reply_0}}<|im_end|>\n<|im_start|>user\n{{user_message_1}}<|im_end|>\n<|im_start|>assistant\n{{assistant_reply_1}}<|im_end|>\n
+
+
+

jinja template
+[Reference]

+
{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}
+
+
+

Filled Example

+
<|im_start|>system\nYou are a chatbot developed by LMFlow team.<|im_end|>\n<|im_start|>user\nWho are you?<|im_end|>\n<|im_start|>assistant\nI am a chatbot developed by LMFlow team.<|im_end|>\n<|im_start|>user\nHow old are you?<|im_end|>\n<|im_start|>assistant\nI don't age like humans do. I exist as a piece of software, so I don't have a concept of age in the traditional sense.<|im_end|>\n
+
+
+
+
+

Yi-1.5#

+

With a system message

+
{{system_message}}<|im_start|>user\n{{user_message_0}}<|im_end|>\n
+
+
+

Without a system message

+
<|im_start|>user\n{{user_message_0}}<|im_end|>\n
+
+
+

A complete conversation

+
{{system_message}}<|im_start|>user\n{{user_message_0}}<|im_end|>\n<|im_start|>assistant\n{{assistant_reply_0}}<|im_end|>\n
+
+
+

Multiple rounds

+
{{system_message}}<|im_start|>user\n{{user_message_0}}<|im_end|>\n<|im_start|>assistant\n{{assistant_reply_0}}<|im_end|>\n<|im_start|>user\n{{user_message_1}}<|im_end|>\n<|im_start|>assistant\n{{assistant_reply_1}}<|im_end|>\n
+
+
+

jinja template
+[Reference]

+
{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ system_message }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|im_start|>user\\n' + content + '<|im_end|>\\n<|im_start|>assistant\\n' }}{% elif message['role'] == 'assistant' %}{{ content + '<|im_end|>' + '\\n' }}{% endif %}{% endfor %}
+
+
+

Filled Example

+
You are a chatbot developed by LMFlow team.<|im_start|>user\nWho are you?<|im_end|>\n<|im_start|>assistant\nI am a chatbot developed by LMFlow team.<|im_end|>\n<|im_start|>user\nHow old are you?<|im_end|>\n<|im_start|>assistant\nI don't age like humans do. I exist as a piece of software, so I don't have a concept of age in the traditional sense.<|im_end|>\n
+
+
+
+
+

Zephyr#

+

With a system message

+
<|system|>\n{{system_message}}</s>\n<|user|>\n{{user_message_0}}</s>\n
+
+
+

Without a system message

+
<|user|>\n{{user_message_0}}</s>\n
+
+
+

A complete conversation

+
<|system|>\n{{system_message}}</s>\n<|user|>\n{{user_message_0}}</s>\n<|assistant|>\n{{assistant_reply_0}}</s>\n
+
+
+

Multiple rounds

+
<|system|>\n{{system_message}}</s>\n<|user|>\n{{user_message_0}}</s>\n<|assistant|>\n{{assistant_reply_0}}</s>\n<|user|>\n{{user_message_1}}</s>\n<|assistant|>\n{{assistant_reply_1}}</s>\n
+
+
+

jinja template
+[Reference]

+
{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n'  + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}
+
+
+

Filled Example

+
<|system|>\nYou are a chatbot developed by LMFlow team.</s>\n<|user|>\nWho are you?</s>\n<|assistant|>\nI am a chatbot developed by LMFlow team.</s>\n<|user|>\nHow old are you?</s>\n<|assistant|>\nI don't age like humans do. I exist as a piece of software, so I don't have a concept of age in the traditional sense.</s>\n
+
+
+
+

LMFlow#

+

An extensible, convenient, and efficient toolbox for finetuning large machine learning models, designed to be user-friendly, speedy and reliable, and accessible to the entire community.

  • Extensible: Support common backbones (LLaMA, Galactica, GPT-2, etc.)

  • Light-Weight: Extremely few parameters with LoRA (LLaMA 33B: only 25MB storage)

  • Task-Oriented: Comparable with ChatGPT on 7B/33B models.

  • Open: The whole pipeline (data, models, tuning, inference) is open-source.

Introduction#

+

The remarkable achievements of large foundation models, such as the expansive language models, have demonstrated their exceptional capacity to attain human-like intelligence that surpasses conventional methods. Despite their growing accessibility, these models still require fine-tuning to cater to specific tasks while maintaining their overall AI competency. We are pleased to introduce our lightweight toolkit, which features thoughtfully designed and easily scalable APIs. This tool simplifies the process of fine-tuning and inference of publicly available foundation models to maximize their effectiveness.

+

We have thoroughly tested this toolkit and are pleased to make it available on GitHub.

+
+
+

Features#

+
+

Task Tuning#

+

The goal of Task Tuning is to enhance a language model’s proficiency in a particular field, such as medicine or mathematics. By doing so, the model acquires domain-specific information, allowing it to adapt better to the target subject matter.

+

For instance, if a medical dataset is used for task tuning, the language model can gain medical knowledge that can be applied to other medical datasets.

+

To emphasize its significance, we applied task tuning to LLaMA models on the PubMedQA and MedMCQA datasets and evaluated their performance. We observed significant improvements on both in-domain (PubMedQA, MedMCQA) and out-of-domain (MedQA-USMLE) datasets.

+
| | PubMedQA | MedQA-USMLE | MedMCQA | Average |
|---|---|---|---|---|
| Human (pass) | 60.0 | 50.0 | - | - |
| Human (expert) | 78.0 | 87.0 | 90.0 | 85.0 |
| InstructGPT 175B | 73.2 | 46.0 | 44.0 | 54.4 |
| ChatGPT | 63.9 | 57.0 | 44.7 | 55.2 |
| LLaMA 7B | 5.2 | 27.1 | 24.3 | 18.9 |
| LLaMA 33B | 1.8 | 43.4 | 30.3 | 25.2 |
| Task-tuned LLaMA 7B (Full) | 75.1 | 44.5 | 49.9 | 56.5 |
| Task-tuned LLaMA 33B (LoRA) | 74 | 51.3 | 50.2 | 58.5 |

+
+

Moreover, we also test MMLU performance to further verify the out-of-domain robustness of the Task Tuning technique.

+
| MMLU task | anatomy | clinical knowledge | college biology | college medicine | medical genetics | professional medicine |
|---|---|---|---|---|---|---|
| LLaMA 33B | 39.2 | 40.3 | 44.4 | 32.9 | 36 | 43.0 |
| Galactica 30B | 32.5 | 26 | 30.5 | 25.4 | 39 | 23.1 |
| Galactica 120B | 58.5 | 59.2 | 68.7 | 57.2 | 68.0 | 59.6 |
| OPT 175B | 28.9 | 21.9 | 30.6 | - | 35.0 | 27.9 |
| BLOOM 176B | 37 | 29.8 | 28.5 | - | 36.0 | 25.4 |
| Gopher 280B | 56.3 | 67.2 | 70.8 | 60.1 | 69.0 | 64.0 |
| GPT3.5 175B | 56.3 | 69.8 | 72.2 | 61.3 | 70 | 70.2 |
| Task-tuned LLaMA 33B (LoRA) | 51.8 | 65.2 | 70.1 | 58.3 | 65.6 | 66.5 |

+
+
+
+

Instruction Tuning#

+

Instruction Tuning is a technique used to improve the performance of language models by training them to follow natural language commands or instructions. This includes positive or negative examples, prompts, constraints, and other elements that are commonly found in human language. The main goal of instruction-tuning is to improve the model’s ability to perform well on multiple tasks and to generalize more effectively to new or unseen tasks. This is achieved by teaching the model to understand and incorporate various language cues and constraints that are relevant to the task at hand. Instruction-tuning is a powerful technique that is widely used in natural language processing, machine learning, and related areas. By improving the ability of language models to understand and follow natural language commands, this approach can help to unlock new levels of performance and productivity in a wide range of applications.

+

Some examples are provided in the full example list, which is saved as a JSONL file.


Installation#

+

This package can be installed from source with the following commands:

+
git clone https://github.com/OptimalScale/LMFlow.git
cd LMFlow
conda create -n lmflow python=3.9 -y
conda activate lmflow
conda install mpi4py
pip install -e .
+
+

Checkpoints#

+

We have prepared tuned LLaMA models (both task tuning and instruction tuning).

+

Refer to README.

+
+
+


Vision#

+

Hello there! We are excited to announce the upcoming release of our code repository that includes a complete LLM training process, enabling users to quickly build their own language models and train them effectively.

+

Our code repository is not just a simple model; it includes the complete training workflow, model optimization, and testing tools. You can use it to build various types of language models, including conversation models, question-answering models, and text generation models, among others.

+

Moreover, we aim to create an open and democratic LLM sharing platform where people can share their checkpoints and experiences to collectively improve the skills of the community. We welcome anyone who is interested in LLM to participate and join us in building an open and friendly community!

+

Whether you are a beginner or an expert, we believe that you can benefit from this platform. Let’s work together to build a vibrant and innovative LLM community!

+


+
+
+

Citation#

+
@misc{lmflow,
  author = {Shizhe Diao and Rui Pan and Hanze Dong and KaShun Shum and Jipeng Zhang and Wei Xiong and Tong Zhang},
  title = {LMFlow: An Extensible Toolkit for Finetuning and Inference of Large Foundation Models},
  year = {2023},
  publisher = {GitHub},
  journal = {GitHub repository},
  howpublished = {\url{https://optimalscale.github.io/LMFlow/}},
}
+
+

Disclaimer#

+

This package aims to provide a streamlined and user-friendly pipeline for large model tuning. Its functionalities serve as a reference and are intended for use by the user. However, it is important to note that the responsibility for the preparation of the data and pretrained models lies solely with the user. This package does not guarantee the accuracy, completeness, applicability, or legality of the components from the user’s preparation. Users must be aware of and assume all risks and liabilities associated with the preparation of the models and data, and obtain legal, commercial, and technical advice before utilizing this package. The pipeline shall not be held responsible for any direct, indirect, special, incidental, or consequential damages resulting from the user’s improper preparation of the data and pretrained models.

+

It is also crucial to highlight that the results generated by the model are based on probabilistic models and not directly related to this pipeline. The accuracy, reliability, applicability, and legality of the results are not guaranteed by this pipeline. Therefore, users must also be aware of the risks and liabilities associated with the results and seek legal, commercial, and technical advice before relying on the model-generated outcomes. This pipeline shall not be accountable for any direct, indirect, special, incidental, or consequential damages resulting from the user’s reliance on the model-generated results.

+
+
+

Support#

+

If you need any help, please submit a GitHub issue.

+
+
+

"lmflow.optim.adabound": [[26, null]], "lmflow.optim.adadelta": [[27, null]], "lmflow.optim.adagrad": [[28, null]], "lmflow.optim.adam": [[29, null]], "lmflow.optim.adamax": [[30, null]], "lmflow.optim.adamp": [[31, null]], "lmflow.optim.adamw_schedule_free": [[32, null]], "lmflow.optim.adan": [[33, null]], "lmflow.optim.dummy": [[34, null]], "lmflow.optim.lamb": [[36, null]], "lmflow.optim.lars": [[37, null]], "lmflow.optim.nadam": [[38, null]], "lmflow.optim.novograd": [[39, null]], "lmflow.optim.optimizers": [[40, null]], "lmflow.optim.radam": [[41, null]], "lmflow.optim.sgd_schedule_free": [[42, null]], "lmflow.optim.sgdp": [[43, null]], "lmflow.optim.sophia": [[44, null]], "lmflow.optim.yogi": [[45, null]], "lmflow.pipeline": [[54, null]], "lmflow.pipeline.auto_pipeline": [[46, null]], "lmflow.pipeline.base_aligner": [[47, null]], "lmflow.pipeline.base_pipeline": [[48, null]], "lmflow.pipeline.base_tuner": [[49, null]], "lmflow.pipeline.dpo_aligner": [[50, null]], "lmflow.pipeline.dpov2_aligner": [[51, null]], "lmflow.pipeline.evaluator": [[52, null]], "lmflow.pipeline.finetuner": [[53, null]], "lmflow.pipeline.inferencer": [[55, null]], "lmflow.pipeline.iterative_dpo_aligner": [[56, null]], "lmflow.pipeline.raft_aligner": [[57, null]], "lmflow.pipeline.rm_inferencer": [[58, null]], "lmflow.pipeline.rm_tuner": [[59, null]], "lmflow.pipeline.utils": [[62, null]], "lmflow.pipeline.utils.dpov2_dataprocessor": [[60, null]], "lmflow.pipeline.utils.dpov2_trainer": [[61, null]], "lmflow.pipeline.utils.memory_safe_dpov2_align": [[63, null]], "lmflow.pipeline.utils.memory_safe_vllm_inference": [[64, null]], "lmflow.pipeline.utils.peft_trainer": [[65, null]], "lmflow.pipeline.utils.raft_trainer": [[66, null]], "lmflow.pipeline.utils.rm_dataprocessor": [[67, null]], "lmflow.pipeline.utils.rm_trainer": [[68, null]], "lmflow.pipeline.vllm_inferencer": [[69, null]], "lmflow.tokenization": [[72, null]], "lmflow.tokenization.hf_decoder_model": [[70, null]], "lmflow.tokenization.hf_text_regression_model": [[71, null]], "lmflow.utils": [[95, null]], "lmflow.utils.common": [[73, null]], "lmflow.utils.constants": [[74, null]], "lmflow.utils.conversation_template": [[81, null]], "lmflow.utils.conversation_template.base": [[75, null]], "lmflow.utils.conversation_template.chatglm": [[76, null]], "lmflow.utils.conversation_template.chatml": [[77, null]], "lmflow.utils.conversation_template.deepseek": [[78, null]], "lmflow.utils.conversation_template.fox": [[79, null]], "lmflow.utils.conversation_template.gemma": [[80, null]], "lmflow.utils.conversation_template.internlm": [[82, null]], "lmflow.utils.conversation_template.llama": [[83, null]], "lmflow.utils.conversation_template.phi": [[84, null]], "lmflow.utils.conversation_template.qwen": [[85, null]], "lmflow.utils.conversation_template.yi": [[86, null]], "lmflow.utils.conversation_template.zephyr": [[87, null]], "lmflow.utils.data_utils": [[88, null]], "lmflow.utils.flash_attention": [[92, null]], "lmflow.utils.flash_attention.bloom_flash_attention": [[89, null]], "lmflow.utils.flash_attention.gpt2_flash_attention": [[90, null]], "lmflow.utils.flash_attention.gpt_neo_flash_attention": [[91, null]], "lmflow.utils.flash_attention.llama_flash_attention": [[93, null]], "lmflow.utils.flash_attention.triton_flash_attention": [[94, null]], "lmflow.utils.llava_conversation_lib": [[96, null]], "lmflow.utils.model": [[97, null]], "lmflow.utils.multimodal": [[98, null]], "lmflow.utils.position_interpolation": [[99, null]], 
"lmflow.utils.position_interpolation.llama_rope_scaled_monkey_patch": [[100, null]], "lmflow.version": [[101, null]], "}": [[75, "id5"], [81, "id5"]]}, "docnames": ["about/authors", "about/changelog", "about/index", "autoapi/index", "autoapi/lmflow/args/index", "autoapi/lmflow/datasets/dataset/index", "autoapi/lmflow/datasets/index", "autoapi/lmflow/datasets/multi_modal_dataset/index", "autoapi/lmflow/index", "autoapi/lmflow/models/auto_model/index", "autoapi/lmflow/models/base_model/index", "autoapi/lmflow/models/decoder_model/index", "autoapi/lmflow/models/encoder_decoder_model/index", "autoapi/lmflow/models/hf_decoder_model/index", "autoapi/lmflow/models/hf_encoder_decoder_model/index", "autoapi/lmflow/models/hf_model_mixin/index", "autoapi/lmflow/models/hf_text_regression_model/index", "autoapi/lmflow/models/index", "autoapi/lmflow/models/interfaces/index", "autoapi/lmflow/models/interfaces/tunable/index", "autoapi/lmflow/models/regression_model/index", "autoapi/lmflow/models/text_regression_model/index", "autoapi/lmflow/models/vision2seq_model/index", "autoapi/lmflow/models/vision_encoder/clip_encoder/index", "autoapi/lmflow/models/vision_encoder/index", "autoapi/lmflow/optim/adabelief/index", "autoapi/lmflow/optim/adabound/index", "autoapi/lmflow/optim/adadelta/index", "autoapi/lmflow/optim/adagrad/index", "autoapi/lmflow/optim/adam/index", "autoapi/lmflow/optim/adamax/index", "autoapi/lmflow/optim/adamp/index", "autoapi/lmflow/optim/adamw_schedule_free/index", "autoapi/lmflow/optim/adan/index", "autoapi/lmflow/optim/dummy/index", "autoapi/lmflow/optim/index", "autoapi/lmflow/optim/lamb/index", "autoapi/lmflow/optim/lars/index", "autoapi/lmflow/optim/nadam/index", "autoapi/lmflow/optim/novograd/index", "autoapi/lmflow/optim/optimizers/index", "autoapi/lmflow/optim/radam/index", "autoapi/lmflow/optim/sgd_schedule_free/index", "autoapi/lmflow/optim/sgdp/index", "autoapi/lmflow/optim/sophia/index", "autoapi/lmflow/optim/yogi/index", "autoapi/lmflow/pipeline/auto_pipeline/index", "autoapi/lmflow/pipeline/base_aligner/index", "autoapi/lmflow/pipeline/base_pipeline/index", "autoapi/lmflow/pipeline/base_tuner/index", "autoapi/lmflow/pipeline/dpo_aligner/index", "autoapi/lmflow/pipeline/dpov2_aligner/index", "autoapi/lmflow/pipeline/evaluator/index", "autoapi/lmflow/pipeline/finetuner/index", "autoapi/lmflow/pipeline/index", "autoapi/lmflow/pipeline/inferencer/index", "autoapi/lmflow/pipeline/iterative_dpo_aligner/index", "autoapi/lmflow/pipeline/raft_aligner/index", "autoapi/lmflow/pipeline/rm_inferencer/index", "autoapi/lmflow/pipeline/rm_tuner/index", "autoapi/lmflow/pipeline/utils/dpov2_dataprocessor/index", "autoapi/lmflow/pipeline/utils/dpov2_trainer/index", "autoapi/lmflow/pipeline/utils/index", "autoapi/lmflow/pipeline/utils/memory_safe_dpov2_align/index", "autoapi/lmflow/pipeline/utils/memory_safe_vllm_inference/index", "autoapi/lmflow/pipeline/utils/peft_trainer/index", "autoapi/lmflow/pipeline/utils/raft_trainer/index", "autoapi/lmflow/pipeline/utils/rm_dataprocessor/index", "autoapi/lmflow/pipeline/utils/rm_trainer/index", "autoapi/lmflow/pipeline/vllm_inferencer/index", "autoapi/lmflow/tokenization/hf_decoder_model/index", "autoapi/lmflow/tokenization/hf_text_regression_model/index", "autoapi/lmflow/tokenization/index", "autoapi/lmflow/utils/common/index", "autoapi/lmflow/utils/constants/index", "autoapi/lmflow/utils/conversation_template/base/index", "autoapi/lmflow/utils/conversation_template/chatglm/index", "autoapi/lmflow/utils/conversation_template/chatml/index", 
"autoapi/lmflow/utils/conversation_template/deepseek/index", "autoapi/lmflow/utils/conversation_template/fox/index", "autoapi/lmflow/utils/conversation_template/gemma/index", "autoapi/lmflow/utils/conversation_template/index", "autoapi/lmflow/utils/conversation_template/internlm/index", "autoapi/lmflow/utils/conversation_template/llama/index", "autoapi/lmflow/utils/conversation_template/phi/index", "autoapi/lmflow/utils/conversation_template/qwen/index", "autoapi/lmflow/utils/conversation_template/yi/index", "autoapi/lmflow/utils/conversation_template/zephyr/index", "autoapi/lmflow/utils/data_utils/index", "autoapi/lmflow/utils/flash_attention/bloom_flash_attention/index", "autoapi/lmflow/utils/flash_attention/gpt2_flash_attention/index", "autoapi/lmflow/utils/flash_attention/gpt_neo_flash_attention/index", "autoapi/lmflow/utils/flash_attention/index", "autoapi/lmflow/utils/flash_attention/llama_flash_attention/index", "autoapi/lmflow/utils/flash_attention/triton_flash_attention/index", "autoapi/lmflow/utils/index", "autoapi/lmflow/utils/llava_conversation_lib/index", "autoapi/lmflow/utils/model/index", "autoapi/lmflow/utils/multimodal/index", "autoapi/lmflow/utils/position_interpolation/index", "autoapi/lmflow/utils/position_interpolation/llama_rope_scaled_monkey_patch/index", "autoapi/lmflow/version/index", "blogs/benchmark", "blogs/index", "examples/DATASETS", "examples/TASK_GUIDE", "examples/checkpoints", "examples/customize_conversation_template", "examples/finetuning", "examples/index", "examples/medical_finetune", "examples/raft", "examples/reward_modeling", "examples/supported_conversation_template", "index"], "envversion": {"sphinx": 62, "sphinx.domains.c": 3, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 9, "sphinx.domains.index": 1, "sphinx.domains.javascript": 3, "sphinx.domains.math": 2, "sphinx.domains.python": 4, "sphinx.domains.rst": 2, "sphinx.domains.std": 2, "sphinx.ext.todo": 2, "sphinx.ext.viewcode": 1}, "filenames": ["about/authors.md", "about/changelog.md", "about/index.md", "autoapi/index.rst", "autoapi/lmflow/args/index.rst", "autoapi/lmflow/datasets/dataset/index.rst", "autoapi/lmflow/datasets/index.rst", "autoapi/lmflow/datasets/multi_modal_dataset/index.rst", "autoapi/lmflow/index.rst", "autoapi/lmflow/models/auto_model/index.rst", "autoapi/lmflow/models/base_model/index.rst", "autoapi/lmflow/models/decoder_model/index.rst", "autoapi/lmflow/models/encoder_decoder_model/index.rst", "autoapi/lmflow/models/hf_decoder_model/index.rst", "autoapi/lmflow/models/hf_encoder_decoder_model/index.rst", "autoapi/lmflow/models/hf_model_mixin/index.rst", "autoapi/lmflow/models/hf_text_regression_model/index.rst", "autoapi/lmflow/models/index.rst", "autoapi/lmflow/models/interfaces/index.rst", "autoapi/lmflow/models/interfaces/tunable/index.rst", "autoapi/lmflow/models/regression_model/index.rst", "autoapi/lmflow/models/text_regression_model/index.rst", "autoapi/lmflow/models/vision2seq_model/index.rst", "autoapi/lmflow/models/vision_encoder/clip_encoder/index.rst", "autoapi/lmflow/models/vision_encoder/index.rst", "autoapi/lmflow/optim/adabelief/index.rst", "autoapi/lmflow/optim/adabound/index.rst", "autoapi/lmflow/optim/adadelta/index.rst", "autoapi/lmflow/optim/adagrad/index.rst", "autoapi/lmflow/optim/adam/index.rst", "autoapi/lmflow/optim/adamax/index.rst", "autoapi/lmflow/optim/adamp/index.rst", "autoapi/lmflow/optim/adamw_schedule_free/index.rst", "autoapi/lmflow/optim/adan/index.rst", "autoapi/lmflow/optim/dummy/index.rst", 
"autoapi/lmflow/optim/index.rst", "autoapi/lmflow/optim/lamb/index.rst", "autoapi/lmflow/optim/lars/index.rst", "autoapi/lmflow/optim/nadam/index.rst", "autoapi/lmflow/optim/novograd/index.rst", "autoapi/lmflow/optim/optimizers/index.rst", "autoapi/lmflow/optim/radam/index.rst", "autoapi/lmflow/optim/sgd_schedule_free/index.rst", "autoapi/lmflow/optim/sgdp/index.rst", "autoapi/lmflow/optim/sophia/index.rst", "autoapi/lmflow/optim/yogi/index.rst", "autoapi/lmflow/pipeline/auto_pipeline/index.rst", "autoapi/lmflow/pipeline/base_aligner/index.rst", "autoapi/lmflow/pipeline/base_pipeline/index.rst", "autoapi/lmflow/pipeline/base_tuner/index.rst", "autoapi/lmflow/pipeline/dpo_aligner/index.rst", "autoapi/lmflow/pipeline/dpov2_aligner/index.rst", "autoapi/lmflow/pipeline/evaluator/index.rst", "autoapi/lmflow/pipeline/finetuner/index.rst", "autoapi/lmflow/pipeline/index.rst", "autoapi/lmflow/pipeline/inferencer/index.rst", "autoapi/lmflow/pipeline/iterative_dpo_aligner/index.rst", "autoapi/lmflow/pipeline/raft_aligner/index.rst", "autoapi/lmflow/pipeline/rm_inferencer/index.rst", "autoapi/lmflow/pipeline/rm_tuner/index.rst", "autoapi/lmflow/pipeline/utils/dpov2_dataprocessor/index.rst", "autoapi/lmflow/pipeline/utils/dpov2_trainer/index.rst", "autoapi/lmflow/pipeline/utils/index.rst", "autoapi/lmflow/pipeline/utils/memory_safe_dpov2_align/index.rst", "autoapi/lmflow/pipeline/utils/memory_safe_vllm_inference/index.rst", "autoapi/lmflow/pipeline/utils/peft_trainer/index.rst", "autoapi/lmflow/pipeline/utils/raft_trainer/index.rst", "autoapi/lmflow/pipeline/utils/rm_dataprocessor/index.rst", "autoapi/lmflow/pipeline/utils/rm_trainer/index.rst", "autoapi/lmflow/pipeline/vllm_inferencer/index.rst", "autoapi/lmflow/tokenization/hf_decoder_model/index.rst", "autoapi/lmflow/tokenization/hf_text_regression_model/index.rst", "autoapi/lmflow/tokenization/index.rst", "autoapi/lmflow/utils/common/index.rst", "autoapi/lmflow/utils/constants/index.rst", "autoapi/lmflow/utils/conversation_template/base/index.rst", "autoapi/lmflow/utils/conversation_template/chatglm/index.rst", "autoapi/lmflow/utils/conversation_template/chatml/index.rst", "autoapi/lmflow/utils/conversation_template/deepseek/index.rst", "autoapi/lmflow/utils/conversation_template/fox/index.rst", "autoapi/lmflow/utils/conversation_template/gemma/index.rst", "autoapi/lmflow/utils/conversation_template/index.rst", "autoapi/lmflow/utils/conversation_template/internlm/index.rst", "autoapi/lmflow/utils/conversation_template/llama/index.rst", "autoapi/lmflow/utils/conversation_template/phi/index.rst", "autoapi/lmflow/utils/conversation_template/qwen/index.rst", "autoapi/lmflow/utils/conversation_template/yi/index.rst", "autoapi/lmflow/utils/conversation_template/zephyr/index.rst", "autoapi/lmflow/utils/data_utils/index.rst", "autoapi/lmflow/utils/flash_attention/bloom_flash_attention/index.rst", "autoapi/lmflow/utils/flash_attention/gpt2_flash_attention/index.rst", "autoapi/lmflow/utils/flash_attention/gpt_neo_flash_attention/index.rst", "autoapi/lmflow/utils/flash_attention/index.rst", "autoapi/lmflow/utils/flash_attention/llama_flash_attention/index.rst", "autoapi/lmflow/utils/flash_attention/triton_flash_attention/index.rst", "autoapi/lmflow/utils/index.rst", "autoapi/lmflow/utils/llava_conversation_lib/index.rst", "autoapi/lmflow/utils/model/index.rst", "autoapi/lmflow/utils/multimodal/index.rst", "autoapi/lmflow/utils/position_interpolation/index.rst", "autoapi/lmflow/utils/position_interpolation/llama_rope_scaled_monkey_patch/index.rst", 
"autoapi/lmflow/version/index.rst", "blogs/benchmark.md", "blogs/index.md", "examples/DATASETS.md", "examples/TASK_GUIDE.md", "examples/checkpoints.md", "examples/customize_conversation_template.md", "examples/finetuning.md", "examples/index.md", "examples/medical_finetune.md", "examples/raft.md", "examples/reward_modeling.md", "examples/supported_conversation_template.md", "index.md"], "indexentries": {"__call__() (lmflow.datasets.multi_modal_dataset.datacollatorforsuperviseddataset method)": [[7, "lmflow.datasets.multi_modal_dataset.DataCollatorForSupervisedDataset.__call__", false]], "__call__() (lmflow.pipeline.utils.dpov2_dataprocessor.preferencedatacollatorwithpadding method)": [[60, "lmflow.pipeline.utils.dpov2_dataprocessor.PreferenceDataCollatorWithPadding.__call__", false]], "__call__() (lmflow.pipeline.utils.rm_dataprocessor.rewarddatacollatorwithpadding method)": [[67, "lmflow.pipeline.utils.rm_dataprocessor.RewardDataCollatorWithPadding.__call__", false]], "__distributed_inference() (lmflow.pipeline.rm_inferencer.rewardmodelinferencer method)": [[58, "lmflow.pipeline.rm_inferencer.RewardModelInferencer.__distributed_inference", false]], "__filter_args() (lmflow.pipeline.iterative_dpo_aligner.iterativedpoaligner method)": [[56, "lmflow.pipeline.iterative_dpo_aligner.IterativeDPOAligner.__filter_args", false]], "__getitem__() (lmflow.datasets.custommultimodaldataset method)": [[6, "lmflow.datasets.CustomMultiModalDataset.__getitem__", false]], "__getitem__() (lmflow.datasets.multi_modal_dataset.custommultimodaldataset method)": [[7, "lmflow.datasets.multi_modal_dataset.CustomMultiModalDataset.__getitem__", false]], "__inference() (lmflow.models.hf_decoder_model.hfdecodermodel method)": [[13, "lmflow.models.hf_decoder_model.HFDecoderModel.__inference", false]], "__inference() (lmflow.models.hf_text_regression_model.hftextregressionmodel method)": [[16, "lmflow.models.hf_text_regression_model.HFTextRegressionModel.__inference", false]], "__inference() (lmflow.pipeline.rm_inferencer.rewardmodelinferencer method)": [[58, "lmflow.pipeline.rm_inferencer.RewardModelInferencer.__inference", false]], "__len__() (lmflow.datasets.custommultimodaldataset method)": [[6, "lmflow.datasets.CustomMultiModalDataset.__len__", false]], "__len__() (lmflow.datasets.dataset method)": [[6, "lmflow.datasets.Dataset.__len__", false]], "__len__() (lmflow.datasets.dataset.dataset method)": [[5, "lmflow.datasets.dataset.Dataset.__len__", false]], "__len__() (lmflow.datasets.multi_modal_dataset.custommultimodaldataset method)": [[7, "lmflow.datasets.multi_modal_dataset.CustomMultiModalDataset.__len__", false]], "__model_module_inject() (lmflow.models.hf_model_mixin.hfmodelmixin method)": [[15, "lmflow.models.hf_model_mixin.HFModelMixin.__model_module_inject", false]], "__post_init__() (lmflow.args.datasetarguments method)": [[4, "lmflow.args.DatasetArguments.__post_init__", false]], "__post_init__() (lmflow.args.inferencerarguments method)": [[4, "lmflow.args.InferencerArguments.__post_init__", false]], "__post_init__() (lmflow.args.modelarguments method)": [[4, "lmflow.args.ModelArguments.__post_init__", false]], "__post_init__() (lmflow.utils.conversation_template.base.conversationtemplate method)": [[75, "lmflow.utils.conversation_template.base.ConversationTemplate.__post_init__", false]], "__post_init__() (lmflow.utils.conversation_template.base.emptyformatter method)": [[75, "lmflow.utils.conversation_template.base.EmptyFormatter.__post_init__", false]], "__post_init__() 
(lmflow.utils.conversation_template.base.stringformatter method)": [[75, "lmflow.utils.conversation_template.base.StringFormatter.__post_init__", false]], "__post_init__() (lmflow.utils.conversation_template.base.templatecomponent method)": [[75, "lmflow.utils.conversation_template.base.TemplateComponent.__post_init__", false]], "__post_init__() (lmflow.utils.conversation_template.conversationtemplate method)": [[81, "lmflow.utils.conversation_template.ConversationTemplate.__post_init__", false]], "__post_process_model_output() (lmflow.pipeline.rm_inferencer.rewardmodelinferencer method)": [[58, "lmflow.pipeline.rm_inferencer.RewardModelInferencer.__post_process_model_output", false]], "__prepare_dtype() (lmflow.models.hf_model_mixin.hfmodelmixin method)": [[15, "lmflow.models.hf_model_mixin.HFModelMixin.__prepare_dtype", false]], "__prepare_inputs_for_inference() (lmflow.models.hf_decoder_model.hfdecodermodel method)": [[13, "lmflow.models.hf_decoder_model.HFDecoderModel.__prepare_inputs_for_inference", false]], "__prepare_inputs_for_vllm_inference() (lmflow.models.hf_decoder_model.hfdecodermodel method)": [[13, "lmflow.models.hf_decoder_model.HFDecoderModel.__prepare_inputs_for_vllm_inference", false]], "__prepare_model_config() (lmflow.models.hf_model_mixin.hfmodelmixin method)": [[15, "lmflow.models.hf_model_mixin.HFModelMixin.__prepare_model_config", false]], "__prepare_model_for_inference() (lmflow.models.hf_model_mixin.hfmodelmixin method)": [[15, "lmflow.models.hf_model_mixin.HFModelMixin.__prepare_model_for_inference", false]], "__prepare_model_for_training() (lmflow.models.hf_model_mixin.hfmodelmixin method)": [[15, "lmflow.models.hf_model_mixin.HFModelMixin.__prepare_model_for_training", false]], "__prepare_model_for_vllm_inference() (lmflow.models.hf_model_mixin.hfmodelmixin method)": [[15, "lmflow.models.hf_model_mixin.HFModelMixin.__prepare_model_for_vllm_inference", false]], "__prepare_model_post_process() (lmflow.models.hf_model_mixin.hfmodelmixin method)": [[15, "lmflow.models.hf_model_mixin.HFModelMixin.__prepare_model_post_process", false]], "__prepare_peft_config() (lmflow.models.hf_model_mixin.hfmodelmixin method)": [[15, "lmflow.models.hf_model_mixin.HFModelMixin.__prepare_peft_config", false]], "__prepare_quant_config() (lmflow.models.hf_model_mixin.hfmodelmixin method)": [[15, "lmflow.models.hf_model_mixin.HFModelMixin.__prepare_quant_config", false]], "__prepare_tokenizer() (lmflow.models.hf_model_mixin.hfmodelmixin method)": [[15, "lmflow.models.hf_model_mixin.HFModelMixin.__prepare_tokenizer", false]], "__prepare_training_args() (lmflow.pipeline.dpov2_aligner.dpov2aligner method)": [[51, "lmflow.pipeline.dpov2_aligner.DPOv2Aligner.__prepare_training_args", false]], "__repr__() (lmflow.utils.conversation_template.base.templatecomponent method)": [[75, "lmflow.utils.conversation_template.base.TemplateComponent.__repr__", false]], "__setstate__() (lmflow.optim.adabelief.adabelief method)": [[25, "lmflow.optim.adabelief.AdaBelief.__setstate__", false]], "__setstate__() (lmflow.optim.adabound.adabound method)": [[26, "lmflow.optim.adabound.AdaBound.__setstate__", false]], "__setstate__() (lmflow.optim.adamax.adamax method)": [[30, "lmflow.optim.adamax.Adamax.__setstate__", false]], "__setstate__() (lmflow.optim.adan.adan method)": [[33, "lmflow.optim.adan.Adan.__setstate__", false]], "__setstate__() (lmflow.optim.lars.lars method)": [[37, "lmflow.optim.lars.LARS.__setstate__", false]], "__setstate__() (lmflow.optim.nadam.nadam method)": [[38, 
"lmflow.optim.nadam.NAdam.__setstate__", false]], "__setstate__() (lmflow.optim.novograd.novograd method)": [[39, "lmflow.optim.novograd.NovoGrad.__setstate__", false]], "__setstate__() (lmflow.optim.radam.radam method)": [[41, "lmflow.optim.radam.RAdam.__setstate__", false]], "__setstate__() (lmflow.optim.sophia.sophiag method)": [[44, "lmflow.optim.sophia.SophiaG.__setstate__", false]], "__str__() (lmflow.utils.conversation_template.base.templatecomponent method)": [[75, "lmflow.utils.conversation_template.base.TemplateComponent.__str__", false]], "__version__ (in module lmflow)": [[8, "lmflow.__version__", false]], "__version__ (in module lmflow.version)": [[101, "lmflow.version.__version__", false]], "__vllm_inference() (lmflow.models.hf_decoder_model.hfdecodermodel method)": [[13, "lmflow.models.hf_decoder_model.HFDecoderModel.__vllm_inference", false]], "__vllm_inference() (lmflow.models.hf_text_regression_model.hftextregressionmodel method)": [[16, "lmflow.models.hf_text_regression_model.HFTextRegressionModel.__vllm_inference", false]], "__vllm_inference() (lmflow.pipeline.rm_inferencer.rewardmodelinferencer method)": [[58, "lmflow.pipeline.rm_inferencer.RewardModelInferencer.__vllm_inference", false]], "_activated (lmflow.models.hf_model_mixin.hfmodelmixin attribute)": [[15, "lmflow.models.hf_model_mixin.HFModelMixin._activated", false]], "_add_sm_patterns_to_gitignore() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer._add_sm_patterns_to_gitignore", false]], "_align_single_iteration() (lmflow.pipeline.iterative_dpo_aligner.iterativedpoaligner method)": [[56, "lmflow.pipeline.iterative_dpo_aligner.IterativeDPOAligner._align_single_iteration", false]], "_attn() (in module lmflow.utils.flash_attention.gpt_neo_flash_attention)": [[91, "lmflow.utils.flash_attention.gpt_neo_flash_attention._attn", false]], "_bwd_kernel() (in module lmflow.utils.flash_attention.triton_flash_attention)": [[94, "lmflow.utils.flash_attention.triton_flash_attention._bwd_kernel", false]], "_bwd_kernel_one_col_block() (in module lmflow.utils.flash_attention.triton_flash_attention)": [[94, "lmflow.utils.flash_attention.triton_flash_attention._bwd_kernel_one_col_block", false]], "_bwd_preprocess_do_o_dot() (in module lmflow.utils.flash_attention.triton_flash_attention)": [[94, "lmflow.utils.flash_attention.triton_flash_attention._bwd_preprocess_do_o_dot", false]], "_bwd_store_dk_dv() (in module lmflow.utils.flash_attention.triton_flash_attention)": [[94, "lmflow.utils.flash_attention.triton_flash_attention._bwd_store_dk_dv", false]], "_calc_response_lengths() (lmflow.pipeline.dpov2_aligner.dpov2aligner method)": [[51, "lmflow.pipeline.dpov2_aligner.DPOv2Aligner._calc_response_lengths", false]], "_calc_reward_with_length_penalty() (lmflow.pipeline.dpov2_aligner.dpov2aligner method)": [[51, "lmflow.pipeline.dpov2_aligner.DPOv2Aligner._calc_reward_with_length_penalty", false]], "_channel_view() (lmflow.optim.adamp.adamp static method)": [[31, "lmflow.optim.adamp.AdamP._channel_view", false]], "_channel_view() (lmflow.optim.sgdp.sgdp static method)": [[43, "lmflow.optim.sgdp.SGDP._channel_view", false]], "_check_data_format() (lmflow.datasets.dataset method)": [[6, "lmflow.datasets.Dataset._check_data_format", false]], "_check_data_format() (lmflow.datasets.dataset.dataset method)": [[5, "lmflow.datasets.dataset.Dataset._check_data_format", false]], "_check_if_alignable() (lmflow.pipeline.base_aligner.basealigner method)": [[47, 
"lmflow.pipeline.base_aligner.BaseAligner._check_if_alignable", false]], "_check_if_tunable() (lmflow.pipeline.base_tuner.basetuner method)": [[49, "lmflow.pipeline.base_tuner.BaseTuner._check_if_tunable", false]], "_clean_text() (lmflow.pipeline.raft_aligner.raftaligner method)": [[57, "lmflow.pipeline.raft_aligner.RaftAligner._clean_text", false]], "_cosine_similarity() (lmflow.optim.adamp.adamp static method)": [[31, "lmflow.optim.adamp.AdamP._cosine_similarity", false]], "_cosine_similarity() (lmflow.optim.sgdp.sgdp static method)": [[43, "lmflow.optim.sgdp.SGDP._cosine_similarity", false]], "_discard_sample() (lmflow.pipeline.raft_aligner.raftaligner method)": [[57, "lmflow.pipeline.raft_aligner.RaftAligner._discard_sample", false]], "_distributed_inference() (lmflow.pipeline.vllm_inferencer.vllminferencer method)": [[69, "lmflow.pipeline.vllm_inferencer.VLLMInferencer._distributed_inference", false]], "_do_reward_model_inference() (lmflow.pipeline.iterative_dpo_aligner.iterativedpoaligner method)": [[56, "lmflow.pipeline.iterative_dpo_aligner.IterativeDPOAligner._do_reward_model_inference", false]], "_do_single_dpo_align() (lmflow.pipeline.iterative_dpo_aligner.iterativedpoaligner method)": [[56, "lmflow.pipeline.iterative_dpo_aligner.IterativeDPOAligner._do_single_dpo_align", false]], "_do_target_model_inference() (lmflow.pipeline.iterative_dpo_aligner.iterativedpoaligner method)": [[56, "lmflow.pipeline.iterative_dpo_aligner.IterativeDPOAligner._do_target_model_inference", false]], "_encode() (lmflow.utils.conversation_template.base.conversationtemplate method)": [[75, "lmflow.utils.conversation_template.base.ConversationTemplate._encode", false]], "_encode() (lmflow.utils.conversation_template.conversationtemplate method)": [[81, "lmflow.utils.conversation_template.ConversationTemplate._encode", false]], "_encode() (lmflow.utils.conversation_template.llama.llama2conversationtemplate method)": [[83, "lmflow.utils.conversation_template.llama.Llama2ConversationTemplate._encode", false]], "_encode() (lmflow.utils.conversation_template.zephyr.zephyrconversationtemplate method)": [[87, "lmflow.utils.conversation_template.zephyr.ZephyrConversationTemplate._encode", false]], "_encode_template() (lmflow.utils.conversation_template.base.conversationtemplate method)": [[75, "lmflow.utils.conversation_template.base.ConversationTemplate._encode_template", false]], "_encode_template() (lmflow.utils.conversation_template.conversationtemplate method)": [[81, "lmflow.utils.conversation_template.ConversationTemplate._encode_template", false]], "_ensure_id_list() (lmflow.utils.conversation_template.base.conversationtemplate method)": [[75, "lmflow.utils.conversation_template.base.ConversationTemplate._ensure_id_list", false]], "_ensure_id_list() (lmflow.utils.conversation_template.conversationtemplate method)": [[81, "lmflow.utils.conversation_template.ConversationTemplate._ensure_id_list", false]], "_evaluate_acc_with_accelerator() (lmflow.pipeline.evaluator.evaluator method)": [[52, "lmflow.pipeline.evaluator.Evaluator._evaluate_acc_with_accelerator", false]], "_evaluate_acc_with_deepspeed() (lmflow.pipeline.evaluator.evaluator method)": [[52, "lmflow.pipeline.evaluator.Evaluator._evaluate_acc_with_deepspeed", false]], "_evaluate_nll() (lmflow.pipeline.evaluator.evaluator method)": [[52, "lmflow.pipeline.evaluator.Evaluator._evaluate_nll", false]], "_evaluate_ppl() (lmflow.pipeline.evaluator.evaluator method)": [[52, "lmflow.pipeline.evaluator.Evaluator._evaluate_ppl", false]], 
"_flash_attn_backward() (in module lmflow.utils.flash_attention.triton_flash_attention)": [[94, "lmflow.utils.flash_attention.triton_flash_attention._flash_attn_backward", false]], "_flash_attn_forward() (in module lmflow.utils.flash_attention.triton_flash_attention)": [[94, "lmflow.utils.flash_attention.triton_flash_attention._flash_attn_forward", false]], "_fwd_kernel() (in module lmflow.utils.flash_attention.triton_flash_attention)": [[94, "lmflow.utils.flash_attention.triton_flash_attention._fwd_kernel", false]], "_gather_and_numpify() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer._gather_and_numpify", false]], "_get_batch_dataset_local() (lmflow.pipeline.raft_aligner.raftaligner method)": [[57, "lmflow.pipeline.raft_aligner.RaftAligner._get_batch_dataset_local", false]], "_get_batch_dataset_top() (lmflow.pipeline.raft_aligner.raftaligner method)": [[57, "lmflow.pipeline.raft_aligner.RaftAligner._get_batch_dataset_top", false]], "_get_collator_with_removed_columns() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer._get_collator_with_removed_columns", false]], "_get_eval_sampler() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer._get_eval_sampler", false]], "_get_output_dir() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer._get_output_dir", false]], "_get_train_sampler() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer._get_train_sampler", false]], "_hp_search_setup() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer._hp_search_setup", false]], "_inference() (lmflow.pipeline.rm_inferencer.rewardmodelinferencer method)": [[58, "lmflow.pipeline.rm_inferencer.RewardModelInferencer._inference", false]], "_inference() (lmflow.pipeline.vllm_inferencer.vllminferencer method)": [[69, "lmflow.pipeline.vllm_inferencer.VLLMInferencer._inference", false]], "_initialize_trainer() (lmflow.pipeline.dpo_aligner.dpoaligner method)": [[50, "lmflow.pipeline.dpo_aligner.DPOAligner._initialize_trainer", false]], "_initialize_trainer() (lmflow.pipeline.raft_aligner.raftaligner method)": [[57, "lmflow.pipeline.raft_aligner.RaftAligner._initialize_trainer", false]], "_inner_training_loop() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer._inner_training_loop", false]], "_is_native_cpu_amp_available (in module lmflow.pipeline.utils.raft_trainer)": [[66, "lmflow.pipeline.utils.raft_trainer._is_native_cpu_amp_available", false]], "_issue_warnings_after_load() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer._issue_warnings_after_load", false]], "_layer_view() (lmflow.optim.adamp.adamp static method)": [[31, "lmflow.optim.adamp.AdamP._layer_view", false]], "_layer_view() (lmflow.optim.sgdp.sgdp static method)": [[43, "lmflow.optim.sgdp.SGDP._layer_view", false]], "_load_best_model() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer._load_best_model", false]], "_load_dataset() (lmflow.pipeline.dpo_aligner.dpoaligner method)": [[50, "lmflow.pipeline.dpo_aligner.DPOAligner._load_dataset", false]], "_load_dataset() (lmflow.pipeline.raft_aligner.raftaligner 
method)": [[57, "lmflow.pipeline.raft_aligner.RaftAligner._load_dataset", false]], "_load_from_checkpoint() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer._load_from_checkpoint", false]], "_load_input_dataset() (lmflow.pipeline.raft_aligner.raftaligner method)": [[57, "lmflow.pipeline.raft_aligner.RaftAligner._load_input_dataset", false]], "_load_optimizer_and_scheduler() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer._load_optimizer_and_scheduler", false]], "_load_rng_state() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer._load_rng_state", false]], "_loggers_initialized (lmflow.pipeline.utils.raft_trainer.rafttrainer attribute)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer._loggers_initialized", false]], "_match() (lmflow.pipeline.evaluator.evaluator method)": [[52, "lmflow.pipeline.evaluator.Evaluator._match", false]], "_maybe_log_save_evaluate() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer._maybe_log_save_evaluate", false]], "_memory_tracker (lmflow.pipeline.utils.raft_trainer.rafttrainer attribute)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer._memory_tracker", false]], "_move_model_to_device() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer._move_model_to_device", false]], "_multi_tensor_adan() (in module lmflow.optim.adan)": [[33, "lmflow.optim.adan._multi_tensor_adan", false]], "_nested_gather() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer._nested_gather", false]], "_one_train() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer._one_train", false]], "_pad_across_processes() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer._pad_across_processes", false]], "_parse_dpo_aligner_args() (lmflow.pipeline.iterative_dpo_aligner.iterativedpoaligner method)": [[56, "lmflow.pipeline.iterative_dpo_aligner.IterativeDPOAligner._parse_dpo_aligner_args", false]], "_parse_reward_model_inference_args() (lmflow.pipeline.iterative_dpo_aligner.iterativedpoaligner method)": [[56, "lmflow.pipeline.iterative_dpo_aligner.IterativeDPOAligner._parse_reward_model_inference_args", false]], "_parse_target_model_inference_args() (lmflow.pipeline.iterative_dpo_aligner.iterativedpoaligner method)": [[56, "lmflow.pipeline.iterative_dpo_aligner.IterativeDPOAligner._parse_target_model_inference_args", false]], "_prepare_attn_mask() (in module lmflow.utils.flash_attention.bloom_flash_attention)": [[89, "lmflow.utils.flash_attention.bloom_flash_attention._prepare_attn_mask", false]], "_prepare_decoder_attention_mask() (in module lmflow.utils.flash_attention.gpt2_flash_attention)": [[90, "lmflow.utils.flash_attention.gpt2_flash_attention._prepare_decoder_attention_mask", false]], "_prepare_decoder_attention_mask() (in module lmflow.utils.flash_attention.llama_flash_attention)": [[93, "lmflow.utils.flash_attention.llama_flash_attention._prepare_decoder_attention_mask", false]], "_prepare_input() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer._prepare_input", false]], "_prepare_inputs() 
(lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer._prepare_inputs", false]], "_projection() (lmflow.optim.adamp.adamp method)": [[31, "lmflow.optim.adamp.AdamP._projection", false]], "_projection() (lmflow.optim.sgdp.sgdp method)": [[43, "lmflow.optim.sgdp.SGDP._projection", false]], "_push_from_checkpoint() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer._push_from_checkpoint", false]], "_remove_unused_columns() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer._remove_unused_columns", false]], "_report_to_hp_search() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer._report_to_hp_search", false]], "_rotate_checkpoints() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer._rotate_checkpoints", false]], "_sampling_paired_idx_from_rewards() (lmflow.pipeline.dpov2_aligner.dpov2aligner method)": [[51, "lmflow.pipeline.dpov2_aligner.DPOv2Aligner._sampling_paired_idx_from_rewards", false]], "_sampling_paired_idx_from_rewards_fast() (lmflow.pipeline.dpov2_aligner.dpov2aligner method)": [[51, "lmflow.pipeline.dpov2_aligner.DPOv2Aligner._sampling_paired_idx_from_rewards_fast", false]], "_save() (lmflow.pipeline.utils.peft_trainer.peftsavingcallback method)": [[65, "lmflow.pipeline.utils.peft_trainer.PeftSavingCallback._save", false]], "_save() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer._save", false]], "_save_checkpoint() (lmflow.pipeline.utils.peft_trainer.pefttrainer method)": [[65, "lmflow.pipeline.utils.peft_trainer.PeftTrainer._save_checkpoint", false]], "_save_checkpoint() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer._save_checkpoint", false]], "_save_tpu() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer._save_tpu", false]], "_set_signature_columns_if_needed() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer._set_signature_columns_if_needed", false]], "_signature_columns (lmflow.pipeline.utils.raft_trainer.rafttrainer attribute)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer._signature_columns", false]], "_single_tensor_adan() (in module lmflow.optim.adan)": [[33, "lmflow.optim.adan._single_tensor_adan", false]], "_sorted_checkpoints() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer._sorted_checkpoints", false]], "_train_batch_size (lmflow.pipeline.utils.raft_trainer.rafttrainer attribute)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer._train_batch_size", false]], "_tune_save_checkpoint() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer._tune_save_checkpoint", false]], "_wrap_model() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer._wrap_model", false]], "accelerate_config_file (lmflow.args.dpov2alignerarguments attribute)": [[4, "lmflow.args.DPOv2AlignerArguments.accelerate_config_file", false]], "activate_model_for_inference() (lmflow.models.hf_model_mixin.hfmodelmixin method)": [[15, 
"lmflow.models.hf_model_mixin.HFModelMixin.activate_model_for_inference", false]], "adabelief (class in lmflow.optim.adabelief)": [[25, "lmflow.optim.adabelief.AdaBelief", false]], "adabelief (lmflow.args.optimizernames attribute)": [[4, "lmflow.args.OptimizerNames.ADABELIEF", false]], "adabound (class in lmflow.optim.adabound)": [[26, "lmflow.optim.adabound.AdaBound", false]], "adabound (lmflow.args.optimizernames attribute)": [[4, "lmflow.args.OptimizerNames.ADABOUND", false]], "adadelta (class in lmflow.optim.adadelta)": [[27, "lmflow.optim.adadelta.Adadelta", false]], "adadelta (lmflow.args.optimizernames attribute)": [[4, "lmflow.args.OptimizerNames.ADADELTA", false]], "adagrad (class in lmflow.optim.adagrad)": [[28, "lmflow.optim.adagrad.AdaGrad", false]], "adagrad (lmflow.args.optimizernames attribute)": [[4, "lmflow.args.OptimizerNames.ADAGRAD", false]], "adam (class in lmflow.optim.adam)": [[29, "lmflow.optim.adam.Adam", false]], "adam (lmflow.args.optimizernames attribute)": [[4, "lmflow.args.OptimizerNames.ADAM", false]], "adam (lmflow.optim.lamb.lamb attribute)": [[36, "lmflow.optim.lamb.Lamb.adam", false]], "adamax (class in lmflow.optim.adamax)": [[30, "lmflow.optim.adamax.Adamax", false]], "adamax (lmflow.args.optimizernames attribute)": [[4, "lmflow.args.OptimizerNames.ADAMAX", false]], "adamp (class in lmflow.optim.adamp)": [[31, "lmflow.optim.adamp.AdamP", false]], "adamp (lmflow.args.optimizernames attribute)": [[4, "lmflow.args.OptimizerNames.ADAMP", false]], "adamw_schedule_free (lmflow.args.optimizernames attribute)": [[4, "lmflow.args.OptimizerNames.ADAMW_SCHEDULE_FREE", false]], "adamwschedulefree (class in lmflow.optim.adamw_schedule_free)": [[32, "lmflow.optim.adamw_schedule_free.AdamWScheduleFree", false]], "adan (class in lmflow.optim.adan)": [[33, "lmflow.optim.adan.Adan", false]], "adan (lmflow.args.optimizernames attribute)": [[4, "lmflow.args.OptimizerNames.ADAN", false]], "adapt_llava_model_to_lmflow_type() (in module lmflow.utils.multimodal)": [[98, "lmflow.utils.multimodal.adapt_llava_model_to_lmflow_type", false]], "add_callback() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.add_callback", false]], "add_dataclass_attr_prefix() (in module lmflow.utils.common)": [[73, "lmflow.utils.common.add_dataclass_attr_prefix", false]], "add_special_starter() (lmflow.utils.conversation_template.base.conversationtemplate method)": [[75, "lmflow.utils.conversation_template.base.ConversationTemplate.add_special_starter", false]], "add_special_starter() (lmflow.utils.conversation_template.conversationtemplate method)": [[81, "lmflow.utils.conversation_template.ConversationTemplate.add_special_starter", false]], "add_special_stopper() (lmflow.utils.conversation_template.base.conversationtemplate method)": [[75, "lmflow.utils.conversation_template.base.ConversationTemplate.add_special_stopper", false]], "add_special_stopper() (lmflow.utils.conversation_template.conversationtemplate method)": [[81, "lmflow.utils.conversation_template.ConversationTemplate.add_special_stopper", false]], "additional_stop_token_ids (lmflow.args.inferencerarguments attribute)": [[4, "lmflow.args.InferencerArguments.additional_stop_token_ids", false]], "align() (lmflow.pipeline.base_aligner.basealigner method)": [[47, "lmflow.pipeline.base_aligner.BaseAligner.align", false]], "align() (lmflow.pipeline.dpo_aligner.dpoaligner method)": [[50, "lmflow.pipeline.dpo_aligner.DPOAligner.align", false]], "align() 
(lmflow.pipeline.dpov2_aligner.dpov2aligner method)": [[51, "lmflow.pipeline.dpov2_aligner.DPOv2Aligner.align", false]], "align() (lmflow.pipeline.dpov2_aligner.memorysafedpov2aligner method)": [[51, "lmflow.pipeline.dpov2_aligner.MemorySafeDPOv2Aligner.align", false]], "align() (lmflow.pipeline.iterative_dpo_aligner.iterativedpoaligner method)": [[56, "lmflow.pipeline.iterative_dpo_aligner.IterativeDPOAligner.align", false]], "align() (lmflow.pipeline.raft_aligner.raftaligner method)": [[57, "lmflow.pipeline.raft_aligner.RaftAligner.align", false]], "aligner_args (lmflow.pipeline.dpo_aligner.dpoaligner attribute)": [[50, "lmflow.pipeline.dpo_aligner.DPOAligner.aligner_args", false]], "aligner_args (lmflow.pipeline.dpov2_aligner.dpov2aligner attribute)": [[51, "lmflow.pipeline.dpov2_aligner.DPOv2Aligner.aligner_args", false]], "aligner_args (lmflow.pipeline.dpov2_aligner.memorysafedpov2aligner attribute)": [[51, "lmflow.pipeline.dpov2_aligner.MemorySafeDPOv2Aligner.aligner_args", false]], "aligner_args (lmflow.pipeline.iterative_dpo_aligner.iterativedpoaligner attribute)": [[56, "lmflow.pipeline.iterative_dpo_aligner.IterativeDPOAligner.aligner_args", false]], "aligner_args (lmflow.pipeline.raft_aligner.raftaligner attribute)": [[57, "lmflow.pipeline.raft_aligner.RaftAligner.aligner_args", false]], "aligner_file_path (lmflow.pipeline.dpov2_aligner.memorysafedpov2aligner attribute)": [[51, "lmflow.pipeline.dpov2_aligner.MemorySafeDPOv2Aligner.aligner_file_path", false]], "answer_extraction() (in module lmflow.utils.data_utils)": [[88, "lmflow.utils.data_utils.answer_extraction", false]], "answer_type (lmflow.args.evaluatorarguments attribute)": [[4, "lmflow.args.EvaluatorArguments.answer_type", false]], "append_message() (lmflow.utils.llava_conversation_lib.conversation method)": [[96, "lmflow.utils.llava_conversation_lib.Conversation.append_message", false]], "apply_chat_template (lmflow.args.inferencerarguments attribute)": [[4, "lmflow.args.InferencerArguments.apply_chat_template", false]], "arch_type (lmflow.args.modelarguments attribute)": [[4, "lmflow.args.ModelArguments.arch_type", false]], "args (lmflow.pipeline.utils.raft_trainer.rafttrainer attribute)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.args", false]], "assistant_formatter (lmflow.utils.conversation_template.base.conversationtemplate attribute)": [[75, "lmflow.utils.conversation_template.base.ConversationTemplate.assistant_formatter", false]], "assistant_formatter (lmflow.utils.conversation_template.conversationtemplate attribute)": [[81, "lmflow.utils.conversation_template.ConversationTemplate.assistant_formatter", false]], "autoarguments (class in lmflow.args)": [[4, "lmflow.args.AutoArguments", false]], "autocast_smart_context_manager() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.autocast_smart_context_manager", false]], "automodel (class in lmflow.models.auto_model)": [[9, "lmflow.models.auto_model.AutoModel", false]], "autopipeline (class in lmflow.pipeline.auto_pipeline)": [[46, "lmflow.pipeline.auto_pipeline.AutoPipeline", false]], "autoregressive_sampling() (lmflow.pipeline.inferencer.speculativeinferencer method)": [[55, "lmflow.pipeline.inferencer.SpeculativeInferencer.autoregressive_sampling", false]], "backend (lmflow.datasets.dataset attribute)": [[6, "lmflow.datasets.Dataset.backend", false]], "backend (lmflow.datasets.dataset.dataset attribute)": [[5, "lmflow.datasets.dataset.Dataset.backend", false]], "backend_dataset 
(lmflow.datasets.dataset attribute)": [[6, "lmflow.datasets.Dataset.backend_dataset", false]], "backend_dataset (lmflow.datasets.dataset.dataset attribute)": [[5, "lmflow.datasets.dataset.Dataset.backend_dataset", false]], "backward() (lmflow.utils.flash_attention.triton_flash_attention.flashattnfunc static method)": [[94, "lmflow.utils.flash_attention.triton_flash_attention.FlashAttnFunc.backward", false]], "backward() (lmflow.utils.flash_attention.triton_flash_attention.flashattnkvpackedfunc static method)": [[94, "lmflow.utils.flash_attention.triton_flash_attention.FlashAttnKVPackedFunc.backward", false]], "backward() (lmflow.utils.flash_attention.triton_flash_attention.flashattnqkvpackedfunc static method)": [[94, "lmflow.utils.flash_attention.triton_flash_attention.FlashAttnQKVPackedFunc.backward", false]], "base (lmflow.utils.position_interpolation.llama_rope_scaled_monkey_patch.condenserotaryembedding attribute)": [[100, "lmflow.utils.position_interpolation.llama_rope_scaled_monkey_patch.CondenseRotaryEmbedding.base", false]], "base_lrs (lmflow.optim.adabound.adabound attribute)": [[26, "lmflow.optim.adabound.AdaBound.base_lrs", false]], "basealigner (class in lmflow.pipeline.base_aligner)": [[47, "lmflow.pipeline.base_aligner.BaseAligner", false]], "basemodel (class in lmflow.models.base_model)": [[10, "lmflow.models.base_model.BaseModel", false]], "basepipeline (class in lmflow.pipeline.base_pipeline)": [[48, "lmflow.pipeline.base_pipeline.BasePipeline", false]], "basetuner (class in lmflow.pipeline.base_tuner)": [[49, "lmflow.pipeline.base_tuner.BaseTuner", false]], "batchlize() (in module lmflow.utils.data_utils)": [[88, "lmflow.utils.data_utils.batchlize", false]], "benchmarkingarguments (class in lmflow.args)": [[4, "lmflow.args.BenchmarkingArguments", false]], "beta (lmflow.args.dpoalignerarguments attribute)": [[4, "lmflow.args.DPOAlignerArguments.beta", false]], "beta (lmflow.args.dpov2alignerarguments attribute)": [[4, "lmflow.args.DPOv2AlignerArguments.beta", false]], "bits (lmflow.args.modelarguments attribute)": [[4, "lmflow.args.ModelArguments.bits", false]], "block_size (lmflow.args.datasetarguments attribute)": [[4, "lmflow.args.DatasetArguments.block_size", false]], "block_size (lmflow.pipeline.evaluator.evaluator attribute)": [[52, "lmflow.pipeline.evaluator.Evaluator.block_size", false]], "blocking() (in module lmflow.tokenization.hf_decoder_model)": [[70, "lmflow.tokenization.hf_decoder_model.blocking", false]], "blocking() (in module lmflow.tokenization.hf_text_regression_model)": [[71, "lmflow.tokenization.hf_text_regression_model.blocking", false]], "blocking_paired() (in module lmflow.tokenization.hf_text_regression_model)": [[71, "lmflow.tokenization.hf_text_regression_model.blocking_paired", false]], "blocking_text_to_textlist() (in module lmflow.tokenization.hf_text_regression_model)": [[71, "lmflow.tokenization.hf_text_regression_model.blocking_text_to_textlist", false]], "build_vision_tower() (in module lmflow.models.vision_encoder)": [[24, "lmflow.models.vision_encoder.build_vision_tower", false]], "build_vision_tower() (in module lmflow.models.vision_encoder.clip_encoder)": [[23, "lmflow.models.vision_encoder.clip_encoder.build_vision_tower", false]], "cache_dir (lmflow.args.modelarguments attribute)": [[4, "lmflow.args.ModelArguments.cache_dir", false]], "call_model_init() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.call_model_init", false]], "callback_handler 
(lmflow.pipeline.utils.raft_trainer.rafttrainer attribute)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.callback_handler", false]], "callbacks (lmflow.pipeline.utils.raft_trainer.rafttrainer attribute)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.callbacks", false]], "can_return_loss (lmflow.pipeline.utils.raft_trainer.rafttrainer attribute)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.can_return_loss", false]], "chatglm3_template (in module lmflow.utils.conversation_template)": [[81, "lmflow.utils.conversation_template.CHATGLM3_TEMPLATE", false]], "chatglm3_template (in module lmflow.utils.conversation_template.chatglm)": [[76, "lmflow.utils.conversation_template.chatglm.CHATGLM3_TEMPLATE", false]], "chatml_template (in module lmflow.utils.conversation_template)": [[81, "lmflow.utils.conversation_template.CHATML_TEMPLATE", false]], "chatml_template (in module lmflow.utils.conversation_template.chatml)": [[77, "lmflow.utils.conversation_template.chatml.CHATML_TEMPLATE", false]], "check_homogeneity() (in module lmflow.utils.model)": [[97, "lmflow.utils.model.check_homogeneity", false]], "clamp_value (lmflow.optim.lamb.lamb attribute)": [[36, "lmflow.optim.lamb.Lamb.clamp_value", false]], "clipvisiontower (class in lmflow.models.vision_encoder.clip_encoder)": [[23, "lmflow.models.vision_encoder.clip_encoder.CLIPVisionTower", false]], "code_exec() (lmflow.pipeline.inferencer.toolinferencer method)": [[55, "lmflow.pipeline.inferencer.ToolInferencer.code_exec", false]], "collate() (lmflow.pipeline.utils.dpov2_dataprocessor.preferencedatacollatorwithpadding method)": [[60, "lmflow.pipeline.utils.dpov2_dataprocessor.PreferenceDataCollatorWithPadding.collate", false]], "collection_strategy (lmflow.args.raftalignerarguments attribute)": [[4, "lmflow.args.RaftAlignerArguments.collection_strategy", false]], "compress_list() (lmflow.pipeline.rm_inferencer.rewardmodelinferencer method)": [[58, "lmflow.pipeline.rm_inferencer.RewardModelInferencer.compress_list", false]], "compute_loss() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.compute_loss", false]], "compute_loss() (lmflow.pipeline.utils.rm_trainer.peftrewardtrainer method)": [[68, "lmflow.pipeline.utils.rm_trainer.PeftRewardTrainer.compute_loss", false]], "compute_loss() (lmflow.pipeline.utils.rm_trainer.rewardtrainer method)": [[68, "lmflow.pipeline.utils.rm_trainer.RewardTrainer.compute_loss", false]], "compute_loss_context_manager() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.compute_loss_context_manager", false]], "compute_metrics (lmflow.pipeline.utils.raft_trainer.rafttrainer attribute)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.compute_metrics", false]], "compute_metrics() (in module lmflow.pipeline.utils.rm_trainer)": [[68, "lmflow.pipeline.utils.rm_trainer.compute_metrics", false]], "condenserotaryembedding (class in lmflow.utils.position_interpolation.llama_rope_scaled_monkey_patch)": [[100, "lmflow.utils.position_interpolation.llama_rope_scaled_monkey_patch.CondenseRotaryEmbedding", false]], "config (lmflow.models.vision_encoder.clip_encoder.clipvisiontower property)": [[23, "lmflow.models.vision_encoder.clip_encoder.CLIPVisionTower.config", false]], "config (lmflow.pipeline.evaluator.evaluator attribute)": [[52, "lmflow.pipeline.evaluator.Evaluator.config", false]], "config (lmflow.pipeline.inferencer.inferencer attribute)": [[55, 
"lmflow.pipeline.inferencer.Inferencer.config", false]], "config_additional_args (lmflow.models.hf_text_regression_model.hftextregressionmodel attribute)": [[16, "lmflow.models.hf_text_regression_model.HFTextRegressionModel.config_additional_args", false]], "config_name (lmflow.args.modelarguments attribute)": [[4, "lmflow.args.ModelArguments.config_name", false]], "config_overrides (lmflow.args.modelarguments attribute)": [[4, "lmflow.args.ModelArguments.config_overrides", false]], "content (lmflow.utils.conversation_template.base.templatecomponent attribute)": [[75, "lmflow.utils.conversation_template.base.TemplateComponent.content", false]], "control (lmflow.pipeline.utils.raft_trainer.rafttrainer attribute)": [[66, "id1", false], [66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.control", false]], "controller_heart_beat_expiration (in module lmflow.utils.constants)": [[74, "lmflow.utils.constants.CONTROLLER_HEART_BEAT_EXPIRATION", false]], "conv_llama_2 (in module lmflow.utils.llava_conversation_lib)": [[96, "lmflow.utils.llava_conversation_lib.conv_llama_2", false]], "conv_llava_llama_2 (in module lmflow.utils.llava_conversation_lib)": [[96, "lmflow.utils.llava_conversation_lib.conv_llava_llama_2", false]], "conv_llava_plain (in module lmflow.utils.llava_conversation_lib)": [[96, "lmflow.utils.llava_conversation_lib.conv_llava_plain", false]], "conv_llava_v0 (in module lmflow.utils.llava_conversation_lib)": [[96, "lmflow.utils.llava_conversation_lib.conv_llava_v0", false]], "conv_llava_v0_mmtag (in module lmflow.utils.llava_conversation_lib)": [[96, "lmflow.utils.llava_conversation_lib.conv_llava_v0_mmtag", false]], "conv_llava_v1 (in module lmflow.utils.llava_conversation_lib)": [[96, "lmflow.utils.llava_conversation_lib.conv_llava_v1", false]], "conv_llava_v1_mmtag (in module lmflow.utils.llava_conversation_lib)": [[96, "lmflow.utils.llava_conversation_lib.conv_llava_v1_mmtag", false]], "conv_mpt (in module lmflow.utils.llava_conversation_lib)": [[96, "lmflow.utils.llava_conversation_lib.conv_mpt", false]], "conv_templates (in module lmflow.utils.llava_conversation_lib)": [[96, "lmflow.utils.llava_conversation_lib.conv_templates", false]], "conv_vicuna_v0 (in module lmflow.utils.llava_conversation_lib)": [[96, "lmflow.utils.llava_conversation_lib.conv_vicuna_v0", false]], "conv_vicuna_v1 (in module lmflow.utils.llava_conversation_lib)": [[96, "lmflow.utils.llava_conversation_lib.conv_vicuna_v1", false]], "conversation (class in lmflow.utils.llava_conversation_lib)": [[96, "lmflow.utils.llava_conversation_lib.Conversation", false]], "conversation_dataset_description (in module lmflow.utils.constants)": [[74, "lmflow.utils.constants.CONVERSATION_DATASET_DESCRIPTION", false]], "conversation_role_names (in module lmflow.utils.constants)": [[74, "lmflow.utils.constants.CONVERSATION_ROLE_NAMES", false]], "conversation_template (lmflow.args.datasetarguments attribute)": [[4, "lmflow.args.DatasetArguments.conversation_template", false]], "conversation_tokenize_function() (in module lmflow.tokenization.hf_decoder_model)": [[70, "lmflow.tokenization.hf_decoder_model.conversation_tokenize_function", false]], "conversation_tokenize_function() (in module lmflow.tokenization.hf_text_regression_model)": [[71, "lmflow.tokenization.hf_text_regression_model.conversation_tokenize_function", false]], "conversationtemplate (class in lmflow.utils.conversation_template)": [[81, "lmflow.utils.conversation_template.ConversationTemplate", false]], "conversationtemplate (class in 
lmflow.utils.conversation_template.base)": [[75, "lmflow.utils.conversation_template.base.ConversationTemplate", false]], "convert_to_paired_dataset() (lmflow.pipeline.dpov2_aligner.dpov2aligner method)": [[51, "lmflow.pipeline.dpov2_aligner.DPOv2Aligner.convert_to_paired_dataset", false]], "copy() (lmflow.utils.llava_conversation_lib.conversation method)": [[96, "lmflow.utils.llava_conversation_lib.Conversation.copy", false]], "create_copied_dataclass() (in module lmflow.utils.common)": [[73, "lmflow.utils.common.create_copied_dataclass", false]], "create_customized_optimizer() (lmflow.pipeline.finetuner.finetuner method)": [[53, "lmflow.pipeline.finetuner.Finetuner.create_customized_optimizer", false]], "create_dataloader() (lmflow.pipeline.evaluator.evaluator method)": [[52, "lmflow.pipeline.evaluator.Evaluator.create_dataloader", false]], "create_dataloader() (lmflow.pipeline.inferencer.inferencer method)": [[55, "lmflow.pipeline.inferencer.Inferencer.create_dataloader", false]], "create_from_dict() (lmflow.datasets.dataset class method)": [[6, "lmflow.datasets.Dataset.create_from_dict", false]], "create_from_dict() (lmflow.datasets.dataset.dataset class method)": [[5, "lmflow.datasets.dataset.Dataset.create_from_dict", false]], "create_model_card() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.create_model_card", false]], "create_optimizer() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.create_optimizer", false]], "create_optimizer_and_scheduler() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.create_optimizer_and_scheduler", false]], "create_scheduler() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.create_scheduler", false]], "current_flos (lmflow.pipeline.utils.raft_trainer.rafttrainer attribute)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.current_flos", false]], "custom_model (lmflow.args.vismodelarguments attribute)": [[4, "lmflow.args.VisModelArguments.custom_model", false]], "custom_vision_model (lmflow.args.vismodelarguments attribute)": [[4, "lmflow.args.VisModelArguments.custom_vision_model", false]], "custom_vision_model (lmflow.models.vision2seq_model.customautovision2seqmodel attribute)": [[22, "lmflow.models.vision2seq_model.CustomAutoVision2SeqModel.custom_vision_model", false]], "customautovision2seqmodel (class in lmflow.models.vision2seq_model)": [[22, "lmflow.models.vision2seq_model.CustomAutoVision2SeqModel", false]], "customized_cache_dir (lmflow.args.datasetarguments attribute)": [[4, "lmflow.args.DatasetArguments.customized_cache_dir", false]], "customized_optim (lmflow.args.finetunerarguments attribute)": [[4, "lmflow.args.FinetunerArguments.customized_optim", false]], "customized_optim_args (lmflow.args.finetunerarguments attribute)": [[4, "lmflow.args.FinetunerArguments.customized_optim_args", false]], "custommultimodaldataset (class in lmflow.datasets)": [[6, "lmflow.datasets.CustomMultiModalDataset", false]], "custommultimodaldataset (class in lmflow.datasets.multi_modal_dataset)": [[7, "lmflow.datasets.multi_modal_dataset.CustomMultiModalDataset", false]], "data_args (lmflow.datasets.custommultimodaldataset attribute)": [[6, "lmflow.datasets.CustomMultiModalDataset.data_args", false]], "data_args (lmflow.datasets.dataset attribute)": [[6, "lmflow.datasets.Dataset.data_args", false]], 
"data_args (lmflow.datasets.dataset.dataset attribute)": [[5, "lmflow.datasets.dataset.Dataset.data_args", false]], "data_args (lmflow.datasets.multi_modal_dataset.custommultimodaldataset attribute)": [[7, "lmflow.datasets.multi_modal_dataset.CustomMultiModalDataset.data_args", false]], "data_args (lmflow.pipeline.dpo_aligner.dpoaligner attribute)": [[50, "lmflow.pipeline.dpo_aligner.DPOAligner.data_args", false]], "data_args (lmflow.pipeline.dpov2_aligner.dpov2aligner attribute)": [[51, "lmflow.pipeline.dpov2_aligner.DPOv2Aligner.data_args", false]], "data_args (lmflow.pipeline.dpov2_aligner.memorysafedpov2aligner attribute)": [[51, "lmflow.pipeline.dpov2_aligner.MemorySafeDPOv2Aligner.data_args", false]], "data_args (lmflow.pipeline.evaluator.evaluator attribute)": [[52, "lmflow.pipeline.evaluator.Evaluator.data_args", false]], "data_args (lmflow.pipeline.finetuner.finetuner attribute)": [[53, "lmflow.pipeline.finetuner.Finetuner.data_args", false]], "data_args (lmflow.pipeline.inferencer.inferencer attribute)": [[55, "lmflow.pipeline.inferencer.Inferencer.data_args", false]], "data_args (lmflow.pipeline.iterative_dpo_aligner.iterativedpoaligner attribute)": [[56, "lmflow.pipeline.iterative_dpo_aligner.IterativeDPOAligner.data_args", false]], "data_args (lmflow.pipeline.raft_aligner.raftaligner attribute)": [[57, "lmflow.pipeline.raft_aligner.RaftAligner.data_args", false]], "data_args (lmflow.pipeline.rm_inferencer.rewardmodelinferencer attribute)": [[58, "lmflow.pipeline.rm_inferencer.RewardModelInferencer.data_args", false]], "data_args (lmflow.pipeline.vllm_inferencer.inferencerwithoffloading attribute)": [[69, "lmflow.pipeline.vllm_inferencer.InferencerWithOffloading.data_args", false]], "data_collator (lmflow.pipeline.utils.raft_trainer.rafttrainer attribute)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.data_collator", false]], "data_dict (lmflow.datasets.custommultimodaldataset attribute)": [[6, "id0", false], [6, "lmflow.datasets.CustomMultiModalDataset.data_dict", false]], "data_dict (lmflow.datasets.multi_modal_dataset.custommultimodaldataset attribute)": [[7, "id0", false], [7, "lmflow.datasets.multi_modal_dataset.CustomMultiModalDataset.data_dict", false]], "datacollatorforsuperviseddataset (class in lmflow.datasets.multi_modal_dataset)": [[7, "lmflow.datasets.multi_modal_dataset.DataCollatorForSupervisedDataset", false]], "dataset (class in lmflow.datasets)": [[6, "lmflow.datasets.Dataset", false]], "dataset (class in lmflow.datasets.dataset)": [[5, "lmflow.datasets.dataset.Dataset", false]], "dataset_config_name (lmflow.args.datasetarguments attribute)": [[4, "lmflow.args.DatasetArguments.dataset_config_name", false]], "dataset_description_map (in module lmflow.utils.constants)": [[74, "lmflow.utils.constants.DATASET_DESCRIPTION_MAP", false]], "dataset_name (lmflow.args.benchmarkingarguments attribute)": [[4, "lmflow.args.BenchmarkingArguments.dataset_name", false]], "dataset_name (lmflow.args.datasetarguments attribute)": [[4, "lmflow.args.DatasetArguments.dataset_name", false]], "dataset_path (lmflow.args.datasetarguments attribute)": [[4, "lmflow.args.DatasetArguments.dataset_path", false]], "dataset_path (lmflow.datasets.dataset attribute)": [[6, "lmflow.datasets.Dataset.dataset_path", false]], "dataset_path (lmflow.datasets.dataset.dataset attribute)": [[5, "lmflow.datasets.dataset.Dataset.dataset_path", false]], "dataset_path_list (lmflow.args.iterativealignerarguments attribute)": [[4, "lmflow.args.IterativeAlignerArguments.dataset_path_list", false]], 
"dataset_types (in module lmflow.datasets.dataset)": [[5, "lmflow.datasets.dataset.DATASET_TYPES", false]], "datasetarguments (class in lmflow.args)": [[4, "lmflow.args.DatasetArguments", false]], "deactivate_model_for_inference() (lmflow.models.hf_model_mixin.hfmodelmixin method)": [[15, "lmflow.models.hf_model_mixin.HFModelMixin.deactivate_model_for_inference", false]], "debias (lmflow.optim.lamb.lamb attribute)": [[36, "lmflow.optim.lamb.Lamb.debias", false]], "decode() (lmflow.models.hf_decoder_model.hfdecodermodel method)": [[13, "lmflow.models.hf_decoder_model.HFDecoderModel.decode", false]], "decode() (lmflow.models.hf_encoder_decoder_model.hfencoderdecodermodel method)": [[14, "lmflow.models.hf_encoder_decoder_model.HFEncoderDecoderModel.decode", false]], "decodermodel (class in lmflow.models.decoder_model)": [[11, "lmflow.models.decoder_model.DecoderModel", false]], "deepseek_template (in module lmflow.utils.conversation_template)": [[81, "lmflow.utils.conversation_template.DEEPSEEK_TEMPLATE", false]], "deepseek_template (in module lmflow.utils.conversation_template.deepseek)": [[78, "lmflow.utils.conversation_template.deepseek.DEEPSEEK_TEMPLATE", false]], "deepspeed (lmflow.args.evaluatorarguments attribute)": [[4, "lmflow.args.EvaluatorArguments.deepspeed", false]], "deepspeed (lmflow.args.inferencerarguments attribute)": [[4, "lmflow.args.InferencerArguments.deepspeed", false]], "deepspeed (lmflow.pipeline.utils.raft_trainer.rafttrainer attribute)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.deepspeed", false]], "default_callbacks (in module lmflow.pipeline.utils.raft_trainer)": [[66, "lmflow.pipeline.utils.raft_trainer.DEFAULT_CALLBACKS", false]], "default_callbacks (lmflow.pipeline.utils.raft_trainer.rafttrainer attribute)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.default_callbacks", false]], "default_collator (lmflow.pipeline.utils.raft_trainer.rafttrainer attribute)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.default_collator", false]], "default_conversation (in module lmflow.utils.llava_conversation_lib)": [[96, "lmflow.utils.llava_conversation_lib.default_conversation", false]], "default_im_end_token (in module lmflow.utils.constants)": [[74, "lmflow.utils.constants.DEFAULT_IM_END_TOKEN", false]], "default_im_start_token (in module lmflow.utils.constants)": [[74, "lmflow.utils.constants.DEFAULT_IM_START_TOKEN", false]], "default_image_patch_token (in module lmflow.utils.constants)": [[74, "lmflow.utils.constants.DEFAULT_IMAGE_PATCH_TOKEN", false]], "default_image_token (in module lmflow.utils.constants)": [[74, "lmflow.utils.constants.DEFAULT_IMAGE_TOKEN", false]], "default_label_names (lmflow.pipeline.utils.raft_trainer.rafttrainer attribute)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.default_label_names", false]], "default_progress_callback (in module lmflow.pipeline.utils.raft_trainer)": [[66, "id0", false], [66, "lmflow.pipeline.utils.raft_trainer.DEFAULT_PROGRESS_CALLBACK", false]], "defaults (lmflow.optim.adabelief.adabelief attribute)": [[25, "lmflow.optim.adabelief.AdaBelief.defaults", false]], "defaults (lmflow.optim.adabound.adabound attribute)": [[26, "lmflow.optim.adabound.AdaBound.defaults", false]], "defaults (lmflow.optim.adadelta.adadelta attribute)": [[27, "lmflow.optim.adadelta.Adadelta.defaults", false]], "defaults (lmflow.optim.adagrad.adagrad attribute)": [[28, "lmflow.optim.adagrad.AdaGrad.defaults", false]], "defaults (lmflow.optim.adam.adam attribute)": [[29, "lmflow.optim.adam.Adam.defaults", 
false]], "defaults (lmflow.optim.adamax.adamax attribute)": [[30, "lmflow.optim.adamax.Adamax.defaults", false]], "defaults (lmflow.optim.adamp.adamp attribute)": [[31, "lmflow.optim.adamp.AdamP.defaults", false]], "defaults (lmflow.optim.adamw_schedule_free.adamwschedulefree attribute)": [[32, "lmflow.optim.adamw_schedule_free.AdamWScheduleFree.defaults", false]], "defaults (lmflow.optim.adan.adan attribute)": [[33, "lmflow.optim.adan.Adan.defaults", false]], "defaults (lmflow.optim.dummy.dummy attribute)": [[34, "lmflow.optim.dummy.Dummy.defaults", false]], "defaults (lmflow.optim.lamb.lamb attribute)": [[36, "lmflow.optim.lamb.Lamb.defaults", false]], "defaults (lmflow.optim.lars.lars attribute)": [[37, "lmflow.optim.lars.LARS.defaults", false]], "defaults (lmflow.optim.nadam.nadam attribute)": [[38, "lmflow.optim.nadam.NAdam.defaults", false]], "defaults (lmflow.optim.novograd.novograd attribute)": [[39, "lmflow.optim.novograd.NovoGrad.defaults", false]], "defaults (lmflow.optim.radam.radam attribute)": [[41, "lmflow.optim.radam.RAdam.defaults", false]], "defaults (lmflow.optim.sgd_schedule_free.sgdschedulefree attribute)": [[42, "lmflow.optim.sgd_schedule_free.SGDScheduleFree.defaults", false]], "defaults (lmflow.optim.sgdp.sgdp attribute)": [[43, "lmflow.optim.sgdp.SGDP.defaults", false]], "defaults (lmflow.optim.sophia.sophiag attribute)": [[44, "lmflow.optim.sophia.SophiaG.defaults", false]], "defaults (lmflow.optim.yogi.yogi attribute)": [[45, "lmflow.optim.yogi.Yogi.defaults", false]], "degenerated_to_sgd (lmflow.optim.adabelief.adabelief attribute)": [[25, "id0", false], [25, "lmflow.optim.adabelief.AdaBelief.degenerated_to_sgd", false]], "device (lmflow.args.inferencerarguments attribute)": [[4, "lmflow.args.InferencerArguments.device", false]], "device (lmflow.models.hf_encoder_decoder_model.hfencoderdecodermodel attribute)": [[14, "lmflow.models.hf_encoder_decoder_model.HFEncoderDecoderModel.device", false]], "device (lmflow.models.hf_model_mixin.hfmodelmixin attribute)": [[15, "lmflow.models.hf_model_mixin.HFModelMixin.device", false]], "device (lmflow.models.vision_encoder.clip_encoder.clipvisiontower property)": [[23, "lmflow.models.vision_encoder.clip_encoder.CLIPVisionTower.device", false]], "dict() (lmflow.utils.llava_conversation_lib.conversation method)": [[96, "lmflow.utils.llava_conversation_lib.Conversation.dict", false]], "disable_group_texts (lmflow.args.datasetarguments attribute)": [[4, "lmflow.args.DatasetArguments.disable_group_texts", false]], "distributed_inference_num_instances (lmflow.args.inferencerarguments attribute)": [[4, "lmflow.args.InferencerArguments.distributed_inference_num_instances", false]], "do_dpo_align (lmflow.args.iterativedpoalignerarguments attribute)": [[4, "lmflow.args.IterativeDPOAlignerArguments.do_dpo_align", false]], "do_grad_scaling (lmflow.pipeline.utils.raft_trainer.rafttrainer attribute)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.do_grad_scaling", false]], "do_response_generation (lmflow.args.iterativedpoalignerarguments attribute)": [[4, "lmflow.args.IterativeDPOAlignerArguments.do_response_generation", false]], "do_rope_scaling (lmflow.args.modelarguments attribute)": [[4, "lmflow.args.ModelArguments.do_rope_scaling", false]], "do_sample (lmflow.args.inferencerarguments attribute)": [[4, "lmflow.args.InferencerArguments.do_sample", false]], "do_scoring (lmflow.args.iterativedpoalignerarguments attribute)": [[4, "lmflow.args.IterativeDPOAlignerArguments.do_scoring", false]], "do_train 
(lmflow.models.hf_model_mixin.hfmodelmixin attribute)": [[15, "lmflow.models.hf_model_mixin.HFModelMixin.do_train", false]], "double_quant (lmflow.args.modelarguments attribute)": [[4, "lmflow.args.ModelArguments.double_quant", false]], "dpo_loss() (lmflow.pipeline.utils.dpov2_trainer.dpov2trainer method)": [[61, "lmflow.pipeline.utils.dpov2_trainer.DPOv2Trainer.dpo_loss", false]], "dpoaligner (class in lmflow.pipeline.dpo_aligner)": [[50, "lmflow.pipeline.dpo_aligner.DPOAligner", false]], "dpoalignerarguments (class in lmflow.args)": [[4, "lmflow.args.DPOAlignerArguments", false]], "dpov2aligner (class in lmflow.pipeline.dpov2_aligner)": [[51, "lmflow.pipeline.dpov2_aligner.DPOv2Aligner", false]], "dpov2alignerarguments (class in lmflow.args)": [[4, "lmflow.args.DPOv2AlignerArguments", false]], "dpov2trainer (class in lmflow.pipeline.utils.dpov2_trainer)": [[61, "lmflow.pipeline.utils.dpov2_trainer.DPOv2Trainer", false]], "draft_config (lmflow.pipeline.inferencer.speculativeinferencer attribute)": [[55, "lmflow.pipeline.inferencer.SpeculativeInferencer.draft_config", false]], "draft_model_args (lmflow.pipeline.inferencer.speculativeinferencer attribute)": [[55, "lmflow.pipeline.inferencer.SpeculativeInferencer.draft_model_args", false]], "drop_instances() (lmflow.datasets.dataset method)": [[6, "lmflow.datasets.Dataset.drop_instances", false]], "drop_instances() (lmflow.datasets.dataset.dataset method)": [[5, "lmflow.datasets.dataset.Dataset.drop_instances", false]], "ds_config (lmflow.models.hf_model_mixin.hfmodelmixin attribute)": [[15, "lmflow.models.hf_model_mixin.HFModelMixin.ds_config", false]], "dtype (lmflow.models.vision_encoder.clip_encoder.clipvisiontower property)": [[23, "lmflow.models.vision_encoder.clip_encoder.CLIPVisionTower.dtype", false]], "dtype (lmflow.utils.position_interpolation.llama_rope_scaled_monkey_patch.condenserotaryembedding attribute)": [[100, "lmflow.utils.position_interpolation.llama_rope_scaled_monkey_patch.CondenseRotaryEmbedding.dtype", false]], "dummy (class in lmflow.optim.dummy)": [[34, "lmflow.optim.dummy.Dummy", false]], "dummy (lmflow.args.optimizernames attribute)": [[4, "lmflow.args.OptimizerNames.DUMMY", false]], "dummy_feature (lmflow.models.vision_encoder.clip_encoder.clipvisiontower property)": [[23, "lmflow.models.vision_encoder.clip_encoder.CLIPVisionTower.dummy_feature", false]], "emb (lmflow.utils.position_interpolation.llama_rope_scaled_monkey_patch.condenserotaryembedding attribute)": [[100, "lmflow.utils.position_interpolation.llama_rope_scaled_monkey_patch.CondenseRotaryEmbedding.emb", false]], "empty_no_special_tokens_template (in module lmflow.utils.conversation_template)": [[81, "lmflow.utils.conversation_template.EMPTY_NO_SPECIAL_TOKENS_TEMPLATE", false]], "empty_no_special_tokens_template (in module lmflow.utils.conversation_template.base)": [[75, "lmflow.utils.conversation_template.base.EMPTY_NO_SPECIAL_TOKENS_TEMPLATE", false]], "empty_template (in module lmflow.utils.conversation_template)": [[81, "lmflow.utils.conversation_template.EMPTY_TEMPLATE", false]], "empty_template (in module lmflow.utils.conversation_template.base)": [[75, "lmflow.utils.conversation_template.base.EMPTY_TEMPLATE", false]], "emptyformatter (class in lmflow.utils.conversation_template.base)": [[75, "lmflow.utils.conversation_template.base.EmptyFormatter", false]], "enable_decode_inference_result (lmflow.args.inferencerarguments attribute)": [[4, "lmflow.args.InferencerArguments.enable_decode_inference_result", false]], "enable_distributed_inference 
(lmflow.args.inferencerarguments attribute)": [[4, "lmflow.args.InferencerArguments.enable_distributed_inference", false]], "encode() (lmflow.models.hf_decoder_model.hfdecodermodel method)": [[13, "lmflow.models.hf_decoder_model.HFDecoderModel.encode", false]], "encode() (lmflow.models.hf_encoder_decoder_model.hfencoderdecodermodel method)": [[14, "lmflow.models.hf_encoder_decoder_model.HFEncoderDecoderModel.encode", false]], "encode_conversation() (lmflow.utils.conversation_template.base.conversationtemplate method)": [[75, "lmflow.utils.conversation_template.base.ConversationTemplate.encode_conversation", false]], "encode_conversation() (lmflow.utils.conversation_template.conversationtemplate method)": [[81, "lmflow.utils.conversation_template.ConversationTemplate.encode_conversation", false]], "encode_conversation() (lmflow.utils.conversation_template.gemma.gemmaconversationtemplate method)": [[80, "lmflow.utils.conversation_template.gemma.GemmaConversationTemplate.encode_conversation", false]], "encode_images() (lmflow.models.vision_encoder.clip_encoder.clipvisiontower method)": [[23, "lmflow.models.vision_encoder.clip_encoder.CLIPVisionTower.encode_images", false]], "encoderdecodermodel (class in lmflow.models.encoder_decoder_model)": [[12, "lmflow.models.encoder_decoder_model.EncoderDecoderModel", false]], "eos_padding (lmflow.args.modelarguments attribute)": [[4, "lmflow.args.ModelArguments.eos_padding", false]], "eos_token_id (lmflow.pipeline.vllm_inferencer.inferencerwithoffloading attribute)": [[69, "lmflow.pipeline.vllm_inferencer.InferencerWithOffloading.eos_token_id", false]], "eval() (lmflow.optim.adamw_schedule_free.adamwschedulefree method)": [[32, "lmflow.optim.adamw_schedule_free.AdamWScheduleFree.eval", false]], "eval() (lmflow.optim.sgd_schedule_free.sgdschedulefree method)": [[42, "lmflow.optim.sgd_schedule_free.SGDScheduleFree.eval", false]], "eval_dataset (lmflow.pipeline.utils.raft_trainer.rafttrainer attribute)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.eval_dataset", false]], "eval_dataset_path (lmflow.args.finetunerarguments attribute)": [[4, "lmflow.args.FinetunerArguments.eval_dataset_path", false]], "eval_steps (lmflow.args.dpoalignerarguments attribute)": [[4, "lmflow.args.DPOAlignerArguments.eval_steps", false]], "evaluate() (lmflow.pipeline.evaluator.evaluator method)": [[52, "lmflow.pipeline.evaluator.Evaluator.evaluate", false]], "evaluate() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.evaluate", false]], "evaluate_block_size (lmflow.args.evaluatorarguments attribute)": [[4, "lmflow.args.EvaluatorArguments.evaluate_block_size", false]], "evaluation_loop() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.evaluation_loop", false]], "evaluator (class in lmflow.pipeline.evaluator)": [[52, "lmflow.pipeline.evaluator.Evaluator", false]], "evaluator_args (lmflow.pipeline.evaluator.evaluator attribute)": [[52, "lmflow.pipeline.evaluator.Evaluator.evaluator_args", false]], "evaluatorarguments (class in lmflow.args)": [[4, "lmflow.args.EvaluatorArguments", false]], "feature_select() (lmflow.models.vision_encoder.clip_encoder.clipvisiontower method)": [[23, "lmflow.models.vision_encoder.clip_encoder.CLIPVisionTower.feature_select", false]], "finetune_part (lmflow.args.finetunerarguments attribute)": [[4, "lmflow.args.FinetunerArguments.finetune_part", false]], "finetuner (class in lmflow.pipeline.finetuner)": [[53, 
"lmflow.pipeline.finetuner.Finetuner", false]], "finetuner_args (lmflow.pipeline.finetuner.finetuner attribute)": [[53, "lmflow.pipeline.finetuner.Finetuner.finetuner_args", false]], "finetunerarguments (class in lmflow.args)": [[4, "lmflow.args.FinetunerArguments", false]], "fixed_decay (lmflow.optim.adabelief.adabelief attribute)": [[25, "lmflow.optim.adabelief.AdaBelief.fixed_decay", false]], "flash_attn_func (in module lmflow.utils.flash_attention.triton_flash_attention)": [[94, "lmflow.utils.flash_attention.triton_flash_attention.flash_attn_func", false]], "flash_attn_kvpacked_func (in module lmflow.utils.flash_attention.triton_flash_attention)": [[94, "lmflow.utils.flash_attention.triton_flash_attention.flash_attn_kvpacked_func", false]], "flash_attn_qkvpacked_func (in module lmflow.utils.flash_attention.triton_flash_attention)": [[94, "lmflow.utils.flash_attention.triton_flash_attention.flash_attn_qkvpacked_func", false]], "flashattnfunc (class in lmflow.utils.flash_attention.triton_flash_attention)": [[94, "lmflow.utils.flash_attention.triton_flash_attention.FlashAttnFunc", false]], "flashattnkvpackedfunc (class in lmflow.utils.flash_attention.triton_flash_attention)": [[94, "lmflow.utils.flash_attention.triton_flash_attention.FlashAttnKVPackedFunc", false]], "flashattnqkvpackedfunc (class in lmflow.utils.flash_attention.triton_flash_attention)": [[94, "lmflow.utils.flash_attention.triton_flash_attention.FlashAttnQKVPackedFunc", false]], "flatten_list() (lmflow.pipeline.rm_inferencer.rewardmodelinferencer method)": [[58, "lmflow.pipeline.rm_inferencer.RewardModelInferencer.flatten_list", false]], "float_only_dataset_description (in module lmflow.utils.constants)": [[74, "lmflow.utils.constants.FLOAT_ONLY_DATASET_DESCRIPTION", false]], "floating_point_ops() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.floating_point_ops", false]], "format() (lmflow.utils.conversation_template.base.emptyformatter method)": [[75, "lmflow.utils.conversation_template.base.EmptyFormatter.format", false]], "format() (lmflow.utils.conversation_template.base.formatter method)": [[75, "lmflow.utils.conversation_template.base.Formatter.format", false]], "format() (lmflow.utils.conversation_template.base.listformatter method)": [[75, "lmflow.utils.conversation_template.base.ListFormatter.format", false]], "format() (lmflow.utils.conversation_template.base.stringformatter method)": [[75, "lmflow.utils.conversation_template.base.StringFormatter.format", false]], "formatter (class in lmflow.utils.conversation_template.base)": [[75, "lmflow.utils.conversation_template.base.Formatter", false]], "forward() (in module lmflow.utils.flash_attention.bloom_flash_attention)": [[89, "lmflow.utils.flash_attention.bloom_flash_attention.forward", false]], "forward() (in module lmflow.utils.flash_attention.gpt2_flash_attention)": [[90, "lmflow.utils.flash_attention.gpt2_flash_attention.forward", false]], "forward() (in module lmflow.utils.flash_attention.gpt_neo_flash_attention)": [[91, "lmflow.utils.flash_attention.gpt_neo_flash_attention.forward", false]], "forward() (in module lmflow.utils.flash_attention.llama_flash_attention)": [[93, "lmflow.utils.flash_attention.llama_flash_attention.forward", false]], "forward() (lmflow.models.vision2seq_model.customautovision2seqmodel method)": [[22, "lmflow.models.vision2seq_model.CustomAutoVision2SeqModel.forward", false]], "forward() (lmflow.models.vision_encoder.clip_encoder.clipvisiontower method)": [[23, 
"lmflow.models.vision_encoder.clip_encoder.CLIPVisionTower.forward", false]], "forward() (lmflow.utils.flash_attention.triton_flash_attention.flashattnfunc static method)": [[94, "lmflow.utils.flash_attention.triton_flash_attention.FlashAttnFunc.forward", false]], "forward() (lmflow.utils.flash_attention.triton_flash_attention.flashattnkvpackedfunc static method)": [[94, "lmflow.utils.flash_attention.triton_flash_attention.FlashAttnKVPackedFunc.forward", false]], "forward() (lmflow.utils.flash_attention.triton_flash_attention.flashattnqkvpackedfunc static method)": [[94, "lmflow.utils.flash_attention.triton_flash_attention.FlashAttnQKVPackedFunc.forward", false]], "forward() (lmflow.utils.position_interpolation.llama_rope_scaled_monkey_patch.condenserotaryembedding method)": [[100, "lmflow.utils.position_interpolation.llama_rope_scaled_monkey_patch.CondenseRotaryEmbedding.forward", false]], "fox_template (in module lmflow.utils.conversation_template)": [[81, "lmflow.utils.conversation_template.FOX_TEMPLATE", false]], "fox_template (in module lmflow.utils.conversation_template.fox)": [[79, "lmflow.utils.conversation_template.fox.FOX_TEMPLATE", false]], "freqs (lmflow.utils.position_interpolation.llama_rope_scaled_monkey_patch.condenserotaryembedding attribute)": [[100, "lmflow.utils.position_interpolation.llama_rope_scaled_monkey_patch.CondenseRotaryEmbedding.freqs", false]], "from_dict() (lmflow.datasets.dataset method)": [[6, "lmflow.datasets.Dataset.from_dict", false]], "from_dict() (lmflow.datasets.dataset.dataset method)": [[5, "lmflow.datasets.dataset.Dataset.from_dict", false]], "fsdp (lmflow.pipeline.utils.raft_trainer.rafttrainer attribute)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.fsdp", false]], "gemma_template (in module lmflow.utils.conversation_template)": [[81, "lmflow.utils.conversation_template.GEMMA_TEMPLATE", false]], "gemma_template (in module lmflow.utils.conversation_template.gemma)": [[80, "lmflow.utils.conversation_template.gemma.GEMMA_TEMPLATE", false]], "gemmaconversationtemplate (class in lmflow.utils.conversation_template.gemma)": [[80, "lmflow.utils.conversation_template.gemma.GemmaConversationTemplate", false]], "generate() (lmflow.models.vision2seq_model.customautovision2seqmodel method)": [[22, "lmflow.models.vision2seq_model.CustomAutoVision2SeqModel.generate", false]], "get_backend() (lmflow.datasets.dataset method)": [[6, "lmflow.datasets.Dataset.get_backend", false]], "get_backend() (lmflow.datasets.dataset.dataset method)": [[5, "lmflow.datasets.dataset.Dataset.get_backend", false]], "get_backend_dataset() (lmflow.datasets.dataset method)": [[6, "lmflow.datasets.Dataset.get_backend_dataset", false]], "get_backend_dataset() (lmflow.datasets.dataset.dataset method)": [[5, "lmflow.datasets.dataset.Dataset.get_backend_dataset", false]], "get_backend_model() (lmflow.models.hf_encoder_decoder_model.hfencoderdecodermodel method)": [[14, "lmflow.models.hf_encoder_decoder_model.HFEncoderDecoderModel.get_backend_model", false]], "get_backend_model() (lmflow.models.hf_model_mixin.hfmodelmixin method)": [[15, "lmflow.models.hf_model_mixin.HFModelMixin.get_backend_model", false]], "get_backend_model() (lmflow.models.vision2seq_model.customautovision2seqmodel method)": [[22, "lmflow.models.vision2seq_model.CustomAutoVision2SeqModel.get_backend_model", false]], "get_batch_loss_metrics() (lmflow.pipeline.utils.dpov2_trainer.dpov2trainer method)": [[61, "lmflow.pipeline.utils.dpov2_trainer.DPOv2Trainer.get_batch_loss_metrics", false]], "get_batch_metrics() 
(lmflow.pipeline.utils.dpov2_trainer.dpov2trainer method)": [[61, "lmflow.pipeline.utils.dpov2_trainer.DPOv2Trainer.get_batch_metrics", false]], "get_data_args() (lmflow.datasets.dataset method)": [[6, "lmflow.datasets.Dataset.get_data_args", false]], "get_data_args() (lmflow.datasets.dataset.dataset method)": [[5, "lmflow.datasets.dataset.Dataset.get_data_args", false]], "get_eval_dataloader() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.get_eval_dataloader", false]], "get_fingerprint() (lmflow.datasets.dataset method)": [[6, "lmflow.datasets.Dataset.get_fingerprint", false]], "get_fingerprint() (lmflow.datasets.dataset.dataset method)": [[5, "lmflow.datasets.dataset.Dataset.get_fingerprint", false]], "get_images() (lmflow.utils.llava_conversation_lib.conversation method)": [[96, "lmflow.utils.llava_conversation_lib.Conversation.get_images", false]], "get_max_length() (lmflow.models.hf_encoder_decoder_model.hfencoderdecodermodel method)": [[14, "lmflow.models.hf_encoder_decoder_model.HFEncoderDecoderModel.get_max_length", false]], "get_max_length() (lmflow.models.hf_model_mixin.hfmodelmixin method)": [[15, "lmflow.models.hf_model_mixin.HFModelMixin.get_max_length", false]], "get_model() (lmflow.models.auto_model.automodel class method)": [[9, "lmflow.models.auto_model.AutoModel.get_model", false]], "get_optimizer_cls_and_kwargs() (lmflow.pipeline.utils.raft_trainer.rafttrainer static method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.get_optimizer_cls_and_kwargs", false]], "get_paired_dataset() (in module lmflow.pipeline.dpo_aligner)": [[50, "lmflow.pipeline.dpo_aligner.get_paired_dataset", false]], "get_peft_without_qlora() (lmflow.models.hf_decoder_model.hfdecodermodel method)": [[13, "lmflow.models.hf_decoder_model.HFDecoderModel.get_peft_without_qlora", false]], "get_pipeline() (lmflow.pipeline.auto_pipeline.autopipeline class method)": [[46, "lmflow.pipeline.auto_pipeline.AutoPipeline.get_pipeline", false]], "get_pipeline_args_class() (lmflow.args.autoarguments method)": [[4, "lmflow.args.AutoArguments.get_pipeline_args_class", false]], "get_prompt() (lmflow.utils.llava_conversation_lib.conversation method)": [[96, "lmflow.utils.llava_conversation_lib.Conversation.get_prompt", false]], "get_test_dataloader() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.get_test_dataloader", false]], "get_tokenizer() (lmflow.models.hf_encoder_decoder_model.hfencoderdecodermodel method)": [[14, "lmflow.models.hf_encoder_decoder_model.HFEncoderDecoderModel.get_tokenizer", false]], "get_tokenizer() (lmflow.models.hf_model_mixin.hfmodelmixin method)": [[15, "lmflow.models.hf_model_mixin.HFModelMixin.get_tokenizer", false]], "get_tokenizer() (lmflow.models.vision2seq_model.customautovision2seqmodel method)": [[22, "lmflow.models.vision2seq_model.CustomAutoVision2SeqModel.get_tokenizer", false]], "get_train_dataloader() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.get_train_dataloader", false]], "get_type() (lmflow.datasets.dataset method)": [[6, "lmflow.datasets.Dataset.get_type", false]], "get_type() (lmflow.datasets.dataset.dataset method)": [[5, "lmflow.datasets.dataset.Dataset.get_type", false]], "gpu_support_flash_attention (in module lmflow.models.hf_decoder_model)": [[13, "id0", false], [13, "lmflow.models.hf_decoder_model.GPU_SUPPORT_FLASH_ATTENTION", false]], "gradient_accumulation_steps 
(lmflow.args.dpoalignerarguments attribute)": [[4, "lmflow.args.DPOAlignerArguments.gradient_accumulation_steps", false]], "gradient_checkpointing (lmflow.args.dpoalignerarguments attribute)": [[4, "lmflow.args.DPOAlignerArguments.gradient_checkpointing", false]], "gradient_checkpointing_use_reentrant (lmflow.args.dpoalignerarguments attribute)": [[4, "lmflow.args.DPOAlignerArguments.gradient_checkpointing_use_reentrant", false]], "group_text() (lmflow.pipeline.finetuner.finetuner method)": [[53, "lmflow.pipeline.finetuner.Finetuner.group_text", false]], "group_texts_batch_size (lmflow.args.datasetarguments attribute)": [[4, "lmflow.args.DatasetArguments.group_texts_batch_size", false]], "has_placeholder() (lmflow.utils.conversation_template.base.formatter method)": [[75, "lmflow.utils.conversation_template.base.Formatter.has_placeholder", false]], "hf_auto_model (lmflow.models.hf_model_mixin.hfmodelmixin attribute)": [[15, "lmflow.models.hf_model_mixin.HFModelMixin.hf_auto_model", false]], "hf_automodel_mapping (in module lmflow.models.hf_model_mixin)": [[15, "lmflow.models.hf_model_mixin.HF_AUTOMODEL_MAPPING", false]], "hf_automodel_type (in module lmflow.models.hf_model_mixin)": [[15, "lmflow.models.hf_model_mixin.HF_AUTOMODEL_TYPE", false]], "hf_dataset_sanity_check() (lmflow.datasets.dataset method)": [[6, "lmflow.datasets.Dataset.hf_dataset_sanity_check", false]], "hf_dataset_sanity_check() (lmflow.datasets.dataset.dataset method)": [[5, "lmflow.datasets.dataset.Dataset.hf_dataset_sanity_check", false]], "hf_model_config (lmflow.models.hf_model_mixin.hfmodelmixin attribute)": [[15, "lmflow.models.hf_model_mixin.HFModelMixin.hf_model_config", false]], "hfdecodermodel (class in lmflow.models.hf_decoder_model)": [[13, "lmflow.models.hf_decoder_model.HFDecoderModel", false]], "hfencoderdecodermodel (class in lmflow.models.hf_encoder_decoder_model)": [[14, "lmflow.models.hf_encoder_decoder_model.HFEncoderDecoderModel", false]], "hfmodelmixin (class in lmflow.models.hf_model_mixin)": [[15, "lmflow.models.hf_model_mixin.HFModelMixin", false]], "hftextregressionmodel (class in lmflow.models.hf_text_regression_model)": [[16, "lmflow.models.hf_text_regression_model.HFTextRegressionModel", false]], "hidden_size (lmflow.models.vision2seq_model.customautovision2seqmodel attribute)": [[22, "id0", false], [22, "lmflow.models.vision2seq_model.CustomAutoVision2SeqModel.hidden_size", false]], "hidden_size (lmflow.models.vision_encoder.clip_encoder.clipvisiontower property)": [[23, "lmflow.models.vision_encoder.clip_encoder.CLIPVisionTower.hidden_size", false]], "hp_name (lmflow.pipeline.utils.raft_trainer.rafttrainer attribute)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.hp_name", false]], "hp_search_backend (lmflow.pipeline.utils.raft_trainer.rafttrainer attribute)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.hp_search_backend", false]], "hyperparameter_search() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.hyperparameter_search", false]], "ignore_bias_buffers (lmflow.args.modelarguments attribute)": [[4, "lmflow.args.ModelArguments.ignore_bias_buffers", false]], "ignore_index (in module lmflow.utils.constants)": [[74, "lmflow.utils.constants.IGNORE_INDEX", false]], "image_aspect_ratio (lmflow.args.multimodaldatasetarguments attribute)": [[4, "lmflow.args.MultiModalDatasetArguments.image_aspect_ratio", false]], "image_encoder_name_or_path (lmflow.args.vismodelarguments attribute)": [[4, 
"lmflow.args.VisModelArguments.image_encoder_name_or_path", false]], "image_folder (lmflow.args.multimodaldatasetarguments attribute)": [[4, "lmflow.args.MultiModalDatasetArguments.image_folder", false]], "image_folder (lmflow.datasets.custommultimodaldataset attribute)": [[6, "lmflow.datasets.CustomMultiModalDataset.image_folder", false]], "image_folder (lmflow.datasets.multi_modal_dataset.custommultimodaldataset attribute)": [[7, "lmflow.datasets.multi_modal_dataset.CustomMultiModalDataset.image_folder", false]], "image_token_index (in module lmflow.utils.constants)": [[74, "lmflow.utils.constants.IMAGE_TOKEN_INDEX", false]], "inf (lmflow.pipeline.raft_aligner.raftaligner attribute)": [[57, "lmflow.pipeline.raft_aligner.RaftAligner.INF", false]], "inference() (lmflow.models.hf_decoder_model.hfdecodermodel method)": [[13, "lmflow.models.hf_decoder_model.HFDecoderModel.inference", false]], "inference() (lmflow.models.hf_encoder_decoder_model.hfencoderdecodermodel method)": [[14, "lmflow.models.hf_encoder_decoder_model.HFEncoderDecoderModel.inference", false]], "inference() (lmflow.models.hf_text_regression_model.hftextregressionmodel method)": [[16, "lmflow.models.hf_text_regression_model.HFTextRegressionModel.inference", false]], "inference() (lmflow.models.text_regression_model.textregressionmodel method)": [[21, "lmflow.models.text_regression_model.TextRegressionModel.inference", false]], "inference() (lmflow.pipeline.inferencer.inferencer method)": [[55, "lmflow.pipeline.inferencer.Inferencer.inference", false]], "inference() (lmflow.pipeline.inferencer.speculativeinferencer method)": [[55, "lmflow.pipeline.inferencer.SpeculativeInferencer.inference", false]], "inference() (lmflow.pipeline.inferencer.toolinferencer method)": [[55, "lmflow.pipeline.inferencer.ToolInferencer.inference", false]], "inference() (lmflow.pipeline.rm_inferencer.rewardmodelinferencer method)": [[58, "lmflow.pipeline.rm_inferencer.RewardModelInferencer.inference", false]], "inference() (lmflow.pipeline.vllm_inferencer.inferencerwithoffloading method)": [[69, "lmflow.pipeline.vllm_inferencer.InferencerWithOffloading.inference", false]], "inference() (lmflow.pipeline.vllm_inferencer.memorysafevllminferencer method)": [[69, "lmflow.pipeline.vllm_inferencer.MemorySafeVLLMInferencer.inference", false]], "inference() (lmflow.pipeline.vllm_inferencer.vllminferencer method)": [[69, "lmflow.pipeline.vllm_inferencer.VLLMInferencer.inference", false]], "inference_batch_size (lmflow.args.inferencerarguments attribute)": [[4, "lmflow.args.InferencerArguments.inference_batch_size", false]], "inference_batch_size_per_device (lmflow.args.evaluatorarguments attribute)": [[4, "lmflow.args.EvaluatorArguments.inference_batch_size_per_device", false]], "inference_batch_size_per_device (lmflow.args.raftalignerarguments attribute)": [[4, "lmflow.args.RaftAlignerArguments.inference_batch_size_per_device", false]], "inference_func (lmflow.models.text_regression_model.textregressionmodel attribute)": [[21, "lmflow.models.text_regression_model.TextRegressionModel.inference_func", false]], "inferencer (class in lmflow.pipeline.inferencer)": [[55, "lmflow.pipeline.inferencer.Inferencer", false]], "inferencer_args (lmflow.pipeline.inferencer.inferencer attribute)": [[55, "lmflow.pipeline.inferencer.Inferencer.inferencer_args", false]], "inferencer_args (lmflow.pipeline.rm_inferencer.rewardmodelinferencer attribute)": [[58, "lmflow.pipeline.rm_inferencer.RewardModelInferencer.inferencer_args", false]], "inferencer_args 
(lmflow.pipeline.vllm_inferencer.inferencerwithoffloading attribute)": [[69, "lmflow.pipeline.vllm_inferencer.InferencerWithOffloading.inferencer_args", false]], "inferencer_file_path (lmflow.pipeline.vllm_inferencer.memorysafevllminferencer attribute)": [[69, "lmflow.pipeline.vllm_inferencer.MemorySafeVLLMInferencer.inferencer_file_path", false]], "inferencerarguments (class in lmflow.args)": [[4, "lmflow.args.InferencerArguments", false]], "inferencerwithoffloading (class in lmflow.pipeline.vllm_inferencer)": [[69, "lmflow.pipeline.vllm_inferencer.InferencerWithOffloading", false]], "init_git_repo() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.init_git_repo", false]], "init_to_zero() (in module lmflow.utils.flash_attention.triton_flash_attention)": [[94, "lmflow.utils.flash_attention.triton_flash_attention.init_to_zero", false]], "initial_iter_idx (lmflow.args.iterativealignerarguments attribute)": [[4, "lmflow.args.IterativeAlignerArguments.initial_iter_idx", false]], "input (lmflow.utils.data_utils.rewardmodelinferenceresultwithinput attribute)": [[88, "lmflow.utils.data_utils.RewardModelInferenceResultWithInput.input", false]], "input (lmflow.utils.data_utils.vllminferenceresultwithinput attribute)": [[88, "lmflow.utils.data_utils.VLLMInferenceResultWithInput.input", false]], "instance_fields_map (in module lmflow.utils.constants)": [[74, "lmflow.utils.constants.INSTANCE_FIELDS_MAP", false]], "internal_version (in module lmflow)": [[8, "lmflow.internal_version", false]], "internlm2_template (in module lmflow.utils.conversation_template)": [[81, "lmflow.utils.conversation_template.INTERNLM2_TEMPLATE", false]], "internlm2_template (in module lmflow.utils.conversation_template.internlm)": [[82, "lmflow.utils.conversation_template.internlm.INTERNLM2_TEMPLATE", false]], "inv_freq (lmflow.utils.position_interpolation.llama_rope_scaled_monkey_patch.condenserotaryembedding attribute)": [[100, "lmflow.utils.position_interpolation.llama_rope_scaled_monkey_patch.CondenseRotaryEmbedding.inv_freq", false]], "ipex_optimize_model() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.ipex_optimize_model", false]], "is_custom_dataset (lmflow.args.datasetarguments attribute)": [[4, "lmflow.args.DatasetArguments.is_custom_dataset", false]], "is_encoder_decoder (lmflow.pipeline.utils.dpov2_dataprocessor.preferencedatacollatorwithpadding attribute)": [[60, "lmflow.pipeline.utils.dpov2_dataprocessor.PreferenceDataCollatorWithPadding.is_encoder_decoder", false]], "is_in_train (lmflow.pipeline.utils.raft_trainer.rafttrainer attribute)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.is_in_train", false]], "is_loaded (lmflow.models.vision_encoder.clip_encoder.clipvisiontower attribute)": [[23, "lmflow.models.vision_encoder.clip_encoder.CLIPVisionTower.is_loaded", false]], "is_local_process_zero() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.is_local_process_zero", false]], "is_multimodal (lmflow.args.multimodaldatasetarguments attribute)": [[4, "lmflow.args.MultiModalDatasetArguments.is_multimodal", false]], "is_package_version_at_least() (in module lmflow.pipeline.auto_pipeline)": [[46, "lmflow.pipeline.auto_pipeline.is_package_version_at_least", false]], "is_sagemaker_mp_post_1_10 (in module lmflow.pipeline.utils.raft_trainer)": [[66, "lmflow.pipeline.utils.raft_trainer.IS_SAGEMAKER_MP_POST_1_10", false]], 
"is_torch_greater_or_equal_than_1_10 (in module lmflow.pipeline.utils.raft_trainer)": [[66, "lmflow.pipeline.utils.raft_trainer.is_torch_greater_or_equal_than_1_10", false]], "is_torch_less_than_1_11 (in module lmflow.pipeline.utils.raft_trainer)": [[66, "lmflow.pipeline.utils.raft_trainer.is_torch_less_than_1_11", false]], "is_world_process_zero() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.is_world_process_zero", false]], "iterativealignerarguments (class in lmflow.args)": [[4, "lmflow.args.IterativeAlignerArguments", false]], "iterativedpoaligner (class in lmflow.pipeline.iterative_dpo_aligner)": [[56, "lmflow.pipeline.iterative_dpo_aligner.IterativeDPOAligner", false]], "iterativedpoalignerarguments (class in lmflow.args)": [[4, "lmflow.args.IterativeDPOAlignerArguments", false]], "keep_linebreaks (lmflow.args.datasetarguments attribute)": [[4, "lmflow.args.DatasetArguments.keep_linebreaks", false]], "key_instances (in module lmflow.datasets.dataset)": [[5, "lmflow.datasets.dataset.KEY_INSTANCES", false]], "key_score (in module lmflow.datasets.dataset)": [[5, "lmflow.datasets.dataset.KEY_SCORE", false]], "key_type (in module lmflow.datasets.dataset)": [[5, "lmflow.datasets.dataset.KEY_TYPE", false]], "kwargs (lmflow.models.vision2seq_model.customautovision2seqmodel attribute)": [[22, "lmflow.models.vision2seq_model.CustomAutoVision2SeqModel.kwargs", false]], "label_names (lmflow.pipeline.utils.raft_trainer.rafttrainer attribute)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.label_names", false]], "label_pad_token_id (lmflow.pipeline.utils.dpov2_dataprocessor.preferencedatacollatorwithpadding attribute)": [[60, "lmflow.pipeline.utils.dpov2_dataprocessor.PreferenceDataCollatorWithPadding.label_pad_token_id", false]], "lamb (class in lmflow.optim.lamb)": [[36, "lmflow.optim.lamb.Lamb", false]], "lamb (lmflow.args.optimizernames attribute)": [[4, "lmflow.args.OptimizerNames.LAMB", false]], "language_model (lmflow.models.vision2seq_model.customautovision2seqmodel attribute)": [[22, "lmflow.models.vision2seq_model.CustomAutoVision2SeqModel.language_model", false]], "language_model_from_pretrained() (lmflow.models.vision2seq_model.customautovision2seqmodel method)": [[22, "lmflow.models.vision2seq_model.CustomAutoVision2SeqModel.language_model_from_pretrained", false]], "lars (class in lmflow.optim.lars)": [[37, "lmflow.optim.lars.LARS", false]], "lars (lmflow.args.optimizernames attribute)": [[4, "lmflow.args.OptimizerNames.LARS", false]], "last_checkpoint (lmflow.pipeline.finetuner.finetuner attribute)": [[53, "id0", false], [53, "lmflow.pipeline.finetuner.Finetuner.last_checkpoint", false]], "learning_rate (lmflow.args.dpoalignerarguments attribute)": [[4, "lmflow.args.DPOAlignerArguments.learning_rate", false]], "len_penalty (lmflow.pipeline.utils.dpov2_trainer.dpov2trainer attribute)": [[61, "lmflow.pipeline.utils.dpov2_trainer.DPOv2Trainer.len_penalty", false]], "length_penalty (lmflow.args.dpov2alignerarguments attribute)": [[4, "lmflow.args.DPOv2AlignerArguments.length_penalty", false]], "lisa_activated_layers (lmflow.args.finetunerarguments attribute)": [[4, "lmflow.args.FinetunerArguments.lisa_activated_layers", false]], "lisa_interval_steps (lmflow.args.finetunerarguments attribute)": [[4, "lmflow.args.FinetunerArguments.lisa_interval_steps", false]], "lisa_layers_attribute (lmflow.args.finetunerarguments attribute)": [[4, "lmflow.args.FinetunerArguments.lisa_layers_attribute", false]], "listformatter 
(class in lmflow.utils.conversation_template.base)": [[75, "lmflow.utils.conversation_template.base.ListFormatter", false]], "llama2_template (in module lmflow.utils.conversation_template)": [[81, "lmflow.utils.conversation_template.LLAMA2_TEMPLATE", false]], "llama2_template (in module lmflow.utils.conversation_template.llama)": [[83, "lmflow.utils.conversation_template.llama.LLAMA2_TEMPLATE", false]], "llama2conversationtemplate (class in lmflow.utils.conversation_template.llama)": [[83, "lmflow.utils.conversation_template.llama.Llama2ConversationTemplate", false]], "llama3_template (in module lmflow.utils.conversation_template)": [[81, "lmflow.utils.conversation_template.LLAMA3_TEMPLATE", false]], "llama3_template (in module lmflow.utils.conversation_template.llama)": [[83, "lmflow.utils.conversation_template.llama.LLAMA3_TEMPLATE", false]], "llama_2 (lmflow.utils.llava_conversation_lib.separatorstyle attribute)": [[96, "lmflow.utils.llava_conversation_lib.SeparatorStyle.LLAMA_2", false]], "llava_loading (lmflow.args.vismodelarguments attribute)": [[4, "lmflow.args.VisModelArguments.llava_loading", false]], "llava_pretrain_model_path (lmflow.args.vismodelarguments attribute)": [[4, "lmflow.args.VisModelArguments.llava_pretrain_model_path", false]], "llm_model_name_or_path (lmflow.args.vismodelarguments attribute)": [[4, "lmflow.args.VisModelArguments.llm_model_name_or_path", false]], "lm_evaluation_metric (lmflow.args.benchmarkingarguments attribute)": [[4, "lmflow.args.BenchmarkingArguments.lm_evaluation_metric", false]], "lmflow": [[8, "module-lmflow", false]], "lmflow.args": [[4, "module-lmflow.args", false]], "lmflow.datasets": [[6, "module-lmflow.datasets", false]], "lmflow.datasets.dataset": [[5, "module-lmflow.datasets.dataset", false]], "lmflow.datasets.multi_modal_dataset": [[7, "module-lmflow.datasets.multi_modal_dataset", false]], "lmflow.models": [[17, "module-lmflow.models", false]], "lmflow.models.auto_model": [[9, "module-lmflow.models.auto_model", false]], "lmflow.models.base_model": [[10, "module-lmflow.models.base_model", false]], "lmflow.models.decoder_model": [[11, "module-lmflow.models.decoder_model", false]], "lmflow.models.encoder_decoder_model": [[12, "module-lmflow.models.encoder_decoder_model", false]], "lmflow.models.hf_decoder_model": [[13, "module-lmflow.models.hf_decoder_model", false]], "lmflow.models.hf_encoder_decoder_model": [[14, "module-lmflow.models.hf_encoder_decoder_model", false]], "lmflow.models.hf_model_mixin": [[15, "module-lmflow.models.hf_model_mixin", false]], "lmflow.models.hf_text_regression_model": [[16, "module-lmflow.models.hf_text_regression_model", false]], "lmflow.models.interfaces": [[18, "module-lmflow.models.interfaces", false]], "lmflow.models.interfaces.tunable": [[19, "module-lmflow.models.interfaces.tunable", false]], "lmflow.models.regression_model": [[20, "module-lmflow.models.regression_model", false]], "lmflow.models.text_regression_model": [[21, "module-lmflow.models.text_regression_model", false]], "lmflow.models.vision2seq_model": [[22, "module-lmflow.models.vision2seq_model", false]], "lmflow.models.vision_encoder": [[24, "module-lmflow.models.vision_encoder", false]], "lmflow.models.vision_encoder.clip_encoder": [[23, "module-lmflow.models.vision_encoder.clip_encoder", false]], "lmflow.optim": [[35, "module-lmflow.optim", false]], "lmflow.optim.adabelief": [[25, "module-lmflow.optim.adabelief", false]], "lmflow.optim.adabound": [[26, "module-lmflow.optim.adabound", false]], "lmflow.optim.adadelta": [[27, 
"module-lmflow.optim.adadelta", false]], "lmflow.optim.adagrad": [[28, "module-lmflow.optim.adagrad", false]], "lmflow.optim.adam": [[29, "module-lmflow.optim.adam", false]], "lmflow.optim.adamax": [[30, "module-lmflow.optim.adamax", false]], "lmflow.optim.adamp": [[31, "module-lmflow.optim.adamp", false]], "lmflow.optim.adamw_schedule_free": [[32, "module-lmflow.optim.adamw_schedule_free", false]], "lmflow.optim.adan": [[33, "module-lmflow.optim.adan", false]], "lmflow.optim.dummy": [[34, "module-lmflow.optim.dummy", false]], "lmflow.optim.lamb": [[36, "module-lmflow.optim.lamb", false]], "lmflow.optim.lars": [[37, "module-lmflow.optim.lars", false]], "lmflow.optim.nadam": [[38, "module-lmflow.optim.nadam", false]], "lmflow.optim.novograd": [[39, "module-lmflow.optim.novograd", false]], "lmflow.optim.optimizers": [[40, "module-lmflow.optim.optimizers", false]], "lmflow.optim.radam": [[41, "module-lmflow.optim.radam", false]], "lmflow.optim.sgd_schedule_free": [[42, "module-lmflow.optim.sgd_schedule_free", false]], "lmflow.optim.sgdp": [[43, "module-lmflow.optim.sgdp", false]], "lmflow.optim.sophia": [[44, "module-lmflow.optim.sophia", false]], "lmflow.optim.yogi": [[45, "module-lmflow.optim.yogi", false]], "lmflow.pipeline": [[54, "module-lmflow.pipeline", false]], "lmflow.pipeline.auto_pipeline": [[46, "module-lmflow.pipeline.auto_pipeline", false]], "lmflow.pipeline.base_aligner": [[47, "module-lmflow.pipeline.base_aligner", false]], "lmflow.pipeline.base_pipeline": [[48, "module-lmflow.pipeline.base_pipeline", false]], "lmflow.pipeline.base_tuner": [[49, "module-lmflow.pipeline.base_tuner", false]], "lmflow.pipeline.dpo_aligner": [[50, "module-lmflow.pipeline.dpo_aligner", false]], "lmflow.pipeline.dpov2_aligner": [[51, "module-lmflow.pipeline.dpov2_aligner", false]], "lmflow.pipeline.evaluator": [[52, "module-lmflow.pipeline.evaluator", false]], "lmflow.pipeline.finetuner": [[53, "module-lmflow.pipeline.finetuner", false]], "lmflow.pipeline.inferencer": [[55, "module-lmflow.pipeline.inferencer", false]], "lmflow.pipeline.iterative_dpo_aligner": [[56, "module-lmflow.pipeline.iterative_dpo_aligner", false]], "lmflow.pipeline.raft_aligner": [[57, "module-lmflow.pipeline.raft_aligner", false]], "lmflow.pipeline.rm_inferencer": [[58, "module-lmflow.pipeline.rm_inferencer", false]], "lmflow.pipeline.rm_tuner": [[59, "module-lmflow.pipeline.rm_tuner", false]], "lmflow.pipeline.utils": [[62, "module-lmflow.pipeline.utils", false]], "lmflow.pipeline.utils.dpov2_dataprocessor": [[60, "module-lmflow.pipeline.utils.dpov2_dataprocessor", false]], "lmflow.pipeline.utils.dpov2_trainer": [[61, "module-lmflow.pipeline.utils.dpov2_trainer", false]], "lmflow.pipeline.utils.memory_safe_dpov2_align": [[63, "module-lmflow.pipeline.utils.memory_safe_dpov2_align", false]], "lmflow.pipeline.utils.memory_safe_vllm_inference": [[64, "module-lmflow.pipeline.utils.memory_safe_vllm_inference", false]], "lmflow.pipeline.utils.peft_trainer": [[65, "module-lmflow.pipeline.utils.peft_trainer", false]], "lmflow.pipeline.utils.raft_trainer": [[66, "module-lmflow.pipeline.utils.raft_trainer", false]], "lmflow.pipeline.utils.rm_dataprocessor": [[67, "module-lmflow.pipeline.utils.rm_dataprocessor", false]], "lmflow.pipeline.utils.rm_trainer": [[68, "module-lmflow.pipeline.utils.rm_trainer", false]], "lmflow.pipeline.vllm_inferencer": [[69, "module-lmflow.pipeline.vllm_inferencer", false]], "lmflow.tokenization": [[72, "module-lmflow.tokenization", false]], "lmflow.tokenization.hf_decoder_model": [[70, 
"module-lmflow.tokenization.hf_decoder_model", false]], "lmflow.tokenization.hf_text_regression_model": [[71, "module-lmflow.tokenization.hf_text_regression_model", false]], "lmflow.utils": [[95, "module-lmflow.utils", false]], "lmflow.utils.common": [[73, "module-lmflow.utils.common", false]], "lmflow.utils.constants": [[74, "module-lmflow.utils.constants", false]], "lmflow.utils.conversation_template": [[81, "module-lmflow.utils.conversation_template", false]], "lmflow.utils.conversation_template.base": [[75, "module-lmflow.utils.conversation_template.base", false]], "lmflow.utils.conversation_template.chatglm": [[76, "module-lmflow.utils.conversation_template.chatglm", false]], "lmflow.utils.conversation_template.chatml": [[77, "module-lmflow.utils.conversation_template.chatml", false]], "lmflow.utils.conversation_template.deepseek": [[78, "module-lmflow.utils.conversation_template.deepseek", false]], "lmflow.utils.conversation_template.fox": [[79, "module-lmflow.utils.conversation_template.fox", false]], "lmflow.utils.conversation_template.gemma": [[80, "module-lmflow.utils.conversation_template.gemma", false]], "lmflow.utils.conversation_template.internlm": [[82, "module-lmflow.utils.conversation_template.internlm", false]], "lmflow.utils.conversation_template.llama": [[83, "module-lmflow.utils.conversation_template.llama", false]], "lmflow.utils.conversation_template.phi": [[84, "module-lmflow.utils.conversation_template.phi", false]], "lmflow.utils.conversation_template.qwen": [[85, "module-lmflow.utils.conversation_template.qwen", false]], "lmflow.utils.conversation_template.yi": [[86, "module-lmflow.utils.conversation_template.yi", false]], "lmflow.utils.conversation_template.zephyr": [[87, "module-lmflow.utils.conversation_template.zephyr", false]], "lmflow.utils.data_utils": [[88, "module-lmflow.utils.data_utils", false]], "lmflow.utils.flash_attention": [[92, "module-lmflow.utils.flash_attention", false]], "lmflow.utils.flash_attention.bloom_flash_attention": [[89, "module-lmflow.utils.flash_attention.bloom_flash_attention", false]], "lmflow.utils.flash_attention.gpt2_flash_attention": [[90, "module-lmflow.utils.flash_attention.gpt2_flash_attention", false]], "lmflow.utils.flash_attention.gpt_neo_flash_attention": [[91, "module-lmflow.utils.flash_attention.gpt_neo_flash_attention", false]], "lmflow.utils.flash_attention.llama_flash_attention": [[93, "module-lmflow.utils.flash_attention.llama_flash_attention", false]], "lmflow.utils.flash_attention.triton_flash_attention": [[94, "module-lmflow.utils.flash_attention.triton_flash_attention", false]], "lmflow.utils.llava_conversation_lib": [[96, "module-lmflow.utils.llava_conversation_lib", false]], "lmflow.utils.model": [[97, "module-lmflow.utils.model", false]], "lmflow.utils.multimodal": [[98, "module-lmflow.utils.multimodal", false]], "lmflow.utils.position_interpolation": [[99, "module-lmflow.utils.position_interpolation", false]], "lmflow.utils.position_interpolation.llama_rope_scaled_monkey_patch": [[100, "module-lmflow.utils.position_interpolation.llama_rope_scaled_monkey_patch", false]], "lmflow.version": [[101, "module-lmflow.version", false]], "lmflow_lora_target_modules_mapping (in module lmflow.utils.constants)": [[74, "lmflow.utils.constants.LMFLOW_LORA_TARGET_MODULES_MAPPING", false]], "load_data() (in module lmflow.utils.data_utils)": [[88, "lmflow.utils.data_utils.load_data", false]], "load_in_4bit (lmflow.args.modelarguments attribute)": [[4, "lmflow.args.ModelArguments.load_in_4bit", false]], 
"load_inference_results() (lmflow.pipeline.vllm_inferencer.inferencerwithoffloading method)": [[69, "lmflow.pipeline.vllm_inferencer.InferencerWithOffloading.load_inference_results", false]], "load_inference_results() (lmflow.pipeline.vllm_inferencer.vllminferencer method)": [[69, "lmflow.pipeline.vllm_inferencer.VLLMInferencer.load_inference_results", false]], "load_llava_pretrain_model() (in module lmflow.utils.multimodal)": [[98, "lmflow.utils.multimodal.load_llava_pretrain_model", false]], "load_model() (lmflow.models.vision_encoder.clip_encoder.clipvisiontower method)": [[23, "lmflow.models.vision_encoder.clip_encoder.CLIPVisionTower.load_model", false]], "load_prompt_cache() (lmflow.models.vision2seq_model.customautovision2seqmodel method)": [[22, "lmflow.models.vision2seq_model.CustomAutoVision2SeqModel.load_prompt_cache", false]], "local_rank (lmflow.args.dpoalignerarguments attribute)": [[4, "lmflow.args.DPOAlignerArguments.local_rank", false]], "local_rank (lmflow.args.evaluatorarguments attribute)": [[4, "lmflow.args.EvaluatorArguments.local_rank", false]], "local_rank (lmflow.args.inferencerarguments attribute)": [[4, "lmflow.args.InferencerArguments.local_rank", false]], "local_rank (lmflow.pipeline.evaluator.evaluator attribute)": [[52, "lmflow.pipeline.evaluator.Evaluator.local_rank", false]], "local_rank (lmflow.pipeline.inferencer.inferencer attribute)": [[55, "lmflow.pipeline.inferencer.Inferencer.local_rank", false]], "local_rank (lmflow.pipeline.rm_inferencer.rewardmodelinferencer attribute)": [[58, "lmflow.pipeline.rm_inferencer.RewardModelInferencer.local_rank", false]], "log() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.log", false]], "log_freq (lmflow.args.dpoalignerarguments attribute)": [[4, "lmflow.args.DPOAlignerArguments.log_freq", false]], "log_level (lmflow.pipeline.finetuner.finetuner attribute)": [[53, "lmflow.pipeline.finetuner.Finetuner.log_level", false]], "log_level (lmflow.pipeline.utils.raft_trainer.rafttrainer attribute)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.log_level", false]], "logdir (in module lmflow.utils.constants)": [[74, "lmflow.utils.constants.LOGDIR", false]], "logger (in module lmflow.args)": [[4, "lmflow.args.logger", false]], "logger (in module lmflow.datasets.dataset)": [[5, "lmflow.datasets.dataset.logger", false]], "logger (in module lmflow.models.hf_decoder_model)": [[13, "lmflow.models.hf_decoder_model.logger", false]], "logger (in module lmflow.models.hf_encoder_decoder_model)": [[14, "lmflow.models.hf_encoder_decoder_model.logger", false]], "logger (in module lmflow.models.hf_model_mixin)": [[15, "lmflow.models.hf_model_mixin.logger", false]], "logger (in module lmflow.models.hf_text_regression_model)": [[16, "lmflow.models.hf_text_regression_model.logger", false]], "logger (in module lmflow.pipeline.dpov2_aligner)": [[51, "lmflow.pipeline.dpov2_aligner.logger", false]], "logger (in module lmflow.pipeline.finetuner)": [[53, "lmflow.pipeline.finetuner.logger", false]], "logger (in module lmflow.pipeline.inferencer)": [[55, "lmflow.pipeline.inferencer.logger", false]], "logger (in module lmflow.pipeline.iterative_dpo_aligner)": [[56, "lmflow.pipeline.iterative_dpo_aligner.logger", false]], "logger (in module lmflow.pipeline.raft_aligner)": [[57, "lmflow.pipeline.raft_aligner.logger", false]], "logger (in module lmflow.pipeline.rm_inferencer)": [[58, "lmflow.pipeline.rm_inferencer.logger", false]], "logger (in module lmflow.pipeline.rm_tuner)": 
[[59, "lmflow.pipeline.rm_tuner.logger", false]], "logger (in module lmflow.pipeline.utils.dpov2_dataprocessor)": [[60, "lmflow.pipeline.utils.dpov2_dataprocessor.logger", false]], "logger (in module lmflow.pipeline.utils.dpov2_trainer)": [[61, "lmflow.pipeline.utils.dpov2_trainer.logger", false]], "logger (in module lmflow.pipeline.utils.memory_safe_dpov2_align)": [[63, "lmflow.pipeline.utils.memory_safe_dpov2_align.logger", false]], "logger (in module lmflow.pipeline.utils.memory_safe_vllm_inference)": [[64, "lmflow.pipeline.utils.memory_safe_vllm_inference.logger", false]], "logger (in module lmflow.pipeline.utils.raft_trainer)": [[66, "lmflow.pipeline.utils.raft_trainer.logger", false]], "logger (in module lmflow.pipeline.utils.rm_dataprocessor)": [[67, "lmflow.pipeline.utils.rm_dataprocessor.logger", false]], "logger (in module lmflow.pipeline.vllm_inferencer)": [[69, "lmflow.pipeline.vllm_inferencer.logger", false]], "logger (in module lmflow.tokenization.hf_decoder_model)": [[70, "lmflow.tokenization.hf_decoder_model.logger", false]], "logger (in module lmflow.tokenization.hf_text_regression_model)": [[71, "lmflow.tokenization.hf_text_regression_model.logger", false]], "logger (in module lmflow.utils.common)": [[73, "lmflow.utils.common.logger", false]], "logger (in module lmflow.utils.conversation_template.base)": [[75, "lmflow.utils.conversation_template.base.logger", false]], "logger (in module lmflow.utils.conversation_template.gemma)": [[80, "lmflow.utils.conversation_template.gemma.logger", false]], "logger (in module lmflow.utils.conversation_template.llama)": [[83, "lmflow.utils.conversation_template.llama.logger", false]], "logger (in module lmflow.utils.conversation_template.zephyr)": [[87, "lmflow.utils.conversation_template.zephyr.logger", false]], "logger (in module lmflow.utils.model)": [[97, "lmflow.utils.model.logger", false]], "logging_steps (lmflow.args.dpoalignerarguments attribute)": [[4, "lmflow.args.DPOAlignerArguments.logging_steps", false]], "lora_alpha (lmflow.args.modelarguments attribute)": [[4, "lmflow.args.ModelArguments.lora_alpha", false]], "lora_dropout (lmflow.args.modelarguments attribute)": [[4, "lmflow.args.ModelArguments.lora_dropout", false]], "lora_model_path (lmflow.args.modelarguments attribute)": [[4, "lmflow.args.ModelArguments.lora_model_path", false]], "lora_r (lmflow.args.modelarguments attribute)": [[4, "lmflow.args.ModelArguments.lora_r", false]], "lora_target_modules (lmflow.args.modelarguments attribute)": [[4, "lmflow.args.ModelArguments.lora_target_modules", false]], "lora_target_modules_mapping (in module lmflow.models.hf_model_mixin)": [[15, "lmflow.models.hf_model_mixin.LORA_TARGET_MODULES_MAPPING", false]], "loss_type (lmflow.args.dpov2alignerarguments attribute)": [[4, "lmflow.args.DPOv2AlignerArguments.loss_type", false]], "low_resource (lmflow.args.vismodelarguments attribute)": [[4, "lmflow.args.VisModelArguments.low_resource", false]], "lr_scheduler_type (lmflow.args.dpoalignerarguments attribute)": [[4, "lmflow.args.DPOAlignerArguments.lr_scheduler_type", false]], "main() (in module lmflow.pipeline.utils.memory_safe_dpov2_align)": [[63, "lmflow.pipeline.utils.memory_safe_dpov2_align.main", false]], "main() (in module lmflow.pipeline.utils.memory_safe_vllm_inference)": [[64, "lmflow.pipeline.utils.memory_safe_vllm_inference.main", false]], "make_shell_args_from_dataclass() (in module lmflow.utils.common)": [[73, "lmflow.utils.common.make_shell_args_from_dataclass", false]], "map() (lmflow.datasets.dataset method)": [[6, 
"lmflow.datasets.Dataset.map", false]], "map() (lmflow.datasets.dataset.dataset method)": [[5, "lmflow.datasets.dataset.Dataset.map", false]], "margin_scale (lmflow.args.dpov2alignerarguments attribute)": [[4, "lmflow.args.DPOv2AlignerArguments.margin_scale", false]], "mask (lmflow.utils.conversation_template.base.templatecomponent attribute)": [[75, "lmflow.utils.conversation_template.base.TemplateComponent.mask", false]], "mask_prompt (lmflow.args.dpov2alignerarguments attribute)": [[4, "lmflow.args.DPOv2AlignerArguments.mask_prompt", false]], "mask_prompt (lmflow.pipeline.utils.dpov2_dataprocessor.preferencedatacollatorwithpadding attribute)": [[60, "lmflow.pipeline.utils.dpov2_dataprocessor.PreferenceDataCollatorWithPadding.mask_prompt", false]], "max_eval_samples (lmflow.args.datasetarguments attribute)": [[4, "lmflow.args.DatasetArguments.max_eval_samples", false]], "max_length (lmflow.args.dpoalignerarguments attribute)": [[4, "lmflow.args.DPOAlignerArguments.max_length", false]], "max_length (lmflow.args.dpov2alignerarguments attribute)": [[4, "lmflow.args.DPOv2AlignerArguments.max_length", false]], "max_length (lmflow.pipeline.utils.dpov2_dataprocessor.preferencedatacollatorwithpadding attribute)": [[60, "lmflow.pipeline.utils.dpov2_dataprocessor.PreferenceDataCollatorWithPadding.max_length", false]], "max_length (lmflow.pipeline.utils.rm_dataprocessor.rewarddatacollatorwithpadding attribute)": [[67, "lmflow.pipeline.utils.rm_dataprocessor.RewardDataCollatorWithPadding.max_length", false]], "max_new_tokens (lmflow.args.evaluatorarguments attribute)": [[4, "lmflow.args.EvaluatorArguments.max_new_tokens", false]], "max_new_tokens (lmflow.args.inferencerarguments attribute)": [[4, "lmflow.args.InferencerArguments.max_new_tokens", false]], "max_prompt_length (lmflow.args.dpoalignerarguments attribute)": [[4, "lmflow.args.DPOAlignerArguments.max_prompt_length", false]], "max_prompt_length (lmflow.args.dpov2alignerarguments attribute)": [[4, "lmflow.args.DPOv2AlignerArguments.max_prompt_length", false]], "max_prompt_length (lmflow.pipeline.utils.dpov2_dataprocessor.preferencedatacollatorwithpadding attribute)": [[60, "lmflow.pipeline.utils.dpov2_dataprocessor.PreferenceDataCollatorWithPadding.max_prompt_length", false]], "max_seq_len_cached (lmflow.utils.position_interpolation.llama_rope_scaled_monkey_patch.condenserotaryembedding attribute)": [[100, "lmflow.utils.position_interpolation.llama_rope_scaled_monkey_patch.CondenseRotaryEmbedding.max_seq_len_cached", false]], "max_steps (lmflow.args.dpoalignerarguments attribute)": [[4, "lmflow.args.DPOAlignerArguments.max_steps", false]], "max_target_length (lmflow.pipeline.utils.dpov2_dataprocessor.preferencedatacollatorwithpadding attribute)": [[60, "lmflow.pipeline.utils.dpov2_dataprocessor.PreferenceDataCollatorWithPadding.max_target_length", false]], "max_train_samples (lmflow.args.datasetarguments attribute)": [[4, "lmflow.args.DatasetArguments.max_train_samples", false]], "memory_safe_dpov2_align_env_var_to_remove (in module lmflow.utils.constants)": [[74, "lmflow.utils.constants.MEMORY_SAFE_DPOV2_ALIGN_ENV_VAR_TO_REMOVE", false]], "memory_safe_vllm_inference_env_var_to_remove (in module lmflow.utils.constants)": [[74, "lmflow.utils.constants.MEMORY_SAFE_VLLM_INFERENCE_ENV_VAR_TO_REMOVE", false]], "memory_safe_vllm_inference_finish_flag (in module lmflow.utils.constants)": [[74, "lmflow.utils.constants.MEMORY_SAFE_VLLM_INFERENCE_FINISH_FLAG", false]], "memorysafedpov2aligner (class in lmflow.pipeline.dpov2_aligner)": [[51, 
"lmflow.pipeline.dpov2_aligner.MemorySafeDPOv2Aligner", false]], "memorysafevllminferencer (class in lmflow.pipeline.vllm_inferencer)": [[69, "lmflow.pipeline.vllm_inferencer.MemorySafeVLLMInferencer", false]], "merge_lora_weights() (lmflow.models.hf_decoder_model.hfdecodermodel method)": [[13, "lmflow.models.hf_decoder_model.HFDecoderModel.merge_lora_weights", false]], "merge_lora_weights() (lmflow.models.hf_encoder_decoder_model.hfencoderdecodermodel method)": [[14, "lmflow.models.hf_encoder_decoder_model.HFEncoderDecoderModel.merge_lora_weights", false]], "messages (lmflow.utils.llava_conversation_lib.conversation attribute)": [[96, "lmflow.utils.llava_conversation_lib.Conversation.messages", false]], "metric (lmflow.args.evaluatorarguments attribute)": [[4, "lmflow.args.EvaluatorArguments.metric", false]], "minibatch_size (lmflow.pipeline.evaluator.evaluator attribute)": [[52, "lmflow.pipeline.evaluator.Evaluator.minibatch_size", false]], "mixed_precision (lmflow.args.evaluatorarguments attribute)": [[4, "lmflow.args.EvaluatorArguments.mixed_precision", false]], "mixed_precision (lmflow.args.inferencerarguments attribute)": [[4, "lmflow.args.InferencerArguments.mixed_precision", false]], "model (lmflow.pipeline.inferencer.toolinferencer attribute)": [[55, "lmflow.pipeline.inferencer.ToolInferencer.model", false]], "model (lmflow.pipeline.utils.dpov2_dataprocessor.preferencedatacollatorwithpadding attribute)": [[60, "lmflow.pipeline.utils.dpov2_dataprocessor.PreferenceDataCollatorWithPadding.model", false]], "model (lmflow.pipeline.utils.raft_trainer.rafttrainer attribute)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.model", false]], "model_args (lmflow.models.hf_model_mixin.hfmodelmixin attribute)": [[15, "lmflow.models.hf_model_mixin.HFModelMixin.model_args", false]], "model_args (lmflow.pipeline.dpo_aligner.dpoaligner attribute)": [[50, "lmflow.pipeline.dpo_aligner.DPOAligner.model_args", false]], "model_args (lmflow.pipeline.dpov2_aligner.dpov2aligner attribute)": [[51, "lmflow.pipeline.dpov2_aligner.DPOv2Aligner.model_args", false]], "model_args (lmflow.pipeline.dpov2_aligner.memorysafedpov2aligner attribute)": [[51, "lmflow.pipeline.dpov2_aligner.MemorySafeDPOv2Aligner.model_args", false]], "model_args (lmflow.pipeline.evaluator.evaluator attribute)": [[52, "lmflow.pipeline.evaluator.Evaluator.model_args", false]], "model_args (lmflow.pipeline.finetuner.finetuner attribute)": [[53, "lmflow.pipeline.finetuner.Finetuner.model_args", false]], "model_args (lmflow.pipeline.inferencer.inferencer attribute)": [[55, "lmflow.pipeline.inferencer.Inferencer.model_args", false]], "model_args (lmflow.pipeline.iterative_dpo_aligner.iterativedpoaligner attribute)": [[56, "lmflow.pipeline.iterative_dpo_aligner.IterativeDPOAligner.model_args", false]], "model_args (lmflow.pipeline.raft_aligner.raftaligner attribute)": [[57, "lmflow.pipeline.raft_aligner.RaftAligner.model_args", false]], "model_args (lmflow.pipeline.rm_inferencer.rewardmodelinferencer attribute)": [[58, "lmflow.pipeline.rm_inferencer.RewardModelInferencer.model_args", false]], "model_args (lmflow.pipeline.vllm_inferencer.inferencerwithoffloading attribute)": [[69, "lmflow.pipeline.vllm_inferencer.InferencerWithOffloading.model_args", false]], "model_config_classes (in module lmflow.args)": [[4, "lmflow.args.MODEL_CONFIG_CLASSES", false]], "model_max_length (lmflow.args.modelarguments attribute)": [[4, "lmflow.args.ModelArguments.model_max_length", false]], "model_name_or_path (lmflow.args.modelarguments attribute)": [[4, 
"lmflow.args.ModelArguments.model_name_or_path", false]], "model_revision (lmflow.args.modelarguments attribute)": [[4, "lmflow.args.ModelArguments.model_revision", false]], "model_type (lmflow.args.modelarguments attribute)": [[4, "lmflow.args.ModelArguments.model_type", false]], "model_types (in module lmflow.args)": [[4, "lmflow.args.MODEL_TYPES", false]], "model_wrapped (lmflow.pipeline.utils.raft_trainer.rafttrainer attribute)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.model_wrapped", false]], "modelarguments (class in lmflow.args)": [[4, "lmflow.args.ModelArguments", false]], "models_support_flash_attention (in module lmflow.models.hf_decoder_model)": [[13, "lmflow.models.hf_decoder_model.MODELS_SUPPORT_FLASH_ATTENTION", false]], "module": [[4, "module-lmflow.args", false], [5, "module-lmflow.datasets.dataset", false], [6, "module-lmflow.datasets", false], [7, "module-lmflow.datasets.multi_modal_dataset", false], [8, "module-lmflow", false], [9, "module-lmflow.models.auto_model", false], [10, "module-lmflow.models.base_model", false], [11, "module-lmflow.models.decoder_model", false], [12, "module-lmflow.models.encoder_decoder_model", false], [13, "module-lmflow.models.hf_decoder_model", false], [14, "module-lmflow.models.hf_encoder_decoder_model", false], [15, "module-lmflow.models.hf_model_mixin", false], [16, "module-lmflow.models.hf_text_regression_model", false], [17, "module-lmflow.models", false], [18, "module-lmflow.models.interfaces", false], [19, "module-lmflow.models.interfaces.tunable", false], [20, "module-lmflow.models.regression_model", false], [21, "module-lmflow.models.text_regression_model", false], [22, "module-lmflow.models.vision2seq_model", false], [23, "module-lmflow.models.vision_encoder.clip_encoder", false], [24, "module-lmflow.models.vision_encoder", false], [25, "module-lmflow.optim.adabelief", false], [26, "module-lmflow.optim.adabound", false], [27, "module-lmflow.optim.adadelta", false], [28, "module-lmflow.optim.adagrad", false], [29, "module-lmflow.optim.adam", false], [30, "module-lmflow.optim.adamax", false], [31, "module-lmflow.optim.adamp", false], [32, "module-lmflow.optim.adamw_schedule_free", false], [33, "module-lmflow.optim.adan", false], [34, "module-lmflow.optim.dummy", false], [35, "module-lmflow.optim", false], [36, "module-lmflow.optim.lamb", false], [37, "module-lmflow.optim.lars", false], [38, "module-lmflow.optim.nadam", false], [39, "module-lmflow.optim.novograd", false], [40, "module-lmflow.optim.optimizers", false], [41, "module-lmflow.optim.radam", false], [42, "module-lmflow.optim.sgd_schedule_free", false], [43, "module-lmflow.optim.sgdp", false], [44, "module-lmflow.optim.sophia", false], [45, "module-lmflow.optim.yogi", false], [46, "module-lmflow.pipeline.auto_pipeline", false], [47, "module-lmflow.pipeline.base_aligner", false], [48, "module-lmflow.pipeline.base_pipeline", false], [49, "module-lmflow.pipeline.base_tuner", false], [50, "module-lmflow.pipeline.dpo_aligner", false], [51, "module-lmflow.pipeline.dpov2_aligner", false], [52, "module-lmflow.pipeline.evaluator", false], [53, "module-lmflow.pipeline.finetuner", false], [54, "module-lmflow.pipeline", false], [55, "module-lmflow.pipeline.inferencer", false], [56, "module-lmflow.pipeline.iterative_dpo_aligner", false], [57, "module-lmflow.pipeline.raft_aligner", false], [58, "module-lmflow.pipeline.rm_inferencer", false], [59, "module-lmflow.pipeline.rm_tuner", false], [60, "module-lmflow.pipeline.utils.dpov2_dataprocessor", false], [61, 
"module-lmflow.pipeline.utils.dpov2_trainer", false], [62, "module-lmflow.pipeline.utils", false], [63, "module-lmflow.pipeline.utils.memory_safe_dpov2_align", false], [64, "module-lmflow.pipeline.utils.memory_safe_vllm_inference", false], [65, "module-lmflow.pipeline.utils.peft_trainer", false], [66, "module-lmflow.pipeline.utils.raft_trainer", false], [67, "module-lmflow.pipeline.utils.rm_dataprocessor", false], [68, "module-lmflow.pipeline.utils.rm_trainer", false], [69, "module-lmflow.pipeline.vllm_inferencer", false], [70, "module-lmflow.tokenization.hf_decoder_model", false], [71, "module-lmflow.tokenization.hf_text_regression_model", false], [72, "module-lmflow.tokenization", false], [73, "module-lmflow.utils.common", false], [74, "module-lmflow.utils.constants", false], [75, "module-lmflow.utils.conversation_template.base", false], [76, "module-lmflow.utils.conversation_template.chatglm", false], [77, "module-lmflow.utils.conversation_template.chatml", false], [78, "module-lmflow.utils.conversation_template.deepseek", false], [79, "module-lmflow.utils.conversation_template.fox", false], [80, "module-lmflow.utils.conversation_template.gemma", false], [81, "module-lmflow.utils.conversation_template", false], [82, "module-lmflow.utils.conversation_template.internlm", false], [83, "module-lmflow.utils.conversation_template.llama", false], [84, "module-lmflow.utils.conversation_template.phi", false], [85, "module-lmflow.utils.conversation_template.qwen", false], [86, "module-lmflow.utils.conversation_template.yi", false], [87, "module-lmflow.utils.conversation_template.zephyr", false], [88, "module-lmflow.utils.data_utils", false], [89, "module-lmflow.utils.flash_attention.bloom_flash_attention", false], [90, "module-lmflow.utils.flash_attention.gpt2_flash_attention", false], [91, "module-lmflow.utils.flash_attention.gpt_neo_flash_attention", false], [92, "module-lmflow.utils.flash_attention", false], [93, "module-lmflow.utils.flash_attention.llama_flash_attention", false], [94, "module-lmflow.utils.flash_attention.triton_flash_attention", false], [95, "module-lmflow.utils", false], [96, "module-lmflow.utils.llava_conversation_lib", false], [97, "module-lmflow.utils.model", false], [98, "module-lmflow.utils.multimodal", false], [99, "module-lmflow.utils.position_interpolation", false], [100, "module-lmflow.utils.position_interpolation.llama_rope_scaled_monkey_patch", false], [101, "module-lmflow.version", false]], "mpt (lmflow.utils.llava_conversation_lib.separatorstyle attribute)": [[96, "lmflow.utils.llava_conversation_lib.SeparatorStyle.MPT", false]], "multimodaldatasetarguments (class in lmflow.args)": [[4, "lmflow.args.MultiModalDatasetArguments", false]], "nadam (class in lmflow.optim.nadam)": [[38, "lmflow.optim.nadam.NAdam", false]], "nadam (lmflow.args.optimizernames attribute)": [[4, "lmflow.args.OptimizerNames.NADAM", false]], "novograd (class in lmflow.optim.novograd)": [[39, "lmflow.optim.novograd.NovoGrad", false]], "novograd (lmflow.args.optimizernames attribute)": [[4, "lmflow.args.OptimizerNames.NOVOGRAD", false]], "ntk_ratio (lmflow.utils.position_interpolation.llama_rope_scaled_monkey_patch.condenserotaryembedding attribute)": [[100, "lmflow.utils.position_interpolation.llama_rope_scaled_monkey_patch.CondenseRotaryEmbedding.ntk_ratio", false]], "num_examples() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.num_examples", false]], "num_output_sequences (lmflow.args.inferencerarguments attribute)": [[4, 
"lmflow.args.InferencerArguments.num_output_sequences", false]], "num_patches (lmflow.models.vision_encoder.clip_encoder.clipvisiontower property)": [[23, "lmflow.models.vision_encoder.clip_encoder.CLIPVisionTower.num_patches", false]], "num_raft_iteration (lmflow.args.raftalignerarguments attribute)": [[4, "lmflow.args.RaftAlignerArguments.num_raft_iteration", false]], "offset (lmflow.utils.llava_conversation_lib.conversation attribute)": [[96, "lmflow.utils.llava_conversation_lib.Conversation.offset", false]], "on_epoch_end() (lmflow.pipeline.utils.peft_trainer.peftsavingcallback method)": [[65, "lmflow.pipeline.utils.peft_trainer.PeftSavingCallback.on_epoch_end", false]], "on_save() (lmflow.pipeline.utils.peft_trainer.peftsavingcallback method)": [[65, "lmflow.pipeline.utils.peft_trainer.PeftSavingCallback.on_save", false]], "on_train_end() (lmflow.pipeline.utils.peft_trainer.peftsavingcallback method)": [[65, "lmflow.pipeline.utils.peft_trainer.PeftSavingCallback.on_train_end", false]], "optim_adam_beta1 (lmflow.args.finetunerarguments attribute)": [[4, "lmflow.args.FinetunerArguments.optim_adam_beta1", false]], "optim_adam_beta2 (lmflow.args.finetunerarguments attribute)": [[4, "lmflow.args.FinetunerArguments.optim_adam_beta2", false]], "optim_beta1 (lmflow.args.finetunerarguments attribute)": [[4, "lmflow.args.FinetunerArguments.optim_beta1", false]], "optim_beta2 (lmflow.args.finetunerarguments attribute)": [[4, "lmflow.args.FinetunerArguments.optim_beta2", false]], "optim_beta3 (lmflow.args.finetunerarguments attribute)": [[4, "lmflow.args.FinetunerArguments.optim_beta3", false]], "optim_dummy_beta1 (lmflow.args.finetunerarguments attribute)": [[4, "lmflow.args.FinetunerArguments.optim_dummy_beta1", false]], "optim_dummy_beta2 (lmflow.args.finetunerarguments attribute)": [[4, "lmflow.args.FinetunerArguments.optim_dummy_beta2", false]], "optim_momentum (lmflow.args.finetunerarguments attribute)": [[4, "lmflow.args.FinetunerArguments.optim_momentum", false]], "optim_weight_decay (lmflow.args.finetunerarguments attribute)": [[4, "lmflow.args.FinetunerArguments.optim_weight_decay", false]], "optimizer_name (in module lmflow.pipeline.utils.raft_trainer)": [[66, "lmflow.pipeline.utils.raft_trainer.OPTIMIZER_NAME", false]], "optimizer_type (lmflow.args.dpoalignerarguments attribute)": [[4, "lmflow.args.DPOAlignerArguments.optimizer_type", false]], "optimizernames (class in lmflow.args)": [[4, "lmflow.args.OptimizerNames", false]], "output (lmflow.utils.data_utils.rewardmodelinferenceresultwithinput attribute)": [[88, "lmflow.utils.data_utils.RewardModelInferenceResultWithInput.output", false]], "output (lmflow.utils.data_utils.vllminferenceresultwithinput attribute)": [[88, "lmflow.utils.data_utils.VLLMInferenceResultWithInput.output", false]], "output_dir (lmflow.args.dpoalignerarguments attribute)": [[4, "lmflow.args.DPOAlignerArguments.output_dir", false]], "output_dir (lmflow.args.evaluatorarguments attribute)": [[4, "lmflow.args.EvaluatorArguments.output_dir", false]], "output_dir (lmflow.args.iterativedpoalignerarguments attribute)": [[4, "lmflow.args.IterativeDPOAlignerArguments.output_dir", false]], "output_max_length (lmflow.args.raftalignerarguments attribute)": [[4, "lmflow.args.RaftAlignerArguments.output_max_length", false]], "output_min_length (lmflow.args.raftalignerarguments attribute)": [[4, "lmflow.args.RaftAlignerArguments.output_min_length", false]], "output_reward_path (lmflow.args.raftalignerarguments attribute)": [[4, 
"lmflow.args.RaftAlignerArguments.output_reward_path", false]], "output_reward_path (lmflow.pipeline.raft_aligner.raftaligner attribute)": [[57, "lmflow.pipeline.raft_aligner.RaftAligner.output_reward_path", false]], "overwrite_cache (lmflow.args.datasetarguments attribute)": [[4, "lmflow.args.DatasetArguments.overwrite_cache", false]], "pad_to_multiple_of (lmflow.pipeline.utils.rm_dataprocessor.rewarddatacollatorwithpadding attribute)": [[67, "lmflow.pipeline.utils.rm_dataprocessor.RewardDataCollatorWithPadding.pad_to_multiple_of", false]], "padding (lmflow.pipeline.utils.dpov2_dataprocessor.preferencedatacollatorwithpadding attribute)": [[60, "lmflow.pipeline.utils.dpov2_dataprocessor.PreferenceDataCollatorWithPadding.padding", false]], "padding (lmflow.pipeline.utils.rm_dataprocessor.rewarddatacollatorwithpadding attribute)": [[67, "lmflow.pipeline.utils.rm_dataprocessor.RewardDataCollatorWithPadding.padding", false]], "padding_side (lmflow.args.modelarguments attribute)": [[4, "lmflow.args.ModelArguments.padding_side", false]], "padding_value (lmflow.pipeline.utils.dpov2_dataprocessor.preferencedatacollatorwithpadding attribute)": [[60, "lmflow.pipeline.utils.dpov2_dataprocessor.PreferenceDataCollatorWithPadding.padding_value", false]], "paired_conversation_dataset_description (in module lmflow.utils.constants)": [[74, "lmflow.utils.constants.PAIRED_CONVERSATION_DATASET_DESCRIPTION", false]], "paired_conversation_tokenize_function() (in module lmflow.tokenization.hf_text_regression_model)": [[71, "lmflow.tokenization.hf_text_regression_model.paired_conversation_tokenize_function", false]], "paired_text_to_text_dataset_description (in module lmflow.utils.constants)": [[74, "lmflow.utils.constants.PAIRED_TEXT_TO_TEXT_DATASET_DESCRIPTION", false]], "parse_to_sampling_params() (lmflow.pipeline.vllm_inferencer.vllminferencer method)": [[69, "lmflow.pipeline.vllm_inferencer.VLLMInferencer.parse_to_sampling_params", false]], "peft_config (lmflow.models.hf_model_mixin.hfmodelmixin attribute)": [[15, "lmflow.models.hf_model_mixin.HFModelMixin.peft_config", false]], "peftrewardtrainer (class in lmflow.pipeline.utils.rm_trainer)": [[68, "lmflow.pipeline.utils.rm_trainer.PeftRewardTrainer", false]], "peftsavingcallback (class in lmflow.pipeline.utils.peft_trainer)": [[65, "lmflow.pipeline.utils.peft_trainer.PeftSavingCallback", false]], "pefttrainer (class in lmflow.pipeline.utils.peft_trainer)": [[65, "lmflow.pipeline.utils.peft_trainer.PeftTrainer", false]], "per_device_eval_batch_size (lmflow.args.dpoalignerarguments attribute)": [[4, "lmflow.args.DPOAlignerArguments.per_device_eval_batch_size", false]], "per_device_train_batch_size (lmflow.args.dpoalignerarguments attribute)": [[4, "lmflow.args.DPOAlignerArguments.per_device_train_batch_size", false]], "phi3_template (in module lmflow.utils.conversation_template)": [[81, "lmflow.utils.conversation_template.PHI3_TEMPLATE", false]], "phi3_template (in module lmflow.utils.conversation_template.phi)": [[84, "lmflow.utils.conversation_template.phi.PHI3_TEMPLATE", false]], "pi_ratio (lmflow.utils.position_interpolation.llama_rope_scaled_monkey_patch.condenserotaryembedding attribute)": [[100, "lmflow.utils.position_interpolation.llama_rope_scaled_monkey_patch.CondenseRotaryEmbedding.pi_ratio", false]], "pipeline_argument_mapping (in module lmflow.args)": [[4, "lmflow.args.PIPELINE_ARGUMENT_MAPPING", false]], "pipeline_mapping (in module lmflow.pipeline.auto_pipeline)": [[46, "lmflow.pipeline.auto_pipeline.PIPELINE_MAPPING", false]], 
"place_model_on_device (lmflow.pipeline.utils.raft_trainer.rafttrainer attribute)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.place_model_on_device", false]], "plain (lmflow.utils.llava_conversation_lib.separatorstyle attribute)": [[96, "lmflow.utils.llava_conversation_lib.SeparatorStyle.PLAIN", false]], "pop_callback() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.pop_callback", false]], "postprocess_distributed_inference_outputs() (lmflow.models.hf_text_regression_model.hftextregressionmodel static method)": [[16, "lmflow.models.hf_text_regression_model.HFTextRegressionModel.postprocess_distributed_inference_outputs", false]], "postprocess_inference_outputs() (lmflow.models.hf_text_regression_model.hftextregressionmodel static method)": [[16, "lmflow.models.hf_text_regression_model.HFTextRegressionModel.postprocess_inference_outputs", false]], "predict() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.predict", false]], "predict_next_token() (lmflow.pipeline.inferencer.speculativeinferencer static method)": [[55, "lmflow.pipeline.inferencer.SpeculativeInferencer.predict_next_token", false]], "prediction_loop() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.prediction_loop", false]], "prediction_step() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.prediction_step", false]], "preferencedatacollatorwithpadding (class in lmflow.pipeline.utils.dpov2_dataprocessor)": [[60, "lmflow.pipeline.utils.dpov2_dataprocessor.PreferenceDataCollatorWithPadding", false]], "prepare_inputs_for_inference() (lmflow.models.hf_decoder_model.hfdecodermodel method)": [[13, "lmflow.models.hf_decoder_model.HFDecoderModel.prepare_inputs_for_inference", false]], "prepare_inputs_for_inference() (lmflow.models.hf_text_regression_model.hftextregressionmodel method)": [[16, "lmflow.models.hf_text_regression_model.HFTextRegressionModel.prepare_inputs_for_inference", false]], "prepare_inputs_labels_for_multimodal() (lmflow.models.vision_encoder.clip_encoder.clipvisiontower method)": [[23, "lmflow.models.vision_encoder.clip_encoder.CLIPVisionTower.prepare_inputs_labels_for_multimodal", false]], "preprocess_llama_from_llava_plain() (in module lmflow.datasets.multi_modal_dataset)": [[7, "lmflow.datasets.multi_modal_dataset.preprocess_llama_from_llava_plain", false]], "preprocess_llama_from_llava_v1() (in module lmflow.datasets.multi_modal_dataset)": [[7, "lmflow.datasets.multi_modal_dataset.preprocess_llama_from_llava_v1", false]], "preprocess_logits_for_metrics (lmflow.pipeline.utils.raft_trainer.rafttrainer attribute)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.preprocess_logits_for_metrics", false]], "preprocess_multimodal_llava() (in module lmflow.datasets.multi_modal_dataset)": [[7, "lmflow.datasets.multi_modal_dataset.preprocess_multimodal_llava", false]], "preprocessing_num_workers (lmflow.args.datasetarguments attribute)": [[4, "lmflow.args.DatasetArguments.preprocessing_num_workers", false]], "preset_templates (in module lmflow.utils.conversation_template)": [[81, "lmflow.utils.conversation_template.PRESET_TEMPLATES", false]], "pretrained_language_projection_path (lmflow.args.vismodelarguments attribute)": [[4, "lmflow.args.VisModelArguments.pretrained_language_projection_path", false]], "print_banner() (in module lmflow.utils.common)": 
[[73, "lmflow.utils.common.print_banner", false]], "process_image_flag() (in module lmflow.utils.data_utils)": [[88, "lmflow.utils.data_utils.process_image_flag", false]], "processor_image_token_in_minigpt4() (lmflow.models.vision2seq_model.customautovision2seqmodel method)": [[22, "lmflow.models.vision2seq_model.CustomAutoVision2SeqModel.processor_image_token_in_minigpt4", false]], "prompt_cache_path (lmflow.args.vismodelarguments attribute)": [[4, "lmflow.args.VisModelArguments.prompt_cache_path", false]], "prompt_structure (lmflow.args.evaluatorarguments attribute)": [[4, "lmflow.args.EvaluatorArguments.prompt_structure", false]], "push_to_hub() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.push_to_hub", false]], "qformer_from_pretrained() (lmflow.models.vision2seq_model.customautovision2seqmodel method)": [[22, "lmflow.models.vision2seq_model.CustomAutoVision2SeqModel.qformer_from_pretrained", false]], "qformer_name_or_path (lmflow.args.vismodelarguments attribute)": [[4, "lmflow.args.VisModelArguments.qformer_name_or_path", false]], "quant_config (lmflow.models.hf_model_mixin.hfmodelmixin attribute)": [[15, "lmflow.models.hf_model_mixin.HFModelMixin.quant_config", false]], "quant_type (lmflow.args.modelarguments attribute)": [[4, "lmflow.args.ModelArguments.quant_type", false]], "qwen2_template (in module lmflow.utils.conversation_template)": [[81, "lmflow.utils.conversation_template.QWEN2_TEMPLATE", false]], "qwen2_template (in module lmflow.utils.conversation_template.qwen)": [[85, "lmflow.utils.conversation_template.qwen.QWEN2_TEMPLATE", false]], "radam (class in lmflow.optim.radam)": [[41, "lmflow.optim.radam.RAdam", false]], "radam (lmflow.args.optimizernames attribute)": [[4, "lmflow.args.OptimizerNames.RADAM", false]], "raft_batch_size (lmflow.args.raftalignerarguments attribute)": [[4, "lmflow.args.RaftAlignerArguments.raft_batch_size", false]], "raftaligner (class in lmflow.pipeline.raft_aligner)": [[57, "lmflow.pipeline.raft_aligner.RaftAligner", false]], "raftalignerarguments (class in lmflow.args)": [[4, "lmflow.args.RaftAlignerArguments", false]], "rafttrainer (class in lmflow.pipeline.utils.raft_trainer)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer", false]], "random_seed (lmflow.args.dpov2alignerarguments attribute)": [[4, "lmflow.args.DPOv2AlignerArguments.random_seed", false]], "random_seed (lmflow.args.evaluatorarguments attribute)": [[4, "lmflow.args.EvaluatorArguments.random_seed", false]], "random_seed (lmflow.args.inferencerarguments attribute)": [[4, "lmflow.args.InferencerArguments.random_seed", false]], "random_shuffle (lmflow.args.evaluatorarguments attribute)": [[4, "lmflow.args.EvaluatorArguments.random_shuffle", false]], "rectify (lmflow.optim.adabelief.adabelief attribute)": [[25, "lmflow.optim.adabelief.AdaBelief.rectify", false]], "ref_model_args (lmflow.pipeline.dpov2_aligner.dpov2aligner attribute)": [[51, "lmflow.pipeline.dpov2_aligner.DPOv2Aligner.ref_model_args", false]], "ref_model_args (lmflow.pipeline.dpov2_aligner.memorysafedpov2aligner attribute)": [[51, "lmflow.pipeline.dpov2_aligner.MemorySafeDPOv2Aligner.ref_model_args", false]], "ref_model_args (lmflow.pipeline.iterative_dpo_aligner.iterativedpoaligner attribute)": [[56, "lmflow.pipeline.iterative_dpo_aligner.IterativeDPOAligner.ref_model_args", false]], "referencemodelarguments (in module lmflow.pipeline.dpov2_aligner)": [[51, "lmflow.pipeline.dpov2_aligner.ReferenceModelArguments", false]], 
"referencemodelarguments (in module lmflow.pipeline.utils.memory_safe_dpov2_align)": [[63, "lmflow.pipeline.utils.memory_safe_dpov2_align.ReferenceModelArguments", false]], "register_inference_function() (lmflow.models.text_regression_model.textregressionmodel method)": [[21, "lmflow.models.text_regression_model.TextRegressionModel.register_inference_function", false]], "register_prompt_cache() (lmflow.models.vision2seq_model.customautovision2seqmodel method)": [[22, "lmflow.models.vision2seq_model.CustomAutoVision2SeqModel.register_prompt_cache", false]], "register_tokenizer() (lmflow.datasets.custommultimodaldataset method)": [[6, "lmflow.datasets.CustomMultiModalDataset.register_tokenizer", false]], "register_tokenizer() (lmflow.datasets.multi_modal_dataset.custommultimodaldataset method)": [[7, "lmflow.datasets.multi_modal_dataset.CustomMultiModalDataset.register_tokenizer", false]], "regressionmodel (class in lmflow.models.regression_model)": [[20, "lmflow.models.regression_model.RegressionModel", false]], "remove_callback() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.remove_callback", false]], "remove_dataclass_attr_prefix() (in module lmflow.utils.common)": [[73, "lmflow.utils.common.remove_dataclass_attr_prefix", false]], "remove_last_separator() (lmflow.utils.conversation_template.base.conversationtemplate method)": [[75, "lmflow.utils.conversation_template.base.ConversationTemplate.remove_last_separator", false]], "remove_last_separator() (lmflow.utils.conversation_template.conversationtemplate method)": [[81, "lmflow.utils.conversation_template.ConversationTemplate.remove_last_separator", false]], "remove_unused_columns (lmflow.args.finetunerarguments attribute)": [[4, "lmflow.args.FinetunerArguments.remove_unused_columns", false]], "repetition_penalty (lmflow.args.evaluatorarguments attribute)": [[4, "lmflow.args.EvaluatorArguments.repetition_penalty", false]], "repetition_penalty (lmflow.args.inferencerarguments attribute)": [[4, "lmflow.args.InferencerArguments.repetition_penalty", false]], "replace_bloom_attn_with_flash_attn() (in module lmflow.utils.flash_attention.bloom_flash_attention)": [[89, "lmflow.utils.flash_attention.bloom_flash_attention.replace_bloom_attn_with_flash_attn", false]], "replace_gpt2_attn_with_flash_attn() (in module lmflow.utils.flash_attention.gpt2_flash_attention)": [[90, "lmflow.utils.flash_attention.gpt2_flash_attention.replace_gpt2_attn_with_flash_attn", false]], "replace_gpt_neo_attn_with_flash_attn() (in module lmflow.utils.flash_attention.gpt_neo_flash_attention)": [[91, "lmflow.utils.flash_attention.gpt_neo_flash_attention.replace_gpt_neo_attn_with_flash_attn", false]], "replace_llama_attn_with_flash_attn() (in module lmflow.utils.flash_attention.llama_flash_attention)": [[93, "lmflow.utils.flash_attention.llama_flash_attention.replace_llama_attn_with_flash_attn", false]], "replace_llama_with_condense() (in module lmflow.utils.position_interpolation.llama_rope_scaled_monkey_patch)": [[100, "lmflow.utils.position_interpolation.llama_rope_scaled_monkey_patch.replace_llama_with_condense", false]], "report_to (lmflow.args.dpoalignerarguments attribute)": [[4, "lmflow.args.DPOAlignerArguments.report_to", false]], "reset() (lmflow.optim.adabelief.adabelief method)": [[25, "lmflow.optim.adabelief.AdaBelief.reset", false]], "restart_opt() (lmflow.optim.adan.adan method)": [[33, "lmflow.optim.adan.Adan.restart_opt", false]], "results_path (lmflow.args.inferencerarguments attribute)": 
[[4, "lmflow.args.InferencerArguments.results_path", false]], "return_code_error_buffer (in module lmflow.utils.constants)": [[74, "lmflow.utils.constants.RETURN_CODE_ERROR_BUFFER", false]], "return_tensors (lmflow.pipeline.utils.rm_dataprocessor.rewarddatacollatorwithpadding attribute)": [[67, "lmflow.pipeline.utils.rm_dataprocessor.RewardDataCollatorWithPadding.return_tensors", false]], "reward_model_args (lmflow.pipeline.iterative_dpo_aligner.iterativedpoaligner attribute)": [[56, "lmflow.pipeline.iterative_dpo_aligner.IterativeDPOAligner.reward_model_args", false]], "reward_model_inference_batch_size (lmflow.args.iterativedpoalignerarguments attribute)": [[4, "lmflow.args.IterativeDPOAlignerArguments.reward_model_inference_batch_size", false]], "reward_model_inference_block_size (lmflow.args.iterativedpoalignerarguments attribute)": [[4, "lmflow.args.IterativeDPOAlignerArguments.reward_model_inference_block_size", false]], "rewarddatacollatorwithpadding (class in lmflow.pipeline.utils.rm_dataprocessor)": [[67, "lmflow.pipeline.utils.rm_dataprocessor.RewardDataCollatorWithPadding", false]], "rewardmodelinferencer (class in lmflow.pipeline.rm_inferencer)": [[58, "lmflow.pipeline.rm_inferencer.RewardModelInferencer", false]], "rewardmodelinferenceresultwithinput (class in lmflow.utils.data_utils)": [[88, "lmflow.utils.data_utils.RewardModelInferenceResultWithInput", false]], "rewardmodeltuner (class in lmflow.pipeline.rm_tuner)": [[59, "lmflow.pipeline.rm_tuner.RewardModelTuner", false]], "rewardmodeltunerarguments (class in lmflow.args)": [[4, "lmflow.args.RewardModelTunerArguments", false]], "rewardtrainer (class in lmflow.pipeline.utils.rm_trainer)": [[68, "lmflow.pipeline.utils.rm_trainer.RewardTrainer", false]], "rm_loss() (in module lmflow.pipeline.utils.rm_trainer)": [[68, "lmflow.pipeline.utils.rm_trainer.rm_loss", false]], "roles (lmflow.utils.llava_conversation_lib.conversation attribute)": [[96, "lmflow.utils.llava_conversation_lib.Conversation.roles", false]], "rope_ntk_ratio (lmflow.args.modelarguments attribute)": [[4, "lmflow.args.ModelArguments.rope_ntk_ratio", false]], "rope_pi_ratio (lmflow.args.modelarguments attribute)": [[4, "lmflow.args.ModelArguments.rope_pi_ratio", false]], "rstrip_partial_utf8() (in module lmflow.pipeline.inferencer)": [[55, "lmflow.pipeline.inferencer.rstrip_partial_utf8", false]], "run_name (lmflow.args.dpoalignerarguments attribute)": [[4, "lmflow.args.DPOAlignerArguments.run_name", false]], "sample() (lmflow.datasets.dataset method)": [[6, "lmflow.datasets.Dataset.sample", false]], "sample() (lmflow.datasets.dataset.dataset method)": [[5, "lmflow.datasets.dataset.Dataset.sample", false]], "sample() (lmflow.pipeline.inferencer.speculativeinferencer static method)": [[55, "lmflow.pipeline.inferencer.SpeculativeInferencer.sample", false]], "sampling_paired_idx_from_rewards() (lmflow.pipeline.dpov2_aligner.dpov2aligner method)": [[51, "lmflow.pipeline.dpov2_aligner.DPOv2Aligner.sampling_paired_idx_from_rewards", false]], "sampling_paired_method (lmflow.args.dpov2alignerarguments attribute)": [[4, "lmflow.args.DPOv2AlignerArguments.sampling_paired_method", false]], "sampling_params (lmflow.pipeline.vllm_inferencer.vllminferencer attribute)": [[69, "lmflow.pipeline.vllm_inferencer.VLLMInferencer.sampling_params", false]], "sanity_check (lmflow.args.dpoalignerarguments attribute)": [[4, "lmflow.args.DPOAlignerArguments.sanity_check", false]], "sanity_check() (lmflow.datasets.dataset method)": [[6, "lmflow.datasets.Dataset.sanity_check", false]], 
"sanity_check() (lmflow.datasets.dataset.dataset method)": [[5, "lmflow.datasets.dataset.Dataset.sanity_check", false]], "save() (lmflow.datasets.dataset method)": [[6, "lmflow.datasets.Dataset.save", false]], "save() (lmflow.datasets.dataset.dataset method)": [[5, "lmflow.datasets.dataset.Dataset.save", false]], "save() (lmflow.models.hf_decoder_model.hfdecodermodel method)": [[13, "lmflow.models.hf_decoder_model.HFDecoderModel.save", false]], "save() (lmflow.models.hf_encoder_decoder_model.hfencoderdecodermodel method)": [[14, "lmflow.models.hf_encoder_decoder_model.HFEncoderDecoderModel.save", false]], "save() (lmflow.models.hf_text_regression_model.hftextregressionmodel method)": [[16, "lmflow.models.hf_text_regression_model.HFTextRegressionModel.save", false]], "save_aggregated_lora (lmflow.args.modelarguments attribute)": [[4, "lmflow.args.ModelArguments.save_aggregated_lora", false]], "save_counter (lmflow.pipeline.utils.raft_trainer.rafttrainer attribute)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.save_counter", false]], "save_inference_results() (lmflow.pipeline.vllm_inferencer.inferencerwithoffloading method)": [[69, "lmflow.pipeline.vllm_inferencer.InferencerWithOffloading.save_inference_results", false]], "save_inference_results() (lmflow.pipeline.vllm_inferencer.vllminferencer method)": [[69, "lmflow.pipeline.vllm_inferencer.VLLMInferencer.save_inference_results", false]], "save_language_projection (lmflow.args.finetunerarguments attribute)": [[4, "lmflow.args.FinetunerArguments.save_language_projection", false]], "save_model() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.save_model", false]], "save_pretrain_model_path (lmflow.args.vismodelarguments attribute)": [[4, "lmflow.args.VisModelArguments.save_pretrain_model_path", false]], "save_prompt_cache() (lmflow.models.vision2seq_model.customautovision2seqmodel method)": [[22, "lmflow.models.vision2seq_model.CustomAutoVision2SeqModel.save_prompt_cache", false]], "save_results (lmflow.args.inferencerarguments attribute)": [[4, "lmflow.args.InferencerArguments.save_results", false]], "save_steps (lmflow.args.dpoalignerarguments attribute)": [[4, "lmflow.args.DPOAlignerArguments.save_steps", false]], "scaler_name (in module lmflow.pipeline.utils.raft_trainer)": [[66, "lmflow.pipeline.utils.raft_trainer.SCALER_NAME", false]], "scheduler_name (in module lmflow.pipeline.utils.raft_trainer)": [[66, "lmflow.pipeline.utils.raft_trainer.SCHEDULER_NAME", false]], "score_to_prob() (lmflow.pipeline.inferencer.speculativeinferencer static method)": [[55, "lmflow.pipeline.inferencer.SpeculativeInferencer.score_to_prob", false]], "seed (lmflow.args.dpoalignerarguments attribute)": [[4, "lmflow.args.DPOAlignerArguments.seed", false]], "select_feature (lmflow.models.vision_encoder.clip_encoder.clipvisiontower attribute)": [[23, "lmflow.models.vision_encoder.clip_encoder.CLIPVisionTower.select_feature", false]], "select_layer (lmflow.models.vision_encoder.clip_encoder.clipvisiontower attribute)": [[23, "lmflow.models.vision_encoder.clip_encoder.CLIPVisionTower.select_layer", false]], "sep (lmflow.utils.llava_conversation_lib.conversation attribute)": [[96, "lmflow.utils.llava_conversation_lib.Conversation.sep", false]], "sep2 (lmflow.utils.llava_conversation_lib.conversation attribute)": [[96, "lmflow.utils.llava_conversation_lib.Conversation.sep2", false]], "sep_style (lmflow.args.multimodaldatasetarguments attribute)": [[4, 
"lmflow.args.MultiModalDatasetArguments.sep_style", false]], "sep_style (lmflow.utils.llava_conversation_lib.conversation attribute)": [[96, "lmflow.utils.llava_conversation_lib.Conversation.sep_style", false]], "separator (lmflow.utils.conversation_template.base.conversationtemplate attribute)": [[75, "lmflow.utils.conversation_template.base.ConversationTemplate.separator", false]], "separator (lmflow.utils.conversation_template.conversationtemplate attribute)": [[81, "lmflow.utils.conversation_template.ConversationTemplate.separator", false]], "separatorstyle (class in lmflow.utils.llava_conversation_lib)": [[96, "lmflow.utils.llava_conversation_lib.SeparatorStyle", false]], "set_random_seed() (in module lmflow.utils.data_utils)": [[88, "lmflow.utils.data_utils.set_random_seed", false]], "sgd_schedule_free (lmflow.args.optimizernames attribute)": [[4, "lmflow.args.OptimizerNames.SGD_SCHEDULE_FREE", false]], "sgdp (class in lmflow.optim.sgdp)": [[43, "lmflow.optim.sgdp.SGDP", false]], "sgdp (lmflow.args.optimizernames attribute)": [[4, "lmflow.args.OptimizerNames.SGDP", false]], "sgdschedulefree (class in lmflow.optim.sgd_schedule_free)": [[42, "lmflow.optim.sgd_schedule_free.SGDScheduleFree", false]], "sharded_ddp (lmflow.pipeline.utils.raft_trainer.rafttrainer attribute)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.sharded_ddp", false]], "single (lmflow.utils.llava_conversation_lib.separatorstyle attribute)": [[96, "lmflow.utils.llava_conversation_lib.SeparatorStyle.SINGLE", false]], "skip_first_batches (in module lmflow.pipeline.utils.raft_trainer)": [[66, "lmflow.pipeline.utils.raft_trainer.skip_first_batches", false]], "skip_next (lmflow.utils.llava_conversation_lib.conversation attribute)": [[96, "lmflow.utils.llava_conversation_lib.Conversation.skip_next", false]], "sophia (lmflow.args.optimizernames attribute)": [[4, "lmflow.args.OptimizerNames.SOPHIA", false]], "sophiag (class in lmflow.optim.sophia)": [[44, "lmflow.optim.sophia.SophiaG", false]], "special_starter (lmflow.utils.conversation_template.base.conversationtemplate attribute)": [[75, "lmflow.utils.conversation_template.base.ConversationTemplate.special_starter", false]], "special_starter (lmflow.utils.conversation_template.conversationtemplate attribute)": [[81, "lmflow.utils.conversation_template.ConversationTemplate.special_starter", false]], "special_stopper (lmflow.utils.conversation_template.base.conversationtemplate attribute)": [[75, "lmflow.utils.conversation_template.base.ConversationTemplate.special_stopper", false]], "special_stopper (lmflow.utils.conversation_template.conversationtemplate attribute)": [[81, "lmflow.utils.conversation_template.ConversationTemplate.special_stopper", false]], "speculativeinferencer (class in lmflow.pipeline.inferencer)": [[55, "lmflow.pipeline.inferencer.SpeculativeInferencer", false]], "state (lmflow.pipeline.utils.raft_trainer.rafttrainer attribute)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.state", false]], "step() (lmflow.optim.adabelief.adabelief method)": [[25, "lmflow.optim.adabelief.AdaBelief.step", false]], "step() (lmflow.optim.adabound.adabound method)": [[26, "lmflow.optim.adabound.AdaBound.step", false]], "step() (lmflow.optim.adadelta.adadelta method)": [[27, "lmflow.optim.adadelta.Adadelta.step", false]], "step() (lmflow.optim.adagrad.adagrad method)": [[28, "lmflow.optim.adagrad.AdaGrad.step", false]], "step() (lmflow.optim.adam.adam method)": [[29, "lmflow.optim.adam.Adam.step", false]], "step() (lmflow.optim.adamax.adamax method)": 
[[30, "lmflow.optim.adamax.Adamax.step", false]], "step() (lmflow.optim.adamp.adamp method)": [[31, "lmflow.optim.adamp.AdamP.step", false]], "step() (lmflow.optim.adamw_schedule_free.adamwschedulefree method)": [[32, "lmflow.optim.adamw_schedule_free.AdamWScheduleFree.step", false]], "step() (lmflow.optim.adan.adan method)": [[33, "lmflow.optim.adan.Adan.step", false]], "step() (lmflow.optim.dummy.dummy method)": [[34, "lmflow.optim.dummy.Dummy.step", false]], "step() (lmflow.optim.lamb.lamb method)": [[36, "lmflow.optim.lamb.Lamb.step", false]], "step() (lmflow.optim.lars.lars method)": [[37, "lmflow.optim.lars.LARS.step", false]], "step() (lmflow.optim.nadam.nadam method)": [[38, "lmflow.optim.nadam.NAdam.step", false]], "step() (lmflow.optim.novograd.novograd method)": [[39, "lmflow.optim.novograd.NovoGrad.step", false]], "step() (lmflow.optim.radam.radam method)": [[41, "lmflow.optim.radam.RAdam.step", false]], "step() (lmflow.optim.sgd_schedule_free.sgdschedulefree method)": [[42, "lmflow.optim.sgd_schedule_free.SGDScheduleFree.step", false]], "step() (lmflow.optim.sgdp.sgdp method)": [[43, "lmflow.optim.sgdp.SGDP.step", false]], "step() (lmflow.optim.sophia.sophiag method)": [[44, "lmflow.optim.sophia.SophiaG.step", false]], "step() (lmflow.optim.yogi.yogi method)": [[45, "lmflow.optim.yogi.Yogi.step", false]], "store_flos() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.store_flos", false]], "stream_inference() (lmflow.pipeline.inferencer.inferencer method)": [[55, "lmflow.pipeline.inferencer.Inferencer.stream_inference", false]], "stream_inference() (lmflow.pipeline.inferencer.speculativeinferencer method)": [[55, "lmflow.pipeline.inferencer.SpeculativeInferencer.stream_inference", false]], "streaming (lmflow.args.datasetarguments attribute)": [[4, "lmflow.args.DatasetArguments.streaming", false]], "stringformatter (class in lmflow.utils.conversation_template.base)": [[75, "lmflow.utils.conversation_template.base.StringFormatter", false]], "supported_dataset_type (in module lmflow.pipeline.inferencer)": [[55, "lmflow.pipeline.inferencer.supported_dataset_type", false]], "system (lmflow.utils.llava_conversation_lib.conversation attribute)": [[96, "lmflow.utils.llava_conversation_lib.Conversation.system", false]], "system_formatter (lmflow.utils.conversation_template.base.conversationtemplate attribute)": [[75, "lmflow.utils.conversation_template.base.ConversationTemplate.system_formatter", false]], "system_formatter (lmflow.utils.conversation_template.conversationtemplate attribute)": [[81, "lmflow.utils.conversation_template.ConversationTemplate.system_formatter", false]], "t (lmflow.utils.position_interpolation.llama_rope_scaled_monkey_patch.condenserotaryembedding attribute)": [[100, "lmflow.utils.position_interpolation.llama_rope_scaled_monkey_patch.CondenseRotaryEmbedding.t", false]], "temperature (lmflow.args.evaluatorarguments attribute)": [[4, "lmflow.args.EvaluatorArguments.temperature", false]], "temperature (lmflow.args.inferencerarguments attribute)": [[4, "lmflow.args.InferencerArguments.temperature", false]], "template (lmflow.utils.conversation_template.base.formatter attribute)": [[75, "lmflow.utils.conversation_template.base.Formatter.template", false]], "template_name (lmflow.utils.conversation_template.base.conversationtemplate attribute)": [[75, "lmflow.utils.conversation_template.base.ConversationTemplate.template_name", false]], "template_name 
(lmflow.utils.conversation_template.conversationtemplate attribute)": [[81, "lmflow.utils.conversation_template.ConversationTemplate.template_name", false]], "templatecomponent (class in lmflow.utils.conversation_template.base)": [[75, "lmflow.utils.conversation_template.base.TemplateComponent", false]], "tensor_parallel_size (lmflow.args.inferencerarguments attribute)": [[4, "lmflow.args.InferencerArguments.tensor_parallel_size", false]], "test_file (lmflow.args.datasetarguments attribute)": [[4, "lmflow.args.DatasetArguments.test_file", false]], "text2text_dataset_description (in module lmflow.utils.constants)": [[74, "lmflow.utils.constants.TEXT2TEXT_DATASET_DESCRIPTION", false]], "text2text_dataset_details (in module lmflow.utils.constants)": [[74, "lmflow.utils.constants.TEXT2TEXT_DATASET_DETAILS", false]], "text2text_dataset_long_descrition (in module lmflow.utils.constants)": [[74, "lmflow.utils.constants.TEXT2TEXT_DATASET_LONG_DESCRITION", false]], "text_only_dataset_description (in module lmflow.utils.constants)": [[74, "lmflow.utils.constants.TEXT_ONLY_DATASET_DESCRIPTION", false]], "text_only_dataset_details (in module lmflow.utils.constants)": [[74, "lmflow.utils.constants.TEXT_ONLY_DATASET_DETAILS", false]], "text_only_dataset_long_descrition (in module lmflow.utils.constants)": [[74, "lmflow.utils.constants.TEXT_ONLY_DATASET_LONG_DESCRITION", false]], "text_to_scored_textlist_dataset_description (in module lmflow.utils.constants)": [[74, "lmflow.utils.constants.TEXT_TO_SCORED_TEXTLIST_DATASET_DESCRIPTION", false]], "text_to_textlist_dataset_description (in module lmflow.utils.constants)": [[74, "lmflow.utils.constants.TEXT_TO_TEXTLIST_DATASET_DESCRIPTION", false]], "text_to_textlist_tokenize_function() (in module lmflow.tokenization.hf_text_regression_model)": [[71, "lmflow.tokenization.hf_text_regression_model.text_to_textlist_tokenize_function", false]], "textregressionmodel (class in lmflow.models.text_regression_model)": [[21, "lmflow.models.text_regression_model.TextRegressionModel", false]], "to_dict() (lmflow.datasets.dataset method)": [[6, "lmflow.datasets.Dataset.to_dict", false]], "to_dict() (lmflow.datasets.dataset.dataset method)": [[5, "lmflow.datasets.dataset.Dataset.to_dict", false]], "to_gradio_chatbot() (lmflow.utils.llava_conversation_lib.conversation method)": [[96, "lmflow.utils.llava_conversation_lib.Conversation.to_gradio_chatbot", false]], "to_list() (lmflow.datasets.dataset method)": [[6, "lmflow.datasets.Dataset.to_list", false]], "to_list() (lmflow.datasets.dataset.dataset method)": [[5, "lmflow.datasets.dataset.Dataset.to_list", false]], "tok_logger (in module lmflow.tokenization.hf_decoder_model)": [[70, "lmflow.tokenization.hf_decoder_model.tok_logger", false]], "tok_logger (in module lmflow.tokenization.hf_text_regression_model)": [[71, "lmflow.tokenization.hf_text_regression_model.tok_logger", false]], "tokenize() (lmflow.models.hf_decoder_model.hfdecodermodel method)": [[13, "lmflow.models.hf_decoder_model.HFDecoderModel.tokenize", false]], "tokenize() (lmflow.models.hf_encoder_decoder_model.hfencoderdecodermodel method)": [[14, "lmflow.models.hf_encoder_decoder_model.HFEncoderDecoderModel.tokenize", false]], "tokenize() (lmflow.models.hf_text_regression_model.hftextregressionmodel method)": [[16, "lmflow.models.hf_text_regression_model.HFTextRegressionModel.tokenize", false]], "tokenize_batch_element() (lmflow.pipeline.utils.dpov2_dataprocessor.preferencedatacollatorwithpadding method)": [[60, 
"lmflow.pipeline.utils.dpov2_dataprocessor.PreferenceDataCollatorWithPadding.tokenize_batch_element", false]], "tokenize_function() (in module lmflow.tokenization.hf_decoder_model)": [[70, "lmflow.tokenization.hf_decoder_model.tokenize_function", false]], "tokenize_function() (in module lmflow.tokenization.hf_text_regression_model)": [[71, "lmflow.tokenization.hf_text_regression_model.tokenize_function", false]], "tokenizer (lmflow.datasets.multi_modal_dataset.datacollatorforsuperviseddataset attribute)": [[7, "lmflow.datasets.multi_modal_dataset.DataCollatorForSupervisedDataset.tokenizer", false]], "tokenizer (lmflow.models.hf_model_mixin.hfmodelmixin attribute)": [[15, "lmflow.models.hf_model_mixin.HFModelMixin.tokenizer", false]], "tokenizer (lmflow.pipeline.utils.dpov2_dataprocessor.preferencedatacollatorwithpadding attribute)": [[60, "lmflow.pipeline.utils.dpov2_dataprocessor.PreferenceDataCollatorWithPadding.tokenizer", false]], "tokenizer (lmflow.pipeline.utils.raft_trainer.rafttrainer attribute)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.tokenizer", false]], "tokenizer (lmflow.pipeline.utils.rm_dataprocessor.rewarddatacollatorwithpadding attribute)": [[67, "lmflow.pipeline.utils.rm_dataprocessor.RewardDataCollatorWithPadding.tokenizer", false]], "tokenizer_image_token() (in module lmflow.datasets.multi_modal_dataset)": [[7, "lmflow.datasets.multi_modal_dataset.tokenizer_image_token", false]], "tokenizer_name (lmflow.args.modelarguments attribute)": [[4, "lmflow.args.ModelArguments.tokenizer_name", false]], "toolinferencer (class in lmflow.pipeline.inferencer)": [[55, "lmflow.pipeline.inferencer.ToolInferencer", false]], "tools_formatter (lmflow.utils.conversation_template.base.conversationtemplate attribute)": [[75, "lmflow.utils.conversation_template.base.ConversationTemplate.tools_formatter", false]], "tools_formatter (lmflow.utils.conversation_template.conversationtemplate attribute)": [[81, "lmflow.utils.conversation_template.ConversationTemplate.tools_formatter", false]], "top_k (lmflow.args.inferencerarguments attribute)": [[4, "lmflow.args.InferencerArguments.top_k", false]], "top_p (lmflow.args.inferencerarguments attribute)": [[4, "lmflow.args.InferencerArguments.top_p", false]], "top_reward_percentage (lmflow.args.raftalignerarguments attribute)": [[4, "lmflow.args.RaftAlignerArguments.top_reward_percentage", false]], "torch_dtype (lmflow.args.modelarguments attribute)": [[4, "lmflow.args.ModelArguments.torch_dtype", false]], "torch_dtype (lmflow.models.hf_model_mixin.hfmodelmixin attribute)": [[15, "lmflow.models.hf_model_mixin.HFModelMixin.torch_dtype", false]], "torch_jit_model_eval() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.torch_jit_model_eval", false]], "train() (lmflow.optim.adamw_schedule_free.adamwschedulefree method)": [[32, "lmflow.optim.adamw_schedule_free.AdamWScheduleFree.train", false]], "train() (lmflow.optim.sgd_schedule_free.sgdschedulefree method)": [[42, "lmflow.optim.sgd_schedule_free.SGDScheduleFree.train", false]], "train() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.train", false]], "train_batch_size (lmflow.pipeline.evaluator.evaluator attribute)": [[52, "lmflow.pipeline.evaluator.Evaluator.train_batch_size", false]], "train_dataset (lmflow.pipeline.utils.raft_trainer.rafttrainer attribute)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.train_dataset", false]], "train_file 
(lmflow.args.datasetarguments attribute)": [[4, "lmflow.args.DatasetArguments.train_file", false]], "train_on_prompt (lmflow.args.datasetarguments attribute)": [[4, "lmflow.args.DatasetArguments.train_on_prompt", false]], "train_test_split() (lmflow.datasets.dataset method)": [[6, "lmflow.datasets.Dataset.train_test_split", false]], "train_test_split() (lmflow.datasets.dataset.dataset method)": [[5, "lmflow.datasets.dataset.Dataset.train_test_split", false]], "trainer_state_name (in module lmflow.pipeline.utils.raft_trainer)": [[66, "lmflow.pipeline.utils.raft_trainer.TRAINER_STATE_NAME", false]], "training_args_name (in module lmflow.pipeline.utils.raft_trainer)": [[66, "lmflow.pipeline.utils.raft_trainer.TRAINING_ARGS_NAME", false]], "training_step() (lmflow.pipeline.utils.raft_trainer.rafttrainer method)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.training_step", false]], "truncate_to_model_max_length (lmflow.args.modelarguments attribute)": [[4, "lmflow.args.ModelArguments.truncate_to_model_max_length", false]], "truncation_mode (lmflow.pipeline.utils.dpov2_dataprocessor.preferencedatacollatorwithpadding attribute)": [[60, "lmflow.pipeline.utils.dpov2_dataprocessor.PreferenceDataCollatorWithPadding.truncation_mode", false]], "truncation_side (lmflow.args.modelarguments attribute)": [[4, "lmflow.args.ModelArguments.truncation_side", false]], "trust_remote_code (lmflow.args.modelarguments attribute)": [[4, "lmflow.args.ModelArguments.trust_remote_code", false]], "tunable (class in lmflow.models.interfaces.tunable)": [[19, "lmflow.models.interfaces.tunable.Tunable", false]], "tune() (lmflow.pipeline.base_tuner.basetuner method)": [[49, "lmflow.pipeline.base_tuner.BaseTuner.tune", false]], "tune() (lmflow.pipeline.finetuner.finetuner method)": [[53, "lmflow.pipeline.finetuner.Finetuner.tune", false]], "tune() (lmflow.pipeline.rm_tuner.rewardmodeltuner method)": [[59, "lmflow.pipeline.rm_tuner.RewardModelTuner.tune", false]], "two (lmflow.utils.llava_conversation_lib.separatorstyle attribute)": [[96, "lmflow.utils.llava_conversation_lib.SeparatorStyle.TWO", false]], "type (lmflow.datasets.dataset attribute)": [[6, "lmflow.datasets.Dataset.type", false]], "type (lmflow.datasets.dataset.dataset attribute)": [[5, "lmflow.datasets.dataset.Dataset.type", false]], "type (lmflow.utils.conversation_template.base.templatecomponent attribute)": [[75, "lmflow.utils.conversation_template.base.TemplateComponent.type", false]], "update_custom_config() (in module lmflow.utils.multimodal)": [[98, "lmflow.utils.multimodal.update_custom_config", false]], "update_hessian() (lmflow.optim.sophia.sophiag method)": [[44, "lmflow.optim.sophia.SophiaG.update_hessian", false]], "use_accelerator (lmflow.args.inferencerarguments attribute)": [[4, "lmflow.args.InferencerArguments.use_accelerator", false]], "use_accelerator (lmflow.models.hf_model_mixin.hfmodelmixin attribute)": [[15, "lmflow.models.hf_model_mixin.HFModelMixin.use_accelerator", false]], "use_accelerator_for_evaluator (lmflow.args.evaluatorarguments attribute)": [[4, "lmflow.args.EvaluatorArguments.use_accelerator_for_evaluator", false]], "use_apex (lmflow.pipeline.utils.raft_trainer.rafttrainer attribute)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.use_apex", false]], "use_auth_token (lmflow.args.modelarguments attribute)": [[4, "lmflow.args.ModelArguments.use_auth_token", false]], "use_beam_search (lmflow.args.inferencerarguments attribute)": [[4, "lmflow.args.InferencerArguments.use_beam_search", false]], "use_cpu_amp 
(lmflow.pipeline.utils.raft_trainer.rafttrainer attribute)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.use_cpu_amp", false]], "use_cuda_amp (lmflow.pipeline.utils.raft_trainer.rafttrainer attribute)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.use_cuda_amp", false]], "use_customized_optim (lmflow.args.finetunerarguments attribute)": [[4, "lmflow.args.FinetunerArguments.use_customized_optim", false]], "use_dpo_data_collator (lmflow.pipeline.utils.dpov2_trainer.dpov2trainer attribute)": [[61, "lmflow.pipeline.utils.dpov2_trainer.DPOv2Trainer.use_dpo_data_collator", false]], "use_fast_tokenizer (lmflow.args.modelarguments attribute)": [[4, "lmflow.args.ModelArguments.use_fast_tokenizer", false]], "use_flash_attention (lmflow.args.modelarguments attribute)": [[4, "lmflow.args.ModelArguments.use_flash_attention", false]], "use_image_start_end (lmflow.args.multimodaldatasetarguments attribute)": [[4, "lmflow.args.MultiModalDatasetArguments.use_image_start_end", false]], "use_int8 (lmflow.args.modelarguments attribute)": [[4, "lmflow.args.ModelArguments.use_int8", false]], "use_lisa (lmflow.args.finetunerarguments attribute)": [[4, "lmflow.args.FinetunerArguments.use_lisa", false]], "use_lora (lmflow.args.modelarguments attribute)": [[4, "lmflow.args.ModelArguments.use_lora", false]], "use_prompt_cache (lmflow.args.vismodelarguments attribute)": [[4, "lmflow.args.VisModelArguments.use_prompt_cache", false]], "use_qlora (lmflow.args.modelarguments attribute)": [[4, "lmflow.args.ModelArguments.use_qlora", false]], "use_ram_optimized_load (lmflow.args.modelarguments attribute)": [[4, "lmflow.args.ModelArguments.use_ram_optimized_load", false]], "use_tune_checkpoints (lmflow.pipeline.utils.raft_trainer.rafttrainer attribute)": [[66, "lmflow.pipeline.utils.raft_trainer.RaftTrainer.use_tune_checkpoints", false]], "use_vllm (lmflow.args.inferencerarguments attribute)": [[4, "lmflow.args.InferencerArguments.use_vllm", false]], "use_wandb (lmflow.args.evaluatorarguments attribute)": [[4, "lmflow.args.EvaluatorArguments.use_wandb", false]], "user_formatter (lmflow.utils.conversation_template.base.conversationtemplate attribute)": [[75, "lmflow.utils.conversation_template.base.ConversationTemplate.user_formatter", false]], "user_formatter (lmflow.utils.conversation_template.conversationtemplate attribute)": [[81, "lmflow.utils.conversation_template.ConversationTemplate.user_formatter", false]], "validation_file (lmflow.args.datasetarguments attribute)": [[4, "lmflow.args.DatasetArguments.validation_file", false]], "validation_split_percentage (lmflow.args.datasetarguments attribute)": [[4, "lmflow.args.DatasetArguments.validation_split_percentage", false]], "version (lmflow.utils.llava_conversation_lib.conversation attribute)": [[96, "lmflow.utils.llava_conversation_lib.Conversation.version", false]], "vision_feature_select() (lmflow.models.vision2seq_model.customautovision2seqmodel method)": [[22, "lmflow.models.vision2seq_model.CustomAutoVision2SeqModel.vision_feature_select", false]], "vision_model_from_pretrained() (lmflow.models.vision2seq_model.customautovision2seqmodel method)": [[22, "lmflow.models.vision2seq_model.CustomAutoVision2SeqModel.vision_model_from_pretrained", false]], "vision_select_layer (lmflow.args.vismodelarguments attribute)": [[4, "lmflow.args.VisModelArguments.vision_select_layer", false]], "vision_tower_name (lmflow.models.vision_encoder.clip_encoder.clipvisiontower attribute)": [[23, 
"lmflow.models.vision_encoder.clip_encoder.CLIPVisionTower.vision_tower_name", false]], "vismodelarguments (class in lmflow.args)": [[4, "lmflow.args.VisModelArguments", false]], "vllm_gpu_memory_utilization (lmflow.args.inferencerarguments attribute)": [[4, "lmflow.args.InferencerArguments.vllm_gpu_memory_utilization", false]], "vllm_inference_batch_size (lmflow.args.inferencerarguments attribute)": [[4, "lmflow.args.InferencerArguments.vllm_inference_batch_size", false]], "vllm_tensor_parallel_size (lmflow.args.inferencerarguments attribute)": [[4, "lmflow.args.InferencerArguments.vllm_tensor_parallel_size", false]], "vllminferencer (class in lmflow.pipeline.vllm_inferencer)": [[69, "lmflow.pipeline.vllm_inferencer.VLLMInferencer", false]], "vllminferenceresultwithinput (class in lmflow.utils.data_utils)": [[88, "lmflow.utils.data_utils.VLLMInferenceResultWithInput", false]], "warmup_steps (lmflow.args.dpoalignerarguments attribute)": [[4, "lmflow.args.DPOAlignerArguments.warmup_steps", false]], "weight_decay (lmflow.args.dpoalignerarguments attribute)": [[4, "lmflow.args.DPOAlignerArguments.weight_decay", false]], "weight_decouple (lmflow.optim.adabelief.adabelief attribute)": [[25, "lmflow.optim.adabelief.AdaBelief.weight_decouple", false]], "with_qformer (lmflow.args.vismodelarguments attribute)": [[4, "lmflow.args.VisModelArguments.with_qformer", false]], "with_qformer (lmflow.models.vision2seq_model.customautovision2seqmodel attribute)": [[22, "lmflow.models.vision2seq_model.CustomAutoVision2SeqModel.with_qformer", false]], "worker_heart_beat_interval (in module lmflow.utils.constants)": [[74, "lmflow.utils.constants.WORKER_HEART_BEAT_INTERVAL", false]], "workspace_path (lmflow.pipeline.iterative_dpo_aligner.iterativedpoaligner attribute)": [[56, "lmflow.pipeline.iterative_dpo_aligner.IterativeDPOAligner.workspace_path", false]], "world_size (lmflow.pipeline.evaluator.evaluator attribute)": [[52, "lmflow.pipeline.evaluator.Evaluator.world_size", false]], "world_size (lmflow.pipeline.inferencer.inferencer attribute)": [[55, "lmflow.pipeline.inferencer.Inferencer.world_size", false]], "world_size (lmflow.pipeline.rm_inferencer.rewardmodelinferencer attribute)": [[58, "lmflow.pipeline.rm_inferencer.RewardModelInferencer.world_size", false]], "yi1_5_template (in module lmflow.utils.conversation_template)": [[81, "lmflow.utils.conversation_template.YI1_5_TEMPLATE", false]], "yi1_5_template (in module lmflow.utils.conversation_template.yi)": [[86, "lmflow.utils.conversation_template.yi.YI1_5_TEMPLATE", false]], "yogi (class in lmflow.optim.yogi)": [[45, "lmflow.optim.yogi.Yogi", false]], "yogi (lmflow.args.optimizernames attribute)": [[4, "lmflow.args.OptimizerNames.YOGI", false]], "zephyr_template (in module lmflow.utils.conversation_template)": [[81, "lmflow.utils.conversation_template.ZEPHYR_TEMPLATE", false]], "zephyr_template (in module lmflow.utils.conversation_template.zephyr)": [[87, "lmflow.utils.conversation_template.zephyr.ZEPHYR_TEMPLATE", false]], "zephyrconversationtemplate (class in lmflow.utils.conversation_template.zephyr)": [[87, "lmflow.utils.conversation_template.zephyr.ZephyrConversationTemplate", false]]}, "objects": {"": [[8, 0, 0, "-", "lmflow"]], "lmflow": [[8, 1, 1, "", "__version__"], [4, 0, 0, "-", "args"], [6, 0, 0, "-", "datasets"], [8, 1, 1, "", "internal_version"], [17, 0, 0, "-", "models"], [35, 0, 0, "-", "optim"], [54, 0, 0, "-", "pipeline"], [72, 0, 0, "-", "tokenization"], [95, 0, 0, "-", "utils"], [101, 0, 0, "-", "version"]], "lmflow.args": [[4, 2, 
1, "", "AutoArguments"], [4, 2, 1, "", "BenchmarkingArguments"], [4, 2, 1, "", "DPOAlignerArguments"], [4, 2, 1, "", "DPOv2AlignerArguments"], [4, 2, 1, "", "DatasetArguments"], [4, 2, 1, "", "EvaluatorArguments"], [4, 2, 1, "", "FinetunerArguments"], [4, 2, 1, "", "InferencerArguments"], [4, 2, 1, "", "IterativeAlignerArguments"], [4, 2, 1, "", "IterativeDPOAlignerArguments"], [4, 1, 1, "", "MODEL_CONFIG_CLASSES"], [4, 1, 1, "", "MODEL_TYPES"], [4, 2, 1, "", "ModelArguments"], [4, 2, 1, "", "MultiModalDatasetArguments"], [4, 2, 1, "", "OptimizerNames"], [4, 1, 1, "", "PIPELINE_ARGUMENT_MAPPING"], [4, 2, 1, "", "RaftAlignerArguments"], [4, 2, 1, "", "RewardModelTunerArguments"], [4, 2, 1, "", "VisModelArguments"], [4, 1, 1, "", "logger"]], "lmflow.args.AutoArguments": [[4, 3, 1, "", "get_pipeline_args_class"]], "lmflow.args.BenchmarkingArguments": [[4, 4, 1, "", "dataset_name"], [4, 4, 1, "", "lm_evaluation_metric"]], "lmflow.args.DPOAlignerArguments": [[4, 4, 1, "", "beta"], [4, 4, 1, "", "eval_steps"], [4, 4, 1, "", "gradient_accumulation_steps"], [4, 4, 1, "", "gradient_checkpointing"], [4, 4, 1, "", "gradient_checkpointing_use_reentrant"], [4, 4, 1, "", "learning_rate"], [4, 4, 1, "", "local_rank"], [4, 4, 1, "", "log_freq"], [4, 4, 1, "", "logging_steps"], [4, 4, 1, "", "lr_scheduler_type"], [4, 4, 1, "", "max_length"], [4, 4, 1, "", "max_prompt_length"], [4, 4, 1, "", "max_steps"], [4, 4, 1, "", "optimizer_type"], [4, 4, 1, "", "output_dir"], [4, 4, 1, "", "per_device_eval_batch_size"], [4, 4, 1, "", "per_device_train_batch_size"], [4, 4, 1, "", "report_to"], [4, 4, 1, "", "run_name"], [4, 4, 1, "", "sanity_check"], [4, 4, 1, "", "save_steps"], [4, 4, 1, "", "seed"], [4, 4, 1, "", "warmup_steps"], [4, 4, 1, "", "weight_decay"]], "lmflow.args.DPOv2AlignerArguments": [[4, 4, 1, "", "accelerate_config_file"], [4, 4, 1, "", "beta"], [4, 4, 1, "", "length_penalty"], [4, 4, 1, "", "loss_type"], [4, 4, 1, "", "margin_scale"], [4, 4, 1, "", "mask_prompt"], [4, 4, 1, "", "max_length"], [4, 4, 1, "", "max_prompt_length"], [4, 4, 1, "", "random_seed"], [4, 4, 1, "", "sampling_paired_method"]], "lmflow.args.DatasetArguments": [[4, 3, 1, "", "__post_init__"], [4, 4, 1, "", "block_size"], [4, 4, 1, "", "conversation_template"], [4, 4, 1, "", "customized_cache_dir"], [4, 4, 1, "", "dataset_config_name"], [4, 4, 1, "", "dataset_name"], [4, 4, 1, "", "dataset_path"], [4, 4, 1, "", "disable_group_texts"], [4, 4, 1, "", "group_texts_batch_size"], [4, 4, 1, "", "is_custom_dataset"], [4, 4, 1, "", "keep_linebreaks"], [4, 4, 1, "", "max_eval_samples"], [4, 4, 1, "", "max_train_samples"], [4, 4, 1, "", "overwrite_cache"], [4, 4, 1, "", "preprocessing_num_workers"], [4, 4, 1, "", "streaming"], [4, 4, 1, "", "test_file"], [4, 4, 1, "", "train_file"], [4, 4, 1, "", "train_on_prompt"], [4, 4, 1, "", "validation_file"], [4, 4, 1, "", "validation_split_percentage"]], "lmflow.args.EvaluatorArguments": [[4, 4, 1, "", "answer_type"], [4, 4, 1, "", "deepspeed"], [4, 4, 1, "", "evaluate_block_size"], [4, 4, 1, "", "inference_batch_size_per_device"], [4, 4, 1, "", "local_rank"], [4, 4, 1, "", "max_new_tokens"], [4, 4, 1, "", "metric"], [4, 4, 1, "", "mixed_precision"], [4, 4, 1, "", "output_dir"], [4, 4, 1, "", "prompt_structure"], [4, 4, 1, "", "random_seed"], [4, 4, 1, "", "random_shuffle"], [4, 4, 1, "", "repetition_penalty"], [4, 4, 1, "", "temperature"], [4, 4, 1, "", "use_accelerator_for_evaluator"], [4, 4, 1, "", "use_wandb"]], "lmflow.args.FinetunerArguments": [[4, 4, 1, "", "customized_optim"], [4, 4, 1, "", 
"customized_optim_args"], [4, 4, 1, "", "eval_dataset_path"], [4, 4, 1, "", "finetune_part"], [4, 4, 1, "", "lisa_activated_layers"], [4, 4, 1, "", "lisa_interval_steps"], [4, 4, 1, "", "lisa_layers_attribute"], [4, 4, 1, "", "optim_adam_beta1"], [4, 4, 1, "", "optim_adam_beta2"], [4, 4, 1, "", "optim_beta1"], [4, 4, 1, "", "optim_beta2"], [4, 4, 1, "", "optim_beta3"], [4, 4, 1, "", "optim_dummy_beta1"], [4, 4, 1, "", "optim_dummy_beta2"], [4, 4, 1, "", "optim_momentum"], [4, 4, 1, "", "optim_weight_decay"], [4, 4, 1, "", "remove_unused_columns"], [4, 4, 1, "", "save_language_projection"], [4, 4, 1, "", "use_customized_optim"], [4, 4, 1, "", "use_lisa"]], "lmflow.args.InferencerArguments": [[4, 3, 1, "", "__post_init__"], [4, 4, 1, "", "additional_stop_token_ids"], [4, 4, 1, "", "apply_chat_template"], [4, 4, 1, "", "deepspeed"], [4, 4, 1, "", "device"], [4, 4, 1, "", "distributed_inference_num_instances"], [4, 4, 1, "", "do_sample"], [4, 4, 1, "", "enable_decode_inference_result"], [4, 4, 1, "", "enable_distributed_inference"], [4, 4, 1, "", "inference_batch_size"], [4, 4, 1, "", "local_rank"], [4, 4, 1, "", "max_new_tokens"], [4, 4, 1, "", "mixed_precision"], [4, 4, 1, "", "num_output_sequences"], [4, 4, 1, "", "random_seed"], [4, 4, 1, "", "repetition_penalty"], [4, 4, 1, "", "results_path"], [4, 4, 1, "", "save_results"], [4, 4, 1, "", "temperature"], [4, 4, 1, "", "tensor_parallel_size"], [4, 4, 1, "", "top_k"], [4, 4, 1, "", "top_p"], [4, 4, 1, "", "use_accelerator"], [4, 4, 1, "", "use_beam_search"], [4, 4, 1, "", "use_vllm"], [4, 4, 1, "", "vllm_gpu_memory_utilization"], [4, 4, 1, "", "vllm_inference_batch_size"], [4, 4, 1, "", "vllm_tensor_parallel_size"]], "lmflow.args.IterativeAlignerArguments": [[4, 4, 1, "", "dataset_path_list"], [4, 4, 1, "", "initial_iter_idx"]], "lmflow.args.IterativeDPOAlignerArguments": [[4, 4, 1, "", "do_dpo_align"], [4, 4, 1, "", "do_response_generation"], [4, 4, 1, "", "do_scoring"], [4, 4, 1, "", "output_dir"], [4, 4, 1, "", "reward_model_inference_batch_size"], [4, 4, 1, "", "reward_model_inference_block_size"]], "lmflow.args.ModelArguments": [[4, 3, 1, "", "__post_init__"], [4, 4, 1, "", "arch_type"], [4, 4, 1, "", "bits"], [4, 4, 1, "", "cache_dir"], [4, 4, 1, "", "config_name"], [4, 4, 1, "", "config_overrides"], [4, 4, 1, "", "do_rope_scaling"], [4, 4, 1, "", "double_quant"], [4, 4, 1, "", "eos_padding"], [4, 4, 1, "", "ignore_bias_buffers"], [4, 4, 1, "", "load_in_4bit"], [4, 4, 1, "", "lora_alpha"], [4, 4, 1, "", "lora_dropout"], [4, 4, 1, "", "lora_model_path"], [4, 4, 1, "", "lora_r"], [4, 4, 1, "", "lora_target_modules"], [4, 4, 1, "", "model_max_length"], [4, 4, 1, "", "model_name_or_path"], [4, 4, 1, "", "model_revision"], [4, 4, 1, "", "model_type"], [4, 4, 1, "", "padding_side"], [4, 4, 1, "", "quant_type"], [4, 4, 1, "", "rope_ntk_ratio"], [4, 4, 1, "", "rope_pi_ratio"], [4, 4, 1, "", "save_aggregated_lora"], [4, 4, 1, "", "tokenizer_name"], [4, 4, 1, "", "torch_dtype"], [4, 4, 1, "", "truncate_to_model_max_length"], [4, 4, 1, "", "truncation_side"], [4, 4, 1, "", "trust_remote_code"], [4, 4, 1, "", "use_auth_token"], [4, 4, 1, "", "use_fast_tokenizer"], [4, 4, 1, "", "use_flash_attention"], [4, 4, 1, "", "use_int8"], [4, 4, 1, "", "use_lora"], [4, 4, 1, "", "use_qlora"], [4, 4, 1, "", "use_ram_optimized_load"]], "lmflow.args.MultiModalDatasetArguments": [[4, 4, 1, "", "image_aspect_ratio"], [4, 4, 1, "", "image_folder"], [4, 4, 1, "", "is_multimodal"], [4, 4, 1, "", "sep_style"], [4, 4, 1, "", "use_image_start_end"]], 
"lmflow.args.OptimizerNames": [[4, 4, 1, "", "ADABELIEF"], [4, 4, 1, "", "ADABOUND"], [4, 4, 1, "", "ADADELTA"], [4, 4, 1, "", "ADAGRAD"], [4, 4, 1, "", "ADAM"], [4, 4, 1, "", "ADAMAX"], [4, 4, 1, "", "ADAMP"], [4, 4, 1, "", "ADAMW_SCHEDULE_FREE"], [4, 4, 1, "", "ADAN"], [4, 4, 1, "", "DUMMY"], [4, 4, 1, "", "LAMB"], [4, 4, 1, "", "LARS"], [4, 4, 1, "", "NADAM"], [4, 4, 1, "", "NOVOGRAD"], [4, 4, 1, "", "RADAM"], [4, 4, 1, "", "SGDP"], [4, 4, 1, "", "SGD_SCHEDULE_FREE"], [4, 4, 1, "", "SOPHIA"], [4, 4, 1, "", "YOGI"]], "lmflow.args.RaftAlignerArguments": [[4, 4, 1, "", "collection_strategy"], [4, 4, 1, "", "inference_batch_size_per_device"], [4, 4, 1, "", "num_raft_iteration"], [4, 4, 1, "", "output_max_length"], [4, 4, 1, "", "output_min_length"], [4, 4, 1, "", "output_reward_path"], [4, 4, 1, "", "raft_batch_size"], [4, 4, 1, "", "top_reward_percentage"]], "lmflow.args.VisModelArguments": [[4, 4, 1, "", "custom_model"], [4, 4, 1, "", "custom_vision_model"], [4, 4, 1, "", "image_encoder_name_or_path"], [4, 4, 1, "", "llava_loading"], [4, 4, 1, "", "llava_pretrain_model_path"], [4, 4, 1, "", "llm_model_name_or_path"], [4, 4, 1, "", "low_resource"], [4, 4, 1, "", "pretrained_language_projection_path"], [4, 4, 1, "", "prompt_cache_path"], [4, 4, 1, "", "qformer_name_or_path"], [4, 4, 1, "", "save_pretrain_model_path"], [4, 4, 1, "", "use_prompt_cache"], [4, 4, 1, "", "vision_select_layer"], [4, 4, 1, "", "with_qformer"]], "lmflow.datasets": [[6, 2, 1, "", "CustomMultiModalDataset"], [6, 2, 1, "", "Dataset"], [5, 0, 0, "-", "dataset"], [7, 0, 0, "-", "multi_modal_dataset"]], "lmflow.datasets.CustomMultiModalDataset": [[6, 3, 1, "", "__getitem__"], [6, 3, 1, "", "__len__"], [6, 4, 1, "", "data_args"], [6, 4, 1, "id0", "data_dict"], [6, 4, 1, "", "image_folder"], [6, 3, 1, "", "register_tokenizer"]], "lmflow.datasets.Dataset": [[6, 3, 1, "", "__len__"], [6, 3, 1, "", "_check_data_format"], [6, 4, 1, "", "backend"], [6, 4, 1, "", "backend_dataset"], [6, 3, 1, "", "create_from_dict"], [6, 4, 1, "", "data_args"], [6, 4, 1, "", "dataset_path"], [6, 3, 1, "", "drop_instances"], [6, 3, 1, "", "from_dict"], [6, 3, 1, "", "get_backend"], [6, 3, 1, "", "get_backend_dataset"], [6, 3, 1, "", "get_data_args"], [6, 3, 1, "", "get_fingerprint"], [6, 3, 1, "", "get_type"], [6, 3, 1, "", "hf_dataset_sanity_check"], [6, 3, 1, "", "map"], [6, 3, 1, "", "sample"], [6, 3, 1, "", "sanity_check"], [6, 3, 1, "", "save"], [6, 3, 1, "", "to_dict"], [6, 3, 1, "", "to_list"], [6, 3, 1, "", "train_test_split"], [6, 4, 1, "", "type"]], "lmflow.datasets.dataset": [[5, 1, 1, "", "DATASET_TYPES"], [5, 2, 1, "", "Dataset"], [5, 1, 1, "", "KEY_INSTANCES"], [5, 1, 1, "", "KEY_SCORE"], [5, 1, 1, "", "KEY_TYPE"], [5, 1, 1, "", "logger"]], "lmflow.datasets.dataset.Dataset": [[5, 3, 1, "", "__len__"], [5, 3, 1, "", "_check_data_format"], [5, 4, 1, "", "backend"], [5, 4, 1, "", "backend_dataset"], [5, 3, 1, "", "create_from_dict"], [5, 4, 1, "", "data_args"], [5, 4, 1, "", "dataset_path"], [5, 3, 1, "", "drop_instances"], [5, 3, 1, "", "from_dict"], [5, 3, 1, "", "get_backend"], [5, 3, 1, "", "get_backend_dataset"], [5, 3, 1, "", "get_data_args"], [5, 3, 1, "", "get_fingerprint"], [5, 3, 1, "", "get_type"], [5, 3, 1, "", "hf_dataset_sanity_check"], [5, 3, 1, "", "map"], [5, 3, 1, "", "sample"], [5, 3, 1, "", "sanity_check"], [5, 3, 1, "", "save"], [5, 3, 1, "", "to_dict"], [5, 3, 1, "", "to_list"], [5, 3, 1, "", "train_test_split"], [5, 4, 1, "", "type"]], "lmflow.datasets.multi_modal_dataset": [[7, 2, 1, "", 
"CustomMultiModalDataset"], [7, 2, 1, "", "DataCollatorForSupervisedDataset"], [7, 5, 1, "", "preprocess_llama_from_llava_plain"], [7, 5, 1, "", "preprocess_llama_from_llava_v1"], [7, 5, 1, "", "preprocess_multimodal_llava"], [7, 5, 1, "", "tokenizer_image_token"]], "lmflow.datasets.multi_modal_dataset.CustomMultiModalDataset": [[7, 3, 1, "", "__getitem__"], [7, 3, 1, "", "__len__"], [7, 4, 1, "", "data_args"], [7, 4, 1, "id0", "data_dict"], [7, 4, 1, "", "image_folder"], [7, 3, 1, "", "register_tokenizer"]], "lmflow.datasets.multi_modal_dataset.DataCollatorForSupervisedDataset": [[7, 3, 1, "", "__call__"], [7, 4, 1, "", "tokenizer"]], "lmflow.models": [[9, 0, 0, "-", "auto_model"], [10, 0, 0, "-", "base_model"], [11, 0, 0, "-", "decoder_model"], [12, 0, 0, "-", "encoder_decoder_model"], [13, 0, 0, "-", "hf_decoder_model"], [14, 0, 0, "-", "hf_encoder_decoder_model"], [15, 0, 0, "-", "hf_model_mixin"], [16, 0, 0, "-", "hf_text_regression_model"], [18, 0, 0, "-", "interfaces"], [20, 0, 0, "-", "regression_model"], [21, 0, 0, "-", "text_regression_model"], [22, 0, 0, "-", "vision2seq_model"], [24, 0, 0, "-", "vision_encoder"]], "lmflow.models.auto_model": [[9, 2, 1, "", "AutoModel"]], "lmflow.models.auto_model.AutoModel": [[9, 3, 1, "", "get_model"]], "lmflow.models.base_model": [[10, 2, 1, "", "BaseModel"]], "lmflow.models.decoder_model": [[11, 2, 1, "", "DecoderModel"]], "lmflow.models.encoder_decoder_model": [[12, 2, 1, "", "EncoderDecoderModel"]], "lmflow.models.hf_decoder_model": [[13, 1, 1, "id0", "GPU_SUPPORT_FLASH_ATTENTION"], [13, 2, 1, "", "HFDecoderModel"], [13, 1, 1, "", "MODELS_SUPPORT_FLASH_ATTENTION"], [13, 1, 1, "", "logger"]], "lmflow.models.hf_decoder_model.HFDecoderModel": [[13, 3, 1, "", "__inference"], [13, 3, 1, "", "__prepare_inputs_for_inference"], [13, 3, 1, "", "__prepare_inputs_for_vllm_inference"], [13, 3, 1, "", "__vllm_inference"], [13, 3, 1, "", "decode"], [13, 3, 1, "", "encode"], [13, 3, 1, "", "get_peft_without_qlora"], [13, 3, 1, "", "inference"], [13, 3, 1, "", "merge_lora_weights"], [13, 3, 1, "", "prepare_inputs_for_inference"], [13, 3, 1, "", "save"], [13, 3, 1, "", "tokenize"]], "lmflow.models.hf_encoder_decoder_model": [[14, 2, 1, "", "HFEncoderDecoderModel"], [14, 1, 1, "", "logger"]], "lmflow.models.hf_encoder_decoder_model.HFEncoderDecoderModel": [[14, 3, 1, "", "decode"], [14, 4, 1, "", "device"], [14, 3, 1, "", "encode"], [14, 3, 1, "", "get_backend_model"], [14, 3, 1, "", "get_max_length"], [14, 3, 1, "", "get_tokenizer"], [14, 3, 1, "", "inference"], [14, 3, 1, "", "merge_lora_weights"], [14, 3, 1, "", "save"], [14, 3, 1, "", "tokenize"]], "lmflow.models.hf_model_mixin": [[15, 2, 1, "", "HFModelMixin"], [15, 1, 1, "", "HF_AUTOMODEL_MAPPING"], [15, 1, 1, "", "HF_AUTOMODEL_TYPE"], [15, 1, 1, "", "LORA_TARGET_MODULES_MAPPING"], [15, 1, 1, "", "logger"]], "lmflow.models.hf_model_mixin.HFModelMixin": [[15, 3, 1, "", "__model_module_inject"], [15, 3, 1, "", "__prepare_dtype"], [15, 3, 1, "", "__prepare_model_config"], [15, 3, 1, "", "__prepare_model_for_inference"], [15, 3, 1, "", "__prepare_model_for_training"], [15, 3, 1, "", "__prepare_model_for_vllm_inference"], [15, 3, 1, "", "__prepare_model_post_process"], [15, 3, 1, "", "__prepare_peft_config"], [15, 3, 1, "", "__prepare_quant_config"], [15, 3, 1, "", "__prepare_tokenizer"], [15, 4, 1, "", "_activated"], [15, 3, 1, "", "activate_model_for_inference"], [15, 3, 1, "", "deactivate_model_for_inference"], [15, 4, 1, "", "device"], [15, 4, 1, "", "do_train"], [15, 4, 1, "", "ds_config"], [15, 3, 1, 
"", "get_backend_model"], [15, 3, 1, "", "get_max_length"], [15, 3, 1, "", "get_tokenizer"], [15, 4, 1, "", "hf_auto_model"], [15, 4, 1, "", "hf_model_config"], [15, 4, 1, "", "model_args"], [15, 4, 1, "", "peft_config"], [15, 4, 1, "", "quant_config"], [15, 4, 1, "", "tokenizer"], [15, 4, 1, "", "torch_dtype"], [15, 4, 1, "", "use_accelerator"]], "lmflow.models.hf_text_regression_model": [[16, 2, 1, "", "HFTextRegressionModel"], [16, 1, 1, "", "logger"]], "lmflow.models.hf_text_regression_model.HFTextRegressionModel": [[16, 3, 1, "", "__inference"], [16, 3, 1, "", "__vllm_inference"], [16, 4, 1, "", "config_additional_args"], [16, 3, 1, "", "inference"], [16, 3, 1, "", "postprocess_distributed_inference_outputs"], [16, 3, 1, "", "postprocess_inference_outputs"], [16, 3, 1, "", "prepare_inputs_for_inference"], [16, 3, 1, "", "save"], [16, 3, 1, "", "tokenize"]], "lmflow.models.interfaces": [[19, 0, 0, "-", "tunable"]], "lmflow.models.interfaces.tunable": [[19, 2, 1, "", "Tunable"]], "lmflow.models.regression_model": [[20, 2, 1, "", "RegressionModel"]], "lmflow.models.text_regression_model": [[21, 2, 1, "", "TextRegressionModel"]], "lmflow.models.text_regression_model.TextRegressionModel": [[21, 3, 1, "", "inference"], [21, 4, 1, "", "inference_func"], [21, 3, 1, "", "register_inference_function"]], "lmflow.models.vision2seq_model": [[22, 2, 1, "", "CustomAutoVision2SeqModel"]], "lmflow.models.vision2seq_model.CustomAutoVision2SeqModel": [[22, 4, 1, "", "custom_vision_model"], [22, 3, 1, "", "forward"], [22, 3, 1, "", "generate"], [22, 3, 1, "", "get_backend_model"], [22, 3, 1, "", "get_tokenizer"], [22, 4, 1, "id0", "hidden_size"], [22, 4, 1, "", "kwargs"], [22, 4, 1, "", "language_model"], [22, 3, 1, "", "language_model_from_pretrained"], [22, 3, 1, "", "load_prompt_cache"], [22, 3, 1, "", "processor_image_token_in_minigpt4"], [22, 3, 1, "", "qformer_from_pretrained"], [22, 3, 1, "", "register_prompt_cache"], [22, 3, 1, "", "save_prompt_cache"], [22, 3, 1, "", "vision_feature_select"], [22, 3, 1, "", "vision_model_from_pretrained"], [22, 4, 1, "", "with_qformer"]], "lmflow.models.vision_encoder": [[24, 5, 1, "", "build_vision_tower"], [23, 0, 0, "-", "clip_encoder"]], "lmflow.models.vision_encoder.clip_encoder": [[23, 2, 1, "", "CLIPVisionTower"], [23, 5, 1, "", "build_vision_tower"]], "lmflow.models.vision_encoder.clip_encoder.CLIPVisionTower": [[23, 6, 1, "", "config"], [23, 6, 1, "", "device"], [23, 6, 1, "", "dtype"], [23, 6, 1, "", "dummy_feature"], [23, 3, 1, "", "encode_images"], [23, 3, 1, "", "feature_select"], [23, 3, 1, "", "forward"], [23, 6, 1, "", "hidden_size"], [23, 4, 1, "", "is_loaded"], [23, 3, 1, "", "load_model"], [23, 6, 1, "", "num_patches"], [23, 3, 1, "", "prepare_inputs_labels_for_multimodal"], [23, 4, 1, "", "select_feature"], [23, 4, 1, "", "select_layer"], [23, 4, 1, "", "vision_tower_name"]], "lmflow.optim": [[25, 0, 0, "-", "adabelief"], [26, 0, 0, "-", "adabound"], [27, 0, 0, "-", "adadelta"], [28, 0, 0, "-", "adagrad"], [29, 0, 0, "-", "adam"], [30, 0, 0, "-", "adamax"], [31, 0, 0, "-", "adamp"], [32, 0, 0, "-", "adamw_schedule_free"], [33, 0, 0, "-", "adan"], [34, 0, 0, "-", "dummy"], [36, 0, 0, "-", "lamb"], [37, 0, 0, "-", "lars"], [38, 0, 0, "-", "nadam"], [39, 0, 0, "-", "novograd"], [40, 0, 0, "-", "optimizers"], [41, 0, 0, "-", "radam"], [42, 0, 0, "-", "sgd_schedule_free"], [43, 0, 0, "-", "sgdp"], [44, 0, 0, "-", "sophia"], [45, 0, 0, "-", "yogi"]], "lmflow.optim.adabelief": [[25, 2, 1, "", "AdaBelief"]], "lmflow.optim.adabelief.AdaBelief": [[25, 
3, 1, "", "__setstate__"], [25, 4, 1, "", "defaults"], [25, 4, 1, "id0", "degenerated_to_sgd"], [25, 4, 1, "", "fixed_decay"], [25, 4, 1, "", "rectify"], [25, 3, 1, "", "reset"], [25, 3, 1, "", "step"], [25, 4, 1, "", "weight_decouple"]], "lmflow.optim.adabound": [[26, 2, 1, "", "AdaBound"]], "lmflow.optim.adabound.AdaBound": [[26, 3, 1, "", "__setstate__"], [26, 4, 1, "", "base_lrs"], [26, 4, 1, "", "defaults"], [26, 3, 1, "", "step"]], "lmflow.optim.adadelta": [[27, 2, 1, "", "Adadelta"]], "lmflow.optim.adadelta.Adadelta": [[27, 4, 1, "", "defaults"], [27, 3, 1, "", "step"]], "lmflow.optim.adagrad": [[28, 2, 1, "", "AdaGrad"]], "lmflow.optim.adagrad.AdaGrad": [[28, 4, 1, "", "defaults"], [28, 3, 1, "", "step"]], "lmflow.optim.adam": [[29, 2, 1, "", "Adam"]], "lmflow.optim.adam.Adam": [[29, 4, 1, "", "defaults"], [29, 3, 1, "", "step"]], "lmflow.optim.adamax": [[30, 2, 1, "", "Adamax"]], "lmflow.optim.adamax.Adamax": [[30, 3, 1, "", "__setstate__"], [30, 4, 1, "", "defaults"], [30, 3, 1, "", "step"]], "lmflow.optim.adamp": [[31, 2, 1, "", "AdamP"]], "lmflow.optim.adamp.AdamP": [[31, 3, 1, "", "_channel_view"], [31, 3, 1, "", "_cosine_similarity"], [31, 3, 1, "", "_layer_view"], [31, 3, 1, "", "_projection"], [31, 4, 1, "", "defaults"], [31, 3, 1, "", "step"]], "lmflow.optim.adamw_schedule_free": [[32, 2, 1, "", "AdamWScheduleFree"]], "lmflow.optim.adamw_schedule_free.AdamWScheduleFree": [[32, 4, 1, "", "defaults"], [32, 3, 1, "", "eval"], [32, 3, 1, "", "step"], [32, 3, 1, "", "train"]], "lmflow.optim.adan": [[33, 2, 1, "", "Adan"], [33, 5, 1, "", "_multi_tensor_adan"], [33, 5, 1, "", "_single_tensor_adan"]], "lmflow.optim.adan.Adan": [[33, 3, 1, "", "__setstate__"], [33, 4, 1, "", "defaults"], [33, 3, 1, "", "restart_opt"], [33, 3, 1, "", "step"]], "lmflow.optim.dummy": [[34, 2, 1, "", "Dummy"]], "lmflow.optim.dummy.Dummy": [[34, 4, 1, "", "defaults"], [34, 3, 1, "", "step"]], "lmflow.optim.lamb": [[36, 2, 1, "", "Lamb"]], "lmflow.optim.lamb.Lamb": [[36, 4, 1, "", "adam"], [36, 4, 1, "", "clamp_value"], [36, 4, 1, "", "debias"], [36, 4, 1, "", "defaults"], [36, 3, 1, "", "step"]], "lmflow.optim.lars": [[37, 2, 1, "", "LARS"]], "lmflow.optim.lars.LARS": [[37, 3, 1, "", "__setstate__"], [37, 4, 1, "", "defaults"], [37, 3, 1, "", "step"]], "lmflow.optim.nadam": [[38, 2, 1, "", "NAdam"]], "lmflow.optim.nadam.NAdam": [[38, 3, 1, "", "__setstate__"], [38, 4, 1, "", "defaults"], [38, 3, 1, "", "step"]], "lmflow.optim.novograd": [[39, 2, 1, "", "NovoGrad"]], "lmflow.optim.novograd.NovoGrad": [[39, 3, 1, "", "__setstate__"], [39, 4, 1, "", "defaults"], [39, 3, 1, "", "step"]], "lmflow.optim.radam": [[41, 2, 1, "", "RAdam"]], "lmflow.optim.radam.RAdam": [[41, 3, 1, "", "__setstate__"], [41, 4, 1, "", "defaults"], [41, 3, 1, "", "step"]], "lmflow.optim.sgd_schedule_free": [[42, 2, 1, "", "SGDScheduleFree"]], "lmflow.optim.sgd_schedule_free.SGDScheduleFree": [[42, 4, 1, "", "defaults"], [42, 3, 1, "", "eval"], [42, 3, 1, "", "step"], [42, 3, 1, "", "train"]], "lmflow.optim.sgdp": [[43, 2, 1, "", "SGDP"]], "lmflow.optim.sgdp.SGDP": [[43, 3, 1, "", "_channel_view"], [43, 3, 1, "", "_cosine_similarity"], [43, 3, 1, "", "_layer_view"], [43, 3, 1, "", "_projection"], [43, 4, 1, "", "defaults"], [43, 3, 1, "", "step"]], "lmflow.optim.sophia": [[44, 2, 1, "", "SophiaG"]], "lmflow.optim.sophia.SophiaG": [[44, 3, 1, "", "__setstate__"], [44, 4, 1, "", "defaults"], [44, 3, 1, "", "step"], [44, 3, 1, "", "update_hessian"]], "lmflow.optim.yogi": [[45, 2, 1, "", "Yogi"]], "lmflow.optim.yogi.Yogi": [[45, 4, 1, 
"", "defaults"], [45, 3, 1, "", "step"]], "lmflow.pipeline": [[46, 0, 0, "-", "auto_pipeline"], [47, 0, 0, "-", "base_aligner"], [48, 0, 0, "-", "base_pipeline"], [49, 0, 0, "-", "base_tuner"], [50, 0, 0, "-", "dpo_aligner"], [51, 0, 0, "-", "dpov2_aligner"], [52, 0, 0, "-", "evaluator"], [53, 0, 0, "-", "finetuner"], [55, 0, 0, "-", "inferencer"], [56, 0, 0, "-", "iterative_dpo_aligner"], [57, 0, 0, "-", "raft_aligner"], [58, 0, 0, "-", "rm_inferencer"], [59, 0, 0, "-", "rm_tuner"], [62, 0, 0, "-", "utils"], [69, 0, 0, "-", "vllm_inferencer"]], "lmflow.pipeline.auto_pipeline": [[46, 2, 1, "", "AutoPipeline"], [46, 1, 1, "", "PIPELINE_MAPPING"], [46, 5, 1, "", "is_package_version_at_least"]], "lmflow.pipeline.auto_pipeline.AutoPipeline": [[46, 3, 1, "", "get_pipeline"]], "lmflow.pipeline.base_aligner": [[47, 2, 1, "", "BaseAligner"]], "lmflow.pipeline.base_aligner.BaseAligner": [[47, 3, 1, "", "_check_if_alignable"], [47, 3, 1, "", "align"]], "lmflow.pipeline.base_pipeline": [[48, 2, 1, "", "BasePipeline"]], "lmflow.pipeline.base_tuner": [[49, 2, 1, "", "BaseTuner"]], "lmflow.pipeline.base_tuner.BaseTuner": [[49, 3, 1, "", "_check_if_tunable"], [49, 3, 1, "", "tune"]], "lmflow.pipeline.dpo_aligner": [[50, 2, 1, "", "DPOAligner"], [50, 5, 1, "", "get_paired_dataset"]], "lmflow.pipeline.dpo_aligner.DPOAligner": [[50, 3, 1, "", "_initialize_trainer"], [50, 3, 1, "", "_load_dataset"], [50, 3, 1, "", "align"], [50, 4, 1, "", "aligner_args"], [50, 4, 1, "", "data_args"], [50, 4, 1, "", "model_args"]], "lmflow.pipeline.dpov2_aligner": [[51, 2, 1, "", "DPOv2Aligner"], [51, 2, 1, "", "MemorySafeDPOv2Aligner"], [51, 1, 1, "", "ReferenceModelArguments"], [51, 1, 1, "", "logger"]], "lmflow.pipeline.dpov2_aligner.DPOv2Aligner": [[51, 3, 1, "", "__prepare_training_args"], [51, 3, 1, "", "_calc_response_lengths"], [51, 3, 1, "", "_calc_reward_with_length_penalty"], [51, 3, 1, "", "_sampling_paired_idx_from_rewards"], [51, 3, 1, "", "_sampling_paired_idx_from_rewards_fast"], [51, 3, 1, "", "align"], [51, 4, 1, "", "aligner_args"], [51, 3, 1, "", "convert_to_paired_dataset"], [51, 4, 1, "", "data_args"], [51, 4, 1, "", "model_args"], [51, 4, 1, "", "ref_model_args"], [51, 3, 1, "", "sampling_paired_idx_from_rewards"]], "lmflow.pipeline.dpov2_aligner.MemorySafeDPOv2Aligner": [[51, 3, 1, "", "align"], [51, 4, 1, "", "aligner_args"], [51, 4, 1, "", "aligner_file_path"], [51, 4, 1, "", "data_args"], [51, 4, 1, "", "model_args"], [51, 4, 1, "", "ref_model_args"]], "lmflow.pipeline.evaluator": [[52, 2, 1, "", "Evaluator"]], "lmflow.pipeline.evaluator.Evaluator": [[52, 3, 1, "", "_evaluate_acc_with_accelerator"], [52, 3, 1, "", "_evaluate_acc_with_deepspeed"], [52, 3, 1, "", "_evaluate_nll"], [52, 3, 1, "", "_evaluate_ppl"], [52, 3, 1, "", "_match"], [52, 4, 1, "", "block_size"], [52, 4, 1, "", "config"], [52, 3, 1, "", "create_dataloader"], [52, 4, 1, "", "data_args"], [52, 3, 1, "", "evaluate"], [52, 4, 1, "", "evaluator_args"], [52, 4, 1, "", "local_rank"], [52, 4, 1, "", "minibatch_size"], [52, 4, 1, "", "model_args"], [52, 4, 1, "", "train_batch_size"], [52, 4, 1, "", "world_size"]], "lmflow.pipeline.finetuner": [[53, 2, 1, "", "Finetuner"], [53, 1, 1, "", "logger"]], "lmflow.pipeline.finetuner.Finetuner": [[53, 3, 1, "", "create_customized_optimizer"], [53, 4, 1, "", "data_args"], [53, 4, 1, "", "finetuner_args"], [53, 3, 1, "", "group_text"], [53, 4, 1, "id0", "last_checkpoint"], [53, 4, 1, "", "log_level"], [53, 4, 1, "", "model_args"], [53, 3, 1, "", "tune"]], "lmflow.pipeline.inferencer": [[55, 2, 1, 
"", "Inferencer"], [55, 2, 1, "", "SpeculativeInferencer"], [55, 2, 1, "", "ToolInferencer"], [55, 1, 1, "", "logger"], [55, 5, 1, "", "rstrip_partial_utf8"], [55, 1, 1, "", "supported_dataset_type"]], "lmflow.pipeline.inferencer.Inferencer": [[55, 4, 1, "", "config"], [55, 3, 1, "", "create_dataloader"], [55, 4, 1, "", "data_args"], [55, 3, 1, "", "inference"], [55, 4, 1, "", "inferencer_args"], [55, 4, 1, "", "local_rank"], [55, 4, 1, "", "model_args"], [55, 3, 1, "", "stream_inference"], [55, 4, 1, "", "world_size"]], "lmflow.pipeline.inferencer.SpeculativeInferencer": [[55, 3, 1, "", "autoregressive_sampling"], [55, 4, 1, "", "draft_config"], [55, 4, 1, "", "draft_model_args"], [55, 3, 1, "", "inference"], [55, 3, 1, "", "predict_next_token"], [55, 3, 1, "", "sample"], [55, 3, 1, "", "score_to_prob"], [55, 3, 1, "", "stream_inference"]], "lmflow.pipeline.inferencer.ToolInferencer": [[55, 3, 1, "", "code_exec"], [55, 3, 1, "", "inference"], [55, 4, 1, "", "model"]], "lmflow.pipeline.iterative_dpo_aligner": [[56, 2, 1, "", "IterativeDPOAligner"], [56, 1, 1, "", "logger"]], "lmflow.pipeline.iterative_dpo_aligner.IterativeDPOAligner": [[56, 3, 1, "", "__filter_args"], [56, 3, 1, "", "_align_single_iteration"], [56, 3, 1, "", "_do_reward_model_inference"], [56, 3, 1, "", "_do_single_dpo_align"], [56, 3, 1, "", "_do_target_model_inference"], [56, 3, 1, "", "_parse_dpo_aligner_args"], [56, 3, 1, "", "_parse_reward_model_inference_args"], [56, 3, 1, "", "_parse_target_model_inference_args"], [56, 3, 1, "", "align"], [56, 4, 1, "", "aligner_args"], [56, 4, 1, "", "data_args"], [56, 4, 1, "", "model_args"], [56, 4, 1, "", "ref_model_args"], [56, 4, 1, "", "reward_model_args"], [56, 4, 1, "", "workspace_path"]], "lmflow.pipeline.raft_aligner": [[57, 2, 1, "", "RaftAligner"], [57, 1, 1, "", "logger"]], "lmflow.pipeline.raft_aligner.RaftAligner": [[57, 4, 1, "", "INF"], [57, 3, 1, "", "_clean_text"], [57, 3, 1, "", "_discard_sample"], [57, 3, 1, "", "_get_batch_dataset_local"], [57, 3, 1, "", "_get_batch_dataset_top"], [57, 3, 1, "", "_initialize_trainer"], [57, 3, 1, "", "_load_dataset"], [57, 3, 1, "", "_load_input_dataset"], [57, 3, 1, "", "align"], [57, 4, 1, "", "aligner_args"], [57, 4, 1, "", "data_args"], [57, 4, 1, "", "model_args"], [57, 4, 1, "", "output_reward_path"]], "lmflow.pipeline.rm_inferencer": [[58, 2, 1, "", "RewardModelInferencer"], [58, 1, 1, "", "logger"]], "lmflow.pipeline.rm_inferencer.RewardModelInferencer": [[58, 3, 1, "", "__distributed_inference"], [58, 3, 1, "", "__inference"], [58, 3, 1, "", "__post_process_model_output"], [58, 3, 1, "", "__vllm_inference"], [58, 3, 1, "", "_inference"], [58, 3, 1, "", "compress_list"], [58, 4, 1, "", "data_args"], [58, 3, 1, "", "flatten_list"], [58, 3, 1, "", "inference"], [58, 4, 1, "", "inferencer_args"], [58, 4, 1, "", "local_rank"], [58, 4, 1, "", "model_args"], [58, 4, 1, "", "world_size"]], "lmflow.pipeline.rm_tuner": [[59, 2, 1, "", "RewardModelTuner"], [59, 1, 1, "", "logger"]], "lmflow.pipeline.rm_tuner.RewardModelTuner": [[59, 3, 1, "", "tune"]], "lmflow.pipeline.utils": [[60, 0, 0, "-", "dpov2_dataprocessor"], [61, 0, 0, "-", "dpov2_trainer"], [63, 0, 0, "-", "memory_safe_dpov2_align"], [64, 0, 0, "-", "memory_safe_vllm_inference"], [65, 0, 0, "-", "peft_trainer"], [66, 0, 0, "-", "raft_trainer"], [67, 0, 0, "-", "rm_dataprocessor"], [68, 0, 0, "-", "rm_trainer"]], "lmflow.pipeline.utils.dpov2_dataprocessor": [[60, 2, 1, "", "PreferenceDataCollatorWithPadding"], [60, 1, 1, "", "logger"]], 
"lmflow.pipeline.utils.dpov2_dataprocessor.PreferenceDataCollatorWithPadding": [[60, 3, 1, "", "__call__"], [60, 3, 1, "", "collate"], [60, 4, 1, "", "is_encoder_decoder"], [60, 4, 1, "", "label_pad_token_id"], [60, 4, 1, "", "mask_prompt"], [60, 4, 1, "", "max_length"], [60, 4, 1, "", "max_prompt_length"], [60, 4, 1, "", "max_target_length"], [60, 4, 1, "", "model"], [60, 4, 1, "", "padding"], [60, 4, 1, "", "padding_value"], [60, 3, 1, "", "tokenize_batch_element"], [60, 4, 1, "", "tokenizer"], [60, 4, 1, "", "truncation_mode"]], "lmflow.pipeline.utils.dpov2_trainer": [[61, 2, 1, "", "DPOv2Trainer"], [61, 1, 1, "", "logger"]], "lmflow.pipeline.utils.dpov2_trainer.DPOv2Trainer": [[61, 3, 1, "", "dpo_loss"], [61, 3, 1, "", "get_batch_loss_metrics"], [61, 3, 1, "", "get_batch_metrics"], [61, 4, 1, "", "len_penalty"], [61, 4, 1, "", "use_dpo_data_collator"]], "lmflow.pipeline.utils.memory_safe_dpov2_align": [[63, 1, 1, "", "ReferenceModelArguments"], [63, 1, 1, "", "logger"], [63, 5, 1, "", "main"]], "lmflow.pipeline.utils.memory_safe_vllm_inference": [[64, 1, 1, "", "logger"], [64, 5, 1, "", "main"]], "lmflow.pipeline.utils.peft_trainer": [[65, 2, 1, "", "PeftSavingCallback"], [65, 2, 1, "", "PeftTrainer"]], "lmflow.pipeline.utils.peft_trainer.PeftSavingCallback": [[65, 3, 1, "", "_save"], [65, 3, 1, "", "on_epoch_end"], [65, 3, 1, "", "on_save"], [65, 3, 1, "", "on_train_end"]], "lmflow.pipeline.utils.peft_trainer.PeftTrainer": [[65, 3, 1, "", "_save_checkpoint"]], "lmflow.pipeline.utils.raft_trainer": [[66, 1, 1, "", "DEFAULT_CALLBACKS"], [66, 1, 1, "id0", "DEFAULT_PROGRESS_CALLBACK"], [66, 1, 1, "", "IS_SAGEMAKER_MP_POST_1_10"], [66, 1, 1, "", "OPTIMIZER_NAME"], [66, 2, 1, "", "RaftTrainer"], [66, 1, 1, "", "SCALER_NAME"], [66, 1, 1, "", "SCHEDULER_NAME"], [66, 1, 1, "", "TRAINER_STATE_NAME"], [66, 1, 1, "", "TRAINING_ARGS_NAME"], [66, 1, 1, "", "_is_native_cpu_amp_available"], [66, 1, 1, "", "is_torch_greater_or_equal_than_1_10"], [66, 1, 1, "", "is_torch_less_than_1_11"], [66, 1, 1, "", "logger"], [66, 1, 1, "", "skip_first_batches"]], "lmflow.pipeline.utils.raft_trainer.RaftTrainer": [[66, 3, 1, "", "_add_sm_patterns_to_gitignore"], [66, 3, 1, "", "_gather_and_numpify"], [66, 3, 1, "", "_get_collator_with_removed_columns"], [66, 3, 1, "", "_get_eval_sampler"], [66, 3, 1, "", "_get_output_dir"], [66, 3, 1, "", "_get_train_sampler"], [66, 3, 1, "", "_hp_search_setup"], [66, 3, 1, "", "_inner_training_loop"], [66, 3, 1, "", "_issue_warnings_after_load"], [66, 3, 1, "", "_load_best_model"], [66, 3, 1, "", "_load_from_checkpoint"], [66, 3, 1, "", "_load_optimizer_and_scheduler"], [66, 3, 1, "", "_load_rng_state"], [66, 4, 1, "", "_loggers_initialized"], [66, 3, 1, "", "_maybe_log_save_evaluate"], [66, 4, 1, "", "_memory_tracker"], [66, 3, 1, "", "_move_model_to_device"], [66, 3, 1, "", "_nested_gather"], [66, 3, 1, "", "_one_train"], [66, 3, 1, "", "_pad_across_processes"], [66, 3, 1, "", "_prepare_input"], [66, 3, 1, "", "_prepare_inputs"], [66, 3, 1, "", "_push_from_checkpoint"], [66, 3, 1, "", "_remove_unused_columns"], [66, 3, 1, "", "_report_to_hp_search"], [66, 3, 1, "", "_rotate_checkpoints"], [66, 3, 1, "", "_save"], [66, 3, 1, "", "_save_checkpoint"], [66, 3, 1, "", "_save_tpu"], [66, 3, 1, "", "_set_signature_columns_if_needed"], [66, 4, 1, "", "_signature_columns"], [66, 3, 1, "", "_sorted_checkpoints"], [66, 4, 1, "", "_train_batch_size"], [66, 3, 1, "", "_tune_save_checkpoint"], [66, 3, 1, "", "_wrap_model"], [66, 3, 1, "", "add_callback"], [66, 4, 1, "", "args"], [66, 3, 1, 
"", "autocast_smart_context_manager"], [66, 3, 1, "", "call_model_init"], [66, 4, 1, "", "callback_handler"], [66, 4, 1, "", "callbacks"], [66, 4, 1, "", "can_return_loss"], [66, 3, 1, "", "compute_loss"], [66, 3, 1, "", "compute_loss_context_manager"], [66, 4, 1, "", "compute_metrics"], [66, 4, 1, "id1", "control"], [66, 3, 1, "", "create_model_card"], [66, 3, 1, "", "create_optimizer"], [66, 3, 1, "", "create_optimizer_and_scheduler"], [66, 3, 1, "", "create_scheduler"], [66, 4, 1, "", "current_flos"], [66, 4, 1, "", "data_collator"], [66, 4, 1, "", "deepspeed"], [66, 4, 1, "", "default_callbacks"], [66, 4, 1, "", "default_collator"], [66, 4, 1, "", "default_label_names"], [66, 4, 1, "", "do_grad_scaling"], [66, 4, 1, "", "eval_dataset"], [66, 3, 1, "", "evaluate"], [66, 3, 1, "", "evaluation_loop"], [66, 3, 1, "", "floating_point_ops"], [66, 4, 1, "", "fsdp"], [66, 3, 1, "", "get_eval_dataloader"], [66, 3, 1, "", "get_optimizer_cls_and_kwargs"], [66, 3, 1, "", "get_test_dataloader"], [66, 3, 1, "", "get_train_dataloader"], [66, 4, 1, "", "hp_name"], [66, 4, 1, "", "hp_search_backend"], [66, 3, 1, "", "hyperparameter_search"], [66, 3, 1, "", "init_git_repo"], [66, 3, 1, "", "ipex_optimize_model"], [66, 4, 1, "", "is_in_train"], [66, 3, 1, "", "is_local_process_zero"], [66, 3, 1, "", "is_world_process_zero"], [66, 4, 1, "", "label_names"], [66, 3, 1, "", "log"], [66, 4, 1, "", "log_level"], [66, 4, 1, "", "model"], [66, 4, 1, "", "model_wrapped"], [66, 3, 1, "", "num_examples"], [66, 4, 1, "", "place_model_on_device"], [66, 3, 1, "", "pop_callback"], [66, 3, 1, "", "predict"], [66, 3, 1, "", "prediction_loop"], [66, 3, 1, "", "prediction_step"], [66, 4, 1, "", "preprocess_logits_for_metrics"], [66, 3, 1, "", "push_to_hub"], [66, 3, 1, "", "remove_callback"], [66, 4, 1, "", "save_counter"], [66, 3, 1, "", "save_model"], [66, 4, 1, "", "sharded_ddp"], [66, 4, 1, "", "state"], [66, 3, 1, "", "store_flos"], [66, 4, 1, "", "tokenizer"], [66, 3, 1, "", "torch_jit_model_eval"], [66, 3, 1, "", "train"], [66, 4, 1, "", "train_dataset"], [66, 3, 1, "", "training_step"], [66, 4, 1, "", "use_apex"], [66, 4, 1, "", "use_cpu_amp"], [66, 4, 1, "", "use_cuda_amp"], [66, 4, 1, "", "use_tune_checkpoints"]], "lmflow.pipeline.utils.rm_dataprocessor": [[67, 2, 1, "", "RewardDataCollatorWithPadding"], [67, 1, 1, "", "logger"]], "lmflow.pipeline.utils.rm_dataprocessor.RewardDataCollatorWithPadding": [[67, 3, 1, "", "__call__"], [67, 4, 1, "", "max_length"], [67, 4, 1, "", "pad_to_multiple_of"], [67, 4, 1, "", "padding"], [67, 4, 1, "", "return_tensors"], [67, 4, 1, "", "tokenizer"]], "lmflow.pipeline.utils.rm_trainer": [[68, 2, 1, "", "PeftRewardTrainer"], [68, 2, 1, "", "RewardTrainer"], [68, 5, 1, "", "compute_metrics"], [68, 5, 1, "", "rm_loss"]], "lmflow.pipeline.utils.rm_trainer.PeftRewardTrainer": [[68, 3, 1, "", "compute_loss"]], "lmflow.pipeline.utils.rm_trainer.RewardTrainer": [[68, 3, 1, "", "compute_loss"]], "lmflow.pipeline.vllm_inferencer": [[69, 2, 1, "", "InferencerWithOffloading"], [69, 2, 1, "", "MemorySafeVLLMInferencer"], [69, 2, 1, "", "VLLMInferencer"], [69, 1, 1, "", "logger"]], "lmflow.pipeline.vllm_inferencer.InferencerWithOffloading": [[69, 4, 1, "", "data_args"], [69, 4, 1, "", "eos_token_id"], [69, 3, 1, "", "inference"], [69, 4, 1, "", "inferencer_args"], [69, 3, 1, "", "load_inference_results"], [69, 4, 1, "", "model_args"], [69, 3, 1, "", "save_inference_results"]], "lmflow.pipeline.vllm_inferencer.MemorySafeVLLMInferencer": [[69, 3, 1, "", "inference"], [69, 4, 1, "", 
"inferencer_file_path"]], "lmflow.pipeline.vllm_inferencer.VLLMInferencer": [[69, 3, 1, "", "_distributed_inference"], [69, 3, 1, "", "_inference"], [69, 3, 1, "", "inference"], [69, 3, 1, "", "load_inference_results"], [69, 3, 1, "", "parse_to_sampling_params"], [69, 4, 1, "", "sampling_params"], [69, 3, 1, "", "save_inference_results"]], "lmflow.tokenization": [[70, 0, 0, "-", "hf_decoder_model"], [71, 0, 0, "-", "hf_text_regression_model"]], "lmflow.tokenization.hf_decoder_model": [[70, 5, 1, "", "blocking"], [70, 5, 1, "", "conversation_tokenize_function"], [70, 1, 1, "", "logger"], [70, 1, 1, "", "tok_logger"], [70, 5, 1, "", "tokenize_function"]], "lmflow.tokenization.hf_text_regression_model": [[71, 5, 1, "", "blocking"], [71, 5, 1, "", "blocking_paired"], [71, 5, 1, "", "blocking_text_to_textlist"], [71, 5, 1, "", "conversation_tokenize_function"], [71, 1, 1, "", "logger"], [71, 5, 1, "", "paired_conversation_tokenize_function"], [71, 5, 1, "", "text_to_textlist_tokenize_function"], [71, 1, 1, "", "tok_logger"], [71, 5, 1, "", "tokenize_function"]], "lmflow.utils": [[73, 0, 0, "-", "common"], [74, 0, 0, "-", "constants"], [81, 0, 0, "-", "conversation_template"], [88, 0, 0, "-", "data_utils"], [92, 0, 0, "-", "flash_attention"], [96, 0, 0, "-", "llava_conversation_lib"], [97, 0, 0, "-", "model"], [98, 0, 0, "-", "multimodal"], [99, 0, 0, "-", "position_interpolation"]], "lmflow.utils.common": [[73, 5, 1, "", "add_dataclass_attr_prefix"], [73, 5, 1, "", "create_copied_dataclass"], [73, 1, 1, "", "logger"], [73, 5, 1, "", "make_shell_args_from_dataclass"], [73, 5, 1, "", "print_banner"], [73, 5, 1, "", "remove_dataclass_attr_prefix"]], "lmflow.utils.constants": [[74, 1, 1, "", "CONTROLLER_HEART_BEAT_EXPIRATION"], [74, 1, 1, "", "CONVERSATION_DATASET_DESCRIPTION"], [74, 1, 1, "", "CONVERSATION_ROLE_NAMES"], [74, 1, 1, "", "DATASET_DESCRIPTION_MAP"], [74, 1, 1, "", "DEFAULT_IMAGE_PATCH_TOKEN"], [74, 1, 1, "", "DEFAULT_IMAGE_TOKEN"], [74, 1, 1, "", "DEFAULT_IM_END_TOKEN"], [74, 1, 1, "", "DEFAULT_IM_START_TOKEN"], [74, 1, 1, "", "FLOAT_ONLY_DATASET_DESCRIPTION"], [74, 1, 1, "", "IGNORE_INDEX"], [74, 1, 1, "", "IMAGE_TOKEN_INDEX"], [74, 1, 1, "", "INSTANCE_FIELDS_MAP"], [74, 1, 1, "", "LMFLOW_LORA_TARGET_MODULES_MAPPING"], [74, 1, 1, "", "LOGDIR"], [74, 1, 1, "", "MEMORY_SAFE_DPOV2_ALIGN_ENV_VAR_TO_REMOVE"], [74, 1, 1, "", "MEMORY_SAFE_VLLM_INFERENCE_ENV_VAR_TO_REMOVE"], [74, 1, 1, "", "MEMORY_SAFE_VLLM_INFERENCE_FINISH_FLAG"], [74, 1, 1, "", "PAIRED_CONVERSATION_DATASET_DESCRIPTION"], [74, 1, 1, "", "PAIRED_TEXT_TO_TEXT_DATASET_DESCRIPTION"], [74, 1, 1, "", "RETURN_CODE_ERROR_BUFFER"], [74, 1, 1, "", "TEXT2TEXT_DATASET_DESCRIPTION"], [74, 1, 1, "", "TEXT2TEXT_DATASET_DETAILS"], [74, 1, 1, "", "TEXT2TEXT_DATASET_LONG_DESCRITION"], [74, 1, 1, "", "TEXT_ONLY_DATASET_DESCRIPTION"], [74, 1, 1, "", "TEXT_ONLY_DATASET_DETAILS"], [74, 1, 1, "", "TEXT_ONLY_DATASET_LONG_DESCRITION"], [74, 1, 1, "", "TEXT_TO_SCORED_TEXTLIST_DATASET_DESCRIPTION"], [74, 1, 1, "", "TEXT_TO_TEXTLIST_DATASET_DESCRIPTION"], [74, 1, 1, "", "WORKER_HEART_BEAT_INTERVAL"]], "lmflow.utils.conversation_template": [[81, 1, 1, "", "CHATGLM3_TEMPLATE"], [81, 1, 1, "", "CHATML_TEMPLATE"], [81, 2, 1, "", "ConversationTemplate"], [81, 1, 1, "", "DEEPSEEK_TEMPLATE"], [81, 1, 1, "", "EMPTY_NO_SPECIAL_TOKENS_TEMPLATE"], [81, 1, 1, "", "EMPTY_TEMPLATE"], [81, 1, 1, "", "FOX_TEMPLATE"], [81, 1, 1, "", "GEMMA_TEMPLATE"], [81, 1, 1, "", "INTERNLM2_TEMPLATE"], [81, 1, 1, "", "LLAMA2_TEMPLATE"], [81, 1, 1, "", "LLAMA3_TEMPLATE"], [81, 1, 1, 
"", "PHI3_TEMPLATE"], [81, 1, 1, "", "PRESET_TEMPLATES"], [81, 1, 1, "", "QWEN2_TEMPLATE"], [81, 1, 1, "", "YI1_5_TEMPLATE"], [81, 1, 1, "", "ZEPHYR_TEMPLATE"], [75, 0, 0, "-", "base"], [76, 0, 0, "-", "chatglm"], [77, 0, 0, "-", "chatml"], [78, 0, 0, "-", "deepseek"], [79, 0, 0, "-", "fox"], [80, 0, 0, "-", "gemma"], [82, 0, 0, "-", "internlm"], [83, 0, 0, "-", "llama"], [84, 0, 0, "-", "phi"], [85, 0, 0, "-", "qwen"], [86, 0, 0, "-", "yi"], [87, 0, 0, "-", "zephyr"]], "lmflow.utils.conversation_template.ConversationTemplate": [[81, 3, 1, "", "__post_init__"], [81, 3, 1, "", "_encode"], [81, 3, 1, "", "_encode_template"], [81, 3, 1, "", "_ensure_id_list"], [81, 3, 1, "", "add_special_starter"], [81, 3, 1, "", "add_special_stopper"], [81, 4, 1, "", "assistant_formatter"], [81, 3, 1, "", "encode_conversation"], [81, 3, 1, "", "remove_last_separator"], [81, 4, 1, "", "separator"], [81, 4, 1, "", "special_starter"], [81, 4, 1, "", "special_stopper"], [81, 4, 1, "", "system_formatter"], [81, 4, 1, "", "template_name"], [81, 4, 1, "", "tools_formatter"], [81, 4, 1, "", "user_formatter"]], "lmflow.utils.conversation_template.base": [[75, 2, 1, "", "ConversationTemplate"], [75, 1, 1, "", "EMPTY_NO_SPECIAL_TOKENS_TEMPLATE"], [75, 1, 1, "", "EMPTY_TEMPLATE"], [75, 2, 1, "", "EmptyFormatter"], [75, 2, 1, "", "Formatter"], [75, 2, 1, "", "ListFormatter"], [75, 2, 1, "", "StringFormatter"], [75, 2, 1, "", "TemplateComponent"], [75, 1, 1, "", "logger"]], "lmflow.utils.conversation_template.base.ConversationTemplate": [[75, 3, 1, "", "__post_init__"], [75, 3, 1, "", "_encode"], [75, 3, 1, "", "_encode_template"], [75, 3, 1, "", "_ensure_id_list"], [75, 3, 1, "", "add_special_starter"], [75, 3, 1, "", "add_special_stopper"], [75, 4, 1, "", "assistant_formatter"], [75, 3, 1, "", "encode_conversation"], [75, 3, 1, "", "remove_last_separator"], [75, 4, 1, "", "separator"], [75, 4, 1, "", "special_starter"], [75, 4, 1, "", "special_stopper"], [75, 4, 1, "", "system_formatter"], [75, 4, 1, "", "template_name"], [75, 4, 1, "", "tools_formatter"], [75, 4, 1, "", "user_formatter"]], "lmflow.utils.conversation_template.base.EmptyFormatter": [[75, 3, 1, "", "__post_init__"], [75, 3, 1, "", "format"]], "lmflow.utils.conversation_template.base.Formatter": [[75, 3, 1, "", "format"], [75, 3, 1, "", "has_placeholder"], [75, 4, 1, "", "template"]], "lmflow.utils.conversation_template.base.ListFormatter": [[75, 3, 1, "", "format"]], "lmflow.utils.conversation_template.base.StringFormatter": [[75, 3, 1, "", "__post_init__"], [75, 3, 1, "", "format"]], "lmflow.utils.conversation_template.base.TemplateComponent": [[75, 3, 1, "", "__post_init__"], [75, 3, 1, "", "__repr__"], [75, 3, 1, "", "__str__"], [75, 4, 1, "", "content"], [75, 4, 1, "", "mask"], [75, 4, 1, "", "type"]], "lmflow.utils.conversation_template.chatglm": [[76, 1, 1, "", "CHATGLM3_TEMPLATE"]], "lmflow.utils.conversation_template.chatml": [[77, 1, 1, "", "CHATML_TEMPLATE"]], "lmflow.utils.conversation_template.deepseek": [[78, 1, 1, "", "DEEPSEEK_TEMPLATE"]], "lmflow.utils.conversation_template.fox": [[79, 1, 1, "", "FOX_TEMPLATE"]], "lmflow.utils.conversation_template.gemma": [[80, 1, 1, "", "GEMMA_TEMPLATE"], [80, 2, 1, "", "GemmaConversationTemplate"], [80, 1, 1, "", "logger"]], "lmflow.utils.conversation_template.gemma.GemmaConversationTemplate": [[80, 3, 1, "", "encode_conversation"]], "lmflow.utils.conversation_template.internlm": [[82, 1, 1, "", "INTERNLM2_TEMPLATE"]], "lmflow.utils.conversation_template.llama": [[83, 1, 1, "", "LLAMA2_TEMPLATE"], [83, 
1, 1, "", "LLAMA3_TEMPLATE"], [83, 2, 1, "", "Llama2ConversationTemplate"], [83, 1, 1, "", "logger"]], "lmflow.utils.conversation_template.llama.Llama2ConversationTemplate": [[83, 3, 1, "", "_encode"]], "lmflow.utils.conversation_template.phi": [[84, 1, 1, "", "PHI3_TEMPLATE"]], "lmflow.utils.conversation_template.qwen": [[85, 1, 1, "", "QWEN2_TEMPLATE"]], "lmflow.utils.conversation_template.yi": [[86, 1, 1, "", "YI1_5_TEMPLATE"]], "lmflow.utils.conversation_template.zephyr": [[87, 1, 1, "", "ZEPHYR_TEMPLATE"], [87, 2, 1, "", "ZephyrConversationTemplate"], [87, 1, 1, "", "logger"]], "lmflow.utils.conversation_template.zephyr.ZephyrConversationTemplate": [[87, 3, 1, "", "_encode"]], "lmflow.utils.data_utils": [[88, 2, 1, "", "RewardModelInferenceResultWithInput"], [88, 2, 1, "", "VLLMInferenceResultWithInput"], [88, 5, 1, "", "answer_extraction"], [88, 5, 1, "", "batchlize"], [88, 5, 1, "", "load_data"], [88, 5, 1, "", "process_image_flag"], [88, 5, 1, "", "set_random_seed"]], "lmflow.utils.data_utils.RewardModelInferenceResultWithInput": [[88, 4, 1, "", "input"], [88, 4, 1, "", "output"]], "lmflow.utils.data_utils.VLLMInferenceResultWithInput": [[88, 4, 1, "", "input"], [88, 4, 1, "", "output"]], "lmflow.utils.flash_attention": [[89, 0, 0, "-", "bloom_flash_attention"], [90, 0, 0, "-", "gpt2_flash_attention"], [91, 0, 0, "-", "gpt_neo_flash_attention"], [93, 0, 0, "-", "llama_flash_attention"], [94, 0, 0, "-", "triton_flash_attention"]], "lmflow.utils.flash_attention.bloom_flash_attention": [[89, 5, 1, "", "_prepare_attn_mask"], [89, 5, 1, "", "forward"], [89, 5, 1, "", "replace_bloom_attn_with_flash_attn"]], "lmflow.utils.flash_attention.gpt2_flash_attention": [[90, 5, 1, "", "_prepare_decoder_attention_mask"], [90, 5, 1, "", "forward"], [90, 5, 1, "", "replace_gpt2_attn_with_flash_attn"]], "lmflow.utils.flash_attention.gpt_neo_flash_attention": [[91, 5, 1, "", "_attn"], [91, 5, 1, "", "forward"], [91, 5, 1, "", "replace_gpt_neo_attn_with_flash_attn"]], "lmflow.utils.flash_attention.llama_flash_attention": [[93, 5, 1, "", "_prepare_decoder_attention_mask"], [93, 5, 1, "", "forward"], [93, 5, 1, "", "replace_llama_attn_with_flash_attn"]], "lmflow.utils.flash_attention.triton_flash_attention": [[94, 2, 1, "", "FlashAttnFunc"], [94, 2, 1, "", "FlashAttnKVPackedFunc"], [94, 2, 1, "", "FlashAttnQKVPackedFunc"], [94, 5, 1, "", "_bwd_kernel"], [94, 5, 1, "", "_bwd_kernel_one_col_block"], [94, 5, 1, "", "_bwd_preprocess_do_o_dot"], [94, 5, 1, "", "_bwd_store_dk_dv"], [94, 5, 1, "", "_flash_attn_backward"], [94, 5, 1, "", "_flash_attn_forward"], [94, 5, 1, "", "_fwd_kernel"], [94, 1, 1, "", "flash_attn_func"], [94, 1, 1, "", "flash_attn_kvpacked_func"], [94, 1, 1, "", "flash_attn_qkvpacked_func"], [94, 5, 1, "", "init_to_zero"]], "lmflow.utils.flash_attention.triton_flash_attention.FlashAttnFunc": [[94, 3, 1, "", "backward"], [94, 3, 1, "", "forward"]], "lmflow.utils.flash_attention.triton_flash_attention.FlashAttnKVPackedFunc": [[94, 3, 1, "", "backward"], [94, 3, 1, "", "forward"]], "lmflow.utils.flash_attention.triton_flash_attention.FlashAttnQKVPackedFunc": [[94, 3, 1, "", "backward"], [94, 3, 1, "", "forward"]], "lmflow.utils.llava_conversation_lib": [[96, 2, 1, "", "Conversation"], [96, 2, 1, "", "SeparatorStyle"], [96, 1, 1, "", "conv_llama_2"], [96, 1, 1, "", "conv_llava_llama_2"], [96, 1, 1, "", "conv_llava_plain"], [96, 1, 1, "", "conv_llava_v0"], [96, 1, 1, "", "conv_llava_v0_mmtag"], [96, 1, 1, "", "conv_llava_v1"], [96, 1, 1, "", "conv_llava_v1_mmtag"], [96, 1, 1, "", "conv_mpt"], 
[96, 1, 1, "", "conv_templates"], [96, 1, 1, "", "conv_vicuna_v0"], [96, 1, 1, "", "conv_vicuna_v1"], [96, 1, 1, "", "default_conversation"]], "lmflow.utils.llava_conversation_lib.Conversation": [[96, 3, 1, "", "append_message"], [96, 3, 1, "", "copy"], [96, 3, 1, "", "dict"], [96, 3, 1, "", "get_images"], [96, 3, 1, "", "get_prompt"], [96, 4, 1, "", "messages"], [96, 4, 1, "", "offset"], [96, 4, 1, "", "roles"], [96, 4, 1, "", "sep"], [96, 4, 1, "", "sep2"], [96, 4, 1, "", "sep_style"], [96, 4, 1, "", "skip_next"], [96, 4, 1, "", "system"], [96, 3, 1, "", "to_gradio_chatbot"], [96, 4, 1, "", "version"]], "lmflow.utils.llava_conversation_lib.SeparatorStyle": [[96, 4, 1, "", "LLAMA_2"], [96, 4, 1, "", "MPT"], [96, 4, 1, "", "PLAIN"], [96, 4, 1, "", "SINGLE"], [96, 4, 1, "", "TWO"]], "lmflow.utils.model": [[97, 5, 1, "", "check_homogeneity"], [97, 1, 1, "", "logger"]], "lmflow.utils.multimodal": [[98, 5, 1, "", "adapt_llava_model_to_lmflow_type"], [98, 5, 1, "", "load_llava_pretrain_model"], [98, 5, 1, "", "update_custom_config"]], "lmflow.utils.position_interpolation": [[100, 0, 0, "-", "llama_rope_scaled_monkey_patch"]], "lmflow.utils.position_interpolation.llama_rope_scaled_monkey_patch": [[100, 2, 1, "", "CondenseRotaryEmbedding"], [100, 5, 1, "", "replace_llama_with_condense"]], "lmflow.utils.position_interpolation.llama_rope_scaled_monkey_patch.CondenseRotaryEmbedding": [[100, 4, 1, "", "base"], [100, 4, 1, "", "dtype"], [100, 4, 1, "", "emb"], [100, 3, 1, "", "forward"], [100, 4, 1, "", "freqs"], [100, 4, 1, "", "inv_freq"], [100, 4, 1, "", "max_seq_len_cached"], [100, 4, 1, "", "ntk_ratio"], [100, 4, 1, "", "pi_ratio"], [100, 4, 1, "", "t"]], "lmflow.version": [[101, 1, 1, "", "__version__"]]}, "objnames": {"0": ["py", "module", "Python module"], "1": ["py", "data", "Python data"], "2": ["py", "class", "Python class"], "3": ["py", "method", "Python method"], "4": ["py", "attribute", "Python attribute"], "5": ["py", "function", "Python function"], "6": ["py", "property", "Python property"]}, "objtypes": {"0": "py:module", "1": "py:data", "2": "py:class", "3": "py:method", "4": "py:attribute", "5": "py:function", "6": "py:property"}, "terms": {"": [4, 13, 16, 51, 66, 102, 104, 105, 107, 110, 111, 112, 113, 114], "0": [2, 4, 5, 6, 8, 13, 22, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 36, 37, 38, 39, 41, 42, 43, 44, 45, 51, 52, 55, 57, 60, 61, 66, 94, 101, 102, 106, 111, 112, 113, 114], "000": 102, "0001": 44, "001": [25, 26, 28, 29, 31, 33, 36, 41, 43, 45], "002": [30, 38], "0025": 32, "004": 38, "00962": 36, "01": [37, 39, 45], "02155": [111, 112], "03265": 41, "04": 44, "0501": 102, "06": [27, 36, 45], "06677": 33, "08": [26, 28, 29, 30, 31, 32, 33, 37, 38, 39, 41, 43], "08217": [31, 43], "09843": 26, "1": [2, 3, 4, 5, 6, 13, 22, 26, 27, 31, 37, 42, 43, 44, 51, 52, 55, 61, 66, 94, 102, 104, 108, 109, 110, 114], "10": [36, 102, 111, 112], "100": [55, 61, 66, 94, 112], "10000": [100, 111, 112], "101": [13, 102], "1010": 13, "102": 13, "1024": [55, 111], "105": 102, "106": 102, "109": 102, "10k": 112, "112k": [111, 112], "113": 102, "119": 102, "12": [111, 112], "120b": 114, "121": 102, "123": 102, "124": 102, "125": [102, 111], "128": [94, 112], "129": 102, "12k": 112, "130": 102, "134": [74, 102], "135": 102, "139": 102, "13b": [102, 112], "140": 102, "141": 102, "146": 102, "147": 102, "149": 102, "15": 74, "150": 102, "151": 102, "153": 102, "155": 102, "16": [25, 57, 94, 112], "160": 102, "163": 102, "164": 102, "165": 102, "1659": 111, "167": 102, "1692": 111, "170": 102, "17192": 55, 
"17192v2": 55, "172": 102, "173": 102, "175": 102, "175b": 114, "176b": 114, "18": 114, "180": 102, "181": 102, "183": 102, "184": 102, "188": 102, "1902": 26, "1904": 36, "1908": [15, 41], "1935": 111, "1938": 111, "198": 102, "1b": 102, "1e": [25, 26, 27, 28, 29, 30, 31, 32, 33, 36, 37, 38, 39, 41, 43, 45, 55], "2": [5, 6, 32, 42, 55, 57, 75, 81, 94, 102, 104, 108, 109, 110, 114], "20": [66, 102, 108, 111], "200": 102, "2005": 111, "2006": [31, 43], "2020": [25, 111], "2023": [2, 102, 114], "2048": 100, "206": 102, "207": 102, "2088": 13, "21": 114, "211": 102, "213": 102, "214": 102, "215": 102, "219": 102, "22": 111, "220": 102, "2203": [111, 112], "2208": 33, "2211": 55, "222": 102, "224": 102, "228": 102, "23": 114, "237": 102, "24": [50, 111, 112, 114], "240": 102, "245": 102, "25": 114, "254": 102, "256": 111, "258": 102, "25mb": 114, "26": 114, "262": 102, "266": 102, "27": 114, "28": [2, 114], "280b": 114, "29": 114, "2e": 111, "2k": 111, "3": [94, 102, 104, 105, 108, 109, 114], "30": [74, 102, 111, 114], "30b": 114, "32": [94, 112, 114], "33": 102, "33b": 114, "34": 102, "35": [102, 114], "36": 114, "37": [102, 114], "38": 102, "39": [102, 111, 114], "3b": [102, 108, 111, 112], "3e": 111, "4": [69, 102, 104, 111, 114], "40": [94, 102, 114], "400": 111, "41": 102, "42": [5, 6, 102], "43": 114, "44": [102, 114], "447": 102, "46": 114, "48": [57, 94], "49": 114, "4bit": 4, "4rtemi5": 45, "5": [55, 61, 102, 108, 111, 112, 114], "50": 114, "51": 114, "512": 112, "5120": 44, "52": [102, 111, 112], "53": 102, "54": 114, "55": [102, 112, 114], "56": [102, 114], "57": [102, 114], "58": [102, 111, 112, 114], "59": [102, 114], "5k": [111, 112], "6": [37, 55, 102, 111, 114], "60": [102, 114], "600mb": 102, "61": [102, 114], "62": 102, "63": [102, 114], "64": [94, 102, 111, 112, 114], "65": [102, 111, 112, 114], "66": [102, 114], "67": [102, 114], "68": [102, 114], "69": [102, 111, 112, 114], "7": [8, 101, 102, 111, 114], "70": [102, 114], "71": [102, 111, 112], "72": [102, 114], "73": [102, 114], "74": [102, 114], "75": [102, 114], "7592": 13, "76": [36, 102], "767": 102, "77": 102, "78": [102, 114], "79": [102, 111, 112], "7b": [102, 104, 106, 108, 111, 112, 114], "7oemwynu": 112, "8": [4, 57, 102, 111, 114], "80": [94, 112], "800": 111, "81": [102, 112], "8186": 45, "82": 102, "82147": 111, "83": 102, "84": 112, "85": [102, 114], "86": 102, "87": 114, "88": [94, 102], "888888888": 57, "89": 102, "8b": 108, "8fc1rcf8": 112, "9": [25, 26, 29, 30, 31, 32, 34, 36, 38, 39, 41, 42, 45, 102, 105, 114], "90": 114, "900mb": 102, "92": 33, "95": [27, 102], "96": [94, 102], "965": 44, "97": 102, "98": 33, "99": [33, 44], "999": [25, 26, 29, 30, 31, 32, 34, 36, 38, 39, 41, 45], "9b": 102, "A": [1, 5, 6, 11, 12, 13, 14, 16, 21, 22, 25, 26, 31, 32, 34, 36, 37, 41, 42, 43, 44, 45, 47, 49, 50, 51, 52, 57, 61, 66, 73, 88, 96, 102, 104, 111, 113], "And": 7, "As": [32, 42, 102, 104, 111, 112, 113], "At": 60, "But": 111, "By": [4, 66, 106, 111, 114], "For": [4, 66, 71, 75, 94, 102, 104, 107, 108, 109, 111, 112, 113, 114], "If": [4, 61, 66, 88, 94, 102, 104, 105, 110, 111, 112, 114], "In": [66, 102, 104, 105, 106, 111, 112], "It": [4, 13, 14, 26, 31, 36, 41, 43, 45, 52, 66, 102, 104, 107, 111, 114], "Its": [102, 104, 114], "NOT": 55, "No": 102, "ONE": 107, "Of": [13, 16], "On": [41, 102], "One": [66, 102], "Or": [111, 112], "That": 111, "The": [1, 4, 5, 6, 7, 11, 12, 13, 14, 16, 22, 32, 34, 37, 42, 46, 50, 52, 53, 55, 57, 61, 66, 73, 75, 88, 94, 102, 104, 111, 112, 113, 114], "Then": [52, 106, 111], "There": 
[102, 111, 112], "These": [102, 107], "To": [32, 42, 66, 104, 107, 111, 112, 114], "Will": [66, 69], "With": [106, 113], "_": 65, "__call__": [7, 60, 67], "__distributed_infer": 58, "__filter_arg": 56, "__getitem__": [6, 7], "__infer": [13, 16, 58], "__init__": [13, 14, 107], "__len__": [5, 6, 7, 66], "__model_module_inject": 15, "__post_init__": [4, 75, 81], "__post_process_model_output": 58, "__prepare_dtyp": 15, "__prepare_inputs_for_infer": 13, "__prepare_inputs_for_vllm_infer": 13, "__prepare_model_config": 15, "__prepare_model_for_infer": 15, "__prepare_model_for_train": 15, "__prepare_model_for_vllm_infer": 15, "__prepare_model_post_process": 15, "__prepare_peft_config": 15, "__prepare_quant_config": 15, "__prepare_token": 15, "__prepare_training_arg": 51, "__repr__": 75, "__setstate__": [25, 26, 30, 33, 37, 38, 39, 41, 44], "__str__": 75, "__version__": [8, 101], "__vllm_infer": [13, 16, 58], "_activ": 15, "_add_sm_patterns_to_gitignor": 66, "_align_single_iter": 56, "_attn": 91, "_bwd_kernel": 94, "_bwd_kernel_one_col_block": 94, "_bwd_preprocess_do_o_dot": 94, "_bwd_store_dk_dv": 94, "_calc_response_length": 51, "_calc_reward_with_length_penalti": 51, "_channel_view": [31, 43], "_check_data_format": [5, 6], "_check_if_align": 47, "_check_if_tun": 49, "_clean_text": [57, 111], "_cosine_similar": [31, 43], "_discard_sampl": [57, 111], "_distributed_infer": 69, "_do_reward_model_infer": 56, "_do_single_dpo_align": 56, "_do_target_model_infer": 56, "_encod": [75, 81, 83, 87], "_encode_templ": [75, 81], "_ensure_id_list": [75, 81], "_evaluate_acc_with_acceler": 52, "_evaluate_acc_with_deepspe": 52, "_evaluate_nl": 52, "_evaluate_ppl": 52, "_flash_attn_backward": 94, "_flash_attn_forward": 94, "_foreach_mul_": [32, 42], "_fwd_kernel": 94, "_gather_and_numpifi": 66, "_get_batch_dataset_loc": 57, "_get_batch_dataset_top": 57, "_get_collator_with_removed_column": 66, "_get_eval_sampl": 66, "_get_output_dir": 66, "_get_train_sampl": 66, "_hp_search_setup": 66, "_infer": [58, 69], "_initialize_train": [50, 57], "_inner_training_loop": 66, "_internal_cal": 66, "_is_native_cpu_amp_avail": 66, "_issue_warnings_after_load": 66, "_layer_view": [31, 43], "_load_best_model": 66, "_load_dataset": [50, 57], "_load_from_checkpoint": 66, "_load_input_dataset": 57, "_load_optimizer_and_schedul": 66, "_load_rng_stat": 66, "_loggers_initi": 66, "_match": 52, "_maybe_log_save_evalu": 66, "_memory_track": 66, "_move_model_to_devic": 66, "_multi_tensor_adan": 33, "_nested_gath": 66, "_one_train": 66, "_pad_across_process": 66, "_parse_dpo_aligner_arg": 56, "_parse_reward_model_inference_arg": 56, "_parse_target_model_inference_arg": 56, "_prepare_attn_mask": 89, "_prepare_decoder_attention_mask": [90, 93], "_prepare_input": 66, "_project": [31, 43], "_provided_": 61, "_push_from_checkpoint": 66, "_remove_unused_column": 66, "_report_to_hp_search": 66, "_rotate_checkpoint": 66, "_sampling_paired_idx_from_reward": 51, "_sampling_paired_idx_from_rewards_fast": 51, "_save": [65, 66], "_save_checkpoint": [65, 66], "_save_tpu": 66, "_set_signature_columns_if_need": 66, "_signature_column": 66, "_single_tensor_adan": 33, "_sorted_checkpoint": 66, "_train_batch_s": 66, "_tune_save_checkpoint": 66, "_wrap_model": 66, "a1": 102, "a100": 94, "a2": 102, "a3": 102, "ab": [26, 31, 33, 36, 41, 43, 55, 111, 112], "abc": [10, 12, 19, 48, 75], "abil": [102, 114], "abl": [22, 66, 102, 111], "about": [4, 66, 102, 111, 112, 114], "abov": [102, 104, 107], "abspath": 110, "abstract": [13, 14, 16, 47, 49, 55, 58, 69, 75, 107], 
"accelerate_config_fil": 4, "accept": [14, 15, 21, 52, 66, 104, 111], "access": [66, 102, 104, 106, 114], "accid": 111, "accomplish": 111, "accord": [37, 66, 102, 111], "account": 114, "accuraci": [52, 102, 111, 112, 114], "achiev": [102, 111, 114], "acquir": [102, 114], "across": [75, 94], "activ": [105, 111, 114], "activate_model_for_infer": 15, "actual": [75, 102, 111], "ad": [108, 111, 112], "adabelief": [3, 4, 35], "adabound": [3, 4, 35], "adadelta": [3, 4, 35], "adagrad": [3, 4, 35], "adam": [3, 4, 25, 35, 36, 102], "adamax": [3, 4, 35], "adamp": [3, 4, 35, 43], "adamw": [32, 66, 109], "adamw_schedule_fre": [3, 4, 35], "adamwschedulefre": 32, "adan": [3, 4, 35], "adapt": [4, 13, 14, 25, 26, 33, 41, 45, 65, 66, 109, 111, 114], "adapt_llava_model_to_lmflow_typ": 98, "add": [7, 32, 42, 66, 73, 104, 105, 107, 111, 113], "add_callback": 66, "add_dataclass_attr_prefix": 73, "add_generation_prompt": 113, "add_special_start": [75, 81], "add_special_stopp": [75, 81], "add_special_token": [13, 16, 70, 71], "addit": [4, 7, 66], "addition": [102, 111, 112], "additional_stop_token_id": 4, "adjust": [55, 111, 112], "administ": 102, "administr": 111, "adobada": 102, "adopt": 111, "advanc": 111, "advantag": 102, "advers": 102, "advic": 114, "affect": 112, "afflict": 102, "afford": [102, 111], "after": [4, 7, 13, 16, 55, 69, 75, 102, 105, 107, 111, 113], "ag": [104, 107, 113], "again": 66, "ago": 112, "ai": [102, 104, 111, 112, 114], "aim": 114, "al": 102, "algorithm": [25, 26, 31, 33, 36, 41, 43, 45, 102, 108, 109], "alibi": [89, 94], "align": [4, 37, 47, 50, 51, 56, 57, 104, 109], "aligner_arg": [50, 51, 56, 57], "aligner_file_path": 51, "all": [15, 40, 61, 66, 94, 96, 102, 104, 106, 107, 108, 111, 112, 114], "allevi": 111, "allow": [13, 14, 66, 102, 108, 114], "almost": 107, "along": 66, "alpaca": [102, 104, 106, 108], "alpha": 57, "alreadi": [4, 66, 75, 102, 104, 111, 112], "alright": 112, "also": [4, 11, 12, 32, 42, 52, 60, 66, 102, 105, 107, 111, 112, 113, 114], "altern": [111, 113], "although": [102, 111, 112], "alwai": [66, 102, 111], "am": [102, 104, 107, 111, 113], "america": [111, 112], "american": 102, "among": 114, "amount": [111, 112], "amsbound": 26, "amsgrad": [25, 39], "an": [4, 10, 11, 12, 15, 19, 34, 48, 66, 75, 94, 103, 104, 107, 108, 111, 112, 114], "analog": 37, "anatomi": 114, "ani": [7, 13, 16, 60, 66, 67, 102, 104, 111, 112, 113, 114], "anim": 112, "announc": 114, "anoth": [66, 102, 111], "another_data": 104, "answer": [13, 14, 50, 88, 102, 104, 105, 111, 112, 114], "answer_extract": 88, "answer_neg": 112, "answer_posit": 112, "answer_typ": [4, 52, 88, 105, 106], "anthropologi": 111, "anyon": 114, "anyth": 111, "api": [1, 102, 114], "api_doc": 66, "app": 66, "appear": 112, "append": 75, "append_messag": 96, "appl": 102, "appli": [4, 69, 75, 102, 104, 111, 114], "applic": [37, 66, 102, 114], "apply_chat_templ": [4, 13, 69], "appreci": 111, "approach": 114, "appropri": [66, 102, 111, 112], "approxim": 55, "ar": [13, 14, 16, 50, 60, 66, 73, 75, 94, 102, 104, 105, 106, 107, 111, 112, 113, 114], "arbitrari": 94, "arc_": 102, "arc_c": 102, "arc_easi": 102, "arch_typ": 4, "architectur": [4, 66], "area": [102, 112, 114], "aren": 111, "arena": 102, "arg": [3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20, 21, 22, 23, 46, 47, 49, 51, 53, 55, 56, 57, 58, 59, 61, 63, 65, 66, 69, 70, 71, 80, 88, 96, 97, 110], "argmax": 55, "argument": [4, 5, 6, 13, 14, 15, 16, 21, 25, 26, 31, 32, 34, 36, 37, 41, 42, 43, 45, 52, 53, 55, 57, 58, 59, 66, 73, 75, 88, 104, 110], "argv": 110, "around": [13, 14, 
102], "arr": 102, "arrai": [66, 102], "arriv": 102, "articl": 102, "arxiv": [26, 31, 33, 36, 41, 43, 55, 111, 112], "asada": 102, "ask": [107, 111], "aspect": 102, "assign": [4, 61, 111], "assist": [75, 81, 102, 104, 105, 107, 111, 112, 113], "assistant_formatt": [75, 81, 107], "assistant_reply_0": 113, "assistant_reply_1": 113, "assistant_response_1": 104, "assistant_response_1_bad": 104, "assistant_response_1_good": 104, "assistant_response_2": 104, "assistant_response_2_bad": 104, "assistant_response_2_good": 104, "associ": 114, "assum": [112, 114], "at_init": 66, "atmospher": 111, "atomic_add": 94, "attack": 111, "attain": 114, "attempt": 102, "attend": 102, "attent": [13, 14, 22, 94], "attention_mask": [13, 22, 23, 89, 90, 91, 93, 112], "attn": 71, "auk": 112, "authent": 102, "author": [102, 114], "auto": [3, 4, 15], "auto_model": [3, 17], "auto_pipelin": [3, 54, 110], "autoapi": 3, "autoargu": [4, 110], "autocast": 66, "autocast_smart_context_manag": 66, "autograd": 94, "automat": [4, 9, 37, 46, 66, 103, 111], "automodel": 9, "automodelforsequenceclassif": 15, "autopipelin": [46, 110], "autoregressive_sampl": 55, "autotoken": 67, "avail": [66, 102, 104, 105, 114], "averag": [102, 114], "avoid": [22, 102, 111], "awar": [102, 114], "b": [44, 88, 105], "back": [4, 107, 111], "backbon": 114, "backend": [5, 6, 13, 14, 15, 16, 66, 94], "backend_dataset": [5, 6], "backward": [66, 94], "bad": 102, "bai": 102, "bake": [102, 111], "bar": [11, 12], "barbecu": 102, "barrel": 102, "base": [3, 4, 6, 7, 10, 11, 12, 13, 14, 15, 16, 19, 20, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 36, 37, 38, 39, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 55, 57, 58, 59, 61, 65, 66, 68, 69, 80, 81, 83, 87, 88, 94, 96, 100, 102, 107, 108, 111, 112, 114], "base_align": [3, 50, 51, 54, 57], "base_lr": 26, "base_model": [3, 11, 12, 15, 17, 20, 22], "base_pipelin": [3, 47, 49, 52, 54, 55, 58, 69], "base_trainer_class": 53, "base_tun": [3, 53, 54], "basealign": [47, 50, 51, 57], "basemodel": [10, 11, 12, 15, 20, 22, 57], "basepipelin": [47, 48, 49, 50, 51, 52, 55, 58, 69], "basetun": [47, 49, 53], "basic": [66, 112], "batch": [13, 22, 36, 37, 52, 60, 61, 66, 88, 94, 111, 112], "batch_input": 57, "batch_siz": [22, 58, 61, 66, 69, 88, 94, 111], "batchliz": [55, 88], "battl": 102, "bbq": 102, "beam": 4, "bean": 102, "beauti": 102, "becaus": [66, 102, 111], "been": [26, 31, 36, 41, 43, 45, 66, 94, 102, 111], "befor": [32, 42, 66, 104, 107, 111, 113, 114], "begin": [13, 16, 32, 37, 42, 104, 107, 111, 113], "begin_of_text": [104, 107, 113], "beginn": [107, 114], "behavior": 66, "behind": 102, "being": [66, 75, 81, 102, 111, 112], "belief": [25, 111], "believ": 114, "below": [15, 104, 105, 107, 114], "ben": 102, "benchamrk": 105, "benchmark": [103, 109], "benchmarkingargu": 4, "benefit": 114, "bert": 36, "besid": 102, "best": [51, 65, 66, 102, 111, 112], "bestrun": 66, "beta": [4, 25, 26, 29, 30, 31, 32, 33, 34, 36, 37, 38, 39, 41, 44, 45, 61], "beta1": 33, "beta2": 33, "beta3": 33, "better": [102, 108, 111, 114], "between": [75, 94, 102, 107, 108], "beyond": 41, "bf16": 4, "bg677mxa": 112, "bia": [4, 94], "bias": 52, "bias_correction1": 33, "bias_correction2": 33, "bias_correction3_sqrt": 33, "bias_typ": 94, "bigger": 111, "bin": 66, "biologi": [102, 114], "bit": [4, 94, 111, 112], "blank": [11, 12], "blend": 102, "bleu": 66, "blip2config": 22, "blip2forconditionalgener": 22, "block": [4, 53, 66, 70, 71], "block_headdim": 94, "block_m": 94, "block_n": 94, "block_siz": [4, 52, 70, 71], "blocking_pair": 71, 
"blocking_text_to_textlist": 71, "blog": 114, "bloom": [102, 114], "bloom_flash_attent": [3, 92], "bloomforcausallm": 13, "bo": [75, 104, 107, 113], "bold": 102, "bolt": 37, "book": 111, "bool": [4, 5, 6, 7, 13, 15, 16, 22, 26, 31, 33, 36, 37, 43, 44, 50, 51, 55, 58, 60, 61, 66, 67, 69, 73, 75, 81, 88, 89, 90, 93, 96, 97], "boolean": 4, "boolq": 102, "booltensor": 89, "bos_token": [75, 107, 113], "both": [66, 94, 102, 111, 114], "bound": 26, "bowl": 102, "branch": [4, 105], "brand": 111, "break": [102, 112], "breakthrough": 102, "brief": [11, 12], "bring": [102, 111], "brisket": 102, "broadcast": 94, "brought": 102, "brown": 111, "buffer": 4, "bug": 111, "bui": 111, "build": [102, 111, 112, 114], "build_dataset": 112, "build_vision_tow": [23, 24], "built": 111, "burden": 111, "burrito": 102, "butter": 111, "byol": 37, "c": [88, 102], "cach": [4, 5, 6, 66], "cache_dir": [4, 50], "cache_en": 66, "cache_key_seqlen_k": 94, "cache_key_seqlen_q": 94, "cake": 102, "calam": 102, "call": [13, 14, 32, 42, 66, 102, 107, 111], "call_model_init": 66, "callabl": [25, 32, 34, 42, 61, 66], "callback": [61, 66], "callback_handl": 66, "calm": 102, "can": [4, 13, 14, 32, 42, 66, 73, 75, 102, 104, 105, 106, 107, 111, 112, 114], "can_return_loss": 66, "cannot": [15, 102], "cant": [111, 112], "capabl": 111, "capac": 114, "capit": 102, "capitol": 102, "caption": 22, "captur": 44, "card": 66, "care": 111, "carefulli": [102, 111], "caribbean": 111, "carn": 102, "carnita": 102, "carri": [102, 111], "carrier": 102, "casa": 102, "case": [60, 65, 66, 94, 102, 106, 111], "casual": 102, "cat": 111, "categori": 111, "cater": 114, "caus": [102, 111], "causal": 94, "causallmoutputwithpast": 22, "caution": 94, "cc": 45, "cd": [104, 105, 106, 108, 111], "cell": 111, "centuri": 111, "certain": [102, 111, 112], "cevich": 102, "chain": 102, "challeng": [102, 111, 112], "chang": [94, 105, 111], "changelog": [2, 114], "charact": 111, "chat": [4, 69, 104], "chatbot": [102, 104, 107, 113], "chatbot_typ": 55, "chatglm": [3, 81], "chatglm3": 104, "chatglm3_templ": [76, 81], "chatgpt": [102, 107, 114], "chatml": [3, 81, 104], "chatml_templ": [77, 81], "cheap": 102, "check": [5, 6, 66, 102, 104], "check_homogen": 97, "checkout": [105, 111], "checkpoint": [4, 32, 42, 65, 66, 102, 109, 111], "checkpoint_fold": 66, "checkpoint_path": 98, "checkpoint_prefix": 66, "chees": 102, "chemistri": 102, "chesapeak": 102, "chess": 102, "chessboard": 102, "chicken": 102, "child": 111, "chile": 102, "chili": 102, "chines": 102, "chip": 111, "chocol": [102, 111], "choic": [4, 88, 102, 111, 112], "choos": [4, 66, 102, 111], "chop": 111, "chosen": [50, 60, 61, 102, 104, 105, 111, 112], "chosen_attention_mask": 112, "chosen_input_id": 112, "chosen_reward": [61, 112], "circumst": [102, 111], "citi": 102, "civil": 111, "clamp_valu": 36, "class": [73, 107], "class_prefix": 73, "classfoo": [11, 12], "classic": 102, "classif": [13, 14, 66], "classmethod": [5, 6, 9, 46], "clean_text": 111, "clear": 111, "clearer": 112, "clearli": 111, "cli": 4, "clinic": 114, "clip_encod": [3, 24], "clip_global_grad_norm": 33, "clipvisiontow": 23, "clone": [105, 114], "closur": [25, 26, 27, 28, 29, 30, 31, 32, 34, 36, 37, 38, 39, 41, 42, 43, 44, 45], "clovaai": [31, 43], "co": [4, 111], "coal": 102, "coast": [102, 111], "cob": 102, "code": [5, 6, 7, 23, 26, 31, 36, 37, 41, 43, 44, 45, 55, 66, 102, 114], "code_exec": 55, "coleslaw": 102, "collard": 102, "collat": [7, 60, 66], "collect": [102, 111, 114], "collection_strategi": [4, 111], "colleg": 114, "coloni": 111, "columbia": 
102, "column": 66, "column_nam": [70, 71], "com": [66, 102, 105, 111, 114], "comal": 102, "combin": 105, "come": [66, 102], "command": [105, 108, 111, 114], "comment": 107, "commerci": [111, 114], "commit": [4, 66], "commit_messag": 66, "common": [3, 95, 102, 104, 114], "commonli": [15, 74, 102, 104, 114], "commun": [102, 111, 114], "compani": 102, "compar": [102, 111, 112, 114], "comparison": [102, 111], "compet": [102, 114], "competit": 102, "compil": 94, "complet": [66, 111, 113, 114], "complex": [102, 112], "complic": 112, "compliment": 111, "compon": [75, 81, 107, 114], "comprehens": 102, "compress_list": 58, "comput": [61, 66], "compute_loss": [66, 68], "compute_loss_context_manag": 66, "compute_metr": [61, 66, 68], "compute_object": 66, "concaten": [66, 102, 104], "concept": [104, 107, 113], "concis": 112, "conda": [105, 114], "condens": 15, "condenserotaryembed": 100, "condit": [22, 94, 102, 104, 111], "confer": 102, "confid": 94, "config": [4, 15, 22, 23, 52, 55, 98, 110, 112], "config_additional_arg": 16, "config_nam": 4, "config_overrid": 4, "configu": [13, 14, 16], "configur": [4, 15, 111], "conflict": 102, "confus": 104, "conjectur": 102, "connot": 102, "consequ": 102, "consequenti": 114, "consist": [37, 111, 112], "consol": 52, "constant": [3, 95], "constexpr": 94, "constraint": 114, "constructor": 52, "contain": [3, 4, 5, 6, 11, 12, 13, 22, 52, 53, 55, 57, 58, 59, 61, 66, 69, 75, 102, 104, 111], "content": [104, 107, 112, 113], "context": [55, 66, 102, 111], "context_window": 52, "contribut": 111, "contributor": [2, 114], "control": [4, 5, 6, 55, 65, 66], "controller_heart_beat_expir": 74, "conv_llama_2": 96, "conv_llava_llama_2": 96, "conv_llava_plain": 96, "conv_llava_v0": 96, "conv_llava_v0_mmtag": 96, "conv_llava_v1": 96, "conv_llava_v1_mmtag": 96, "conv_mpt": 96, "conv_templ": 96, "conv_vicuna_v0": 96, "conv_vicuna_v1": 96, "conveni": [13, 14, 75, 102, 111, 114], "convent": 114, "convers": [4, 5, 70, 71, 96, 102, 109, 111, 112, 114], "conversation_dataset_descript": 74, "conversation_id": [75, 81, 104], "conversation_role_nam": 74, "conversation_templ": [3, 4, 70, 71, 95, 104, 107, 108], "conversation_tokenize_funct": [70, 71], "conversationtempl": [70, 71, 75, 80, 81, 83, 87, 107, 113], "convert": [50, 51, 55, 60, 66, 75, 81, 88, 106], "convert_llama_weights_to_hf": 106, "convert_to_paired_dataset": 51, "convert_tokens_to_id": 75, "convienc": 104, "convolut": 37, "cook": [102, 111], "cooki": 111, "copi": [23, 73, 96, 111], "copyright": 106, "core": 66, "corn": 102, "corp": 111, "corpora": 102, "corpu": 102, "correct": [9, 104, 111, 112], "correctli": [65, 102, 111], "correl": 102, "correspond": [13, 69, 102, 104], "could": [102, 111], "count": [66, 102], "countri": 102, "court": 102, "cozi": 102, "crab": 102, "creat": [3, 5, 6, 10, 12, 19, 48, 52, 60, 65, 66, 73, 75, 107, 109, 111, 112, 114], "create_copied_dataclass": 73, "create_customized_optim": 53, "create_dataload": [52, 55], "create_from_dict": [5, 6], "create_model_card": 66, "create_optim": 66, "create_optimizer_and_schedul": 66, "create_schedul": 66, "create_studi": 66, "creation": 111, "creator": 111, "crispi": 102, "criteria": 111, "critic": 102, "cross": 94, "cross_entropi": 61, "crowd": 102, "crucial": [112, 114], "ctx": 94, "cuda": [88, 94], "cuda_visible_devic": 106, "cue": 114, "cuisin": 102, "culinari": 102, "cultur": [102, 111], "cumul": 55, "cup": 111, "current": [4, 15, 66, 102, 104, 105, 108, 113], "current_flo": 66, "curv": 111, "custom": [4, 15, 66, 111, 112], "custom_model": [4, 14], 
"custom_vision_model": [4, 22], "customautovision2seqmodel": 22, "customized_cache_dir": 4, "customized_optim": 4, "customized_optim_arg": 4, "custommultimodaldataset": [6, 7], "cybertronai": 36, "d": [88, 94, 102, 111, 112], "dahoa": [111, 112], "dai": 111, "damag": [102, 111, 114], "dampen": [37, 43], "danger": 102, "dark": 102, "data": [4, 5, 6, 7, 13, 16, 21, 52, 53, 57, 58, 66, 69, 75, 81, 88, 102, 105, 106, 108, 111, 112, 114], "data_1": 104, "data_2": 104, "data_arg": [5, 6, 7, 46, 50, 51, 52, 53, 55, 56, 57, 58, 59, 69, 70, 71, 110], "data_col": [53, 59, 61, 66], "data_collect": 111, "data_dict": [6, 7], "data_dir": 50, "data_fil": 112, "data_inst": 73, "data_root": 50, "data_util": [3, 13, 16, 58, 69, 95], "dataclass": [4, 73], "dataclass_object": 73, "datacol": [61, 66], "datacollatorforsuperviseddataset": 7, "datacollatorwithpad": 66, "dataload": [55, 57, 66, 88], "dataset": [1, 3, 4, 8, 13, 14, 16, 21, 47, 49, 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 66, 69, 70, 71, 75, 88, 102, 107, 108, 109, 110, 112, 114], "dataset_arg": 66, "dataset_config_nam": 4, "dataset_description_map": 74, "dataset_list": 56, "dataset_nam": [4, 105], "dataset_path": [4, 5, 6, 7, 104, 106, 107, 108, 111, 112], "dataset_path_list": 4, "dataset_s": 55, "dataset_tag": 66, "dataset_typ": [5, 51], "datasetargu": [4, 5, 6, 7, 51, 52, 53, 55, 56, 57, 58, 59, 69, 70, 71, 110], "date": 111, "ddp": 4, "de": 102, "deactiv": 15, "deactivate_model_for_infer": 15, "deadli": 102, "deal": 94, "death": 111, "debia": 36, "debug": 4, "decai": 37, "decid": [102, 105], "declin": 111, "decod": [13, 14, 52, 69, 88, 104], "decoder_model": [3, 13, 17], "decodermodel": [11, 13], "decor": 4, "decreas": 112, "dedic": 111, "deep": [33, 36, 111], "deepseek": [3, 81, 104], "deepseek_templ": [78, 81], "deepspe": [4, 13, 14, 16, 66, 104, 105, 106, 110], "deepspeed_arg": 104, "def": [102, 110, 111, 112], "default": [4, 5, 6, 13, 14, 16, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 36, 37, 38, 39, 41, 42, 43, 44, 45, 55, 66, 69, 73, 88, 111], "default_callback": 66, "default_col": 66, "default_compute_object": 66, "default_convers": 96, "default_data_col": 66, "default_hp_space_optuna": 66, "default_hp_space_rai": 66, "default_hp_space_sigopt": 66, "default_im_end_token": 74, "default_im_start_token": 74, "default_image_patch_token": 74, "default_image_token": 74, "default_label_nam": 66, "default_progress_callback": 66, "defin": [1, 4, 5, 6, 7, 34, 37, 66, 104, 107, 113], "definit": 111, "degenerated_to_sgd": 25, "degrad": 102, "delay_load": 23, "delici": 102, "delta": [31, 43, 94], "demo": 102, "demo_example_in_prompt": 4, "democrat": 114, "demonstr": [102, 111, 114], "denot": 37, "depart": [102, 111], "depend": [4, 66, 102, 111], "deprec": [41, 66], "deriv": 4, "desc": 110, "describ": 102, "descript": [11, 12, 66, 109], "design": [46, 114], "desir": [66, 111, 112], "despit": 114, "detail": [66, 102, 105, 109, 111, 112, 113], "detect": 111, "determin": [66, 102], "detoken": [4, 13], "dev20221202": 94, "develop": [1, 102, 104, 107, 111, 113], "devic": [4, 13, 14, 15, 16, 23, 66, 100, 111], "diao": [0, 114], "dict": [4, 5, 6, 13, 15, 51, 55, 60, 61, 66, 67, 70, 71, 73, 75, 81, 83, 87, 88, 96, 104, 107], "dict_obj": [5, 6], "dictionari": [5, 6, 34, 50, 52, 53, 66], "did": [111, 112], "didn": [111, 112], "diff": 106, "differ": [4, 5, 6, 13, 14, 51, 66, 75, 94, 96, 102, 106, 107, 111, 112, 113], "difficult": 102, "difficulti": 102, "dim": 100, "dimens": 94, "dinosaur": [111, 112], "diplomaci": 102, "diplomat": 111, "dir": [13, 14, 16, 105], 
"direct": [66, 104, 114], "directli": [75, 102, 105, 106, 114], "director": 111, "directori": [4, 13, 14, 16, 52, 66, 104, 111], "disabl": 111, "disable_dropout": 61, "disable_group_text": 4, "disast": [102, 111], "discard": 111, "discard_sampl": 111, "diseas": 102, "dish": [102, 111], "disk": 4, "distinct": 111, "distress": 102, "distribut": [4, 55, 66], "distributed_inference_num_inst": 4, "distributeddataparallel": 66, "district": 102, "divers": [4, 102, 111], "divis": 111, "dk": 94, "dk_ptr": 94, "do": [15, 66, 75, 94, 102, 104, 107, 111, 112, 113, 114], "do_dpo_align": 4, "do_grad_sc": 66, "do_response_gener": 4, "do_rope_sc": [4, 15], "do_sampl": 4, "do_scor": 4, "do_train": 15, "doc": 66, "docstr": [11, 12, 107], "document": [3, 66, 102], "doe": [34, 66, 102, 111, 112, 113, 114], "doesn": [15, 94, 102, 111], "dog": 102, "domain": [102, 114], "don": [7, 60, 65, 71, 104, 107, 111, 112, 113], "done": [94, 110], "dong": [0, 114], "donut": 102, "door": 102, "double_qu": 4, "down": [31, 43, 102], "download": [4, 102, 104, 105, 106, 108, 111], "dpo": [4, 51, 61, 104], "dpo_align": [3, 54], "dpo_loss": 61, "dpoalign": 50, "dpoalignerargu": 4, "dpotrain": 61, "dpov2": 4, "dpov2_align": [3, 54], "dpov2_dataprocessor": [3, 62], "dpov2_train": [3, 62], "dpov2align": 51, "dpov2alignerargu": [4, 51, 56], "dpov2train": 61, "dq": 94, "draft": [55, 66], "draft_config": 55, "draft_model": 55, "draft_model_arg": 55, "drink": 111, "drl": 111, "drop": [5, 6, 111], "drop_inst": [5, 6], "drop_invalid": [5, 6], "dropout": [66, 94], "ds_config": [4, 13, 14, 15, 16, 105, 106], "dtype": [4, 15, 23, 66, 100], "due": [94, 102, 106], "dummi": [3, 4, 35], "dummy_featur": 23, "dure": [4, 66, 102, 112], "dv": 94, "dv_ptr": 94, "dynam": [26, 66], "e": [4, 55, 57, 66, 88, 94, 104, 105, 111, 114], "each": [4, 13, 55, 61, 66, 69, 104, 107, 111, 112], "earth": 112, "easi": [102, 107, 111, 112], "easier": 66, "easili": [105, 112, 114], "east": 102, "econom": 102, "edit": [111, 112], "educ": 111, "effect": [102, 111, 112, 114], "effici": [1, 102, 108, 111, 114], "effortlessli": 102, "eg": 107, "egg": 111, "either": [66, 104, 111], "el": 102, "elabor": [111, 112], "element": [13, 60, 66, 69, 102, 114], "eleuth": [102, 105], "eleutherai": [102, 105, 111], "elif": 113, "elimin": 112, "ellipsi": 90, "els": [66, 110, 111, 112, 113], "emb": 100, "embed": [15, 22], "emerg": 102, "emphas": 114, "employe": 111, "empti": [75, 104], "empty_no_special_token": 104, "empty_no_special_tokens_templ": [75, 81], "empty_templ": [75, 81], "emptyformatt": 75, "en": [66, 102], "en_multi_round_chat": 102, "en_singe_round_chat": 102, "enabl": [4, 114], "enable_decode_inference_result": [4, 69], "enable_distributed_infer": [4, 13, 16, 58, 69], "enchilada": 102, "encod": [13, 14, 52, 75, 81, 104], "encode_convers": [75, 80, 81], "encode_imag": 23, "encoded_pair": [75, 81], "encoder_attention_mask": 90, "encoder_decoder_model": [3, 14, 17], "encoder_hidden_st": 90, "encoderdecodermodel": [12, 14], "encourag": [104, 111], "end": [4, 13, 16, 37, 66, 104, 107, 113], "end_header_id": [104, 113], "end_of_turn": [104, 113], "end_str": 55, "endfor": 113, "endgam": 102, "endif": 113, "endoftext": [104, 113], "endpoint": 66, "endswith": 110, "enemi": 102, "engin": [102, 104], "england": 111, "english": 102, "enhanc": 114, "enjoi": [106, 111], "enough": [4, 112], "ensur": 37, "entail": 102, "enter": 102, "entir": [112, 114], "entri": 66, "enum": 96, "environ": 111, "eo": [4, 75, 104, 107], "eos_pad": 4, "eos_token": [75, 107, 113], "eos_token_id": 69, 
"eot_id": [104, 113], "ep": [25, 26, 27, 28, 29, 30, 31, 32, 33, 36, 37, 38, 39, 41, 43, 45], "epoch": [66, 102, 111, 112], "eq": 37, "equal": [60, 61, 66, 102], "equat": 37, "err": 105, "error": [66, 112], "escap": 102, "especi": 102, "essai": 102, "essenti": 102, "estim": [66, 112], "etc": [13, 14, 16, 21, 65, 66, 102, 104, 111, 114], "ethic": 102, "ethiopian": 102, "ethnic": 102, "evacu": 102, "eval": [32, 42, 61, 66, 111, 112], "eval_bleu": 66, "eval_dataset": [51, 61, 66, 112], "eval_dataset_path": 4, "eval_pr": 68, "eval_step": [4, 111], "evalloopoutput": [61, 66], "evalpredict": 66, "evalu": [3, 4, 32, 42, 54, 66, 103, 104, 106, 111, 112, 114], "evaluate_block_s": 4, "evaluation_loop": 66, "evaluator_arg": 52, "evaluatorargu": [4, 52], "even": [102, 104, 112], "even_headdim": 94, "even_m": 94, "even_n": 94, "event": 102, "eventu": 111, "ever": 111, "everett": 111, "everi": [57, 66, 104, 107], "everyon": 102, "exact": [105, 111], "exactli": [111, 112], "examin": 111, "exampl": [4, 7, 11, 12, 61, 66, 70, 71, 75, 81, 88, 94, 102, 104, 105, 106, 107, 108, 113, 114], "example_dataset": 104, "excel": 111, "except": 114, "exchang": 111, "excit": 114, "exclud": 37, "execut": [66, 105], "exist": [66, 104, 107, 113], "exp_avg": 33, "exp_avg_diff": 33, "exp_avg_sq": 33, "expans": 114, "expect": [66, 102], "expens": 102, "experi": [66, 102, 111, 114], "experiment": [94, 102], "expert": 114, "explain": 111, "explan": 102, "explanation_in_prompt": 4, "exploit": 111, "explor": [111, 112], "explos": 102, "export": [11, 12], "extend": 37, "extens": [1, 111, 114], "extern": [66, 102], "extinct": 112, "extra": 111, "extract": [4, 88, 111], "extrem": 114, "face": [5, 6], "facebook": 108, "facebookresearch": 106, "facilit": 111, "factor": [102, 111], "factual": 102, "fail": 102, "falafel": 102, "fals": [4, 7, 13, 14, 15, 16, 22, 23, 25, 26, 31, 33, 36, 37, 39, 43, 44, 50, 51, 55, 58, 60, 61, 66, 68, 69, 75, 81, 89, 90, 91, 93, 94, 96, 105, 111, 112, 113], "famou": 102, "far": 111, "fashion": [37, 66], "fast": 4, "faster": [33, 94, 102], "fate": 102, "featur": [60, 66, 67, 102], "feature_select": 23, "fed": [13, 14, 66], "feder": 102, "feed": [57, 66, 104], "feedback": [111, 112], "feel": [107, 111], "ferret": 102, "few": [102, 111, 114], "field": [4, 73, 104, 111, 112, 114], "field_prefix": 73, "fifth": 102, "fight": 111, "figur": [102, 111], "file": [4, 5, 6, 52, 66, 88, 104, 107, 109, 110, 111, 114], "file_nam": 88, "file_path": [5, 6], "fill": [102, 104, 113], "filter": [102, 112], "final": [65, 102], "final_lr": 26, "financi": 102, "find": [102, 111, 112], "fine": [7, 13, 14, 66, 111, 112, 114], "finetun": [1, 3, 54, 59, 102, 104, 106, 107, 114], "finetune_part": 4, "finetuned_from": 66, "finetuned_galactica_lora": 108, "finetuned_gpt2": 108, "finetuned_llama2_7b": 108, "finetuned_llama2_7b_lisa": 108, "finetuned_llama2_7b_lora": 108, "finetuned_llama3_8b": 108, "finetuner_arg": [53, 59], "finetunerargu": [4, 53], "fingerprint": [5, 6], "finish": [66, 105, 112], "fire": [102, 111], "firefight": 111, "fireplac": 111, "first": [1, 60, 66, 75, 81, 102, 105, 106, 111, 112, 113], "fish": 102, "fit": [37, 102], "fix": [4, 94], "fixed_decai": 25, "flag": 22, "flame": 102, "flammabl": 102, "flash_attent": [3, 95], "flash_attn_func": 94, "flash_attn_kvpacked_func": 94, "flash_attn_qkvpacked_func": 94, "flashattent": 94, "flashattnfunc": 94, "flashattnkvpackedfunc": 94, "flashattnqkvpackedfunc": 94, "flatbread": 102, "flatten_list": 58, "flavor": 102, "flexibl": 107, "float": [4, 5, 6, 15, 16, 21, 26, 31, 33, 
34, 36, 37, 41, 43, 45, 51, 52, 55, 58, 61, 66, 88, 111], "float32": 66, "float_onli": 5, "float_only_dataset_descript": 74, "floating_point_op": 66, "floattensor": [22, 61, 90], "florida": 111, "flour": 111, "folder": 65, "follow": [37, 50, 66, 104, 105, 107, 111, 112, 114], "foo": [11, 12], "food": [102, 111], "foreach": [32, 33, 42], "foreign": 111, "forget": 102, "fork": 105, "form": [53, 66, 111], "format": [5, 6, 50, 55, 73, 75, 81, 107, 109, 113], "formatt": [75, 81], "former": [111, 112], "fort": [111, 112], "fortun": [111, 112], "forward": [22, 23, 66, 89, 90, 91, 93, 94, 100], "found": [66, 102, 111, 112, 114], "foundat": [102, 111, 114], "four": [104, 112], "fox": [3, 81], "fox_templ": [79, 81], "fp16": 4, "framework": [1, 103, 111, 112], "free": [32, 42, 107], "freq": 100, "frequent": 75, "fresh": 102, "fri": [102, 111], "friend": 102, "friendli": 114, "from": [4, 5, 6, 13, 14, 23, 25, 37, 44, 51, 52, 55, 66, 73, 88, 94, 102, 106, 107, 110, 111, 112, 113, 114], "from_dict": [5, 6], "from_pretrain": 66, "front": 7, "frustrat": 111, "fsdp": 66, "fuel": 111, "full": [13, 14, 16, 65, 102, 109, 111, 112, 114], "fun": [111, 112], "function": [4, 11, 12, 21, 22, 57, 66, 111, 112, 114], "functionbar": [11, 12], "funtion": 88, "furnitur": [111, 112], "further": [4, 102, 111, 114], "futur": [15, 22, 111], "g": [4, 37, 66, 104, 111], "g_": 37, "ga": 102, "gain": [102, 111, 114], "galactica": [108, 114], "game": 111, "gamma": [26, 55], "gap": 102, "gather": [66, 111], "gear": 111, "gemma": [3, 81, 104], "gemma_templ": [80, 81], "gemmaconversationtempl": 80, "gener": [3, 4, 7, 13, 14, 16, 20, 22, 52, 55, 57, 66, 69, 88, 94, 102, 106, 109, 111, 112, 114], "generate_during_ev": 61, "generate_kwarg": 22, "generation_kwarg": 57, "genet": 114, "georg": 102, "get": [9, 21, 57, 66, 102, 105, 106, 109, 110], "get_backend": [5, 6], "get_backend_dataset": [5, 6], "get_backend_model": [14, 15, 22], "get_batch_loss_metr": 61, "get_batch_metr": 61, "get_data_arg": [5, 6], "get_eval_dataload": 66, "get_fingerprint": [5, 6], "get_imag": 96, "get_linear_schedule_with_warmup": 66, "get_max_length": [14, 15, 110], "get_model": 9, "get_optimizer_cls_and_kwarg": 66, "get_paired_dataset": 50, "get_peft_without_qlora": 13, "get_pipelin": [46, 110], "get_pipeline_args_class": [4, 110], "get_prompt": 96, "get_reward_funct": 111, "get_test_dataload": 66, "get_token": [14, 15, 22], "get_train_dataload": 66, "get_typ": [5, 6], "giant": 112, "girl": 111, "girlfriend": 111, "git": [66, 105, 114], "github": [15, 102, 105, 111, 114], "gitignor": 66, "give": [102, 111, 112], "given": [4, 5, 6, 21, 52, 53, 55, 57, 58, 61, 66, 102, 105, 111, 112], "glad": [104, 111], "global": [66, 111], "gmask": [104, 113], "go": [66, 102, 106, 111], "goal": [111, 114], "golden": 111, "good": [102, 104, 106, 107, 111, 112], "goodi": 111, "gopher": 114, "govern": 102, "gpt": [102, 108, 111, 112, 114], "gpt2": [108, 111], "gpt2_flash_attent": [3, 92], "gpt2forcausallm": 13, "gpt3": 114, "gpt4_en_instruct": 102, "gpt_neo_flash_attent": [3, 92], "gptneoforcausallm": 13, "gpu": [4, 13, 14, 15, 16, 66, 69, 108, 111], "gpu_support_flash_attent": 13, "grad": [31, 33, 43], "grad_averag": 39, "grade": 111, "gradient": [25, 26, 37], "gradient_accumulation_step": 4, "gradient_checkpoint": 4, "gradient_checkpointing_use_reentr": 4, "grammar": 112, "great": [111, 112], "greater": [66, 102], "greatest": 111, "green": 102, "grill": 102, "groundtruth": 52, "group": [34, 53, 66, 110], "group_rank": 74, "group_text": [53, 110], "group_texts_batch_s": 4, 
"grow": 114, "guarante": [75, 81, 114], "guess": [111, 112], "guest": 111, "guid": [109, 111, 112], "ha": [7, 13, 14, 26, 31, 36, 41, 43, 45, 52, 66, 75, 94, 102, 104, 107], "had": [107, 111, 112], "half": 111, "hallucin": 102, "hamburg": 102, "hand": [102, 111, 114], "handel": [70, 71], "handl": [60, 66, 75, 81, 111], "hanz": [0, 114], "happi": [102, 111], "har": [102, 105], "hard": [102, 111, 112], "hardship": 102, "hardwar": 111, "harm": 111, "has_imag": 7, "has_placehold": 75, "hasattr": [32, 42], "hasn": 66, "hate": 111, "have": [4, 15, 66, 94, 102, 104, 105, 106, 107, 111, 112, 113, 114], "hawaii": 102, "hazard": 111, "he": 111, "head": [94, 111], "head_mask": [89, 90, 91], "headdim": 94, "health": 102, "hear": 104, "heard": 111, "hearti": 102, "heat": [102, 111], "heater": 111, "heavi": 111, "heavili": 111, "height": [22, 102], "held": [102, 114], "hellaswag": 102, "hello": [13, 75, 81, 114], "help": [4, 102, 104, 111, 113, 114], "helper": [10, 12, 19, 48, 66, 75], "her": 111, "here": [52, 66, 71, 75, 81, 102, 108, 111, 112], "hero": 111, "hf": [15, 104, 106, 108], "hf_auto_model": 15, "hf_auto_model_additional_arg": 15, "hf_automodel_map": 15, "hf_automodel_typ": 15, "hf_dataset_sanity_check": [5, 6], "hf_decoder_model": [3, 17, 51, 55, 56, 69, 72], "hf_encoder_decoder_model": [3, 17], "hf_model_config": 15, "hf_model_mixin": [3, 13, 16, 17], "hf_text_regression_model": [3, 17, 56, 58, 59, 72], "hfargumentpars": 110, "hfdecodermodel": [13, 14, 51, 52, 55, 56, 69], "hfencoderdecodermodel": 14, "hfmodelmixin": [13, 15, 16], "hftextregressionmodel": [16, 56, 58, 59], "hh": [111, 112], "hh_rlhf": [111, 112], "hh_rlhf_llama": 111, "hh_rlhf_raft_align": 111, "hh_rlhf_rm": 111, "hh_rlhf_rm_sft_gptneo_2_7b": 111, "hh_rlhf_rm_train": 111, "hh_rlhf_sft": 111, "hi": [75, 81, 104, 111], "hidden": 107, "hidden_s": [22, 23], "hidden_st": [89, 90, 91, 93], "hide": [66, 111, 112], "hideo": 111, "high": 111, "higher": [55, 102, 111], "highlight": 114, "him": 111, "hing": 61, "hint": [4, 5, 6], "histor": 111, "histori": [96, 102, 111], "hit": 111, "hm": 111, "home": [102, 111, 112], "homeown": 102, "homework": 111, "homogen": 4, "honei": 102, "host": 111, "hostil": 102, "hot": [102, 111], "hous": [102, 111, 112], "how": [66, 102, 104, 107, 109, 111, 112, 113], "howev": [13, 14, 102, 106, 111, 114], "howpublish": 114, "hp": 66, "hp_name": 66, "hp_search_backend": 66, "hp_space": 66, "hpsearchbackend": 66, "html": [66, 102, 111], "http": [26, 31, 33, 36, 41, 43, 45, 55, 66, 102, 105, 111, 112, 114], "hub": [66, 102], "hub_model_id": 66, "hug": [5, 6, 104], "huge": [102, 112], "huggingfac": [4, 5, 6, 13, 14, 16, 106, 111, 112], "human": [102, 104, 105, 107, 111, 112, 113, 114], "hundr": 112, "hurrican": 111, "hyper": [66, 109], "hyperparamet": 66, "hyperparameter_search": 66, "i": [4, 6, 7, 13, 14, 15, 22, 32, 37, 42, 47, 49, 50, 51, 52, 55, 57, 60, 66, 69, 75, 81, 94, 102, 104, 106, 107, 108, 111, 112, 113, 114], "ianz2020": 112, "icml": 102, "id": [4, 13, 14, 22, 69, 75, 81], "idea": [102, 111, 112], "ident": 66, "identifi": [66, 104, 112], "idx_gap": 112, "ignor": [61, 66], "ignore_bias_buff": 4, "ignore_index": 74, "ignore_kei": 66, "ignore_keys_for_ev": 66, "ignored_args_list": 73, "ill": 102, "illeg": 102, "illustr": [107, 112], "im_end": [74, 104, 113], "im_patch": 74, "im_start": [74, 104, 113], "imag": [7, 22, 23, 74], "image_aspect_ratio": 4, "image_encoder_name_or_path": [4, 22], "image_flag": 88, "image_fold": [4, 6, 7], "image_forward_out": [22, 23], "image_processor": [6, 7], 
"image_text": [5, 55], "image_token_index": [7, 22, 74], "imageher": 88, "imagin": [111, 112], "immedi": 102, "impact": [111, 112], "imperfect": 111, "implement": [13, 14, 15, 25, 26, 31, 33, 36, 41, 43, 45, 51, 66, 94, 102, 105, 111], "impli": 102, "implicitli": 61, "import": [4, 52, 66, 102, 107, 109, 110, 111, 114], "improp": 114, "improv": [102, 111, 112, 114], "incent": 111, "incentiv": 111, "incident": 114, "includ": [4, 5, 6, 51, 66, 88, 102, 111, 112, 114], "incompat": 66, "incomplet": 102, "inconveni": 102, "incorpor": 114, "incorrect": 102, "incorrectli": 111, "increas": [31, 43, 111, 112], "inde": 102, "index": [22, 66, 114], "index0": 113, "indian": 102, "indic": [4, 5, 6, 22, 102, 104], "indirect": 114, "induc": 102, "inf": 57, "infer": [1, 4, 13, 14, 16, 21, 22, 52, 55, 58, 69, 71, 104, 111, 114], "infer_batch_s": 57, "inferenc": [3, 4, 54, 58, 104], "inference_arg": 69, "inference_batch_s": 4, "inference_batch_size_per_devic": [4, 111], "inference_func": 21, "inference_result": 16, "inferencer_arg": [55, 58, 69], "inferencer_file_path": 69, "inferencerargu": [4, 55, 56, 58, 69], "inferencerwithoffload": 69, "influenc": 111, "info": 105, "inform": [4, 5, 6, 52, 66, 111, 114], "inher": 102, "inherit": [10, 12, 19, 48, 66, 75, 107], "init": 66, "init_git_repo": 66, "init_to_zero": 94, "initi": [4, 5, 6, 13, 14, 16, 21, 52, 53, 55, 57, 58, 59, 66, 110, 111, 112], "initial_accumul": 45, "initial_iter_idx": 4, "inject": 66, "injera": 102, "injur": [102, 111], "injuri": 102, "injustic": 102, "inner": 66, "innov": 114, "inplac": 4, "input": [4, 7, 13, 14, 15, 16, 21, 22, 52, 55, 57, 61, 66, 68, 69, 71, 88, 102, 104, 105, 106, 107, 112], "input_dataset": 55, "input_dir": 106, "input_id": [7, 13, 22, 23, 55, 71, 112], "input_shap": [89, 90, 93], "inputs_emb": [22, 90, 93], "insert": 22, "inspir": 111, "inst": [104, 113], "instal": [66, 105], "instanc": [5, 6, 7, 13, 14, 16, 21, 51, 52, 53, 66, 73, 102, 104, 111, 112, 114], "instance_fields_map": 74, "instanti": 66, "instead": [4, 75, 94, 102], "institut": 102, "instruct": [1, 112], "instructgpt": [111, 112, 114], "int": [4, 5, 6, 13, 14, 15, 16, 51, 55, 58, 60, 61, 66, 67, 69, 70, 71, 75, 81, 83, 87, 88, 89, 96, 111, 112], "int8": 4, "integ": [4, 75, 81], "integr": 105, "intellig": 114, "intend": 114, "intens": [102, 111], "interact": [107, 111], "interest": [111, 114], "interfac": [3, 13, 14, 16, 17], "intermedi": 65, "intern": [66, 102], "internal_vers": 8, "internlm": [3, 81], "internlm2": 104, "internlm2_templ": [81, 82], "interpol": 15, "interpret": 104, "interrupt": [65, 66], "intrins": 102, "introduc": [102, 111, 114], "introduct": 109, "inv_freq": 100, "invalid": 4, "invis": 107, "invok": 102, "involv": [102, 111, 112], "io": [66, 102, 111, 114], "ipex_optimize_model": 66, "irrelev": 102, "is_caus": 94, "is_custom_dataset": 4, "is_encoder_decod": [60, 61], "is_first_tim": 66, "is_in_train": 66, "is_load": 23, "is_local_process_zero": 66, "is_model_parallel": 66, "is_multimod": 4, "is_package_version_at_least": 46, "is_sagemaker_mp_post_1_10": 66, "is_torch_greater_or_equal_than_1_10": 66, "is_torch_less_than_1_11": 66, "is_world_process_zero": 66, "island": 102, "isn": 111, "issu": [4, 15, 102, 106, 111, 114], "item": [102, 105], "iter": [4, 34, 52, 55, 57, 66, 111], "iter_id": 57, "iterabledataset": 66, "iteration_nam": 56, "iterative_dpo_align": [3, 54], "iterativealignerargu": 4, "iterativedpoalign": 56, "iterativedpoalignerargu": [4, 56], "its": [46, 66, 102, 111, 114], "itself": 102, "j": [52, 111], "jack": 111, 
"japanes": 102, "jfk": 111, "jinja": 113, "jipeng": [0, 114], "job": 107, "join": 114, "joint": 102, "journal": 114, "journei": 111, "json": [4, 5, 6, 66, 75, 81, 88, 104, 105, 106, 110, 111, 112], "json_fil": 110, "jsonl": 114, "judg": 111, "juic": 102, "just": [7, 15, 32, 42, 60, 94, 102, 104, 107, 111, 112, 114], "k": [57, 94, 102, 111], "ka": 0, "ka9v1ywd": 112, "kashun": 114, "katrina": 111, "kebab": 102, "keep": [96, 111, 112], "keep_end": [60, 61], "keep_linebreak": 4, "kei": [66, 91, 102, 104, 105, 111, 112], "keith": 111, "kennedi": 111, "key_1": [5, 6, 104], "key_2": [5, 6, 104], "key_3": 104, "key_4": 104, "key_inst": 5, "key_scor": 5, "key_typ": 5, "keyword": [5, 6, 13, 14, 16, 21, 53, 57, 59, 66, 75], "kid": [111, 112], "kind": [111, 112], "king": 102, "kitchen": 111, "kitfo": 102, "kl": 61, "knive": 111, "know": [111, 112], "knowledg": [102, 114], "known": 102, "kojima": 111, "konami": 111, "korean": 102, "kv": 94, "kwarg": [5, 6, 9, 10, 11, 12, 13, 14, 15, 16, 20, 21, 22, 23, 24, 46, 47, 49, 53, 56, 57, 58, 59, 65, 66, 69, 75, 80, 81, 83, 87], "kwd": 96, "l": 94, "label": [7, 22, 23, 60, 66, 71, 102, 111, 112], "label_column": [70, 71], "label_id": 66, "label_nam": 66, "label_pad_token_id": [60, 61], "lamb": [3, 4, 35], "lambda": 112, "lambdalr": [61, 66], "land": 112, "landmark": 102, "languag": [4, 13, 14, 44, 52, 53, 66, 94, 102, 108, 114], "language_model": [22, 23], "language_model_from_pretrain": 22, "language_model_input": 22, "language_model_name_or_path": 22, "language_project": 23, "lar": [3, 4, 35], "larg": [36, 37, 102, 111, 114], "lars_lr": 37, "last": [66, 104, 111, 112, 113], "last_checkpoint": 53, "latest": [66, 108], "laugh": 112, "launch": 66, "layer": [37, 66, 108], "layer_past": [89, 90, 91], "layerwis": 109, "lead": [13, 16, 102, 111], "leader": 111, "leadership": 111, "leak": 102, "learn": [26, 32, 34, 36, 41, 42, 66, 111, 112, 114], "learning_r": [4, 111], "leav": [11, 12, 102, 111], "led": 111, "left": [94, 102], "legal": [102, 114], "len": [88, 102, 110, 112], "len_penalti": 61, "length": [4, 14, 15, 22, 51, 52, 53, 55, 60, 66, 88, 94, 102, 104], "length_penalti": [4, 51], "less": 102, "let": [110, 111, 112, 114], "level": [102, 114], "li": 114, "liabil": 114, "librari": [4, 66], "licens": [66, 102, 111], "light": 114, "lighter": 102, "lightn": 37, "lightweight": 114, "like": [37, 66, 94, 102, 104, 105, 107, 111, 113, 114], "likelihood": [52, 102, 105], "lime": 102, "limit": 102, "line": [11, 12, 102], "link": 104, "lisa": [109, 112], "lisa_activated_lay": [4, 108], "lisa_interval_step": [4, 108], "lisa_layers_attribut": 4, "list": [4, 5, 6, 13, 14, 16, 22, 33, 50, 51, 56, 58, 60, 61, 66, 67, 69, 71, 73, 75, 81, 83, 87, 88, 96, 97, 104, 107, 111, 114], "list_of_list": 58, "list_to_compress": 58, "listformatt": 75, "liter": [61, 75], "littl": 102, "liuhong99": 44, "live": [111, 112], "liyuanlucasliu": 41, "ll": [94, 111, 112], "llama": [3, 15, 81, 102, 104, 107, 108, 109, 111, 112, 114], "llama2": [104, 108], "llama2_templ": [81, 83], "llama2conversationtempl": 83, "llama3": [104, 108], "llama3_templ": [81, 83], "llama_2": 96, "llama_flash_attent": [3, 92], "llama_rope_scaled_monkey_patch": [3, 99], "llamaforcausallm": 13, "llava": 23, "llava_conversation_lib": [3, 95], "llava_load": 4, "llava_pretrain_model_path": 4, "llm": [103, 107, 111, 114], "llm_model_name_or_path": 4, "lm": [4, 102, 109], "lm_dataset": 110, "lm_eval_dataset_map": 105, "lm_evaluation_metr": 4, "lmflow": [3, 103, 104, 107, 109, 110, 111, 112, 113], 
"lmflow_lora_target_modules_map": 74, "lmsy": 102, "ln": 52, "lntwmcyd": 112, "lo": 102, "load": [4, 5, 6, 13, 14, 22, 50, 52, 53, 55, 57, 58, 59, 66, 88, 106], "load_data": 88, "load_dataset": [111, 112], "load_in_4bit": 4, "load_inference_result": 69, "load_llava_pretrain_model": 98, "load_model": 23, "load_prompt_cach": 22, "load_result": 66, "loader": 52, "local": [66, 102, 111], "local_datset_answertype_map": 105, "local_datset_group_map": 105, "local_datset_map": 105, "local_rank": [4, 52, 55, 57, 58, 74], "locat": [102, 107], "lock": 102, "locksmith": 102, "log": [52, 61, 66, 102, 105], "log_dir": 105, "log_freq": 4, "log_level": [53, 66], "logdir": 74, "logger": [4, 5, 13, 14, 15, 16, 51, 53, 55, 56, 57, 58, 59, 60, 61, 63, 64, 66, 67, 69, 70, 71, 73, 75, 80, 83, 87, 97], "logging_step": 4, "login": 4, "logit": 66, "logsigmoid": 112, "long": [60, 66, 102, 104, 111], "longer": [51, 102], "longtensor": [22, 61, 93], "look": [107, 111], "loop": [66, 113], "loop_messag": 113, "lora": [1, 13, 14, 109, 112, 114], "lora_alpha": 4, "lora_dropout": 4, "lora_merg": 108, "lora_model_path": [4, 106, 108], "lora_r": 4, "lora_target_modul": 4, "lora_target_modules_map": 15, "loss": [25, 26, 31, 32, 34, 36, 37, 41, 42, 43, 45, 61, 66, 112], "loss_typ": [4, 61], "lot": [111, 112], "low": [109, 112], "low_resourc": [4, 22], "lower": [55, 66, 102], "loyal": 102, "lr": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 36, 37, 38, 39, 41, 42, 43, 44, 45], "lr_schedul": [61, 66], "lr_scheduler_typ": 4, "lse": 94, "lst": 66, "luck": 102, "luolc": 26, "m": [94, 102, 104, 111, 112], "macaroni": 102, "machin": [66, 114], "made": [66, 102, 111, 112], "magic": 111, "magnitud": 102, "mai": [11, 12, 66, 102, 104, 105, 111, 112], "main": [63, 64, 66, 105, 110, 111, 114], "main_process_first": 110, "mainstream": 111, "maintain": 114, "major": 102, "make": [55, 66, 75, 81, 94, 102, 104, 105, 107, 111, 112, 114], "make_shell_args_from_dataclass": 73, "malfunct": 111, "manag": 66, "mani": [94, 102, 111, 112], "manipul": [5, 6], "manual": [66, 102], "map": [4, 5, 6, 21, 110, 112], "mar": 2, "marathon": 112, "margarita": 102, "margin": 61, "margin_scal": [4, 51], "marin": 102, "maryland": 102, "mask": [4, 13, 14, 22, 71, 75, 94], "mask_prompt": [4, 60, 61], "master": 102, "match": [5, 6, 102], "materi": 111, "math": [37, 102], "mathemat": 114, "matter": 114, "max": [14, 15, 52], "max_eval_sampl": 4, "max_grad_norm": 33, "max_length": [4, 60, 61, 67], "max_max": 51, "max_min": 51, "max_new_token": [4, 55], "max_position_embed": 100, "max_prompt_length": [4, 60, 61], "max_random": 51, "max_seq_len_cach": 100, "max_step": 4, "max_target_length": [60, 61], "max_train_sampl": 4, "maxim": [44, 66, 114], "maximum": [4, 52, 53, 55, 66], "mayb": [111, 112], "me": [111, 112], "mean": [66, 102, 111, 112], "meaning": 111, "meantim": 102, "measur": 112, "meat": [102, 111], "medic": 114, "medicin": 114, "medmcqa": 114, "medqa": 114, "melt": 111, "member": [66, 111], "memor": 102, "memori": [4, 108, 111], "memory_safe_dpov2_align": [3, 62], "memory_safe_dpov2_align_env_var_to_remov": 74, "memory_safe_vllm_infer": [3, 62], "memory_safe_vllm_inference_don": 74, "memory_safe_vllm_inference_env_var_to_remov": 74, "memory_safe_vllm_inference_finish_flag": 74, "memorysafedpov2align": 51, "memorysafevllminferenc": 69, "mention": 102, "menu": [102, 111], "merg": 109, "merge_lora": 111, "merge_lora_weight": [13, 14], "messag": [4, 5, 6, 66, 73, 75, 81, 83, 87, 96, 104, 107, 112, 113], "meta": [104, 108], "metadata": [4, 66, 111], "metal": 111, 
"method": [5, 6, 13, 14, 15, 26, 45, 52, 66, 75, 102, 111, 114], "metric": [4, 52, 61, 65, 66, 105, 111], "metric_key_prefix": 66, "mexican": 102, "middl": 102, "might": [66, 111, 112], "mild": 102, "milk": 111, "million": 112, "min_vers": 46, "mini": 52, "mini_gpt": 55, "miniatur": 111, "minibatch_s": 52, "minim": [66, 75], "minut": 36, "misc": 114, "misfortun": 102, "mistak": [102, 111], "mix": [4, 111], "mixed_arg": 56, "mixed_precis": 4, "mizuzu": 111, "mkdir": 105, "mlir": 94, "mmlu": 114, "modal": [6, 7], "mode": [4, 32, 42, 66, 111], "model": [1, 3, 4, 7, 8, 25, 26, 31, 32, 33, 34, 36, 37, 41, 42, 43, 44, 45, 47, 49, 50, 51, 52, 53, 55, 56, 57, 58, 59, 60, 61, 65, 66, 68, 69, 95, 98, 102, 104, 106, 107, 108, 109, 110, 113, 114], "model_arg": [9, 13, 14, 15, 16, 21, 46, 50, 51, 52, 53, 55, 56, 57, 58, 59, 69, 98, 110], "model_args_list": 97, "model_config_class": 4, "model_for_causal_lm_map": 4, "model_init": [61, 66], "model_input": [58, 69], "model_max_length": [4, 53, 70, 71, 110], "model_nam": [66, 105], "model_name_or_path": [4, 104, 105, 106, 107, 108, 111], "model_output": 58, "model_revis": 4, "model_s": 106, "model_typ": 4, "model_wrap": 66, "modelargu": [4, 15, 16, 51, 52, 53, 55, 56, 57, 58, 59, 63, 69, 97, 110], "modelconfig": 15, "modeling_output": [16, 22, 58], "modeling_util": 66, "models_support_flash_attent": 13, "modern": 102, "modif": 66, "modifi": [25, 37, 111, 112], "mole": 102, "momentum": [31, 33, 37, 42, 43], "momentum_decai": 38, "monei": 102, "monitor": 102, "monument": 102, "more": [55, 66, 102, 104, 108, 111, 112, 113, 114], "moreov": [112, 114], "morgan": 102, "most": [66, 102, 104, 111, 112], "mostli": [75, 104], "motiv": 111, "mpi4pi": [105, 114], "mpt": 96, "mu": 37, "much": 94, "multi": [6, 7, 102], "multi_modal_dataset": [3, 6], "multimod": [3, 95], "multimodaldatasetargu": 4, "multipl": [22, 51, 94, 102, 111, 112, 113, 114], "museum": 102, "must": [66, 110, 113, 114], "mutipl": 88, "mv": 105, "my": [102, 111, 112], "n": [5, 6, 13, 52, 69, 102, 104, 105, 107, 112, 113, 114], "n_trial": 66, "nadam": [3, 4, 35], "name": [4, 13, 14, 16, 21, 32, 42, 46, 66, 73, 88, 94, 102, 104, 105, 107, 111], "namedtupl": 66, "nassist": 113, "nation": [102, 111], "natur": [102, 114], "ndarrai": 66, "necessari": [4, 50, 66, 102, 107, 112], "need": [7, 32, 42, 65, 66, 71, 75, 102, 105, 106, 107, 111, 114], "neg": [52, 102, 105, 111, 112, 114], "negro": 102, "neighborhood": 102, "neither": 102, "neo": [111, 112], "nest": 66, "nestedtensor": 94, "nesterov": [31, 33, 37, 43], "network": [37, 111], "neurip": 25, "nevertheless": 102, "new": [5, 6, 66, 73, 94, 102, 105, 111, 112, 114], "new_default": 73, "next": [55, 107, 111], "nhead": 94, "nhello": 104, "nhow": 113, "ni": 113, "nice": 111, "nif": 112, "nip": 45, "nll": [52, 102, 109], "nlp": [13, 14, 102], "nn": [23, 34, 61, 66, 100, 112], "no_prox": 33, "nogada": 102, "nois": [111, 112], "noisi": 111, "non": [94, 111], "nonconvex": 45, "none": [4, 5, 6, 7, 13, 14, 15, 16, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 34, 36, 37, 38, 39, 41, 42, 43, 44, 45, 50, 52, 53, 57, 59, 60, 61, 65, 66, 67, 69, 73, 75, 81, 83, 87, 88, 89, 90, 91, 93, 94, 96, 100, 112], "nonsens": 102, "noqa": 45, "nor": 102, "norm": [31, 43], "normal": [13, 14, 16, 52, 102], "north": 112, "northern": 102, "notat": 111, "note": [4, 15, 26, 31, 36, 37, 41, 43, 45, 66, 71, 102, 105, 114], "noth": 34, "notic": 105, "novograd": [3, 4, 35], "now": [102, 104, 106, 111, 112, 113], "np": 66, "ntk_ratio": 100, "num_capt": 22, "num_channel": 22, "num_exampl": 66, 
"num_inst": [58, 69], "num_label": 15, "num_new_token": 55, "num_output_sequ": 4, "num_patch": 23, "num_proc": 50, "num_raft_iter": [4, 111], "num_sampl": 55, "num_train_epoch": 111, "num_training_step": 66, "number": [4, 5, 6, 52, 55, 66, 108, 111, 112], "numpi": [66, 88], "nuser": 113, "nwhat": 104, "nwho": 113, "nyou": [104, 113], "o": [94, 102, 110], "oaxaca": 102, "obei": 102, "obj": [75, 81], "object": [5, 6, 7, 13, 16, 21, 52, 53, 55, 57, 58, 59, 66, 69, 73, 75, 81], "obqa": 102, "observ": [15, 25, 102, 111, 112, 114], "obstacl": 102, "obtain": [102, 106, 111, 112, 114], "occasion": 111, "occup": 111, "occupi": 112, "ocean": 111, "off": 111, "offer": [102, 111], "offici": [15, 106, 113], "offs_d": 94, "offs_n": 94, "offset": 96, "often": [102, 111], "oil": 111, "okai": 111, "old": [104, 107, 113], "omit": 102, "omp_num_thread": 74, "on_epoch_end": 65, "on_sav": 65, "on_train_end": 65, "onc": [66, 94], "one": [11, 12, 51, 66, 102, 104, 105, 108, 110, 111, 112], "one_sample_multiple_imag": 22, "ones": 112, "onli": [4, 21, 66, 94, 102, 104, 105, 107, 108, 110, 111, 112, 113, 114], "onlin": 102, "open": [103, 105, 114], "openai": 94, "oper": [4, 66], "opportun": 111, "oppress": 102, "opt": [102, 114], "optim": [3, 8, 61, 65, 66, 104, 111, 114], "optim_adam_beta1": 4, "optim_adam_beta2": 4, "optim_beta1": 4, "optim_beta2": 4, "optim_beta3": 4, "optim_dummy_beta1": 4, "optim_dummy_beta2": 4, "optim_momentum": 4, "optim_weight_decai": 4, "optimalscal": [102, 111, 114], "optimizer_kera": 45, "optimizer_nam": 66, "optimizer_typ": 4, "optimizernam": 4, "option": [4, 5, 6, 11, 12, 13, 14, 15, 16, 21, 22, 25, 32, 34, 42, 53, 55, 57, 59, 66, 69, 73, 94, 102, 104, 106, 107, 111], "optuna": 66, "order": [44, 102, 104], "org": [26, 31, 33, 36, 41, 43, 55, 102, 111, 112], "organ": [102, 111, 112], "orient": 114, "origin": [66, 73, 75, 102, 106, 111, 112], "original_dataclass": 73, "other": [52, 61, 66, 94, 102, 111, 112, 114], "otherwis": 66, "otherworld": 111, "ouptut": 102, "our": [15, 102, 104, 105, 106, 107, 108, 109, 110, 111, 114], "out": [66, 94, 102, 107, 111, 112, 114], "outag": 111, "outcom": 114, "output": [4, 13, 14, 16, 51, 52, 55, 57, 66, 69, 71, 88, 102, 104], "output_attent": [22, 89, 90, 91, 93], "output_dataset": 55, "output_dir": [4, 56, 66, 106, 111], "output_hidden_st": 22, "output_lora_path": 108, "output_max_length": [4, 57], "output_min_length": [4, 57], "output_model": [106, 107, 108, 111], "output_model_path": [107, 108], "output_reward_path": [4, 57], "over": [52, 102, 104, 111, 112], "overal": [11, 12, 13, 14, 94, 114], "overfit": 112, "overrid": [4, 15, 22, 66], "overridden": 66, "overse": 111, "overview": [109, 112], "overwrite_cach": 4, "overwrite_output_dir": 66, "overwritten": 111, "own": [66, 102, 104, 105, 107, 111, 112, 114], "oyster": 102, "p": [31, 37, 43, 52, 55], "p2ju3r1a": 112, "p_": [37, 52], "pacif": 102, "packag": [52, 109, 114], "package_nam": 46, "pad": [4, 22, 60, 66, 67], "pad_index": 66, "pad_to_multiple_of": 67, "pad_token_id": [70, 71], "padding_sid": [4, 70, 71], "padding_valu": [60, 61], "paddingstrategi": 67, "page": [3, 102, 114], "pain": 111, "pair": [51, 75, 81, 102, 105, 107, 109], "paired_convers": [5, 104], "paired_conversation_dataset_descript": 74, "paired_conversation_tokenize_funct": 71, "paired_text_to_text_dataset_descript": 74, "pan": [0, 114], "panda": 112, "paper": [37, 45, 111, 112], "par": 102, "parallel": [4, 66, 94], "param": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 36, 37, 38, 39, 41, 42, 43, 44, 45], "paramet": [4, 5, 6, 13, 
14, 15, 16, 21, 32, 34, 37, 42, 52, 53, 55, 57, 58, 59, 61, 66, 69, 73, 75, 81, 88, 109, 112, 114], "parent": 111, "pars": 110, "parse_args_into_dataclass": 110, "parse_json_fil": 110, "parse_to_sampling_param": 69, "parser": 110, "part": [37, 102], "partial": 102, "particip": 114, "particular": [111, 112, 114], "partit": 102, "pass": [4, 15, 66, 94, 102, 104, 110, 114], "past_key_valu": [22, 23, 93], "past_key_values_length": [89, 90, 93], "pasta": 102, "pastor": 102, "path": [4, 5, 6, 13, 14, 16, 21, 22, 66, 105, 106, 110, 111], "path_to_dataset": 104, "path_to_your_model": 107, "patient": 111, "pattern": 66, "peac": 111, "peakier": 55, "peanut": 111, "pecan": 111, "peft": 65, "peft_config": [15, 61], "peft_train": [3, 62, 68], "peftrewardtrain": 68, "peftsavingcallback": 65, "pefttrain": [65, 68], "penal": [4, 51], "peopl": [102, 111, 114], "pepper": 102, "per": [4, 111], "per_device_eval_batch_s": 4, "per_device_train_batch_s": [4, 111], "percentag": 112, "perfect": 111, "perform": [5, 6, 13, 14, 16, 22, 25, 26, 31, 32, 33, 34, 36, 37, 41, 42, 43, 45, 52, 53, 55, 57, 58, 59, 66, 69, 108, 111, 114], "period": [11, 12, 102], "permiss": 102, "permit": 102, "perplex": [4, 102], "persecut": [102, 111], "person": 111, "perturb": [31, 43], "pet": 102, "pete": 102, "phantom": 111, "phenomenon": [102, 111], "phi": [3, 81], "phi3": 104, "phi3_templ": [81, 84], "phil": 94, "phone": 111, "pi_ratio": 100, "pick": 66, "pie": 102, "piec": [104, 107, 113], "pile": [111, 112], "pioneer": 102, "pip": [105, 114], "pipelin": [3, 4, 8, 104, 110, 111, 114], "pipeline_arg": [14, 46, 110], "pipeline_argument_map": 4, "pipeline_map": 46, "pipeline_nam": [46, 110], "pipelineargu": 110, "piqa": 102, "pit": 102, "pivot": 102, "pixel_valu": 22, "place": [32, 42, 66, 111, 112], "place_model_on_devic": 66, "placehold": 15, "plai": 102, "plain": [88, 96], "plan": [94, 111], "plantain": 111, "platform": 114, "plausibl": 102, "pleas": [41, 75, 104, 105, 107, 108, 111, 112, 113, 114], "plot": 111, "poblano": 102, "pocket": 111, "point": [66, 94, 111], "polic": 102, "polici": [4, 61, 102, 111], "policy_chosen_logp": 61, "policy_rejected_logp": 61, "polish": 23, "poor": 111, "pop": 66, "pop_callback": 66, "popish": 111, "popul": [102, 112], "popular": [102, 111], "pork": 102, "portion": 102, "posit": [5, 6, 13, 14, 15, 16, 21, 53, 57, 59, 102, 111, 112, 114], "position_id": 93, "position_interpol": [3, 95], "possess": 102, "possibl": [102, 112], "post": [111, 112], "postprocess_distributed_inference_output": 16, "postprocess_inference_output": 16, "potenti": [66, 102], "powder": 111, "power": [111, 114], "ppl": 102, "ppo": 111, "practic": 102, "pre": 44, "pre_grad": 33, "precis": 4, "predict": [55, 66, 102, 107], "predict_next_token": 55, "predicted_answ": 52, "prediction_loop": 66, "prediction_loss_onli": 66, "prediction_step": 66, "predictionoutput": 66, "prefer": [104, 111, 112], "preferencedatacollatorwithpad": 60, "prefix": [66, 73, 112], "prefix_checkpoint_dir": 66, "prelat": 111, "prepar": [13, 15, 51, 57, 66, 105, 111, 112, 114], "prepare_inputs_for_infer": [13, 16], "prepare_inputs_labels_for_multimod": 23, "prepend": 66, "preprocess": [66, 112], "preprocess_llama_from_llava_plain": 7, "preprocess_llama_from_llava_v1": 7, "preprocess_logits_for_metr": [61, 66], "preprocess_multimodal_llava": 7, "preprocessing_num_work": [4, 61], "present": [66, 111], "preset": [15, 104, 107, 113], "preset_templ": [81, 107], "presid": 102, "pretrain": [4, 13, 14, 66, 102, 106, 114], "pretrained_language_projection_path": 4, 
"pretrained_path": 22, "pretrainedmodel": [60, 61, 66], "pretrainedtoken": [7, 15, 70, 71, 75, 81, 83, 87], "pretrainedtokenizerbas": [60, 61, 66], "pretrainedtokenizerfast": [15, 70, 71], "pretti": 112, "previou": [66, 111, 112], "price": 102, "print_bann": 73, "print_change_log": 25, "priorit": 102, "privat": 4, "privileg": 111, "prob": 55, "probabilist": 114, "probabl": [55, 61, 66, 102, 111, 112], "problem": [102, 109, 111], "procedur": [102, 111, 112], "proceed": 102, "process": [7, 13, 14, 16, 22, 52, 53, 55, 57, 66, 102, 104, 109, 110, 111, 112, 114], "process_image_flag": 88, "processor_image_token_in_minigpt4": 22, "produc": 102, "product": [111, 114], "profession": 114, "profici": 114, "program": [11, 12, 88, 111], "progress": 66, "prohibit": 102, "project": [15, 102, 111, 112], "project_dir": 111, "prompt": [4, 7, 13, 14, 16, 22, 50, 55, 57, 60, 69, 75, 104, 111, 112, 114], "prompt_cache_path": 4, "prompt_id": 22, "prompt_keys_valu": 22, "prompt_structur": [4, 55, 105, 106], "proper": 102, "properli": [102, 111], "properti": [23, 102], "propon": 111, "proport": [4, 5, 6], "propos": [26, 31, 33, 36, 41, 43, 45], "protect": 111, "provid": [4, 10, 12, 13, 14, 19, 41, 48, 52, 55, 66, 69, 75, 102, 104, 105, 107, 109, 111, 114], "pt": [66, 67], "public": [1, 111], "publicli": 114, "publish": 114, "pubmedqa": 114, "pull": 102, "punctuat": 112, "purpos": 107, "purs": 111, "push": 66, "push_to_hub": 66, "put": [7, 105], "py": [94, 104, 105, 106, 107, 111, 112], "python": [5, 6, 7, 75, 102, 105, 106, 114], "pytorch": [25, 33, 36, 37, 60, 66], "pytorch_": 41, "pytorchlightn": 37, "q": [94, 102], "q1": 102, "q2": 102, "q3": 102, "qa": [88, 102], "qformer_from_pretrain": 22, "qformer_name_or_path": [4, 22], "qkv": 94, "qualit": 102, "qualiti": [102, 111], "quant_config": 15, "quant_typ": 4, "quantit": 102, "quantiti": 66, "quantiz": 4, "queri": 91, "quesadilla": 102, "queso": 102, "question": [13, 14, 50, 57, 102, 104, 112, 114], "quick": 111, "quicker": 4, "quickli": [102, 114], "quicksort": 102, "quit": 94, "qwen": [3, 81, 108], "qwen1": 108, "qwen2": 104, "qwen2_templ": [81, 85], "r": [32, 42], "r1": 102, "r2": 102, "r3": 102, "rabi": 102, "race": 94, "radam": [3, 4, 35], "raft": [4, 61, 109], "raft_align": [3, 54, 111], "raft_aligner_arg": 57, "raft_batch_s": [4, 111], "raft_train": [3, 62], "raftalign": 57, "raftalignerargu": [4, 57], "rafttrain": 66, "raggedtensor": 94, "rai": [13, 16, 58, 66, 69], "rais": [5, 6, 66], "raise_except": 113, "random": [51, 55, 66, 88, 111], "random_se": 4, "random_shuffl": [4, 88], "randomli": [51, 108, 111], "rang": [61, 102, 112, 114], "rank": [74, 109, 111, 112], "rare": 112, "rarest": 112, "rate": [26, 32, 34, 41, 42, 66, 111, 112], "rather": [32, 42], "raw": [102, 104], "re": [60, 66, 111, 112], "read": [111, 112], "reader": 111, "readi": 111, "readm": 114, "readthedoc": 66, "real": [102, 104], "realli": [111, 112], "reason": [66, 111], "recal": 107, "receiv": 66, "recent": 102, "recip": 111, "recommend": [102, 111], "record": [111, 112], "rectifi": 25, "recurs": [66, 102], "reduc": [94, 111], "redund": 111, "reevalu": [25, 26, 31, 32, 34, 36, 37, 41, 42, 43, 45], "ref": 55, "ref_model": [51, 61], "ref_model_arg": [51, 56], "refer": [25, 26, 31, 36, 37, 41, 43, 45, 61, 66, 71, 104, 105, 107, 109, 111, 113, 114], "reference_chosen_logp": 61, "reference_fre": 61, "reference_rejected_logp": 61, "referencemodelargu": [51, 63], "reflect": [66, 102], "reform": 111, "reformul": 111, "regard": 102, "regist": [15, 21, 94], "register_inference_funct": 21, 
"register_prompt_cach": 22, "register_token": [6, 7], "registr": 109, "regress": [20, 21], "regression_model": [3, 17, 21], "regressionmodel": [20, 21, 57], "reinforc": [111, 112], "reiniti": 66, "reject": [50, 51, 60, 61, 104, 111, 112], "rejected_attention_mask": 112, "rejected_input_id": 112, "rejected_reward": [61, 112], "rel": 112, "relat": [52, 111, 114], "releas": [13, 15, 16, 69, 111, 114], "release_gpu": [13, 16, 69], "relev": 114, "reli": 114, "reliabl": [102, 111, 114], "relianc": 114, "religion": 111, "relleno": 102, "reload": 66, "remain": [51, 102], "remark": [111, 112, 114], "rememb": [75, 105, 111], "remind": 111, "remov": [66, 73, 111], "remove_callback": 66, "remove_dataclass_attr_prefix": 73, "remove_image_flag": 55, "remove_last_sep": [75, 81], "remove_last_separ": [75, 81], "remove_unused_column": 4, "repetit": 4, "repetition_penalti": 4, "replac": [15, 75, 104, 111], "replace_bloom_attn_with_flash_attn": 89, "replace_gpt2_attn_with_flash_attn": 90, "replace_gpt_neo_attn_with_flash_attn": 91, "replace_llama_attn_with_flash_attn": 93, "replace_llama_with_condens": 100, "repli": 111, "repo": [66, 105, 111], "report_to": 4, "repositori": [66, 102, 114], "repr": 75, "repres": [4, 5, 6, 13, 14, 16, 52, 75], "requir": [5, 6, 32, 42, 52, 53, 55, 57, 58, 59, 102, 104, 107, 111, 112, 114], "require_vers": 4, "rerun": 66, "rescu": 111, "research": [102, 111], "reset": 25, "residu": 89, "resolv": 111, "resourc": [13, 15, 16, 69, 111], "respect": [32, 37, 42, 61, 111], "respond": 107, "respons": [60, 61, 66, 88, 102, 111, 112, 114], "rest": [11, 12], "restart": 111, "restart_opt": 33, "restaur": 102, "restor": 111, "result": [4, 21, 69, 102, 111, 112, 114], "result_cache_path": 56, "results_path": [4, 69], "resum": 66, "resume_from_checkpoint": 66, "retrain": 111, "retriev": [5, 6], "return": [4, 5, 6, 7, 13, 14, 16, 22, 25, 26, 31, 32, 34, 36, 37, 41, 42, 43, 45, 46, 52, 53, 55, 57, 61, 66, 69, 73, 75, 81, 88, 102, 111, 112], "return_code_error_buff": 74, "return_dict": 22, "return_output": [66, 68], "return_pil": 96, "return_tensor": [7, 67], "reus": [22, 66], "rev_kl": 61, "review": 111, "revis": [13, 14, 16, 21], "reward": [4, 15, 51, 57, 61, 104, 109], "reward_model": [47, 50, 57], "reward_model_arg": 56, "reward_model_inference_batch_s": 4, "reward_model_inference_block_s": 4, "reward_model_or_path": 111, "rewarddatacollatorwithpad": 67, "rewardmodelinferenc": 58, "rewardmodelinferenceresultwithinput": [16, 58, 88], "rewardmodeltun": 59, "rewardmodeltunerargu": [4, 59], "rewardtrain": 68, "rewrit": 112, "rho": [27, 44], "rib": 102, "rich": 102, "right": [66, 70, 71, 102, 111, 113], "risk": 114, "rl": 111, "rlhf": [111, 112], "rlhf_prompt": 111, "rm": [71, 111, 112], "rm_dataprocessor": [3, 62], "rm_inferenc": [3, 54], "rm_loss": 68, "rm_trainer": [3, 62], "rm_tuner": [3, 54], "rng": 66, "robin": 102, "robust": [94, 114], "role": [75, 81, 96, 102, 104, 107, 111, 112, 113], "role_nam": 74, "role_rank": 74, "room": [111, 112], "root": 111, "rope_ntk_ratio": 4, "rope_pi_ratio": 4, "roughli": 102, "round": [102, 104, 111, 113], "rstrip_partial_utf8": 55, "rui": [0, 114], "run": [4, 52, 53, 57, 66, 102, 104, 105, 106, 111, 112], "run_evaluation_with_lora": 106, "run_finetun": [104, 107, 108, 111], "run_finetune_with_lisa": 108, "run_finetune_with_lora": [108, 111], "run_merge_lora": 108, "run_nam": 4, "run_raft_align": 111, "run_reward_model": [111, 112], "run_summari": 66, "rwandan": 111, "safe": [66, 102, 111], "safeti": [102, 111], "sagemak": 66, "sai": [107, 111, 112], "said": 
[107, 111], "salam": 111, "salem": 111, "salmonellosi": 102, "salt": [102, 111], "same": [4, 66, 104, 111, 112], "samoa": 111, "sampl": [4, 5, 6, 13, 51, 52, 55, 66, 69, 102, 104, 109, 111, 112], "sample_dataset": [5, 6], "sample_input_1": 104, "sample_input_2": 104, "sample_input_3": 104, "sample_output_1": 104, "sample_output_2": 104, "sample_output_3": 104, "sample_text_1": 104, "sample_text_2": 104, "sample_text_3": 104, "sampler": 66, "sampling_paired_idx_from_reward": 51, "sampling_paired_method": [4, 51], "sampling_param": [13, 16, 69], "samplingparam": [13, 16, 69], "saniti": [5, 6], "sanity_check": [4, 5, 6, 50], "sauc": [102, 111], "save": [4, 5, 6, 13, 14, 16, 22, 32, 42, 65, 66, 69, 106, 111, 114], "save_aggregated_lora": 4, "save_count": 66, "save_file_path": 69, "save_full_model": [13, 14], "save_inference_result": 69, "save_language_project": 4, "save_model": 66, "save_pretrain_model_path": 4, "save_prompt_cach": 22, "save_result": [4, 69], "save_step": 4, "scalabl": [44, 102, 114], "scalar": 4, "scale": 37, "scaler": 66, "scaler_nam": 66, "scenario": 102, "scene": 102, "schedul": [32, 42, 66], "scheduler_nam": 66, "scienc": 111, "scope": 112, "score": [5, 16, 51, 55], "score_to_prob": 55, "scout": 111, "scratch": 4, "screen": 111, "script": [4, 66, 104, 106, 107, 108, 110, 111, 112], "seafood": 102, "search": [4, 66, 111, 114], "second": [44, 51, 66, 75, 81, 106, 112], "section": [55, 102, 111], "see": [15, 66, 102, 107, 111, 112], "seed": [4, 5, 6, 66, 88], "seek": 114, "seem": [94, 111], "select": [51, 112], "select_featur": 23, "select_lay": 23, "selected_dataset": 57, "self": [5, 6, 66, 75, 89, 90, 91, 93, 94, 111], "sens": [102, 104, 107, 113], "sentenc": [4, 13, 16, 102, 104, 112, 113], "sep": 96, "sep2": 96, "sep_styl": [4, 96], "separ": [75, 81, 96, 105], "separatorstyl": 96, "seq_len": 100, "seqlen": 94, "seqlen_k": 94, "seqlen_q": 94, "seqlen_q_round": 94, "sequenc": [4, 13, 14, 16, 22, 51, 66, 75, 81, 83, 87, 94, 102], "sequence_length": 22, "sequence_parallel": 94, "sequenceclassifieroutputwithpast": [16, 58], "seri": 111, "serv": [66, 102, 111, 114], "servic": [102, 107], "session": [66, 107], "set": [4, 5, 6, 13, 14, 32, 37, 42, 66, 69, 75, 88, 102, 104, 106, 107, 109, 111, 112, 113], "set_epoch": 66, "set_random_se": 88, "setback": 102, "setup": [66, 109, 111], "seventeenth": 111, "sever": [4, 13, 14, 66, 88, 102, 104, 105, 109], "sft": [104, 109], "sgd": [37, 42], "sgd_schedule_fre": [3, 4, 35], "sgdp": [3, 4, 35], "sgdschedulefre": 42, "sh": [104, 105, 106, 107, 108, 111, 112], "shall": [104, 114], "shape": [22, 61, 94], "sharded_ddp": 66, "share": [66, 111, 114], "sharegpt": [4, 102, 104], "shaw": 102, "she": 111, "sheet": 102, "shell": [73, 104], "shizh": [0, 114], "shop": 102, "should": [4, 11, 12, 13, 15, 16, 23, 32, 42, 66, 75, 81, 94, 102, 104, 111], "show": [102, 109, 111, 112], "shrimp": 102, "shuffl": [5, 6, 88], "shum": [0, 114], "shun": 0, "sick": 111, "side": [4, 102, 111], "sigmoid": 61, "signific": [111, 112, 114], "sigopt": 66, "simclr": 37, "similar": [102, 106, 107, 111], "similarli": 105, "simpl": [1, 66, 102, 107, 111, 112, 114], "simplest": [111, 112], "simpli": [105, 111], "simplifi": [1, 52, 53, 55, 57, 112, 114], "sinc": [75, 102, 104, 107, 111], "singl": [13, 25, 26, 31, 32, 33, 34, 36, 37, 41, 42, 43, 45, 60, 66, 96, 102, 104, 108, 111, 112], "situat": [66, 102, 111], "sixteenth": 111, "size": [4, 22, 52, 66, 94, 102, 111, 112], "skill": 114, "skillet": 111, "skip": [73, 111], "skip_default": 73, "skip_first_batch": 66, "skip_next": 
96, "slight": [102, 112], "slightli": [94, 106, 113], "slow": [31, 43, 102], "slower": 94, "small": [94, 102, 112], "smaller": [102, 111], "smell": 102, "smoke": 102, "smokehous": 102, "smoother": 107, "smoothli": 111, "so": [7, 15, 66, 102, 104, 107, 111, 112, 113, 114], "social": 102, "societi": 111, "soda": 111, "softmax": 55, "softmax_scal": 94, "softwar": [104, 107, 111, 113], "sole": 114, "solid": 111, "solv": 102, "some": [4, 15, 52, 66, 102, 105, 111, 112, 114], "someon": 111, "someth": [61, 66, 102, 111, 112], "sometim": [102, 111], "soon": [104, 113], "sop": [104, 113], "sophia": [3, 4, 35], "sophiag": 44, "sorri": [102, 104], "sort": [102, 111], "soul": 102, "sourc": [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 36, 37, 38, 39, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 55, 56, 57, 58, 59, 60, 61, 63, 64, 65, 66, 67, 68, 69, 70, 71, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 93, 94, 96, 97, 98, 100, 101, 103, 104, 111, 114], "source_dataset": 51, "sourdough": 102, "southern": 102, "sovereign": 102, "space": [66, 104, 111], "special": [13, 15, 16, 75, 104, 107, 111, 113, 114], "special_start": [75, 81, 107], "special_stopp": [75, 81, 107], "specif": [4, 102, 104, 111, 114], "specifi": [104, 105, 107, 108, 111], "speculativeinferenc": 55, "speed": 94, "speedi": 114, "spell": 112, "spend": 111, "sphinx": 3, "spice": 102, "spici": [102, 111], "spill": 94, "split": [5, 6, 66, 111, 112], "spot": 102, "src": [107, 111], "stabl": 66, "stage": [60, 102], "stai": [102, 104, 108, 111], "stand": 102, "standard": [10, 12, 19, 37, 48, 75], "start": [52, 66, 94, 104, 111, 112], "start_header_id": [104, 113], "start_n": 94, "start_of_turn": [104, 113], "starter": 107, "state": [25, 26, 30, 33, 37, 38, 39, 41, 44, 65, 66, 102, 111], "state_dict": [66, 98], "static": [16, 31, 43, 55, 66, 94], "step": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 36, 37, 38, 39, 41, 42, 43, 44, 45, 66, 105, 107, 109, 111], "stepsiz": 25, "stew": 102, "stext": 111, "still": [60, 66, 94, 102, 111, 112, 114], "stochast": 44, "storag": 114, "store": [4, 94, 111], "store_flo": 66, "storm": 111, "str": [4, 5, 6, 7, 13, 14, 15, 16, 50, 51, 55, 56, 58, 60, 61, 66, 67, 69, 70, 71, 73, 75, 81, 83, 87, 88, 96, 111], "strand": 111, "strategi": [13, 14, 51, 111], "stream": 4, "stream_infer": 55, "streamlin": 114, "street": 102, "strictli": 102, "stride_bb": 94, "stride_bh": 94, "stride_bm": 94, "stride_dkb": 94, "stride_dkh": 94, "stride_dkn": 94, "stride_dob": 94, "stride_doh": 94, "stride_dom": 94, "stride_dqb": 94, "stride_dqh": 94, "stride_dqm": 94, "stride_dvb": 94, "stride_dvh": 94, "stride_dvn": 94, "stride_kb": 94, "stride_kh": 94, "stride_kn": 94, "stride_ob": 94, "stride_oh": 94, "stride_om": 94, "stride_qb": 94, "stride_qh": 94, "stride_qm": 94, "stride_vb": 94, "stride_vh": 94, "stride_vn": 94, "string": [4, 5, 6, 13, 14, 16, 22, 55, 66, 69, 73, 75, 88, 104, 107], "stringformatt": [75, 107], "strip": [111, 113], "strong": 111, "stronger": 111, "strongest": 111, "strongli": 104, "structur": [5, 6, 50, 111, 112], "struggl": 102, "studi": [66, 111], "studio": 111, "stuf": 102, "stuff": [111, 112], "sturdier": [111, 112], "style": [96, 102], "sub": 102, "subarrai": 102, "subclass": [47, 49, 50, 51, 66], "subject": 114, "sublist_length": 58, "submit": 114, "subprocess": 73, "subsect": 111, "subtract": 51, "succe": 111, "success": 112, "suffer": 102, "sugar": 111, "suggest": [32, 42, 102, 111], "sum": [60, 66], "sum_": 
52, "summar": 102, "summari": [11, 12, 66], "superior": 111, "superstit": 111, "supervis": [7, 109], "supervisor": 111, "support": [13, 14, 55, 75, 94, 105, 106, 109, 112], "supported_dataset_typ": 55, "suprem": 102, "sure": [75, 81, 94, 102, 104, 111, 112], "surfac": 112, "surpass": [102, 114], "surpris": 102, "surprisingli": 112, "surround": 111, "suspect": 111, "switch": [66, 111], "sy": [104, 110, 113], "synonym": 102, "sysinfo1": [75, 81], "system": [75, 81, 83, 87, 96, 104, 107, 113], "system_formatt": [75, 81, 107], "system_messag": 113, "system_propmt": 104, "t": [7, 15, 37, 60, 65, 66, 71, 94, 100, 102, 104, 107, 111, 112, 113], "t3uwm8yp": 112, "tabl": [102, 105, 111], "taco": 102, "tag": [4, 66], "take": [13, 14, 52, 57, 66, 111], "talk": 111, "taller": [111, 112], "target": [7, 55, 66, 114], "target_cl": 56, "target_model_arg": [55, 56], "task": [1, 13, 14, 66, 102, 109, 111], "task_1": 105, "task_2": 105, "task_combin": 105, "task_guid": 102, "tast": 111, "teach": [111, 112, 114], "team": [104, 107, 113], "teaspoon": 111, "technic": 114, "techniqu": [104, 114], "technologi": 111, "tee": 105, "tell": [107, 111, 112], "temperatur": [4, 55, 61, 111], "templat": [4, 69, 75, 81], "template_nam": [75, 81, 107], "templatecompon": [75, 81, 107], "tend": [102, 111], "tensor": [4, 13, 16, 22, 33, 55, 60, 61, 66, 89, 90, 93], "tensor_parallel_s": 4, "term": [14, 15, 102, 111], "termin": [11, 12], "test": [5, 6, 52, 61, 66, 94, 102, 104, 106, 108, 111, 112, 114], "test_13": 104, "test_bleu": 66, "test_dataset": [5, 6, 66], "test_fil": 4, "test_flash_attn": 94, "test_flash_attn_triton_race_condit": 94, "test_siz": [5, 6], "text": [4, 7, 13, 14, 37, 52, 53, 55, 57, 88, 102, 104, 106, 107, 110, 111, 112, 114], "text2text": [5, 70, 71, 105, 109], "text2text_dataset_descript": 74, "text2text_dataset_detail": 74, "text2text_dataset_long_descrit": 74, "text_onli": [5, 21, 55, 70, 71, 104, 105, 111], "text_only_dataset_descript": 74, "text_only_dataset_detail": 74, "text_only_dataset_long_descrit": 74, "text_regression_model": [3, 16, 17], "text_to_scored_textlist": 51, "text_to_scored_textlist_dataset_descript": 74, "text_to_textlist_dataset_descript": 74, "text_to_textlist_tokenize_funct": 71, "textonli": 109, "textregressionmodel": [16, 21], "th": 52, "than": [32, 42, 94, 102, 108, 111], "thank": 111, "thankfulli": 111, "thei": [4, 13, 16, 66, 102, 104, 107, 111, 112], "them": [66, 102, 104, 105, 106, 107, 111, 112, 114], "therefor": [111, 114], "theses": 111, "thi": [3, 4, 5, 6, 7, 11, 12, 13, 14, 15, 16, 32, 37, 42, 57, 60, 66, 75, 88, 94, 102, 104, 105, 107, 108, 111, 112, 113, 114], "thing": 111, "think": [111, 112], "thoma": 111, "thoroughli": 114, "those": [66, 102, 111], "thoughtfulli": 114, "three": [52, 61, 102, 104, 111], "threshold": 55, "thrive": 102, "through": [66, 102], "thu": [15, 102, 107], "tillet": 94, "time": [66, 102, 104, 111, 112], "tip": [66, 104], "titl": 114, "tmp": 94, "tmp_trainer": 66, "to_dict": [5, 6], "to_gradio_chatbot": 96, "to_list": [5, 6], "todai": [107, 112], "todo": 110, "togeth": [53, 66, 111, 114], "tok_logg": [70, 71], "token": [3, 4, 6, 7, 8, 13, 14, 15, 16, 22, 50, 52, 55, 57, 60, 61, 66, 67, 69, 75, 81, 83, 87, 102, 104, 107, 110, 111, 112, 113], "token_dict": [70, 71], "token_id": 75, "token_per_step": 55, "token_type_id": 13, "tokenization_utils_bas": 66, "tokenize_batch_el": 60, "tokenize_funct": [70, 71], "tokenized_column_ord": [70, 71], "tokenized_dataset": [13, 14, 16, 53, 110], "tokenized_neg": 112, "tokenized_po": 112, 
"tokenizer_image_token": 7, "tokenizer_nam": 4, "told": 112, "tong": [0, 114], "too": [60, 102, 112], "tool": [75, 81, 83, 87, 104, 107, 114], "tool_1_desc": [75, 81], "tool_description_1": 104, "tool_description_2": 104, "tool_description_3": 104, "tool_description_x": 104, "toolbox": 114, "toolinferenc": 55, "toolkit": [102, 111, 114], "tools_formatt": [75, 81], "top": [55, 102, 111, 112], "top_k": 4, "top_p": [4, 55], "top_reward_percentag": [4, 111], "torch": [6, 7, 13, 15, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 36, 37, 38, 39, 41, 42, 43, 44, 45, 55, 57, 61, 66, 88, 89, 90, 93, 94, 100], "torch_dtyp": [4, 15], "torch_jit_model_ev": 66, "tortilla": 102, "total": [102, 112], "touch": 111, "toxoplasmosi": 102, "tr_loss": 66, "track": [66, 104, 111], "tradeoff": 108, "tradit": [102, 104, 107, 111, 113], "trail": [13, 16], "train": [4, 5, 6, 13, 14, 32, 36, 37, 42, 44, 51, 53, 57, 59, 61, 65, 66, 88, 102, 104, 105, 108, 111, 112, 114], "train_50": 104, "train_batch_s": 52, "train_convers": 108, "train_dataset": [5, 6, 51, 61, 66, 112], "train_ev": 61, "train_fil": 4, "train_on_prompt": 4, "train_test_split": [5, 6], "trainer": [57, 65, 66, 68], "trainer_callback": [61, 65, 66], "trainer_st": 66, "trainer_state_nam": 66, "trainer_util": [61, 66], "trainercallback": [61, 65, 66], "trainercontrol": 65, "trainerst": 65, "training_arg": [57, 65, 66], "training_args_nam": 66, "training_step": 66, "training_util": 66, "trainingargu": [4, 51, 61, 65, 66], "transform": [4, 7, 13, 14, 15, 16, 22, 51, 58, 60, 61, 65, 66, 67, 68, 70, 71, 75, 81, 83, 87, 110], "transform_dataset_in_plac": [51, 53, 58, 59], "tri": 111, "trial": [65, 66, 102, 111], "tribul": 102, "trick": 107, "trim": [104, 113], "triton": 94, "triton_flash_attent": [3, 92], "trl": 61, "troubl": [102, 111], "truck": 102, "true": [4, 5, 6, 13, 14, 16, 25, 33, 51, 52, 53, 58, 59, 60, 61, 66, 67, 69, 73, 75, 88, 111, 112], "truncat": [4, 60, 112], "truncate_to_model_max_length": 4, "truncation_mod": [60, 61], "truncation_sid": [4, 70, 71], "trust_coeffici": 37, "trust_remote_cod": 4, "try": [75, 102, 111, 112], "tunabl": [3, 13, 14, 16, 18, 49], "tunable_model": 110, "tunablemodel": [52, 53, 55, 59, 110], "tune": [1, 7, 13, 14, 49, 53, 59, 66, 102, 104, 108, 110, 111, 112], "tune_strategi": [13, 14, 16], "tuned_model": 110, "tupl": [4, 22, 34, 51, 58, 61, 66, 75, 81, 83, 87, 89, 90, 93], "turn": [102, 111], "tweak": 66, "two": [51, 52, 66, 75, 96, 102, 105, 111, 112], "type": [4, 5, 6, 9, 21, 66, 75, 102, 104, 105, 107, 111, 112, 114], "typeddict": 88, "typic": [11, 12, 61, 102, 111, 112], "u": [111, 114], "udpat": 22, "unabl": 102, "unclear": 112, "under": [4, 66, 102, 104, 105, 114], "understand": [102, 111, 114], "underw": 112, "unfair": 102, "unfinish": 111, "unfreez": 108, "uniform": 55, "union": [13, 16, 66, 73, 75], "unit": [75, 102, 111], "univers": 111, "unknown": 96, "unlock": 114, "unnecessari": 112, "unpack": 66, "unpleas": 102, "unseen": 114, "unsort": 102, "until": 111, "unus": 66, "unusu": 111, "up": [66, 94, 102, 111, 112], "upcom": 114, "updat": [66, 94, 104, 108, 111, 112, 113], "update_custom_config": 98, "update_hessian": 44, "upload": 66, "upscal": 102, "url": [66, 114], "us": [4, 5, 6, 10, 12, 13, 14, 15, 16, 19, 22, 32, 34, 41, 42, 48, 52, 55, 61, 66, 69, 73, 74, 75, 81, 88, 94, 102, 104, 105, 106, 108, 109, 111, 112, 114], "usa": 102, "usag": [11, 12, 102], "use_acceler": [4, 13, 14, 15, 16], "use_accelerator_for_evalu": 4, "use_apex": 66, "use_auth_token": 4, "use_beam_search": 4, "use_cach": [22, 89, 90, 
91, 93], "use_cpu_amp": 66, "use_cuda_amp": 66, "use_customized_optim": 4, "use_dpo_data_col": 61, "use_fast": 51, "use_fast_token": 4, "use_flash_attent": 4, "use_image_start_end": 4, "use_int8": 4, "use_lisa": 4, "use_lora": 4, "use_mtim": 66, "use_prompt_cach": [4, 22], "use_qlora": 4, "use_ram_optimized_load": [4, 105], "use_trunc": [70, 71], "use_tune_checkpoint": 66, "use_vllm": [4, 13, 15, 16, 58], "use_wandb": [4, 52], "user": [1, 75, 81, 102, 104, 105, 107, 111, 112, 113, 114], "user_formatt": [75, 81, 107], "user_input_1": 104, "user_input_2": 104, "user_message_0": 113, "user_message_1": 113, "usernam": 105, "usml": 114, "usr_nam": 111, "usual": 102, "util": [3, 4, 6, 7, 8, 13, 16, 54, 57, 58, 69, 70, 71, 102, 107, 112, 114], "v": [37, 51, 94, 107], "v_": 37, "valid": 66, "validation_fil": 4, "validation_split_percentag": [4, 111, 112], "valu": [4, 55, 66, 73, 75, 91, 105, 111], "value_1": [5, 6, 104], "value_2": [5, 6, 104], "value_3": 104, "varianc": 41, "variant": 33, "varieti": 102, "variou": [13, 14, 66, 104, 114], "ve": [94, 104, 111], "veget": [102, 111], "vegetarian": 102, "veloc": 37, "ventil": 102, "verbos": 52, "veri": [107, 111, 112], "verifi": [55, 114], "versa": 51, "version": [2, 3, 4, 8, 37, 41, 94, 96, 102], "via": [4, 102, 111], "vibrant": 114, "vice": 51, "victorinox": 111, "vicuna": 102, "video": 111, "videogam": 111, "view_func": [31, 43], "vigil": 111, "virginia": 102, "viru": 102, "vision": 111, "vision2seq_model": [3, 17], "vision_encod": [3, 17], "vision_feature_select": 22, "vision_model_from_pretrain": 22, "vision_select_lay": 4, "vision_tow": 23, "vision_tower_cfg": [23, 24], "vision_tower_nam": 23, "visit": 102, "vismodelargu": 4, "visual": 111, "vllm": [4, 13, 15, 16, 69], "vllm_gpu_memory_util": [4, 15], "vllm_inferenc": [3, 54], "vllm_inference_batch_s": 4, "vllm_tensor_parallel_s": [4, 15], "vllminferenc": 69, "vllminferenceresultwithinput": [13, 69, 88], "vocabulari": 102, "w_": 52, "w_i": 52, "wa": [33, 102, 111, 112], "wai": [10, 12, 15, 19, 48, 66, 75, 102, 104, 105, 106, 111, 112], "wait": 102, "walnut": 102, "wandb": 112, "want": [66, 105, 107, 111, 112], "warmup": [32, 42], "warmup_step": [4, 32, 42], "warn": 66, "washington": 102, "wat": 102, "watch": 66, "wd_ratio": [31, 43], "we": [51, 60, 61, 66, 75, 94, 102, 104, 105, 107, 109, 110, 111, 112, 113, 114], "weather": [104, 111], "wei": [0, 114], "weight": [4, 31, 37, 43, 52, 114], "weight_decai": [4, 25, 26, 28, 30, 31, 32, 33, 34, 36, 37, 38, 39, 41, 42, 43, 44, 45], "weight_decoupl": 25, "weight_lr_pow": [32, 42], "weixiong5237": 112, "welcom": [111, 114], "well": [66, 102, 104, 111, 114], "weqweasda": 111, "were": [111, 112], "what": [107, 111, 112], "when": [4, 13, 16, 32, 42, 51, 55, 66, 69, 73, 75, 102, 107, 111, 112], "whenev": 112, "where": [4, 5, 6, 13, 37, 52, 69, 102, 104, 111, 112, 114], "whether": [4, 7, 13, 14, 16, 66, 69, 73, 111, 114], "which": [4, 5, 6, 13, 14, 47, 49, 50, 51, 52, 60, 66, 75, 102, 104, 111, 112, 114], "while": [66, 75, 94, 102, 114], "white": 102, "who": [104, 107, 111, 113, 114], "whole": 114, "whose": 111, "why": [107, 111], "wide": [102, 114], "width": 22, "wild": 112, "willing": 111, "window": [102, 111], "window_length": 52, "winogrand": 102, "wipe": 66, "wise": 37, "wish": 66, "witch": 111, "witchcraft": 111, "with_deepspe": 14, "with_qform": [4, 22], "within": [55, 107], "without": [13, 16, 66, 102, 111, 112, 113], "won": 111, "word": [102, 107, 111, 112], "wordi": 112, "work": [66, 94, 102, 111, 112, 114], "worker_heart_beat_interv": 74, 
"workflow": 114, "workspace_path": 56, "world": [13, 102, 111, 112], "world_siz": [52, 55, 58], "wors": 111, "worst": 51, "would": [13, 69, 94, 102, 107, 111, 112], "wrap": 66, "wrapper": [13, 14, 66], "write": [52, 102, 112], "w\u00fcsthof": 111, "x": [31, 43, 100, 102, 111, 112], "xiong": [0, 114], "xiongwei": 111, "y": [31, 43, 105, 114], "ye": [102, 111, 112], "year": [102, 111, 112, 114], "yet": [13, 14, 60, 75, 94], "yi": [3, 81, 104], "yi1_5": 104, "yi1_5_templ": [81, 86], "yogi": [3, 4, 35], "you": [32, 42, 66, 75, 94, 102, 104, 105, 106, 107, 111, 112, 113, 114], "your": [66, 109, 111, 112, 113], "your_conversation_dataset": 107, "your_model": 107, "your_templ": 107, "your_template_fil": 107, "your_template_nam": 107, "yourself": 102, "zephyr": [3, 81, 104], "zephyr_templ": [81, 87], "zephyrconversationtempl": 87, "zero": 66, "zhang": [0, 114]}, "titles": ["Contributors", "Changelog", "About", "API Reference", "lmflow.args", "lmflow.datasets.dataset", "lmflow.datasets", "lmflow.datasets.multi_modal_dataset", "lmflow", "lmflow.models.auto_model", "lmflow.models.base_model", "lmflow.models.decoder_model", "lmflow.models.encoder_decoder_model", "lmflow.models.hf_decoder_model", "lmflow.models.hf_encoder_decoder_model", "lmflow.models.hf_model_mixin", "lmflow.models.hf_text_regression_model", "lmflow.models", "lmflow.models.interfaces", "lmflow.models.interfaces.tunable", "lmflow.models.regression_model", "lmflow.models.text_regression_model", "lmflow.models.vision2seq_model", "lmflow.models.vision_encoder.clip_encoder", "lmflow.models.vision_encoder", "lmflow.optim.adabelief", "lmflow.optim.adabound", "lmflow.optim.adadelta", "lmflow.optim.adagrad", "lmflow.optim.adam", "lmflow.optim.adamax", "lmflow.optim.adamp", "lmflow.optim.adamw_schedule_free", "lmflow.optim.adan", "lmflow.optim.dummy", "lmflow.optim", "lmflow.optim.lamb", "lmflow.optim.lars", "lmflow.optim.nadam", "lmflow.optim.novograd", "lmflow.optim.optimizers", "lmflow.optim.radam", "lmflow.optim.sgd_schedule_free", "lmflow.optim.sgdp", "lmflow.optim.sophia", "lmflow.optim.yogi", "lmflow.pipeline.auto_pipeline", "lmflow.pipeline.base_aligner", "lmflow.pipeline.base_pipeline", "lmflow.pipeline.base_tuner", "lmflow.pipeline.dpo_aligner", "lmflow.pipeline.dpov2_aligner", "lmflow.pipeline.evaluator", "lmflow.pipeline.finetuner", "lmflow.pipeline", "lmflow.pipeline.inferencer", "lmflow.pipeline.iterative_dpo_aligner", "lmflow.pipeline.raft_aligner", "lmflow.pipeline.rm_inferencer", "lmflow.pipeline.rm_tuner", "lmflow.pipeline.utils.dpov2_dataprocessor", "lmflow.pipeline.utils.dpov2_trainer", "lmflow.pipeline.utils", "lmflow.pipeline.utils.memory_safe_dpov2_align", "lmflow.pipeline.utils.memory_safe_vllm_inference", "lmflow.pipeline.utils.peft_trainer", "lmflow.pipeline.utils.raft_trainer", "lmflow.pipeline.utils.rm_dataprocessor", "lmflow.pipeline.utils.rm_trainer", "lmflow.pipeline.vllm_inferencer", "lmflow.tokenization.hf_decoder_model", "lmflow.tokenization.hf_text_regression_model", "lmflow.tokenization", "lmflow.utils.common", "lmflow.utils.constants", "lmflow.utils.conversation_template.base", "lmflow.utils.conversation_template.chatglm", "lmflow.utils.conversation_template.chatml", "lmflow.utils.conversation_template.deepseek", "lmflow.utils.conversation_template.fox", "lmflow.utils.conversation_template.gemma", "lmflow.utils.conversation_template", "lmflow.utils.conversation_template.internlm", "lmflow.utils.conversation_template.llama", "lmflow.utils.conversation_template.phi", "lmflow.utils.conversation_template.qwen", 
"lmflow.utils.conversation_template.yi", "lmflow.utils.conversation_template.zephyr", "lmflow.utils.data_utils", "lmflow.utils.flash_attention.bloom_flash_attention", "lmflow.utils.flash_attention.gpt2_flash_attention", "lmflow.utils.flash_attention.gpt_neo_flash_attention", "lmflow.utils.flash_attention", "lmflow.utils.flash_attention.llama_flash_attention", "lmflow.utils.flash_attention.triton_flash_attention", "lmflow.utils", "lmflow.utils.llava_conversation_lib", "lmflow.utils.model", "lmflow.utils.multimodal", "lmflow.utils.position_interpolation", "lmflow.utils.position_interpolation.llama_rope_scaled_monkey_patch", "lmflow.version", "LMFlow Benchmark: An Automatic Evaluation Framework for Open-Source LLMs", "Blogs", "Dataset", "LMFlow Benchmark Guide", "Checkpoints", "Customize Conversation Template", "Finetuning", "Examples", "Finetune", "RAFT", "Reward Modeling", "Supported Conversation Template", "LMFlow"], "titleterms": {"0": 1, "1": [1, 105, 107, 111, 112, 113], "2": [105, 107, 111, 112, 113], "2023": [1, 103], "28": 1, "3": [107, 111, 113], "4": 107, "5": [107, 113], "8x22b": 113, "8x7b": 113, "about": 2, "adabelief": 25, "adabound": 26, "adadelta": 27, "adagrad": 28, "adam": 29, "adamax": 30, "adamp": 31, "adamw": 108, "adamw_schedule_fre": 32, "adan": 33, "adapt": 108, "algorithm": 111, "align": 111, "an": 102, "api": 3, "arg": 4, "attribut": [4, 5, 8, 13, 14, 15, 16, 46, 51, 53, 55, 56, 57, 58, 59, 60, 61, 63, 64, 66, 67, 69, 70, 71, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 94, 96, 97, 101], "auto_model": 9, "auto_pipelin": 46, "automat": 102, "base": 75, "base_align": 47, "base_model": 10, "base_pipelin": 48, "base_tun": 49, "benchmark": [102, 105], "blog": 103, "bloom_flash_attent": 89, "build": 107, "changelog": 1, "chat": 102, "chatglm": [76, 113], "chatml": [77, 113], "checkpoint": [106, 114], "choos": 107, "citat": 114, "class": [4, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 16, 19, 20, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 36, 37, 38, 39, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 55, 56, 57, 58, 59, 60, 61, 65, 66, 67, 68, 69, 75, 80, 81, 83, 87, 88, 94, 96, 100], "clip_encod": 23, "common": 73, "commonsens": 102, "conclus": 102, "constant": 74, "content": [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 36, 37, 38, 39, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 55, 56, 57, 58, 59, 60, 61, 63, 64, 65, 66, 67, 68, 69, 70, 71, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 93, 94, 96, 97, 98, 100, 101, 114], "contributor": 0, "convers": [104, 107, 108, 113], "conversation_templ": [75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87], "creat": 105, "custom": [104, 107], "data": [104, 109], "data_util": 88, "dataset": [5, 6, 7, 104, 105, 111], "decoder_model": 11, "decompos": 107, "deepseek": [78, 113], "descript": 111, "detail": 104, "disclaim": 114, "dpo_align": 50, "dpov2_align": 51, "dpov2_dataprocessor": 60, "dpov2_train": 61, "dummi": 34, "encoder_decoder_model": 12, "end": 111, "evalu": [52, 102, 105, 109], "exampl": [109, 111, 112], "featur": 114, "file": 105, "finetun": [53, 108, 109, 110, 111, 112], "flash_attent": [89, 90, 91, 92, 93, 94], "follow": 102, "format": 104, "formatt": 107, "fox": 79, "framework": 102, "full": 108, "function": [7, 23, 24, 33, 46, 50, 55, 63, 64, 68, 70, 71, 73, 88, 89, 90, 91, 93, 94, 97, 98, 100], "gemma": [80, 113], "gener": 104, "get": 111, "gpt2_flash_attent": 90, "gpt_neo_flash_attent": 91, 
"guid": 105, "hf_decoder_model": [13, 70], "hf_encoder_decoder_model": 14, "hf_model_mixin": 15, "hf_text_regression_model": [16, 71], "hyper": 111, "import": 108, "indic": 114, "infer": 109, "inferenc": 55, "instal": 114, "instruct": [102, 114], "interfac": [18, 19], "internlm": 82, "internlm2": 113, "introduct": [102, 111, 112, 114], "iterative_dpo_align": 56, "lamb": 36, "lar": 37, "layerwis": 108, "lisa": 108, "llama": [83, 106, 113], "llama_flash_attent": 93, "llama_rope_scaled_monkey_patch": 100, "llava_conversation_lib": 96, "llm": 102, "lm": 105, "lmflow": [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 105, 114], "lora": [108, 111], "low": 108, "mar": 1, "memory_safe_dpov2_align": 63, "memory_safe_vllm_infer": 64, "merg": [108, 111], "metric": 102, "mixtral": 113, "model": [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 97, 111, 112], "modul": [4, 5, 7, 9, 10, 11, 12, 13, 14, 15, 16, 19, 20, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 36, 37, 38, 39, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 55, 56, 57, 58, 59, 60, 61, 63, 64, 65, 66, 67, 68, 69, 70, 71, 73, 74, 75, 76, 77, 78, 79, 80, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 93, 94, 96, 97, 98, 100, 101], "multi_modal_dataset": 7, "multimod": 98, "nadam": 38, "nll": 105, "note": 111, "notic": 113, "novograd": 39, "open": 102, "optim": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45], "overview": 111, "packag": [6, 8, 24, 81], "pair": 104, "paramet": [108, 111], "peft_train": 65, "perform": 102, "phi": [84, 113], "pipelin": [46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69], "position_interpol": [99, 100], "prepar": 109, "progress": [104, 113], "proper": 107, "qwen": [85, 113], "radam": 41, "raft": 111, "raft_align": 57, "raft_train": 66, "rank": 108, "refer": [3, 102], "regist": 107, "registr": 105, "regression_model": 20, "return": 15, "reward": [111, 112], "rm_dataprocessor": 67, "rm_inferenc": 58, "rm_trainer": 68, "rm_tuner": 59, "sampl": 108, "set": 105, "setup": 105, "sft": [111, 112], "sgd_schedule_fre": 42, "sgdp": 43, "sophia": 44, "sourc": 102, "step": 112, "submodul": [6, 8, 17, 18, 24, 35, 54, 62, 72, 81, 92, 95, 99], "subpackag": [8, 17, 54, 95], "supervis": [111, 112], "support": [104, 113, 114], "tabl": 114, "task": [105, 114], "templat": [104, 107, 108, 113], "text2text": 104, "text_regression_model": 21, "textonli": 104, "token": [70, 71, 72], "triton_flash_attent": 94, "tunabl": 19, "tune": 114, "us": 107, "util": [60, 61, 62, 63, 64, 65, 66, 67, 68, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100], "version": [1, 101], "vision": 114, "vision2seq_model": 22, "vision_encod": [23, 24], "vllm_inferenc": 69, "weight": 108, "work": [104, 113], "yi": [86, 113], "yogi": 45, "your": [105, 107], "zephyr": [87, 113]}}) \ No newline at end of file