From 743f44b25294fe6489bf6feab5a74220808edaa1 Mon Sep 17 00:00:00 2001 From: Shuhaib T U Date: Sun, 5 May 2024 19:30:28 +0530 Subject: [PATCH 1/3] feat: version update --- src/ragrank/_version.py | 2 +- src/ragrank/constants.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/ragrank/_version.py b/src/ragrank/_version.py index 7e1abed..33e2e6c 100644 --- a/src/ragrank/_version.py +++ b/src/ragrank/_version.py @@ -1 +1 @@ -__version__: str = "0.0.6" +__version__: str = "0.0.7" diff --git a/src/ragrank/constants.py b/src/ragrank/constants.py index 3e7c8f1..608c8e7 100644 --- a/src/ragrank/constants.py +++ b/src/ragrank/constants.py @@ -16,6 +16,6 @@ RESPONSE_FIELD, ] -SERVER_URL: str = "https://ragrank-trace.vercel.app/" +SERVER_URL: str = "https://ragrank-trace.onrender.com/api/" DEBUG_MODE: str = "DEBUG_MODE_ON" REQUEST_TIME_OUT: float = 10.0 From ecce2208609a3b8dd4fda3079542d895c0e949a9 Mon Sep 17 00:00:00 2001 From: Shuhaib T U Date: Sun, 5 May 2024 19:32:31 +0530 Subject: [PATCH 2/3] docs: updated code in the documentation --- README.md | 4 ++-- docs/api_reference/conf.py | 2 +- docs/docs/conf.py | 2 +- docs/docs/evaluation/result.md | 2 +- docs/docs/evaluation/with_llm.md | 4 ++-- docs/docs/evaluation/with_metrics.md | 2 +- docs/docs/get_started/basic_evaluation.md | 2 +- .../context_related/context_relevancy.md | 2 +- .../context_related/context_utilization.md | 2 +- .../metrics/custom_metrics/custom_instruct.md | 2 +- .../metrics/custom_metrics/custom_metric.md | 2 +- .../response_related/response_conciseness.md | 2 +- .../response_related/response_relevancy.md | 2 +- docs/docs/more/contributing.md | 21 +++++++++++++------ 14 files changed, 30 insertions(+), 21 deletions(-) diff --git a/README.md b/README.md index c8cbdd7..b8b7565 100644 --- a/README.md +++ b/README.md @@ -30,7 +30,7 @@ Documentation | API reference | Quickstart | - Join the Community + Join the Community

@@ -73,7 +73,7 @@ data = from_dict({ }) # Evaluate the response relevance metric -result = evaluate(data=data, metrics=[response_relevancy]) +result = evaluate(data, metrics=[response_relevancy]) # Display the evaluation results result.to_dataframe() diff --git a/docs/api_reference/conf.py b/docs/api_reference/conf.py index fe48e28..caae3cb 100644 --- a/docs/api_reference/conf.py +++ b/docs/api_reference/conf.py @@ -8,7 +8,7 @@ project = "Ragrank" copyright = "2024, Izam Mohammed" author = "Izam Mohammed" -release = "0.0.6" +release = "0.0.7" # -- General configuration --------- diff --git a/docs/docs/conf.py b/docs/docs/conf.py index 620398e..b46eef8 100644 --- a/docs/docs/conf.py +++ b/docs/docs/conf.py @@ -9,7 +9,7 @@ project = "Ragrank" copyright = "2024, Izam Mohammed" author = "Izam Mohammed" -release = "0.0.6" +release = "0.0.7" # -- General configuration ------ diff --git a/docs/docs/evaluation/result.md b/docs/docs/evaluation/result.md index 66f26c0..db73094 100644 --- a/docs/docs/evaluation/result.md +++ b/docs/docs/evaluation/result.md @@ -17,7 +17,7 @@ data = DataNode( ) result:EvalResult = evaluate( - dataset=data, + data, ) print(result.to_dict()) diff --git a/docs/docs/evaluation/with_llm.md b/docs/docs/evaluation/with_llm.md index 74149e3..fdaf7cd 100644 --- a/docs/docs/evaluation/with_llm.md +++ b/docs/docs/evaluation/with_llm.md @@ -29,7 +29,7 @@ llm_configuration = LLMConfig( llm = OpenaiLLM(llm_config=llm_configuration) result = evaluate( - dataset=data, + data, llm=llm, ) @@ -72,7 +72,7 @@ data = DataNode( ) result = evaluate( - dataset = data, + data, llm = ragrank_llm, ) ``` diff --git a/docs/docs/evaluation/with_metrics.md b/docs/docs/evaluation/with_metrics.md index e9d37ba..3847180 100644 --- a/docs/docs/evaluation/with_metrics.md +++ b/docs/docs/evaluation/with_metrics.md @@ -45,7 +45,7 @@ data = DataNode( # evaluating the metrics result = evaluate( - data = data, + data, metrics=[ response_relevancy, response_conciseness, diff --git a/docs/docs/get_started/basic_evaluation.md b/docs/docs/get_started/basic_evaluation.md index 4aee108..3fe19d4 100644 --- a/docs/docs/get_started/basic_evaluation.md +++ b/docs/docs/get_started/basic_evaluation.md @@ -29,7 +29,7 @@ data = from_dict({ }) # Evaluate the response relevance metric -result = evaluate(data=data, metrics=[response_relevancy]) +result = evaluate(data, metrics=[response_relevancy]) # Display the evaluation results result.to_dataframe() diff --git a/docs/docs/metrics/context_related/context_relevancy.md b/docs/docs/metrics/context_related/context_relevancy.md index 5188101..fafa00b 100644 --- a/docs/docs/metrics/context_related/context_relevancy.md +++ b/docs/docs/metrics/context_related/context_relevancy.md @@ -24,7 +24,7 @@ data = DataNode( ) result = evaluate( - dataset=data, + data, metrics=[ response_relevancy, ] diff --git a/docs/docs/metrics/context_related/context_utilization.md b/docs/docs/metrics/context_related/context_utilization.md index b4fe6ea..b162dbd 100644 --- a/docs/docs/metrics/context_related/context_utilization.md +++ b/docs/docs/metrics/context_related/context_utilization.md @@ -25,7 +25,7 @@ data = DataNode( ) result = evaluate( - dataset=data, + data, metrics=[ response_relevancy, ] diff --git a/docs/docs/metrics/custom_metrics/custom_instruct.md b/docs/docs/metrics/custom_metrics/custom_instruct.md index 00dc7d8..871ebac 100644 --- a/docs/docs/metrics/custom_metrics/custom_instruct.md +++ b/docs/docs/metrics/custom_metrics/custom_instruct.md @@ -57,7 +57,7 @@ data = DataNode( )
result = evaluate( - dataset=data, + data, metrics=[grammar_checker], ) print(result) diff --git a/docs/docs/metrics/custom_metrics/custom_metric.md b/docs/docs/metrics/custom_metrics/custom_metric.md index fddadaa..3926aac 100644 --- a/docs/docs/metrics/custom_metrics/custom_metric.md +++ b/docs/docs/metrics/custom_metrics/custom_metric.md @@ -42,7 +42,7 @@ data = DataNode( ) result = evaluate( - dataset=data, + data, metrics=[ my_metric, ] diff --git a/docs/docs/metrics/response_related/response_conciseness.md b/docs/docs/metrics/response_related/response_conciseness.md index 76100d5..ee8563b 100644 --- a/docs/docs/metrics/response_related/response_conciseness.md +++ b/docs/docs/metrics/response_related/response_conciseness.md @@ -25,7 +25,7 @@ data = DataNode( ) result = evaluate( - dataset=data, + data, metrics=[ response_conciseness, ] diff --git a/docs/docs/metrics/response_related/response_relevancy.md b/docs/docs/metrics/response_related/response_relevancy.md index d89861e..951272b 100644 --- a/docs/docs/metrics/response_related/response_relevancy.md +++ b/docs/docs/metrics/response_related/response_relevancy.md @@ -25,7 +25,7 @@ data = DataNode( ) result = evaluate( - dataset=data, + data, metrics=[ response_relevancy, ] diff --git a/docs/docs/more/contributing.md b/docs/docs/more/contributing.md index 5d8649f..7740dc5 100644 --- a/docs/docs/more/contributing.md +++ b/docs/docs/more/contributing.md @@ -34,33 +34,42 @@ **5. Test Your Changes:** - Run tests to ensure your changes haven't introduced any regressions: ``` - pytest + make test ``` -**6. Commit Your Changes:** +**6. Lint and Format Your Code:** + - Format the code: + ``` + make format + ``` + - Run the linter: + ``` + make lint + ``` +**7. Commit Your Changes:** - Once you're satisfied with your changes, commit them: ``` git add . git commit -m "Add your descriptive commit message here" ``` -**7. Push Changes to Your Fork:** +**8. Push Changes to Your Fork:** - Push your changes to your forked repository: ``` git push origin my-feature ``` -**8. Create a Pull Request:** +**9. Create a Pull Request:** - Go to your forked repository on GitHub. - Click on the "Compare & pull request" button next to your branch. - Fill out the pull request form with a descriptive title and details of your changes. - Click on the "Create pull request" button to submit your contribution. -**9. Collaborate and Iterate:** +**10. Collaborate and Iterate:** - Engage with reviewers and address any feedback or requests for changes. - Iterate on your code until it meets the project's standards and requirements. -**10. Stay Updated:** +**11. Stay Updated:** - Keep an eye on the pull request for any updates or requests from maintainers. - Stay engaged with the Ragrank community and contribute to discussions and future development efforts. From ef69d4de1a16fad8698eec2899b54c7d2e41f7bb Mon Sep 17 00:00:00 2001 From: Shuhaib T U Date: Sun, 5 May 2024 19:33:07 +0530 Subject: [PATCH 3/3] feat: version updated in pyproject.toml --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 15c0686..ee3d21c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "ragrank" -version = "0.0.6" +version = "0.0.7" description = "An evaluation library for RAG models" authors = ["Izam Mohammed "] license = "Apache-2.0"
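
Note for anyone updating code against 0.0.7: across these patches the docs converge on passing the dataset to `evaluate()` positionally instead of through the old `dataset=` / `data=` keyword. Below is a minimal sketch of the updated call pattern; the import paths and the `question`/`context`/`response` field names are assumptions based on the Ragrank README, not something this diff shows directly.

```python
# Sketch of the 0.0.7-style evaluate() call. Import paths and the
# "question"/"context"/"response" field names are assumed, not taken
# from this patch series.
from ragrank import evaluate
from ragrank.dataset import from_dict
from ragrank.metric import response_relevancy

# Build a single-row dataset; the values are placeholders.
data = from_dict({
    "question": "What is the capital of France?",
    "context": ["Paris is the capital and largest city of France."],
    "response": "The capital of France is Paris.",
})

# The dataset is now the first positional argument, not a keyword.
result = evaluate(data, metrics=[response_relevancy])
result.to_dataframe()
```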