diff --git a/README.md b/README.md
index c8cbdd7..b8b7565 100644
--- a/README.md
+++ b/README.md
@@ -30,7 +30,7 @@
 Documentation
 |
 API reference
 |
 Quickstart
 |
-Join the Community
+Join the Community
@@ -73,7 +73,7 @@ data = from_dict({
})
# Evaluate the response relevance metric
-result = evaluate(data=data, metrics=[response_relevancy])
+result = evaluate(data, metrics=[response_relevancy])
# Display the evaluation results
result.to_dataframe()
diff --git a/docs/api_reference/conf.py b/docs/api_reference/conf.py
index fe48e28..caae3cb 100644
--- a/docs/api_reference/conf.py
+++ b/docs/api_reference/conf.py
@@ -8,7 +8,7 @@
project = "Ragrank"
copyright = "2024, Izam Mohammed"
author = "Izam Mohammed"
-release = "0.0.6"
+release = "0.0.7"
# -- General configuration ---------
diff --git a/docs/docs/conf.py b/docs/docs/conf.py
index 620398e..b46eef8 100644
--- a/docs/docs/conf.py
+++ b/docs/docs/conf.py
@@ -9,7 +9,7 @@
project = "Ragrank"
copyright = "2024, Izam Mohammed"
author = "Izam Mohammed"
-release = "0.0.6"
+release = "0.0.7"
# -- General configuration ------
diff --git a/docs/docs/evaluation/result.md b/docs/docs/evaluation/result.md
index 66f26c0..db73094 100644
--- a/docs/docs/evaluation/result.md
+++ b/docs/docs/evaluation/result.md
@@ -17,7 +17,7 @@ data = DataNode(
)
result:EvalResult = evaluate(
- dataset=data,
+ data,
)
print(result.to_dict())
diff --git a/docs/docs/evaluation/with_llm.md b/docs/docs/evaluation/with_llm.md
index 74149e3..fdaf7cd 100644
--- a/docs/docs/evaluation/with_llm.md
+++ b/docs/docs/evaluation/with_llm.md
@@ -29,7 +29,7 @@ llm_configuration = LLMConfig(
llm = OpenaiLLM(llm_config=llm_configuration)
result = evaluate(
- dataset=data,
+ data,
llm=llm,
)
@@ -72,7 +72,7 @@ data = DataNode(
)
result = evaluate(
- dataset = data,
+ data,
llm = ragrank_llm,
)
```
diff --git a/docs/docs/evaluation/with_metrics.md b/docs/docs/evaluation/with_metrics.md
index e9d37ba..3847180 100644
--- a/docs/docs/evaluation/with_metrics.md
+++ b/docs/docs/evaluation/with_metrics.md
@@ -45,7 +45,7 @@ data = DataNode(
# evaluating the metrics
result = evaluate(
- data = data,
+ data,
metrics=[
response_relevancy,
response_conciseness,
diff --git a/docs/docs/get_started/basic_evaluation.md b/docs/docs/get_started/basic_evaluation.md
index 4aee108..3fe19d4 100644
--- a/docs/docs/get_started/basic_evaluation.md
+++ b/docs/docs/get_started/basic_evaluation.md
@@ -29,7 +29,7 @@ data = from_dict({
})
# Evaluate the response relevance metric
-result = evaluate(data=data, metrics=[response_relevancy])
+result = evaluate(data, metrics=[response_relevancy])
# Display the evaluation results
result.to_dataframe()
diff --git a/docs/docs/metrics/context_related/context_relevancy.md b/docs/docs/metrics/context_related/context_relevancy.md
index 5188101..fafa00b 100644
--- a/docs/docs/metrics/context_related/context_relevancy.md
+++ b/docs/docs/metrics/context_related/context_relevancy.md
@@ -24,7 +24,7 @@ data = DataNode(
)
result = evaluate(
- dataset=data,
+ data,
metrics=[
response_relevancy,
]
diff --git a/docs/docs/metrics/context_related/context_utilization.md b/docs/docs/metrics/context_related/context_utilization.md
index b4fe6ea..b162dbd 100644
--- a/docs/docs/metrics/context_related/context_utilization.md
+++ b/docs/docs/metrics/context_related/context_utilization.md
@@ -25,7 +25,7 @@ data = DataNode(
)
result = evaluate(
- dataset=data,
+ data,
metrics=[
response_relevancy,
]
diff --git a/docs/docs/metrics/custom_metrics/custom_instruct.md b/docs/docs/metrics/custom_metrics/custom_instruct.md
index 00dc7d8..871ebac 100644
--- a/docs/docs/metrics/custom_metrics/custom_instruct.md
+++ b/docs/docs/metrics/custom_metrics/custom_instruct.md
@@ -57,7 +57,7 @@ data = DataNode(
)
result = evaluate(
- dataset=data,
+ data,
metrics=[grammar_checker],
)
print(result)
diff --git a/docs/docs/metrics/custom_metrics/custom_metric.md b/docs/docs/metrics/custom_metrics/custom_metric.md
index fddadaa..3926aac 100644
--- a/docs/docs/metrics/custom_metrics/custom_metric.md
+++ b/docs/docs/metrics/custom_metrics/custom_metric.md
@@ -42,7 +42,7 @@ data = DataNode(
)
result = evaluate(
- dataset=data,
+ data,
metrics=[
my_metric,
]
diff --git a/docs/docs/metrics/response_related/response_conciseness.md b/docs/docs/metrics/response_related/response_conciseness.md
index 76100d5..ee8563b 100644
--- a/docs/docs/metrics/response_related/response_conciseness.md
+++ b/docs/docs/metrics/response_related/response_conciseness.md
@@ -25,7 +25,7 @@ data = DataNode(
)
result = evaluate(
- dataset=data,
+ data,
metrics=[
response_conciseness,
]
diff --git a/docs/docs/metrics/response_related/response_relevancy.md b/docs/docs/metrics/response_related/response_relevancy.md
index d89861e..951272b 100644
--- a/docs/docs/metrics/response_related/response_relevancy.md
+++ b/docs/docs/metrics/response_related/response_relevancy.md
@@ -25,7 +25,7 @@ data = DataNode(
)
result = evaluate(
- dataset=data,
+ data,
metrics=[
response_relevancy,
]
diff --git a/docs/docs/more/contributing.md b/docs/docs/more/contributing.md
index 5d8649f..7740dc5 100644
--- a/docs/docs/more/contributing.md
+++ b/docs/docs/more/contributing.md
@@ -34,33 +34,42 @@
**5. Test Your Changes:**
- Run tests to ensure your changes haven't introduced any regressions:
```
- pytest
+ make test
```
-**6. Commit Your Changes:**
+**6. Linting and formatting:**
+ - Format the code
+ ```
+ make format
+ ```
+ - Check the linting
+ ```
+ make lint
+ ```
+**7. Commit Your Changes:**
- Once you're satisfied with your changes, commit them:
```
git add .
git commit -m "Add your descriptive commit message here"
```
-**7. Push Changes to Your Fork:**
+**8. Push Changes to Your Fork:**
- Push your changes to your forked repository:
```
git push origin my-feature
```
-**8. Create a Pull Request:**
+**9. Create a Pull Request:**
- Go to your forked repository on GitHub.
- Click on the "Compare & pull request" button next to your branch.
- Fill out the pull request form with a descriptive title and details of your changes.
- Click on the "Create pull request" button to submit your contribution.
-**9. Collaborate and Iterate:**
+**10. Collaborate and Iterate:**
- Engage with reviewers and address any feedback or requests for changes.
- Iterate on your code until it meets the project's standards and requirements.
-**10. Stay Updated:**
+**11. Stay Updated:**
- Keep an eye on the pull request for any updates or requests from maintainers.
- Stay engaged with the Ragrank community and contribute to discussions and future development efforts.
diff --git a/pyproject.toml b/pyproject.toml
index 15c0686..ee3d21c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "ragrank"
-version = "0.0.6"
+version = "0.0.7"
description = "An evaluation library for RAG models"
authors = ["Izam Mohammed