diff --git a/webapp/api/api/views.py b/webapp/api/api/views.py
index 2bcb8764..e4af3c97 100644
--- a/webapp/api/api/views.py
+++ b/webapp/api/api/views.py
@@ -636,7 +636,18 @@ def model_loaded(_):
def metrics(request):
p_ids = request.GET.get('projectIds').split(',')
projects = ProjectAnnotateEntities.objects.filter(id__in=p_ids)
- # assume projects all use the same model for eval purposes.
+
+ # provide warning of inconsistent models used or for models that are not loaded.
+ p_cdbs = set(p.concept_db for p in projects)
+ if len(p_cdbs) > 1:
+ logger.warning('Inconsistent CDBs used in the generation of metrics - should use the same CDB for '
+ f'consistent results - found {[cdb.name for cdb in p_cdbs]} - metrics will only use the first'
+ f' CDB {projects[0].concept_db.name}')
+ for p_cdb in p_cdbs:
+ if p_cdb not in CDB_MAP:
+ logger.warning(f'CDB {p_cdb.name} not in CDB_MAP cache - this will now be loaded - '
+ f'and will not show intermediary training status')
+
cat = get_medcat(CDB_MAP=CDB_MAP, VOCAB_MAP=VOCAB_MAP,
CAT_MAP=CAT_MAP, project=projects[0])
project_data = retrieve_project_data(projects)
diff --git a/webapp/frontend/src/App.vue b/webapp/frontend/src/App.vue
index 1aefe0a7..963fa4f7 100644
--- a/webapp/frontend/src/App.vue
+++ b/webapp/frontend/src/App.vue
@@ -109,7 +109,6 @@ export default {
}
.link {
- //padding-top: 10px;
display:inline-block;
height: 25px;
cursor: pointer;
diff --git a/webapp/frontend/src/components/anns/AnnoResult.vue b/webapp/frontend/src/components/anns/AnnoResult.vue
index 1d4e1b22..19ef15de 100644
--- a/webapp/frontend/src/components/anns/AnnoResult.vue
+++ b/webapp/frontend/src/components/anns/AnnoResult.vue
@@ -3,7 +3,7 @@
{{result['document name']}}
{{result.cui}}
{{result['source value']}}
- {{result.acc}}
+ {{Number(result.acc).toFixed(3)}}
@@ -33,7 +33,6 @@ export default {
if (this.type === 'fp' || this.type === 'fn') {
highlightClass = 'highlight-task-1'
}
-
const srcVal = this.result['source value']
const resTxt = this.result.text
const regexp = RegExp(`${srcVal}`, 'sg')
@@ -45,13 +44,30 @@ export default {
} else {
outText += `${resTxt.slice(matches[matches.indexOf(match) - 1].index + srcVal.length, match.index)}`
}
- outText += `${srcVal} `
+ outText += `${srcVal} `
if (matches.length === 1 || match === matches[-1]) {
outText += `${resTxt.slice(match.index + srcVal.length)}`
}
}
return outText
}
+ },
+ methods: {
+ openAnno () {
+ const routeData = this.$router.resolve(
+ {
+ name: 'train-annotations',
+ params: {
+ projectId: this.result['project id'],
+ docId: this.result['document id'],
+ },
+ query: {
+ annoStart: this.result['start'],
+ annoEnd: this.result['end']
+ }
+ })
+ window.open(routeData.href, '_blank');
+ }
}
}
diff --git a/webapp/frontend/src/views/Home.vue b/webapp/frontend/src/views/Home.vue
index 4d8edbdb..ba5a5d96 100644
--- a/webapp/frontend/src/views/Home.vue
+++ b/webapp/frontend/src/views/Home.vue
@@ -13,10 +13,14 @@
v-if="!loadingProjects"
@row-selected="select">
- Metrics
+ Metrics
+
@@ -119,7 +123,7 @@
Error saving model
Loading model
Error loading MedCAT model for project
- Unable load a locked project. Unlock via /admin/
+ Unable to load a locked project. Contact your CogStack administrator to unlock
@@ -174,7 +178,6 @@ export default {
'anno_class',
'cdb_search_filter',
'model_loaded',
- 'metrics',
'save_model'
]
},
diff --git a/webapp/frontend/src/views/Metrics.vue b/webapp/frontend/src/views/Metrics.vue
index a20e8eca..ca8a755f 100644
--- a/webapp/frontend/src/views/Metrics.vue
+++ b/webapp/frontend/src/views/Metrics.vue
@@ -15,7 +15,94 @@
-
+
+
+ Concept
+
+
+
+ Concept Count
+
+
+
+ # Vars
+
+
+
+ Variations
+
+
+
+ Variations Ratio
+
+
+
+ CUI
+
+
+
+ F1
+
+
+
+ Prec
+
+
+
+ Rec
+
+
+
+ TPs
+
+
+
+ FNs
+
+
+
+ FPs
+
+
+
+ {{data.item.value.join(', ')}}
+
@@ -30,20 +117,17 @@
@click="openExamples('tp_examples', data.item)">
{{data.item.tps}}
-
{{data.item.fns}}
-
{{data.item.fps}}
-
@@ -52,9 +136,24 @@
-
- {{predictionResultsTitle}}
+
+ {{modalData.title}}
+
+
False positive model predictions can be the result of:
+
+ Alternative model predictions that are overlapping with other concepts
+ Genuine missed annotations by an annotator.
+
+
Clicking through these annotations will not highlight this annotation as it doesn't exist in the dataset
+
+
+
False negative model predictions can be the result of:
+
+ A model mistake that marked an annotation 'correct' where it should be incorrect
+ An annotator mistake that marked an annotation 'correct' where it should be incorrect
+
+
@@ -121,8 +220,10 @@ export default {
},
conceptSummary: {
fields: [
- { key: 'concept_count', label: 'Count', sortable: true },
- { key: 'concept_name', label: 'Concept', sortable: true },
+ { key: 'concept_name', sortable: true },
+ { key: 'concept_count', sortable: true },
+ { key: 'variations', sortable: true },
+ { key: 'variation_values', label: ''},
{ key: 'count_variations_ratio', label: 'Variation Ratio', sortable: true },
{ key: 'cui', label: 'CUI' },
{ key: 'cui_f1', label: 'F1', sortable: true, formatter: this.perfFormatter },
@@ -136,21 +237,34 @@ export default {
metaAnnsSummary: {
fields: []
},
- predictedResults: null,
- predictionResultsTitle: null
+ modalData: {
+ results: null,
+ title: null,
+ type: null
+ }
}
},
methods: {
+ clearModalData () {
+ this.modalData = {
+ results: null,
+ title: null,
+ type: null
+ }
+ },
openExamples (exampleType, item) {
if (exampleType === 'tp_examples') {
- this.predictionResultsTitle = 'True Positive Model Predictions'
+ this.modalData.title = 'True Positive Model Predictions'
+ this.modalData.type = 'tp'
} else if (exampleType === 'fp_examples') {
- this.predictionResultsTitle = 'False Positive Model Predictions'
+ this.modalData.title = 'False Positive Model Predictions'
+ this.modalData.type = 'fp'
} else {
- this.predictionResultsTitle = 'False Negative Model Predictions'
+ this.modalData.title = 'False Negative Model Predictions'
+ this.modalData.type = 'fn'
}
const idx = this.conceptSummary.items.indexOf(item)
- this.predictedResults = this.conceptSummary.items[idx][exampleType]
+ this.modalData.results = this.conceptSummary.items[idx][exampleType]
},
perfFormatter (value) {
let txtColorClass = 'good-perf'
@@ -209,7 +323,6 @@ $metrics-header-height: 42px;
.concept-summary {
overflow-y: auto;
- height: 100%
}
.meta-anno-summary {
diff --git a/webapp/frontend/src/views/TrainAnnotations.vue b/webapp/frontend/src/views/TrainAnnotations.vue
index 31042b64..62aade36 100644
--- a/webapp/frontend/src/views/TrainAnnotations.vue
+++ b/webapp/frontend/src/views/TrainAnnotations.vue
@@ -485,6 +485,15 @@ export default {
this.metaAnnotate = this.currentEnt && (this.currentEnt.assignedValues[TASK_NAME] === CONCEPT_ALTERNATIVE ||
this.currentEnt.assignedValues[TASK_NAME] === CONCEPT_CORRECT)
this.loadingDoc = false
+ if (this.$route.query.annoStart && this.$route.query.annoEnd) {
+ const ent = _.find(this.ents, e => {
+ return Number(this.$route.query.annoStart) === e.start_ind &
+ Number(this.$route.query.annoEnd) === e.end_ind
+ })
+ if (ent) {
+ this.currentEnt = ent
+ }
+ }
}
})
},