diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 26c7d769f..57e83ce54 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -262,3 +262,38 @@ jobs:
- name: PyTest
run: make pytest
+
+ editor-e2e-test:
+ name: Editor End-to-End Tests
+ runs-on: ubuntu-latest
+ defaults:
+ run:
+ working-directory: ./editor
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Set up Python
+ uses: actions/setup-python@v4
+ with:
+ python-version: '3.11'
+
+ - name: Install library
+ run: pip install -r requirements.txt
+
+ - name: Install mlcroissant
+ run: sudo apt-get install -y libgraphviz-dev && pip install .[dev]
+ working-directory: ./python/mlcroissant
+
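+      # Start the Streamlit app, wait until it serves on :8501, then run the Cypress e2e suite.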
+ - uses: cypress-io/github-action@v6
+ with:
+ start: streamlit run app.py
+ wait-on: 'http://localhost:8501'
+ working-directory: ./editor
+
+ - name: Upload screenshots
+ uses: actions/upload-artifact@v3
+ if: failure()
+ with:
+ name: cypress-screenshots
+ path: ./editor/cypress/screenshots
+ retention-days: 1
diff --git a/editor/core/files.py b/editor/core/files.py
index 40770e251..ef7df4222 100644
--- a/editor/core/files.py
+++ b/editor/core/files.py
@@ -183,8 +183,10 @@ def file_from_url(url: str, names: set[str], folder: epath.Path) -> FileObject:
sha256 = _sha256(file.read())
file_type = guess_file_type(file_path)
df = get_dataframe(file_type, file_path)
+ name = find_unique_name(names, url.split("/")[-1])
return FileObject(
- name=find_unique_name(names, url.split("/")[-1]),
+ id=name,
+ name=name,
description="",
content_url=url,
encoding_format=file_type.encoding_format,
@@ -206,8 +208,10 @@ def file_from_upload(
f.write(value)
file_type = guess_file_type(file_path)
df = get_dataframe(file_type, file)
+ name = find_unique_name(names, file.name)
return FileObject(
- name=find_unique_name(names, file.name),
+ id=name,
+ name=name,
description="",
content_url=content_url,
encoding_format=file_type.encoding_format,
@@ -222,9 +226,11 @@ def file_from_form(
) -> FileObject | FileSet:
"""Creates a file based on manually added fields."""
if type == FILE_OBJECT:
- return FileObject(name=find_unique_name(names, "file_object"), folder=folder)
+ name = find_unique_name(names, "file_object")
+ return FileObject(id=name, name=name, folder=folder)
elif type == FILE_SET:
- return FileSet(name=find_unique_name(names, "file_set"))
+ name = find_unique_name(names, "file_set")
+ return FileSet(id=name, name=name)
else:
raise ValueError("type has to be one of FILE_OBJECT, FILE_SET")
diff --git a/editor/core/record_sets.py b/editor/core/record_sets.py
index 3c9726516..51711c9bc 100644
--- a/editor/core/record_sets.py
+++ b/editor/core/record_sets.py
@@ -23,16 +23,19 @@ def infer_record_sets(file: FileObject | FileSet, names: set[str]) -> list[Recor
extract=mlc.Extract(column=column),
)
field = Field(
+ id=column,
name=column,
data_types=[convert_dtype(value)],
source=source,
references=mlc.Source(),
)
fields.append(field)
+ name = find_unique_name(names, file.name + "_record_set")
return [
RecordSet(
+ id=name,
fields=fields,
- name=find_unique_name(names, file.name + "_record_set"),
+ name=name,
description="",
)
]
diff --git a/editor/core/state.py b/editor/core/state.py
index 0620038be..0c0ea2ce3 100644
--- a/editor/core/state.py
+++ b/editor/core/state.py
@@ -9,6 +9,7 @@
import dataclasses
import datetime
from typing import Any
+import uuid
from etils import epath
import pandas as pd
@@ -33,9 +34,6 @@ def create_class(mlc_class: type, instance: Any, **kwargs) -> Any:
name = field.name
if hasattr(instance, name) and name not in kwargs:
params[name] = getattr(instance, name)
- if "uuid" in params and params.get("uuid") is None:
- # Let mlcroissant handle the default value
- del params["uuid"]
return mlc_class(**params, **kwargs)
@@ -127,11 +125,22 @@ class SelectedRecordSet:
@dataclasses.dataclass
-class FileObject:
- """FileObject analogue for editor"""
-
+class Node:
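+    """Base class for all editor nodes, holding the shared ctx, id and name attributes."""
+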
ctx: mlc.Context = dataclasses.field(default_factory=mlc.Context)
+ id: str | None = None
name: str | None = None
+
+    def get_name_or_id(self) -> str | None:
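+        """Returns the name for Croissant 0.8 datasets and the id for Croissant 1.0 datasets."""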
+ if self.ctx.is_v0():
+ return self.name
+ else:
+ return self.id
+
+
+@dataclasses.dataclass
+class FileObject(Node):
+ """FileObject analogue for editor"""
+
description: str | None = None
contained_in: list[str] | None = dataclasses.field(default_factory=list)
content_size: str | None = None
@@ -140,65 +149,51 @@ class FileObject:
sha256: str | None = None
df: pd.DataFrame | None = None
folder: epath.PathLike | None = None
- id: str | None = None
@dataclasses.dataclass
-class FileSet:
+class FileSet(Node):
"""FileSet analogue for editor"""
- ctx: mlc.Context = dataclasses.field(default_factory=mlc.Context)
contained_in: list[str] = dataclasses.field(default_factory=list)
description: str | None = None
encoding_format: str | None = ""
includes: str | None = ""
- name: str = ""
- id: str | None = None
@dataclasses.dataclass
-class Field:
+class Field(Node):
"""Field analogue for editor"""
- ctx: mlc.Context = dataclasses.field(default_factory=mlc.Context)
- name: str | None = None
description: str | None = None
data_types: str | list[str] | None = None
source: mlc.Source | None = None
references: mlc.Source | None = None
- id: str | None = None
@dataclasses.dataclass
-class RecordSet:
+class RecordSet(Node):
"""Record Set analogue for editor"""
- ctx: mlc.Context = dataclasses.field(default_factory=mlc.Context)
- name: str = ""
data: list[Any] | None = None
description: str | None = None
is_enumeration: bool | None = None
key: str | list[str] | None = None
fields: list[Field] = dataclasses.field(default_factory=list)
- id: str | None = None
@dataclasses.dataclass
-class Metadata:
+class Metadata(Node):
"""main croissant data object, helper functions exist to load and unload this into the mlcroissant version"""
- name: str = ""
description: str | None = None
cite_as: str | None = None
- context: dict = dataclasses.field(default_factory=dict)
creators: list[mlc.PersonOrOrganization] = dataclasses.field(default_factory=list)
- ctx: mlc.Context = dataclasses.field(default_factory=mlc.Context)
data_biases: str | None = None
data_collection: str | None = None
date_published: datetime.datetime | None = None
license: str | None = ""
personal_sensitive_information: str | None = None
- id: str | None = None
url: str = ""
distribution: list[FileObject | FileSet] = dataclasses.field(default_factory=list)
record_sets: list[RecordSet] = dataclasses.field(default_factory=list)
@@ -272,6 +267,25 @@ def rename_field(self, old_name: str, new_name: str):
new_uuid = references.id.replace(old_name, new_name, 1)
self.record_sets[i].fields[j].references.id = new_uuid
+ def rename_id(self, old_id: str, new_id: str):
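+        """Propagates an id change to every resource, record set, field, source and reference using old_id."""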
+ for resource in self.distribution:
+ if resource.id == old_id:
+ resource.id = new_id
+ if resource.contained_in and old_id in resource.contained_in:
+ resource.contained_in = [
+                    new_id if existing_id == old_id else existing_id
+                    for existing_id in resource.contained_in
+ ]
+ for record_set in self.record_sets:
+ if record_set.id == old_id:
+ record_set.id = new_id
+ for field in record_set.fields:
+ if field.id == old_id:
+ field.id = new_id
+ if field.source and field.source.id == old_id:
+ field.source.id = new_id
+ if field.references and field.references.id == old_id:
+ field.references.id = new_id
+
def add_distribution(self, distribution: FileSet | FileObject) -> None:
self.distribution.append(distribution)
@@ -352,8 +366,16 @@ def from_canonical(cls, canonical_metadata: mlc.Metadata) -> Metadata:
)
def names(self) -> set[str]:
- nodes = self.distribution + self.record_sets
- return set([node.name for node in nodes])
+ distribution = set()
+ record_sets = set()
+ fields = set()
+ for resource in self.distribution:
+ distribution.add(resource.get_name_or_id())
+ for record_set in self.record_sets:
+ record_sets.add(record_set.get_name_or_id())
+ for field in record_set.fields:
+ fields.add(field.get_name_or_id())
+ return distribution.union(record_sets).union(fields)
class OpenTab:
diff --git a/editor/cypress/e2e/createManually.cy.js b/editor/cypress/e2e/createManually.cy.js
index ed3a23cb7..5d3910434 100644
--- a/editor/cypress/e2e/createManually.cy.js
+++ b/editor/cypress/e2e/createManually.cy.js
@@ -1,42 +1,47 @@
///
-import 'cypress-file-upload';
-import 'cypress-iframe';
+import "cypress-file-upload";
+import "cypress-iframe";
+describe("Create a resource manually", () => {
+ it("should allow adding a FileObject resource", () => {
+ cy.visit("http://localhost:8501");
+ cy.get("button").contains("Create").click();
+ cy.get('input[aria-label="Name:red[*]"]').type("MyDataset{enter}");
+ cy.contains("Croissant files are composed of three layers:");
+ cy.enter('[title="components.tabs.tabs_component"]').then((getBody) => {
+ getBody().contains("Metadata").click();
+ });
+ cy.get('input[aria-label="URL"]').type("https://mydataset.com{enter}", {
+ force: true,
+ });
-describe('Create a resource manually', () => {
- it('should allow adding a FileObject resource', () => {
- // Streamlit starts on :8501.
- cy.visit('http://localhost:8501')
- cy.get('button').contains('Create').click()
- cy.get('input[aria-label="Name:red[*]"]').type('MyDataset{enter}')
- cy.contains("Croissant files are composed of three layers:")
- cy.enter('[title="components.tabs.tabs_component"]').then(getBody => {
- getBody().contains('Metadata').click()
- })
- cy.get('input[aria-label="URL"]').type('https://mydataset.com{enter}', {force: true})
-
// Create a resource manually.
- cy.enter('[title="components.tabs.tabs_component"]').then(getBody => {
- getBody().contains('Resources').click()
- })
- cy.get('[data-testid="stMarkdownContainer"]').contains('Add manually').click()
- cy.get('button').contains('Upload').click()
-
+ cy.enter('[title="components.tabs.tabs_component"]').then((getBody) => {
+ getBody().contains("Resources").click();
+ });
+ cy.get('[data-testid="stMarkdownContainer"]')
+ .contains("Add manually")
+ .click();
+ cy.get("button").contains("Upload").click();
// The file is created, so we can click on it to see the details.
- cy.enter('[title="components.tree.tree_component"]').then(getBody => {
- getBody().contains('file_object').click()
- })
+ cy.enter('[title="components.tree.tree_component"]').then((getBody) => {
+ getBody().contains("file_object").click();
+ });
// We can edit it
- cy.get('input[aria-label="Name:red[*]"]').type('{selectall}{backspace}test.csv{enter}')
- cy.wait(1000)
- cy.enter('[title="components.tree.tree_component"]').then(getBody => {
- getBody().contains('test.csv').click()
- })
- cy.get('input[aria-label="SHA256:red[*]"]').type('abcdefgh1234567{enter}')
+ cy.get('input[aria-label="ID:red[*]"]').type(
+ "{selectall}{backspace}test.csv{enter}"
+ );
+ cy.wait(1000);
+ cy.enter('[title="components.tree.tree_component"]').then((getBody) => {
+ getBody().contains("test.csv").click();
+ });
+ cy.get('input[aria-label="SHA256:red[*]"]').type("abcdefgh1234567{enter}");
- cy.get('input[aria-label="SHA256:red[*]"]')
- .should('have.value', 'abcdefgh1234567')
- })
-})
+ cy.get('input[aria-label="SHA256:red[*]"]').should(
+ "have.value",
+ "abcdefgh1234567"
+ );
+ });
+});
diff --git a/editor/cypress/e2e/displayErrors.cy.js b/editor/cypress/e2e/displayErrors.cy.js
index ab1470a36..b6d6f0692 100644
--- a/editor/cypress/e2e/displayErrors.cy.js
+++ b/editor/cypress/e2e/displayErrors.cy.js
@@ -32,7 +32,8 @@ VERSIONS.forEach((version) => {
cy.contains("annotations (4 fields)");
cy.contains("split_enums (2 fields)").click();
cy.contains("Generating the dataset...").should("not.exist");
- cy.get('input[aria-label="Name:red[*]"][value="split_enums"]')
+      const input = version == "0.8" ? "Name" : "ID";
+ cy.get(`input[aria-label="${input}:red[*]"][value="split_enums"]`)
.should("be.visible")
.type("{selectall}{backspace}{enter}");
cy.wait(2000);
diff --git a/editor/cypress/e2e/renameDistribution.cy.js b/editor/cypress/e2e/renameDistribution.cy.js
index 3371cbea5..5621e30a2 100644
--- a/editor/cypress/e2e/renameDistribution.cy.js
+++ b/editor/cypress/e2e/renameDistribution.cy.js
@@ -1,40 +1,46 @@
///
-import 'cypress-file-upload';
-import 'cypress-iframe';
+import "cypress-file-upload";
+import "cypress-iframe";
+import { VERSIONS } from "../support/constants";
-describe('Renaming of FileObjects/FileSets/RecordSets/Fields.', () => {
- it('should rename the FileObject/FileSet everywhere', () => {
- cy.visit('http://localhost:8501')
+VERSIONS.forEach((version) => {
+ const fixture = `${version}/titanic.json`;
+ describe(`[Version ${version}] Renaming of FileObjects/FileSets/RecordSets/Fields.`, () => {
+ it("should rename the FileObject/FileSet everywhere", () => {
+ cy.visit("http://localhost:8501");
- cy.fixture('1.0/titanic.json').then((fileContent) => {
- const file = {
- fileContent,
- fileName: 'titanic.json', mimeType: 'text/json',
- }
- cy.get(
- "[data-testid='stFileUploadDropzone']",
- ).attachFile(file, {
- force: true,
- subjectType: "drag-n-drop",
- events: ["dragenter", "drop"],
- })
- })
- cy.enter('[title="components.tabs.tabs_component"]').then(getBody => {
- getBody().contains('Resources').click()
- })
- cy.enter('[title="components.tree.tree_component"]').then(getBody => {
- // Click on genders.csv
- getBody().contains('genders.csv').click()
- })
- cy.get('input[aria-label="ID:red[*]"][value="genders.csv"]').type('{selectall}{backspace}the-new-name{enter}')
+ cy.fixture(fixture).then((fileContent) => {
+ const file = {
+ fileContent,
+ fileName: "titanic.json",
+ mimeType: "text/json",
+ };
+ cy.get("[data-testid='stFileUploadDropzone']").attachFile(file, {
+ force: true,
+ subjectType: "drag-n-drop",
+ events: ["dragenter", "drop"],
+ });
+ });
+ cy.enter('[title="components.tabs.tabs_component"]').then((getBody) => {
+ getBody().contains("Resources").click();
+ });
+ cy.enter('[title="components.tree.tree_component"]').then((getBody) => {
+ // Click on genders.csv
+ getBody().contains("genders.csv").click();
+ });
+      const input = version == "0.8" ? "Name" : "ID";
+ cy.get(`input[aria-label="${input}:red[*]"][value="genders.csv"]`).type(
+ "{selectall}{backspace}the-new-name{enter}"
+ );
- cy.enter('[title="components.tabs.tabs_component"]').then(getBody => {
- getBody().contains('Record Sets').click()
- })
- cy.contains('genders').click()
- cy.contains('Edit fields details').click({force: true})
- cy.contains('the-new-name')
- })
-})
+ cy.enter('[title="components.tabs.tabs_component"]').then((getBody) => {
+ getBody().contains("Record Sets").click();
+ });
+ cy.contains("genders").click();
+ cy.contains("Edit fields details").click({ force: true });
+ cy.contains("the-new-name");
+ });
+ });
+});
diff --git a/editor/events/fields.py b/editor/events/fields.py
index dddd2967b..b0f6458dd 100644
--- a/editor/events/fields.py
+++ b/editor/events/fields.py
@@ -58,6 +58,7 @@ class FieldEvent(enum.Enum):
"""Event that triggers a field change."""
NAME = "NAME"
+ ID = "ID"
DESCRIPTION = "DESCRIPTION"
DATA_TYPE = "DATA_TYPE"
SOURCE = "SOURCE"
@@ -86,6 +87,12 @@ def handle_field_change(
metadata: Metadata = st.session_state[Metadata]
metadata.rename_field(old_name=old_name, new_name=new_name)
field.name = value
+ elif change == FieldEvent.ID:
+ old_id = field.id
+ new_id = value
+ if old_id != new_id:
+ metadata: Metadata = st.session_state[Metadata]
+ metadata.rename_id(old_id=old_id, new_id=new_id)
elif change == FieldEvent.DESCRIPTION:
field.description = value
elif change == FieldEvent.DATA_TYPE:
diff --git a/editor/events/record_sets.py b/editor/events/record_sets.py
index 37f8380bd..8ff1912e3 100644
--- a/editor/events/record_sets.py
+++ b/editor/events/record_sets.py
@@ -11,6 +11,7 @@ class RecordSetEvent(enum.Enum):
"""Event that triggers a RecordSet change."""
NAME = "NAME"
+ ID = "ID"
DESCRIPTION = "DESCRIPTION"
IS_ENUMERATION = "IS_ENUMERATION"
HAS_DATA = "HAS_DATA"
@@ -26,6 +27,12 @@ def handle_record_set_change(event: RecordSetEvent, record_set: RecordSet, key:
metadata: Metadata = st.session_state[Metadata]
metadata.rename_record_set(old_name=old_name, new_name=new_name)
record_set.name = value
+ elif event == RecordSetEvent.ID:
+ old_id = record_set.id
+ new_id = value
+ if old_id != new_id:
+ metadata: Metadata = st.session_state[Metadata]
+ metadata.rename_id(old_id=old_id, new_id=new_id)
elif event == RecordSetEvent.DESCRIPTION:
record_set.description = value
elif event == RecordSetEvent.IS_ENUMERATION:
diff --git a/editor/events/resources.py b/editor/events/resources.py
index 6d68536cb..6dfcb9250 100644
--- a/editor/events/resources.py
+++ b/editor/events/resources.py
@@ -42,8 +42,7 @@ def handle_resource_change(event: ResourceEvent, resource: Resource, key: str):
new_id = value
if old_id != new_id:
metadata: Metadata = st.session_state[Metadata]
- metadata.rename_distribution(old_name=old_id, new_name=new_id)
- resource.id = value
+ metadata.rename_id(old_id=old_id, new_id=new_id)
elif event == ResourceEvent.DESCRIPTION:
resource.description = value
elif event == ResourceEvent.ENCODING_FORMAT:
diff --git a/editor/events/resources_test.py b/editor/events/resources_test.py
index f739c317f..e843bc962 100644
--- a/editor/events/resources_test.py
+++ b/editor/events/resources_test.py
@@ -6,6 +6,7 @@
def test_create_instance1_from_instance2():
file_object = FileObject(
+ id="id",
name="name",
description="description",
contained_in=["foo", "bar"],
@@ -13,6 +14,7 @@ def test_create_instance1_from_instance2():
)
file_set = _create_instance1_from_instance2(file_object, FileSet)
assert isinstance(file_set, FileSet)
+ assert file_set.id == "id"
assert file_set.name == "name"
assert file_set.description == "description"
assert file_set.contained_in == ["foo", "bar"]
diff --git a/editor/views/files.py b/editor/views/files.py
index 93ab76bfd..306fb5f1b 100644
--- a/editor/views/files.py
+++ b/editor/views/files.py
@@ -84,7 +84,7 @@ def _render_resources_panel(files: list[Resource]) -> Resource | None:
filename_to_file: dict[str, list[Resource]] = {}
nodes = []
for file in files:
- name = file.name
+ name = file.get_name_or_id()
filename_to_file[name] = file
type = "FileObject" if isinstance(file, FileObject) else "FileSet"
if file.contained_in:
@@ -141,7 +141,7 @@ def handle_on_click():
record_sets = infer_record_sets(file, names)
for record_set in record_sets:
st.session_state[Metadata].add_record_set(record_set)
- st.session_state[SelectedResource] = file.name
+ st.session_state[SelectedResource] = file.get_name_or_id()
st.form_submit_button("Upload", on_click=handle_on_click)
@@ -159,7 +159,7 @@ def _render_resource_details(selected_file: Resource):
"""Renders the details of the selected resource."""
file: FileObject | FileSet
for i, file in enumerate(st.session_state[Metadata].distribution):
- if file.name == selected_file.name:
+ if file.get_name_or_id() == selected_file.get_name_or_id():
is_file_object = isinstance(file, FileObject)
index = (
RESOURCE_TYPES.index(FILE_OBJECT)
@@ -221,7 +221,7 @@ def _render_resource(prefix: int, file: Resource, is_file_object: bool):
else:
st.text_input(
needed_field("ID"),
- value=file.name,
+ value=file.id,
key=key,
help=f"The ID of the resource. {NAMES_INFO}",
on_change=handle_resource_change,
diff --git a/editor/views/jsonld.py b/editor/views/jsonld.py
index 603885815..0bd0f1071 100644
--- a/editor/views/jsonld.py
+++ b/editor/views/jsonld.py
@@ -14,6 +14,7 @@ def render_jsonld():
for file in croissant.distribution:
distribution.append(
mlc.FileObject(
+ id=file.id,
name=file.name,
description=file.description,
content_url=file.content_url,
@@ -27,6 +28,7 @@ def render_jsonld():
for _, field in record_set.get("fields", pd.DataFrame()).iterrows():
fields.append(
mlc.Field(
+ id=field["id"],
name=field["name"],
description=field["description"],
data_types=field["data_type"],
@@ -39,6 +41,7 @@ def render_jsonld():
)
record_sets.append(
mlc.RecordSet(
+ id=record_set["id"],
name=record_set["name"],
description=record_set["description"],
fields=fields,
@@ -46,6 +49,7 @@ def render_jsonld():
)
if croissant.metadata:
metadata = mlc.Metadata(
+ id=croissant.metadata.id,
name=croissant.metadata.name,
cite_as=croissant.metadata.cite_as,
license=croissant.metadata.license,
diff --git a/editor/views/record_sets.py b/editor/views/record_sets.py
index a68f18b17..d5596db95 100644
--- a/editor/views/record_sets.py
+++ b/editor/views/record_sets.py
@@ -126,16 +126,11 @@ def _get_possible_sources(metadata: Metadata) -> list[str]:
for field in record_set.fields:
possible_sources.append(f"{record_set.name}/{field.name}")
else:
- # TODO(marcenacp): This workaround is temporary. We should be able to properly
- # infer IDs using mlcroissant.
- get_original_id = lambda id: id.split(mlc.constants.BASE_IRI)[-1]
for resource in metadata.distribution:
- if resource.id:
- possible_sources.append(get_original_id(resource.id))
+ possible_sources.append(resource.id)
for record_set in metadata.record_sets:
for field in record_set.fields:
- if field.id:
- possible_sources.append(get_original_id(field.id))
+ possible_sources.append(field.id)
return possible_sources
@@ -171,7 +166,8 @@ def _find_joins(fields: list[Field]) -> set[Join]:
def _handle_create_record_set():
metadata: Metadata = st.session_state[Metadata]
- metadata.add_record_set(RecordSet(name="new-record-set", description=""))
+ name = "new-record-set"
+ metadata.add_record_set(RecordSet(id=name, name=name, description=""))
def _handle_remove_record_set(record_set_key: int):
@@ -200,6 +196,7 @@ def _handle_fields_change(record_set_key: int, record_set: RecordSet):
for added_row in result["added_rows"]:
data_type = str_to_mlc_data_type(added_row.get(FieldDataFrame.DATA_TYPE))
field = Field(
+ id=added_row.get(FieldDataFrame.NAME),
name=added_row.get(FieldDataFrame.NAME),
description=added_row.get(FieldDataFrame.DESCRIPTION),
data_types=[data_type],
@@ -247,15 +244,26 @@ def _render_left_panel():
with st.expander(title, expanded=is_record_set_expanded(record_set)):
col1, col2 = st.columns([1, 3])
key = f"{prefix}-name"
- col1.text_input(
- needed_field("Name"),
- placeholder="Name without special character.",
- key=key,
- help=f"The name of the RecordSet. {NAMES_INFO}",
- value=record_set.name,
- on_change=handle_record_set_change,
- args=(RecordSetEvent.NAME, record_set, key),
- )
+ if record_set.ctx.is_v0():
+ col1.text_input(
+ needed_field("Name"),
+ placeholder="Name without special character.",
+ key=key,
+ help=f"The name of the RecordSet. {NAMES_INFO}",
+ value=record_set.name,
+ on_change=handle_record_set_change,
+ args=(RecordSetEvent.NAME, record_set, key),
+ )
+ else:
+ col1.text_input(
+ needed_field("ID"),
+ placeholder="ID without special character.",
+ key=key,
+ help=f"The ID of the resource. {NAMES_INFO}",
+ value=record_set.name,
+ on_change=handle_record_set_change,
+ args=(RecordSetEvent.ID, record_set, key),
+ )
key = f"{prefix}-description"
col2.text_input(
"Description",
@@ -464,15 +472,26 @@ def _render_right_panel():
col1, col2, col3 = st.columns([1, 1, 1])
key = f"{prefix}-name"
- col1.text_input(
- needed_field("Name"),
- placeholder="Name without special character.",
- key=key,
- help=f"The name of the field. {NAMES_INFO}",
- value=field.name,
- on_change=handle_field_change,
- args=(FieldEvent.NAME, field, key),
- )
+ if field.ctx.is_v0():
+ col1.text_input(
+ needed_field("Name"),
+ placeholder="Name without special character.",
+ key=key,
+ help=f"The name of the field. {NAMES_INFO}",
+ value=field.name,
+ on_change=handle_field_change,
+ args=(FieldEvent.NAME, field, key),
+ )
+ else:
+ col1.text_input(
+ needed_field("ID"),
+ placeholder="ID without special character.",
+ key=key,
+ help=f"The ID of the field. {NAMES_INFO}",
+ value=field.id,
+ on_change=handle_field_change,
+ args=(FieldEvent.ID, field, key),
+ )
key = f"{prefix}-description"
col2.text_input(
"Description",
diff --git a/editor/views/wizard.py b/editor/views/wizard.py
index 405fbc19e..81ad428fe 100644
--- a/editor/views/wizard.py
+++ b/editor/views/wizard.py
@@ -23,8 +23,9 @@
def _export_json() -> str | None:
metadata: Metadata = st.session_state[Metadata]
try:
+ name = metadata.name or "metadata"
return {
- "name": f"croissant-{metadata.name.lower()}.json",
+ "name": f"croissant-{name.lower()}.json",
"content": json.dumps(metadata.to_canonical().to_json()),
}
except mlc.ValidationError as exception: