Skip to content

Commit

Permalink
Remove some unused code (#727)
Browse files Browse the repository at this point in the history
* Remove lowering cruft from ingot/module items
* Remove more unused code
  • Loading branch information
sbillig authored Jun 1, 2022
2 parents 43b58a8 + ecfccc5 commit 5dc66d5
Show file tree
Hide file tree
Showing 46 changed files with 291 additions and 422 deletions.
7 changes: 3 additions & 4 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -90,8 +90,7 @@ The most advanced example that we can provide at this point is an implementation

The Fe implementation is split into several crates. Crates that depend on the
solidity compiler (directly or indirectly) are licensed GPL-3.0-or-later. This
includes the `fe` CLI tool, compiler "back end" (yulgen, yulc), driver, tests,
and test-utils.
includes the `fe` CLI tool, yulc, driver, tests, and test-utils.

The remaining crates are licensed Apache-2.0. This includes the compiler
"front end" (parser, analyzer, lowering), abi, and common.
The remaining crates are licensed Apache-2.0. This includes the parser,
analyzer, mir, abi, and common.
4 changes: 0 additions & 4 deletions crates/analyzer/src/context.rs
Original file line number Diff line number Diff line change
Expand Up @@ -374,10 +374,6 @@ pub struct FunctionBody {
pub string_literals: IndexSet<SmolStr>, // for yulgen
// Map lhs of variable declaration to type.
pub var_types: IndexMap<NodeId, Type>,

// This is the id of the VarDecl TypeDesc node
// TODO: Do we really need this?
pub var_decl_types: IndexMap<NodeId, Type>,
pub calls: IndexMap<NodeId, CallType>,
pub spans: HashMap<NodeId, Span>,
}
Expand Down
11 changes: 5 additions & 6 deletions crates/analyzer/src/db.rs
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ use crate::namespace::items::{
};
use crate::namespace::types;
use fe_common::db::{SourceDb, SourceDbStorage, Upcast, UpcastMut};
use fe_common::Span;
use fe_common::{SourceFileId, Span};
use fe_parser::ast;
use indexmap::map::IndexMap;
use smol_str::SmolStr;
Expand Down Expand Up @@ -39,16 +39,15 @@ pub trait AnalyzerDb: SourceDb + Upcast<dyn SourceDb> + UpcastMut<dyn SourceDb>
// Ingot

// These are inputs so that the (future) language server can add
// and remove files/dependencies. Set via eg `db.set_ingot_modules`.
// and remove files/dependencies. Set via eg `db.set_ingot_files`.
// If an input is used before it's set, salsa will panic.
// Ideally, `ingot_files` would be the input instead, but in order to support analysis
// of the post-lowering-phase stuff, we need to be able to construct a new
// lowered ingot, and give it a set of lowered modules.
#[salsa::input]
fn ingot_modules(&self, ingot: IngotId) -> Rc<[ModuleId]>;
fn ingot_files(&self, ingot: IngotId) -> Rc<[SourceFileId]>;
#[salsa::input]
fn ingot_external_ingots(&self, ingot: IngotId) -> Rc<IndexMap<SmolStr, IngotId>>;

#[salsa::invoke(queries::ingots::ingot_modules)]
fn ingot_modules(&self, ingot: IngotId) -> Rc<[ModuleId]>;
#[salsa::invoke(queries::ingots::ingot_root_module)]
fn ingot_root_module(&self, ingot: IngotId) -> Option<ModuleId>;

Expand Down
2 changes: 1 addition & 1 deletion crates/analyzer/src/db/queries/functions.rs
Original file line number Diff line number Diff line change
Expand Up @@ -356,7 +356,7 @@ pub fn function_dependency_graph(db: &dyn AnalyzerDb, function: FunctionId) -> D
.values()
.map(|event| (root, Item::Event(*event), DepLocality::Local)),
);
directs.extend(body.var_decl_types.values().filter_map(|typ| match typ {
directs.extend(body.var_types.values().filter_map(|typ| match typ {
Type::Contract(Contract { id, .. }) => Some((
root,
Item::Type(TypeDef::Contract(*id)),
Expand Down
69 changes: 68 additions & 1 deletion crates/analyzer/src/db/queries/ingots.rs
Original file line number Diff line number Diff line change
@@ -1,5 +1,72 @@
use crate::namespace::items::{IngotId, IngotMode, ModuleId};
use crate::namespace::items::{IngotId, IngotMode, ModuleId, ModuleSource};
use crate::AnalyzerDb;
use fe_common::files::{SourceFileId, Utf8Path, Utf8PathBuf};
use indexmap::IndexSet;
use std::rc::Rc;

/// Salsa query: derive the full set of [`ModuleId`]s for an ingot from its
/// `ingot_files` input.
///
/// Two kinds of modules are produced:
/// 1. one `ModuleSource::File` module per `.fe` source file, and
/// 2. one `ModuleSource::Dir` module per directory in the file hierarchy
///    that does not already have a sibling `.fe` file of the same name.
///
/// The returned slice is file modules first, then directory modules.
pub fn ingot_modules(db: &dyn AnalyzerDb, ingot: IngotId) -> Rc<[ModuleId]> {
    // Pair each interned source file id with its (ref-counted) path so we
    // can walk the paths below without re-querying the db per use.
    let files: Vec<(SourceFileId, Rc<Utf8PathBuf>)> = db
        .ingot_files(ingot)
        .iter()
        .map(|f| (*f, f.path(db.upcast())))
        .collect();

    // Create a module for every .fe source file.
    let file_mods = files
        .iter()
        .map(|(file, path)| {
            ModuleId::new(
                db,
                // NOTE(review): `unwrap` assumes every ingot file path has a
                // file stem (i.e. a non-empty final component) — presumably
                // guaranteed by whoever calls `set_ingot_files`; confirm.
                path.file_stem().unwrap(),
                ModuleSource::File(*file),
                ingot,
            )
        })
        .collect();

    // We automatically build a module hierarchy that matches the directory
    // structure. We don't (yet?) require a .fe file for each directory like
    // rust does. (eg `a/b.fe` alongside `a/b/`), but we do allow it (the
    // module's items will be everything inside the .fe file, and the
    // submodules inside the dir).
    //
    // Collect the set of all directories in the file hierarchy
    // (after stripping the common prefix from all paths).
    // eg given ["src/lib.fe", "src/a/b/x.fe", "src/a/c/d/y.fe"],
    // the dir set is {"a", "a/b", "a/c", "a/c/d"}.
    let file_path_prefix = &ingot.data(db).src_dir;
    let dirs = files
        .iter()
        .flat_map(|(_file, path)| {
            // If a path happens not to start with the src dir prefix we keep
            // it as-is rather than dropping it (`unwrap_or(path)`).
            path.strip_prefix(&file_path_prefix.as_str())
                .unwrap_or(path)
                .ancestors()
                .skip(1) // first elem of .ancestors() is the path itself
        })
        .collect::<IndexSet<&Utf8Path>>();

    let dir_mods = dirs
        // Skip the dirs that have an associated fe file; eg skip "a/b" if "a/b.fe" exists.
        .difference(
            // Build the set of file paths with their extension removed, so
            // "a/b.fe" compares equal to the dir "a/b".
            // NOTE(review): `trim_end_matches(".fe")` strips the suffix
            // *repeatedly* ("a/b.fe.fe" -> "a/b"); `strip_suffix` would trim
            // exactly once — confirm the repeated-trim behavior is intended.
            &files
                .iter()
                .map(|(_file, path)| {
                    path.strip_prefix(&file_path_prefix.as_str())
                        .unwrap_or(path)
                        .as_str()
                        .trim_end_matches(".fe")
                        .into()
                })
                .collect::<IndexSet<&Utf8Path>>(),
        )
        .filter_map(|dir| {
            // `file_name()` is None for the root ("" ancestor), which
            // filters out the ingot root itself — it gets no Dir module.
            dir.file_name()
                .map(|name| ModuleId::new(db, name, ModuleSource::Dir(dir.as_str().into()), ingot))
        })
        .collect::<Vec<_>>();

    [file_mods, dir_mods].concat().into()
}

pub fn ingot_root_module(db: &dyn AnalyzerDb, ingot: IngotId) -> Option<ModuleId> {
let filename = match ingot.data(db).mode {
Expand Down
2 changes: 0 additions & 2 deletions crates/analyzer/src/db/queries/module.rs
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,6 @@ pub fn module_file_path(db: &dyn AnalyzerDb, module: ModuleId) -> SmolStr {
let full_path = match &module.data(db).source {
ModuleSource::File(file) => file.path(db.upcast()).as_str().into(),
ModuleSource::Dir(path) => path.clone(),
ModuleSource::Lowered { original, .. } => return db.module_file_path(*original),
};

let src_prefix = &module.ingot(db).data(db).src_dir;
Expand All @@ -43,7 +42,6 @@ pub fn module_parse(db: &dyn AnalyzerDb, module: ModuleId) -> Analysis<Rc<ast::M
// Directory with no corresponding source file. Return empty ast.
Analysis::new(ast::Module { body: vec![] }.into(), vec![].into())
}
ModuleSource::Lowered { .. } => panic!("module_parse called on lowered module"),
}
}

Expand Down
7 changes: 0 additions & 7 deletions crates/analyzer/src/errors.rs
Original file line number Diff line number Diff line change
Expand Up @@ -124,10 +124,6 @@ pub struct AlreadyDefined;
#[derive(Debug)]
pub struct CannotMove;

/// Error indicating that a [`Type`] does not have a fixed size.
#[derive(Debug)]
pub struct NotFixedSize;

/// Errors that can result from indexing
#[derive(Debug, PartialEq)]
pub enum IndexingError {
Expand All @@ -145,9 +141,6 @@ pub enum BinaryOperationError {
NotEqualAndUnsigned,
}

#[derive(Debug)]
pub struct AnalyzerError(pub Vec<Diagnostic>);

impl From<TypeError> for FatalError {
fn from(err: TypeError) -> Self {
Self::new(err.0)
Expand Down
114 changes: 6 additions & 108 deletions crates/analyzer/src/namespace/items.rs
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ use fe_common::files::{common_prefix, Utf8Path};
use fe_common::{impl_intern_key, FileKind, SourceFileId};
use fe_parser::node::{Node, Span};
use fe_parser::{ast, node::NodeId};
use indexmap::{indexmap, IndexMap, IndexSet};
use indexmap::{indexmap, IndexMap};
use smol_str::SmolStr;
use std::ops::Deref;
use std::rc::Rc;
Expand Down Expand Up @@ -214,14 +214,6 @@ impl Item {
}
}

/// Downcast utility function
pub fn as_contract(&self) -> Option<ContractId> {
match self {
Item::Type(TypeDef::Contract(id)) => Some(*id),
_ => None,
}
}

pub fn sink_diagnostics(&self, db: &dyn AnalyzerDb, sink: &mut impl DiagnosticSink) {
match self {
Item::Type(id) => id.sink_diagnostics(db, sink),
Expand Down Expand Up @@ -275,8 +267,6 @@ pub struct Ingot {
pub name: SmolStr,
// pub version: SmolStr,
pub mode: IngotMode,
pub original: Option<IngotId>,

pub src_dir: SmolStr,
}

Expand Down Expand Up @@ -324,73 +314,23 @@ impl IngotId {
let ingot = db.intern_ingot(Rc::new(Ingot {
name: name.into(),
mode,
original: None,
src_dir: file_path_prefix.as_str().into(),
}));

// Create a module for every .fe source file.
let file_mods = files
// Intern the source files
let file_ids = files
.iter()
.map(|(path, content)| {
let file = SourceFileId::new(
SourceFileId::new(
db.upcast_mut(),
file_kind,
path.as_ref(),
content.as_ref().into(),
);
ModuleId::new(
db,
Utf8Path::new(path).file_stem().unwrap(),
ModuleSource::File(file),
ingot,
)
})
.collect();

// We automatically build a module hierarchy that matches the directory
// structure. We don't (yet?) require a .fe file for each directory like
// rust does. (eg `a/b.fe` alongside `a/b/`), but we do allow it (the
// module's items will be everything inside the .fe file, and the
// submodules inside the dir).
//
// Collect the set of all directories in the file hierarchy
// (after stripping the common prefix from all paths).
// eg given ["src/lib.fe", "src/a/b/x.fe", "src/a/c/d/y.fe"],
// the dir set is {"a", "a/b", "a/c", "a/c/d"}.
let dirs = files
.iter()
.flat_map(|(path, _)| {
Utf8Path::new(path)
.strip_prefix(&file_path_prefix)
.unwrap_or_else(|_| Utf8Path::new(path))
.ancestors()
.skip(1) // first elem of .ancestors() is the path itself
})
.collect::<IndexSet<&Utf8Path>>();

let dir_mods = dirs
// Skip the dirs that have an associated fe file; eg skip "a/b" if "a/b.fe" exists.
.difference(
&files
.iter()
.map(|(path, _)| {
Utf8Path::new(path)
.strip_prefix(&file_path_prefix)
.unwrap_or_else(|_| Utf8Path::new(path))
.as_str()
.trim_end_matches(".fe")
.into()
})
.collect::<IndexSet<&Utf8Path>>(),
)
.filter_map(|dir| {
dir.file_name().map(|name| {
ModuleId::new(db, name, ModuleSource::Dir(dir.as_str().into()), ingot)
})
})
.collect::<Vec<_>>();

db.set_ingot_modules(ingot, [file_mods, dir_mods].concat().into());
db.set_ingot_files(ingot, file_ids);
db.set_ingot_external_ingots(ingot, Rc::new(deps));
ingot
}
Expand Down Expand Up @@ -464,32 +404,16 @@ pub enum ModuleSource {
/// For directory modules without a corresponding source file
/// (which will soon not be allowed, and this variant can go away).
Dir(SmolStr),
Lowered {
original: ModuleId,
ast: Rc<ast::Module>,
},
}

#[derive(Debug, PartialEq, Eq, Hash, Clone)]
pub struct Module {
pub name: SmolStr,
pub ingot: IngotId,

/// This differentiates between the original `Module` for a Fe source
/// file (which is parsed in the [`AnalyzerDb::module_parse`] query),
/// and the lowered `Module`, the ast of which is built in the lowering
/// phase, and is stored in the `ModulePhase::Lowered` variant.
// This leaks some knowledge about the existence of the lowering phase
// into the analyzer, but it seems to be the least bad way to move
// parsing into a db query instead of needing to parse at Module intern time.
// Someday we'll likely lower into some new IR, in which case we won't need
// to allow for lowered versions of `ModuleId`s.
pub source: ModuleSource,
}

/// Id of a [`Module`], which corresponds to a single Fe source file.
/// The lowering phase will create a separate `Module` & `ModuleId`
/// for the lowered version of the Fe source code.
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Copy, Clone)]
pub struct ModuleId(pub(crate) u32);
impl_intern_key!(ModuleId);
Expand Down Expand Up @@ -534,10 +458,7 @@ impl ModuleId {
}

pub fn ast(&self, db: &dyn AnalyzerDb) -> Rc<ast::Module> {
match &self.data(db).source {
ModuleSource::File(_) | ModuleSource::Dir(_) => db.module_parse(*self).value,
ModuleSource::Lowered { ast, .. } => Rc::clone(ast),
}
db.module_parse(*self).value
}

pub fn ingot(&self, db: &dyn AnalyzerDb) -> IngotId {
Expand Down Expand Up @@ -712,11 +633,6 @@ impl ModuleId {
items
}

/// All structs, including duplicates
pub fn all_structs(&self, db: &dyn AnalyzerDb) -> Rc<[StructId]> {
db.module_structs(*self)
}

/// All module constants.
pub fn all_constants(&self, db: &dyn AnalyzerDb) -> Rc<Vec<ModuleConstantId>> {
db.module_constants(*self)
Expand Down Expand Up @@ -1049,11 +965,6 @@ impl ContractId {
db.contract_public_function_map(*self)
}

/// Get a function that takes self by its name.
pub fn self_function(&self, db: &dyn AnalyzerDb, name: &str) -> Option<FunctionId> {
self.function(db, name).filter(|f| f.takes_self(db))
}

/// Lookup an event by name.
pub fn event(&self, db: &dyn AnalyzerDb, name: &str) -> Option<EventId> {
self.events(db).get(name).copied()
Expand Down Expand Up @@ -1267,12 +1178,6 @@ impl Class {
}
}

#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)]
pub enum MemberFunction {
BuiltIn(builtins::ValueMethod),
Function(FunctionId),
}

#[derive(Debug, PartialEq, Eq, Hash, Clone)]
pub struct Struct {
pub ast: Node<ast::Struct>,
Expand Down Expand Up @@ -1327,10 +1232,6 @@ impl StructId {
matches!(self.field_type(db, name), Some(Ok(types::Type::Base(_))))
}

pub fn field_index(&self, db: &dyn AnalyzerDb, name: &str) -> Option<usize> {
self.fields(db).get_index_of(name)
}

pub fn has_complex_fields(&self, db: &dyn AnalyzerDb) -> bool {
self.fields(db)
.iter()
Expand All @@ -1351,9 +1252,6 @@ impl StructId {
pub fn function(&self, db: &dyn AnalyzerDb, name: &str) -> Option<FunctionId> {
self.functions(db).get(name).copied()
}
pub fn self_function(&self, db: &dyn AnalyzerDb, name: &str) -> Option<FunctionId> {
self.function(db, name).filter(|f| f.takes_self(db))
}
pub fn parent(&self, db: &dyn AnalyzerDb) -> Item {
Item::Module(self.data(db).module)
}
Expand Down
Loading

0 comments on commit 5dc66d5

Please sign in to comment.