Commit

Merge branch 'master' into meyer/compact-policy

meyerzinn authored Mar 21, 2024
2 parents 08403fc + 700cc9b commit ce4f1c5
Showing 4 changed files with 65 additions and 78 deletions.
6 changes: 6 additions & 0 deletions .gitmodules
@@ -0,0 +1,6 @@
+[submodule "external/pcg-cpp"]
+	path = external/pcg-cpp
+	url = https://github.com/imneme/pcg-cpp.git
+[submodule "external/parallel-hashmap"]
+	path = external/parallel-hashmap
+	url = https://github.com/greg7mdp/parallel-hashmap.git
1 change: 1 addition & 0 deletions external/parallel-hashmap
Submodule parallel-hashmap added at 67c246
1 change: 1 addition & 0 deletions external/pcg-cpp
Submodule pcg-cpp added at 428802
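The two new submodules vendor imneme's pcg-cpp (PCG random number generators) and greg7mdp's parallel-hashmap (flat hash containers). Nothing in this diff shows how the graph uses them, so the following is only a minimal sketch of each library's stock upstream API, assuming the submodule directories are on the include path:

#include <cstdint>
#include <iostream>

#include "pcg_random.hpp"           // from external/pcg-cpp
#include <parallel_hashmap/phmap.h> // from external/parallel-hashmap

int main() {
  pcg32 rng(42u);                      // PCG generator, explicitly seeded
  phmap::flat_hash_set<uint32_t> seen; // open-addressing hash set
  for (int i = 0; i < 8; ++i)
    seen.insert(rng() % 100); // draw random values, deduplicating repeats
  std::cout << seen.size() << " distinct values\n";
}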
135 changes: 57 additions & 78 deletions libgalois/include/galois/graphs/LS_LC_CSR_Graph.h
@@ -72,12 +72,9 @@ class LS_LC_CSR_Graph : private boost::noncopyable {
   using EdgeRange = boost::iterator_range<EdgeIterator>;
 
   std::vector<VertexMetadata> m_vertices;
 
   LargeVector<EdgeMetadata> m_edges[2];
-
-  // To avoid deadlock between updates and compaction, at least one vertex lock
-  // must be held to acquire m_edges_lock.
-  SpinLock m_edges_lock;
+  SpinLock m_edges_lock; // guards resizing of edges vectors
 
   std::atomic_uint64_t m_edges_tail = ATOMIC_VAR_INIT(0);
   std::atomic_uint64_t m_holes = ATOMIC_VAR_INIT(0);
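These members describe the storage scheme: two edge logs (m_edges[1] is where updates are written, m_edges[0] holds the last compacted generation), a relaxed atomic tail from which writers claim space, and a counter of tombstoned or abandoned slots. A standalone sketch of the claim-then-maybe-grow pattern the methods below rely on; the names are illustrative, and std::vector stands in for LargeVector here, which only works single-threaded (the real container must keep already-claimed slots valid while it grows, which std::vector does not guarantee):

#include <algorithm>
#include <atomic>
#include <cstdint>
#include <mutex>
#include <vector>

struct EdgeLog {
  std::vector<uint64_t> storage;
  std::atomic<uint64_t> tail{0};
  std::mutex resize_lock; // plays the role of m_edges_lock

  // Claim `n` contiguous slots and return the index of the first one.
  uint64_t claim(uint64_t n) {
    uint64_t const begin = tail.fetch_add(n, std::memory_order_relaxed);
    uint64_t const end = begin + n;
    if (storage.size() < end) {
      std::lock_guard<std::mutex> guard(resize_lock);
      // double-checked: another thread may already have grown the log
      if (storage.size() < end)
        storage.resize(std::max<uint64_t>(storage.size() * 2, end));
    }
    return begin;
  }
};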
@@ -134,48 +131,45 @@ class LS_LC_CSR_Graph : private boost::noncopyable {
 
   int addEdgesTopologyOnly(VertexTopologyID src,
                            const std::vector<VertexTopologyID> dsts) {
-    auto& vertex_meta = m_vertices[src];
-
-    // Copies the edge list to the end of m_edges[1], prepending
-    // the new edges.
-
-    vertex_meta.lock(); // prevents compaction
-    {
-      uint64_t const new_degree = vertex_meta.degree + dsts.size();
-      uint64_t const new_begin =
-          m_edges_tail.fetch_add(new_degree, std::memory_order_relaxed);
-      uint64_t const new_end = new_begin + new_degree;
-
-      if (m_edges[1].size() < new_end) {
-        m_edges_lock.lock();
-        {
-          if (m_edges[1].size() < new_end)
-            m_edges[1].resize(std::max(m_edges[1].size() * 2, new_end));
-        }
-        m_edges_lock.unlock();
-      }
-
-      // insert new edges
-      std::transform(dsts.begin(), dsts.end(), &getEdgeMetadata(1, new_begin),
-                     [](VertexTopologyID dst) {
-                       return EdgeMetadata{.flags = 0, .dst = dst};
-                     });
-
-      // copy old, non-tombstoned edges
-      std::copy_if(&getEdgeMetadata(vertex_meta.buffer, vertex_meta.begin),
-                   &getEdgeMetadata(vertex_meta.buffer, vertex_meta.end),
-                   &getEdgeMetadata(1, new_begin + dsts.size()),
-                   [](EdgeMetadata& edge) { return !edge.is_tomb(); });
-
-      // update vertex metadata
-      vertex_meta.buffer = 1;
-      vertex_meta.begin = new_begin;
-      vertex_meta.end = new_end;
-
-      m_holes.fetch_add(vertex_meta.degree, std::memory_order_relaxed);
-      vertex_meta.degree += dsts.size();
-    }
-    vertex_meta.unlock();
+    // Copies the edge list to the end of m_edges[1], prepending
+    // the new edges.
+    auto& vertex_meta = m_vertices[src];
+
+    uint64_t const new_degree = vertex_meta.degree + dsts.size();
+    uint64_t const new_begin =
+        m_edges_tail.fetch_add(new_degree, std::memory_order_relaxed);
+    uint64_t const new_end = new_begin + new_degree;
+
+    if (m_edges[1].size() < new_end) {
+      m_edges_lock.lock();
+      {
+        if (m_edges[1].size() < new_end)
+          m_edges[1].resize(std::max(m_edges[1].size() * 2, new_end));
+      }
+      m_edges_lock.unlock();
+    }
+
+    // insert new edges
+    std::transform(dsts.begin(), dsts.end(), &getEdgeMetadata(1, new_begin),
+                   [](VertexTopologyID dst) {
+                     return EdgeMetadata{.flags = 0, .dst = dst};
+                   });
+
+    // copy old, non-tombstoned edges
+    std::copy_if(&getEdgeMetadata(vertex_meta.buffer, vertex_meta.begin),
+                 &getEdgeMetadata(vertex_meta.buffer, vertex_meta.end),
+                 &getEdgeMetadata(1, new_begin + dsts.size()),
+                 [](EdgeMetadata& edge) { return !edge.is_tomb(); });
+
+    // update vertex metadata
+    vertex_meta.buffer = 1;
+    vertex_meta.begin = new_begin;
+    vertex_meta.end = new_end;
+
+    m_holes.fetch_add(vertex_meta.degree, std::memory_order_relaxed);
+    vertex_meta.degree += dsts.size();
 
     return 0;
   }
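The rewrite drops the per-vertex lock: an update now reserves new_degree slots at the tail of m_edges[1], writes the new edges, copies the surviving old edges after them, and repoints the vertex, abandoning the old range entirely (which is why the old degree is added to m_holes). Compaction is instead excluded by policy, per the updated compact() comment below. A toy, single-threaded illustration of the prepend-copy step, with a hypothetical Edge type standing in for EdgeMetadata:

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

struct Edge {
  uint32_t dst;
  bool tomb = false; // tombstone flag, like EdgeMetadata::is_tomb()
};

int main() {
  std::vector<Edge> old_list = {{10}, {11, true}, {12}}; // 11 is tombstoned
  std::vector<uint32_t> dsts = {20, 21};                 // edges being added

  std::vector<Edge> fresh(dsts.size() + old_list.size());
  // new edges go first...
  std::transform(dsts.begin(), dsts.end(), fresh.begin(),
                 [](uint32_t d) { return Edge{d}; });
  // ...then the old, non-tombstoned edges are copied after them
  auto end = std::copy_if(old_list.begin(), old_list.end(),
                          fresh.begin() + dsts.size(),
                          [](const Edge& e) { return !e.tomb; });
  for (auto it = fresh.begin(); it != end; ++it)
    std::cout << it->dst << ' '; // prints: 20 21 10 12
  std::cout << '\n';
}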
@@ -184,37 +178,32 @@ class LS_LC_CSR_Graph : private boost::noncopyable {
                   const std::vector<VertexTopologyID>& edges) {
     std::unordered_set<VertexTopologyID> edges_set(edges.begin(), edges.end());
 
     auto& vertex_meta = m_vertices[src];
-    vertex_meta.lock();
-    {
-      uint64_t holes_added = 0;
-      for (auto i = vertex_meta.begin; i < vertex_meta.end; ++i) {
-        EdgeMetadata& edge_meta =
-            getEdgeMetadata(EdgeHandle(vertex_meta.buffer, i));
-        if (!edge_meta.is_tomb() &&
-            edges_set.find(edge_meta.dst) != edges_set.end()) {
-          edge_meta.tomb();
-          --vertex_meta.degree;
-          ++holes_added;
-          // remove tombstoned edges from the start of the edge list
-          if (i == vertex_meta.begin)
-            ++vertex_meta.begin;
-        }
-      }
-
-      // remove tombstoned edges from the end of the edge list
-      for (auto i = vertex_meta.end; i > vertex_meta.begin; --i) {
-        if (getEdgeMetadata(EdgeHandle(vertex_meta.buffer, i - 1)).is_tomb()) {
-          --vertex_meta.end;
-          --vertex_meta.degree;
-        } else {
-          break;
-        }
-      }
-
-      m_holes.fetch_add(holes_added, std::memory_order_relaxed);
-    }
-    vertex_meta.unlock();
+
+    uint64_t holes_added = 0;
+    for (auto i = vertex_meta.begin; i < vertex_meta.end; ++i) {
+      EdgeMetadata& edge_meta =
+          getEdgeMetadata(EdgeHandle(vertex_meta.buffer, i));
+      if (!edge_meta.is_tomb() &&
+          edges_set.find(edge_meta.dst) != edges_set.end()) {
+        edge_meta.tomb();
+        --vertex_meta.degree;
+        ++holes_added;
+        // remove tombstoned edges from the start of the edge list
+        if (i == vertex_meta.begin)
+          ++vertex_meta.begin;
+      }
+    }
+
+    // remove tombstoned edges from the end of the edge list
+    for (auto i = vertex_meta.end; i > vertex_meta.begin; --i) {
+      if (getEdgeMetadata(EdgeHandle(vertex_meta.buffer, i - 1)).is_tomb()) {
+        --vertex_meta.end;
+        --vertex_meta.degree;
+      } else {
+        break;
+      }
+    }
+
+    m_holes.fetch_add(holes_added, std::memory_order_relaxed);
 
     return 0;
   }
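deleteEdges does not reclaim space: it tombstones matching edges in place, counts them into m_holes, and shrinks the vertex's live window from both ends so leading and trailing tombstones stop being scanned. A toy, single-threaded version of the two trimming passes, with plain arrays standing in for the graph's edge buffers:

#include <cstddef>
#include <iostream>
#include <unordered_set>
#include <vector>

int main() {
  std::vector<int> dst = {10, 11, 12, 13};
  std::vector<bool> tomb(dst.size(), false);
  std::size_t begin = 0, end = dst.size(), degree = dst.size();

  std::unordered_set<int> to_delete = {10, 13};
  for (std::size_t i = begin; i < end; ++i) {
    if (!tomb[i] && to_delete.count(dst[i])) {
      tomb[i] = true; // tombstone in place
      --degree;
      if (i == begin) // drop tombstones from the front of the window
        ++begin;
    }
  }
  while (end > begin && tomb[end - 1]) // drop tombstones from the back
    --end;

  // live window is now [1, 3): {11, 12}
  std::cout << "begin=" << begin << " end=" << end << " degree=" << degree
            << '\n';
}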
@@ -226,15 +215,14 @@ class LS_LC_CSR_Graph : private boost::noncopyable {
   // Performs the compaction algorithm by copying any vertices left in buffer 0
   // to buffer 1, then swapping the buffers.
   //
-  // Should not be called from within a Galois parallel kernel.
+  // Not safe to call in parallel with insertions/deletions.
   void compact() {
     using std::swap;
 
     // move from buffer 0 to buffer 1
     galois::do_all(galois::iterate(vertices().begin(), vertices().end()),
                    [&](VertexTopologyID vertex_id) {
                      VertexMetadata& vertex_meta = m_vertices[vertex_id];
-                     vertex_meta.lock();
 
                      if (vertex_meta.buffer == 0) {
                        uint64_t new_begin;
@@ -261,14 +249,9 @@ class LS_LC_CSR_Graph : private boost::noncopyable {
                        // we are about to swap the buffers, so all vertices will
                        // be in buffer 0
                        vertex_meta.buffer = 0;
-
-                       // don't release the vertex lock until after the edge
-                       // arrays are swapped
                      });
 
     // At this point, there are no more live edges in buffer 0.
-    // We also hold the lock for all vertices, so nobody else can hold
-    // m_edges_lock.
     m_edges_lock.lock();
     {
       m_edges[0].resize(0);
@@ -278,14 +261,10 @@ class LS_LC_CSR_Graph : private boost::noncopyable {
       m_holes.store(0, std::memory_order_relaxed);
     }
     m_edges_lock.unlock();
-
-    galois::do_all(
-        galois::iterate(vertices().begin(), vertices().end()),
-        [&](VertexTopologyID vertex_id) { m_vertices[vertex_id].unlock(); });
   }
 
 private:
-  struct VertexMetadata : public SpinLock {
+  struct VertexMetadata {
     uint8_t buffer : 1;
     uint64_t begin : 48; // inclusive
     uint64_t end : 48; // exclusive
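With the SpinLock base class gone, VertexMetadata is back to a small plain record. A sketch of the packed layout implied above; the field widths are from the diff, but the degree member used by the methods is declared below the fold, so its type here is an assumption, and the exact struct size is ABI-dependent:

#include <cstdint>
#include <iostream>

struct VertexMetadata {
  uint8_t buffer : 1;  // which of the two edge arrays holds this vertex's list
  uint64_t begin : 48; // first edge index (inclusive)
  uint64_t end : 48;   // one-past-last edge index (exclusive)
  uint64_t degree;     // live (non-tombstoned) edge count; width assumed
};

int main() {
  // Without the embedded lock the record packs into a few words; layout and
  // padding vary by ABI, so this just reports what your compiler does.
  std::cout << sizeof(VertexMetadata) << " bytes per vertex\n";
}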
