2025-03-12 12:37:19 +03:00
parent 1c851baa7e
commit 6a4040be3e
426 changed files with 0 additions and 0 deletions

View File: algebra.cpp

@@ -0,0 +1,89 @@
#include <cmath>
#include "algebra.hpp"
namespace nodesoup {
Point2D::operator Vector2D() const {
return { x, y };
}
Point2D& Point2D::operator+=(const Vector2D& vector) {
x += vector.dx;
y += vector.dy;
return *this;
}
Point2D& Point2D::operator-=(const Vector2D& vector) {
x -= vector.dx;
y -= vector.dy;
return *this;
}
Vector2D::operator Point2D() const {
return { dx, dy };
}
Vector2D& Vector2D::operator+=(const Vector2D& other) {
dx += other.dx;
dy += other.dy;
return *this;
}
Vector2D& Vector2D::operator-=(const Vector2D& other) {
dx -= other.dx;
dy -= other.dy;
return *this;
}
Vector2D& Vector2D::operator*=(double scalar) {
dx *= scalar;
dy *= scalar;
return *this;
}
// In-place component-wise multiplication (used for per-axis scaling in center_and_scale)
Vector2D& Vector2D::operator*(const Vector2D& other) {
dx *= other.dx;
dy *= other.dy;
return *this;
}
Vector2D& Vector2D::operator/=(double scalar) {
dx /= scalar;
dy /= scalar;
return *this;
}
Point2D operator+(const Point2D& point, const Vector2D& vector) {
return { point.x + vector.dx, point.y + vector.dy };
}
Point2D operator-(const Point2D& point, const Vector2D& vector) {
return { point.x - vector.dx, point.y - vector.dy };
}
Vector2D operator-(const Point2D& lhs, const Point2D& rhs) {
return { lhs.x - rhs.x, lhs.y - rhs.y };
}
Vector2D operator+(const Vector2D& lhs, const Vector2D& rhs) {
return { lhs.dx + rhs.dx, lhs.dy + rhs.dy };
}
Vector2D operator-(const Vector2D& lhs, const Vector2D& rhs) {
return { lhs.dx - rhs.dx, lhs.dy - rhs.dy };
}
Vector2D operator*(const Vector2D& vector, double scalar) {
return { vector.dx * scalar, vector.dy * scalar };
}
Vector2D operator*(double scalar, const Vector2D& vector) {
return vector * scalar;
}
Vector2D operator/(const Vector2D& vector, double scalar) {
return { vector.dx / scalar, vector.dy / scalar };
}
}

View File: algebra.hpp

@@ -0,0 +1,14 @@
#pragma once
#include "nodesoup.hpp"
namespace nodesoup {
Point2D operator+(const Point2D& point, const Vector2D& vector);
Point2D operator-(const Point2D& point, const Vector2D& vector);
Vector2D operator-(const Point2D& lhs, const Point2D& rhs);
Vector2D operator+(const Vector2D& lhs, const Vector2D& rhs);
Vector2D operator-(const Vector2D& lhs, const Vector2D& rhs);
Vector2D operator*(const Vector2D& vector, double scalar);
Vector2D operator*(double scalar, const Vector2D& vector);
Vector2D operator/(const Vector2D& vector, double scalar);
}
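As a quick illustration of how these types compose (a hypothetical snippet, not part of the commit): subtracting two points yields a Vector2D displacement, which can be scaled and added back onto a Point2D.

#include <cassert>
#include "algebra.hpp"
int main() {
    nodesoup::Point2D a { 1.0, 2.0 };
    nodesoup::Point2D b { 4.0, 6.0 };
    nodesoup::Vector2D delta = b - a;   // displacement between two points: { 3.0, 4.0 }
    assert(delta.norm() == 5.0);
    a += delta * 0.5;                   // move a halfway towards b
    assert(a.x == 2.5 && a.y == 4.0);
    return 0;
}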

View File: fruchterman_reingold.cpp

@@ -0,0 +1,83 @@
#include "fruchterman_reingold.hpp"
#include "algebra.hpp"
#include <algorithm>
#include <cmath>
#include <iostream>
namespace nodesoup {
using std::vector;
FruchtermanReingold::FruchtermanReingold(const adj_list_t& g, double k)
: g_(g)
, k_(k)
, k_squared_(k * k)
, temp_(10 * sqrt(g.size()))
, mvmts_(g_.size()) {}
void FruchtermanReingold::operator()(vector<Point2D>& positions) {
Vector2D zero = { 0.0, 0.0 };
std::fill(mvmts_.begin(), mvmts_.end(), zero);
// Repulsion force between each pair of vertices (other_id starts at v_id + 1, so each pair is handled once)
for (vertex_id_t v_id = 0; v_id < g_.size(); v_id++) {
for (vertex_id_t other_id = v_id + 1; other_id < g_.size(); other_id++) {
Vector2D delta = positions[v_id] - positions[other_id];
double distance = delta.norm();
// TODO: handle distance == 0.0
// > 1000.0: not worth computing
if (distance > 1000.0) {
continue;
}
double repulsion = k_squared_ / distance;
mvmts_[v_id] += delta / distance * repulsion;
mvmts_[other_id] -= delta / distance * repulsion;
}
// Attraction force along edges (each edge handled once, when adj_id < v_id)
for (vertex_id_t adj_id : g_[v_id]) {
if (adj_id > v_id) {
continue;
}
Vector2D delta = positions[v_id] - positions[adj_id];
double distance = delta.norm();
if (distance == 0.0) {
continue;
}
double attraction = distance * distance / k_;
mvmts_[v_id] -= delta / distance * attraction;
mvmts_[adj_id] += delta / distance * attraction;
}
}
// Max movement capped by current temperature
for (vertex_id_t v_id = 0; v_id < g_.size(); v_id++) {
double mvmt_norm = mvmts_[v_id].norm();
// < 1.0: not worth computing
if (mvmt_norm < 1.0) {
continue;
}
double capped_mvmt_norm = std::min(mvmt_norm, temp_);
Vector2D capped_mvmt = mvmts_[v_id] / mvmt_norm * capped_mvmt_norm;
positions[v_id] += capped_mvmt;
}
// Cool down fast until we reach 1.5, then stay at low temperature
if (temp_ > 1.5) {
temp_ *= 0.85;
} else {
temp_ = 1.5;
}
}
}
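Restated from the code above, each iteration of the functor applies the classic Fruchterman-Reingold forces and caps the resulting movement by the current temperature:

F_{\text{rep}}(u, v) = \frac{k^2}{d(u, v)}, \qquad
F_{\text{attr}}(u, v) = \frac{d(u, v)^2}{k}, \qquad
\Delta p_v = \min\left(\lVert m_v \rVert,\ t\right) \, \frac{m_v}{\lVert m_v \rVert}

where m_v is the net movement accumulated for vertex v during the iteration, and the temperature t is multiplied by 0.85 each iteration until it settles at 1.5.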

View File: fruchterman_reingold.hpp

@@ -0,0 +1,20 @@
#pragma once
#include "nodesoup.hpp"
#include <utility>
#include <vector>
namespace nodesoup {
class FruchtermanReingold {
public:
FruchtermanReingold(const adj_list_t& g, double k = 15.0);
void operator()(std::vector<Point2D>& positions);
private:
const adj_list_t& g_;
const double k_;
const double k_squared_;
double temp_;
std::vector<std::pair<vertex_id_t, vertex_id_t>> edges_;
std::vector<Vector2D> mvmts_;
};
}

View File: kamada_kawai.cpp

@@ -0,0 +1,188 @@
#include <algorithm>
#include <cassert>
#include <cmath>
#include <limits>
#include "algebra.hpp"
#include "kamada_kawai.hpp"
namespace nodesoup {
using std::vector;
KamadaKawai::KamadaKawai(const adj_list_t& g, double k, double energy_threshold)
: g_(g)
, energy_threshold_(energy_threshold) {
vector<vector<vertex_id_t>> distances = floyd_warshall_(g_);
// find biggest distance
size_t biggest_distance = 0;
for (vertex_id_t v_id = 0; v_id < g_.size(); v_id++) {
for (vertex_id_t other_id = 0; other_id < g_.size(); other_id++) {
if (distances[v_id][other_id] > biggest_distance) {
biggest_distance = distances[v_id][other_id];
}
}
}
// Ideal length for all edges. We don't really care about the exact value since the layout is going
// to be scaled. Let's choose 1.0, as the initial positions lie on a circle of radius 1.0, so we stay
// on the same order of magnitude.
double length = 1.0 / biggest_distance;
// init springs lengths and strengths matrices
for (vertex_id_t v_id = 0; v_id < g_.size(); v_id++) {
vector<Spring> v_springs;
for (vertex_id_t other_id = 0; other_id < g_.size(); other_id++) {
Spring spring;
if (v_id == other_id) {
spring.length = 0.0;
spring.strength = 0.0;
} else {
size_t distance = distances[v_id][other_id];
spring.length = distance * length;
spring.strength = k / (distance * distance);
}
v_springs.push_back(spring);
}
springs_.push_back(v_springs);
}
}
vector<vector<vertex_id_t>> KamadaKawai::floyd_warshall_(const adj_list_t& g) {
// build adjacency matrix (infinity = no edge, 1 = edge)
// half of max() so that adding two "infinite" distances does not overflow
unsigned int infinity = std::numeric_limits<unsigned int>::max() / 2;
vector<vector<vertex_id_t>> distances(g.size(), vector<vertex_id_t>(g.size(), infinity));
for (vertex_id_t v_id = 0; v_id < g.size(); v_id++) {
distances[v_id][v_id] = 0;
for (vertex_id_t adj_id : g[v_id]) {
if (adj_id > v_id) {
distances[v_id][adj_id] = 1;
distances[adj_id][v_id] = 1;
}
}
}
// Floyd-Warshall itself: find the length of the shortest path for each pair of vertices
for (vertex_id_t k = 0; k < g.size(); k++) {
for (vertex_id_t i = 0; i < g.size(); i++) {
for (vertex_id_t j = 0; j < g.size(); j++) {
distances[i][j] = std::min(distances[i][j], distances[i][k] + distances[k][j]);
}
}
}
return distances;
}
#define MAX_VERTEX_ITERS_COUNT 50
#define MAX_STEADY_ENERGY_ITERS_COUNT 50
/**
Reduce the energy of the vertex with the most energy, repeatedly, until all vertices have
an energy below energy_threshold
*/
void KamadaKawai::operator()(vector<Point2D>& positions) const {
vertex_id_t v_id;
unsigned int steady_energy_count = 0;
double max_vertex_energy = find_max_vertex_energy_(positions, v_id);
while (max_vertex_energy > energy_threshold_ && steady_energy_count < MAX_STEADY_ENERGY_ITERS_COUNT) {
// move the vertex step by step until its energy goes below the threshold
// (apparently this is equivalent to the Newton-Raphson method)
unsigned int vertex_count = 0;
do {
positions[v_id] = compute_next_vertex_position_(v_id, positions);
vertex_count++;
} while (compute_vertex_energy_(v_id, positions) > energy_threshold_ && vertex_count < MAX_VERTEX_ITERS_COUNT);
double max_vertex_energy_prev = max_vertex_energy;
max_vertex_energy = find_max_vertex_energy_(positions, v_id);
if (std::abs(max_vertex_energy - max_vertex_energy_prev) < 1e-20) {
steady_energy_count++;
} else {
steady_energy_count = 0;
}
}
}
/**
Find @p max_energy_v_id with the most potential energy and @return its energy
// https://gist.github.com/terakun/b7eff90c889c1485898ec9256ca9f91d
*/
double KamadaKawai::find_max_vertex_energy_(const vector<Point2D>& positions, vertex_id_t& max_energy_v_id) const {
double max_energy = -1.0;
for (vertex_id_t v_id = 0; v_id < g_.size(); v_id++) {
double energy = compute_vertex_energy_(v_id, positions);
if (energy > max_energy) {
max_energy_v_id = v_id;
max_energy = energy;
}
}
assert(max_energy != -1.0);
return max_energy;
}
/** @return the total potential energy of the springs between @p v_id and all other vertices */
double KamadaKawai::compute_vertex_energy_(vertex_id_t v_id, const vector<Point2D>& positions) const {
double x_energy = 0.0;
double y_energy = 0.0;
for (vertex_id_t other_id = 0; other_id < g_.size(); other_id++) {
if (v_id == other_id) {
continue;
}
Vector2D delta = positions[v_id] - positions[other_id];
double distance = delta.norm();
// delta * k * (1 - l / distance)
Spring spring = springs_[v_id][other_id];
x_energy += delta.dx * spring.strength * (1.0 - spring.length / distance);
y_energy += delta.dy * spring.strength * (1.0 - spring.length / distance);
}
return sqrt(x_energy * x_energy + y_energy * y_energy);
}
/**
@returns the next position for @p v_id that reduces its potential energy, i.e. the energy in the whole graph
caused by its position.
The position's delta depends on K (TODO bigger K = faster?).
This is the complicated part of the algorithm.
*/
Point2D KamadaKawai::compute_next_vertex_position_(vertex_id_t v_id, const vector<Point2D>& positions) const {
double xx_energy = 0.0, xy_energy = 0.0, yx_energy = 0.0, yy_energy = 0.0;
double x_energy = 0.0, y_energy = 0.0;
for (vertex_id_t other_id = 0; other_id < g_.size(); other_id++) {
if (v_id == other_id) {
continue;
}
Vector2D delta = positions[v_id] - positions[other_id];
double distance = delta.norm();
double cubed_distance = distance * distance * distance;
Spring spring = springs_[v_id][other_id];
x_energy += delta.dx * spring.strength * (1.0 - spring.length / distance);
y_energy += delta.dy * spring.strength * (1.0 - spring.length / distance);
xy_energy += spring.strength * spring.length * delta.dx * delta.dy / cubed_distance;
xx_energy += spring.strength * (1.0 - spring.length * delta.dy * delta.dy / cubed_distance);
yy_energy += spring.strength * (1.0 - spring.length * delta.dx * delta.dx / cubed_distance);
}
yx_energy = xy_energy;
Point2D position = positions[v_id];
double denom = xx_energy * yy_energy - xy_energy * yx_energy;
position.x += (xy_energy * y_energy - yy_energy * x_energy) / denom;
position.y += (xy_energy * x_energy - xx_energy * y_energy) / denom;
return position;
}
}
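Spelled out, the constructor above derives each spring from the graph-theoretic distance d_vm (length l_vm = d_vm / D with D the largest distance, strength k_vm = K / d_vm^2), and compute_next_vertex_position_ solves, via Cramer's rule, the 2x2 Newton-Raphson step of the Kamada-Kawai paper:

\begin{pmatrix}
\sum_m k_{vm}\left(1 - \frac{l_{vm}\,\Delta y^2}{d^3}\right) & \sum_m \frac{k_{vm}\, l_{vm}\,\Delta x\,\Delta y}{d^3} \\
\sum_m \frac{k_{vm}\, l_{vm}\,\Delta x\,\Delta y}{d^3} & \sum_m k_{vm}\left(1 - \frac{l_{vm}\,\Delta x^2}{d^3}\right)
\end{pmatrix}
\begin{pmatrix} \delta x \\ \delta y \end{pmatrix}
= -\begin{pmatrix}
\sum_m k_{vm}\,\Delta x \left(1 - \frac{l_{vm}}{d}\right) \\
\sum_m k_{vm}\,\Delta y \left(1 - \frac{l_{vm}}{d}\right)
\end{pmatrix}

where Δx, Δy and d are the components and norm of positions[v_id] - positions[other_id]; the matrix entries correspond to xx_energy, xy_energy and yy_energy, and the right-hand side to x_energy and y_energy in the code.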

View File: kamada_kawai.hpp

@@ -0,0 +1,30 @@
#pragma once
#include "nodesoup.hpp"
#include <vector>
namespace nodesoup {
// https://gist.github.com/terakun/b7eff90c889c1485898ec9256ca9f91d
// https://graphsharp.codeplex.com/SourceControl/latest#Source/Graph#/Algorithms/Layout/Simple/FDP/KKLayoutAlgorithm.cs
class KamadaKawai {
public:
struct Spring {
double length;
double strength;
};
KamadaKawai(const adj_list_t& g, double k = 300.0, double energy_threshold = 1e-2);
void operator()(std::vector<Point2D>& positions) const;
private:
const adj_list_t& g_;
const double energy_threshold_;
std::vector<std::vector<Spring>> springs_;
static std::vector<std::vector<vertex_id_t>> floyd_warshall_(const adj_list_t& g);
// p_m in the paper: find the vertex with the most potential energy and return that energy
double find_max_vertex_energy_(const std::vector<Point2D>& positions, vertex_id_t& max_energy_v_id) const;
// delta_m in the paper: potential energy of the springs between v_id and all other vertices
double compute_vertex_energy_(vertex_id_t v_id, const std::vector<Point2D>& positions) const;
Point2D compute_next_vertex_position_(vertex_id_t v_id, const std::vector<Point2D>& positions) const;
};
}

View File: layout.cpp

@@ -0,0 +1,68 @@
#include "layout.hpp"
#include "algebra.hpp"
#include <cmath>
#include <limits>
namespace nodesoup {
using std::vector;
void circle(const adj_list_t& g, vector<Point2D>& positions) {
const double pi = 3.14159265358979323846;
double angle = 2.0 * pi / g.size();
for (vertex_id_t v_id = 0; v_id < g.size(); v_id++) {
positions[v_id].x = cos(v_id * angle);
positions[v_id].y = sin(v_id * angle);
}
}
void center_and_scale(const adj_list_t& g, unsigned int width, unsigned int height, vector<Point2D>& positions) {
// find current dimensions
double x_min = std::numeric_limits<double>::max();
double x_max = std::numeric_limits<double>::lowest();
double y_min = std::numeric_limits<double>::max();
double y_max = std::numeric_limits<double>::lowest();
for (vertex_id_t v_id = 0; v_id < g.size(); v_id++) {
if (positions[v_id].x < x_min)
x_min = positions[v_id].x;
if (positions[v_id].x > x_max)
x_max = positions[v_id].x;
if (positions[v_id].y < y_min)
y_min = positions[v_id].y;
if (positions[v_id].y > y_max)
y_max = positions[v_id].y;
}
double cur_width = x_max - x_min;
double cur_height = y_max - y_min;
// compute scale factor so the layout fits the canvas
double x_scale = (cur_width > 0) ? width / cur_width : width;
double y_scale = (cur_height > 0) ? height / cur_height : height;
Vector2D scale = { x_scale, y_scale };
// compute offset and apply it to every position
Vector2D center = { x_max + x_min, y_max + y_min };
Vector2D offset = (center / 2.0) * scale;
// shift everything so no coordinate is negative, with a 50 unit margin
double shift_min_x = std::numeric_limits<double>::max();
double shift_min_y = std::numeric_limits<double>::max();
for (vertex_id_t v_id = 0; v_id < g.size(); v_id++) {
positions[v_id] = (Point2D)((Vector2D)positions[v_id] * scale - offset);
shift_min_x = std::min(shift_min_x, positions[v_id].x);
shift_min_y = std::min(shift_min_y, positions[v_id].y);
}
shift_min_x = std::abs(shift_min_x) + 50;
shift_min_y = std::abs(shift_min_y) + 50;
for (auto& pos : positions) {
pos.x += shift_min_x;
pos.y += shift_min_y;
}
}
}
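In other words, center_and_scale maps every position through a per-axis affine transform and then translates the result into the positive quadrant with a 50 unit margin (restated from the code above):

p' = p \odot s - \frac{c}{2} \odot s, \qquad
s = \left(\frac{W}{x_{max} - x_{min}},\ \frac{H}{y_{max} - y_{min}}\right), \qquad
c = (x_{min} + x_{max},\ y_{min} + y_{max})

where \odot is the component-wise multiplication defined as the member operator* in algebra.cpp.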

View File: layout.hpp

@@ -0,0 +1,10 @@
#pragma once
#include "nodesoup.hpp"
#include <vector>
namespace nodesoup {
/** Distribute vertices equally on a 1.0 radius circle */
void circle(const adj_list_t& g, std::vector<Point2D>& positions);
/** Center and scale vertices so the graph fits on a canvas of given dimensions */
void center_and_scale(const adj_list_t& g, unsigned int width, unsigned int height, std::vector<Point2D>& positions);
}

View File: nodesoup.cpp

@@ -0,0 +1,64 @@
#include "nodesoup.hpp"
#include "fruchterman_reingold.hpp"
#include "kamada_kawai.hpp"
#include "layout.hpp"
#include <algorithm>
#include <cmath>
namespace nodesoup {
using std::vector;
vector<Point2D> fruchterman_reingold(
const adj_list_t& g,
unsigned int width,
unsigned int height,
unsigned int iters_count,
double k,
iter_callback_t iter_cb) {
vector<Point2D> positions(g.size());
// Initial layout on a circle
circle(g, positions);
FruchtermanReingold fr(g, k);
for (unsigned int i = 0; i < iters_count; i++) {
fr(positions);
if (iter_cb != nullptr) {
vector<Point2D> scaled_positions = positions;
center_and_scale(g, width, height, scaled_positions);
iter_cb(scaled_positions, i);
}
}
center_and_scale(g, width, height, positions);
return positions;
}
vector<Point2D> kamada_kawai(
const adj_list_t& g,
unsigned int width,
unsigned int height,
double k,
double energy_threshold) {
vector<Point2D> positions(g.size());
// Initial layout on a circle
circle(g, positions);
KamadaKawai kk(g, k, energy_threshold);
kk(positions);
center_and_scale(g, width, height, positions);
return positions;
}
vector<double> size_radiuses(const adj_list_t& g, double min_radius, double k) {
vector<double> radiuses;
radiuses.reserve(g.size());
for (vertex_id_t v_id = 0; v_id < g.size(); v_id++) {
// radius grows logarithmically with the vertex's relative degree
double delta = log2(k * (double) g[v_id].size() / g.size());
double radius = min_radius + std::max(0.0, delta);
radiuses.push_back(radius);
}
return radiuses;
}
}

View File: nodesoup.hpp

@@ -0,0 +1,69 @@
#pragma once
#include <algorithm>
#include <cmath>
#include <functional>
#include <utility>
#include <vector>
namespace nodesoup {
/** Simple adjacency list graph structure */
using vertex_id_t = std::size_t;
using adj_list_t = std::vector<std::vector<vertex_id_t>>;
/** Algebra types */
struct Vector2D;
struct Point2D {
double x;
double y;
explicit operator Vector2D() const;
Point2D& operator+=(const Vector2D& vector);
Point2D& operator-=(const Vector2D& vector);
std::pair<int, int> toPair() const { return std::make_pair((int)x, (int)y); }
};
struct Vector2D {
double dx;
double dy;
double norm() const {
return sqrt(dx * dx + dy * dy);
}
explicit operator Point2D() const;
Vector2D& operator+=(const Vector2D& other);
Vector2D& operator-=(const Vector2D& other);
Vector2D& operator*=(double scalar);
Vector2D& operator/=(double scalar);
Vector2D& operator*(const Vector2D& other); // component-wise multiplication, in place
};
/** Main library functions */
using iter_callback_t = std::function<void(const std::vector<Point2D>&, int)>;
/**
Applies the Fruchterman-Reingold algorithm to lay out graph @p g in a frame of dimensions
@p width and @p height, in @p iters_count iterations
*/
std::vector<Point2D> fruchterman_reingold(
const adj_list_t& g,
unsigned int width,
unsigned int height,
unsigned int iters_count = 300,
double k = 15.0,
iter_callback_t iter_cb = nullptr);
std::vector<Point2D> kamada_kawai(
const adj_list_t& g,
unsigned int width,
unsigned int height,
double k = 300.0,
double energy_threshold = 1e-2);
/** Assigns a radius to each vertex based on its degree */
std::vector<double> size_radiuses(const adj_list_t& g, double min_radius = 4.0, double k = 300.0);
}
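Finally, a minimal usage sketch of the public API above; the tiny graph and the 800x600 canvas are made-up values for illustration, not part of the commit.

#include <cstdio>
#include <vector>
#include "nodesoup.hpp"
int main() {
    // Undirected graph given as a symmetric adjacency list: a triangle (0-1-2) plus a pendant vertex 3.
    nodesoup::adj_list_t g = {
        { 1, 2 },    // neighbours of vertex 0
        { 0, 2 },    // neighbours of vertex 1
        { 0, 1, 3 }, // neighbours of vertex 2
        { 2 },       // neighbours of vertex 3
    };
    // Force-directed layout on an 800x600 canvas, using the default iteration count and k declared above.
    std::vector<nodesoup::Point2D> positions = nodesoup::fruchterman_reingold(g, 800, 600);
    std::vector<double> radiuses = nodesoup::size_radiuses(g);
    for (nodesoup::vertex_id_t v = 0; v < g.size(); v++) {
        std::printf("vertex %zu: (%.1f, %.1f), radius %.1f\n", v, positions[v].x, positions[v].y, radiuses[v]);
    }
    return 0;
}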