On 8/3/15 12:18pm, Marcin Zalewski wrote:
You can find some examples here:
https://github.com/boostorg/graph_parallel/tree/master/example
You can also look at tests:
https://github.com/boostorg/graph_parallel/tree/master/test
Thanks, Marcin, I finally can compile my code!
I wrote a simple graph based on the named graph example (*), in order to
learn, but my code seems to skip node distribution, while the example
handles it perfectly.
(*)
https://github.com/boostorg/graph_parallel/blob/master/test/named_vertices_t...
I think I followed the same steps in my source, but I'm obviously
mistaken... in my example every processor has every node, and every edge
if I comment out the if statement.
I think I'm using the wrong boost::add_vertex/add_edge overload here, but
honestly, I'm a complete newbie on the distributed-graph side.
Thanks for any pointers you can give me!
#include <iostream>
#include <string>
#include <vector>
#include <cmath>
#include <list>
#include <functional>
#include <boost/graph/use_mpi.hpp>
#include <boost/mpi.hpp>
#include <boost/graph/distributed/mpi_process_group.hpp>
#include <boost/graph/distributed/adjacency_list.hpp>
#include <boost/graph/iteration_macros.hpp>
//using namespace boost;
//using boost::graph::distributed::mpi_process_group;
int main(int argc, const char * argv[])
{
boost::mpi::environment env;
boost::mpi::communicator comm;
std::cout << comm.rank() << std::endl;
typedef boost::adjacency_list,
boost::directedS, std::string> graph;
graph g;
std::vectorstd::string names { "alpha", "beta", "gamma", "delta" };
std::vectorgraph::vertex_descriptor descs;
for(auto p : names) descs.push_back(boost::add_vertex(p, g));
if (comm.rank() == 0)
{
boost::add_edge(descs[0], descs[0], g);
boost::add_edge(descs[0], descs[1], g);
boost::add_edge(descs[1], descs[2], g);
boost::add_edge(descs[2], descs[3], g);
}
boost::synchronize(g);
BGL_FORALL_VERTICES(v, g, graph)
{
std::cout << "V @ " << comm.rank() << " " << g[v] << std::endl;
}
BGL_FORALL_EDGES(e, g, graph)
{
std::cout << "E @ " << comm.rank() << " " << boost::source(e,
g).local << " -> " << boost::target(e, g).local << " srccpu " <<
e.source_processor << " dstcpu " << e.target_processor << std::endl;
}
return 0;
}
***OUTPUT***
5
4
2
1
3
0
V @ 1 alpha
V @ 1 beta
V @ 1 gamma
V @ 1 delta
E @ 1 0 -> 0 srccpu 1 dstcpu 1
E @ 1 0 -> 1 srccpu 1 dstcpu 1
E @ 1 1 -> 2 srccpu 1 dstcpu 1
E @ 1 2 -> 3 srccpu 1 dstcpu 1
V @ 3 alpha
V @ 3 beta
V @ 3 gamma
V @ 3 delta
E @ 3 0 -> 0 srccpu 3 dstcpu 3
E @ 3 0 -> 1 srccpu 3 dstcpu 3
E @ 3 1 -> 2 srccpu 3 dstcpu 3
E @ 3 2 -> 3 srccpu 3 dstcpu 3
V @ 2 alpha
V @ 2 beta
V @ 2 gamma
V @ 2 delta
E @ 2 0 -> 0 srccpu 2 dstcpu 2
E @ 2 0 -> 1 srccpu 2 dstcpu 2
E @ 2 1 -> 2 srccpu 2 dstcpu 2
E @ 2 2 -> 3 srccpu 2 dstcpu 2
V @ 5 alpha
V @ 5 beta
V @ 5 gamma
V @ 5 delta
E @ 5 0 -> 0 srccpu 5 dstcpu 5
E @ 5 0 -> 1 srccpu 5 dstcpu 5
E @ 5 1 -> 2 srccpu 5 dstcpu 5
E @ 5 2 -> 3 srccpu 5 dstcpu 5
V @ 4 alpha
V @ 4 beta
V @ 4 gamma
V @ 4 delta
E @ 4 0 -> 0 srccpu 4 dstcpu 4
E @ 4 0 -> 1 srccpu 4 dstcpu 4
E @ 4 1 -> 2 srccpu 4 dstcpu 4
E @ 4 2 -> 3 srccpu 4 dstcpu 4
V @ 0 alpha
V @ 0 beta
V @ 0 gamma
V @ 0 delta
E @ 0 0 -> 0 srccpu 0 dstcpu 0
E @ 0 0 -> 1 srccpu 0 dstcpu 0
E @ 0 1 -> 2 srccpu 0 dstcpu 0
E @ 0 2 -> 3 srccpu 0 dstcpu 0