// NOTE(review): extraction artifact — the leading integers (150, 151, ...) are
// original source line numbers fused into the text, and several intermediate
// lines are missing from this view; code is annotated as-is, not repaired.
//
// Cache a reference to the storage instance and record distributed-memory
// process (dmp: e.g. MPI/UPC++) information on it before conversion.
150 auto& data = *m_storage;
// Trait: whether this component type restricts its data to thread-local scope.
151 bool _thread_scope_only = trait::thread_scope_only<Type>::value;
// Total number of distributed processes (ranks).
155 auto _num_pid_count = dmp::size();
// Record whether DMP was initialized plus this process's rank and the world size.
157 data.m_node_init = dmp::is_initialized();
158 data.m_node_rank = dmp::rank();
159 data.m_node_size = dmp::size();
// Lambda: builds a "|<tid>>>> "-style prefix identifying the thread that
// recorded a graph node. NOTE(review): interior lines are missing from this
// view (original 167-170, 173, 175, 177-184), so the body is incomplete —
// the widening assignment under the `> 9` check and the return are not visible.
166 auto _get_thread_prefix = [&](
const graph_node& itr) {
// Field width for the thread id; presumably widened when more than 9
// threads collected data — TODO confirm against the missing line 173.
171 static uint16_t
width = 1;
172 if(_num_thr_count > 9)
174 std::stringstream ss;
176 ss <<
"|" << std::setw(
width) << itr.tid() <<
">>> ";
// Lambda: builds a per-process (rank) prefix, falling back to the per-thread
// prefix when DMP was never initialized or pid prefixes are disabled. When
// the ranks are collapsed into fewer output groups (_nc, declared on a
// missing line), ranks are bucketed into bins and the prefix shows the rank
// range of this process's bin. NOTE(review): many lines are missing from
// this view, so braces and the declarations of _nc, _msg, midx, and
// _use_pid_prefix are not visible — confirm against the full source.
185 auto _get_node_prefix = [&](
const graph_node& itr) {
186 if(!data.m_node_init || !_use_pid_prefix)
187 return _get_thread_prefix(itr);
// This process's rank, and the (inclusive) rank range of its bin;
// {-1,-1} means "no binning applied".
190 auto _idx = data.m_node_rank;
191 auto _range = std::make_pair(-1, -1);
// Bin the ranks only when a valid group count was requested that is
// smaller than the total number of ranks.
193 if(_nc > 0 && _nc < data.m_node_size)
// bins = number of groups (rounded up); bsize = ranks per group.
196 int32_t nmod = _num_pid_count % _nc;
197 int32_t bins = _num_pid_count / _nc + ((nmod == 0) ? 0 : 1);
198 int32_t bsize = _num_pid_count / bins;
// Map of bin index -> set of ranks in that bin.
// NOTE(review): 'midx' is computed on a missing line (presumably
// i / bsize) — confirm upstream.
201 std::map<int32_t, std::set<int32_t>> binmap;
202 for(int32_t i = 0; i < _num_pid_count; ++i)
204 binmap[midx].insert(i);
// Find the bin containing this rank and record its [first, last] range.
// NOTE(review): dereferencing end() would be UB; the missing line 223
// presumably steps the iterator back to the last element first — verify.
215 for(
const auto& bitr : binmap)
218 if(bitr.second.find(_idx) != bitr.second.end())
220 auto vitr = bitr.second.begin();
221 _range.first = *vitr;
222 vitr = bitr.second.end();
224 _range.second = *vitr;
// Debug/verbose dump of the computed bin map via PRINT_HERE.
230 std::stringstream ss;
231 for(
const auto& bitr : binmap)
233 ss <<
", [" << bitr.first <<
"] ";
234 std::stringstream bss;
235 for(
const auto& nitr : bitr.second)
237 ss << bss.str().substr(2);
// substr(2) strips the leading ", " separator before reporting.
240 _msg += ss.str().substr(2);
241 PRINT_HERE(
"[%s][pid=%i][tid=%i]> %s. range = { %i, %i }",
242 demangle<get<Type, true>>().c_str(), (
int) process::get_id(),
243 (
int) threading::get_id(), _msg.c_str(), (
int) _range.first,
244 (
int) _range.second);
// Field width for rank numbers; presumably widened when more than 9
// ranks exist (the widening statement is on a missing line).
249 static uint16_t
width = 1;
250 if(_num_pid_count > 9)
252 std::stringstream ss;
// With a valid bin range the prefix is "|first:last" + thread prefix;
// otherwise just "|rank" + thread prefix.
254 if(_range.first >= 0 && _range.second >= 0)
256 ss <<
"|" << std::setw(
width) << _range.first <<
":" << std::setw(
width)
257 << _range.second << _get_thread_prefix(itr);
261 ss <<
"|" << std::setw(
width) << _idx << _get_thread_prefix(itr);
// Lambda: combines the node/rank prefix, a depth-based indent, and the
// entry's own prefix into the final display label. NOTE(review): the lines
// declaring _node_prefix/_indent/_prefix and the indent-loop body (original
// 273-284) are missing from this view; the loop presumably appends one
// indent unit per depth level — confirm against the full source.
272 auto _compute_modified_prefix = [&](
const graph_node& itr) {
// Depth relative to the (dummy) root node.
277 int64_t _depth = itr.depth() - 1;
280 for(int64_t ii = 0; ii < _depth - 1; ++ii)
285 return _node_prefix + _indent +
_prefix;
// Lambda: flattens the call graph into a linear result list. First pass
// finds the minimum depth among valid nodes; second pass converts each node
// below that depth into a result entry carrying its prefix, depth, rolling
// id (its own id summed with all ancestor ids), hierarchy (root-to-node id
// path), stats, tid, and pid. NOTE(review): many lines are missing from
// this view (loop bodies, declarations of _min, _list, _hierarchy, _entry,
// _combined, ret), so parts of the description are inferred — confirm
// against the full source.
289 auto convert_graph = [&]() {
294 auto& _graph = data.graph();
// First pass: establish the minimum depth among (valid) graph nodes.
295 for(
auto itr = _graph.begin(); itr != _graph.end(); ++itr)
300 demangle<Type>().c_str());
303 _min = std::min<int64_t>(_min, itr->depth());
// Second pass: emit one result entry per valid node deeper than _min.
306 for(
auto itr = _graph.begin(); itr != _graph.end(); ++itr)
311 demangle<Type>().c_str());
// Skip entries flagged invalid by the component's validity operation.
315 if(operation::get_is_invalid<Type, false>{}(itr->data()))
317 if(itr->depth() > _min)
319 auto _depth = itr->depth() - (_min + 1);
320 auto _prefix = _compute_modified_prefix(*itr);
// Rolling id starts with the node's own id; ancestor ids are added below.
321 auto _rolling = itr->id();
322 auto _stats = itr->stats();
323 auto _parent = graph_type::parent(itr);
325 auto _tid = itr->tid();
326 auto _pid = itr->pid();
// Walk up the graph collecting ancestor ids until the min depth.
327 while(_parent && _parent->depth() > _min)
329 if(operation::get_is_invalid<Type, false>{}(_parent->data()))
331 _hierarchy.push_back(_parent->id());
332 _rolling += _parent->id();
333 _parent = graph_type::parent(_parent);
// Ancestors were collected bottom-up; reverse to root-first order,
// then append this node's own id to complete the path.
335 if(_hierarchy.size() > 1)
336 std::reverse(_hierarchy.begin(), _hierarchy.end());
337 _hierarchy.push_back(itr->id());
339 _rolling, _hierarchy, _stats, _tid, _pid);
340 _list.push_back(std::move(_entry));
// Merge this thread's list into the combined (cross-thread) result set.
350 operation::finalize::merge<Type, true>(_combined, _list);
// Execute the conversion and capture the result array.
354 ret = convert_graph();
static int32_t get_thread_count()
This effectively provides the total number of threads which collected data. It is only "decremented" ...
::tim::statistics< Tp > max(::tim::statistics< Tp > lhs, const Tp &rhs)
char const std::string & _prefix
std::string demangle(const char *_mangled_name, int *_status=nullptr)
tim::mpl::apply< std::string > string
typename storage_type::result_array_t result_type
typename storage_type::graph_node graph_node
typename storage_type::result_node result_node
typename storage_type::uintvector_t hierarchy_type