/*******************************************************************************
 * \copyright   This file is part of the GADGET4 N-body/SPH code developed
 * \copyright   by Volker Springel. Copyright (C) 2014-2020 by Volker Springel
 * \copyright   (vspringel@mpa-garching.mpg.de) and all contributing authors.
 *******************************************************************************/

/*! \file  healthtest.cc
 *
 *  \brief routines that test whether all compute nodes deliver their full CPU and communication performance
 */

#include "gadgetconfig.h"

#include <math.h>
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "../data/allvars.h"
#include "../data/dtypes.h"
#include "../data/mymalloc.h"
#include "../logs/logs.h"
#include "../main/simulation.h"
#include "../mpi_utils/mpi_utils.h"
#include "../system/system.h"

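// Parameters of the individual tests: TEST_PACKET_SIZE_IN_MB is split evenly across the
// ranks of the tested communicator to set the pairwise message size, WORK_LOOP_COUNTER
// sets the length of the synthetic CPU work loop, and WORK_NUMBER_OF_IPROBE_TESTS the
// number of timed MPI_Iprobe() calls.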
#define TEST_PACKET_SIZE_IN_MB 5
#define WORK_LOOP_COUNTER 50000000
#define WORK_NUMBER_OF_IPROBE_TESTS 1000000

#ifndef MAX_VARIATION_TOLERANCE
#define MAX_VARIATION_TOLERANCE 0.5
#endif

void sim::healthtest(void)
{
  mpi_printf("\n");

  measure_cpu_performance(Communicator);

  // Let's take a look at the communication speed in a global all-to-all data exchange realized through pairwise exchanges along a
  // hypercube
  if(NTask > 1)
    measure_hyper_cube_speed("Full hypercube:", Communicator);

  // Let's take a look at inter-node communication speed
  if(NumNodes > 1)
    {
      int CommSplitColor;

      if(RankInThisNode == 0)
        CommSplitColor = 0;
      else
        CommSplitColor = 1;

      MPI_Comm comm;
      MPI_Comm_split(Communicator, CommSplitColor, ThisTask, &comm);

      if(RankInThisNode == 0)
        measure_hyper_cube_speed("Internode cube:", comm);

      MPI_Comm_free(&comm);
    }

  // Now look at intra-node communication speed
  if(NumNodes < NTask)
    {
      int CommSplitColor = ThisNode;
      MPI_Comm comm;
      MPI_Comm_split(Communicator, CommSplitColor, ThisTask, &comm);

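      // every node runs this test concurrently on its own intranode communicator, but only
      // the first node's result appears, since mpi_printf() writes from global task 0 only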
      measure_hyper_cube_speed("Intranode cube, 1st node:", comm);

      MPI_Comm_free(&comm);
    }

  measure_iprobe_performance("Iprobe for any message:");

  mpi_printf("\n");
}

double sim::measure_cpu_performance(MPI_Comm Communicator)
{
  int loc_ntask, loc_thistask, loc_ptask;

  double ta = Logs.second();

  MPI_Comm_rank(Communicator, &loc_thistask);
  MPI_Comm_size(Communicator, &loc_ntask);

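  // determine the hypercube dimension: the smallest loc_ptask with 2^loc_ptask >= loc_ntask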
  for(loc_ptask = 0; loc_ntask > (1 << loc_ptask); loc_ptask++)
    ;

  double sum = 0;

  MPI_Barrier(Communicator);

  double t0 = Logs.second();

  // do some computationally intense (but useless) work for a while; the accumulated sum
  // is returned so that the compiler cannot optimize the loop away
  for(int i = 0; i < WORK_LOOP_COUNTER; i++)
    sum += sin((i + 0.1) / WORK_LOOP_COUNTER) / (2.0 + cos(i - 0.1) / WORK_LOOP_COUNTER);

  double t1 = Logs.second();

  double tperf = Logs.timediff(t0, t1), tperfsum;

  MPI_Allreduce(&tperf, &tperfsum, 1, MPI_DOUBLE, MPI_SUM, Communicator);
  double tavg = tperfsum / loc_ntask;

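  // (time, rank) pairs allow MPI_MINLOC/MPI_MAXLOC to identify the fastest and slowest task and node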
  struct
  {
    double t;
    int rank;
  } local = {tperf, ThisTask}, localnode = {tperf, ThisNode}, min_time, max_time, min_timenode, max_timenode;

  MPI_Allreduce(&local, &min_time, 1, MPI_DOUBLE_INT, MPI_MINLOC, Communicator);
  MPI_Allreduce(&local, &max_time, 1, MPI_DOUBLE_INT, MPI_MAXLOC, Communicator);

  MPI_Allreduce(&localnode, &min_timenode, 1, MPI_DOUBLE_INT, MPI_MINLOC, Communicator);
  MPI_Allreduce(&localnode, &max_timenode, 1, MPI_DOUBLE_INT, MPI_MAXLOC, Communicator);

  double variation = (max_time.t - min_time.t) / tavg;

  double tb = Logs.second();

  mpi_printf(
      "HEALTHTEST: %25s  %8.3f sec            %7.3f%%  variation   | Best=%g on Task=%d/Node=%d, Worst=%g on Task=%d/Node=%d, test "
      "took %g sec\n",
      "CPU performance:", tavg, 100.0 * variation, min_time.t, min_time.rank, min_timenode.rank, max_time.t, max_time.rank,
      max_timenode.rank, Logs.timediff(ta, tb));

  if(variation >= MAX_VARIATION_TOLERANCE)
    {
      char name_maxnode[MPI_MAX_PROCESSOR_NAME];
      int len;
      if(ThisTask == max_time.rank)
        MPI_Get_processor_name(name_maxnode, &len);

      MPI_Bcast(name_maxnode, MPI_MAX_PROCESSOR_NAME, MPI_CHAR, max_time.rank, Communicator);

      char buf[1000 + MPI_MAX_PROCESSOR_NAME];
      sprintf(buf, "processes_%s.txt", name_maxnode);

      mpi_printf("HEALTHTEST: We are dumping a process list to the file '%s'\n", buf);

      if(ThisTask == max_time.rank)
        {
          char cmd[10000 + MPI_MAX_PROCESSOR_NAME];
          sprintf(cmd, "ps -ef >& %s", buf);
          system(cmd);
        }

      MPI_Barrier(Communicator);

      // only issue a warning for now instead of terminating the code
      warn(
          "\n\nHEALTHTEST: We issue a warning because the performance variation=%g of the CPUs lies above the prescribed tolerance "
          "MAX_VARIATION_TOLERANCE=%g, possibly indicating a machine problem. (sum=%g)\n",
          variation, MAX_VARIATION_TOLERANCE, sum);
    }

  return sum;
}

double sim::measure_hyper_cube_speed(const char *tag, MPI_Comm Communicator)
{
  double ta = Logs.second();

  int loc_ntask, loc_thistask, loc_ptask;

  MPI_Comm_rank(Communicator, &loc_thistask);
  MPI_Comm_size(Communicator, &loc_ntask);

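  // again determine the hypercube dimension needed to span loc_ntask ranks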
  for(loc_ptask = 0; loc_ntask > (1 << loc_ptask); loc_ptask++)
    ;

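  // each pairwise message carries an equal share of the total test volume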
  int bytecount = (TEST_PACKET_SIZE_IN_MB * 1024L * 1024L) / loc_ntask;

  double tall = 0;
  int count   = 0;

  char *sendbuf = (char *)Mem.mymalloc_clear("send", bytecount * sizeof(char));
  char *recvbuf = (char *)Mem.mymalloc_clear("recv", bytecount * sizeof(char));

  /* exchange the test data */
  for(int ngrp = 1; ngrp < (1 << loc_ptask); ngrp++)
    {
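      // stage ngrp pairs ranks by XOR, so every rank exchanges with a distinct partner;
      // e.g. with 4 ranks: ngrp=1 pairs 0-1 and 2-3, ngrp=2 pairs 0-2 and 1-3, ngrp=3 pairs 0-3 and 1-2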
      int recvTask = loc_thistask ^ ngrp;

      MPI_Barrier(Communicator);

      if(recvTask < loc_ntask)
        {
          double t0 = Logs.second();
          MPI_Sendrecv(sendbuf, bytecount, MPI_BYTE, recvTask, TAG_DENS_A, recvbuf, bytecount, MPI_BYTE, recvTask, TAG_DENS_A,
                       Communicator, MPI_STATUS_IGNORE);
          double t1 = Logs.second();

          tall += Logs.timediff(t0, t1);
          count++;
        }
    }

  Mem.myfree(recvbuf);
  Mem.myfree(sendbuf);

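  // average time per exchange; the factor 0.5 accounts for data flowing in both directions during each MPI_Sendrecv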
  double tperf = 0.5 * tall / count, tperfsum;

  MPI_Allreduce(&tperf, &tperfsum, 1, MPI_DOUBLE, MPI_SUM, Communicator);
  double tavg = tperfsum / loc_ntask;

  struct
  {
    double t;
    int rank;
  } local = {tperf, ThisTask}, localnode = {tperf, ThisNode}, min_time, max_time, min_timenode, max_timenode;

  MPI_Allreduce(&local, &min_time, 1, MPI_DOUBLE_INT, MPI_MINLOC, Communicator);
  MPI_Allreduce(&local, &max_time, 1, MPI_DOUBLE_INT, MPI_MAXLOC, Communicator);

  MPI_Allreduce(&localnode, &min_timenode, 1, MPI_DOUBLE_INT, MPI_MINLOC, Communicator);
  MPI_Allreduce(&localnode, &max_timenode, 1, MPI_DOUBLE_INT, MPI_MAXLOC, Communicator);

  double tb = Logs.second();

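  // relative spread of the effective per-task bandwidths, normalized by the average bandwidth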
  double variation = (bytecount / min_time.t - bytecount / max_time.t) / (bytecount / tavg);

  mpi_printf(
      "HEALTHTEST: %25s  %8.1f MB/s per pair  %7.3f%%  variation   | Best=%g on Task=%d/Node=%d, Worst=%g on Task=%d/Node=%d, test "
      "took %g sec\n",
      tag, bytecount / tavg * TO_MBYTE_FAC, 100.0 * variation, bytecount / min_time.t * TO_MBYTE_FAC, min_time.rank, min_timenode.rank,
      bytecount / max_time.t * TO_MBYTE_FAC, max_time.rank, max_timenode.rank, Logs.timediff(ta, tb));

  if(variation > MAX_VARIATION_TOLERANCE && ThisTask == 0)
    warn(
        "\nThe performance variation=%g of the communication speed lies above the prescribed tolerance MAX_VARIATION_TOLERANCE=%g, "
        "possibly indicating a machine problem.\n",
        variation, MAX_VARIATION_TOLERANCE);

  return tavg;
}

void sim::measure_iprobe_performance(const char *tag)
{
  double ta = Logs.second();

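  // poll repeatedly for (non-existent) incoming messages to time the per-call overhead of MPI_Iprobe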
  for(int i = 0; i < WORK_NUMBER_OF_IPROBE_TESTS; i++)
    {
      int flag;
      MPI_Status status;

      MPI_Iprobe(MPI_ANY_SOURCE, MPI_ANY_TAG, Communicator, &flag, &status);
    }

  double tb = Logs.second();

  double tperf = Logs.timediff(ta, tb) / WORK_NUMBER_OF_IPROBE_TESTS;

  struct
  {
    double t;
    int rank;
  } local = {tperf, ThisTask}, min_time, max_time;

  MPI_Allreduce(&local, &min_time, 1, MPI_DOUBLE_INT, MPI_MINLOC, Communicator);
  MPI_Allreduce(&local, &max_time, 1, MPI_DOUBLE_INT, MPI_MAXLOC, Communicator);

  double tperfsum;
  MPI_Allreduce(&tperf, &tperfsum, 1, MPI_DOUBLE, MPI_SUM, Communicator);
  double tavg = tperfsum / NTask;

  char name_minnode[MPI_MAX_PROCESSOR_NAME];
  char name_maxnode[MPI_MAX_PROCESSOR_NAME];

  int len;
  if(ThisTask == min_time.rank)
    MPI_Get_processor_name(name_minnode, &len);
  if(ThisTask == max_time.rank)
    MPI_Get_processor_name(name_maxnode, &len);

  MPI_Bcast(name_minnode, MPI_MAX_PROCESSOR_NAME, MPI_BYTE, min_time.rank, Communicator);
  MPI_Bcast(name_maxnode, MPI_MAX_PROCESSOR_NAME, MPI_BYTE, max_time.rank, Communicator);

  double variation = (max_time.t - min_time.t) / tavg;

  mpi_printf(
      "HEALTHTEST: %25s  %g s per MPI_Ip%7.3f%%  variation   | Best=%g on Task=%d/Node=%s, Worst=%g on Task=%d/Node=%s, test took %g "
      "sec\n",
      tag, tavg, 100.0 * variation, min_time.t, min_time.rank, name_minnode, max_time.t, max_time.rank, name_maxnode,
      Logs.timediff(ta, tb));
}