From fd597a5231665c554ca46076f0231f13c1ae3216 Mon Sep 17 00:00:00 2001
From: Cristian C Lalescu <Cristian.Lalescu@ds.mpg.de>
Date: Wed, 22 Feb 2017 14:04:48 +0100
Subject: [PATCH] remove trailing spaces

---
 bfps/_code.py       | 51 ++++++++++++++++++++++++++++-----------------
 bfps/_fluid_base.py |  4 ++--
 2 files changed, 34 insertions(+), 21 deletions(-)

diff --git a/bfps/_code.py b/bfps/_code.py
index 7c384ab3..2f21c16a 100644
--- a/bfps/_code.py
+++ b/bfps/_code.py
@@ -45,10 +45,11 @@ class _code(_base):
             work_dir = './',
             simname = 'test'):
         _base.__init__(self, work_dir = work_dir, simname = simname)
-        self.version_message = ('/***********************************************************************\n' +
-                                '* this code automatically generated by bfps\n' +
-                                '* version {0}\n'.format(bfps.__version__) +
-                                '***********************************************************************/\n\n\n')
+        self.version_message = (
+                '/***********************************************************************\n' +
+                '* this code automatically generated by bfps\n' +
+                '* version {0}\n'.format(bfps.__version__) +
+                '***********************************************************************/\n\n\n')
         self.includes = """
                 //begincpp
                 #include "base.hpp"
@@ -135,7 +136,8 @@ class _code(_base):
                 //endcpp
                 """
         for ostream in ['cout', 'cerr']:
-            self.main_start += 'if (myrank == 0) std::{1} << "{0}" << std::endl;'.format(self.version_message, ostream).replace('\n', '\\n') + '\n'
+            self.main_start += 'if (myrank == 0) std::{1} << "{0}" << std::endl;'.format(
+                    self.version_message, ostream).replace('\n', '\\n') + '\n'
         self.main_end = """
                 //begincpp
                     }
@@ -372,20 +374,24 @@ class _code(_base):
         script_file.write('# @ notify_user = $(user)@rzg.mpg.de\n')
 
         nb_cpus_per_node = self.host_info['deltanprocs']
-        assert(isinstance(nb_cpus_per_node, int) and nb_cpus_per_node >= 1, 'nb_cpus_per_node is {}'.format(nb_cpus_per_node))
+        assert isinstance(nb_cpus_per_node, int) and nb_cpus_per_node >= 1, \
+                'nb_cpus_per_node is {}'.format(nb_cpus_per_node)
 
         # No more threads than the number of cores
-        assert(nb_threads_per_process <= nb_cpus_per_node, "Cannot use more threads ({} asked) than the number of cores ({})".format(nb_threads_per_process, nb_cpus_per_node))
+        assert nb_threads_per_process <= nb_cpus_per_node, \
+               "Cannot use more threads ({} asked) than the number of cores ({})".format(
+                   nb_threads_per_process, nb_cpus_per_node)
         # Warn if some core will not be ued
         if nb_cpus_per_node%nb_threads_per_process != 0:
-            warnings.warn("The number of threads is not correct regarding the number of cores (machin will be under use)", UserWarning)
+            warnings.warn("The number of threads does not evenly divide the number of cores (machine will be underused)",
+                    UserWarning)
 
         nb_cpus = nb_mpi_processes*nb_threads_per_process
         if (nb_cpus < nb_cpus_per_node):
             # in case we use only a few process on a single node
             nb_nodes = 1
             nb_processes_per_node = nb_mpi_processes
-            first_node_tasks = nb_mpi_processes 
+            first_node_tasks = nb_mpi_processes
         else:
             nb_nodes = int((nb_cpus+nb_cpus_per_node-1) // nb_cpus_per_node)
             # if more than one node we requiere to have a multiple of deltanprocs
@@ -459,22 +465,25 @@ class _code(_base):
         script_file.write('# @ job_type = MPICH\n')
         script_file.write('# @ node_usage = not_shared\n')
         script_file.write('#\n')
-        
+
         nb_cpus_per_node = self.host_info['deltanprocs']
         assert(isinstance(nb_cpus_per_node, int) and nb_cpus_per_node >= 1, 'nb_cpus_per_node is {}'.format(nb_cpus_per_node))
 
         # No more threads than the number of cores
-        assert(nb_threads_per_process <= nb_cpus_per_node, "Cannot use more threads ({} asked) than the number of cores ({})".format(nb_threads_per_process, nb_cpus_per_node))
+        assert nb_threads_per_process <= nb_cpus_per_node, \
+               "Cannot use more threads ({} asked) than the number of cores ({})".format(
+                   nb_threads_per_process, nb_cpus_per_node)
         # Warn if some core will not be ued
         if nb_cpus_per_node%nb_threads_per_process != 0:
-            warnings.warn("The number of threads is not correct regarding the number of cores (machin will be under use)", UserWarning)
+            warnings.warn("The number of threads does not evenly divide the number of cores (machine will be underused)",
+                    UserWarning)
 
         nb_cpus = nb_mpi_processes*nb_threads_per_process
         if (nb_cpus < nb_cpus_per_node):
             # in case we use only a few process on a single node
             nb_nodes = 1
             nb_processes_per_node = nb_mpi_processes
-            first_node_tasks = nb_mpi_processes 
+            first_node_tasks = nb_mpi_processes
         else:
             nb_nodes = int((nb_cpus+nb_cpus_per_node-1) // nb_cpus_per_node)
             # if more than one node we requiere to have a multiple of deltanprocs
@@ -517,7 +526,7 @@ class _code(_base):
             ' ' +
             ' '.join(command_atoms[1:]) +
             '\n')
-        
+
         script_file.write('echo "End time is `date`"\n')
         script_file.write('exit 0\n')
         script_file.close()
@@ -588,15 +597,19 @@ class _code(_base):
             script_file.write('#SBATCH -o ' + out_file + '\n')
         script_file.write('#SBATCH --partition={0}\n'.format(
                 self.host_info['environment']))
-        
+
         nb_cpus_per_node = self.host_info['deltanprocs']
-        assert(isinstance(nb_cpus_per_node, int) and nb_cpus_per_node >= 1, 'nb_cpus_per_node is {}'.format(nb_cpus_per_node))
+        assert isinstance(nb_cpus_per_node, int) and nb_cpus_per_node >= 1, \
+               'nb_cpus_per_node is {}'.format(nb_cpus_per_node)
 
         # No more threads than the number of cores
-        assert(nb_threads_per_process <= nb_cpus_per_node, "Cannot use more threads ({} asked) than the number of cores ({})".format(nb_threads_per_process, nb_cpus_per_node))
+        assert nb_threads_per_process <= nb_cpus_per_node, \
+               "Cannot use more threads ({} asked) than the number of cores ({})".format(
+                   nb_threads_per_process, nb_cpus_per_node)
         # Warn if some core will not be ued
         if nb_cpus_per_node%nb_threads_per_process != 0:
-            warnings.warn("The number of threads is not correct regarding the number of cores (machin will be under use)", UserWarning)
+            warnings.warn("The number of threads does not evenly divide the number of cores (machine will be underused)",
+                    UserWarning)
 
         nb_cpus = nb_mpi_processes*nb_threads_per_process
         if (nb_cpus < nb_cpus_per_node):
@@ -608,7 +621,7 @@ class _code(_base):
             # if more than one node we requiere to have a multiple of deltanprocs
             nb_processes_per_node = int(nb_cpus_per_node // nb_threads_per_process)
 
-        
+
         script_file.write('#SBATCH --nodes={0}\n'.format(nb_nodes))
         script_file.write('#SBATCH --ntasks-per-node={0}\n'.format(nb_processes_per_node))
         script_file.write('#SBATCH --cpus-per-task={0}\n'.format(nb_threads_per_process))
diff --git a/bfps/_fluid_base.py b/bfps/_fluid_base.py
index 79a823a9..8ed88ca6 100644
--- a/bfps/_fluid_base.py
+++ b/bfps/_fluid_base.py
@@ -269,7 +269,7 @@ class _fluid_particle_base(_code):
 
             self.main       += """
                                 #ifdef USE_TIMINGOUTPUT
-                                const std::string loopLabel = "code::main_start::loop-" + std::to_string(iteration);                                
+                                const std::string loopLabel = "code::main_start::loop-" + std::to_string(iteration);
                                 TIMEZONE(loopLabel.c_str());
                                 #endif
                                 """
@@ -288,7 +288,7 @@ class _fluid_particle_base(_code):
             self.main       += '{\n'
             self.main       += """
                                 #ifdef USE_TIMINGOUTPUT
-                                const std::string loopLabel = "code::main_start::loop-" + std::to_string(frame_index);                                
+                                const std::string loopLabel = "code::main_start::loop-" + std::to_string(frame_index);
                                 TIMEZONE(loopLabel.c_str());
                                 #endif
                                 """
-- 
GitLab