diff --git a/example/advect/t8_advection.cxx b/example/advect/t8_advection.cxx
index 4d9c85bc2f..0a9775d3b4 100644
--- a/example/advect/t8_advection.cxx
+++ b/example/advect/t8_advection.cxx
@@ -502,7 +502,6 @@ t8_advect_flux_upwind_hanging (const t8_advect_problem_t * problem,
     el_hang->fluxes[face][i] =
       t8_advect_flux_upwind (problem, phi_plus, phi_minus, ltreeid,
                              face_children[i], child_face);
-    // if (a == 1) printf ("%i %i %f\n",face, i, el_hang->fluxes[face][i]);
     /* Set the flux of the neighbor element */
     dual_face = el_hang->dual_faces[face][i];
     if (!adapted_or_partitioned && !neigh_is_ghost) {
@@ -511,7 +510,6 @@ t8_advect_flux_upwind_hanging (const t8_advect_problem_t * problem,
         /* We need to allocate the fluxes */
         neigh_data->fluxes[dual_face] = T8_ALLOC (double, 1);
       }
-      // printf ("face %i neigh %i df %i\n", face, neigh_id, dual_face);
       SC_CHECK_ABORT (dual_face < neigh_data->num_faces, "num\n");
       // SC_CHECK_ABORT (neigh_data->num_neighbors[dual_face] == 1, "entry\n");
       neigh_data->num_neighbors[dual_face] = 1;
@@ -1586,7 +1584,6 @@ t8_advect_solve (t8_cmesh_t cmesh, t8_flow_function_3d_fn u,
         problem->stats[ADVECT_DUMMY].count = 1;
       }
       /* Compute time step */
-      // printf ("advance %i\n", ielement);
       t8_advect_advance_element (problem, lelement);
     }
   }
diff --git a/src/t8_forest/t8_forest_cxx.cxx b/src/t8_forest/t8_forest_cxx.cxx
index f092fe1ab1..89603f19ab 100644
--- a/src/t8_forest/t8_forest_cxx.cxx
+++ b/src/t8_forest/t8_forest_cxx.cxx
@@ -336,11 +336,9 @@ t8_forest_no_overlap (t8_forest_t forest)
   T8_ASSERT (has_overlap_local_global == 0 || has_overlap_local_global == 1);
   if (has_overlap_local_global) {
     T8_ASSERT (has_overlap_local == 1);
-    //t8_debugf ("[IL] no_overlap end 1 \n");
     return 0;
   }
 #endif
-//t8_debugf ("[IL] no_overlap end 2 \n");
   return 1;
 }
 
diff --git a/src/t8_vtk/t8_vtk_reader.cxx b/src/t8_vtk/t8_vtk_reader.cxx
index a2339efb3c..a529e7399e 100644
--- a/src/t8_vtk/t8_vtk_reader.cxx
+++ b/src/t8_vtk/t8_vtk_reader.cxx
@@ -220,7 +220,6 @@ t8_vtk_iterate_cells (vtkSmartPointer < vtkDataSet > vtkGrid,
 
   const int num_data_arrays = cell_data->GetNumberOfArrays ();
   T8_ASSERT (num_data_arrays >= 0);
-  t8_debugf ("[D] read %i data-arrays\n", num_data_arrays);
   /* Prepare attributes */
   if (num_data_arrays > 0) {
     size_t tuple_size;
@@ -230,8 +229,6 @@ t8_vtk_iterate_cells (vtkSmartPointer < vtkDataSet > vtkGrid,
       vtkDataArray *data = cell_data->GetArray (idata);
       tuple_size = data->GetNumberOfComponents ();
       data_size[idata] = sizeof (double) * tuple_size;
-      t8_debugf ("[D] data_size[%i] = %li, tuple_size %li\n", idata,
-                 data_size[idata], tuple_size);
       /* Allocate memory for a tuple in array i */
       tuples[idata] = T8_ALLOC (double, tuple_size);
     }
@@ -279,7 +276,6 @@ t8_vtk_iterate_cells (vtkSmartPointer < vtkDataSet > vtkGrid,
     }
     tree_id++;
   }
-  t8_debugf ("[D] read %li trees\n", tree_id);
 
   /* Clean-up */
   cell_it->Delete ();
@@ -333,7 +329,6 @@ t8_vtkGrid_to_cmesh (vtkSmartPointer < vtkDataSet > vtkGrid,
   }
   /* Communicate the dimension to all processes */
   sc_MPI_Bcast (&dim, 1, sc_MPI_INT, main_proc, comm);
-  t8_debugf ("[D] dim: %i\n", dim);
   /* Communicate the number of trees to all processes.
    * TODO: This probably crashes when a vtkGrid is distributed in many
    * files. */