diff --git a/kadmos/graph/graph_data.py b/kadmos/graph/graph_data.py
index 49ca52487927e9e03fb62cedc90b516d8c384af7..a058d0c04bbdba2eedcd1c82eef15226380c6ead 100644
--- a/kadmos/graph/graph_data.py
+++ b/kadmos/graph/graph_data.py
@@ -1446,7 +1446,8 @@ class RepositoryConnectivityGraph(DataGraph):
         return cmdows_problem_definition
 
     # noinspection PyPep8Naming
-    def create_mathematical_problem(self, n_disciplines, coupling_density=None, **kwargs):
+    def create_mathematical_problem(self, n_disciplines, coupling_density=None, n_clusters=1, cluster_strength=1,
+                                    **kwargs):
         """Function to get a mathematical problem according to the variable complexity problem as described in:
         Zhang D., Song B., Wang P. and He Y. 'Performance Evaluation of MDO Architectures within a Variable
         Complexity Problem', Mathematical Problems in Engineering, 2017.
@@ -1455,6 +1456,13 @@ class RepositoryConnectivityGraph(DataGraph):
         :type n_disciplines: int
         :param coupling_density: percentage of couplings, 0 no couplings, 1 all possible couplings
         :type coupling_density: float
+        :param n_clusters: Number of clusters within the mathematical problem
+        :type n_clusters: int
+        :param cluster_strength: Indicates the strength of the clustering. 0 means that a completely random problem is
+        generated with no bias towards clustering at all. 1 means a complete bias towards clustering, so all couplings
+        are placed within the clusters. If more couplings are required than are available within the clusters,
+        couplings outside the clusters are added as well.
+        :type cluster_strength: float
         :return enriched rcg with the mathematical problem
         :return dictionary containing the properties of the mathematical problem
         """
@@ -1462,6 +1470,10 @@ class RepositoryConnectivityGraph(DataGraph):
         # Input assertions
         assert 'B' not in kwargs if coupling_density else 'B' in kwargs, 'Either the coupling density or the ' \
                                                                          'B-matrix must be given'
+        assert 1 <= n_clusters <= n_disciplines, 'Number of clusters must be in the range [1, n_disciplines]'
+        assert 0 <= cluster_strength <= 1, 'Cluster strength must be a float in the range [0, 1]'
+        if 'B' in kwargs:
+            logger.warning('B-matrix is given to create the mathematical problem, so cluster requirements are ignored')
 
         mathematical_problem = dict()
         mathematical_problem['n_disciplines'] = n_disciplines
@@ -1510,24 +1522,62 @@ class RepositoryConnectivityGraph(DataGraph):
                 n_couplings = int(np.ceil(((sum(n_coupling_var)*n_disciplines) - sum(n_coupling_var)) *
                                           coupling_density))
 
-                # Get a list with all possible couplings between variables and disciplines
-                possible_couplings = []
-                for discipline in range(n_disciplines):
-                    for coupling_var in range(sum(n_coupling_var)):
+                # Determine which disciplines are in which cluster
+                disciplines = list(range(n_disciplines))
+                random.shuffle(disciplines)
+                division = n_disciplines / float(n_clusters)
+                clusters = [disciplines[int(round(division * i)):int(round(division * (i + 1)))] for i in
+                            range(n_clusters)]
+
+                # Get two lists with all possible couplings between variables and disciplines. One list contains the
+                # couplings within the clusters and one contains the couplings outside the clusters
+                cluster_couplings, remaining_couplings = [], []
+                for discipline1 in range(n_disciplines):
+                    for discipline2 in range(n_disciplines):
                         # An output variable of a discipline cannot be an input to the same discipline
-                        if sum(n_coupling_var[:discipline]) <= coupling_var < sum(n_coupling_var[:discipline + 1]):
+                        if discipline1 == discipline2:
                             continue
-                        possible_couplings.append([coupling_var, discipline])
-
-                # Choose random couplings from all possible couplings
-                couplings = random.sample(range(len(possible_couplings)), n_couplings)
+                        for coupling_var in range(n_coupling_var[discipline1]):
+                            for cluster in clusters:
+                                if discipline1 in cluster and discipline2 in cluster:
+                                    cluster_couplings.append([discipline1, coupling_var, discipline2])
+                                    break
+                            else:
+                                remaining_couplings.append([discipline1, coupling_var, discipline2])
+
+                # Determine how many couplings need to be chosen from the ones inside the clusters and how many
+                # couplings need to be chosen from the ones outside the clusters
+                coupling_division = [random.uniform(0, 1) for _ in range(n_couplings)]
+                percentage_cluster_couplings = len(cluster_couplings) / float(len(cluster_couplings) +
+                                                                              len(remaining_couplings))
+                division_criteria = percentage_cluster_couplings + ((1-percentage_cluster_couplings) *
+                                                                    float(cluster_strength))
+
+                # If the number is below the division criterion, the coupling will be placed inside a cluster,
+                # otherwise it will be placed outside the clusters
+                n_couplings_in_cluster = len([coupling for coupling in coupling_division if coupling <
+                                              division_criteria])
+                n_couplings_outside_cluster = len(coupling_division) - n_couplings_in_cluster
+
+                # Check if there are too many couplings within or outside the clusters and change accordingly
+                if n_couplings_in_cluster > len(cluster_couplings):
+                    n_couplings_outside_cluster += n_couplings_in_cluster - len(cluster_couplings)
+                    n_couplings_in_cluster = len(cluster_couplings)
+                elif n_couplings_outside_cluster > len(remaining_couplings):
+                    n_couplings_in_cluster += n_couplings_outside_cluster - len(remaining_couplings)
+                    n_couplings_outside_cluster = len(remaining_couplings)
+
+                # Randomly choose the couplings from the in-cluster and out-of-cluster candidates
+                couplings = random.sample(cluster_couplings, n_couplings_in_cluster) + \
+                    random.sample(remaining_couplings, n_couplings_outside_cluster)
 
                 # Fill the B-matrix with the chosen couplings
                 for coupling in couplings:
-                    discipline = possible_couplings[coupling][1]
-                    for variable in range(n_coupling_var[discipline]):
-                        B[sum(n_coupling_var[:discipline]) + variable][possible_couplings[coupling][0]] = random.choice(
-                            range(-5, 0)+range(1, 6))  # Zero is not allowed
+                    discipline1, coupling_var, discipline2 = coupling
+                    for variable in range(n_coupling_var[discipline2]):
+                        B[sum(n_coupling_var[:discipline2]) + variable][
+                            sum(n_coupling_var[:discipline1]) + coupling_var] = \
+                            random.choice(list(range(-5, 0)) + list(range(1, 6)))  # Zero is not allowed
 
                 # To ensure convergence the B-matrix must be diagonally dominant
                 B_diag = np.sum(np.abs(B), axis=1)
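(Note, not part of the patch: the in-cluster/out-of-cluster selection above is easier to follow in isolation.
A self-contained sketch of the same logic, with a hypothetical helper name and simplified inputs.)

    import random

    def choose_couplings(n_couplings, cluster_couplings, remaining_couplings, cluster_strength):
        """Draw couplings with a bias towards the in-cluster candidates.

        cluster_strength = 0 follows the natural in-cluster proportion, while
        cluster_strength = 1 draws from inside the clusters until they run out.
        """
        p_cluster = len(cluster_couplings) / float(len(cluster_couplings) + len(remaining_couplings))
        criterion = p_cluster + (1 - p_cluster) * float(cluster_strength)
        n_in = sum(1 for _ in range(n_couplings) if random.uniform(0, 1) < criterion)
        n_out = n_couplings - n_in
        # Shift the surplus to the other pool if one of the pools is exhausted
        if n_in > len(cluster_couplings):
            n_out += n_in - len(cluster_couplings)
            n_in = len(cluster_couplings)
        elif n_out > len(remaining_couplings):
            n_in += n_out - len(remaining_couplings)
            n_out = len(remaining_couplings)
        return random.sample(cluster_couplings, n_in) + random.sample(remaining_couplings, n_out)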
@@ -3086,7 +3136,9 @@ class FundamentalProblemGraph(DataGraph, KeChainMixin):
         # Input assertions
         if not node_selection:
             assert 'function_ordering' in self.graph['problem_formulation'], 'Function ordering is missing'
-        assert n_parts > 1, 'Number of partitions must be greater than 1'
+        if self.graph.get('problem_formulation', {}).get('convergence_type'):
+            assert 'Gauss-Seidel' not in self.graph['problem_formulation']['convergence_type'], \
+                'Partitioning does not work correctly with a Gauss-Seidel converger, use Jacobi instead'
 
         # Get coupling dictionary
         if not coupling_dict:
@@ -3164,6 +3216,16 @@ class FundamentalProblemGraph(DataGraph, KeChainMixin):
             best_partitions = [[node] for node in nodes_to_partition]
             logger.warning('Number of partitions ({0}) exceeds number of nodes ({1}). The solution for {1} partitions '
                            'will be returned'.format(n_parts, len(nodes_to_partition)))
+        # If the number of partitions is one, determine the correct function order and return the solution
+        elif n_parts == 1:
+            if local_convergers:
+                best_partitions = [self.get_possible_function_order('single-swap', node_selection=nodes_to_partition,
+                                                                    rcb=rcb_order, coupling_dict=coupling_dict,
+                                                                    use_runtime_info=use_runtime_info)]
+            else:
+                best_partitions = [self.minimize_feedback(nodes_to_partition, 'single-swap', rcb=rcb_order,
+                                                          coupling_dict=coupling_dict,
+                                                          use_runtime_info=use_runtime_info)]
         # Else partition the graph
         else:
             while True:
@@ -3486,36 +3548,25 @@ class FundamentalProblemGraph(DataGraph, KeChainMixin):
         for idx, n_partitions in enumerate(partition_range):
             graph = self.deepcopy()
             logger.info('Calculating the solution for {} partitions'.format(n_partitions))
+            graph.partition_graph(n_partitions, node_selection=nodes_to_partition, coupling_dict=coupling_dict,
+                                  use_runtime_info=use_runtime_info, local_convergers=local_convergers,
+                                  rcb_partitioning=rcb_partitioning, rcb_order=rcb_order)
+            partitions = graph.graph['problem_formulation']['coupled_functions_groups']
+            local_convs = graph.graph['problem_formulation']['local_convergers']
 
-            if n_partitions == 1:
-                # Get function order
-                partitions = graph.minimize_feedback(nodes_to_partition, 'hybrid-swap', rcb=rcb_order,
-                                                     coupling_dict=coupling_dict, use_runtime_info=use_runtime_info)
-                partition_variables, runtime = graph.get_feedback_info(partitions, coupling_dict=coupling_dict,
-                                                                       use_runtime_info=use_runtime_info)
-                # Save information
-                partition_info.append([idx, n_partitions, [partition_variables], 0, partition_variables, runtime])
-                partition_results.append([runtime, partition_variables, idx, partitions, []])
-            else:
-                graph.partition_graph(n_partitions, node_selection=nodes_to_partition, coupling_dict=coupling_dict,
-                                      use_runtime_info=use_runtime_info, local_convergers=local_convergers,
-                                      rcb_partitioning=rcb_partitioning, rcb_order=rcb_order)
-                partitions = graph.graph['problem_formulation']['coupled_functions_groups']
-                local_convs = graph.graph['problem_formulation']['local_convergers']
-
-                # Evaluate graph
-                total_var, partition_variables, system_variables, runtime = graph.get_partition_info(
-                    partitions, use_runtime_info=use_runtime_info, coupling_dict=coupling_dict,
-                    local_convergers=local_convergers)
+            # Evaluate graph
+            total_var, partition_variables, system_variables, runtime = graph.get_partition_info(
+                partitions, use_runtime_info=use_runtime_info, coupling_dict=coupling_dict,
+                local_convergers=local_convergers)
 
-                # Get number of partitions (Metis can return less partitions in case the number of partitions is close
-                # to the number of nodes in the partitions)
-                n_parts = len(partitions)
+            # Get number of partitions (Metis can return fewer partitions when the requested number is close to the
+            # number of nodes being partitioned)
+            n_parts = len(partitions)
 
-                # Save partition information
-                partition_info.append([idx, n_parts, partition_variables, system_variables, total_var,
-                                       max(runtime)])
-                partition_results.append([max(runtime), total_var, idx, partitions, local_convs])
+            # Save partition information
+            partition_info.append([idx, n_parts, partition_variables, system_variables, total_var,
+                                   max(runtime)])
+            partition_results.append([max(runtime), total_var, idx, partitions, local_convs])
 
         # If pareto front, get optimal results
         if pareto:
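(Note, not part of the patch: with the single-partition case now handled inside partition_graph, the loop
above treats every entry of the partition range uniformly. A condensed sketch of that loop; `fpg`,
`partition_range` and `nodes_to_partition` are assumed to exist, and keyword arguments that are not shown
are assumed to keep their defaults.)

    results = []
    for n_partitions in partition_range:
        graph = fpg.deepcopy()
        graph.partition_graph(n_partitions, node_selection=nodes_to_partition, local_convergers=True)
        partitions = graph.graph['problem_formulation']['coupled_functions_groups']
        total_var, part_vars, sys_vars, runtime = graph.get_partition_info(partitions, local_convergers=True)
        # Metis may return fewer partitions, so record the number that was actually obtained
        results.append((len(partitions), total_var, max(runtime)))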
@@ -3563,19 +3614,16 @@ class FundamentalProblemGraph(DataGraph, KeChainMixin):
                                                    allow_multi=False)
         idx = partition_range.index(int(sel[0]))
 
-        # Get result
-        if partition_range[idx] != 1:
-
-            # Add partition id to the nodes
-            for part_nr, partition in enumerate(partition_results[idx][3]):
-                for node in partition:
-                    self.nodes[node]['partition_id'] = part_nr
+        # Add partition id to the nodes
+        for part_nr, partition in enumerate(partition_results[idx][3]):
+            for node in partition:
+                self.nodes[node]['partition_id'] = part_nr
 
-            # Add partition to the input graph
-            self.graph['problem_formulation']['coupled_functions_groups'] = partition_results[idx][3]
-            self.graph['problem_formulation']['local_convergers'] = partition_results[idx][4]
-            self.graph['problem_formulation']['jacobi_convergence'] = []
-            self.graph['problem_formulation']['sequence_partitions'] = []
+        # Add partition to the input graph
+        self.graph['problem_formulation']['coupled_functions_groups'] = partition_results[idx][3]
+        self.graph['problem_formulation']['local_convergers'] = partition_results[idx][4]
+        self.graph['problem_formulation']['jacobi_convergence'] = []
+        self.graph['problem_formulation']['sequence_partitions'] = []
 
         return
 
@@ -4480,6 +4528,7 @@ class FundamentalProblemGraph(DataGraph, KeChainMixin):
                 # Connect partitions
                 # noinspection PyUnboundLocalVariable
                 mdg.connect_partitions(mdao_arch, sub_func_orderings, coup_functions)
+                _, sys_conv, _ = mdg.get_architecture_node_ids(mdao_arch, number_of_groups=len(partitions))
             else:
                 sys_conv, sys_conv_label = graph.CONVERGER_STRING, graph.CONVERGER_LABEL
                 # Connect system converger
@@ -4488,6 +4537,10 @@ class FundamentalProblemGraph(DataGraph, KeChainMixin):
             mdg.connect_qoi_nodes_as_input(qoi_nodes, graph.COORDINATOR_STRING, True)
             # Connect remaining system inputs and outputs to the coordinator
             mdg.connect_coordinator()
+            # If the system converger does not have any variables connected to it due to the local convergers, it is
+            # redundant and can be removed
+            if not mdg.get_sources(sys_conv) and not mdg.get_targets(sys_conv):
+                mdg.remove_node(sys_conv)
         elif mdao_arch == graph.OPTIONS_ARCHITECTURES[2]:  # IDF
             if partitions:
                 sys_opt = graph.SYS_PREFIX + graph.OPTIMIZER_STRING
@@ -4514,6 +4567,7 @@ class FundamentalProblemGraph(DataGraph, KeChainMixin):
                 # Connect partitions
                 # noinspection PyUnboundLocalVariable
                 mdg.connect_partitions(mdao_arch, sub_func_orderings, coup_functions)
+                _, sys_conv, _ = mdg.get_architecture_node_ids(mdao_arch, number_of_groups=len(partitions))
             else:
                 sys_opt, sys_opt_label = graph.OPTIMIZER_STRING, graph.OPTIMIZER_LABEL
                 sys_conv, sys_conv_label = graph.CONVERGER_STRING, graph.CONVERGER_LABEL
@@ -4526,6 +4580,10 @@ class FundamentalProblemGraph(DataGraph, KeChainMixin):
             mdg.connect_qoi_nodes_as_input(qoi_nodes, graph.COORDINATOR_STRING, True)
             # Connect remaining system inputs and outputs to the coordinator
             mdg.connect_coordinator()
+            # If the system converger does not have any variables connected to it due to the local convergers, it is
+            # redundant and can be removed
+            if not mdg.get_sources(sys_conv) and not mdg.get_targets(sys_conv):
+                mdg.remove_node(sys_conv)
         elif mdao_arch == graph.OPTIONS_ARCHITECTURES[4]:  # unconverged-OPT
             opt = graph.OPTIMIZER_STRING
             if allow_unconverged_couplings:
@@ -6524,20 +6582,25 @@ class MdaoDataGraph(DataGraph, MdaoMixin):
         elif mdao_arch == mdg.OPTIONS_ARCHITECTURES[1]:  # converged-MDA
             _, sys_conv, _ = self.get_architecture_node_ids(mdao_arch, number_of_groups=len(partitions)) if \
                 partitions else ([], mdg.CONVERGER_STRING, [])
-            sequence1 = [coor] + pre_functions + [sys_conv]
+            sys_conv = sys_conv if sys_conv in mdg.nodes() else []
+            sequence1 = [coor] + pre_functions + ([sys_conv] if sys_conv else [])
             mpg.add_process(sequence1, 0, mdg)
-            if partitions:
-                mpg.add_process_partitions([sys_conv], partitions, [], mpg.nodes[sequence1[-1]]['process_step'], mdg,
-                                           end_in_iterative_node=sys_conv)
-            else:
-                sequence2 = [sys_conv] + coup_functions
-                mpg.add_process(sequence2, mpg.nodes[sequence1[-1]]['process_step'], mdg,
-                                end_in_iterative_node=sys_conv)
-            if post_functions:
-                sequence3 = [sys_conv] + post_functions
-                mpg.add_process(sequence3, mpg.nodes[sys_conv]['converger_step'], mdg, end_in_iterative_node=coor)
+            if sys_conv:
+                if partitions:
+                    mpg.add_process_partitions([sys_conv], partitions, [], mpg.nodes[sequence1[-1]]['process_step'], mdg,
+                                               end_in_iterative_node=sys_conv)
+                else:
+                    sequence2 = [sys_conv] + coup_functions
+                    mpg.add_process(sequence2, mpg.nodes[sequence1[-1]]['process_step'], mdg,
+                                    end_in_iterative_node=sys_conv)
+                if post_functions:
+                    sequence3 = [sys_conv] + post_functions
+                    mpg.add_process(sequence3, mpg.nodes[sys_conv]['converger_step'], mdg, end_in_iterative_node=coor)
+                else:
+                    mpg.connect_nested_iterators(coor, sys_conv)
             else:
-                mpg.connect_nested_iterators(coor, sys_conv)
+                mpg.add_process_partitions(sequence1, partitions, post_functions,
+                                           mpg.nodes[sequence1[0]]['process_step'], mdg, end_in_iterative_node=coor)
         elif mdao_arch == mdg.OPTIONS_ARCHITECTURES[2]:  # IDF
             sys_opt, _, _ = self.get_architecture_node_ids(mdao_arch, number_of_groups=len(partitions)) if \
                 partitions else (mdg.OPTIMIZER_STRING, [], [])
@@ -6555,19 +6618,24 @@ class MdaoDataGraph(DataGraph, MdaoMixin):
         elif mdao_arch == mdg.OPTIONS_ARCHITECTURES[3]:  # MDF
             sys_opt, sys_conv, _ = self.get_architecture_node_ids(mdao_arch, number_of_groups=len(partitions)) if \
                 partitions else (mdg.OPTIMIZER_STRING, mdg.CONVERGER_STRING, [])
+            sys_conv = sys_conv if sys_conv in mdg.nodes() else []
             sequence1 = [coor] + pre_desvars_funcs + [sys_opt]
             mpg.add_process(sequence1, 0, mdg)
-            sequence2 = [sys_opt] + post_desvars_funcs + [sys_conv]
+            sequence2 = [sys_opt] + post_desvars_funcs + ([sys_conv] if sys_conv else [])
             mpg.add_process(sequence2, mpg.nodes[sequence1[-1]]['process_step'], mdg)
-            if partitions:
-                mpg.add_process_partitions([sys_conv], partitions, [], mpg.nodes[sequence2[-1]]['process_step'], mdg,
-                                           end_in_iterative_node=sys_conv)
+            if sys_conv:
+                if partitions:
+                    mpg.add_process_partitions([sys_conv], partitions, [], mpg.nodes[sequence2[-1]]['process_step'], mdg,
+                                               end_in_iterative_node=sys_conv)
+                else:
+                    sequence3 = [sys_conv] + coup_functions
+                    mpg.add_process(sequence3, mpg.nodes[sequence2[-1]]['process_step'], mdg,
+                                    end_in_iterative_node=sys_conv)
+                sequence4 = [sys_conv] + post_functions
+                mpg.add_process(sequence4, mpg.nodes[sys_conv]['converger_step'], mdg, end_in_iterative_node=sys_opt)
             else:
-                sequence3 = [sys_conv] + coup_functions
-                mpg.add_process(sequence3, mpg.nodes[sequence2[-1]]['process_step'], mdg,
-                                end_in_iterative_node=sys_conv)
-            sequence4 = [sys_conv] + post_functions
-            mpg.add_process(sequence4, mpg.nodes[sys_conv]['converger_step'], mdg, end_in_iterative_node=sys_opt)
+                mpg.add_process_partitions(sequence2, partitions, post_functions,
+                                           mpg.nodes[sequence2[0]]['process_step'], mdg, end_in_iterative_node=sys_opt)
             mpg.connect_nested_iterators(coor, sys_opt)
         elif mdao_arch == mdg.OPTIONS_ARCHITECTURES[4]:  # unconverged-OPT
             opt = mdg.OPTIMIZER_STRING
diff --git a/kadmos/graph/graph_process.py b/kadmos/graph/graph_process.py
index 0246a1c1544eff3c6e7596ea0aeddcabde17fe6f..2e2b6621263723ea7232a22838c17ff7cec9a792 100644
--- a/kadmos/graph/graph_process.py
+++ b/kadmos/graph/graph_process.py
@@ -329,6 +329,10 @@ class MdaoProcessGraph(ProcessGraph):
                     if distr_conv and mdao_architecture not in self.OPTIONS_ARCHITECTURES[1] + \
                             self.OPTIONS_ARCHITECTURES[3]:
                         assert len(sys_conv) == 0, '{} system convergers found, none expected'.format(len(sys_conv))
+                    elif distr_conv and mdao_architecture in self.OPTIONS_ARCHITECTURES[1] + \
+                            self.OPTIONS_ARCHITECTURES[3]:
+                        assert len(sys_conv) <= 1, '{} system convergers found, one or none expected'.format(
+                            len(sys_conv))
                     else:
                         assert len(sys_conv) == 1, '{} system convergers found, one expected.'.format(len(sys_conv))
                     convs = sys_conv
@@ -471,9 +475,9 @@ class MdaoProcessGraph(ProcessGraph):
 
             # If nodes_1 contains an iterator as first element + other nodes, then split into nodes_1 and nodes_2
             if len(nodes_1) > 1 and self.nodes[nodes_1[0]]['architecture_role'] in self.ARCHITECTURE_ROLES_FUNS[:4]:
-                nodes_2 = nodes_1[1:]
+                sequence = nodes_1[1:] + sequence
+                nodes_2, _ = get_executable_functions(coupling_matrix_1[1:, 1:], sequence)
                 nodes_1 = [nodes_1[0]]
-                sequence = nodes_2 + sequence
             # Else if iterator as last element
             elif len(nodes_1) > 1 and self.nodes[nodes_1[-1]]['architecture_role'] in self.ARCHITECTURE_ROLES_FUNS[:4]:
                 nodes_2 = [nodes_1[-1]]