lagrangian_relaxation.py

This example is inspired by an entry on the “Adventures in Optimization” blog; the source code accompanying that article is available from the original blog post. The example solves the generalized assignment problem (GAP), with or without Lagrangian relaxation.

  1# --------------------------------------------------------------------------
  2# Source file provided under Apache License, Version 2.0, January 2004,
  3# http://www.apache.org/licenses/
  4# (c) Copyright IBM Corp. 2015, 2018
  5# --------------------------------------------------------------------------
  6
  7import json
  8import builtins
  9
 10from docplex.util.environment import get_environment
 11from docplex.mp.model import Model
 12
 13
# ----------------------------------------------------------------------------
# Initialize the problem data
# ----------------------------------------------------------------------------
# Default GAP instance ("GAP per Wolsey" in the model names below):
# 5 items (rows) x 3 agents/machines (columns).
# B[j] : capacity of agent j (right-hand side of the j-th capacity constraint)
B = [15, 15, 15]
# C[i][j] : profit of assigning item i to agent j (objective coefficients)
C = [
    [ 6, 10, 1],
    [12, 12, 5],
    [15,  4, 3],
    [10,  3, 9],
    [8,   9, 5]
]
# A[i][j] : resource consumed on agent j when item i is assigned to it
A = [
    [ 5,  7,  2],
    [14,  8,  7],
    [10,  6, 12],
    [ 8,  4, 15],
    [ 6, 12,  5]
]
 32
 33
 34# ----------------------------------------------------------------------------
 35# Build the model
 36# ----------------------------------------------------------------------------
 37def run_GAP_model(As, Bs, Cs, **kwargs):
 38    with Model('GAP per Wolsey -without- Lagrangian Relaxation', **kwargs) as mdl:
 39        print("#As={}, #Bs={}, #Cs={}".format(len(As), len(Bs), len(Cs)))
 40        number_of_cs = len(C)
 41        # variables
 42        x_vars = [mdl.binary_var_list(c, name=None) for c in Cs]
 43
 44        # constraints
 45        mdl.add_constraints(mdl.sum(xv) <= 1 for xv in x_vars)
 46
 47        mdl.add_constraints(mdl.sum(x_vars[ii][j] * As[ii][j] for ii in range(number_of_cs)) <= bs
 48                            for j, bs in enumerate(Bs))
 49
 50        # objective
 51        total_profit = mdl.sum(mdl.scal_prod(x_i, c_i) for c_i, x_i in zip(Cs, x_vars))
 52        mdl.maximize(total_profit)
 53        #  mdl.print_information()
 54        s = mdl.solve()
 55        assert s is not None
 56        obj = s.objective_value
 57        print("* GAP with no relaxation run OK, best objective is: {:g}".format(obj))
 58    return obj
 59
 60
def run_GAP_model_with_Lagrangian_relaxation(As, Bs, Cs, max_iters=101, **kwargs):
    """Solve the GAP instance by Lagrangian relaxation of the assignment constraints.

    The one-item-one-agent constraints are relaxed through penalty variables
    ``p`` whose violation is priced into the objective with Lagrangian
    multipliers, updated iteratively with a diminishing step.

    :param As: resource-usage matrix, As[i][j] = resource used by item i on agent j
    :param Bs: capacity per agent (right-hand sides of the capacity constraints)
    :param Cs: profit matrix, Cs[i][j] = profit of assigning item i to agent j
    :param max_iters: maximum number of multiplier-update iterations
    :param kwargs: forwarded to the docplex ``Model`` constructor
    :return: objective value of the last successful solve (0 if none solved)
    """
    with Model('GAP per Wolsey -with- Lagrangian Relaxation', **kwargs) as mdl:
        print("#As={}, #Bs={}, #Cs={}".format(len(As), len(Bs), len(Cs)))
        number_of_cs = len(Cs)
        c_range = range(number_of_cs)
        # variables: x_vars[i][j] == 1 iff item i is assigned to agent j
        x_vars = [mdl.binary_var_list(c, name=None) for c in Cs]
        p_vars = mdl.continuous_var_list(Cs, name='p')  # new for relaxation

        # relaxed assignment constraints: p[i] absorbs the violation of
        # "item i is assigned exactly once"
        mdl.add_constraints(mdl.sum(xv) == 1 - pv for xv, pv in zip(x_vars, p_vars))

        # agent capacity constraints are kept hard (not relaxed)
        mdl.add_constraints(mdl.sum(x_vars[ii][j] * As[ii][j] for ii in c_range) <= bs
                            for j, bs in enumerate(Bs))

        # lagrangian relaxation loop
        eps = 1e-6          # tolerance on a penalized violation
        loop_count = 0
        best = 0
        initial_multiplier = 1
        multipliers = [initial_multiplier] * len(Cs)

        # profit term is loop-invariant; only the penalty term changes per iteration
        total_profit = mdl.sum(mdl.scal_prod(x_i, c_i) for c_i, x_i in zip(Cs, x_vars))
        mdl.add_kpi(total_profit, "Total profit")

        while loop_count <= max_iters:
            loop_count += 1
            # rebuilt at each loop iteration: price the penalty variables with
            # the current multipliers, then re-solve with the new objective
            total_penalty = mdl.scal_prod(p_vars, multipliers)
            mdl.maximize(total_profit + total_penalty)
            s = mdl.solve()
            if not s:
                print("*** solve fails, stopping at iteration: %d" % loop_count)
                break
            best = s.objective_value
            penalties = [pv.solution_value for pv in p_vars]
            print('%d> new lagrangian iteration:\n\t obj=%g, m=%s, p=%s' % (loop_count, best, str(multipliers), str(penalties)))

            # converged when every penalized violation is below eps;
            # justifier records the first violation that blocks convergence
            do_stop = True
            justifier = 0
            for k in c_range:
                penalized_violation = penalties[k] * multipliers[k]
                if penalized_violation >= eps:
                    do_stop = False
                    justifier = penalized_violation
                    break

            if do_stop:
                print("* Lagrangian relaxation succeeds, best={:g}, penalty={:g}, #iterations={}"
                      .format(best, total_penalty.solution_value, loop_count))
                break
            else:
                # update multipliers and start loop again.
                # subgradient-style update: shrink each multiplier by a
                # diminishing step (1/loop_count) times its penalty, floored at 0
                scale_factor = 1.0 / float(loop_count)
                multipliers = [builtins.max(multipliers[i] - scale_factor * penalties[i], 0.) for i in c_range]
                print('{0}> -- loop continues, m={1!s}, justifier={2:g}'.format(loop_count, multipliers, justifier))

    return best
118
119
def run_default_GAP_model_with_lagrangian_relaxation(**kwargs):
    """Run the Lagrangian-relaxation solve on the module-level default data (A, B, C)."""
    return run_GAP_model_with_Lagrangian_relaxation(As=A, Bs=B, Cs=C, **kwargs)
122
123
# ----------------------------------------------------------------------------
# Solve the model and display the result
# ----------------------------------------------------------------------------
if __name__ == '__main__':
    # Run the model. If a key has been specified above, the model will run on
    # IBM Decision Optimization on cloud.
    # Solve the default instance both ways; only the relaxed result is saved.
    gap_best_obj = run_GAP_model(A, B, C)
    relaxed_best = run_GAP_model_with_Lagrangian_relaxation(A, B, C)
    # save the relaxed solution (environment-provided stream, e.g. local file
    # or cloud output attachment)
    with get_environment().get_output_stream("solution.json") as fp:
        fp.write(json.dumps({"objectiveValue": relaxed_best}).encode('utf-8'))