Batch distillation problem (TACO)

From mintOC

This page contains a model of the Batch distillation problem in AMPL format, making use of the TACO toolkit for AMPL control optimization extensions. The original model can be found in <bibref>Diehl2006c</bibref>. Note that you will need to include the generic AMPL/TACO support file OptimalControl.mod. To solve this model, you need an optimal control or NLP code that uses the TACO toolkit to support the AMPL optimal control extensions.
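
A minimal way to run the model, sketched under the assumption that an installed TACO-aware solver has been substituted for the option solver ...; stub at the end of the file, is an AMPL session started in the directory that contains both batchdist_taco.mod and OptimalControl.mod:

ampl: model batchdist_taco.mod;   # reading the model also executes the trailing solve; statement
ampl: display tf;                 # inspect the optimized free end time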

AMPL

This is the source file batchdist_taco.mod

# ----------------------------------------------------------------
# Batch distillation problem using AMPL and TACO
# (c) Christian Kirches, Sven Leyffer
#
# Source: M.Diehl/H.G.Bock/E.Kostina'06
# ----------------------------------------------------------------
include OptimalControl.mod;
 
# time and free end-time
 
var t;
var tf := 2.5, >= 0.5, <= 10.0;
 
# constant parameters
 
param Pur := 0.99;		# required distillate purity (mole fraction)
param V := 100.0;		# mol/h
param m := 0.1;			# mol
param mC := 0.1;		# mol
 
# control
 
var R := 8.0, >= 0.0, <= 15.0;	# reflux ratio (control variable)
let R.type := "u1";		# TACO suffix marking R as a control
let R.scale := 0.1;		# TACO suffix giving the solver a scaling hint
 
# differential states
 
param NDIS := 5;		# number of trays (discretization points)
 
var M0;			# molar holdup in the still (reboiler)
var x{0..NDIS+1};	# liquid compositions: still (0), trays (1..NDIS), condenser (NDIS+1)
var MD;			# accumulated distillate holdup
var xD;			# accumulated distillate composition
var alpha;		# equilibrium parameter in y, kept constant via ODE_alpha
 
 
# algebraic expressions eliminated by AMPL's presolve
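#   L    - internal liquid (reflux) flow, obtained from the reflux ratio R and the vapor flow V
#   y[i] - vapor composition in equilibrium with the liquid composition x[i]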
 
var L = R/(1+R)*V;
var y{i in 0..NDIS} = (1+alpha)*x[i]/(alpha+x[i]);
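
# right-hand sides of the composition ODEs:
#   dot0      - still (reboiler) composition x[0], holdup M0
#   dot[i]    - composition x[i] on tray i, tray holdup m
#   dotNDISp1 - condenser composition x[NDIS+1], condenser holdup mC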
 
var dot0 = ( L*x[1] - V*y[0] + (V-L)*x[0] ) / M0;
var dot{i in 1..NDIS} = ( L*x[i+1] - V*y[i] + V*y[i-1] - L*x[i] )/m;
var dotNDISp1 = V/mC * (-x[NDIS+1] + y[NDIS]);
 
 
# objective function
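# eval(t - MD, tf) = tf - MD(tf): compromise between a short batch time and a large distillate amount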
 
minimize Compromise:
	eval (t - MD, tf);
 
 
# terminal constraint
 
subject to Purity_Constraint:
	eval(xD, tf) >= Pur;
 
 
# ODE system
 
subject to ODE_M0:
	diff(M0, t) = -V+L;
 
subject to ODE_x_0:
	diff(x[0], t) = dot0;
 
subject to ODE_x{i in 1..NDIS}:
	diff(x[i], t) = dot[i];
 
subject to ODE_x_NDISp1:
	diff(x[NDIS+1], t) = dotNDISp1;
 
subject to ODE_MD:
	diff(MD, t) = V-L;
 
subject to ODE_xD:
	diff(xD, t) = (V-L) * (x[NDIS] - xD)/MD;
 
subject to ODE_alpha:
	diff(alpha, t) = 0.0;
 
 
# Initial value constraints
 
subject to IVC_M0:
	eval(M0, 0) = 100.0;
 
subject to IVC_x_0:
	eval(x[0], 0) = 0.5;
 
subject to IVC_x{i in 1..NDIS+1}:
	eval(x[i], 0) = 1;
 
subject to IVC_MD:
	eval(MD, 0) = 0.1;
 
subject to IVC_xD:
	eval(xD, 0) = 1;
 
subject to IVC_alpha:
	eval(alpha, 0) = 0.2;
 
option solver ...;
 
solve;
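
For reference, the dynamics encoded by the ODE constraints above can be read off directly from the model (writing N for NDIS; the state alpha is held constant by its zero derivative):

\begin{align*}
\dot M_0     &= -(V - L), \\
\dot x_0     &= \bigl( L x_1 - V y_0 + (V - L)\, x_0 \bigr) / M_0, \\
\dot x_i     &= \bigl( L x_{i+1} - V y_i + V y_{i-1} - L x_i \bigr) / m, \qquad i = 1, \dots, N, \\
\dot x_{N+1} &= \tfrac{V}{m_C} \bigl( y_N - x_{N+1} \bigr), \\
\dot M_D     &= V - L, \\
\dot x_D     &= (V - L)\,(x_N - x_D) / M_D, \\
\dot \alpha  &= 0,
\end{align*}

with L = \tfrac{R}{1+R}\, V and y_i = (1+\alpha)\, x_i / (\alpha + x_i). The objective eval(t - MD, tf) then minimizes t_f - M_D(t_f) subject to the terminal purity constraint x_D(t_f) \ge \mathrm{Pur}.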

Other Descriptions

Other descriptions of this problem are available in

References

[Diehl2006c] M. Diehl, H.G. Bock, E. Kostina (2006), cited above as the source of the original model.