diff --git a/.flake8 b/.flake8 new file mode 100644 index 00000000..340d8a3b --- /dev/null +++ b/.flake8 @@ -0,0 +1,4 @@ +[flake8] +ignore = D203,E741,W503 +max-complexity = 10 +max-line-length = 160 \ No newline at end of file diff --git a/Dip/bugs/OsiCbc/main.cpp b/Dip/bugs/OsiCbc/main.cpp index 7a8855e6..3e99e453 100644 --- a/Dip/bugs/OsiCbc/main.cpp +++ b/Dip/bugs/OsiCbc/main.cpp @@ -1,50 +1,46 @@ -//[magala@orclus71 OsiCbc]$ g++ main.cpp -L ../../../build-g/lib -I ../../../build-g/include/coin/ -lCbc -lOsi -lCbc -lCgl -lCoinUtils -lOsiCbc -lClp -lOsi -lCoinUtils -lCbcSolver -lCbc -lOsiClp -lClp -lCgl -lCoinUtils -lOsi - +//[magala@orclus71 OsiCbc]$ g++ main.cpp -L ../../../build-g/lib -I +//../../../build-g/include/coin/ -lCbc -lOsi -lCbc -lCgl -lCoinUtils -lOsiCbc +//-lClp -lOsi -lCoinUtils -lCbcSolver -lCbc -lOsiClp -lClp -lCgl -lCoinUtils +//-lOsi #include "OsiCbcSolverInterface.hpp" #include using namespace std; -int main(int argc, char ** argv){ - const int numCols = 2; - const int numNzs = 10; - const int numRows = 6; - bool isRowOrdered = false; - double objective [numCols] = {1,0}; - int rowIndices [numNzs] = {0,0,1,2,2,3,3,4,5,5}; - int colIndices [numNzs] = {0,1,1,0,1,0,1,1,0,1}; - double elements [numNzs] = { 7.0, -1.0, 1.0, -1.0, 1.0, - -4.0, -1.0, -1.0, 0.2, -1.0}; - CoinPackedMatrix M(isRowOrdered, - rowIndices, colIndices, elements, numNzs); - double rowLB[numRows] = {13.0, 1.0, -3.0, -27.0, -5.0, -4.0}; - double rowUB[numRows] = {OsiCbcInfinity, - OsiCbcInfinity, - OsiCbcInfinity, - OsiCbcInfinity, - OsiCbcInfinity, - OsiCbcInfinity}; - double colLB[numCols] = {0,0}; - double colUB[numCols] = {6,6}; - int integerVars[numCols] = {0,1}; - - OsiCbcSolverInterface osi; - osi.messageHandler()->setLogLevel(0); - osi.loadProblem(M, colLB, colUB, objective, rowLB, rowUB); - osi.setInteger(integerVars, 2); - - osi.branchAndBound(); - assert(!osi.isProvenPrimalInfeasible()); - assert(osi.isProvenOptimal()); +int main(int argc, char **argv) { + const int numCols = 2; + const int numNzs = 10; + const int numRows = 6; + bool isRowOrdered = false; + double objective[numCols] = {1, 0}; + int rowIndices[numNzs] = {0, 0, 1, 2, 2, 3, 3, 4, 5, 5}; + int colIndices[numNzs] = {0, 1, 1, 0, 1, 0, 1, 1, 0, 1}; + double elements[numNzs] = {7.0, -1.0, 1.0, -1.0, 1.0, + -4.0, -1.0, -1.0, 0.2, -1.0}; + CoinPackedMatrix M(isRowOrdered, rowIndices, colIndices, elements, numNzs); + double rowLB[numRows] = {13.0, 1.0, -3.0, -27.0, -5.0, -4.0}; + double rowUB[numRows] = {OsiCbcInfinity, OsiCbcInfinity, OsiCbcInfinity, + OsiCbcInfinity, OsiCbcInfinity, OsiCbcInfinity}; + double colLB[numCols] = {0, 0}; + double colUB[numCols] = {6, 6}; + int integerVars[numCols] = {0, 1}; + + OsiCbcSolverInterface osi; + osi.messageHandler()->setLogLevel(0); + osi.loadProblem(M, colLB, colUB, objective, rowLB, rowUB); + osi.setInteger(integerVars, 2); - //osi-cbc changes internal column bounds, must reset - double redCostX[numCols] = {1.34019,-0.10562}; - osi.setColLower(colLB); - osi.setColUpper(colUB); - osi.setObjective(redCostX); - osi.writeMps("tmp"); - osi.branchAndBound(); - assert(!osi.isProvenPrimalInfeasible()); - assert(osi.isProvenOptimal()); + osi.branchAndBound(); + assert(!osi.isProvenPrimalInfeasible()); + assert(osi.isProvenOptimal()); + // osi-cbc changes internal column bounds, must reset + double redCostX[numCols] = {1.34019, -0.10562}; + osi.setColLower(colLB); + osi.setColUpper(colUB); + osi.setObjective(redCostX); + osi.writeMps("tmp"); + osi.branchAndBound(); + 
assert(!osi.isProvenPrimalInfeasible()); + assert(osi.isProvenOptimal()); } diff --git a/Dip/bugs/main.cpp b/Dip/bugs/main.cpp index 900245b1..30af8ec7 100644 --- a/Dip/bugs/main.cpp +++ b/Dip/bugs/main.cpp @@ -1,33 +1,34 @@ -// [magala@orclus71 bugs]$ g++ main.cpp -L ../../build-g/lib -I ../../build-g/incli -lClp -lCbc -lCoinUtils -lCgl -lOsi -lCbcSolver -lCbc -lCgl -lClp - +// [magala@orclus71 bugs]$ g++ main.cpp -L ../../build-g/lib -I +// ../../build-g/incli -lClp -lCbc -lCoinUtils -lCgl -lOsi -lCbcSolver -lCbc +// -lCgl -lClp #include "CbcSolver.hpp" #include "OsiClpSolverInterface.hpp" #include using namespace std; -int main(int argc, char ** argv){ - string lpFile = argv[1]; +int main(int argc, char **argv) { + string lpFile = argv[1]; - OsiClpSolverInterface si; - si.readLp(lpFile.c_str()); + OsiClpSolverInterface si; + si.readLp(lpFile.c_str()); - CbcModel cbc(si); - CbcMain0(cbc); + CbcModel cbc(si); + CbcMain0(cbc); - const char * cbcArgv[20]; - int cbcArgc = 0; - string cbcExe = "cbc"; - string cbcSolve = "-solve"; - string cbcQuit = "-quit"; - string cbcLog = "-log"; - string cbcLogSet = "3"; - cbcArgv[cbcArgc++] = cbcExe.c_str(); - cbcArgv[cbcArgc++] = cbcLog.c_str(); - cbcArgv[cbcArgc++] = cbcLogSet.c_str(); - cbcArgv[cbcArgc++] = cbcSolve.c_str(); - cbcArgv[cbcArgc++] = cbcQuit.c_str(); - CbcMain1(cbcArgc, cbcArgv, cbc); - printf("cbc.status() = %d\n", cbc.status()); - printf("cbc.isProveOptimal() = %d\n", cbc.isProvenOptimal()); + const char *cbcArgv[20]; + int cbcArgc = 0; + string cbcExe = "cbc"; + string cbcSolve = "-solve"; + string cbcQuit = "-quit"; + string cbcLog = "-log"; + string cbcLogSet = "3"; + cbcArgv[cbcArgc++] = cbcExe.c_str(); + cbcArgv[cbcArgc++] = cbcLog.c_str(); + cbcArgv[cbcArgc++] = cbcLogSet.c_str(); + cbcArgv[cbcArgc++] = cbcSolve.c_str(); + cbcArgv[cbcArgc++] = cbcQuit.c_str(); + CbcMain1(cbcArgc, cbcArgv, cbc); + printf("cbc.status() = %d\n", cbc.status()); + printf("cbc.isProveOptimal() = %d\n", cbc.isProvenOptimal()); } diff --git a/Dip/examples/AP3/AP3_DecompApp.cpp b/Dip/examples/AP3/AP3_DecompApp.cpp index 48589376..e7d8856b 100644 --- a/Dip/examples/AP3/AP3_DecompApp.cpp +++ b/Dip/examples/AP3/AP3_DecompApp.cpp @@ -12,257 +12,250 @@ // All Rights Reserved. 
// //===========================================================================// +#include "AP3_DecompApp.h" #include "DecompAlgo.h" #include "DecompCutOsi.h" -#include "AP3_DecompApp.h" -//TODO: brute force solve +// TODO: brute force solve // --------------------------------------------------------------------- // -void AP3_DecompApp::initializeApp(UtilParameters & utilParam) - throw(CoinError) -{ - UtilPrintFuncBegin(m_osLog, m_classTag, - "initializeApp()", m_param.LogDebugLevel, 2); - - //--- - //--- get application parameters - //--- - m_appParam.getSettings(utilParam); - - string fileName = m_appParam.DataDir - + UtilDirSlash() + m_appParam.Instance + ".txt"; - m_ap3data.readInstance(fileName.c_str()); - m_ap3data.m_instance = m_appParam.Instance; - - string fileNameSol = m_appParam.DataDir - + UtilDirSlash() + m_appParam.Instance + ".sol"; - m_ap3data.readOptimalBound(fileNameSol.c_str()); - - int d; - int dimension = m_ap3data.m_dimension; - m_assigncostMin = new double*[dimension]; - m_assignindexMin = new int* [dimension]; - CoinAssertHint(m_assigncostMin && m_assignindexMin, "Error: Out of Memory"); - for(d = 0; d < dimension; d++){ - m_assigncostMin[d] = new double[dimension]; - m_assignindexMin[d] = new int [dimension]; - CoinAssertHint(m_assigncostMin[d] && m_assignindexMin[d], - "Error: Out of Memory"); - } - - //TODO: don't bother building if not using LP solver for this - { - m_siAP = m_decompAlgo->getOsiLpSolverInterface(); - CoinAssertHint(m_siAP, "Error: Out of Memory"); - - //--- - //--- Two-Indexed Assignment Problem Relaxation (e.g., MODEL_I): - //--- - //--- min {jk in J x K} c'_jk x_jk - //--- sum {k in K} x_jk = 1, forall j in J - //--- sum {j in J} x_jk = 1, forall k in K - //--- x_jk in {0,1}, for all jk in J x K - //--- where c_'jk = min{i in I} c_ijk - //--- - //--- The model stays the same, only objective changes - //--- so, construct only once. 
- //--- - int dimensionSq = dimension * dimension; - int n_rows = 2 * dimension; - int n_nonzeros = 2 * dimensionSq; - int * rowInd = new int [n_nonzeros]; - int * rowBeg = new int [n_rows + 1]; - int * rowLen = new int [n_rows ]; - double * rowEls = new double[n_nonzeros]; - double * colLB = new double[dimensionSq]; - double * colUB = new double[dimensionSq]; - double * rowB = new double[n_rows]; - CoinAssertHint(rowInd && rowBeg && rowEls && colLB && colUB && rowB, - "Error: Out of Memory"); - - CoinFillN(rowEls, n_nonzeros, 1.0); - CoinFillN(rowB, n_rows, 1.0); - CoinFillN(colLB, dimensionSq, 0.0); - CoinFillN(colUB, dimensionSq, 1.0); - - int rowIndex, ind1, ind2, nz_index; - - rowIndex = 0; - rowBeg[rowIndex] = 0; - nz_index = 0; - - //--- - //--- sum {k in K} x_jk = 1, forall j in J - //--- - for(ind1 = 0; ind1 < dimension; ind1++){ - for(ind2 = 0; ind2 < dimension; ind2++){ - rowInd[nz_index++] = index2(ind1,ind2); - } - rowBeg[rowIndex+1] = rowBeg[rowIndex] + dimension; - rowLen[rowIndex] = dimension; - rowIndex++; +void AP3_DecompApp::initializeApp(UtilParameters &utilParam) throw(CoinError) { + UtilPrintFuncBegin(m_osLog, m_classTag, "initializeApp()", + m_param.LogDebugLevel, 2); + + //--- + //--- get application parameters + //--- + m_appParam.getSettings(utilParam); + + string fileName = + m_appParam.DataDir + UtilDirSlash() + m_appParam.Instance + ".txt"; + m_ap3data.readInstance(fileName.c_str()); + m_ap3data.m_instance = m_appParam.Instance; + + string fileNameSol = + m_appParam.DataDir + UtilDirSlash() + m_appParam.Instance + ".sol"; + m_ap3data.readOptimalBound(fileNameSol.c_str()); + + int d; + int dimension = m_ap3data.m_dimension; + m_assigncostMin = new double *[dimension]; + m_assignindexMin = new int *[dimension]; + CoinAssertHint(m_assigncostMin && m_assignindexMin, "Error: Out of Memory"); + for (d = 0; d < dimension; d++) { + m_assigncostMin[d] = new double[dimension]; + m_assignindexMin[d] = new int[dimension]; + CoinAssertHint(m_assigncostMin[d] && m_assignindexMin[d], + "Error: Out of Memory"); + } + + // TODO: don't bother building if not using LP solver for this + { + m_siAP = m_decompAlgo->getOsiLpSolverInterface(); + CoinAssertHint(m_siAP, "Error: Out of Memory"); + + //--- + //--- Two-Indexed Assignment Problem Relaxation (e.g., MODEL_I): + //--- + //--- min {jk in J x K} c'_jk x_jk + //--- sum {k in K} x_jk = 1, forall j in J + //--- sum {j in J} x_jk = 1, forall k in K + //--- x_jk in {0,1}, for all jk in J x K + //--- where c_'jk = min{i in I} c_ijk + //--- + //--- The model stays the same, only objective changes + //--- so, construct only once. 
+ //--- + int dimensionSq = dimension * dimension; + int n_rows = 2 * dimension; + int n_nonzeros = 2 * dimensionSq; + int *rowInd = new int[n_nonzeros]; + int *rowBeg = new int[n_rows + 1]; + int *rowLen = new int[n_rows]; + double *rowEls = new double[n_nonzeros]; + double *colLB = new double[dimensionSq]; + double *colUB = new double[dimensionSq]; + double *rowB = new double[n_rows]; + CoinAssertHint(rowInd && rowBeg && rowEls && colLB && colUB && rowB, + "Error: Out of Memory"); + + CoinFillN(rowEls, n_nonzeros, 1.0); + CoinFillN(rowB, n_rows, 1.0); + CoinFillN(colLB, dimensionSq, 0.0); + CoinFillN(colUB, dimensionSq, 1.0); + + int rowIndex, ind1, ind2, nz_index; + + rowIndex = 0; + rowBeg[rowIndex] = 0; + nz_index = 0; + + //--- + //--- sum {k in K} x_jk = 1, forall j in J + //--- + for (ind1 = 0; ind1 < dimension; ind1++) { + for (ind2 = 0; ind2 < dimension; ind2++) { + rowInd[nz_index++] = index2(ind1, ind2); } - - //--- - //--- sum {j in J} x_jk = 1, forall k in K - //--- - for(ind2 = 0; ind2 < dimension; ind2++){ - for(ind1 = 0; ind1 < dimension; ind1++){ - rowInd[nz_index++] = index2(ind1,ind2); - } - rowBeg[rowIndex+1] = rowBeg[rowIndex] + dimension; - rowLen[rowIndex] = dimension; - rowIndex++; + rowBeg[rowIndex + 1] = rowBeg[rowIndex] + dimension; + rowLen[rowIndex] = dimension; + rowIndex++; + } + + //--- + //--- sum {j in J} x_jk = 1, forall k in K + //--- + for (ind2 = 0; ind2 < dimension; ind2++) { + for (ind1 = 0; ind1 < dimension; ind1++) { + rowInd[nz_index++] = index2(ind1, ind2); } - - CoinPackedMatrix M(false, dimensionSq, n_rows, n_nonzeros, - rowEls, rowInd, rowBeg, rowLen); - m_siAP->loadProblem(M, colLB, colUB, colLB, rowB, rowB); - - UTIL_DELARR(rowInd); - UTIL_DELARR(rowBeg); - UTIL_DELARR(rowLen); - UTIL_DELARR(rowEls); - UTIL_DELARR(colLB); - UTIL_DELARR(colUB); - UTIL_DELARR(rowB); - } - UtilPrintFuncEnd(m_osLog, m_classTag, - "initializeApp()", m_param.LogDebugLevel, 2); + rowBeg[rowIndex + 1] = rowBeg[rowIndex] + dimension; + rowLen[rowIndex] = dimension; + rowIndex++; + } + + CoinPackedMatrix M(false, dimensionSq, n_rows, n_nonzeros, rowEls, rowInd, + rowBeg, rowLen); + m_siAP->loadProblem(M, colLB, colUB, colLB, rowB, rowB); + + UTIL_DELARR(rowInd); + UTIL_DELARR(rowBeg); + UTIL_DELARR(rowLen); + UTIL_DELARR(rowEls); + UTIL_DELARR(colLB); + UTIL_DELARR(colUB); + UTIL_DELARR(rowB); + } + UtilPrintFuncEnd(m_osLog, m_classTag, "initializeApp()", + m_param.LogDebugLevel, 2); } // --------------------------------------------------------------------- // -void -AP3_DecompApp::APPcreateModel(double *& objCoeff, - map & modelCore, - map > & modelRelax) -{ - - //--- - //--- createModel is a pure virtual method of DecompApp and must - //--- be derived by the application class to define the partitioning - //--- of constraints into [A,b] = [A',b'] union [A'', b''] - //--- - UtilPrintFuncEnd(m_osLog, m_classTag, - "APPcreateModel()", m_param.LogDebugLevel, 2); - - //--- - //--- Three-Indexed Assignment Problem: - //--- - //--- min {ijk in I x J x K} c_ijk x_ijk - //--- sum {jk in J x K} x_ijk = 1, forall i in I - //--- sum {ik in I x K} x_ijk = 1, forall j in J - //--- sum {ij in I x J} x_ijk = 1, forall k in K - //--- x_ijk in {0,1}, for all ijk in I x J x K - //--- - - //--- - //--- Partition the problem into I/J/K. - //--- The modelCore will be one set of constraints (I, J, or K). - //--- The modelRelax will be the rest (an instance of 2AP). 
- //--- - int dimension = m_ap3data.m_dimension; - int dimensionSq = dimension * dimension; - int n_cols = m_ap3data.m_ncolsFull; - - //--- - //--- open temporary memory for use in storing rows - //--- - int * rowInd = new int [dimensionSq]; - double * rowEls = new double[dimensionSq]; - //if(!(rowInd && rowEls)) - // CoinAssert("Error: Out of Memory"); - - //--- - //--- open memory for the objective coefficients of modelCore - //--- who is responsible to open this memory? AP3_DecompApp - //--- who is responsible to free this memory? DecompAlgo - //--- - objCoeff = new double[n_cols]; - memcpy(objCoeff, m_ap3data.m_assigncost, n_cols * sizeof(double)); - - //--- - //--- create Model I - //--- - vector< DecompConstraintSet* > modelRelaxVI; - DecompConstraintSet * modelCoreI = new DecompConstraintSet(); - DecompConstraintSet * modelRelaxI = new DecompConstraintSet(); - createModelPart(MODEL_I, rowInd, rowEls, modelCoreI, modelRelaxI); - modelRelaxVI.push_back(modelRelaxI); - modelCore.insert(make_pair(MODEL_I, modelCoreI)); - modelRelax.insert(make_pair(MODEL_I, modelRelaxVI)); - - //--- - //--- create Model J - //--- - vector< DecompConstraintSet* > modelRelaxVJ; - DecompConstraintSet * modelCoreJ = new DecompConstraintSet(); - DecompConstraintSet * modelRelaxJ = new DecompConstraintSet(); - createModelPart(MODEL_J, rowInd, rowEls, modelCoreJ, modelRelaxJ); - modelRelaxVJ.push_back(modelRelaxJ); - modelCore.insert(make_pair(MODEL_J, modelCoreJ)); - modelRelax.insert(make_pair(MODEL_J, modelRelaxVJ)); - - //--- - //--- create Model K - //--- - vector< DecompConstraintSet* > modelRelaxVK; - DecompConstraintSet * modelCoreK = new DecompConstraintSet(); - DecompConstraintSet * modelRelaxK = new DecompConstraintSet(); - createModelPart(MODEL_K, rowInd, rowEls, modelCoreK, modelRelaxK); - modelRelaxVK.push_back(modelRelaxK); - modelCore.insert(make_pair(MODEL_K, modelCoreK)); - modelRelax.insert(make_pair(MODEL_K, modelRelaxVK)); - - //--- - //--- TODO: weird if objCoeff is allocated here, but not deleted - //--- free local memory - //--- - UTIL_DELARR(rowInd); - UTIL_DELARR(rowEls); - - UtilPrintFuncEnd(m_osLog, m_classTag, - "APPcreateModel()", m_param.LogDebugLevel, 2); +void AP3_DecompApp::APPcreateModel( + double *&objCoeff, map &modelCore, + map> &modelRelax) { + + //--- + //--- createModel is a pure virtual method of DecompApp and must + //--- be derived by the application class to define the partitioning + //--- of constraints into [A,b] = [A',b'] union [A'', b''] + //--- + UtilPrintFuncEnd(m_osLog, m_classTag, "APPcreateModel()", + m_param.LogDebugLevel, 2); + + //--- + //--- Three-Indexed Assignment Problem: + //--- + //--- min {ijk in I x J x K} c_ijk x_ijk + //--- sum {jk in J x K} x_ijk = 1, forall i in I + //--- sum {ik in I x K} x_ijk = 1, forall j in J + //--- sum {ij in I x J} x_ijk = 1, forall k in K + //--- x_ijk in {0,1}, for all ijk in I x J x K + //--- + + //--- + //--- Partition the problem into I/J/K. + //--- The modelCore will be one set of constraints (I, J, or K). + //--- The modelRelax will be the rest (an instance of 2AP). 
+ //--- + int dimension = m_ap3data.m_dimension; + int dimensionSq = dimension * dimension; + int n_cols = m_ap3data.m_ncolsFull; + + //--- + //--- open temporary memory for use in storing rows + //--- + int *rowInd = new int[dimensionSq]; + double *rowEls = new double[dimensionSq]; + // if(!(rowInd && rowEls)) + // CoinAssert("Error: Out of Memory"); + + //--- + //--- open memory for the objective coefficients of modelCore + //--- who is responsible to open this memory? AP3_DecompApp + //--- who is responsible to free this memory? DecompAlgo + //--- + objCoeff = new double[n_cols]; + memcpy(objCoeff, m_ap3data.m_assigncost, n_cols * sizeof(double)); + + //--- + //--- create Model I + //--- + vector modelRelaxVI; + DecompConstraintSet *modelCoreI = new DecompConstraintSet(); + DecompConstraintSet *modelRelaxI = new DecompConstraintSet(); + createModelPart(MODEL_I, rowInd, rowEls, modelCoreI, modelRelaxI); + modelRelaxVI.push_back(modelRelaxI); + modelCore.insert(make_pair(MODEL_I, modelCoreI)); + modelRelax.insert(make_pair(MODEL_I, modelRelaxVI)); + + //--- + //--- create Model J + //--- + vector modelRelaxVJ; + DecompConstraintSet *modelCoreJ = new DecompConstraintSet(); + DecompConstraintSet *modelRelaxJ = new DecompConstraintSet(); + createModelPart(MODEL_J, rowInd, rowEls, modelCoreJ, modelRelaxJ); + modelRelaxVJ.push_back(modelRelaxJ); + modelCore.insert(make_pair(MODEL_J, modelCoreJ)); + modelRelax.insert(make_pair(MODEL_J, modelRelaxVJ)); + + //--- + //--- create Model K + //--- + vector modelRelaxVK; + DecompConstraintSet *modelCoreK = new DecompConstraintSet(); + DecompConstraintSet *modelRelaxK = new DecompConstraintSet(); + createModelPart(MODEL_K, rowInd, rowEls, modelCoreK, modelRelaxK); + modelRelaxVK.push_back(modelRelaxK); + modelCore.insert(make_pair(MODEL_K, modelCoreK)); + modelRelax.insert(make_pair(MODEL_K, modelRelaxVK)); + + //--- + //--- TODO: weird if objCoeff is allocated here, but not deleted + //--- free local memory + //--- + UTIL_DELARR(rowInd); + UTIL_DELARR(rowEls); + + UtilPrintFuncEnd(m_osLog, m_classTag, "APPcreateModel()", + m_param.LogDebugLevel, 2); } //--------------------------------------------------------------------- // -void AP3_DecompApp::createModelPart(const int modelType, - int * rowInd, - double * rowEls, - DecompConstraintSet * modelCore, - DecompConstraintSet * modelRelax) - throw(CoinError) -{ - - UtilPrintFuncBegin(m_osLog, m_classTag, - "createModelPart()", m_param.LogDebugLevel, 2); - - const int dimension = m_ap3data.m_dimension; - const int dimensionSq = dimension * dimension; - const int n_cols = m_ap3data.m_ncolsFull; - const int n_rows = m_ap3data.m_nrowsFull; - const int n_rowsThird = static_cast(n_rows / 3); - - //--- - //--- for multi-polytope, we must choose a fixed A'' - //--- A = [A'', A'[k]] - //--- - //--- it always must be true that - //--- A'' inter A[k] contains A - //--- - //--- so, if A[k] is a partition (rather than nested), as - //--- it is in AP3, then we are forced to use A'' = A - //--- - - //--- - //--- set the constraint matrix of modelCore and modelRelax - //--- who is responsible to open this memory? AP3_DecompApp - //--- who is responsible to free this memory? 
DecompAlgo - //--- - modelCore->M = new CoinPackedMatrix(false, 0.0, 0.0); - CoinAssertHint(modelCore->M, "Error: Out of Memory"); - modelCore->M->setDimensions(0, n_cols); +void AP3_DecompApp::createModelPart( + const int modelType, int *rowInd, double *rowEls, + DecompConstraintSet *modelCore, + DecompConstraintSet *modelRelax) throw(CoinError) { + + UtilPrintFuncBegin(m_osLog, m_classTag, "createModelPart()", + m_param.LogDebugLevel, 2); + + const int dimension = m_ap3data.m_dimension; + const int dimensionSq = dimension * dimension; + const int n_cols = m_ap3data.m_ncolsFull; + const int n_rows = m_ap3data.m_nrowsFull; + const int n_rowsThird = static_cast(n_rows / 3); + + //--- + //--- for multi-polytope, we must choose a fixed A'' + //--- A = [A'', A'[k]] + //--- + //--- it always must be true that + //--- A'' inter A[k] contains A + //--- + //--- so, if A[k] is a partition (rather than nested), as + //--- it is in AP3, then we are forced to use A'' = A + //--- + + //--- + //--- set the constraint matrix of modelCore and modelRelax + //--- who is responsible to open this memory? AP3_DecompApp + //--- who is responsible to free this memory? DecompAlgo + //--- + modelCore->M = new CoinPackedMatrix(false, 0.0, 0.0); + CoinAssertHint(modelCore->M, "Error: Out of Memory"); + modelCore->M->setDimensions(0, n_cols); #if 0 if(m_param.PriceMultiPoly) @@ -270,82 +263,81 @@ void AP3_DecompApp::createModelPart(const int modelType, else modelCore->M->reserve(n_rowsThird, n_rowsThird * dimensionSq); #endif - modelCore->M->reserve(n_rowsThird, n_rowsThird * dimensionSq); - - modelRelax->M = new CoinPackedMatrix(false, 0.0, 0.0); - CoinAssertHint(modelRelax->M, "Error: Out of Memory"); - modelRelax->M->setDimensions(0, n_cols); - modelRelax->M->reserve(2 * n_rowsThird, 2 * n_rowsThird * dimensionSq); - - int (AP3_DecompApp::*indexFunc)(const int, const int, const int) const = 0; - switch(modelType){ - case MODEL_I: - //--- - //--- sum {jk in J x K} x_ijk = 1, forall i in I [Core ] - //--- sum {ik in I x K} x_ijk = 1, forall j in J [Relax] - //--- sum {ij in I x J} x_ijk = 1, forall k in K [Relax] - //--- - indexFunc = &AP3_DecompApp::indexIJK; - break; - case MODEL_J: - //--- - //--- sum {jk in J x K} x_ijk = 1, forall i in I [Relax] - //--- sum {ik in I x K} x_ijk = 1, forall j in J [Core ] - //--- sum {ij in I x J} x_ijk = 1, forall k in K [Relax] - //--- - indexFunc = &AP3_DecompApp::indexJIK; - break; - case MODEL_K: - //--- - //--- sum {jk in J x K} x_ijk = 1, forall i in I [Relax] - //--- sum {ik in I x K} x_ijk = 1, forall j in J [Relax] - //--- sum {ij in I x J} x_ijk = 1, forall k in K [Core ] - //--- - indexFunc = &AP3_DecompApp::indexKIJ; - break; - default: - CoinAssertHint(0, "Error: Bad Argument for modelType"); - } - - - int ind1, ind2, ind3, len; - CoinFillN(rowEls, dimensionSq, 1.0); - for(ind1 = 0; ind1 < dimension; ind1++){ - len = 0; - for(ind2 = 0; ind2 < dimension; ind2++){ - for(ind3 = 0; ind3 < dimension; ind3++){ - rowInd[len++] = (this->*indexFunc)(ind1,ind2,ind3); - } + modelCore->M->reserve(n_rowsThird, n_rowsThird * dimensionSq); + + modelRelax->M = new CoinPackedMatrix(false, 0.0, 0.0); + CoinAssertHint(modelRelax->M, "Error: Out of Memory"); + modelRelax->M->setDimensions(0, n_cols); + modelRelax->M->reserve(2 * n_rowsThird, 2 * n_rowsThird * dimensionSq); + + int (AP3_DecompApp::*indexFunc)(const int, const int, const int) const = 0; + switch (modelType) { + case MODEL_I: + //--- + //--- sum {jk in J x K} x_ijk = 1, forall i in I [Core ] + //--- sum {ik in I x K} 
x_ijk = 1, forall j in J [Relax] + //--- sum {ij in I x J} x_ijk = 1, forall k in K [Relax] + //--- + indexFunc = &AP3_DecompApp::indexIJK; + break; + case MODEL_J: + //--- + //--- sum {jk in J x K} x_ijk = 1, forall i in I [Relax] + //--- sum {ik in I x K} x_ijk = 1, forall j in J [Core ] + //--- sum {ij in I x J} x_ijk = 1, forall k in K [Relax] + //--- + indexFunc = &AP3_DecompApp::indexJIK; + break; + case MODEL_K: + //--- + //--- sum {jk in J x K} x_ijk = 1, forall i in I [Relax] + //--- sum {ik in I x K} x_ijk = 1, forall j in J [Relax] + //--- sum {ij in I x J} x_ijk = 1, forall k in K [Core ] + //--- + indexFunc = &AP3_DecompApp::indexKIJ; + break; + default: + CoinAssertHint(0, "Error: Bad Argument for modelType"); + } + + int ind1, ind2, ind3, len; + CoinFillN(rowEls, dimensionSq, 1.0); + for (ind1 = 0; ind1 < dimension; ind1++) { + len = 0; + for (ind2 = 0; ind2 < dimension; ind2++) { + for (ind3 = 0; ind3 < dimension; ind3++) { + rowInd[len++] = (this->*indexFunc)(ind1, ind2, ind3); } - CoinAssertHint(len == dimensionSq, "Error in construction len != n^2"); - modelCore->M->appendRow(len, rowInd, rowEls); - - len = 0; - for(ind2 = 0; ind2 < dimension; ind2++){ - for(ind3 = 0; ind3 < dimension; ind3++){ - rowInd[len++] = (this->*indexFunc)(ind2,ind1,ind3); - } + } + CoinAssertHint(len == dimensionSq, "Error in construction len != n^2"); + modelCore->M->appendRow(len, rowInd, rowEls); + + len = 0; + for (ind2 = 0; ind2 < dimension; ind2++) { + for (ind3 = 0; ind3 < dimension; ind3++) { + rowInd[len++] = (this->*indexFunc)(ind2, ind1, ind3); } - CoinAssertHint(len == dimensionSq, "Error in construction len != n^2"); + } + CoinAssertHint(len == dimensionSq, "Error in construction len != n^2"); #if 0 if(m_param.PriceMultiPoly) modelCore->M->appendRow(len, rowInd, rowEls); #endif - modelRelax->M->appendRow(len, rowInd, rowEls); - - len = 0; - for(ind2 = 0; ind2 < dimension; ind2++){ - for(ind3 = 0; ind3 < dimension; ind3++){ - rowInd[len++] = (this->*indexFunc)(ind2,ind3,ind1); - } + modelRelax->M->appendRow(len, rowInd, rowEls); + + len = 0; + for (ind2 = 0; ind2 < dimension; ind2++) { + for (ind3 = 0; ind3 < dimension; ind3++) { + rowInd[len++] = (this->*indexFunc)(ind2, ind3, ind1); } - CoinAssertHint(len == dimensionSq, "Error in construction len != n^2"); + } + CoinAssertHint(len == dimensionSq, "Error in construction len != n^2"); #if 0 if(m_param.PriceMultiPoly) modelCore->M->appendRow(len, rowInd, rowEls); #endif - modelRelax->M->appendRow(len, rowInd, rowEls); - } + modelRelax->M->appendRow(len, rowInd, rowEls); + } #if 0 if(m_param.PriceMultiPoly){ CoinAssert(modelCore->M->getNumRows() == 3*dimension); @@ -354,45 +346,44 @@ void AP3_DecompApp::createModelPart(const int modelType, CoinAssert(modelCore->M->getNumRows() == dimension); } #endif - CoinAssert(modelCore->M->getNumRows() == dimension); - CoinAssert(modelRelax->M->getNumRows() == 2*dimension); - - //--- - //--- set the row upper and lower bounds - //--- set the col upper and lower bounds - //--- - int n_CoreRows = modelCore->M->getNumRows(); - int n_RelaxRows = modelRelax->M->getNumRows(); - UtilFillN(modelCore->rowLB, n_CoreRows, 1.0); - UtilFillN(modelCore->rowUB, n_CoreRows, 1.0); - UtilFillN(modelRelax->rowLB, n_RelaxRows, 1.0); - UtilFillN(modelRelax->rowUB, n_RelaxRows, 1.0); - - //THINK: is colLB/UB for Core vs Relax ever different? 
- UtilFillN(modelCore->colLB, n_cols, 0.0); - UtilFillN(modelCore->colUB, n_cols, 1.0); - UtilFillN(modelRelax->colLB, n_cols, 0.0); - UtilFillN(modelRelax->colUB, n_cols, 1.0); - - //#define DEBUG_AP3_10_3 + CoinAssert(modelCore->M->getNumRows() == dimension); + CoinAssert(modelRelax->M->getNumRows() == 2 * dimension); + + //--- + //--- set the row upper and lower bounds + //--- set the col upper and lower bounds + //--- + int n_CoreRows = modelCore->M->getNumRows(); + int n_RelaxRows = modelRelax->M->getNumRows(); + UtilFillN(modelCore->rowLB, n_CoreRows, 1.0); + UtilFillN(modelCore->rowUB, n_CoreRows, 1.0); + UtilFillN(modelRelax->rowLB, n_RelaxRows, 1.0); + UtilFillN(modelRelax->rowUB, n_RelaxRows, 1.0); + + // THINK: is colLB/UB for Core vs Relax ever different? + UtilFillN(modelCore->colLB, n_cols, 0.0); + UtilFillN(modelCore->colUB, n_cols, 1.0); + UtilFillN(modelRelax->colLB, n_cols, 0.0); + UtilFillN(modelRelax->colUB, n_cols, 1.0); + + //#define DEBUG_AP3_10_3 #ifdef DEBUG_AP3_10_3 - modelCore->colUB[372] = 1; - modelRelax->colUB[372] = 1; - modelCore->colLB[889] = 1; - modelRelax->colLB[889] = 1; + modelCore->colUB[372] = 1; + modelRelax->colUB[372] = 1; + modelCore->colLB[889] = 1; + modelRelax->colLB[889] = 1; #endif - - //--- - //--- set the indices of the integer variables of modelRelax - //--- - UtilIotaN(modelRelax->integerVars, n_cols, 0); - UtilPrintFuncEnd(m_osLog, m_classTag, - "createModelPart()", m_param.LogDebugLevel, 2); -} + //--- + //--- set the indices of the integer variables of modelRelax + //--- + UtilIotaN(modelRelax->integerVars, n_cols, 0); + UtilPrintFuncEnd(m_osLog, m_classTag, "createModelPart()", + m_param.LogDebugLevel, 2); +} -//TODO: +// TODO: // rethink even this design. user doesn't need to know DecompVar or any // of this - user can just provide a vector, a solution to oracle, and // framework can handle checking rc, etc and pushing that into vars @@ -401,43 +392,38 @@ void AP3_DecompApp::createModelPart(const int modelType, // advanced user can do things like check for negRC... the advanced user // can also derive their own DecompVar (if they know how to store it more // compactly (see TSP, as an example)... but, in end, the framework must -// expand it anyway... +// expand it anyway... 
//--------------------------------------------------------------------- // -DecompStatus AP3_DecompApp::APPsolveRelaxed(const int whichModel, - const double * redCostX, - const double * origCost, - const double alpha, - const int n_origCols, - const bool checkRC, - const bool checkDup, - OsiSolverInterface * m_subprobSI, - list & vars){ - - //TODO: use lp solver - //TODO: use ap solver - - //--- - //--- Three-Indexed Assignment Problem: - //--- - //--- min {ijk in I x J x K} c_ijk x_ijk - //--- sum {jk in J x K} x_ijk = 1, forall i in I - //--- sum {ik in I x K} x_ijk = 1, forall j in J - //--- sum {ij in I x J} x_ijk = 1, forall k in K - //--- x_ijk in {0,1}, for all ijk in I x J x K - //--- - //--- Two-Indexed Assignment Problem Relaxation (e.g., MODEL_I): - //--- - //--- min {jk in J x K} c'_jk x_jk - //--- sum {k in K} x_jk = 1, forall j in J - //--- sum {j in J} x_jk = 1, forall k in K - //--- x_jk in {0,1}, for all jk in J x K - //--- where c_'jk = min{i in I} c_ijk - //--- - - UtilPrintFuncBegin(m_osLog, m_classTag, - "APPsolveRelaxed()", m_param.LogDebugLevel, 2); - +DecompStatus AP3_DecompApp::APPsolveRelaxed( + const int whichModel, const double *redCostX, const double *origCost, + const double alpha, const int n_origCols, const bool checkRC, + const bool checkDup, OsiSolverInterface *m_subprobSI, + list &vars) { + + // TODO: use lp solver + // TODO: use ap solver + + //--- + //--- Three-Indexed Assignment Problem: + //--- + //--- min {ijk in I x J x K} c_ijk x_ijk + //--- sum {jk in J x K} x_ijk = 1, forall i in I + //--- sum {ik in I x K} x_ijk = 1, forall j in J + //--- sum {ij in I x J} x_ijk = 1, forall k in K + //--- x_ijk in {0,1}, for all ijk in I x J x K + //--- + //--- Two-Indexed Assignment Problem Relaxation (e.g., MODEL_I): + //--- + //--- min {jk in J x K} c'_jk x_jk + //--- sum {k in K} x_jk = 1, forall j in J + //--- sum {j in J} x_jk = 1, forall k in K + //--- x_jk in {0,1}, for all jk in J x K + //--- where c_'jk = min{i in I} c_ijk + //--- + + UtilPrintFuncBegin(m_osLog, m_classTag, "APPsolveRelaxed()", + m_param.LogDebugLevel, 2); #if 0 { @@ -452,149 +438,139 @@ DecompStatus AP3_DecompApp::APPsolveRelaxed(const int whichModel, } #endif - - //--- - //--- calculate c_'jk = min{i in I} c_ijk, or - //--- calculate c_'ik = min{i in J} c_ijk, or - //--- calculate c_'ij = min{i in K} c_ijk - //--- - int (AP3_DecompApp::*indexFunc)(const int, const int, const int) const = 0; - switch(whichModel){ - case MODEL_I: - indexFunc = &AP3_DecompApp::indexIJK; - break; - case MODEL_J: - indexFunc = &AP3_DecompApp::indexJIK; - break; - case MODEL_K: - indexFunc = &AP3_DecompApp::indexKIJ; - break; - default: - CoinAssertHint(0, "Error: Bad Argument for modelType"); - } - - double min_cost; - int index, ind1, ind2, ind3, min_index; - int dimension = m_ap3data.m_dimension; - for(ind2 = 0; ind2 < dimension; ind2++){ - for(ind3 = 0; ind3 < dimension; ind3++){ - min_cost = m_infinity; - min_index = 0; - for(ind1 = 0; ind1 < dimension; ind1++){ - index = (this->*indexFunc)(ind1,ind2,ind3); - if(redCostX[index] < min_cost){ - min_cost = redCostX[index]; - min_index = index; - } - } - m_assigncostMin [ind2][ind3] = min_cost; - m_assignindexMin[ind2][ind3] = min_index; - CoinAssert(min_cost < (m_infinity/2.0)); + //--- + //--- calculate c_'jk = min{i in I} c_ijk, or + //--- calculate c_'ik = min{i in J} c_ijk, or + //--- calculate c_'ij = min{i in K} c_ijk + //--- + int (AP3_DecompApp::*indexFunc)(const int, const int, const int) const = 0; + switch (whichModel) { + case MODEL_I: + 
indexFunc = &AP3_DecompApp::indexIJK; + break; + case MODEL_J: + indexFunc = &AP3_DecompApp::indexJIK; + break; + case MODEL_K: + indexFunc = &AP3_DecompApp::indexKIJ; + break; + default: + CoinAssertHint(0, "Error: Bad Argument for modelType"); + } + + double min_cost; + int index, ind1, ind2, ind3, min_index; + int dimension = m_ap3data.m_dimension; + for (ind2 = 0; ind2 < dimension; ind2++) { + for (ind3 = 0; ind3 < dimension; ind3++) { + min_cost = m_infinity; + min_index = 0; + for (ind1 = 0; ind1 < dimension; ind1++) { + index = (this->*indexFunc)(ind1, ind2, ind3); + if (redCostX[index] < min_cost) { + min_cost = redCostX[index]; + min_index = index; + } } - } - - //TODO: option to use ap solver vs lp solver - - //--- - //--- assigncostMin double array format is good for ap solver - //--- but lp solver needs a single array version - //--- - int col_index = 0; - for(ind2 = 0; ind2 < dimension; ind2++){ - for(ind3 = 0; ind3 < dimension; ind3++){ - m_siAP->setObjCoeff(col_index, m_assigncostMin[ind2][ind3]); - col_index++; + m_assigncostMin[ind2][ind3] = min_cost; + m_assignindexMin[ind2][ind3] = min_index; + CoinAssert(min_cost < (m_infinity / 2.0)); + } + } + + // TODO: option to use ap solver vs lp solver + + //--- + //--- assigncostMin double array format is good for ap solver + //--- but lp solver needs a single array version + //--- + int col_index = 0; + for (ind2 = 0; ind2 < dimension; ind2++) { + for (ind3 = 0; ind3 < dimension; ind3++) { + m_siAP->setObjCoeff(col_index, m_assigncostMin[ind2][ind3]); + col_index++; + } + } + + //--- + //--- solve the LP relaxation of the 2AP (integral polytope) + //--- + m_siAP->messageHandler()->setLogLevel(m_param.LogLpLevel); + m_siAP->initialSolve(); + CoinAssert(m_siAP->isProvenOptimal()); + + // deal with status issues + + //--- + //--- store the solution as a DecompVar and push into list + //--- + int i; + pair p; + vector apInd; + vector apEls(dimension, 1.0); + apInd.reserve(dimension); + + double varRedCost = 0.0; + double varOrigCost = 0.0; + const double *lpSol = m_siAP->getColSolution(); + for (i = 0; i < m_siAP->getNumCols(); i++) { + CoinAssertDebug(UtilIsZero(lpSol[i], 1.0e-4) || + UtilIsZero(1.0 - lpSol[i], 1.0e-4)); + if (lpSol[i] > 0.5) { + //--- + //--- convert back from 2D to 3D case (stored in m_assignindexMin) + //--- + p = index2Inv(i); + index = m_assignindexMin[p.first][p.second]; + CoinAssertDebug(index < n_origCols); + + varRedCost += redCostX[index]; + varOrigCost += origCost[index]; + + apInd.push_back(index); + } + } + varRedCost += alpha; // RC = c-uA''s - alpha + + DecompVar *var = new DecompVar(apInd, apEls, varRedCost, varOrigCost); + + // TODO: framework should do all this for the user! + bool doPush = true; + if (checkRC && varRedCost > -1.e-10) // THINK: dualTol? 
+ doPush = false; + else if (checkDup) { + DecompVarList::iterator it; + for (it = vars.begin(); it != vars.end(); it++) { + if ((*it)->isEquivalent(*var)) { + UTIL_DEBUG(m_param.LogDebugLevel, 3, + (*m_osLog) << "\nDuplicate variable, not adding.";); + doPush = false; + break; } - } - - - //--- - //--- solve the LP relaxation of the 2AP (integral polytope) - //--- - m_siAP->messageHandler()->setLogLevel(m_param.LogLpLevel); - m_siAP->initialSolve(); - CoinAssert(m_siAP->isProvenOptimal()); + } + } - //deal with status issues + // just use as sanity check doPush = 0 + // doPush = 0; + if (doPush) { + UTIL_DEBUG(m_param.LogDebugLevel, 5, var->print();); + vars.push_back(var); + } else + UTIL_DELPTR(var); + // vars.push_back(new DecompVar(apInd, apEls, varRedCost, varOrigCost)); - //--- - //--- store the solution as a DecompVar and push into list - //--- - int i; - pair p; - vector apInd; - vector apEls(dimension, 1.0); - apInd.reserve(dimension); - - double varRedCost = 0.0; - double varOrigCost = 0.0; - const double * lpSol = m_siAP->getColSolution(); - for(i = 0; i < m_siAP->getNumCols(); i++){ - CoinAssertDebug( UtilIsZero(lpSol[i], 1.0e-4) || - UtilIsZero(1.0 - lpSol[i], 1.0e-4)); - if(lpSol[i] > 0.5){ - //--- - //--- convert back from 2D to 3D case (stored in m_assignindexMin) - //--- - p = index2Inv(i); - index = m_assignindexMin[p.first][p.second]; - CoinAssertDebug(index < n_origCols); - - varRedCost += redCostX[index]; - varOrigCost += origCost[index]; - - apInd.push_back(index); - } - } - varRedCost += alpha; //RC = c-uA''s - alpha + // TODO: why can't framework do the work of calculating redCost and obj? + // overload var constructor so user can supply or not?, and why does user + // need to know alpha? because they need to know what is acceptable... + // to push? they might want to filter out nonnegative rc + UtilPrintFuncEnd(m_osLog, m_classTag, "APPsolveRelaxed()", + m_param.LogDebugLevel, 2); - DecompVar * var = new DecompVar(apInd, apEls, varRedCost, varOrigCost); - - //TODO: framework should do all this for the user! - bool doPush = true; - if(checkRC && varRedCost > -1.e-10) //THINK: dualTol? - doPush = false; - else if(checkDup){ - DecompVarList::iterator it; - for(it = vars.begin(); it != vars.end(); it++){ - if((*it)->isEquivalent(*var)){ - UTIL_DEBUG(m_param.LogDebugLevel, 3, - (*m_osLog) << "\nDuplicate variable, not adding."; - ); - doPush = false; - break; - } - } - } - - //just use as sanity check doPush = 0 - //doPush = 0; - if(doPush){ - UTIL_DEBUG(m_param.LogDebugLevel, 5, - var->print(); - ); - vars.push_back(var); - } - else - UTIL_DELPTR(var); - - //vars.push_back(new DecompVar(apInd, apEls, varRedCost, varOrigCost)); - - - //TODO: why can't framework do the work of calculating redCost and obj? - //overload var constructor so user can supply or not?, and why does user - //need to know alpha? because they need to know what is acceptable... - //to push? 
they might want to filter out nonnegative rc - - UtilPrintFuncEnd(m_osLog, m_classTag, - "APPsolveRelaxed()", m_param.LogDebugLevel, 2); - - return STAT_FEASIBLE; //think + return STAT_FEASIBLE; // think } - #if 0 //--------------------------------------------------------------------- // //STOP - see old/AAP/LP for both version, x* and DECOMP on s @@ -626,9 +602,8 @@ int AP3_DecompApp::generateCuts(const double * x, #endif //--------------------------------------------------------------------- // -void AP3_DecompApp::printOriginalColumn(const int index, - ostream * os) const { - int i, j, k; - index3Inv(index, i, j, k); - (*os) << "x[ " << i << " , " << j << " , " << k << "]"; +void AP3_DecompApp::printOriginalColumn(const int index, ostream *os) const { + int i, j, k; + index3Inv(index, i, j, k); + (*os) << "x[ " << i << " , " << j << " , " << k << "]"; } diff --git a/Dip/examples/AP3/AP3_Main.cpp b/Dip/examples/AP3/AP3_Main.cpp old mode 100755 new mode 100644 index 2c6b1dfc..c1ed9e85 --- a/Dip/examples/AP3/AP3_Main.cpp +++ b/Dip/examples/AP3/AP3_Main.cpp @@ -13,8 +13,8 @@ //===========================================================================// //===========================================================================// -#include "UtilParameters.h" #include "AP3_DecompApp.h" +#include "UtilParameters.h" #include "AlpsDecompModel.h" #include "AlpsKnowledgeBroker.h" @@ -22,177 +22,179 @@ #include "DecompAlgoPC2.h" #include "DecompAlgoRC.h" -#include "CoinError.hpp" #include "AlpsTime.h" +#include "CoinError.hpp" //===========================================================================// -int main(int argc, char ** argv){ - try{ - +int main(int argc, char **argv) { + try { + //--- //--- create the utility class for parsing parameters //--- - UtilParameters utilParam(argc, argv); - - bool useAlps = utilParam.GetSetting("useAlps", true); - - bool doCut = utilParam.GetSetting("doCut", true); - bool doPrice = utilParam.GetSetting("doPrice", false); - bool doPriceCut = utilParam.GetSetting("doPriceCut", false); - bool doRelaxCut = utilParam.GetSetting("doRelaxCut", false); - - bool doModelI = utilParam.GetSetting("doModelI", true); - bool doModelJ = utilParam.GetSetting("doModelJ", false); - bool doModelK = utilParam.GetSetting("doModelK", false); + UtilParameters utilParam(argc, argv); + + bool useAlps = utilParam.GetSetting("useAlps", true); + + bool doCut = utilParam.GetSetting("doCut", true); + bool doPrice = utilParam.GetSetting("doPrice", false); + bool doPriceCut = utilParam.GetSetting("doPriceCut", false); + bool doRelaxCut = utilParam.GetSetting("doRelaxCut", false); + + bool doModelI = utilParam.GetSetting("doModelI", true); + bool doModelJ = utilParam.GetSetting("doModelJ", false); + bool doModelK = utilParam.GetSetting("doModelK", false); AlpsTimer timer; - double timeSetupReal = 0.0; - double timeSetupCpu = 0.0; - double timeSolveReal = 0.0; - double timeSolveCpu = 0.0; + double timeSetupReal = 0.0; + double timeSetupCpu = 0.0; + double timeSolveReal = 0.0; + double timeSolveCpu = 0.0; timer.start(); //--- //--- create the user application (a DecompApp) //--- - AP3_DecompApp ap3(utilParam); + AP3_DecompApp ap3(utilParam); ap3.createModel(); - + //--- //--- create the algorithm(s) (a DecompAlgo) //--- - DecompAlgoC2 * cut = NULL; - DecompAlgoPC2 * priceI = NULL; - DecompAlgoPC2 * priceJ = NULL; - DecompAlgoPC2 * priceK = NULL; - DecompAlgoPC2 * pcI = NULL; - DecompAlgoPC2 * pcJ = NULL; - DecompAlgoPC2 * pcK = NULL; - DecompAlgoRC * rcI = NULL; - DecompAlgoRC * rcJ = 
NULL; - DecompAlgoRC * rcK = NULL; - - if(doCut){ - cut = new DecompAlgoC2(&ap3, &utilParam); + DecompAlgoC2 *cut = NULL; + DecompAlgoPC2 *priceI = NULL; + DecompAlgoPC2 *priceJ = NULL; + DecompAlgoPC2 *priceK = NULL; + DecompAlgoPC2 *pcI = NULL; + DecompAlgoPC2 *pcJ = NULL; + DecompAlgoPC2 *pcK = NULL; + DecompAlgoRC *rcI = NULL; + DecompAlgoRC *rcJ = NULL; + DecompAlgoRC *rcK = NULL; + + if (doCut) { + cut = new DecompAlgoC2(&ap3, &utilParam); } - if(doPrice){ + if (doPrice) { CoinAssertHint(doModelI || doModelJ || doModelK, - "Error: must pick some base model to price"); - if(doModelI){ - priceI = new DecompAlgoPC2(&ap3, &utilParam, "PRICE", true, - AP3_DecompApp::MODEL_I); + "Error: must pick some base model to price"); + if (doModelI) { + priceI = new DecompAlgoPC2(&ap3, &utilParam, "PRICE", true, + AP3_DecompApp::MODEL_I); } - if(doModelJ){ - priceJ = new DecompAlgoPC2(&ap3, &utilParam, "PRICE", true, - AP3_DecompApp::MODEL_J); + if (doModelJ) { + priceJ = new DecompAlgoPC2(&ap3, &utilParam, "PRICE", true, + AP3_DecompApp::MODEL_J); } - if(doModelK){ - priceK = new DecompAlgoPC2(&ap3, &utilParam, "PRICE", true, - AP3_DecompApp::MODEL_K); + if (doModelK) { + priceK = new DecompAlgoPC2(&ap3, &utilParam, "PRICE", true, + AP3_DecompApp::MODEL_K); } } - - if(doPriceCut){ + + if (doPriceCut) { CoinAssertHint(doModelI || doModelJ || doModelK, - "Error: must pick some base model to price"); - if(doModelI){ - pcI = new DecompAlgoPC2(&ap3, &utilParam, - AP3_DecompApp::MODEL_I); + "Error: must pick some base model to price"); + if (doModelI) { + pcI = new DecompAlgoPC2(&ap3, &utilParam, AP3_DecompApp::MODEL_I); } - if(doModelJ){ - pcJ = new DecompAlgoPC2(&ap3, &utilParam, - AP3_DecompApp::MODEL_J); + if (doModelJ) { + pcJ = new DecompAlgoPC2(&ap3, &utilParam, AP3_DecompApp::MODEL_J); } - if(doModelK){ - pcK = new DecompAlgoPC2(&ap3, &utilParam, - AP3_DecompApp::MODEL_K); + if (doModelK) { + pcK = new DecompAlgoPC2(&ap3, &utilParam, AP3_DecompApp::MODEL_K); } } - if(doRelaxCut){ + if (doRelaxCut) { CoinAssertHint(doModelI || doModelJ || doModelK, - "Error: must pick some base model to price"); - if(doModelI){ - rcI = new DecompAlgoRC(&ap3, &utilParam, - AP3_DecompApp::MODEL_I); + "Error: must pick some base model to price"); + if (doModelI) { + rcI = new DecompAlgoRC(&ap3, &utilParam, AP3_DecompApp::MODEL_I); } - if(doModelJ){ - rcJ = new DecompAlgoRC(&ap3, &utilParam, - AP3_DecompApp::MODEL_J); + if (doModelJ) { + rcJ = new DecompAlgoRC(&ap3, &utilParam, AP3_DecompApp::MODEL_J); } - if(doModelK){ - rcK = new DecompAlgoRC(&ap3, &utilParam, - AP3_DecompApp::MODEL_K); + if (doModelK) { + rcK = new DecompAlgoRC(&ap3, &utilParam, AP3_DecompApp::MODEL_K); } } - if(useAlps){ + if (useAlps) { //--- //--- create the driver AlpsDecomp model //--- AlpsDecompModel alpsModel(utilParam); - if(cut) - alpsModel.addDecompAlgo(cut); - if(priceI) - alpsModel.addDecompAlgo(priceI); - if(priceJ) - alpsModel.addDecompAlgo(priceJ); - if(priceK) - alpsModel.addDecompAlgo(priceK); - if(pcI) - alpsModel.addDecompAlgo(pcI); - if(pcJ) - alpsModel.addDecompAlgo(pcJ); - if(pcK) - alpsModel.addDecompAlgo(pcK); - if(rcI) - alpsModel.addDecompAlgo(rcI); - if(rcJ) - alpsModel.addDecompAlgo(rcJ); - if(rcK) - alpsModel.addDecompAlgo(rcK); + if (cut) + alpsModel.addDecompAlgo(cut); + if (priceI) + alpsModel.addDecompAlgo(priceI); + if (priceJ) + alpsModel.addDecompAlgo(priceJ); + if (priceK) + alpsModel.addDecompAlgo(priceK); + if (pcI) + alpsModel.addDecompAlgo(pcI); + if (pcJ) + alpsModel.addDecompAlgo(pcJ); + if (pcK) + 
alpsModel.addDecompAlgo(pcK); + if (rcI) + alpsModel.addDecompAlgo(rcI); + if (rcJ) + alpsModel.addDecompAlgo(rcJ); + if (rcK) + alpsModel.addDecompAlgo(rcK); timer.stop(); - timeSetupCpu = timer.getCpuTime(); + timeSetupCpu = timer.getCpuTime(); timeSetupReal = timer.getWallClock(); timer.start(); alpsModel.solve(); timer.stop(); - timeSolveCpu = timer.getCpuTime(); + timeSolveCpu = timer.getCpuTime(); timeSolveReal = timer.getWallClock(); //--- //--- sanity check //--- cout << "Instance = " << ap3.getInstanceName() - << " Solution = " << alpsModel.getBestObj() - << " SetupCPU = " << timeSetupCpu - << " SolveCPU = " << timeSolveCpu << endl; + << " Solution = " << alpsModel.getBestObj() + << " SetupCPU = " << timeSetupCpu << " SolveCPU = " << timeSolveCpu + << endl; double diff = alpsModel.getBestObj() - ap3.getKnownOptimalBound(); CoinAssert(UtilIsZero(diff)); - - }else{ + } else { //--- //--- just solve the bounding problem (root node) //--- } - if(cut) delete cut; - if(priceI) delete priceI; - if(priceJ) delete priceJ; - if(priceK) delete priceK; - if(pcI) delete pcI; - if(pcJ) delete pcJ; - if(pcK) delete pcK; - if(rcI) delete rcI; - if(rcJ) delete rcJ; - if(rcK) delete rcK; - } - catch(CoinError & ex){ - cerr << "COIN Exception:" << ex.message() << endl - << " from method " << ex.methodName() << endl - << " from class " << ex.className() << endl; + if (cut) + delete cut; + if (priceI) + delete priceI; + if (priceJ) + delete priceJ; + if (priceK) + delete priceK; + if (pcI) + delete pcI; + if (pcJ) + delete pcJ; + if (pcK) + delete pcK; + if (rcI) + delete rcI; + if (rcJ) + delete rcJ; + if (rcK) + delete rcK; + } catch (CoinError &ex) { + cerr << "COIN Exception:" << ex.message() << endl + << " from method " << ex.methodName() << endl + << " from class " << ex.className() << endl; } -} +} diff --git a/Dip/examples/ATM/ATM_DecompApp.cpp b/Dip/examples/ATM/ATM_DecompApp.cpp index 39fa9a87..e422b65a 100644 --- a/Dip/examples/ATM/ATM_DecompApp.cpp +++ b/Dip/examples/ATM/ATM_DecompApp.cpp @@ -17,258 +17,241 @@ //===========================================================================// void ATM_DecompApp::initializeApp() { - - - UtilPrintFuncBegin(m_osLog, m_classTag, - "initializeApp()", m_appParam.LogLevel, 2); - - //--- - //--- read instance - // - string fileNameA, fileNameD, fileNameAD; - if (m_appParam.DataDir != "") { - fileNameA = m_appParam.DataDir + UtilDirSlash() + m_appParam.DataAtm; - fileNameD = m_appParam.DataDir + UtilDirSlash() + m_appParam.DataDate; - fileNameAD = m_appParam.DataDir + UtilDirSlash() + m_appParam.DataAtmDate; - } else { - fileNameA = m_appParam.DataAtm; - fileNameD = m_appParam.DataDate; - fileNameAD = m_appParam.DataAtmDate; - } - m_instance.readInstance(fileNameA, - fileNameD, - fileNameAD); - - //--- - //--- create models - //--- - createModels(); - UtilPrintFuncEnd(m_osLog, m_classTag, - "initializeApp()", m_appParam.LogLevel, 2); + UtilPrintFuncBegin(m_osLog, m_classTag, "initializeApp()", + m_appParam.LogLevel, 2); + + //--- + //--- read instance + // + string fileNameA, fileNameD, fileNameAD; + if (m_appParam.DataDir != "") { + fileNameA = m_appParam.DataDir + UtilDirSlash() + m_appParam.DataAtm; + fileNameD = m_appParam.DataDir + UtilDirSlash() + m_appParam.DataDate; + fileNameAD = m_appParam.DataDir + UtilDirSlash() + m_appParam.DataAtmDate; + } else { + fileNameA = m_appParam.DataAtm; + fileNameD = m_appParam.DataDate; + fileNameAD = m_appParam.DataAtmDate; + } + m_instance.readInstance(fileNameA, fileNameD, fileNameAD); + + //--- + //--- 
create models + //--- + createModels(); + + UtilPrintFuncEnd(m_osLog, m_classTag, "initializeApp()", m_appParam.LogLevel, + 2); } //===========================================================================// -void ATM_DecompApp::addColumnNamesA(DecompConstraintSet * model, - const string prefix, - const int offset){ - - int a, colIndex; - string colName; - const int nAtms = m_instance.getNAtms(); - colIndex = offset; - for(a = 0; a < nAtms; a++){ - colName = prefix + "(" - + UtilIntToStr(colIndex) + "_" - + m_instance.getAtmName(a) + ")"; - model->colNames.push_back(colName); - colIndex++; - } +void ATM_DecompApp::addColumnNamesA(DecompConstraintSet *model, + const string prefix, const int offset) { + + int a, colIndex; + string colName; + const int nAtms = m_instance.getNAtms(); + colIndex = offset; + for (a = 0; a < nAtms; a++) { + colName = prefix + "(" + UtilIntToStr(colIndex) + "_" + + m_instance.getAtmName(a) + ")"; + model->colNames.push_back(colName); + colIndex++; + } } //===========================================================================// -void ATM_DecompApp::addColumnNamesAT(DecompConstraintSet * model, - const string prefix, - const int offset){ - - int a, t, colIndex; - string colName; - const int nAtms = m_instance.getNAtms(); - const int nSteps = m_appParam.NumSteps; - colIndex = offset; - for(a = 0; a < nAtms; a++){ - if(m_appParam.UseTightModel){ - for(t = 0; t <= nSteps; t++){ - colName = prefix + "(" - + UtilIntToStr(colIndex) + "_" - + m_instance.getAtmName(a) + "," - + UtilIntToStr(t) + ")"; - model->colNames.push_back(colName); - colIndex++; - } +void ATM_DecompApp::addColumnNamesAT(DecompConstraintSet *model, + const string prefix, const int offset) { + + int a, t, colIndex; + string colName; + const int nAtms = m_instance.getNAtms(); + const int nSteps = m_appParam.NumSteps; + colIndex = offset; + for (a = 0; a < nAtms; a++) { + if (m_appParam.UseTightModel) { + for (t = 0; t <= nSteps; t++) { + colName = prefix + "(" + UtilIntToStr(colIndex) + "_" + + m_instance.getAtmName(a) + "," + UtilIntToStr(t) + ")"; + model->colNames.push_back(colName); + colIndex++; } - else{ - for(t = 0; t < nSteps; t++){ - colName = prefix + "(" - + UtilIntToStr(colIndex) + "_" - + m_instance.getAtmName(a) + "," - + UtilIntToStr(t+1) + ")"; - model->colNames.push_back(colName); - colIndex++; - } + } else { + for (t = 0; t < nSteps; t++) { + colName = prefix + "(" + UtilIntToStr(colIndex) + "_" + + m_instance.getAtmName(a) + "," + UtilIntToStr(t + 1) + ")"; + model->colNames.push_back(colName); + colIndex++; } - } + } + } } //===========================================================================// -void ATM_DecompApp::addColumnNamesAD(DecompConstraintSet * model, - const string prefix, - const int offset){ - - int a, d, colIndex; - string colName; - pair adP; - vector::const_iterator vi; - const vector & pairsAD = m_instance.getPairsAD(); - colIndex = offset; - for(vi = pairsAD.begin(); vi != pairsAD.end(); vi++){ - adP = m_instance.getIndexADInv(*vi); - a = adP.first; - d = adP.second; - colName = prefix + "(" - + UtilIntToStr(colIndex) + "_" - + m_instance.getAtmName(a) + "," - + m_instance.getDateName(d) + ")"; - model->colNames.push_back(colName); - colIndex++; - } +void ATM_DecompApp::addColumnNamesAD(DecompConstraintSet *model, + const string prefix, const int offset) { + + int a, d, colIndex; + string colName; + pair adP; + vector::const_iterator vi; + const vector &pairsAD = m_instance.getPairsAD(); + colIndex = offset; + for (vi = pairsAD.begin(); vi != 
pairsAD.end(); vi++) { + adP = m_instance.getIndexADInv(*vi); + a = adP.first; + d = adP.second; + colName = prefix + "(" + UtilIntToStr(colIndex) + "_" + + m_instance.getAtmName(a) + "," + m_instance.getDateName(d) + ")"; + model->colNames.push_back(colName); + colIndex++; + } } //===========================================================================// -void ATM_DecompApp::createModelColumns(DecompConstraintSet * model, - const int atmIndex, - const int dateIndex){ - - //--- - //--- Column bounds: - //--- for a in A, d in D: - //--- f+[a,d], f-[a,d] >= 0 - //--- f-[a,d] <= w[a,d] - //--- v[a,d] in {0,1} - //--- for a in A: - //--- x2[a] in [0,1], x3[a] >= 0 - //--- NOTE: x3 no UB causing unbounded subproblems? try <= 1000 - //--- for a in A, t in T - //--- x1[a,t] in {0,1}, z[a,t] in [0,1] - //--- - //--- If this is for block atmIndex(>=0), - //--- fix all a!=atmIndex in A columns to 0. - //--- - //--- If this is for block dateIndex(>=0), - //--- fix all d!=dateIndex in D columns to 0. - //--- - string colName; - const int nPairs = m_instance.getNPairs(); - const int nAtms = m_instance.getNAtms(); - const int nSteps = m_appParam.NumSteps; - const int nAtmsSteps = getNAtmsSteps(); - const int nCols = numCoreCols(); - - const double * w_ad = m_instance.get_w_ad(); //dense storage - const vector & pairsAD = m_instance.getPairsAD(); - vector::const_iterator vi; - - //--- - //--- set the col upper and lower bounds - //--- - UtilFillN(model->colLB, nCols, 0.0); - UtilFillN(model->colUB, nCols, 1.0); - - int index; - index = getColOffset_fm(); - for(vi = pairsAD.begin(); vi != pairsAD.end(); vi++){ - model->colUB[index] = w_ad[*vi]; - index++; - } - - UtilFillN(&model->colUB[0] + getColOffset_fp(), nPairs, m_infinity); - - //Another case of needing extreme rays?? - //UtilFillN(&model->colUB[0] + getColOffset_x3(), nAtms, m_infinity); - UtilFillN(&model->colUB[0] + getColOffset_x3(), nAtms, 1000.0); - - //--- - //--- if this is for block a, fix all others to 0. - //--- - if(atmIndex >= 0){ - int a, t; - int index_x1 = getColOffset_x1(); - int index_z = getColOffset_z(); - int index_x2 = getColOffset_x2(); - int index_x3 = getColOffset_x3(); - int end = nSteps; - if(m_appParam.UseTightModel) - end++; - for(a = 0; a < nAtms; a++){ - if(a != atmIndex){ - model->colUB[index_x2] = 0.0; - model->colUB[index_x3] = 0.0; - for(t = 0; t < end; t++){ - model->colUB[index_x1++] = 0.0; - model->colUB[index_z ++] = 0.0; - } - } - else{ - for(t = 0; t < end; t++){ - model->activeColumns.push_back(index_x1++); - model->activeColumns.push_back(index_z++); - } - model->activeColumns.push_back(index_x2); - model->activeColumns.push_back(index_x3); - } - index_x2++; - index_x3++; +void ATM_DecompApp::createModelColumns(DecompConstraintSet *model, + const int atmIndex, + const int dateIndex) { + + //--- + //--- Column bounds: + //--- for a in A, d in D: + //--- f+[a,d], f-[a,d] >= 0 + //--- f-[a,d] <= w[a,d] + //--- v[a,d] in {0,1} + //--- for a in A: + //--- x2[a] in [0,1], x3[a] >= 0 + //--- NOTE: x3 no UB causing unbounded subproblems? try <= 1000 + //--- for a in A, t in T + //--- x1[a,t] in {0,1}, z[a,t] in [0,1] + //--- + //--- If this is for block atmIndex(>=0), + //--- fix all a!=atmIndex in A columns to 0. + //--- + //--- If this is for block dateIndex(>=0), + //--- fix all d!=dateIndex in D columns to 0. 
+ //--- + string colName; + const int nPairs = m_instance.getNPairs(); + const int nAtms = m_instance.getNAtms(); + const int nSteps = m_appParam.NumSteps; + const int nAtmsSteps = getNAtmsSteps(); + const int nCols = numCoreCols(); + + const double *w_ad = m_instance.get_w_ad(); // dense storage + const vector &pairsAD = m_instance.getPairsAD(); + vector::const_iterator vi; + + //--- + //--- set the col upper and lower bounds + //--- + UtilFillN(model->colLB, nCols, 0.0); + UtilFillN(model->colUB, nCols, 1.0); + + int index; + index = getColOffset_fm(); + for (vi = pairsAD.begin(); vi != pairsAD.end(); vi++) { + model->colUB[index] = w_ad[*vi]; + index++; + } + + UtilFillN(&model->colUB[0] + getColOffset_fp(), nPairs, m_infinity); + + // Another case of needing extreme rays?? + // UtilFillN(&model->colUB[0] + getColOffset_x3(), nAtms, m_infinity); + UtilFillN(&model->colUB[0] + getColOffset_x3(), nAtms, 1000.0); + + //--- + //--- if this is for block a, fix all others to 0. + //--- + if (atmIndex >= 0) { + int a, t; + int index_x1 = getColOffset_x1(); + int index_z = getColOffset_z(); + int index_x2 = getColOffset_x2(); + int index_x3 = getColOffset_x3(); + int end = nSteps; + if (m_appParam.UseTightModel) + end++; + for (a = 0; a < nAtms; a++) { + if (a != atmIndex) { + model->colUB[index_x2] = 0.0; + model->colUB[index_x3] = 0.0; + for (t = 0; t < end; t++) { + model->colUB[index_x1++] = 0.0; + model->colUB[index_z++] = 0.0; + } + } else { + for (t = 0; t < end; t++) { + model->activeColumns.push_back(index_x1++); + model->activeColumns.push_back(index_z++); + } + model->activeColumns.push_back(index_x2); + model->activeColumns.push_back(index_x3); } - int index_fp = getColOffset_fp(); - int index_fm = getColOffset_fm(); - int index_v = getColOffset_v(); - for(vi = pairsAD.begin(); vi != pairsAD.end(); vi++){ - a = m_instance.getIndexADInv(*vi).first; - if(a != atmIndex){ - model->colUB[index_fp] = 0.0; - model->colUB[index_fm] = 0.0; - model->colUB[index_v] = 0.0; - } - else{ - model->activeColumns.push_back(index_fp); - model->activeColumns.push_back(index_fm); - model->activeColumns.push_back(index_v); - } - index_fp++; - index_fm++; - index_v++; + index_x2++; + index_x3++; + } + int index_fp = getColOffset_fp(); + int index_fm = getColOffset_fm(); + int index_v = getColOffset_v(); + for (vi = pairsAD.begin(); vi != pairsAD.end(); vi++) { + a = m_instance.getIndexADInv(*vi).first; + if (a != atmIndex) { + model->colUB[index_fp] = 0.0; + model->colUB[index_fm] = 0.0; + model->colUB[index_v] = 0.0; + } else { + model->activeColumns.push_back(index_fp); + model->activeColumns.push_back(index_fm); + model->activeColumns.push_back(index_v); } - } - - if(dateIndex >= 0){ - int d; - int index_fp = getColOffset_fm(); - int index_fm = getColOffset_fm(); - int index_v = getColOffset_v(); - for(vi = pairsAD.begin(); vi != pairsAD.end(); vi++){ - d = m_instance.getIndexADInv(*vi).second; - if(d != dateIndex){ - model->colUB[index_fp] = 0.0; - model->colUB[index_fm] = 0.0; - model->colUB[index_v] = 0.0; - } - else{ - model->activeColumns.push_back(index_fp); - model->activeColumns.push_back(index_fm); - model->activeColumns.push_back(index_v); - } - index_fp++; - index_fm++; - index_v++; + index_fp++; + index_fm++; + index_v++; + } + } + + if (dateIndex >= 0) { + int d; + int index_fp = getColOffset_fm(); + int index_fm = getColOffset_fm(); + int index_v = getColOffset_v(); + for (vi = pairsAD.begin(); vi != pairsAD.end(); vi++) { + d = m_instance.getIndexADInv(*vi).second; + if (d != dateIndex) 
{ + model->colUB[index_fp] = 0.0; + model->colUB[index_fm] = 0.0; + model->colUB[index_v] = 0.0; + } else { + model->activeColumns.push_back(index_fp); + model->activeColumns.push_back(index_fm); + model->activeColumns.push_back(index_v); } - } - - //--- - //--- set the indices of the integer variables of model: x1, v - //--- - UtilIotaN(model->integerVars, nAtmsSteps, getColOffset_x1()); - UtilIotaN(model->integerVars, nPairs, getColOffset_v()); - - //--- - //--- set column names for debugging - //--- - addColumnNamesAT(model, "x1", getColOffset_x1()); - addColumnNamesAT(model, "z", getColOffset_z()); - addColumnNamesAD(model, "fp", getColOffset_fp()); - addColumnNamesAD(model, "fm", getColOffset_fm()); - addColumnNamesA (model, "x2", getColOffset_x2()); - addColumnNamesA (model, "x3", getColOffset_x3()); - addColumnNamesAD(model, "v", getColOffset_v()); + index_fp++; + index_fm++; + index_v++; + } + } + + //--- + //--- set the indices of the integer variables of model: x1, v + //--- + UtilIotaN(model->integerVars, nAtmsSteps, getColOffset_x1()); + UtilIotaN(model->integerVars, nPairs, getColOffset_v()); + + //--- + //--- set column names for debugging + //--- + addColumnNamesAT(model, "x1", getColOffset_x1()); + addColumnNamesAT(model, "z", getColOffset_z()); + addColumnNamesAD(model, "fp", getColOffset_fp()); + addColumnNamesAD(model, "fm", getColOffset_fm()); + addColumnNamesA(model, "x2", getColOffset_x2()); + addColumnNamesA(model, "x3", getColOffset_x3()); + addColumnNamesAD(model, "v", getColOffset_v()); #if 0 { @@ -285,1216 +268,1190 @@ void ATM_DecompApp::createModelColumns(DecompConstraintSet * model, } //===========================================================================// -int ATM_DecompApp::createConZtoX(DecompConstraintSet * model, - const int atmIndex){ - - //--- - //--- for a in A, t in T: - //--- z[a,t] = x1[a,t] * x2[a] - //--- <==> - //--- OLD: - //--- for a in A: - //--- sum{t in T} x1[a,t] <= 1 (where, T = 1..n) - //--- for a in A, t in T: - //--- z[a,t] >= 0 <= 1, - //--- z[a,t] <= x1[a,t], - //--- z[a,t] <= x2[a], - //--- z[a,t] >= x1[a,t] + x2[a] - 1. - //--- NEW: - //--- for a in A: - //--- sum{t in T} x1[a,t] = 1 (where, T = 0..n) - //--- sum{t in T} z[a,t] = x2[a] - //--- for a in A, t in T: - //--- z[a,t] >= 0 <= 1, - //--- z[a,t] <= x1[a,t]. - //--- - int t; - int nRows = 0; - const int nSteps = m_appParam.NumSteps; - if(m_appParam.UseTightModel){ - for(t = 0; t <= nSteps; t++){ - CoinPackedVector row; - string strAT = "(a_" + m_instance.getAtmName(atmIndex) - + ",t_" + UtilIntToStr(t) + ")"; - string rowName = "ztox1" + strAT; - row.insert(colIndex_z (atmIndex,t), 1.0); - row.insert(colIndex_x1(atmIndex,t), -1.0); - model->appendRow(row, -m_infinity, 0.0, rowName); - nRows++; - } - CoinPackedVector row2; - string rowName2 = "ztox2(a_" - + m_instance.getAtmName(atmIndex) + ")"; +int ATM_DecompApp::createConZtoX(DecompConstraintSet *model, + const int atmIndex) { + + //--- + //--- for a in A, t in T: + //--- z[a,t] = x1[a,t] * x2[a] + //--- <==> + //--- OLD: + //--- for a in A: + //--- sum{t in T} x1[a,t] <= 1 (where, T = 1..n) + //--- for a in A, t in T: + //--- z[a,t] >= 0 <= 1, + //--- z[a,t] <= x1[a,t], + //--- z[a,t] <= x2[a], + //--- z[a,t] >= x1[a,t] + x2[a] - 1. + //--- NEW: + //--- for a in A: + //--- sum{t in T} x1[a,t] = 1 (where, T = 0..n) + //--- sum{t in T} z[a,t] = x2[a] + //--- for a in A, t in T: + //--- z[a,t] >= 0 <= 1, + //--- z[a,t] <= x1[a,t]. 
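// Editor's illustrative aside, not part of the patch: a minimal standalone
// check of the "NEW" (tight) linearization described in the comment above.
// It assumes x1[t] is binary with sum{t in 0..n} x1[t] = 1 (t = 0 plays the
// role of "no step chosen") and x2 in [0,1]; under sum_t z[t] = x2 and
// 0 <= z[t] <= x1[t], the only feasible z is z[t] = x1[t] * x2, which is the
// product these constraints are meant to model. The names n, pick and the
// 0.25 grid below are hypothetical and exist only for this sketch.
#include <cassert>
#include <cmath>
#include <vector>

int main() {
  const int n = 4; // hypothetical number of discretization steps
  for (int pick = 0; pick <= n; ++pick) {     // index t with x1[t] = 1
    for (double x2 = 0.0; x2 <= 1.0; x2 += 0.25) {
      std::vector<double> x1(n + 1, 0.0), z(n + 1, 0.0);
      x1[pick] = 1.0;
      // z[t] <= x1[t] forces z[t] = 0 for t != pick; sum_t z[t] = x2 then
      // forces z[pick] = x2.
      z[pick] = x2;
      double sumZ = 0.0;
      for (int t = 0; t <= n; ++t) {
        assert(z[t] <= x1[t] + 1e-12);                // z[t] <= x1[t]
        assert(std::fabs(z[t] - x1[t] * x2) < 1e-12); // z[t] = x1[t] * x2
        sumZ += z[t];
      }
      assert(std::fabs(sumZ - x2) < 1e-12);           // sum_t z[t] = x2
    }
  }
  return 0;
}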
+ //--- + int t; + int nRows = 0; + const int nSteps = m_appParam.NumSteps; + if (m_appParam.UseTightModel) { + for (t = 0; t <= nSteps; t++) { + CoinPackedVector row; + string strAT = "(a_" + m_instance.getAtmName(atmIndex) + ",t_" + + UtilIntToStr(t) + ")"; + string rowName = "ztox1" + strAT; + row.insert(colIndex_z(atmIndex, t), 1.0); + row.insert(colIndex_x1(atmIndex, t), -1.0); + model->appendRow(row, -m_infinity, 0.0, rowName); + nRows++; + } + CoinPackedVector row2; + string rowName2 = "ztox2(a_" + m_instance.getAtmName(atmIndex) + ")"; + row2.insert(colIndex_x2(atmIndex), -1.0); + for (t = 0; t <= nSteps; t++) { + row2.insert(colIndex_z(atmIndex, t), 1.0); + } + model->appendRow(row2, 0.0, 0.0, rowName2); + nRows++; + } else { + for (t = 0; t < nSteps; t++) { + CoinPackedVector row1, row2, row3; + string strAT = "(a_" + m_instance.getAtmName(atmIndex) + ",t_" + + UtilIntToStr(t + 1) + ")"; + string rowName1 = "ztox1" + strAT; + string rowName2 = "ztox2" + strAT; + string rowName3 = "ztox3" + strAT; + + row1.insert(colIndex_z(atmIndex, t), 1.0); + row1.insert(colIndex_x1(atmIndex, t), -1.0); + model->appendRow(row1, -m_infinity, 0.0, rowName1); + + row2.insert(colIndex_z(atmIndex, t), 1.0); row2.insert(colIndex_x2(atmIndex), -1.0); - for(t = 0; t <= nSteps; t++){ - row2.insert(colIndex_z (atmIndex,t), 1.0); - } - model->appendRow(row2, 0.0, 0.0, rowName2); - nRows++; - } - else{ - for(t = 0; t < nSteps; t++){ - CoinPackedVector row1, row2, row3; - string strAT = "(a_" + m_instance.getAtmName(atmIndex) - + ",t_" + UtilIntToStr(t+1) + ")"; - string rowName1 = "ztox1" + strAT; - string rowName2 = "ztox2" + strAT; - string rowName3 = "ztox3" + strAT; - - row1.insert(colIndex_z (atmIndex,t), 1.0); - row1.insert(colIndex_x1(atmIndex,t), -1.0); - model->appendRow(row1, -m_infinity, 0.0, rowName1); - - row2.insert(colIndex_z (atmIndex,t), 1.0); - row2.insert(colIndex_x2(atmIndex) , -1.0); - model->appendRow(row2, -m_infinity, 0.0, rowName2); - - row3.insert(colIndex_z (atmIndex,t), 1.0); - row3.insert(colIndex_x1(atmIndex,t), -1.0); - row3.insert(colIndex_x2(atmIndex) , -1.0); - model->appendRow(row3, -1.0, m_infinity, rowName3); - - nRows+=3; - } - } - return nRows; -} + model->appendRow(row2, -m_infinity, 0.0, rowName2); + row3.insert(colIndex_z(atmIndex, t), 1.0); + row3.insert(colIndex_x1(atmIndex, t), -1.0); + row3.insert(colIndex_x2(atmIndex), -1.0); + model->appendRow(row3, -1.0, m_infinity, rowName3); -//===========================================================================// -int ATM_DecompApp::createConPickOne(DecompConstraintSet * model, - const int atmIndex){ - //--- - //--- sum{t in T} x1[a,t] <= 1 - //--- - int t; - CoinPackedVector row; - string rowName = "pickone_x1(a_" + m_instance.getAtmName(atmIndex) + ")"; - if(m_appParam.UseTightModel){ - for(t = 0; t <= m_appParam.NumSteps; t++) - row.insert(colIndex_x1(atmIndex,t), 1.0); - model->appendRow(row, 1.0, 1.0, rowName); - } - else{ - for(t = 0; t < m_appParam.NumSteps; t++) - row.insert(colIndex_x1(atmIndex,t), 1.0); - model->appendRow(row, -m_infinity, 1.0, rowName); - } - return 1; + nRows += 3; + } + } + return nRows; } //===========================================================================// -int ATM_DecompApp::createConCount(DecompConstraintSet * model, - const int atmIndex){ - - //--- - //--- sum{d in D} v[a,d] <= K[a] (for count) - //--- - CoinPackedVector row; - string rowName = "count(a_" + m_instance.getAtmName(atmIndex) + ")"; - - pair adP; - int pairIndex = 0; - const vector & pairsAD = 
m_instance.getPairsAD(); - const double * K_a = m_instance.get_K_a(); - vector::const_iterator vi; - //TODO: this can be faster by storing the incident d's for each a - for(vi = pairsAD.begin(); vi != pairsAD.end(); vi++){ - adP = m_instance.getIndexADInv(*vi); - if(atmIndex != adP.first){ - pairIndex++; - continue; - } - row.insert(colIndex_v(pairIndex), 1.0); - pairIndex++; - } - model->appendRow(row, -m_infinity, K_a[atmIndex], rowName); - return 1; +int ATM_DecompApp::createConPickOne(DecompConstraintSet *model, + const int atmIndex) { + //--- + //--- sum{t in T} x1[a,t] <= 1 + //--- + int t; + CoinPackedVector row; + string rowName = "pickone_x1(a_" + m_instance.getAtmName(atmIndex) + ")"; + if (m_appParam.UseTightModel) { + for (t = 0; t <= m_appParam.NumSteps; t++) + row.insert(colIndex_x1(atmIndex, t), 1.0); + model->appendRow(row, 1.0, 1.0, rowName); + } else { + for (t = 0; t < m_appParam.NumSteps; t++) + row.insert(colIndex_x1(atmIndex, t), 1.0); + model->appendRow(row, -m_infinity, 1.0, rowName); + } + return 1; } //===========================================================================// -DecompConstraintSet * ATM_DecompApp::createModelCore1(bool includeCount){ - - UtilPrintFuncBegin(m_osLog, m_classTag, - "createModelCore1()", m_appParam.LogLevel, 2); - - int a, d, pairIndex; - - pair adP; - vector::const_iterator vi; - - const int nAtms = m_instance.getNAtms(); - const int nDates = m_instance.getNDates(); - const vector & pairsAD = m_instance.getPairsAD(); - const double * B_d = m_instance.get_B_d(); - const int nCols = numCoreCols(); - int nRows = nDates; - - if(includeCount) - nRows += nAtms; - - DecompConstraintSet * model = new DecompConstraintSet(); - CoinAssertHint(model, "Error: Out of Memory"); - - model->M = new CoinPackedMatrix(false, 0.0, 0.0); - CoinAssertHint(model->M, "Error: Out of Memory"); - model->M->setDimensions(0, nCols); - model->reserve(nRows, nCols); - - //--- - //--- for d in D: - //--- sum{a in A} (f+[a,d] - f-[a,d]) <= B[d] - //--- - CoinPackedVector * rowsD = new CoinPackedVector[nDates]; - pairIndex = 0; - for(vi = pairsAD.begin(); vi != pairsAD.end(); vi++){ - adP = m_instance.getIndexADInv(*vi); - a = adP.first; - d = adP.second; - rowsD[d].insert(getColOffset_fp() + pairIndex, 1.0); - rowsD[d].insert(getColOffset_fm() + pairIndex, -1.0); +int ATM_DecompApp::createConCount(DecompConstraintSet *model, + const int atmIndex) { + + //--- + //--- sum{d in D} v[a,d] <= K[a] (for count) + //--- + CoinPackedVector row; + string rowName = "count(a_" + m_instance.getAtmName(atmIndex) + ")"; + + pair adP; + int pairIndex = 0; + const vector &pairsAD = m_instance.getPairsAD(); + const double *K_a = m_instance.get_K_a(); + vector::const_iterator vi; + // TODO: this can be faster by storing the incident d's for each a + for (vi = pairsAD.begin(); vi != pairsAD.end(); vi++) { + adP = m_instance.getIndexADInv(*vi); + if (atmIndex != adP.first) { pairIndex++; - } - - for(d = 0; d < nDates; d++){ - string rowName = "budget(d_" - + m_instance.getDateName(d) + ")"; - model->appendRow(rowsD[d], -m_infinity, B_d[d], rowName); - } - UTIL_DELARR(rowsD); - - if(includeCount){ - //--- for a in A: - //--- sum{d in D} v[a,d] <= K[a] - //--- - for(a = 0; a < nAtms; a++){ - createConCount(model, a); - } - } - - //--- - //--- create model columns - //--- - createModelColumns(model); - - UtilPrintFuncEnd(m_osLog, m_classTag, - "createModelCore1()", m_appParam.LogLevel, 2); - - return model; + continue; + } + row.insert(colIndex_v(pairIndex), 1.0); + pairIndex++; + } + 
model->appendRow(row, -m_infinity, K_a[atmIndex], rowName); + return 1; } //===========================================================================// -DecompConstraintSet * ATM_DecompApp::createModelCore2(){ - - UtilPrintFuncBegin(m_osLog, m_classTag, - "createModelCore2()", m_appParam.LogLevel, 2); - - int a; - const int nAtms = m_instance.getNAtms(); - const int nCols = numCoreCols(); - const int nRows = (2*nAtms); - int rowCnt = 0; - - //--- - //--- A'' (core): - //--- for a in A: - //--- sum{t in T} x1[a,t] <= 1 - //--- sum{d in D} v[a,d] <= K[a] - //--- for a in A, t in T: - //--- z[a,t] <= x1[a,t], - //--- z[a,t] <= x2[a], - //--- z[a,t] >= x1[a,t] + x2[a] - 1. - //--- - - DecompConstraintSet * model = new DecompConstraintSet(); - CoinAssertHint(model, "Error: Out of Memory"); - - //--- - //--- create new matrix - //--- - model->M = new CoinPackedMatrix(false, 0.0, 0.0); - CoinAssertHint(model->M, "Error: Out of Memory"); - model->M->setDimensions(0, nCols); - model->reserve(nRows, nCols); - - //--- - //--- create model constraints - //--- - for(a = 0; a < nAtms; a++){ - rowCnt += createConPickOne(model, a); - rowCnt += createConCount (model, a); - } - printf("nRows = %d, rowCnt = %d\n", nRows, rowCnt); - assert(rowCnt == nRows); - - //--- - //--- create model variables/columns - //--- - createModelColumns(model); - - UtilPrintFuncEnd(m_osLog, m_classTag, - "createModelCore2()", m_appParam.LogLevel, 2); - - return model; +DecompConstraintSet *ATM_DecompApp::createModelCore1(bool includeCount) { + + UtilPrintFuncBegin(m_osLog, m_classTag, "createModelCore1()", + m_appParam.LogLevel, 2); + + int a, d, pairIndex; + + pair adP; + vector::const_iterator vi; + + const int nAtms = m_instance.getNAtms(); + const int nDates = m_instance.getNDates(); + const vector &pairsAD = m_instance.getPairsAD(); + const double *B_d = m_instance.get_B_d(); + const int nCols = numCoreCols(); + int nRows = nDates; + + if (includeCount) + nRows += nAtms; + + DecompConstraintSet *model = new DecompConstraintSet(); + CoinAssertHint(model, "Error: Out of Memory"); + + model->M = new CoinPackedMatrix(false, 0.0, 0.0); + CoinAssertHint(model->M, "Error: Out of Memory"); + model->M->setDimensions(0, nCols); + model->reserve(nRows, nCols); + + //--- + //--- for d in D: + //--- sum{a in A} (f+[a,d] - f-[a,d]) <= B[d] + //--- + CoinPackedVector *rowsD = new CoinPackedVector[nDates]; + pairIndex = 0; + for (vi = pairsAD.begin(); vi != pairsAD.end(); vi++) { + adP = m_instance.getIndexADInv(*vi); + a = adP.first; + d = adP.second; + rowsD[d].insert(getColOffset_fp() + pairIndex, 1.0); + rowsD[d].insert(getColOffset_fm() + pairIndex, -1.0); + pairIndex++; + } + + for (d = 0; d < nDates; d++) { + string rowName = "budget(d_" + m_instance.getDateName(d) + ")"; + model->appendRow(rowsD[d], -m_infinity, B_d[d], rowName); + } + UTIL_DELARR(rowsD); + + if (includeCount) { + //--- for a in A: + //--- sum{d in D} v[a,d] <= K[a] + //--- + for (a = 0; a < nAtms; a++) { + createConCount(model, a); + } + } + + //--- + //--- create model columns + //--- + createModelColumns(model); + + UtilPrintFuncEnd(m_osLog, m_classTag, "createModelCore1()", + m_appParam.LogLevel, 2); + + return model; } //===========================================================================// -DecompConstraintSet * ATM_DecompApp::createModelCoreCount(){ - - UtilPrintFuncBegin(m_osLog, m_classTag, - "createModelCoreCount()", m_appParam.LogLevel, 2); - - int a, nRows; - const int nAtms = m_instance.getNAtms(); - const int nAtmsSteps = getNAtmsSteps(); - 
const int nCols = numCoreCols(); - if(m_appParam.UseTightModel) - nRows = 3*nAtms + nAtmsSteps; - else - nRows = 2*nAtms + 3*nAtmsSteps; - int rowCnt = 0; - - //--- - //--- A'' (core): - //--- for a in A: - //--- sum{d in D} v[a,d] <= K[a] - //--- - - DecompConstraintSet * model = new DecompConstraintSet(); - CoinAssertHint(model, "Error: Out of Memory"); - - //--- - //--- create new matrix - //--- - model->M = new CoinPackedMatrix(false, 0.0, 0.0); - CoinAssertHint(model->M, "Error: Out of Memory"); - model->M->setDimensions(0, nCols); - model->reserve(nRows, nCols); - - //--- - //--- create model constraints - //--- - for(a = 0; a < nAtms; a++){ - rowCnt += createConCount (model, a); - rowCnt += createConZtoX (model, a);//OPT - rowCnt += createConPickOne(model, a);//OPT - } - //THINK - add conZtoX to core and remove from relax? - printf("nRows = %d, rowCnt = %d\n", nRows, rowCnt); - assert(rowCnt == nRows); - - //--- - //--- create model variables/columns - //--- - createModelColumns(model); - - UtilPrintFuncEnd(m_osLog, m_classTag, - "createModelCoreCount()", m_appParam.LogLevel, 2); - - return model; +DecompConstraintSet *ATM_DecompApp::createModelCore2() { + + UtilPrintFuncBegin(m_osLog, m_classTag, "createModelCore2()", + m_appParam.LogLevel, 2); + + int a; + const int nAtms = m_instance.getNAtms(); + const int nCols = numCoreCols(); + const int nRows = (2 * nAtms); + int rowCnt = 0; + + //--- + //--- A'' (core): + //--- for a in A: + //--- sum{t in T} x1[a,t] <= 1 + //--- sum{d in D} v[a,d] <= K[a] + //--- for a in A, t in T: + //--- z[a,t] <= x1[a,t], + //--- z[a,t] <= x2[a], + //--- z[a,t] >= x1[a,t] + x2[a] - 1. + //--- + + DecompConstraintSet *model = new DecompConstraintSet(); + CoinAssertHint(model, "Error: Out of Memory"); + + //--- + //--- create new matrix + //--- + model->M = new CoinPackedMatrix(false, 0.0, 0.0); + CoinAssertHint(model->M, "Error: Out of Memory"); + model->M->setDimensions(0, nCols); + model->reserve(nRows, nCols); + + //--- + //--- create model constraints + //--- + for (a = 0; a < nAtms; a++) { + rowCnt += createConPickOne(model, a); + rowCnt += createConCount(model, a); + } + printf("nRows = %d, rowCnt = %d\n", nRows, rowCnt); + assert(rowCnt == nRows); + + //--- + //--- create model variables/columns + //--- + createModelColumns(model); + + UtilPrintFuncEnd(m_osLog, m_classTag, "createModelCore2()", + m_appParam.LogLevel, 2); + + return model; } //===========================================================================// -DecompConstraintSet * -ATM_DecompApp::createModelRelax1(const int a, - bool includeCount){ - - UtilPrintFuncBegin(m_osLog, m_classTag, - "createModelRelax1()", m_appParam.LogLevel, 2); - - int t, d, nRows, pairIndex; - double rhs, coefA, coefC; - pair adP; - vector::const_iterator vi; - - const int nSteps = m_appParam.NumSteps; - const int nDates = m_instance.getNDates(); - const vector & pairsAD = m_instance.getPairsAD(); - - const double * a_ad = m_instance.get_a_ad(); //dense storage - const double * b_ad = m_instance.get_b_ad(); //dense storage - const double * c_ad = m_instance.get_c_ad(); //dense storage - const double * d_ad = m_instance.get_d_ad(); //dense storage - const double * e_ad = m_instance.get_e_ad(); //dense storage - const double * w_ad = m_instance.get_w_ad(); //dense storage - - const int nCols = numCoreCols(); - int nRowsMax = nDates + 1 + (3*nSteps); - if(includeCount) - nRowsMax += nDates + 1; - - //--- - //--- for a in A, d in D: - //--- f+[a,d] - f-[a,d] = - //--- a[a,d] sum{t in T} (t/n) x1[a,t] + - 
//--- b[a,d] x2[a] + - //--- c[a,d] sum{t in T} (t/n) z[a,t] + - //--- d[a,d] x3[a] + - //--- e[a,d] - //--- f-[a,d] <= w[a,d] * v[a,d] (for count) - //--- for a in A: - //--- sum{t in T} x1[a,t] <= 1 - //--- sum{d in D} v[a,d] <= K[a] (for count) [OPT] - - //Probably not... - //THINK: can't we just post-process this? z=x1*x2 - //--- for a in A, t in T: - //--- z[a,t] <= x1[a,t], - //--- z[a,t] <= x2[a], - //--- z[a,t] >= x1[a,t] + x2[a] - 1. - //--- - - DecompConstraintSet * model = new DecompConstraintSet(); - CoinAssertHint(model, "Error: Out of Memory"); - - model->M = new CoinPackedMatrix(false, 0.0, 0.0); - CoinAssertHint(model->M, "Error: Out of Memory"); - model->M->setDimensions(0, nCols); - model->M->reserve(nRowsMax, nCols); - model->rowLB.reserve(nRowsMax); - model->rowUB.reserve(nRowsMax); - - //--- - //--- for a in A, d in D: - //--- f+[a,d] - f-[a,d] = - //--- a[a,d] sum{t in T} (t/n) x1[a,t] + - //--- b[a,d] x2[a] + - //--- c[a,d] sum{t in T} (t/n) z[a,t] + - //--- d[a,d] x3[a] + - //--- e[a,d] - //--- f-[a,d] <= w[a,d] * v[a,d] (for count) - //--- - nRows = 0; - pairIndex = 0; - for(vi = pairsAD.begin(); vi != pairsAD.end(); vi++){ - adP = m_instance.getIndexADInv(*vi); - if(a != adP.first){ - pairIndex++; - continue; - } - d = adP.second; - - CoinPackedVector row; - string rowName = "demand_def(a_" - + m_instance.getAtmName(a) + ",d_" - + m_instance.getDateName(d) + ")"; - - row.insert(colIndex_fp(pairIndex), 1.0); - row.insert(colIndex_fm(pairIndex), -1.0); - coefA = -a_ad[*vi] / nSteps; - coefC = -c_ad[*vi] / nSteps; - if(m_appParam.UseTightModel){ - for(t = 0; t <= nSteps; t++){ - row.insert(colIndex_x1(a,t), t * coefA); - row.insert(colIndex_z (a,t), t * coefC); - } - } - else{ - for(t = 0; t < nSteps; t++){ - row.insert(colIndex_x1(a,t), (t+1) * coefA); - row.insert(colIndex_z (a,t), (t+1) * coefC); - } - } - row.insert(colIndex_x2(a), -b_ad[*vi]); - row.insert(colIndex_x3(a), -d_ad[*vi]); - - rhs = e_ad[*vi]; - model->M->appendRow(row); - model->rowLB.push_back(rhs); - model->rowUB.push_back(rhs); - model->rowNames.push_back(rowName); - nRows++; - - CoinPackedVector rowLink; - string rowNameLink = "linkv(a_" - + m_instance.getAtmName(a) + ",d_" - + m_instance.getDateName(d) + ")"; - rowLink.insert(colIndex_fm(pairIndex), 1.0); - rowLink.insert(colIndex_v (pairIndex), -w_ad[*vi]); - model->M->appendRow(rowLink); - model->rowLB.push_back(-m_infinity); - model->rowUB.push_back(0.0); - model->rowNames.push_back(rowNameLink); - nRows++; - - pairIndex++; - } - - //--- - //--- for a in A: - //--- sum{t in T} x1[a,t] <= 1 - //--- sum{d in D} v[a,d] <= K[a] (for count) - //--- - nRows += createConPickOne(model, a); - if(includeCount) - nRows += createConCount(model, a); - - //--- - //--- for a in A, t in T: - //--- z[a,t] <= x1[a,t], - //--- z[a,t] <= x2[a], - //--- z[a,t] >= x1[a,t] + x2[a] - 1. 
- //--- - nRows += createConZtoX(model, a); - - //--- - //--- create model columns - //--- - createModelColumns(model, a); - - UtilPrintFuncEnd(m_osLog, m_classTag, - "createModelRelax1()", m_appParam.LogLevel, 2); - - return model; +DecompConstraintSet *ATM_DecompApp::createModelCoreCount() { + + UtilPrintFuncBegin(m_osLog, m_classTag, "createModelCoreCount()", + m_appParam.LogLevel, 2); + + int a, nRows; + const int nAtms = m_instance.getNAtms(); + const int nAtmsSteps = getNAtmsSteps(); + const int nCols = numCoreCols(); + if (m_appParam.UseTightModel) + nRows = 3 * nAtms + nAtmsSteps; + else + nRows = 2 * nAtms + 3 * nAtmsSteps; + int rowCnt = 0; + + //--- + //--- A'' (core): + //--- for a in A: + //--- sum{d in D} v[a,d] <= K[a] + //--- + + DecompConstraintSet *model = new DecompConstraintSet(); + CoinAssertHint(model, "Error: Out of Memory"); + + //--- + //--- create new matrix + //--- + model->M = new CoinPackedMatrix(false, 0.0, 0.0); + CoinAssertHint(model->M, "Error: Out of Memory"); + model->M->setDimensions(0, nCols); + model->reserve(nRows, nCols); + + //--- + //--- create model constraints + //--- + for (a = 0; a < nAtms; a++) { + rowCnt += createConCount(model, a); + rowCnt += createConZtoX(model, a); // OPT + rowCnt += createConPickOne(model, a); // OPT + } + // THINK - add conZtoX to core and remove from relax? + printf("nRows = %d, rowCnt = %d\n", nRows, rowCnt); + assert(rowCnt == nRows); + + //--- + //--- create model variables/columns + //--- + createModelColumns(model); + + UtilPrintFuncEnd(m_osLog, m_classTag, "createModelCoreCount()", + m_appParam.LogLevel, 2); + + return model; } //===========================================================================// -DecompConstraintSet * ATM_DecompApp::createModelRelax2(const int d){ - - UtilPrintFuncBegin(m_osLog, m_classTag, - "createModelRelax2()", m_appParam.LogLevel, 2); - - int a, t, nRows, pairIndex, nRowsMax; - double rhs, coefA, coefC; - pair adP; - vector::const_iterator vi; - - const int nSteps = m_appParam.NumSteps; - const int nAtms = m_instance.getNAtms(); - const vector & pairsAD = m_instance.getPairsAD(); - - const double * a_ad = m_instance.get_a_ad(); //dense storage - const double * b_ad = m_instance.get_b_ad(); //dense storage - const double * c_ad = m_instance.get_c_ad(); //dense storage - const double * d_ad = m_instance.get_d_ad(); //dense storage - const double * e_ad = m_instance.get_e_ad(); //dense storage - const double * w_ad = m_instance.get_w_ad(); //dense storage - const double * B_d = m_instance.get_B_d(); - - const int nCols = numCoreCols(); - if(m_appParam.UseTightModel) - nRowsMax = 3*nAtms + 1 + getNAtmsSteps(); - else - nRowsMax = 2*nAtms + 1 + (3*getNAtmsSteps()); - - //--- - //--- A'[d] for d in D (independent blocks) - //--- for a in A - //--- f+[a,d] - f-[a,d] = - //--- a[a,d] sum{t in T} (t/n) x1[a,t] + - //--- b[a,d] x2[a] + - //--- c[a,d] sum{t in T} (t/n) z[a,t] + - //--- d[a,d] x3[a] + - //--- e[a,d] - //--- f-[a,d] <= w[a,d] * v[a,d] - //--- sum{a in A} (f+[a,d] - f-[a,d]) <= B[d] - //--- for a in A, t in T: - //--- z[a,t] <= x1[a,t], - //--- z[a,t] <= x2[a], - //--- z[a,t] >= x1[a,t] + x2[a] - 1. 
- //--- - - DecompConstraintSet * model = new DecompConstraintSet(); - CoinAssertHint(model, "Error: Out of Memory"); - - model->M = new CoinPackedMatrix(false, 0.0, 0.0); - CoinAssertHint(model->M, "Error: Out of Memory"); - model->M->setDimensions(0, nCols); - model->reserve(nRowsMax, nCols); - - CoinPackedVector rowBudget; - nRows = 0; - pairIndex = 0; - for(vi = pairsAD.begin(); vi != pairsAD.end(); vi++){ - adP = m_instance.getIndexADInv(*vi); - if(d != adP.second){ - pairIndex++; - continue; - } - a = adP.first; - - //--- - //--- sum{a in A} (f+[a,d] - f-[a,d]) <= B[d] - //--- - rowBudget.insert(getColOffset_fp() + pairIndex, 1.0); - rowBudget.insert(getColOffset_fm() + pairIndex, -1.0); - - - //--- - //--- f+[a,d] - f-[a,d] = - //--- a[a,d] sum{t in T} (t/n) x1[a,t] + - //--- b[a,d] x2[a] + - //--- c[a,d] sum{t in T} (t/n) z[a,t] + - //--- d[a,d] x3[a] + - //--- e[a,d] - //--- - CoinPackedVector row; - string rowName = "demand_def(a_" - + m_instance.getAtmName(a) + ",d_" - + m_instance.getDateName(d) + ")"; - row.insert(colIndex_fp(pairIndex), 1.0); - row.insert(colIndex_fm(pairIndex), -1.0); - coefA = -a_ad[*vi] / nSteps; - coefC = -c_ad[*vi] / nSteps; - if(m_appParam.UseTightModel){ - for(t = 0; t <= nSteps; t++){ - row.insert(colIndex_x1(a,t), t * coefA); - row.insert(colIndex_z (a,t), t * coefC); - } +DecompConstraintSet *ATM_DecompApp::createModelRelax1(const int a, + bool includeCount) { + + UtilPrintFuncBegin(m_osLog, m_classTag, "createModelRelax1()", + m_appParam.LogLevel, 2); + + int t, d, nRows, pairIndex; + double rhs, coefA, coefC; + pair adP; + vector::const_iterator vi; + + const int nSteps = m_appParam.NumSteps; + const int nDates = m_instance.getNDates(); + const vector &pairsAD = m_instance.getPairsAD(); + + const double *a_ad = m_instance.get_a_ad(); // dense storage + const double *b_ad = m_instance.get_b_ad(); // dense storage + const double *c_ad = m_instance.get_c_ad(); // dense storage + const double *d_ad = m_instance.get_d_ad(); // dense storage + const double *e_ad = m_instance.get_e_ad(); // dense storage + const double *w_ad = m_instance.get_w_ad(); // dense storage + + const int nCols = numCoreCols(); + int nRowsMax = nDates + 1 + (3 * nSteps); + if (includeCount) + nRowsMax += nDates + 1; + + //--- + //--- for a in A, d in D: + //--- f+[a,d] - f-[a,d] = + //--- a[a,d] sum{t in T} (t/n) x1[a,t] + + //--- b[a,d] x2[a] + + //--- c[a,d] sum{t in T} (t/n) z[a,t] + + //--- d[a,d] x3[a] + + //--- e[a,d] + //--- f-[a,d] <= w[a,d] * v[a,d] (for count) + //--- for a in A: + //--- sum{t in T} x1[a,t] <= 1 + //--- sum{d in D} v[a,d] <= K[a] (for count) [OPT] + + // Probably not... + // THINK: can't we just post-process this? z=x1*x2 + //--- for a in A, t in T: + //--- z[a,t] <= x1[a,t], + //--- z[a,t] <= x2[a], + //--- z[a,t] >= x1[a,t] + x2[a] - 1. 
+ //--- + + DecompConstraintSet *model = new DecompConstraintSet(); + CoinAssertHint(model, "Error: Out of Memory"); + + model->M = new CoinPackedMatrix(false, 0.0, 0.0); + CoinAssertHint(model->M, "Error: Out of Memory"); + model->M->setDimensions(0, nCols); + model->M->reserve(nRowsMax, nCols); + model->rowLB.reserve(nRowsMax); + model->rowUB.reserve(nRowsMax); + + //--- + //--- for a in A, d in D: + //--- f+[a,d] - f-[a,d] = + //--- a[a,d] sum{t in T} (t/n) x1[a,t] + + //--- b[a,d] x2[a] + + //--- c[a,d] sum{t in T} (t/n) z[a,t] + + //--- d[a,d] x3[a] + + //--- e[a,d] + //--- f-[a,d] <= w[a,d] * v[a,d] (for count) + //--- + nRows = 0; + pairIndex = 0; + for (vi = pairsAD.begin(); vi != pairsAD.end(); vi++) { + adP = m_instance.getIndexADInv(*vi); + if (a != adP.first) { + pairIndex++; + continue; + } + d = adP.second; + + CoinPackedVector row; + string rowName = "demand_def(a_" + m_instance.getAtmName(a) + ",d_" + + m_instance.getDateName(d) + ")"; + + row.insert(colIndex_fp(pairIndex), 1.0); + row.insert(colIndex_fm(pairIndex), -1.0); + coefA = -a_ad[*vi] / nSteps; + coefC = -c_ad[*vi] / nSteps; + if (m_appParam.UseTightModel) { + for (t = 0; t <= nSteps; t++) { + row.insert(colIndex_x1(a, t), t * coefA); + row.insert(colIndex_z(a, t), t * coefC); } - else{ - for(t = 0; t < nSteps; t++){ - row.insert(colIndex_x1(a,t), (t+1) * coefA); - row.insert(colIndex_z (a,t), (t+1) * coefC); - } + } else { + for (t = 0; t < nSteps; t++) { + row.insert(colIndex_x1(a, t), (t + 1) * coefA); + row.insert(colIndex_z(a, t), (t + 1) * coefC); } - row.insert(colIndex_x2(a), -b_ad[*vi]); - row.insert(colIndex_x3(a), -d_ad[*vi]); - - rhs = e_ad[*vi]; - model->appendRow(row, rhs, rhs, rowName); - nRows++; - - //--- - //--- f-[a,d] <= w[a,d] * v[a,d] - //--- - CoinPackedVector rowLink; - string rowNameLink = "linkv(a_" - + m_instance.getAtmName(a) + ",d_" - + m_instance.getDateName(d) + ")"; - rowLink.insert(colIndex_fm(pairIndex), 1.0); - rowLink.insert(colIndex_v (pairIndex), -w_ad[*vi]); - model->appendRow(rowLink, -m_infinity, 0.0, rowNameLink); - nRows++; - - pairIndex++; - } - - string rowNameBudget = "budget(d_" + m_instance.getDateName(d) + ")"; - model->appendRow(rowBudget, -m_infinity, B_d[d], rowNameBudget); - nRows++; - - for(a = 0; a < nAtms; a++) - nRows += createConZtoX(model, a); - assert(nRows <= nRowsMax); - - //--- - //--- create model columns - //--- - createModelColumns(model, -1, d); - - UtilPrintFuncEnd(m_osLog, m_classTag, - "createModelRelax2()", m_appParam.LogLevel, 2); - - return model; + } + row.insert(colIndex_x2(a), -b_ad[*vi]); + row.insert(colIndex_x3(a), -d_ad[*vi]); + + rhs = e_ad[*vi]; + model->M->appendRow(row); + model->rowLB.push_back(rhs); + model->rowUB.push_back(rhs); + model->rowNames.push_back(rowName); + nRows++; + + CoinPackedVector rowLink; + string rowNameLink = "linkv(a_" + m_instance.getAtmName(a) + ",d_" + + m_instance.getDateName(d) + ")"; + rowLink.insert(colIndex_fm(pairIndex), 1.0); + rowLink.insert(colIndex_v(pairIndex), -w_ad[*vi]); + model->M->appendRow(rowLink); + model->rowLB.push_back(-m_infinity); + model->rowUB.push_back(0.0); + model->rowNames.push_back(rowNameLink); + nRows++; + + pairIndex++; + } + + //--- + //--- for a in A: + //--- sum{t in T} x1[a,t] <= 1 + //--- sum{d in D} v[a,d] <= K[a] (for count) + //--- + nRows += createConPickOne(model, a); + if (includeCount) + nRows += createConCount(model, a); + + //--- + //--- for a in A, t in T: + //--- z[a,t] <= x1[a,t], + //--- z[a,t] <= x2[a], + //--- z[a,t] >= x1[a,t] + x2[a] - 1. 
+ //--- + nRows += createConZtoX(model, a); + + //--- + //--- create model columns + //--- + createModelColumns(model, a); + + UtilPrintFuncEnd(m_osLog, m_classTag, "createModelRelax1()", + m_appParam.LogLevel, 2); + + return model; } - //===========================================================================// -DecompConstraintSet * ATM_DecompApp::createModelRelaxCount(){ - - UtilPrintFuncBegin(m_osLog, m_classTag, - "createModelRelaxCount()", m_appParam.LogLevel, 2); - - int a, d, t, nRows, pairIndex, nRowsMax; - double rhs, coefA, coefC; - pair adP; - vector::const_iterator vi; - - const int nSteps = m_appParam.NumSteps; - const int nAtms = m_instance.getNAtms(); - const int nDates = m_instance.getNDates(); - const int nPairs = m_instance.getNPairs(); - const int nAtmsSteps = getNAtmsSteps(); - const vector & pairsAD = m_instance.getPairsAD(); - - const double * a_ad = m_instance.get_a_ad(); //dense storage - const double * b_ad = m_instance.get_b_ad(); //dense storage - const double * c_ad = m_instance.get_c_ad(); //dense storage - const double * d_ad = m_instance.get_d_ad(); //dense storage - const double * e_ad = m_instance.get_e_ad(); //dense storage - const double * w_ad = m_instance.get_w_ad(); //dense storage - const double * B_d = m_instance.get_B_d(); - - const int nCols = numCoreCols(); - if(m_appParam.UseTightModel) - nRowsMax = nDates + 2*nAtms + nAtmsSteps + 2*nPairs; - else - nRowsMax = nDates + nAtms + 3*nAtmsSteps + 2*nPairs; - - //TODO: - // try the nested idea - so master has ConZtoX and we price without that - // but we solve with gap the harder oracle with those constraints - - //--- - //--- A' (relax): - //--- for d in D: - //--- sum{a in A} (f+[a,d] - f-[a,d]) <= B[d] <---- makes it hard - //OPT - //--- for a in A: - //--- sum{t in T} x1[a,t] <= 1 - //OPT - //--- for a in A, t in T: - //--- z[a,t] <= x1[a,t], - //--- z[a,t] <= x2[a], - //--- z[a,t] >= x1[a,t] + x2[a] - 1. 
- - //--- for a in A, d in D: - //--- f+[a,d] - f-[a,d] = - //--- a[a,d] sum{t in T} (t/n) x1[a,t] + - //--- b[a,d] x2[a] + - //--- c[a,d] sum{t in T} (t/n) z[a,t] + - //--- d[a,d] x3[a] + - //--- e[a,d] - //--- f-[a,d] <= w[a,d] * v[a,d] - //--- - - DecompConstraintSet * model = new DecompConstraintSet(); - CoinAssertHint(model, "Error: Out of Memory"); - - model->M = new CoinPackedMatrix(false, 0.0, 0.0); - CoinAssertHint(model->M, "Error: Out of Memory"); - model->M->setDimensions(0, nCols); - model->reserve(nRowsMax, nCols); - - //--- - //--- create nDates empty rowBudget packed vectors - //--- - vector rowBudget; - for(d = 0; d < nDates; d++){ - CoinPackedVector row; - rowBudget.push_back(row); - } - - nRows = 0; - pairIndex = 0; - for(vi = pairsAD.begin(); vi != pairsAD.end(); vi++){ - adP = m_instance.getIndexADInv(*vi); - a = adP.first; - d = adP.second; - - //--- - //--- sum{a in A} (f+[a,d] - f-[a,d]) <= B[d] - //--- - rowBudget[d].insert(getColOffset_fp() + pairIndex, 1.0); - rowBudget[d].insert(getColOffset_fm() + pairIndex, -1.0); - - - //--- - //--- f+[a,d] - f-[a,d] = - //--- a[a,d] sum{t in T} (t/n) x1[a,t] + - //--- b[a,d] x2[a] + - //--- c[a,d] sum{t in T} (t/n) z[a,t] + - //--- d[a,d] x3[a] + - //--- e[a,d] - //--- - CoinPackedVector row; - string rowName = "demand_def(a_" - + m_instance.getAtmName(a) + ",d_" - + m_instance.getDateName(d) + ")"; - row.insert(colIndex_fp(pairIndex), 1.0); - row.insert(colIndex_fm(pairIndex), -1.0); - coefA = -a_ad[*vi] / nSteps; - coefC = -c_ad[*vi] / nSteps; - if(m_appParam.UseTightModel){ - for(t = 0; t <= nSteps; t++){ - row.insert(colIndex_x1(a,t), t * coefA); - row.insert(colIndex_z (a,t), t * coefC); - } +DecompConstraintSet *ATM_DecompApp::createModelRelax2(const int d) { + + UtilPrintFuncBegin(m_osLog, m_classTag, "createModelRelax2()", + m_appParam.LogLevel, 2); + + int a, t, nRows, pairIndex, nRowsMax; + double rhs, coefA, coefC; + pair adP; + vector::const_iterator vi; + + const int nSteps = m_appParam.NumSteps; + const int nAtms = m_instance.getNAtms(); + const vector &pairsAD = m_instance.getPairsAD(); + + const double *a_ad = m_instance.get_a_ad(); // dense storage + const double *b_ad = m_instance.get_b_ad(); // dense storage + const double *c_ad = m_instance.get_c_ad(); // dense storage + const double *d_ad = m_instance.get_d_ad(); // dense storage + const double *e_ad = m_instance.get_e_ad(); // dense storage + const double *w_ad = m_instance.get_w_ad(); // dense storage + const double *B_d = m_instance.get_B_d(); + + const int nCols = numCoreCols(); + if (m_appParam.UseTightModel) + nRowsMax = 3 * nAtms + 1 + getNAtmsSteps(); + else + nRowsMax = 2 * nAtms + 1 + (3 * getNAtmsSteps()); + + //--- + //--- A'[d] for d in D (independent blocks) + //--- for a in A + //--- f+[a,d] - f-[a,d] = + //--- a[a,d] sum{t in T} (t/n) x1[a,t] + + //--- b[a,d] x2[a] + + //--- c[a,d] sum{t in T} (t/n) z[a,t] + + //--- d[a,d] x3[a] + + //--- e[a,d] + //--- f-[a,d] <= w[a,d] * v[a,d] + //--- sum{a in A} (f+[a,d] - f-[a,d]) <= B[d] + //--- for a in A, t in T: + //--- z[a,t] <= x1[a,t], + //--- z[a,t] <= x2[a], + //--- z[a,t] >= x1[a,t] + x2[a] - 1. 
+ //--- + + DecompConstraintSet *model = new DecompConstraintSet(); + CoinAssertHint(model, "Error: Out of Memory"); + + model->M = new CoinPackedMatrix(false, 0.0, 0.0); + CoinAssertHint(model->M, "Error: Out of Memory"); + model->M->setDimensions(0, nCols); + model->reserve(nRowsMax, nCols); + + CoinPackedVector rowBudget; + nRows = 0; + pairIndex = 0; + for (vi = pairsAD.begin(); vi != pairsAD.end(); vi++) { + adP = m_instance.getIndexADInv(*vi); + if (d != adP.second) { + pairIndex++; + continue; + } + a = adP.first; + + //--- + //--- sum{a in A} (f+[a,d] - f-[a,d]) <= B[d] + //--- + rowBudget.insert(getColOffset_fp() + pairIndex, 1.0); + rowBudget.insert(getColOffset_fm() + pairIndex, -1.0); + + //--- + //--- f+[a,d] - f-[a,d] = + //--- a[a,d] sum{t in T} (t/n) x1[a,t] + + //--- b[a,d] x2[a] + + //--- c[a,d] sum{t in T} (t/n) z[a,t] + + //--- d[a,d] x3[a] + + //--- e[a,d] + //--- + CoinPackedVector row; + string rowName = "demand_def(a_" + m_instance.getAtmName(a) + ",d_" + + m_instance.getDateName(d) + ")"; + row.insert(colIndex_fp(pairIndex), 1.0); + row.insert(colIndex_fm(pairIndex), -1.0); + coefA = -a_ad[*vi] / nSteps; + coefC = -c_ad[*vi] / nSteps; + if (m_appParam.UseTightModel) { + for (t = 0; t <= nSteps; t++) { + row.insert(colIndex_x1(a, t), t * coefA); + row.insert(colIndex_z(a, t), t * coefC); } - else{ - for(t = 0; t < nSteps; t++){ - row.insert(colIndex_x1(a,t), (t+1) * coefA); - row.insert(colIndex_z (a,t), (t+1) * coefC); - } + } else { + for (t = 0; t < nSteps; t++) { + row.insert(colIndex_x1(a, t), (t + 1) * coefA); + row.insert(colIndex_z(a, t), (t + 1) * coefC); } - row.insert(colIndex_x2(a), -b_ad[*vi]); - row.insert(colIndex_x3(a), -d_ad[*vi]); - - rhs = e_ad[*vi]; - model->appendRow(row, rhs, rhs, rowName); - nRows++; - - //--- - //--- f-[a,d] <= w[a,d] * v[a,d] - //--- - CoinPackedVector rowLink; - string rowNameLink = "linkv(a_" - + m_instance.getAtmName(a) + ",d_" - + m_instance.getDateName(d) + ")"; - rowLink.insert(colIndex_fm(pairIndex), 1.0); - rowLink.insert(colIndex_v (pairIndex), -w_ad[*vi]); - model->appendRow(rowLink, -m_infinity, 0.0, rowNameLink); - nRows++; - - pairIndex++; - } - - for(d = 0; d < nDates; d++){ - string rowNameBudget = "budget(d_" + m_instance.getDateName(d) + ")"; - model->appendRow(rowBudget[d], -m_infinity, B_d[d], rowNameBudget); - nRows++; - } - - //--- - //--- for a in A: - //--- sum{t in T} x1[a,t] <= 1 - //--- for a in A, t in T: - //--- z[a,t] <= x1[a,t], - //--- z[a,t] <= x2[a], - //--- z[a,t] >= x1[a,t] + x2[a] - 1. - //--- - //THINK: if you use this, shouldn't we postprocess z=x1*x2? - // putting those constaints in master are unneccessary... - // in fact, why not just do that in block version too... 
- //for(a = 0; a < nAtms; a++){ - //nRows += createConZtoX(model, a); - //nRows += createConPickOne(model, a); - //} - assert(nRows <= nRowsMax); - - //--- - //--- create model columns - //--- - createModelColumns(model); - - UtilPrintFuncEnd(m_osLog, m_classTag, - "createModelRelaxCount()", m_appParam.LogLevel, 2); - - return model; + } + row.insert(colIndex_x2(a), -b_ad[*vi]); + row.insert(colIndex_x3(a), -d_ad[*vi]); + + rhs = e_ad[*vi]; + model->appendRow(row, rhs, rhs, rowName); + nRows++; + + //--- + //--- f-[a,d] <= w[a,d] * v[a,d] + //--- + CoinPackedVector rowLink; + string rowNameLink = "linkv(a_" + m_instance.getAtmName(a) + ",d_" + + m_instance.getDateName(d) + ")"; + rowLink.insert(colIndex_fm(pairIndex), 1.0); + rowLink.insert(colIndex_v(pairIndex), -w_ad[*vi]); + model->appendRow(rowLink, -m_infinity, 0.0, rowNameLink); + nRows++; + + pairIndex++; + } + + string rowNameBudget = "budget(d_" + m_instance.getDateName(d) + ")"; + model->appendRow(rowBudget, -m_infinity, B_d[d], rowNameBudget); + nRows++; + + for (a = 0; a < nAtms; a++) + nRows += createConZtoX(model, a); + assert(nRows <= nRowsMax); + + //--- + //--- create model columns + //--- + createModelColumns(model, -1, d); + + UtilPrintFuncEnd(m_osLog, m_classTag, "createModelRelax2()", + m_appParam.LogLevel, 2); + + return model; } //===========================================================================// -void ATM_DecompApp::createModels(){ - - //--- - //--- This function does the work to create the different models - //--- that will be used. This memory is owned by the user. It will - //--- be passed to the application interface and used by the algorithms. - //--- - UtilPrintFuncBegin(m_osLog, m_classTag, - "APPcreateModel()", m_appParam.LogLevel, 2); - - //--- - //--- The original problem is in the form of a MINLP: - //--- min sum{a in A, d in D} | f[a,d] | - //--- s.t. - //--- for a in A, d in D: - //--- f[a,d] = - //--- a[a,d] x1[a] + - //--- b[a,d] x2[a] + - //--- c[a,d] x1[a] x2[a] + - //--- d[a,d] x3[a] + - //--- e[a,d] - //--- for d in D: - //--- sum{a in A} f[a,d] <= B[d] - //--- for a in A: - //--- |{d in D : f[a,d] < 0} | <= K[a] (notice strict ineq) - //--- for a in A, d in D: - //--- f[a,d] free - //--- for a in A: - //--- x1[a], x2[a] in [0,1], x3[a] >= 0 - //--- - - //--- - //--- Discretization of continuous variable. - //--- n = number of steps - //--- T = {1, ..., n} - //--- sum{t in T} (t/n) * x1[a,t] = x1[a], for a in A - //--- sum{t in T} x1[a,t] <= 1 , for a in A - //--- so, if n=10, x1[a] in {0,0.1,0.2,...,1.0}. - //--- - - //--- - //--- Linearization of product of a binary and a continuous. - //--- For a in A, t in T: - //--- z[a,t] = x1[a,t] * x2[a] - //--- <==> - //--- z[a,t] >= 0 <= 1, - //--- z[a,t] <= x1[a,t], - //--- z[a,t] <= x2[a], - //--- z[a,t] >= x1[a,t] + x2[a] - 1. - //--- - - //--- - //--- Linearization of the absolute value. - //--- For a in A, d in D: - //--- f+[a,d], f-[a,d] >= 0 - //--- f[a,d] = f+[a,d] - f-[a,d] - //--- |f[a,d]|= f+[a,d] + f-[a,d] - //--- - - //--- - //--- To model the count constraints. - //--- For a in A: - //--- |{d in D : f[a,d] < 0}| <= K[a] - //--- <==> - //--- |{d in D : f+[a,d] - f-[a,d] < 0}| <= K[a] - //--- - //--- At optimality, if f[a,d] != 0, then either - //--- f+[a,d] > 0 and f-[a,d] = 0, or - //--- f+[a,d] = 0 and f-[a,d] > 0. - //--- So, to count the cases when f[a,d] < 0, we can restrict - //--- attention to the cases where f-[a,d] > 0. - //--- - //--- With some application specific stuff, - //--- we know that f-[a,d] <= w[a,d]. 
- //--- - //--- So, to count the number of cases we can use the following: - //--- f-[a,d] > 0 ==> v[a,d] = 1 - //--- f-[a,d] <= 0 <== v[a,d] = 0 - //--- <==> - //--- f-[a,d] <= w[a,d] * v[a,d] - //--- - //--- and, then - //--- |{d in D : f+[a,d] - f-[a,d] < 0}| <= K[a] - //--- <==> - //--- sum{d in D} v[a,d] <= K[a] - //--- - - //--- - //--- (Approximate) MILP Reformulation - //--- - //--- min sum{a in A, d in D} f+[a,d] + f-[a,d] - //--- s.t. - //--- for a in A, d in D: - //--- f+[a,d] - f-[a,d] = - //--- a[a,d] sum{t in T} (t/n) x1[a,t] + - //--- b[a,d] x2[a] + - //--- c[a,d] sum{t in T} (t/n) z[a,t] + - //--- d[a,d] x3[a] + - //--- e[a,d] - //--- f-[a,d] <= w[a,d] * v[a,d] - //--- for a in A: - //--- sum{t in T} x1[a,t] <= 1 - //--- for a in A, t in T: - //--- z[a,t] <= x1[a,t], - //--- z[a,t] <= x2[a], - //--- z[a,t] >= x1[a,t] + x2[a] - 1. - //--- for d in D: - //--- sum{a in A} (f+[a,d] - f-[a,d]) <= B[d] - //--- for a in A: - //--- sum{d in D} v[a,d] <= K[a] - //--- for a in A, d in D: - //--- f+[a,d], f-[a,d] >= 0 - //--- f-[a,d] <= w[a,d] - //--- v[a,d] in {0,1} - //--- for a in A: - //--- x2[a], x3[a] in [0,1] - //--- for a in A, t in T - //--- x1[a,t] in {0,1}, z[a,t] in [0,1] - //--- - - //--- - //--- UPDATE (April 2010). - //--- - //--- A tighter formulation of the - //--- linearization of product of a binary and a continuous - //--- is possible due to the constraint: sum{t in T} x1[a,t] <= 1 - //--- - //--- For a in A, t in T: - //--- z[a,t] = x1[a,t] * x2[a] - //--- <==> - //--- OLD: - //--- for a in A: - //--- sum{t in T} x1[a,t] <= 1 (where, T = 1..n) - //--- for a in A, t in T: - //--- z[a,t] >= 0 <= 1, - //--- z[a,t] <= x1[a,t], - //--- z[a,t] <= x2[a], - //--- z[a,t] >= x1[a,t] + x2[a] - 1. - //--- NEW: - //--- for a in A: - //--- sum{t in T} x1[a,t] = 1 (where, T = 0..n) - //--- sum{t in T} z[a,t] = x2[a] - //--- for a in A, t in T: - //--- z[a,t] >= 0 <= 1, - //--- z[a,t] <= x1[a,t]. - //--- - - - //--- - //--- Columns - //--- x1[a,t] (binary) - //--- z [a,t] - //--- f+[a,d] - //--- f-[a,d] - //--- x2[a] - //--- x3[a] - //---- v[a,d] (binary) - //--- - int i, a; - int nAtms = m_instance.getNAtms(); - int numCols = numCoreCols(); - - //--- - //--- Construct the objective function. - //--- Coefficients of f+ and f- are 1.0, the rest are 0. - //--- - m_objective = new double[numCols]; - if(!m_objective) - throw UtilExceptionMemory("createModels", "MMKP_DecompApp"); - UtilFillN(m_objective, numCols, 0.0); - for(i = getColOffset_fp(); i < getColOffset_x2(); i++) - m_objective[i] = 1.0; - setModelObjective(m_objective, numCols); - - - //--- - //--- A'' (core): - //--- for d in D: - //--- sum{a in A} (f+[a,d] - f-[a,d]) <= B[d] - //--- for a in A: <--- option ( core or relax ) - //--- sum{d in D} v[a,d] <= K[a] - //--- - //--- A'[a] for a in A (independent blocks) - //--- for d in D: - //--- f+[a,d] - f-[a,d] = - //--- a[a,d] sum{t in T} (t/n) x1[a,t] + - //--- b[a,d] x2[a] + - //--- c[a,d] sum{t in T} (t/n) z[a,t] + - //--- d[a,d] x3[a] + - //--- e[a,d] - //--- f-[a,d] <= w[a,d] * v[a,d] - //--- sum{t in T} x1[a,t] <= 1 - //--- for t in T: - //--- z[a,t] <= x1[a,t], - //--- z[a,t] <= x2[a], - //--- z[a,t] >= x1[a,t] + x2[a] - 1. 
- //--- sum{d in D} v[a,d] <= K[a] <--- option ( core or relax ) - //--- - if(m_appParam.ModelNameCore == "BUDGET"){ - DecompConstraintSet * modelCore = createModelCore1(false); - m_models.push_back(modelCore); - setModelCore(modelCore, "BUDGET"); - } - if(m_appParam.ModelNameCore == "BUDGET_COUNT"){ - DecompConstraintSet * modelCore = createModelCore1(); - m_models.push_back(modelCore); - setModelCore(modelCore, "BUDGET_COUNT"); - } - if(m_appParam.ModelNameRelax == "CASH"){ - for(a = 0; a < nAtms; a++){ - DecompConstraintSet * modelRelax = createModelRelax1(a, false); - m_models.push_back(modelRelax); - setModelRelax(modelRelax, "CASH" + UtilIntToStr(a), a); +DecompConstraintSet *ATM_DecompApp::createModelRelaxCount() { + + UtilPrintFuncBegin(m_osLog, m_classTag, "createModelRelaxCount()", + m_appParam.LogLevel, 2); + + int a, d, t, nRows, pairIndex, nRowsMax; + double rhs, coefA, coefC; + pair adP; + vector::const_iterator vi; + + const int nSteps = m_appParam.NumSteps; + const int nAtms = m_instance.getNAtms(); + const int nDates = m_instance.getNDates(); + const int nPairs = m_instance.getNPairs(); + const int nAtmsSteps = getNAtmsSteps(); + const vector &pairsAD = m_instance.getPairsAD(); + + const double *a_ad = m_instance.get_a_ad(); // dense storage + const double *b_ad = m_instance.get_b_ad(); // dense storage + const double *c_ad = m_instance.get_c_ad(); // dense storage + const double *d_ad = m_instance.get_d_ad(); // dense storage + const double *e_ad = m_instance.get_e_ad(); // dense storage + const double *w_ad = m_instance.get_w_ad(); // dense storage + const double *B_d = m_instance.get_B_d(); + + const int nCols = numCoreCols(); + if (m_appParam.UseTightModel) + nRowsMax = nDates + 2 * nAtms + nAtmsSteps + 2 * nPairs; + else + nRowsMax = nDates + nAtms + 3 * nAtmsSteps + 2 * nPairs; + + // TODO: + // try the nested idea - so master has ConZtoX and we price without that + // but we solve with gap the harder oracle with those constraints + + //--- + //--- A' (relax): + //--- for d in D: + //--- sum{a in A} (f+[a,d] - f-[a,d]) <= B[d] <---- makes it hard + // OPT + //--- for a in A: + //--- sum{t in T} x1[a,t] <= 1 + // OPT + //--- for a in A, t in T: + //--- z[a,t] <= x1[a,t], + //--- z[a,t] <= x2[a], + //--- z[a,t] >= x1[a,t] + x2[a] - 1. 
+ + //--- for a in A, d in D: + //--- f+[a,d] - f-[a,d] = + //--- a[a,d] sum{t in T} (t/n) x1[a,t] + + //--- b[a,d] x2[a] + + //--- c[a,d] sum{t in T} (t/n) z[a,t] + + //--- d[a,d] x3[a] + + //--- e[a,d] + //--- f-[a,d] <= w[a,d] * v[a,d] + //--- + + DecompConstraintSet *model = new DecompConstraintSet(); + CoinAssertHint(model, "Error: Out of Memory"); + + model->M = new CoinPackedMatrix(false, 0.0, 0.0); + CoinAssertHint(model->M, "Error: Out of Memory"); + model->M->setDimensions(0, nCols); + model->reserve(nRowsMax, nCols); + + //--- + //--- create nDates empty rowBudget packed vectors + //--- + vector rowBudget; + for (d = 0; d < nDates; d++) { + CoinPackedVector row; + rowBudget.push_back(row); + } + + nRows = 0; + pairIndex = 0; + for (vi = pairsAD.begin(); vi != pairsAD.end(); vi++) { + adP = m_instance.getIndexADInv(*vi); + a = adP.first; + d = adP.second; + + //--- + //--- sum{a in A} (f+[a,d] - f-[a,d]) <= B[d] + //--- + rowBudget[d].insert(getColOffset_fp() + pairIndex, 1.0); + rowBudget[d].insert(getColOffset_fm() + pairIndex, -1.0); + + //--- + //--- f+[a,d] - f-[a,d] = + //--- a[a,d] sum{t in T} (t/n) x1[a,t] + + //--- b[a,d] x2[a] + + //--- c[a,d] sum{t in T} (t/n) z[a,t] + + //--- d[a,d] x3[a] + + //--- e[a,d] + //--- + CoinPackedVector row; + string rowName = "demand_def(a_" + m_instance.getAtmName(a) + ",d_" + + m_instance.getDateName(d) + ")"; + row.insert(colIndex_fp(pairIndex), 1.0); + row.insert(colIndex_fm(pairIndex), -1.0); + coefA = -a_ad[*vi] / nSteps; + coefC = -c_ad[*vi] / nSteps; + if (m_appParam.UseTightModel) { + for (t = 0; t <= nSteps; t++) { + row.insert(colIndex_x1(a, t), t * coefA); + row.insert(colIndex_z(a, t), t * coefC); } - } - if(m_appParam.ModelNameRelax == "CASH_COUNT"){ - for(a = 0; a < nAtms; a++){ - DecompConstraintSet * modelRelax = createModelRelax1(a); - m_models.push_back(modelRelax); - setModelRelax(modelRelax, "CASH_COUNT" + UtilIntToStr(a), a); + } else { + for (t = 0; t < nSteps; t++) { + row.insert(colIndex_x1(a, t), (t + 1) * coefA); + row.insert(colIndex_z(a, t), (t + 1) * coefC); } - } - if(m_appParam.ModelNameRelaxNest == "CASH_COUNT"){ - for(a = 0; a < nAtms; a++){ - DecompConstraintSet * modelRelax = createModelRelax1(a); - m_models.push_back(modelRelax); - setModelRelaxNest(modelRelax, "CASH_COUNT" + UtilIntToStr(a), a); - } - } - + } + row.insert(colIndex_x2(a), -b_ad[*vi]); + row.insert(colIndex_x3(a), -d_ad[*vi]); + + rhs = e_ad[*vi]; + model->appendRow(row, rhs, rhs, rowName); + nRows++; + + //--- + //--- f-[a,d] <= w[a,d] * v[a,d] + //--- + CoinPackedVector rowLink; + string rowNameLink = "linkv(a_" + m_instance.getAtmName(a) + ",d_" + + m_instance.getDateName(d) + ")"; + rowLink.insert(colIndex_fm(pairIndex), 1.0); + rowLink.insert(colIndex_v(pairIndex), -w_ad[*vi]); + model->appendRow(rowLink, -m_infinity, 0.0, rowNameLink); + nRows++; + + pairIndex++; + } + + for (d = 0; d < nDates; d++) { + string rowNameBudget = "budget(d_" + m_instance.getDateName(d) + ")"; + model->appendRow(rowBudget[d], -m_infinity, B_d[d], rowNameBudget); + nRows++; + } + + //--- + //--- for a in A: + //--- sum{t in T} x1[a,t] <= 1 + //--- for a in A, t in T: + //--- z[a,t] <= x1[a,t], + //--- z[a,t] <= x2[a], + //--- z[a,t] >= x1[a,t] + x2[a] - 1. + //--- + // THINK: if you use this, shouldn't we postprocess z=x1*x2? + // putting those constaints in master are unneccessary... + // in fact, why not just do that in block version too... 
+ // for(a = 0; a < nAtms; a++){ + // nRows += createConZtoX(model, a); + // nRows += createConPickOne(model, a); + //} + assert(nRows <= nRowsMax); + + //--- + //--- create model columns + //--- + createModelColumns(model); + + UtilPrintFuncEnd(m_osLog, m_classTag, "createModelRelaxCount()", + m_appParam.LogLevel, 2); + + return model; +} - //TODO: solve this A' with gap as relaxNest - but - // we are doing blocks - so find a column then break it out - // tell framework to do that? or do as user? show how we can - // get stuff back from framework to setup and solve... - /*{ - //--- - //--- Version MODEL_MASTER_COUNT - //--- is relaxation too hard? - //--- - //--- A'' (core): - //--- for a in A: - //--- sum{d in D} v[a,d] <= K[a] - //--- - //--- A' (relax): - //--- for d in D: - //--- sum{a in A} (f+[a,d] - f-[a,d]) <= B[d] - //--- for a in A: - //--- sum{t in T} x1[a,t] <= 1 - //--- for a in A, t in T: - //--- z[a,t] <= x1[a,t], - //--- z[a,t] <= x2[a], - //--- z[a,t] >= x1[a,t] + x2[a] - 1. - //--- for a in A, d in D: - //--- f+[a,d] - f-[a,d] = - //--- a[a,d] sum{t in T} (t/n) x1[a,t] + - //--- b[a,d] x2[a] + - //--- c[a,d] sum{t in T} (t/n) z[a,t] + - //--- d[a,d] x3[a] + - //--- e[a,d] - //--- f-[a,d] <= w[a,d] * v[a,d] - //--- - vector< DecompConstraintSet* > modelRelaxV; - DecompConstraintSet * modelCoreCount = createModelCoreCount(); - DecompConstraintSet * modelRelaxCount = createModelRelaxCount(); - modelRelaxV.push_back(modelRelaxCount); - modelCore.insert (make_pair(MODEL_COUNT, modelCoreCount)); - modelRelax.insert(make_pair(MODEL_COUNT, modelRelaxV)); - }*/ - - UtilPrintFuncEnd(m_osLog, m_classTag, - "APPcreateModel()", m_appParam.LogLevel, 2); +//===========================================================================// +void ATM_DecompApp::createModels() { + + //--- + //--- This function does the work to create the different models + //--- that will be used. This memory is owned by the user. It will + //--- be passed to the application interface and used by the algorithms. + //--- + UtilPrintFuncBegin(m_osLog, m_classTag, "APPcreateModel()", + m_appParam.LogLevel, 2); + + //--- + //--- The original problem is in the form of a MINLP: + //--- min sum{a in A, d in D} | f[a,d] | + //--- s.t. + //--- for a in A, d in D: + //--- f[a,d] = + //--- a[a,d] x1[a] + + //--- b[a,d] x2[a] + + //--- c[a,d] x1[a] x2[a] + + //--- d[a,d] x3[a] + + //--- e[a,d] + //--- for d in D: + //--- sum{a in A} f[a,d] <= B[d] + //--- for a in A: + //--- |{d in D : f[a,d] < 0} | <= K[a] (notice strict ineq) + //--- for a in A, d in D: + //--- f[a,d] free + //--- for a in A: + //--- x1[a], x2[a] in [0,1], x3[a] >= 0 + //--- + + //--- + //--- Discretization of continuous variable. + //--- n = number of steps + //--- T = {1, ..., n} + //--- sum{t in T} (t/n) * x1[a,t] = x1[a], for a in A + //--- sum{t in T} x1[a,t] <= 1 , for a in A + //--- so, if n=10, x1[a] in {0,0.1,0.2,...,1.0}. + //--- + + //--- + //--- Linearization of product of a binary and a continuous. + //--- For a in A, t in T: + //--- z[a,t] = x1[a,t] * x2[a] + //--- <==> + //--- z[a,t] >= 0 <= 1, + //--- z[a,t] <= x1[a,t], + //--- z[a,t] <= x2[a], + //--- z[a,t] >= x1[a,t] + x2[a] - 1. + //--- + + //--- + //--- Linearization of the absolute value. + //--- For a in A, d in D: + //--- f+[a,d], f-[a,d] >= 0 + //--- f[a,d] = f+[a,d] - f-[a,d] + //--- |f[a,d]|= f+[a,d] + f-[a,d] + //--- + + //--- + //--- To model the count constraints. 
+ //--- For a in A: + //--- |{d in D : f[a,d] < 0}| <= K[a] + //--- <==> + //--- |{d in D : f+[a,d] - f-[a,d] < 0}| <= K[a] + //--- + //--- At optimality, if f[a,d] != 0, then either + //--- f+[a,d] > 0 and f-[a,d] = 0, or + //--- f+[a,d] = 0 and f-[a,d] > 0. + //--- So, to count the cases when f[a,d] < 0, we can restrict + //--- attention to the cases where f-[a,d] > 0. + //--- + //--- With some application specific stuff, + //--- we know that f-[a,d] <= w[a,d]. + //--- + //--- So, to count the number of cases we can use the following: + //--- f-[a,d] > 0 ==> v[a,d] = 1 + //--- f-[a,d] <= 0 <== v[a,d] = 0 + //--- <==> + //--- f-[a,d] <= w[a,d] * v[a,d] + //--- + //--- and, then + //--- |{d in D : f+[a,d] - f-[a,d] < 0}| <= K[a] + //--- <==> + //--- sum{d in D} v[a,d] <= K[a] + //--- + + //--- + //--- (Approximate) MILP Reformulation + //--- + //--- min sum{a in A, d in D} f+[a,d] + f-[a,d] + //--- s.t. + //--- for a in A, d in D: + //--- f+[a,d] - f-[a,d] = + //--- a[a,d] sum{t in T} (t/n) x1[a,t] + + //--- b[a,d] x2[a] + + //--- c[a,d] sum{t in T} (t/n) z[a,t] + + //--- d[a,d] x3[a] + + //--- e[a,d] + //--- f-[a,d] <= w[a,d] * v[a,d] + //--- for a in A: + //--- sum{t in T} x1[a,t] <= 1 + //--- for a in A, t in T: + //--- z[a,t] <= x1[a,t], + //--- z[a,t] <= x2[a], + //--- z[a,t] >= x1[a,t] + x2[a] - 1. + //--- for d in D: + //--- sum{a in A} (f+[a,d] - f-[a,d]) <= B[d] + //--- for a in A: + //--- sum{d in D} v[a,d] <= K[a] + //--- for a in A, d in D: + //--- f+[a,d], f-[a,d] >= 0 + //--- f-[a,d] <= w[a,d] + //--- v[a,d] in {0,1} + //--- for a in A: + //--- x2[a], x3[a] in [0,1] + //--- for a in A, t in T + //--- x1[a,t] in {0,1}, z[a,t] in [0,1] + //--- + + //--- + //--- UPDATE (April 2010). + //--- + //--- A tighter formulation of the + //--- linearization of product of a binary and a continuous + //--- is possible due to the constraint: sum{t in T} x1[a,t] <= 1 + //--- + //--- For a in A, t in T: + //--- z[a,t] = x1[a,t] * x2[a] + //--- <==> + //--- OLD: + //--- for a in A: + //--- sum{t in T} x1[a,t] <= 1 (where, T = 1..n) + //--- for a in A, t in T: + //--- z[a,t] >= 0 <= 1, + //--- z[a,t] <= x1[a,t], + //--- z[a,t] <= x2[a], + //--- z[a,t] >= x1[a,t] + x2[a] - 1. + //--- NEW: + //--- for a in A: + //--- sum{t in T} x1[a,t] = 1 (where, T = 0..n) + //--- sum{t in T} z[a,t] = x2[a] + //--- for a in A, t in T: + //--- z[a,t] >= 0 <= 1, + //--- z[a,t] <= x1[a,t]. + //--- + + //--- + //--- Columns + //--- x1[a,t] (binary) + //--- z [a,t] + //--- f+[a,d] + //--- f-[a,d] + //--- x2[a] + //--- x3[a] + //---- v[a,d] (binary) + //--- + int i, a; + int nAtms = m_instance.getNAtms(); + int numCols = numCoreCols(); + + //--- + //--- Construct the objective function. + //--- Coefficients of f+ and f- are 1.0, the rest are 0. 
+ //--- + m_objective = new double[numCols]; + if (!m_objective) + throw UtilExceptionMemory("createModels", "MMKP_DecompApp"); + UtilFillN(m_objective, numCols, 0.0); + for (i = getColOffset_fp(); i < getColOffset_x2(); i++) + m_objective[i] = 1.0; + setModelObjective(m_objective, numCols); + + //--- + //--- A'' (core): + //--- for d in D: + //--- sum{a in A} (f+[a,d] - f-[a,d]) <= B[d] + //--- for a in A: <--- option ( core or relax ) + //--- sum{d in D} v[a,d] <= K[a] + //--- + //--- A'[a] for a in A (independent blocks) + //--- for d in D: + //--- f+[a,d] - f-[a,d] = + //--- a[a,d] sum{t in T} (t/n) x1[a,t] + + //--- b[a,d] x2[a] + + //--- c[a,d] sum{t in T} (t/n) z[a,t] + + //--- d[a,d] x3[a] + + //--- e[a,d] + //--- f-[a,d] <= w[a,d] * v[a,d] + //--- sum{t in T} x1[a,t] <= 1 + //--- for t in T: + //--- z[a,t] <= x1[a,t], + //--- z[a,t] <= x2[a], + //--- z[a,t] >= x1[a,t] + x2[a] - 1. + //--- sum{d in D} v[a,d] <= K[a] <--- option ( core or relax ) + //--- + if (m_appParam.ModelNameCore == "BUDGET") { + DecompConstraintSet *modelCore = createModelCore1(false); + m_models.push_back(modelCore); + setModelCore(modelCore, "BUDGET"); + } + if (m_appParam.ModelNameCore == "BUDGET_COUNT") { + DecompConstraintSet *modelCore = createModelCore1(); + m_models.push_back(modelCore); + setModelCore(modelCore, "BUDGET_COUNT"); + } + if (m_appParam.ModelNameRelax == "CASH") { + for (a = 0; a < nAtms; a++) { + DecompConstraintSet *modelRelax = createModelRelax1(a, false); + m_models.push_back(modelRelax); + setModelRelax(modelRelax, "CASH" + UtilIntToStr(a), a); + } + } + if (m_appParam.ModelNameRelax == "CASH_COUNT") { + for (a = 0; a < nAtms; a++) { + DecompConstraintSet *modelRelax = createModelRelax1(a); + m_models.push_back(modelRelax); + setModelRelax(modelRelax, "CASH_COUNT" + UtilIntToStr(a), a); + } + } + if (m_appParam.ModelNameRelaxNest == "CASH_COUNT") { + for (a = 0; a < nAtms; a++) { + DecompConstraintSet *modelRelax = createModelRelax1(a); + m_models.push_back(modelRelax); + setModelRelaxNest(modelRelax, "CASH_COUNT" + UtilIntToStr(a), a); + } + } + + // TODO: solve this A' with gap as relaxNest - but + // we are doing blocks - so find a column then break it out + // tell framework to do that? or do as user? show how we can + // get stuff back from framework to setup and solve... + /*{ + //--- + //--- Version MODEL_MASTER_COUNT + //--- is relaxation too hard? + //--- + //--- A'' (core): + //--- for a in A: + //--- sum{d in D} v[a,d] <= K[a] + //--- + //--- A' (relax): + //--- for d in D: + //--- sum{a in A} (f+[a,d] - f-[a,d]) <= B[d] + //--- for a in A: + //--- sum{t in T} x1[a,t] <= 1 + //--- for a in A, t in T: + //--- z[a,t] <= x1[a,t], + //--- z[a,t] <= x2[a], + //--- z[a,t] >= x1[a,t] + x2[a] - 1. 
+ //--- for a in A, d in D: + //--- f+[a,d] - f-[a,d] = + //--- a[a,d] sum{t in T} (t/n) x1[a,t] + + //--- b[a,d] x2[a] + + //--- c[a,d] sum{t in T} (t/n) z[a,t] + + //--- d[a,d] x3[a] + + //--- e[a,d] + //--- f-[a,d] <= w[a,d] * v[a,d] + //--- + vector< DecompConstraintSet* > modelRelaxV; + DecompConstraintSet * modelCoreCount = createModelCoreCount(); + DecompConstraintSet * modelRelaxCount = createModelRelaxCount(); + modelRelaxV.push_back(modelRelaxCount); + modelCore.insert (make_pair(MODEL_COUNT, modelCoreCount)); + modelRelax.insert(make_pair(MODEL_COUNT, modelRelaxV)); + }*/ + + UtilPrintFuncEnd(m_osLog, m_classTag, "APPcreateModel()", m_appParam.LogLevel, + 2); } //===========================================================================// -bool ATM_DecompApp::APPisUserFeasible(const double * x, - const int nCols, - const double tolZero){ - //--- - //--- sanity check - is this solution feasible? - //--- since we provide a full matrix, DECOMP will also check - //--- that the algebra gives a feasible point - //--- - - UtilPrintFuncBegin(m_osLog, m_classTag, - "APPisUserFeasible()", m_appParam.LogLevel, 2); - - - double lhs, rhs, coeff; - int a, d, t; - int pairIndex = 0; - bool isFeas = true; - - const int nSteps = m_appParam.NumSteps; - const int nAtms = m_instance.getNAtms(); - const vector & pairsAD = m_instance.getPairsAD(); - const double * K_a = m_instance.get_K_a(); - const double * a_ad = m_instance.get_a_ad(); //dense storage - const double * b_ad = m_instance.get_b_ad(); //dense storage - const double * c_ad = m_instance.get_c_ad(); //dense storage - const double * d_ad = m_instance.get_d_ad(); //dense storage - const double * e_ad = m_instance.get_e_ad(); //dense storage - double * count = new double[nAtms]; - - //--- - //--- is the flow variable matching x,z variables - //--- for a in A, d in D: - //--- f+[a,d] - f-[a,d] = - //--- a[a,d] sum{t in T} (t/n) x1[a,t] + - //--- b[a,d] x2[a] + - //--- c[a,d] sum{t in T} (t/n) z[a,t] + - //--- d[a,d] x3[a] + - //--- e[a,d] - //--- - pair adP; - vector::const_iterator vi; - double actViol = 0.0; - double relViol = 0.0; - for(vi = pairsAD.begin(); vi != pairsAD.end(); vi++){ - adP = m_instance.getIndexADInv(*vi); - a = adP.first; - - lhs = x[getColOffset_fp() + pairIndex]; - lhs -= x[getColOffset_fm() + pairIndex]; - - rhs = e_ad[*vi] - + b_ad[*vi] * x[colIndex_x2(a)] - + d_ad[*vi] * x[colIndex_x3(a)]; - if(m_appParam.UseTightModel){ - for(t = 0; t <= nSteps; t++){ - coeff = (double)(t) / (double)nSteps; - rhs += a_ad[*vi] * coeff * x[colIndex_x1(a,t)]; - rhs += c_ad[*vi] * coeff * x[colIndex_z (a,t)]; - } +bool ATM_DecompApp::APPisUserFeasible(const double *x, const int nCols, + const double tolZero) { + //--- + //--- sanity check - is this solution feasible? 
+ //--- since we provide a full matrix, DECOMP will also check + //--- that the algebra gives a feasible point + //--- + + UtilPrintFuncBegin(m_osLog, m_classTag, "APPisUserFeasible()", + m_appParam.LogLevel, 2); + + double lhs, rhs, coeff; + int a, d, t; + int pairIndex = 0; + bool isFeas = true; + + const int nSteps = m_appParam.NumSteps; + const int nAtms = m_instance.getNAtms(); + const vector &pairsAD = m_instance.getPairsAD(); + const double *K_a = m_instance.get_K_a(); + const double *a_ad = m_instance.get_a_ad(); // dense storage + const double *b_ad = m_instance.get_b_ad(); // dense storage + const double *c_ad = m_instance.get_c_ad(); // dense storage + const double *d_ad = m_instance.get_d_ad(); // dense storage + const double *e_ad = m_instance.get_e_ad(); // dense storage + double *count = new double[nAtms]; + + //--- + //--- is the flow variable matching x,z variables + //--- for a in A, d in D: + //--- f+[a,d] - f-[a,d] = + //--- a[a,d] sum{t in T} (t/n) x1[a,t] + + //--- b[a,d] x2[a] + + //--- c[a,d] sum{t in T} (t/n) z[a,t] + + //--- d[a,d] x3[a] + + //--- e[a,d] + //--- + pair adP; + vector::const_iterator vi; + double actViol = 0.0; + double relViol = 0.0; + for (vi = pairsAD.begin(); vi != pairsAD.end(); vi++) { + adP = m_instance.getIndexADInv(*vi); + a = adP.first; + + lhs = x[getColOffset_fp() + pairIndex]; + lhs -= x[getColOffset_fm() + pairIndex]; + + rhs = e_ad[*vi] + b_ad[*vi] * x[colIndex_x2(a)] + + d_ad[*vi] * x[colIndex_x3(a)]; + if (m_appParam.UseTightModel) { + for (t = 0; t <= nSteps; t++) { + coeff = (double)(t) / (double)nSteps; + rhs += a_ad[*vi] * coeff * x[colIndex_x1(a, t)]; + rhs += c_ad[*vi] * coeff * x[colIndex_z(a, t)]; } - else{ - for(t = 0; t < nSteps; t++){ - coeff = (double)(t+1) / (double)nSteps; - rhs += a_ad[*vi] * coeff * x[colIndex_x1(a,t)]; - rhs += c_ad[*vi] * coeff * x[colIndex_z (a,t)]; - } + } else { + for (t = 0; t < nSteps; t++) { + coeff = (double)(t + 1) / (double)nSteps; + rhs += a_ad[*vi] * coeff * x[colIndex_x1(a, t)]; + rhs += c_ad[*vi] * coeff * x[colIndex_z(a, t)]; } - actViol = fabs(lhs-rhs); - if(UtilIsZero(lhs,1.0e-3)) - relViol = actViol; - else - relViol = actViol / std::fabs(lhs); - if(relViol > 0.05){ - printf("NOT FEASIBLE lhs=%12.10f, rhs=%12.10f\n", lhs, rhs); - isFeas = false; - } - pairIndex++; - } - - - //--- - //--- did the indicators work correctly? - //--- f+[a,d] - f-[a,d] < 0 ==> v[a,d] = 1 - //--- - pairIndex = 0; - for(vi = pairsAD.begin(); vi != pairsAD.end(); vi++){ - adP = m_instance.getIndexADInv(*vi); - a = adP.first; - lhs = x[getColOffset_fp() + pairIndex]; - lhs -= x[getColOffset_fm() + pairIndex]; - //if(lhs < -DecompEpsilon){ - if(lhs < -0.01){ - if(fabs(x[getColOffset_v() + pairIndex] - 1.0) > tolZero){ - printf("NOT FEASIBLE fp=%10.5f fm=%10.5f f=%10.5f < 0, but v=%g not 1\n", - x[getColOffset_fp() + pairIndex], - x[getColOffset_fm() + pairIndex], - lhs, x[getColOffset_v() + pairIndex]); - isFeas = false; - } + } + actViol = fabs(lhs - rhs); + if (UtilIsZero(lhs, 1.0e-3)) + relViol = actViol; + else + relViol = actViol / std::fabs(lhs); + if (relViol > 0.05) { + printf("NOT FEASIBLE lhs=%12.10f, rhs=%12.10f\n", lhs, rhs); + isFeas = false; + } + pairIndex++; + } + + //--- + //--- did the indicators work correctly? 
+ //--- f+[a,d] - f-[a,d] < 0 ==> v[a,d] = 1 + //--- + pairIndex = 0; + for (vi = pairsAD.begin(); vi != pairsAD.end(); vi++) { + adP = m_instance.getIndexADInv(*vi); + a = adP.first; + lhs = x[getColOffset_fp() + pairIndex]; + lhs -= x[getColOffset_fm() + pairIndex]; + // if(lhs < -DecompEpsilon){ + if (lhs < -0.01) { + if (fabs(x[getColOffset_v() + pairIndex] - 1.0) > tolZero) { + printf( + "NOT FEASIBLE fp=%10.5f fm=%10.5f f=%10.5f < 0, but v=%g not 1\n", + x[getColOffset_fp() + pairIndex], x[getColOffset_fm() + pairIndex], + lhs, x[getColOffset_v() + pairIndex]); + isFeas = false; } - pairIndex++; - } - - //--- - //--- did the surrogate z work correctly? - //--- z[a,t] = sum{t in T} x1[a,t] * x2[a] - //--- - - //--- - //--- is budget satisfied? - //--- for d in D: - //--- sum{a in A} (f+[a,d] - f-[a,d]) <= B[d] - //--- - - //--- - //--- is the count constraint satisifed - //--- for a in A: - //--- sum{d in D} v[a,d] <= K[a] - //--- - pairIndex = 0; - UtilFillN(count, nAtms, 0.0); - for(vi = pairsAD.begin(); vi != pairsAD.end(); vi++){ - adP = m_instance.getIndexADInv(*vi); - a = adP.first; - d = adP.second; - count[a] += x[getColOffset_v() + pairIndex]; - pairIndex++; - } - for(a = 0; a < nAtms; a++){ + } + pairIndex++; + } + + //--- + //--- did the surrogate z work correctly? + //--- z[a,t] = sum{t in T} x1[a,t] * x2[a] + //--- + + //--- + //--- is budget satisfied? + //--- for d in D: + //--- sum{a in A} (f+[a,d] - f-[a,d]) <= B[d] + //--- + + //--- + //--- is the count constraint satisifed + //--- for a in A: + //--- sum{d in D} v[a,d] <= K[a] + //--- + pairIndex = 0; + UtilFillN(count, nAtms, 0.0); + for (vi = pairsAD.begin(); vi != pairsAD.end(); vi++) { + adP = m_instance.getIndexADInv(*vi); + a = adP.first; + d = adP.second; + count[a] += x[getColOffset_v() + pairIndex]; + pairIndex++; + } + for (a = 0; a < nAtms; a++) { #if 0 printf("COUNT[a=%3d->%10s]: %5g <= K=%5g\n", a, m_instance.getAtmName(a).c_str(), count[a], K_a[a]); -#endif - if(count[a] > (K_a[a] + 0.01)){ +#endif + if (count[a] > (K_a[a] + 0.01)) { #if 0 printf("NOT FEASIBLE a:%d count=%g K=%g\n", a, count[a], K_a[a]); #endif - isFeas = false; - } - } + isFeas = false; + } + } #if 0 printf("IsUserFeas = %d\n", isFeas); #endif - //--- - //--- free local memory - //--- - UTIL_DELARR(count); + //--- + //--- free local memory + //--- + UTIL_DELARR(count); + + UtilPrintFuncEnd(m_osLog, m_classTag, "APPisUserFeasible()", + m_appParam.LogLevel, 2); - UtilPrintFuncEnd(m_osLog, m_classTag, - "APPisUserFeasible()", m_appParam.LogLevel, 2); - - return isFeas; + return isFeas; } diff --git a/Dip/examples/ATM/ATM_Instance.cpp b/Dip/examples/ATM/ATM_Instance.cpp index c8399b4a..27486d18 100644 --- a/Dip/examples/ATM/ATM_Instance.cpp +++ b/Dip/examples/ATM/ATM_Instance.cpp @@ -4,478 +4,465 @@ // Decomp is distributed under the Common Public License as part of the // // COIN-OR repository (http://www.coin-or.org). // // // -// Authors: Matthew Galati, SAS Institute Inc. (matthew.galati@sas.com) // -// Ted Ralphs, Lehigh University (ted@lehigh.edu) // -// Jiadong Wang, Lehigh University (jiw408@lehigh.edu) // +// Authors: Matthew Galati, SAS Institute Inc. (matthew.galati@sas.com) // +// Ted Ralphs, Lehigh University (ted@lehigh.edu) // +// Jiadong Wang, Lehigh University (jiw408@lehigh.edu) // // // // Copyright (C) 2002-2019, Lehigh University, Matthew Galati, and Ted Ralphs// // All Rights Reserved. 
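A side note on the tolerance logic in APPisUserFeasible above: a row is flagged only when the relative violation exceeds 5%, falling back to the absolute violation when lhs is essentially zero. The following self-contained sketch restates that test; the helper name and default tolerances are illustrative, and UtilIsZero is approximated here by a plain |lhs| <= zeroTol comparison.

#include <cmath>

// Illustrative restatement of the violation test used above: the gap
// |lhs - rhs| is measured relative to |lhs| unless lhs is numerically
// zero (within zeroTol), in which case the absolute gap is used.
static bool isRelativelyViolated(double lhs, double rhs,
                                 double zeroTol = 1.0e-3,
                                 double relTol = 0.05) {
  double actViol = std::fabs(lhs - rhs);
  double relViol =
      (std::fabs(lhs) <= zeroTol) ? actViol : actViol / std::fabs(lhs);
  return relViol > relTol;
}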
// //===========================================================================// //===========================================================================// -#include "UtilMacros.h" #include "ATM_Instance.h" - +#include "UtilMacros.h" //===========================================================================// -void ATM_Instance::readInstance(string & fileNameA, - string & fileNameD, - string & fileNameAD){ - - string atm, date; - int a, d, ad, n_ad; - double K, B; - - ifstream isA, isD, isAD; - ifstream isA2, isD2; //MSVS bug? - char dummy[10000]; - const int maxLine = 10000; - - //--- - //--- File format (.../Decomp/data/ATM) - //--- - //--- dataA.txt: - //--- a K[a] - //--- dataD.txt: - //--- d B[d] - //--- dataAD.txt: - //--- a d a[a,d] b[a,d] c[a,d] d[a,d] e[a,d] - //--- - - //--- - //--- open file streams - //--- - UtilOpenFile(isA, fileNameA.c_str()); - UtilOpenFile(isD, fileNameD.c_str()); - UtilOpenFile(isAD, fileNameAD.c_str()); - - //--- - //--- get number of atms - //--- - printf("Reading %s\n", fileNameA.c_str()); - m_nAtms = 0; - isA.getline(dummy, maxLine); - while(!isA.eof()){ - isA >> atm; - if(isA.eof()) - break; - isA >> K; - m_nAtms++; - } - isA.close(); - - //--- - //--- get number of dates - //--- - printf("Reading %s\n", fileNameD.c_str()); - m_nDates = 0; - isD.getline(dummy, maxLine); - while(!isD.eof()){ - isD >> date; - if(isD.eof()) - break; - isD >> B; - m_nDates++; - } - isD.close(); - - //--- - //--- allocate memory for storage of coefficients - //--- open enough space as if dense - //--- - n_ad = m_nAtms * m_nDates; - m_a_ad = new double[n_ad]; - m_b_ad = new double[n_ad]; - m_c_ad = new double[n_ad]; //(=-b) - m_d_ad = new double[n_ad]; - m_e_ad = new double[n_ad]; - m_w_ad = new double[n_ad]; - m_B_d = new double[m_nDates]; - m_K_a = new double[m_nAtms]; - assert(m_a_ad && - m_b_ad && - m_c_ad && - m_d_ad && - m_e_ad && - m_w_ad && - m_B_d && - m_K_a); - - //--- - //--- get data for atms - //--- - UtilOpenFile(isA2, fileNameA.c_str()); - UtilOpenFile(isD2, fileNameD.c_str()); - m_nAtms = 0; - isA2.getline(dummy, maxLine); - while(!isA2.eof()){ - isA2 >> atm; - if(isA2.eof()) - break; - m_strToIntAtms.insert(make_pair(atm, m_nAtms)); - m_intToStrAtms.push_back(atm); - isA2 >> m_K_a[m_nAtms]; - m_nAtms++; - } - isA2.close(); - - //--- - //--- get data for dates - //--- - m_nDates = 0; - isD2.getline(dummy, maxLine); - while(!isD2.eof()){ - isD2 >> date; - if(isD2.eof()) - break; - m_strToIntDates.insert(make_pair(date, m_nDates)); - m_intToStrDates.push_back(date); - isD2 >> m_B_d[m_nDates]; - m_nDates++; - } - isD2.close(); - - //--- - //--- get data for ATMS x DATES (we don't have data for all pairs) - //--- - printf("Reading %s\n", fileNameAD.c_str()); - map::iterator mi; - isAD.getline(dummy, maxLine); - while(!isAD.eof()){ - isAD >> atm >> date; - if(isAD.eof()) - break; - - //get a,d index for this pair - mi = m_strToIntAtms.find(atm); - if(mi == m_strToIntAtms.end()){ - printf("ERROR atm not found: %s\n", atm.c_str()); - } - assert(mi != m_strToIntAtms.end()); - a = mi->second; - - mi = m_strToIntDates.find(date); - if(mi == m_strToIntDates.end()){ - printf("ERROR dates not found: %s\n", date.c_str()); - } - assert(mi != m_strToIntDates.end()); - d = mi->second; - - ad = getIndexAD(a,d); - m_pairsAD.push_back(ad); - - isAD >> m_a_ad[ad]; - isAD >> m_b_ad[ad]; - isAD >> m_c_ad[ad]; - isAD >> m_d_ad[ad]; - isAD >> m_e_ad[ad]; - isAD >> m_w_ad[ad]; - - //printf("ad=%d atm=%s date=%s a=%g b=%g c=%g d=%g e=%g w=%g\n", - // ad, atm.c_str(), 
date.c_str(), - // m_a_ad[ad], - // m_b_ad[ad], - // m_c_ad[ad], - // m_d_ad[ad], - // m_e_ad[ad], - // m_w_ad[ad]); - } - isAD.close(); - - printf("Number of ATMS = %d\n", getNAtms()); - printf("Number of Dates = %d\n", getNDates()); - printf("Number of Pairs = %d\n", getNPairs()); - +void ATM_Instance::readInstance(string &fileNameA, string &fileNameD, + string &fileNameAD) { + + string atm, date; + int a, d, ad, n_ad; + double K, B; + + ifstream isA, isD, isAD; + ifstream isA2, isD2; // MSVS bug? + char dummy[10000]; + const int maxLine = 10000; + + //--- + //--- File format (.../Decomp/data/ATM) + //--- + //--- dataA.txt: + //--- a K[a] + //--- dataD.txt: + //--- d B[d] + //--- dataAD.txt: + //--- a d a[a,d] b[a,d] c[a,d] d[a,d] e[a,d] + //--- + + //--- + //--- open file streams + //--- + UtilOpenFile(isA, fileNameA.c_str()); + UtilOpenFile(isD, fileNameD.c_str()); + UtilOpenFile(isAD, fileNameAD.c_str()); + + //--- + //--- get number of atms + //--- + printf("Reading %s\n", fileNameA.c_str()); + m_nAtms = 0; + isA.getline(dummy, maxLine); + while (!isA.eof()) { + isA >> atm; + if (isA.eof()) + break; + isA >> K; + m_nAtms++; + } + isA.close(); + + //--- + //--- get number of dates + //--- + printf("Reading %s\n", fileNameD.c_str()); + m_nDates = 0; + isD.getline(dummy, maxLine); + while (!isD.eof()) { + isD >> date; + if (isD.eof()) + break; + isD >> B; + m_nDates++; + } + isD.close(); + + //--- + //--- allocate memory for storage of coefficients + //--- open enough space as if dense + //--- + n_ad = m_nAtms * m_nDates; + m_a_ad = new double[n_ad]; + m_b_ad = new double[n_ad]; + m_c_ad = new double[n_ad]; //(=-b) + m_d_ad = new double[n_ad]; + m_e_ad = new double[n_ad]; + m_w_ad = new double[n_ad]; + m_B_d = new double[m_nDates]; + m_K_a = new double[m_nAtms]; + assert(m_a_ad && m_b_ad && m_c_ad && m_d_ad && m_e_ad && m_w_ad && m_B_d && + m_K_a); + + //--- + //--- get data for atms + //--- + UtilOpenFile(isA2, fileNameA.c_str()); + UtilOpenFile(isD2, fileNameD.c_str()); + m_nAtms = 0; + isA2.getline(dummy, maxLine); + while (!isA2.eof()) { + isA2 >> atm; + if (isA2.eof()) + break; + m_strToIntAtms.insert(make_pair(atm, m_nAtms)); + m_intToStrAtms.push_back(atm); + isA2 >> m_K_a[m_nAtms]; + m_nAtms++; + } + isA2.close(); + + //--- + //--- get data for dates + //--- + m_nDates = 0; + isD2.getline(dummy, maxLine); + while (!isD2.eof()) { + isD2 >> date; + if (isD2.eof()) + break; + m_strToIntDates.insert(make_pair(date, m_nDates)); + m_intToStrDates.push_back(date); + isD2 >> m_B_d[m_nDates]; + m_nDates++; + } + isD2.close(); + + //--- + //--- get data for ATMS x DATES (we don't have data for all pairs) + //--- + printf("Reading %s\n", fileNameAD.c_str()); + map::iterator mi; + isAD.getline(dummy, maxLine); + while (!isAD.eof()) { + isAD >> atm >> date; + if (isAD.eof()) + break; + + // get a,d index for this pair + mi = m_strToIntAtms.find(atm); + if (mi == m_strToIntAtms.end()) { + printf("ERROR atm not found: %s\n", atm.c_str()); + } + assert(mi != m_strToIntAtms.end()); + a = mi->second; + + mi = m_strToIntDates.find(date); + if (mi == m_strToIntDates.end()) { + printf("ERROR dates not found: %s\n", date.c_str()); + } + assert(mi != m_strToIntDates.end()); + d = mi->second; + + ad = getIndexAD(a, d); + m_pairsAD.push_back(ad); + + isAD >> m_a_ad[ad]; + isAD >> m_b_ad[ad]; + isAD >> m_c_ad[ad]; + isAD >> m_d_ad[ad]; + isAD >> m_e_ad[ad]; + isAD >> m_w_ad[ad]; + + // printf("ad=%d atm=%s date=%s a=%g b=%g c=%g d=%g e=%g w=%g\n", + // ad, atm.c_str(), date.c_str(), + // m_a_ad[ad], + // 
m_b_ad[ad], + // m_c_ad[ad], + // m_d_ad[ad], + // m_e_ad[ad], + // m_w_ad[ad]); + } + isAD.close(); + + printf("Number of ATMS = %d\n", getNAtms()); + printf("Number of Dates = %d\n", getNDates()); + printf("Number of Pairs = %d\n", getNPairs()); } //===========================================================================// -void ATM_Instance::generateRandom(const int nAtms, - const int nDates, - const int seed){ - - /* - Data from original: - \\ordsrv3\ormp\sas\ATM_Badshah\atm_20ATMS_3\atm_doc - nDates=272, nAtms=20, nPairs=4730 (max=5440) - - proc means data=FTPLIB.amul_atms_dates; - var withdrawal allocation NET_IMPACT_AVG - NET_IMPACT_STD NORMAL_AVG NORMAL_STD TS1 TS2; - run; - - The MEANS Procedure - Variable Label Std Dev Mean - ................................................................ - WITHDRAWAL WITHDRAWAL 1456368.37 1457077.41 - ALLOCATION ALLOCATION 1752334.72 2068196.66 - NET_IMPACT_AVG NET_IMPACT_AVG 0.8990607 1.1961954 - NET_IMPACT_STD NET_IMPACT_STD 1.8979644 1.4240460 - NORMAL_AVG NORMAL_AVG 1352731.38 1440849.71 - NORMAL_STD NORMAL_STD 352658.50 364123.38 - TS1 TS1 1267244.80 1371637.24 - S2 TS2 1246864.33 1361954.95 - ................................................................ - - - Variable Label Minimum Maximum - ................................................................ - WITHDRAWAL WITHDRAWAL 8000.00 7080400.00 - ALLOCATION ALLOCATION 100000.00 7020000.00 - NET_IMPACT_AVG NET_IMPACT_AVG 0.0053384 18.7119586 - NET_IMPACT_STD NET_IMPACT_STD 0.0046809 54.0086478 - NORMAL_AVG NORMAL_AVG 38864.52 4375539.71 - NORMAL_STD NORMAL_STD 26833.85 1141006.06 - TS1 TS1 25245.45 5250885.71 - TS2 TS2 700.0000000 4182207.14 - ................................................................ - - for{ in ATMS_DATES_THIS} do; - CA[a,d] = (normal_avg[a,d] * net_impact_avg[a,d] - ts_period_2[a,d]); - CB[a,d] = (ts_period_1[a,d] - ts_period_2[a,d]); - CC[a,d] = (ts_period_2[a,d] - ts_period_1[a,d]); - CD[a,d] = (normal_std[a,d] * net_impact_std[a,d]); - CE[a,d] = (-actual_withdrawal[a,d] + ts_period_2[a,d]); - end; - - These numbers are annoying big and causing lots of numerical - round-off issues. So, Let's scale by 1000. - */ +void ATM_Instance::generateRandom(const int nAtms, const int nDates, + const int seed) { + + /* + Data from original: + \\ordsrv3\ormp\sas\ATM_Badshah\atm_20ATMS_3\atm_doc + nDates=272, nAtms=20, nPairs=4730 (max=5440) + + proc means data=FTPLIB.amul_atms_dates; + var withdrawal allocation NET_IMPACT_AVG + NET_IMPACT_STD NORMAL_AVG NORMAL_STD TS1 TS2; + run; + + + + The MEANS Procedure + Variable Label Std Dev Mean + ................................................................ + WITHDRAWAL WITHDRAWAL 1456368.37 1457077.41 + ALLOCATION ALLOCATION 1752334.72 2068196.66 + NET_IMPACT_AVG NET_IMPACT_AVG 0.8990607 1.1961954 + NET_IMPACT_STD NET_IMPACT_STD 1.8979644 1.4240460 + NORMAL_AVG NORMAL_AVG 1352731.38 1440849.71 + NORMAL_STD NORMAL_STD 352658.50 364123.38 + TS1 TS1 1267244.80 1371637.24 + S2 TS2 1246864.33 1361954.95 + ................................................................ + + + + + Variable Label Minimum Maximum + ................................................................ 
+ WITHDRAWAL WITHDRAWAL 8000.00 7080400.00 + ALLOCATION ALLOCATION 100000.00 7020000.00 + NET_IMPACT_AVG NET_IMPACT_AVG 0.0053384 18.7119586 + NET_IMPACT_STD NET_IMPACT_STD 0.0046809 54.0086478 + NORMAL_AVG NORMAL_AVG 38864.52 4375539.71 + NORMAL_STD NORMAL_STD 26833.85 1141006.06 + TS1 TS1 25245.45 5250885.71 + TS2 TS2 700.0000000 4182207.14 + ................................................................ + + + + for{ in ATMS_DATES_THIS} do; + CA[a,d] = (normal_avg[a,d] * net_impact_avg[a,d] - ts_period_2[a,d]); + CB[a,d] = (ts_period_1[a,d] - ts_period_2[a,d]); + CC[a,d] = (ts_period_2[a,d] - ts_period_1[a,d]); + CD[a,d] = (normal_std[a,d] * net_impact_std[a,d]); + CE[a,d] = (-actual_withdrawal[a,d] + ts_period_2[a,d]); + end; + + These numbers are annoying big and causing lots of numerical + round-off issues. So, Let's scale by 1000. + */ #define STDD 0 #define MEAN 1 -#define MIN 2 -#define MAX 3 - double s_withdrawal[4] = {1456368, 1457077, 8000, 7080400}; - double s_allocation[4] = {1752334, 2068196, 100000, 7020000}; - double s_netimpactAve[4] = {0.8990607, 1.1961954, 0.0053384, 18.7119586}; - double s_netimpactStd[4] = {1.8979644, 1.4240460, 0.0046809, 54.0086478}; - double s_normalAve[4] = {13527318, 1440849, 38864, 4375539.71}; - double s_normalStd[4] = {352658, 364123, 26833, 1141006}; - double s_ts1[4] = {1267244, 1371637, 25245, 5250885}; - double s_ts2[4] = {1246864, 1361954, 700, 4182207}; - double scale = 1000; - int i; - for(i = 0; i < 4; i++){ - s_withdrawal[i] /= scale; - s_allocation[i] /= scale; - s_normalAve[i] /= scale; - s_normalStd[i] /= scale; - s_ts1[i] /= scale; - s_ts2[i] /= scale; - } - - - int nAD = nAtms * nDates; - double * withdrawal = new double[nAD]; - double * allocation = new double[nAD]; - double * netimpactAve = new double[nAD]; - double * netimpactStd = new double[nAD]; - double * normalAve = new double[nAD]; - double * normalStd = new double[nAD]; - double * ts1 = new double[nAD]; - double * ts2 = new double[nAD]; - assert(withdrawal && allocation && netimpactAve && - netimpactStd && normalAve && normalStd && - ts1 && ts2); - - string fileNameAD = "atm_randAD_"; - fileNameAD += UtilIntToStr(nAtms) + "_"; - fileNameAD += UtilIntToStr(nDates) + "_"; - fileNameAD += UtilIntToStr(seed) + ".txt"; - string fileNameA = "atm_randA_"; - fileNameA += UtilIntToStr(nAtms) + "_"; - fileNameA += UtilIntToStr(nDates) + "_"; - fileNameA += UtilIntToStr(seed) + ".txt"; - string fileNameD = "atm_randD_"; - fileNameD += UtilIntToStr(nAtms) + "_"; - fileNameD += UtilIntToStr(nDates) + "_"; - fileNameD += UtilIntToStr(seed) + ".txt"; - - ofstream osAD, osA, osD; - UtilOpenFile(osAD, fileNameAD.c_str()); - UtilOpenFile(osA, fileNameA.c_str()); - UtilOpenFile(osD, fileNameD.c_str()); - - int a, d; - srand(seed); - //--- - //--- generate 'raw data' in N[mean,std-dev] - //--- - int index = 0;//a * nDates + d - for(a = 0; a < nAtms; a++){ - for(d = 0; d < nDates; d++){ - do{ - withdrawal[index] = UtilNormRand(s_withdrawal[MEAN] , - s_withdrawal[STDD]); - }while( withdrawal[index] < s_withdrawal[MIN] || - withdrawal[index] > s_withdrawal[MAX]); - do{ - allocation[index] = UtilNormRand(s_allocation[MEAN] , - s_allocation[STDD]); - }while( allocation[index] < s_allocation[MIN] || - allocation[index] > s_allocation[MAX]); - do{ - netimpactAve[index] = UtilNormRand(s_netimpactAve[MEAN], - s_netimpactAve[STDD]); - }while( netimpactAve[index] < s_netimpactAve[MIN] || - netimpactAve[index] > s_netimpactAve[MAX]); - do{ - netimpactStd[index] = UtilNormRand(s_netimpactStd[MEAN], - 
s_netimpactStd[STDD]); - }while( netimpactStd[index] < s_netimpactStd[MIN] || - netimpactStd[index] > s_netimpactStd[MAX]); - do{ - normalAve[index] = UtilNormRand(s_normalAve[MEAN] , - s_normalAve[STDD]); - }while( normalAve[index] < s_normalAve[MIN] || - normalAve[index] > s_normalAve[MAX]); - do{ - normalStd[index] = UtilNormRand(s_normalStd[MEAN] , - s_normalStd[STDD]); - }while( normalStd[index] < s_normalStd[MIN] || - normalStd[index] > s_normalStd[MAX]); - do{ - ts1[index] = UtilNormRand(s_ts1[MEAN] , - s_ts1[STDD]); - }while( ts1[index] < s_ts1[MIN] || - ts1[index] > s_ts1[MAX]); - do{ - ts2[index] = UtilNormRand(s_ts2[MEAN] , - s_ts2[STDD]); - }while ( ts2[index] < s_ts2[MIN] || - ts2[index] > s_ts2[MAX]); - index++; - } - } - - //--- - //--- generate coefficients - //--- - //CA[a,d] = (normal_avg[a,d] * net_impact_avg[a,d] - ts_period_2[a,d]); - //CB[a,d] = (ts_period_1[a,d] - ts_period_2[a,d]); - //CC[a,d] = (ts_period_2[a,d] - ts_period_1[a,d]); - //CD[a,d] = (normal_std[a,d] * net_impact_std[a,d]); - //CE[a,d] = (-actual_withdrawal[a,d] + ts_period_2[a,d]); - double * ca = new double[nAD]; - double * cb = new double[nAD]; - double * cc = new double[nAD]; - double * cd = new double[nAD]; - double * ce = new double[nAD]; - assert(ca && cb && cc && cd && ce); - - index = 0; - osAD << "a\td\tCA\tCB\tCC\tCD\tCD\tCE\tCW\n"; - for(a = 0; a < nAtms; a++){ - for(d = 0; d < nDates; d++){ - ca[index] = normalAve[index] * netimpactAve[index] - ts2[index]; - cb[index] = ts1[index] - ts2[index]; - cc[index] = -cb[index]; - cd[index] = normalStd[index] * netimpactStd[index]; - ce[index] = -withdrawal[index] + ts2[index]; - osAD << "ATM" << UtilIntToStr(a) << "\t" - << "DATE" << UtilIntToStr(d) << "\t" - << setw(10) << UtilDblToStr(ca[index],0) - << setw(10) << UtilDblToStr(cb[index],0) - << setw(10) << UtilDblToStr(cc[index],0) - << setw(10) << UtilDblToStr(cd[index],0) - << setw(10) << UtilDblToStr(ce[index],0) - << setw(10) << UtilDblToStr(withdrawal[index],0) - << "\n"; - index++; - } - } - - //--- - //--- generate B and K - //--- - //--- f(a,d) = ca*x1[a] + cb*x2[a] + cc*x1[a]*x2[a] + cd*x3[a] + ce - //--- x1,x2 in {0,1}, x3 >= 0 - //--- - //--- sum{a} f(a,d) <= B[d], for d - //--- |{d in D | f(a,d) <= 0} <= K[a], for a - //--- - double x01, x1, x2, x3; - double * f = new double[nAD]; - assert(f); - index = 0; - for(a = 0; a < nAtms; a++){ - x01 = UtilURand(0.0,1.0); - x1 = x01 >= 0.5 ? 1 : 0; - x01 = UtilURand(0.0,1.0); - x2 = x01 >= 0.5 ? 1 : 0; - x3 = UtilURand(0.0,1.0); - printf("x1=%g x2=%g x3=%g\n", x1, x2, x3); - for(d = 0; d < nDates; d++){ - f[index] = ca[index] * x1; - f[index] += cb[index] * x2; - f[index] += cc[index] * x1 * x2; - f[index] += cd[index] * x3; - f[index] += ce[index]; - index++; - } - } - - double * B = new double[nDates]; - double maxB = -1e20; - for(d = 0; d < nDates; d++){ - B[d] = 0; - for(a = 0; a < nAtms; a++){ - B[d] += f[a * nDates + d]; - } - if(B[d] > maxB) maxB=B[d]; - } - //--- - //--- B=budget for cash flow - //--- if negative does not make sense - //--- protect against this - //--- - osD << "d\tB\n"; - for(d = 0; d < nDates; d++){ - if(B[d] < 0) - B[d] = maxB / 2.0; - osD << "DATE" << UtilIntToStr(d) << "\t" - << setw(10) << UtilDblToStr(B[d],0) << endl; - } - - int * K = new int[nAtms]; - int maxK = 0; - index = 0; - for(a = 0; a < nAtms; a++){ - K[a] = 0; - for(d = 0; d < nDates; d++){ - K[a] += f[index] <= 0 ? 
1 : 0; - index++; - } - if(K[a] > maxK) maxK=K[a]; - } - //--- - //--- randomize it (and tighten constraint) - //--- - osA << "a\tK\n"; - for(a = 0; a < nAtms; a++){ - //K[a] -= UtilURand(1, maxK/4); - osA << "ATM" << UtilIntToStr(a) << "\t" - << setw(10) << K[a] << endl; - } - - osAD.close(); - osA.close(); - osD.close(); - - UTIL_DELARR(withdrawal); - UTIL_DELARR(allocation); - UTIL_DELARR(netimpactAve); - UTIL_DELARR(netimpactStd); - UTIL_DELARR(normalAve); - UTIL_DELARR(normalStd); - UTIL_DELARR(ts1); - UTIL_DELARR(ts2); - UTIL_DELARR(ca); - UTIL_DELARR(cb); - UTIL_DELARR(cc); - UTIL_DELARR(cd); - UTIL_DELARR(ce); - UTIL_DELARR(f); - UTIL_DELARR(B); - UTIL_DELARR(K); -} +#define MIN 2 +#define MAX 3 + double s_withdrawal[4] = {1456368, 1457077, 8000, 7080400}; + double s_allocation[4] = {1752334, 2068196, 100000, 7020000}; + double s_netimpactAve[4] = {0.8990607, 1.1961954, 0.0053384, 18.7119586}; + double s_netimpactStd[4] = {1.8979644, 1.4240460, 0.0046809, 54.0086478}; + double s_normalAve[4] = {13527318, 1440849, 38864, 4375539.71}; + double s_normalStd[4] = {352658, 364123, 26833, 1141006}; + double s_ts1[4] = {1267244, 1371637, 25245, 5250885}; + double s_ts2[4] = {1246864, 1361954, 700, 4182207}; + double scale = 1000; + int i; + for (i = 0; i < 4; i++) { + s_withdrawal[i] /= scale; + s_allocation[i] /= scale; + s_normalAve[i] /= scale; + s_normalStd[i] /= scale; + s_ts1[i] /= scale; + s_ts2[i] /= scale; + } + int nAD = nAtms * nDates; + double *withdrawal = new double[nAD]; + double *allocation = new double[nAD]; + double *netimpactAve = new double[nAD]; + double *netimpactStd = new double[nAD]; + double *normalAve = new double[nAD]; + double *normalStd = new double[nAD]; + double *ts1 = new double[nAD]; + double *ts2 = new double[nAD]; + assert(withdrawal && allocation && netimpactAve && netimpactStd && + normalAve && normalStd && ts1 && ts2); + + string fileNameAD = "atm_randAD_"; + fileNameAD += UtilIntToStr(nAtms) + "_"; + fileNameAD += UtilIntToStr(nDates) + "_"; + fileNameAD += UtilIntToStr(seed) + ".txt"; + string fileNameA = "atm_randA_"; + fileNameA += UtilIntToStr(nAtms) + "_"; + fileNameA += UtilIntToStr(nDates) + "_"; + fileNameA += UtilIntToStr(seed) + ".txt"; + string fileNameD = "atm_randD_"; + fileNameD += UtilIntToStr(nAtms) + "_"; + fileNameD += UtilIntToStr(nDates) + "_"; + fileNameD += UtilIntToStr(seed) + ".txt"; + + ofstream osAD, osA, osD; + UtilOpenFile(osAD, fileNameAD.c_str()); + UtilOpenFile(osA, fileNameA.c_str()); + UtilOpenFile(osD, fileNameD.c_str()); + + int a, d; + srand(seed); + //--- + //--- generate 'raw data' in N[mean,std-dev] + //--- + int index = 0; // a * nDates + d + for (a = 0; a < nAtms; a++) { + for (d = 0; d < nDates; d++) { + do { + withdrawal[index] = + UtilNormRand(s_withdrawal[MEAN], s_withdrawal[STDD]); + } while (withdrawal[index] < s_withdrawal[MIN] || + withdrawal[index] > s_withdrawal[MAX]); + do { + allocation[index] = + UtilNormRand(s_allocation[MEAN], s_allocation[STDD]); + } while (allocation[index] < s_allocation[MIN] || + allocation[index] > s_allocation[MAX]); + do { + netimpactAve[index] = + UtilNormRand(s_netimpactAve[MEAN], s_netimpactAve[STDD]); + } while (netimpactAve[index] < s_netimpactAve[MIN] || + netimpactAve[index] > s_netimpactAve[MAX]); + do { + netimpactStd[index] = + UtilNormRand(s_netimpactStd[MEAN], s_netimpactStd[STDD]); + } while (netimpactStd[index] < s_netimpactStd[MIN] || + netimpactStd[index] > s_netimpactStd[MAX]); + do { + normalAve[index] = UtilNormRand(s_normalAve[MEAN], s_normalAve[STDD]); 
+ } while (normalAve[index] < s_normalAve[MIN] || + normalAve[index] > s_normalAve[MAX]); + do { + normalStd[index] = UtilNormRand(s_normalStd[MEAN], s_normalStd[STDD]); + } while (normalStd[index] < s_normalStd[MIN] || + normalStd[index] > s_normalStd[MAX]); + do { + ts1[index] = UtilNormRand(s_ts1[MEAN], s_ts1[STDD]); + } while (ts1[index] < s_ts1[MIN] || ts1[index] > s_ts1[MAX]); + do { + ts2[index] = UtilNormRand(s_ts2[MEAN], s_ts2[STDD]); + } while (ts2[index] < s_ts2[MIN] || ts2[index] > s_ts2[MAX]); + index++; + } + } + + //--- + //--- generate coefficients + //--- + // CA[a,d] = (normal_avg[a,d] * net_impact_avg[a,d] - ts_period_2[a,d]); + // CB[a,d] = (ts_period_1[a,d] - ts_period_2[a,d]); + // CC[a,d] = (ts_period_2[a,d] - ts_period_1[a,d]); + // CD[a,d] = (normal_std[a,d] * net_impact_std[a,d]); + // CE[a,d] = (-actual_withdrawal[a,d] + ts_period_2[a,d]); + double *ca = new double[nAD]; + double *cb = new double[nAD]; + double *cc = new double[nAD]; + double *cd = new double[nAD]; + double *ce = new double[nAD]; + assert(ca && cb && cc && cd && ce); + + index = 0; + osAD << "a\td\tCA\tCB\tCC\tCD\tCD\tCE\tCW\n"; + for (a = 0; a < nAtms; a++) { + for (d = 0; d < nDates; d++) { + ca[index] = normalAve[index] * netimpactAve[index] - ts2[index]; + cb[index] = ts1[index] - ts2[index]; + cc[index] = -cb[index]; + cd[index] = normalStd[index] * netimpactStd[index]; + ce[index] = -withdrawal[index] + ts2[index]; + osAD << "ATM" << UtilIntToStr(a) << "\t" + << "DATE" << UtilIntToStr(d) << "\t" << setw(10) + << UtilDblToStr(ca[index], 0) << setw(10) + << UtilDblToStr(cb[index], 0) << setw(10) + << UtilDblToStr(cc[index], 0) << setw(10) + << UtilDblToStr(cd[index], 0) << setw(10) + << UtilDblToStr(ce[index], 0) << setw(10) + << UtilDblToStr(withdrawal[index], 0) << "\n"; + index++; + } + } + + //--- + //--- generate B and K + //--- + //--- f(a,d) = ca*x1[a] + cb*x2[a] + cc*x1[a]*x2[a] + cd*x3[a] + ce + //--- x1,x2 in {0,1}, x3 >= 0 + //--- + //--- sum{a} f(a,d) <= B[d], for d + //--- |{d in D | f(a,d) <= 0} <= K[a], for a + //--- + double x01, x1, x2, x3; + double *f = new double[nAD]; + assert(f); + index = 0; + for (a = 0; a < nAtms; a++) { + x01 = UtilURand(0.0, 1.0); + x1 = x01 >= 0.5 ? 1 : 0; + x01 = UtilURand(0.0, 1.0); + x2 = x01 >= 0.5 ? 1 : 0; + x3 = UtilURand(0.0, 1.0); + printf("x1=%g x2=%g x3=%g\n", x1, x2, x3); + for (d = 0; d < nDates; d++) { + f[index] = ca[index] * x1; + f[index] += cb[index] * x2; + f[index] += cc[index] * x1 * x2; + f[index] += cd[index] * x3; + f[index] += ce[index]; + index++; + } + } + + double *B = new double[nDates]; + double maxB = -1e20; + for (d = 0; d < nDates; d++) { + B[d] = 0; + for (a = 0; a < nAtms; a++) { + B[d] += f[a * nDates + d]; + } + if (B[d] > maxB) + maxB = B[d]; + } + //--- + //--- B=budget for cash flow + //--- if negative does not make sense + //--- protect against this + //--- + osD << "d\tB\n"; + for (d = 0; d < nDates; d++) { + if (B[d] < 0) + B[d] = maxB / 2.0; + osD << "DATE" << UtilIntToStr(d) << "\t" << setw(10) + << UtilDblToStr(B[d], 0) << endl; + } + + int *K = new int[nAtms]; + int maxK = 0; + index = 0; + for (a = 0; a < nAtms; a++) { + K[a] = 0; + for (d = 0; d < nDates; d++) { + K[a] += f[index] <= 0 ? 
1 : 0; + index++; + } + if (K[a] > maxK) + maxK = K[a]; + } + //--- + //--- randomize it (and tighten constraint) + //--- + osA << "a\tK\n"; + for (a = 0; a < nAtms; a++) { + // K[a] -= UtilURand(1, maxK/4); + osA << "ATM" << UtilIntToStr(a) << "\t" << setw(10) << K[a] << endl; + } + + osAD.close(); + osA.close(); + osD.close(); + + UTIL_DELARR(withdrawal); + UTIL_DELARR(allocation); + UTIL_DELARR(netimpactAve); + UTIL_DELARR(netimpactStd); + UTIL_DELARR(normalAve); + UTIL_DELARR(normalStd); + UTIL_DELARR(ts1); + UTIL_DELARR(ts2); + UTIL_DELARR(ca); + UTIL_DELARR(cb); + UTIL_DELARR(cc); + UTIL_DELARR(cd); + UTIL_DELARR(ce); + UTIL_DELARR(f); + UTIL_DELARR(B); + UTIL_DELARR(K); +} diff --git a/Dip/examples/ATM/ATM_Main.cpp b/Dip/examples/ATM/ATM_Main.cpp index 6f6fc421..e263e1e5 100644 --- a/Dip/examples/ATM/ATM_Main.cpp +++ b/Dip/examples/ATM/ATM_Main.cpp @@ -26,143 +26,134 @@ #include "UtilTimer.h" //===========================================================================// -int main(int argc, char ** argv){ - try{ +int main(int argc, char **argv) { + try { + //--- + //--- create the utility class for parsing parameters + //--- + UtilParameters utilParam(argc, argv); + + bool doGenRandom = utilParam.GetSetting("doGenRandom", false); + int randSeed = utilParam.GetSetting("randSeed", 1); + int randNumAtms = utilParam.GetSetting("randNumAtms", 5); + int randNumDates = utilParam.GetSetting("randNumDates", 10); + + bool doCut = utilParam.GetSetting("doCut", true); + bool doPriceCut = utilParam.GetSetting("doPriceCut", false); + bool doDirect = utilParam.GetSetting("doDirect", false); + + UtilTimer timer; + double timeSetupReal = 0.0; + double timeSetupCpu = 0.0; + double timeSolveReal = 0.0; + double timeSolveCpu = 0.0; + + //--- + //--- start overall timer + //--- + timer.start(); + if (doGenRandom) { + //--- + //--- generate a random instance + //--- + ATM_Instance instance; + instance.generateRandom(randNumAtms, randNumDates, randSeed); + } else { //--- - //--- create the utility class for parsing parameters + //--- create the user application (a DecompApp) //--- - UtilParameters utilParam(argc, argv); - - bool doGenRandom = utilParam.GetSetting("doGenRandom", false); - int randSeed = utilParam.GetSetting("randSeed", 1 ); - int randNumAtms = utilParam.GetSetting("randNumAtms", 5 ); - int randNumDates = utilParam.GetSetting("randNumDates", 10 ); - - bool doCut = utilParam.GetSetting("doCut", true); - bool doPriceCut = utilParam.GetSetting("doPriceCut", false); - bool doDirect = utilParam.GetSetting("doDirect", false); - - UtilTimer timer; - double timeSetupReal = 0.0; - double timeSetupCpu = 0.0; - double timeSolveReal = 0.0; - double timeSolveCpu = 0.0; + ATM_DecompApp atm(utilParam); //--- - //--- start overall timer + //--- create the algorithm (a DecompAlgo) //--- - timer.start(); - if(doGenRandom){ - //--- - //--- generate a random instance - //--- - ATM_Instance instance; - instance.generateRandom(randNumAtms, randNumDates, randSeed); - } - else{ - //--- - //--- create the user application (a DecompApp) - //--- - ATM_DecompApp atm(utilParam); - - //--- - //--- create the algorithm (a DecompAlgo) - //--- - DecompAlgo * algo = NULL; - assert(doCut + doPriceCut == 1); - - //--- - //--- create the CPM algorithm object - //--- - if(doCut) - algo = new DecompAlgoC(&atm, utilParam); - - //--- - //--- create the PC algorithm object - //--- - if(doPriceCut) - algo = new DecompAlgoPC(&atm, utilParam); - - - if(doCut && doDirect){ - timer.stop(); - timeSetupCpu = timer.getCpuTime(); - 
timeSetupReal = timer.getRealTime(); - - //--- - //--- solve - //--- - timer.start(); - algo->solveDirect(); - timer.stop(); - timeSolveCpu = timer.getCpuTime(); - timeSolveReal = timer.getRealTime(); - } - else{ - //--- - //--- create the driver AlpsDecomp model - //--- - int status = 0; - AlpsDecompModel alpsModel(utilParam, algo); - - timer.stop(); - timeSetupCpu = timer.getCpuTime(); - timeSetupReal = timer.getRealTime(); - - //--- - //--- solve - //--- - timer.start(); - status = alpsModel.solve(); - timer.stop(); - timeSolveCpu = timer.getCpuTime(); - timeSolveReal = timer.getRealTime(); - - //TODO: move doDirect solve into alpsModel so access - // solution the same way? - - //--- - //--- sanity check - //--- - cout << setiosflags(ios::fixed|ios::showpoint); - cout << "Status= " << status - << " BestLB= " << setw(10) - << UtilDblToStr(alpsModel.getGlobalLB(),2) - << " BestUB= " << setw(10) - << UtilDblToStr(alpsModel.getGlobalUB(),2) - << " Nodes= " << setw(6) - << alpsModel.getNumNodesProcessed() - << " SetupCPU= " << timeSetupCpu - << " SolveCPU= " << timeSolveCpu - << " TotalCPU= " << timeSetupCpu + timeSolveCpu - << " SetupReal= " << timeSetupReal - << " SolveReal= " << timeSolveReal - << " TotalReal= " << timeSetupReal + timeSolveReal - << endl; - - //--- - //--- now, initialize direct solve with best - //--- solution to PC - //--- TODO: only useful if stop early on time or nodes - //--- TODO: cbc currently doesn't use warm-start, only cpx - //--- - //DecompAlgo * algoC = new DecompAlgoC(&atm, &utilParam); - //algoC->solveDirect(algo->getXhatIPBest()); - //delete algoC; - } - - //--- - //--- free local memory - //--- - delete algo; + DecompAlgo *algo = NULL; + assert(doCut + doPriceCut == 1); + + //--- + //--- create the CPM algorithm object + //--- + if (doCut) + algo = new DecompAlgoC(&atm, utilParam); + + //--- + //--- create the PC algorithm object + //--- + if (doPriceCut) + algo = new DecompAlgoPC(&atm, utilParam); + + if (doCut && doDirect) { + timer.stop(); + timeSetupCpu = timer.getCpuTime(); + timeSetupReal = timer.getRealTime(); + + //--- + //--- solve + //--- + timer.start(); + algo->solveDirect(); + timer.stop(); + timeSolveCpu = timer.getCpuTime(); + timeSolveReal = timer.getRealTime(); + } else { + //--- + //--- create the driver AlpsDecomp model + //--- + int status = 0; + AlpsDecompModel alpsModel(utilParam, algo); + + timer.stop(); + timeSetupCpu = timer.getCpuTime(); + timeSetupReal = timer.getRealTime(); + + //--- + //--- solve + //--- + timer.start(); + status = alpsModel.solve(); + timer.stop(); + timeSolveCpu = timer.getCpuTime(); + timeSolveReal = timer.getRealTime(); + + // TODO: move doDirect solve into alpsModel so access + // solution the same way? 
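As a usage aside for the summary printed just below: the same AlpsDecompModel accessors also give a relative optimality gap. The snippet is illustrative only; it assumes fabs from <cmath> is available in this translation unit, and the variable names are not part of the codebase.

// Illustrative: relative gap implied by the global bounds reported below.
double bestLB = alpsModel.getGlobalLB();
double bestUB = alpsModel.getGlobalUB();
double relGap = (bestUB - bestLB) / (1.0e-10 + std::fabs(bestUB));
cout << "RelGap= " << UtilDblToStr(relGap, 4) << endl;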
+ + //--- + //--- sanity check + //--- + cout << setiosflags(ios::fixed | ios::showpoint); + cout << "Status= " << status << " BestLB= " << setw(10) + << UtilDblToStr(alpsModel.getGlobalLB(), 2) + << " BestUB= " << setw(10) + << UtilDblToStr(alpsModel.getGlobalUB(), 2) + << " Nodes= " << setw(6) << alpsModel.getNumNodesProcessed() + << " SetupCPU= " << timeSetupCpu << " SolveCPU= " << timeSolveCpu + << " TotalCPU= " << timeSetupCpu + timeSolveCpu + << " SetupReal= " << timeSetupReal + << " SolveReal= " << timeSolveReal + << " TotalReal= " << timeSetupReal + timeSolveReal << endl; + + //--- + //--- now, initialize direct solve with best + //--- solution to PC + //--- TODO: only useful if stop early on time or nodes + //--- TODO: cbc currently doesn't use warm-start, only cpx + //--- + // DecompAlgo * algoC = new DecompAlgoC(&atm, &utilParam); + // algoC->solveDirect(algo->getXhatIPBest()); + // delete algoC; } - } - catch(CoinError & ex){ - cerr << "COIN Exception [ " << ex.message() << " ]" - << " at " << ex.fileName() << ":L" << ex.lineNumber() - << " in " << ex.className() << "::" << ex.methodName() << endl; - } - return 0; + + //--- + //--- free local memory + //--- + delete algo; + } + } catch (CoinError &ex) { + cerr << "COIN Exception [ " << ex.message() << " ]" + << " at " << ex.fileName() << ":L" << ex.lineNumber() << " in " + << ex.className() << "::" << ex.methodName() << endl; + } + return 0; } - diff --git a/Dip/examples/GAP/GAP_DecompApp.cpp b/Dip/examples/GAP/GAP_DecompApp.cpp index 2250cea3..15ca2212 100644 --- a/Dip/examples/GAP/GAP_DecompApp.cpp +++ b/Dip/examples/GAP/GAP_DecompApp.cpp @@ -13,282 +13,264 @@ //===========================================================================// //===========================================================================// -#include "GAP_Status.h" #include "GAP_DecompApp.h" +#include "GAP_Status.h" //===========================================================================// #include "DecompVar.h" //===========================================================================// -void GAP_DecompApp::initializeApp() -{ - UtilPrintFuncBegin(m_osLog, m_classTag, - "initializeApp()", m_appParam.LogLevel, 2); +void GAP_DecompApp::initializeApp() { + UtilPrintFuncBegin(m_osLog, m_classTag, "initializeApp()", + m_appParam.LogLevel, 2); - //--- - //--- read instance - // - string instanceFile = m_appParam.DataDir - + UtilDirSlash() + m_appParam.Instance; - m_instance.readInstance(instanceFile); - //--- - //--- read best known lb/ub - //--- - string bestKnownFile = m_appParam.DataDir + UtilDirSlash() + "gap.opt"; - m_instance.readBestKnown(bestKnownFile, m_appParam.Instance); - setBestKnownLB(m_instance.getBestKnownLB()); - setBestKnownUB(m_instance.getBestKnownUB()); - //--- - //--- open space for GAP_Knapsack objects - //--- - int k; - const int nTasks = m_instance.getNTasks(); - const int nMachines = m_instance.getNMachines(); - const int* capacity = m_instance.getCapacity(); - const int* weight = m_instance.getWeight(); - const int* profit = m_instance.getProfit(); - GAP_KnapPisinger* knapK = 0; - m_knap.reserve(nMachines); + //--- + //--- read instance + // + string instanceFile = + m_appParam.DataDir + UtilDirSlash() + m_appParam.Instance; + m_instance.readInstance(instanceFile); + //--- + //--- read best known lb/ub + //--- + string bestKnownFile = m_appParam.DataDir + UtilDirSlash() + "gap.opt"; + m_instance.readBestKnown(bestKnownFile, m_appParam.Instance); + setBestKnownLB(m_instance.getBestKnownLB()); + 
setBestKnownUB(m_instance.getBestKnownUB()); + //--- + //--- open space for GAP_Knapsack objects + //--- + int k; + const int nTasks = m_instance.getNTasks(); + const int nMachines = m_instance.getNMachines(); + const int *capacity = m_instance.getCapacity(); + const int *weight = m_instance.getWeight(); + const int *profit = m_instance.getProfit(); + GAP_KnapPisinger *knapK = 0; + m_knap.reserve(nMachines); - for (k = 0; k < nMachines; k++) { - knapK = new GAP_KnapPisinger(nTasks, - capacity[k], - weight + (k * nTasks), - profit + (k * nTasks)); - m_knap.push_back(knapK); - } + for (k = 0; k < nMachines; k++) { + knapK = new GAP_KnapPisinger(nTasks, capacity[k], weight + (k * nTasks), + profit + (k * nTasks)); + m_knap.push_back(knapK); + } - //--- - //--- create models - //--- - createModels(); - UtilPrintFuncEnd(m_osLog, m_classTag, - "initializeApp()", m_appParam.LogLevel, 2); + //--- + //--- create models + //--- + createModels(); + UtilPrintFuncEnd(m_osLog, m_classTag, "initializeApp()", m_appParam.LogLevel, + 2); } // --------------------------------------------------------------------- // -int GAP_DecompApp::createModelPartAP(DecompConstraintSet* model) -{ - int i, j, colIndex; - int status = GAPStatusOk; - int nTasks = m_instance.getNTasks(); //n - int nMachines = m_instance.getNMachines(); //m - int nCols = nTasks * nMachines; - int nRows = nTasks; - UtilPrintFuncBegin(m_osLog, m_classTag, - "createModelPartAP()", m_appParam.LogLevel, 2); - model->M = new CoinPackedMatrix(false, 0.0, 0.0); - CoinAssertHint(model->M, "Error: Out of Memory"); - model->M->setDimensions(0, nCols); - model->reserve(nRows, nCols); +int GAP_DecompApp::createModelPartAP(DecompConstraintSet *model) { + int i, j, colIndex; + int status = GAPStatusOk; + int nTasks = m_instance.getNTasks(); // n + int nMachines = m_instance.getNMachines(); // m + int nCols = nTasks * nMachines; + int nRows = nTasks; + UtilPrintFuncBegin(m_osLog, m_classTag, "createModelPartAP()", + m_appParam.LogLevel, 2); + model->M = new CoinPackedMatrix(false, 0.0, 0.0); + CoinAssertHint(model->M, "Error: Out of Memory"); + model->M->setDimensions(0, nCols); + model->reserve(nRows, nCols); - //--- - //--- m is number of machines (index i) - //--- n is number of tasks (index j) - //--- - //--- sum{i in 1..m} x[i,j] = 1, j in 1..n - //--- - //--- Example structure: m=3, n=4 - //--- x x x = 1 [j=1] - //--- x x x = 1 [j=2] - //--- x x x = 1 [j=3] - //--- x x x = 1 [j=4] - //--- - for (j = 0; j < nTasks; j++) { - CoinPackedVector row; - string rowName = "a(j_" + UtilIntToStr(j) + ")"; + //--- + //--- m is number of machines (index i) + //--- n is number of tasks (index j) + //--- + //--- sum{i in 1..m} x[i,j] = 1, j in 1..n + //--- + //--- Example structure: m=3, n=4 + //--- x x x = 1 [j=1] + //--- x x x = 1 [j=2] + //--- x x x = 1 [j=3] + //--- x x x = 1 [j=4] + //--- + for (j = 0; j < nTasks; j++) { + CoinPackedVector row; + string rowName = "a(j_" + UtilIntToStr(j) + ")"; - for (i = 0; i < nMachines; i++) { - colIndex = getIndexIJ(i, j); - row.insert(colIndex, 1.0); - } + for (i = 0; i < nMachines; i++) { + colIndex = getIndexIJ(i, j); + row.insert(colIndex, 1.0); + } - model->appendRow(row, 1.0, 1.0, rowName); - } + model->appendRow(row, 1.0, 1.0, rowName); + } - //--- - //--- set the col upper and lower bounds - //--- - UtilFillN(model->colLB, nCols, 0.0); - UtilFillN(model->colUB, nCols, 1.0); - //--- - //--- set column names for debugging - //--- - colIndex = 0; + //--- + //--- set the col upper and lower bounds + //--- + 
UtilFillN(model->colLB, nCols, 0.0); + UtilFillN(model->colUB, nCols, 1.0); + //--- + //--- set column names for debugging + //--- + colIndex = 0; - for (i = 0; i < nMachines; i++) { - for (j = 0; j < nTasks; j++) { - string colName = "x(" - + UtilIntToStr(colIndex) + "_" - + UtilIntToStr(i) + "," + UtilIntToStr(j) + ")"; - model->colNames.push_back(colName); - colIndex++; - } - } + for (i = 0; i < nMachines; i++) { + for (j = 0; j < nTasks; j++) { + string colName = "x(" + UtilIntToStr(colIndex) + "_" + UtilIntToStr(i) + + "," + UtilIntToStr(j) + ")"; + model->colNames.push_back(colName); + colIndex++; + } + } - //--- - //--- set the indices of the integer variables of model - //--- - UtilIotaN(model->integerVars, nCols, 0); - UtilPrintFuncEnd(m_osLog, m_classTag, - "createModelPartAP()", m_appParam.LogLevel, 2); - return status; + //--- + //--- set the indices of the integer variables of model + //--- + UtilIotaN(model->integerVars, nCols, 0); + UtilPrintFuncEnd(m_osLog, m_classTag, "createModelPartAP()", + m_appParam.LogLevel, 2); + return status; } // --------------------------------------------------------------------- // -int GAP_DecompApp::createModels() -{ - //--- - //--- This function does the work to create the different models - //--- that will be used. This memory is owned by the user. It will - //--- be passed to the application interface and used by the algorithms. - //--- - UtilPrintFuncBegin(m_osLog, m_classTag, - "createModels()", m_appParam.LogLevel, 2); - //--- - //--- Generalized Assignment Problem (GAP) - //--- m is number of machines (index i) - //--- n is number of tasks (index j) - //--- - //--- min sum{i in 1..m, j in 1..n} p[i,j] x[i,j] - //--- s.t. sum{ j in 1..n} w[i,j] x[i,j] <= b[i], i in 1..m - //--- sum{i in 1..m } x[i,j] = 1 , j in 1..n - //--- x[i,j] in {0,1}, i in 1..m, j in 1..n - //--- - //--- Example structure: m=3, n=4 - //--- xxxx <= b[i=1] - //--- xxxx <= b[i=2] - //--- xxxx <= b[i=3] - //--- x x x = 1 [j=1] - //--- x x x = 1 [j=2] - //--- x x x = 1 [j=3] - //--- x x x = 1 [j=4] - //--- - //--- - //--- Get information about this problem instance. - //-- - int i; - string modelName; - int status = GAPStatusOk; - int nTasks = m_instance.getNTasks(); //n - int nMachines = m_instance.getNMachines(); //m - const int* profit = m_instance.getProfit(); - int nCols = nTasks * nMachines; - //--- - //--- Construct the objective function (the original problem is - //--- a maximization, so we flip the sign to make it minimization). - //--- - m_objective = new double[nCols]; - assert(m_objective); +int GAP_DecompApp::createModels() { + //--- + //--- This function does the work to create the different models + //--- that will be used. This memory is owned by the user. It will + //--- be passed to the application interface and used by the algorithms. + //--- + UtilPrintFuncBegin(m_osLog, m_classTag, "createModels()", m_appParam.LogLevel, + 2); + //--- + //--- Generalized Assignment Problem (GAP) + //--- m is number of machines (index i) + //--- n is number of tasks (index j) + //--- + //--- min sum{i in 1..m, j in 1..n} p[i,j] x[i,j] + //--- s.t. sum{ j in 1..n} w[i,j] x[i,j] <= b[i], i in 1..m + //--- sum{i in 1..m } x[i,j] = 1 , j in 1..n + //--- x[i,j] in {0,1}, i in 1..m, j in 1..n + //--- + //--- Example structure: m=3, n=4 + //--- xxxx <= b[i=1] + //--- xxxx <= b[i=2] + //--- xxxx <= b[i=3] + //--- x x x = 1 [j=1] + //--- x x x = 1 [j=2] + //--- x x x = 1 [j=3] + //--- x x x = 1 [j=4] + //--- + //--- + //--- Get information about this problem instance. 
+ //-- + int i; + string modelName; + int status = GAPStatusOk; + int nTasks = m_instance.getNTasks(); // n + int nMachines = m_instance.getNMachines(); // m + const int *profit = m_instance.getProfit(); + int nCols = nTasks * nMachines; + //--- + //--- Construct the objective function (the original problem is + //--- a maximization, so we flip the sign to make it minimization). + //--- + m_objective = new double[nCols]; + assert(m_objective); - if (!m_objective) { - return GAPStatusOutOfMemory; - } + if (!m_objective) { + return GAPStatusOutOfMemory; + } - for (i = 0; i < nCols; i++) { - m_objective[i] = profit[i]; - } + for (i = 0; i < nCols; i++) { + m_objective[i] = profit[i]; + } - //--- - //--- A'[i] for i=1..m: m independent knapsacks - //--- sum{j in 1..n} w[i,j] x[i,j] <= b[i] - //--- x[i,j] in {0,1}, i in 1..m, j in 1..n - //--- - //--- A'': - //--- sum{i in 1..m} x[i,j] = 1, j in 1..n - //--- - //--- Example structure: m=3, n=4 - //--- A'[i=1]: - //--- xxxx <= b[i=1] - //--- A'[i=2]: - //--- xxxx <= b[i=2] - //--- A'[i=3]: - //--- xxxx <= b[i=3] - //--- - //--- A'': - //--- x x x = 1 [j=1] - //--- x x x = 1 [j=2] - //--- x x x = 1 [j=3] - //--- x x x = 1 [j=4] - //--- - setModelObjective(m_objective, nCols); - DecompConstraintSet* modelCore = new DecompConstraintSet(); - status = createModelPartAP(modelCore); + //--- + //--- A'[i] for i=1..m: m independent knapsacks + //--- sum{j in 1..n} w[i,j] x[i,j] <= b[i] + //--- x[i,j] in {0,1}, i in 1..m, j in 1..n + //--- + //--- A'': + //--- sum{i in 1..m} x[i,j] = 1, j in 1..n + //--- + //--- Example structure: m=3, n=4 + //--- A'[i=1]: + //--- xxxx <= b[i=1] + //--- A'[i=2]: + //--- xxxx <= b[i=2] + //--- A'[i=3]: + //--- xxxx <= b[i=3] + //--- + //--- A'': + //--- x x x = 1 [j=1] + //--- x x x = 1 [j=2] + //--- x x x = 1 [j=3] + //--- x x x = 1 [j=4] + //--- + setModelObjective(m_objective, nCols); + DecompConstraintSet *modelCore = new DecompConstraintSet(); + status = createModelPartAP(modelCore); - if (status) { - return status; - } + if (status) { + return status; + } - setModelCore(modelCore, "AP"); - m_models.insert(make_pair("AP", modelCore)); + setModelCore(modelCore, "AP"); + m_models.insert(make_pair("AP", modelCore)); - for (i = 0; i < nMachines; i++) { - modelName = "KP" + UtilIntToStr(i); - setModelRelax(NULL, modelName, i); - } + for (i = 0; i < nMachines; i++) { + modelName = "KP" + UtilIntToStr(i); + setModelRelax(NULL, modelName, i); + } - UtilPrintFuncEnd(m_osLog, m_classTag, - "createModels()", m_appParam.LogLevel, 2); - return status; + UtilPrintFuncEnd(m_osLog, m_classTag, "createModels()", m_appParam.LogLevel, + 2); + return status; } //--------------------------------------------------------------------- // -DecompSolverStatus -GAP_DecompApp::solveRelaxed(const int whichBlock, - const double* redCostX, - const double target, - list& vars) -{ - if (!m_appParam.UsePisinger) { - return DecompSolStatNoSolution; - } +DecompSolverStatus GAP_DecompApp::solveRelaxed(const int whichBlock, + const double *redCostX, + const double target, + list &vars) { + if (!m_appParam.UsePisinger) { + return DecompSolStatNoSolution; + } - UtilPrintFuncBegin(m_osLog, m_classTag, - "solveRelaxed()", m_appParam.LogLevel, 2); - vector solInd; - vector solEls; - double varRedCost = 0.0; - double varOrigCost = 0.0; - double* origCost = m_objective; - const double* redCostXB = redCostX + getOffsetI(whichBlock); - const double* origCostB = origCost + getOffsetI(whichBlock); - //--- - //--- print out red cost - //--- - /*{ - int j; - const int 
nTasks = m_instance.getNTasks(); - const int * weight = m_instance.getWeight() + getOffsetI(b); - for(j = 0; j < nTasks; j++){ - printf("RedCost[j=%d, wt=%d]: %g\n", j, weight[j], redCostXB[j]); - } - }*/ - m_knap[whichBlock]->solve(whichBlock, - redCostXB, - origCostB, - solInd, - solEls, - varRedCost, - varOrigCost); - //printf("b=%d alpha = %g\n", b, alpha); - //printf("b=%d varRedCost - alpha = %g\n", b, varRedCost - alpha); - //printf("b=%d varOrigCost = %g\n", b, varOrigCost); - UTIL_DEBUG(m_appParam.LogLevel, 4, - printf("PUSH var with RC = %g\n", varRedCost); - ); - DecompVar* var = new DecompVar(solInd, solEls, - varRedCost, varOrigCost); - var->setBlockId(whichBlock); - vars.push_back(var); - UtilPrintFuncEnd(m_osLog, m_classTag, - "APPsolveRelaxed()", m_appParam.LogLevel, 2); - return DecompSolStatOptimal; + UtilPrintFuncBegin(m_osLog, m_classTag, "solveRelaxed()", m_appParam.LogLevel, + 2); + vector solInd; + vector solEls; + double varRedCost = 0.0; + double varOrigCost = 0.0; + double *origCost = m_objective; + const double *redCostXB = redCostX + getOffsetI(whichBlock); + const double *origCostB = origCost + getOffsetI(whichBlock); + //--- + //--- print out red cost + //--- + /*{ + int j; + const int nTasks = m_instance.getNTasks(); + const int * weight = m_instance.getWeight() + getOffsetI(b); + for(j = 0; j < nTasks; j++){ + printf("RedCost[j=%d, wt=%d]: %g\n", j, weight[j], redCostXB[j]); + } + }*/ + m_knap[whichBlock]->solve(whichBlock, redCostXB, origCostB, solInd, solEls, + varRedCost, varOrigCost); + // printf("b=%d alpha = %g\n", b, alpha); + // printf("b=%d varRedCost - alpha = %g\n", b, varRedCost - alpha); + // printf("b=%d varOrigCost = %g\n", b, varOrigCost); + UTIL_DEBUG(m_appParam.LogLevel, 4, + printf("PUSH var with RC = %g\n", varRedCost);); + DecompVar *var = new DecompVar(solInd, solEls, varRedCost, varOrigCost); + var->setBlockId(whichBlock); + vars.push_back(var); + UtilPrintFuncEnd(m_osLog, m_classTag, "APPsolveRelaxed()", + m_appParam.LogLevel, 2); + return DecompSolStatOptimal; } - //--------------------------------------------------------------------- // -void GAP_DecompApp::printOriginalColumn(const int index, - ostream* os) const -{ - pair p = m_instance.getIndexInv(index); - (*os) << "x[ " << index << " : " << p.first << " , " << p.second << " ]"; +void GAP_DecompApp::printOriginalColumn(const int index, ostream *os) const { + pair p = m_instance.getIndexInv(index); + (*os) << "x[ " << index << " : " << p.first << " , " << p.second << " ]"; } diff --git a/Dip/examples/GAP/GAP_DecompApp3.cpp b/Dip/examples/GAP/GAP_DecompApp3.cpp index 40dcb70c..52d0bf55 100644 --- a/Dip/examples/GAP/GAP_DecompApp3.cpp +++ b/Dip/examples/GAP/GAP_DecompApp3.cpp @@ -13,365 +13,355 @@ //===========================================================================// //===========================================================================// -#include "GAP_Status.h" #include "GAP_DecompApp3.h" +#include "GAP_Status.h" //===========================================================================// #include "DecompVar.h" //===========================================================================// -void GAP_DecompApp::initializeApp(UtilParameters& utilParam) -{ - UtilPrintFuncBegin(m_osLog, m_classTag, - "initializeApp()", m_appParam.LogLevel, 2); - //--- - //--- get application parameters - //--- - m_appParam.getSettings(utilParam); - - if (m_appParam.LogLevel >= 1) { - m_appParam.dumpSettings(m_osLog); - } - - //--- - //--- read instance - // - string instanceFile = 
m_appParam.DataDir - + UtilDirSlash() + m_appParam.Instance; - m_instance.readInstance(instanceFile); - //--- - //--- read best known lb/ub (for debugging) - //--- - string bestKnownFile = m_appParam.DataDir + UtilDirSlash() + "gap.opt"; - m_instance.readBestKnown(bestKnownFile, m_appParam.Instance); - setBestKnownLB(m_instance.getBestKnownLB()); - setBestKnownUB(m_instance.getBestKnownUB()); - //--- - //--- create models - //--- - createModels(); - UtilPrintFuncEnd(m_osLog, m_classTag, - "initializeApp()", m_appParam.LogLevel, 2); +void GAP_DecompApp::initializeApp(UtilParameters &utilParam) { + UtilPrintFuncBegin(m_osLog, m_classTag, "initializeApp()", + m_appParam.LogLevel, 2); + //--- + //--- get application parameters + //--- + m_appParam.getSettings(utilParam); + + if (m_appParam.LogLevel >= 1) { + m_appParam.dumpSettings(m_osLog); + } + + //--- + //--- read instance + // + string instanceFile = + m_appParam.DataDir + UtilDirSlash() + m_appParam.Instance; + m_instance.readInstance(instanceFile); + //--- + //--- read best known lb/ub (for debugging) + //--- + string bestKnownFile = m_appParam.DataDir + UtilDirSlash() + "gap.opt"; + m_instance.readBestKnown(bestKnownFile, m_appParam.Instance); + setBestKnownLB(m_instance.getBestKnownLB()); + setBestKnownUB(m_instance.getBestKnownUB()); + //--- + //--- create models + //--- + createModels(); + UtilPrintFuncEnd(m_osLog, m_classTag, "initializeApp()", m_appParam.LogLevel, + 2); } // --------------------------------------------------------------------- // -int GAP_DecompApp::createModelPartAP(DecompConstraintSet* model) -{ - int i, j, colIndex; - int status = GAPStatusOk; - int nTasks = m_instance.getNTasks(); //n - int nMachines = m_instance.getNMachines(); //m - int nCols = nTasks * nMachines; - int nRows = nTasks; - UtilPrintFuncBegin(m_osLog, m_classTag, - "createModelPartAP()", m_appParam.LogLevel, 2); - //--- - //--- Build the core model constraints (AP = assignment problem). - //--- - //--- m is number of machines (index i) - //--- n is number of tasks (index j) - //--- - //--- sum{i in 1..m} x[i,j] = 1, j in 1..n - //--- - //--- Example structure: m=3, n=4 - //--- x x x = 1 [j=1] - //--- x x x = 1 [j=2] - //--- x x x = 1 [j=3] - //--- x x x = 1 [j=4] - //--- - //--- - //--- Allocate an empty row-ordered CoinPackedMatrix. Since we plan - //--- to add rows, set the column dimension and let the row dimension - //--- be set dynamically. 
- //--- - model->M = new CoinPackedMatrix(false, 0.0, 0.0); - CoinAssertHint(model->M, "Error: Out of Memory"); - model->M->setDimensions(0, nCols); - //--- - //--- we know the sizes needed, so reserve space for them (for efficiency) - //--- - model->reserve(nRows, nCols); - - //--- - //--- create one row per task - //--- rowNames are not needed, they are used for debugging - //--- - for (j = 0; j < nTasks; j++) { - CoinPackedVector row; - string rowName = "a(j_" + UtilIntToStr(j) + ")"; - - for (i = 0; i < nMachines; i++) { - colIndex = getIndexIJ(i, j); - row.insert(colIndex, 1.0); - } - - model->appendRow(row, 1.0, 1.0, rowName); - } - - //--- - //--- set the col upper and lower bounds (all in [0,1]) - //--- - UtilFillN(model->colLB, nCols, 0.0); - UtilFillN(model->colUB, nCols, 1.0); - //--- - //--- set the indices of the integer variables of model - //--- (all vars are binary) - //--- - UtilIotaN(model->integerVars, nCols, 0); - //--- - //--- set column names for debugging - //--- - colIndex = 0; - - for (i = 0; i < nMachines; i++) { - for (j = 0; j < nTasks; j++) { - string colName = "x(" - + UtilIntToStr(colIndex) + "_" - + UtilIntToStr(i) + "," + UtilIntToStr(j) + ")"; - model->colNames.push_back(colName); - colIndex++; - } - } - - UtilPrintFuncEnd(m_osLog, m_classTag, - "createModelPartAP()", m_appParam.LogLevel, 2); - return status; +int GAP_DecompApp::createModelPartAP(DecompConstraintSet *model) { + int i, j, colIndex; + int status = GAPStatusOk; + int nTasks = m_instance.getNTasks(); // n + int nMachines = m_instance.getNMachines(); // m + int nCols = nTasks * nMachines; + int nRows = nTasks; + UtilPrintFuncBegin(m_osLog, m_classTag, "createModelPartAP()", + m_appParam.LogLevel, 2); + //--- + //--- Build the core model constraints (AP = assignment problem). + //--- + //--- m is number of machines (index i) + //--- n is number of tasks (index j) + //--- + //--- sum{i in 1..m} x[i,j] = 1, j in 1..n + //--- + //--- Example structure: m=3, n=4 + //--- x x x = 1 [j=1] + //--- x x x = 1 [j=2] + //--- x x x = 1 [j=3] + //--- x x x = 1 [j=4] + //--- + //--- + //--- Allocate an empty row-ordered CoinPackedMatrix. Since we plan + //--- to add rows, set the column dimension and let the row dimension + //--- be set dynamically. 
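  // Note (assuming the usual CoinUtils semantics for this constructor):
  // CoinPackedMatrix(false, 0.0, 0.0) creates an empty *row-ordered* matrix
  // (first argument colordered = false) with no extra space reserved (the two
  // 0.0 "extra" arguments). setDimensions(0, nCols) then fixes the column
  // count, while each appendRow() below grows the row dimension one
  // assignment constraint at a time.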
+ //--- + model->M = new CoinPackedMatrix(false, 0.0, 0.0); + CoinAssertHint(model->M, "Error: Out of Memory"); + model->M->setDimensions(0, nCols); + //--- + //--- we know the sizes needed, so reserve space for them (for efficiency) + //--- + model->reserve(nRows, nCols); + + //--- + //--- create one row per task + //--- rowNames are not needed, they are used for debugging + //--- + for (j = 0; j < nTasks; j++) { + CoinPackedVector row; + string rowName = "a(j_" + UtilIntToStr(j) + ")"; + + for (i = 0; i < nMachines; i++) { + colIndex = getIndexIJ(i, j); + row.insert(colIndex, 1.0); + } + + model->appendRow(row, 1.0, 1.0, rowName); + } + + //--- + //--- set the col upper and lower bounds (all in [0,1]) + //--- + UtilFillN(model->colLB, nCols, 0.0); + UtilFillN(model->colUB, nCols, 1.0); + //--- + //--- set the indices of the integer variables of model + //--- (all vars are binary) + //--- + UtilIotaN(model->integerVars, nCols, 0); + //--- + //--- set column names for debugging + //--- + colIndex = 0; + + for (i = 0; i < nMachines; i++) { + for (j = 0; j < nTasks; j++) { + string colName = "x(" + UtilIntToStr(colIndex) + "_" + UtilIntToStr(i) + + "," + UtilIntToStr(j) + ")"; + model->colNames.push_back(colName); + colIndex++; + } + } + + UtilPrintFuncEnd(m_osLog, m_classTag, "createModelPartAP()", + m_appParam.LogLevel, 2); + return status; } //===========================================================================// -int GAP_DecompApp::createModelPartKP(DecompConstraintSet* model) -{ - //--- - //--- helper method - create model with nMachines KPs - //--- - vector whichKnaps; - int nMachines = m_instance.getNMachines(); - UtilIotaN(whichKnaps, nMachines, 0); - return createModelPartKP(model, whichKnaps); +int GAP_DecompApp::createModelPartKP(DecompConstraintSet *model) { + //--- + //--- helper method - create model with nMachines KPs + //--- + vector whichKnaps; + int nMachines = m_instance.getNMachines(); + UtilIotaN(whichKnaps, nMachines, 0); + return createModelPartKP(model, whichKnaps); } //===========================================================================// -int GAP_DecompApp::createModelPartKP(DecompConstraintSet* model, - int whichKnap) -{ - //--- - //--- helper method - create model with one (whichKnap) KP - //--- - vector whichKnaps; - whichKnaps.push_back(whichKnap); - return createModelPartKP(model, whichKnaps); +int GAP_DecompApp::createModelPartKP(DecompConstraintSet *model, + int whichKnap) { + //--- + //--- helper method - create model with one (whichKnap) KP + //--- + vector whichKnaps; + whichKnaps.push_back(whichKnap); + return createModelPartKP(model, whichKnaps); } //===========================================================================// -int GAP_DecompApp::createModelPartKP(DecompConstraintSet* model, - vector& whichKnaps) -{ - int i, j, b, colIndex; - int status = GAPStatusOk; - int nTasks = m_instance.getNTasks(); //n - int nMachines = m_instance.getNMachines(); //m - int nKnaps = static_cast(whichKnaps.size()); - const int* weight = m_instance.getWeight(); - const int* capacity = m_instance.getCapacity(); - int nCols = nTasks * nMachines; - int nRows = nKnaps; - UtilPrintFuncBegin(m_osLog, m_classTag, - "createModelPartKP()", m_appParam.LogLevel, 2); - //--- - //--- Build the relax model constraints (KP = assignment problem). 
- //--- - //--- m is number of machines (index i) - //--- n is number of tasks (index j) - //--- - //--- sum{j in 1..n} w[i,j] x[i,j] <= b[i], i in 1..m - //--- x[i,j] in {0,1}, i in 1..m, j in 1..n - //--- - //--- Example structure: m=3, n=4 - //--- xxxx <= b[i=1] - //--- xxxx <= b[i=2] - //--- xxxx <= b[i=3] - //--- - //--- - //--- Allocate an empty row-ordered CoinPackedMatrix. Since we plan - //--- to add rows, set the column dimension and let the row dimension - //--- be set dynamically. - //--- - model->M = new CoinPackedMatrix(false, 0.0, 0.0); - CoinAssertHint(model->M, "Error: Out of Memory"); - model->M->setDimensions(0, nCols); - //--- - //--- we know the sizes needed, so reserve space for them (for efficiency) - //--- - model->reserve(nRows, nCols); - //--- - //--- create one row per knapsack - //--- rowNames are not needed, they are used for debugging - //--- - vector::iterator it; - - for (it = whichKnaps.begin(); it != whichKnaps.end(); it++) { - i = *it; - CoinPackedVector row; - string rowName = "k(i_" + UtilIntToStr(i) + ")"; - +int GAP_DecompApp::createModelPartKP(DecompConstraintSet *model, + vector &whichKnaps) { + int i, j, b, colIndex; + int status = GAPStatusOk; + int nTasks = m_instance.getNTasks(); // n + int nMachines = m_instance.getNMachines(); // m + int nKnaps = static_cast(whichKnaps.size()); + const int *weight = m_instance.getWeight(); + const int *capacity = m_instance.getCapacity(); + int nCols = nTasks * nMachines; + int nRows = nKnaps; + UtilPrintFuncBegin(m_osLog, m_classTag, "createModelPartKP()", + m_appParam.LogLevel, 2); + //--- + //--- Build the relax model constraints (KP = assignment problem). + //--- + //--- m is number of machines (index i) + //--- n is number of tasks (index j) + //--- + //--- sum{j in 1..n} w[i,j] x[i,j] <= b[i], i in 1..m + //--- x[i,j] in {0,1}, i in 1..m, j in 1..n + //--- + //--- Example structure: m=3, n=4 + //--- xxxx <= b[i=1] + //--- xxxx <= b[i=2] + //--- xxxx <= b[i=3] + //--- + //--- + //--- Allocate an empty row-ordered CoinPackedMatrix. Since we plan + //--- to add rows, set the column dimension and let the row dimension + //--- be set dynamically. 
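  // Note on the rows appended below (paraphrasing the code that follows):
  // for each selected machine i, one row sum{j} weight[i,j] * x[i,j] is built
  // and appended with bounds [-m_infinity, capacity[i]], i.e. a pure
  // "<= capacity" knapsack constraint; "KP" here refers to this knapsack
  // relaxation, one row per entry of whichKnaps.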
+ //--- + model->M = new CoinPackedMatrix(false, 0.0, 0.0); + CoinAssertHint(model->M, "Error: Out of Memory"); + model->M->setDimensions(0, nCols); + //--- + //--- we know the sizes needed, so reserve space for them (for efficiency) + //--- + model->reserve(nRows, nCols); + //--- + //--- create one row per knapsack + //--- rowNames are not needed, they are used for debugging + //--- + vector::iterator it; + + for (it = whichKnaps.begin(); it != whichKnaps.end(); it++) { + i = *it; + CoinPackedVector row; + string rowName = "k(i_" + UtilIntToStr(i) + ")"; + + for (j = 0; j < nTasks; j++) { + colIndex = getIndexIJ(i, j); + row.insert(colIndex, weight[colIndex]); + } + + model->appendRow(row, -m_infinity, capacity[i], rowName); + } + + //--- + //--- set the col upper and lower bounds (all in [0,1]) + //--- + UtilFillN(model->colLB, nCols, 0.0); + UtilFillN(model->colUB, nCols, 1.0); + //--- + //--- set the indices of the integer variables of model + //--- + UtilIotaN(model->integerVars, nCols, 0); + + //--- + //--- tell the solver which columns are active (in this block) + //--- + for (it = whichKnaps.begin(); it != whichKnaps.end(); it++) { + b = *it; + + for (i = 0; i < nMachines; i++) { for (j = 0; j < nTasks; j++) { - colIndex = getIndexIJ(i, j); - row.insert(colIndex, weight[colIndex]); - } + colIndex = getIndexIJ(i, j); - model->appendRow(row, -m_infinity, capacity[i], rowName); - } - - //--- - //--- set the col upper and lower bounds (all in [0,1]) - //--- - UtilFillN(model->colLB, nCols, 0.0); - UtilFillN(model->colUB, nCols, 1.0); - //--- - //--- set the indices of the integer variables of model - //--- - UtilIotaN(model->integerVars, nCols, 0); - - //--- - //--- tell the solver which columns are active (in this block) - //--- - for (it = whichKnaps.begin(); it != whichKnaps.end(); it++) { - b = *it; - - for (i = 0; i < nMachines; i++) { - for (j = 0; j < nTasks; j++) { - colIndex = getIndexIJ(i, j); - - if (i == b) { - model->activeColumns.push_back(colIndex); - } - } + if (i == b) { + model->activeColumns.push_back(colIndex); + } } - } - - //--- - //--- set column names for debugging - //--- - colIndex = 0; - - for (i = 0; i < nMachines; i++) { - for (j = 0; j < nTasks; j++) { - string colName = "x(" - + UtilIntToStr(colIndex) + "_" - + UtilIntToStr(i) + "," + UtilIntToStr(j) + ")"; - model->colNames.push_back(colName); - colIndex++; - } - } - - UtilPrintFuncEnd(m_osLog, m_classTag, - "createModelPartKP()", m_appParam.LogLevel, 2); - return status; + } + } + + //--- + //--- set column names for debugging + //--- + colIndex = 0; + + for (i = 0; i < nMachines; i++) { + for (j = 0; j < nTasks; j++) { + string colName = "x(" + UtilIntToStr(colIndex) + "_" + UtilIntToStr(i) + + "," + UtilIntToStr(j) + ")"; + model->colNames.push_back(colName); + colIndex++; + } + } + + UtilPrintFuncEnd(m_osLog, m_classTag, "createModelPartKP()", + m_appParam.LogLevel, 2); + return status; } // --------------------------------------------------------------------- // -int GAP_DecompApp::createModels() -{ - //--- - //--- This function does the work to create the different models - //--- that will be used. This memory is owned by the user. It will - //--- be passed to the application interface and used by the algorithms. 
- //--- - UtilPrintFuncBegin(m_osLog, m_classTag, - "createModels()", m_appParam.LogLevel, 2); - //--- - //--- Generalized Assignment Problem (GAP) - //--- m is number of machines (index i) - //--- n is number of tasks (index j) - //--- - //--- min sum{i in 1..m, j in 1..n} p[i,j] x[i,j] - //--- s.t. sum{ j in 1..n} w[i,j] x[i,j] <= b[i], i in 1..m - //--- sum{i in 1..m } x[i,j] = 1 , j in 1..n - //--- x[i,j] in {0,1}, i in 1..m, j in 1..n - //--- - //--- Example structure: m=3, n=4 - //--- xxxx <= b[i=1] - //--- xxxx <= b[i=2] - //--- xxxx <= b[i=3] - //--- x x x = 1 [j=1] - //--- x x x = 1 [j=2] - //--- x x x = 1 [j=3] - //--- x x x = 1 [j=4] - //--- - //--- - //--- Get information about this problem instance. - //-- - int i; - string modelName; - int status = GAPStatusOk; - int nTasks = m_instance.getNTasks(); //n - int nMachines = m_instance.getNMachines(); //m - const int* profit = m_instance.getProfit(); - int nCols = nTasks * nMachines; - //--- - //--- Construct the objective function (the original problem is - //--- a maximization, so we flip the sign to make it minimization). - //--- - m_objective = new double[nCols]; - assert(m_objective); - - if (!m_objective) { - return GAPStatusOutOfMemory; - } - - for (i = 0; i < nCols; i++) { - m_objective[i] = profit[i]; - } - - //--- - //--- A'[i] for i=1..m: m independent knapsacks - //--- sum{j in 1..n} w[i,j] x[i,j] <= b[i] - //--- x[i,j] in {0,1}, i in 1..m, j in 1..n - //--- - //--- A'': - //--- sum{i in 1..m} x[i,j] = 1, j in 1..n - //--- - //--- Example structure: m=3, n=4 - //--- A'[i=1]: - //--- xxxx <= b[i=1] - //--- A'[i=2]: - //--- xxxx <= b[i=2] - //--- A'[i=3]: - //--- xxxx <= b[i=3] - //--- - //--- A'': - //--- x x x = 1 [j=1] - //--- x x x = 1 [j=2] - //--- x x x = 1 [j=3] - //--- x x x = 1 [j=4] - //--- - setModelObjective(m_objective, nCols); - DecompConstraintSet* modelCore = new DecompConstraintSet(); - status = createModelPartAP(modelCore); - - if (status) { - return status; - } - - setModelCore(modelCore, "AP"); - m_models.push_back(modelCore); - - for (i = 0; i < nMachines; i++) { - DecompConstraintSet* modelRelax = new DecompConstraintSet(); - status = createModelPartKP(modelRelax, i); - modelName = "KP" + UtilIntToStr(i); - setModelRelax(modelRelax, modelName, i); - m_models.push_back(modelRelax); - } - - UtilPrintFuncEnd(m_osLog, m_classTag, - "createModels()", m_appParam.LogLevel, 2); - return status; +int GAP_DecompApp::createModels() { + //--- + //--- This function does the work to create the different models + //--- that will be used. This memory is owned by the user. It will + //--- be passed to the application interface and used by the algorithms. + //--- + UtilPrintFuncBegin(m_osLog, m_classTag, "createModels()", m_appParam.LogLevel, + 2); + //--- + //--- Generalized Assignment Problem (GAP) + //--- m is number of machines (index i) + //--- n is number of tasks (index j) + //--- + //--- min sum{i in 1..m, j in 1..n} p[i,j] x[i,j] + //--- s.t. sum{ j in 1..n} w[i,j] x[i,j] <= b[i], i in 1..m + //--- sum{i in 1..m } x[i,j] = 1 , j in 1..n + //--- x[i,j] in {0,1}, i in 1..m, j in 1..n + //--- + //--- Example structure: m=3, n=4 + //--- xxxx <= b[i=1] + //--- xxxx <= b[i=2] + //--- xxxx <= b[i=3] + //--- x x x = 1 [j=1] + //--- x x x = 1 [j=2] + //--- x x x = 1 [j=3] + //--- x x x = 1 [j=4] + //--- + //--- + //--- Get information about this problem instance. 
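  // Sketch of the column indexing assumed throughout these examples (an
  // inference from the column-name loops, since getIndexIJ() is defined
  // elsewhere): columns appear to be laid out machine-major, so x[i,j] would
  // live at dense index i * nTasks + j; e.g. with m = 3 machines and
  // n = 4 tasks, x[i=1, j=2] would sit at index 1*4 + 2 = 6 of the
  // nCols = 12 columns.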
+ //-- + int i; + string modelName; + int status = GAPStatusOk; + int nTasks = m_instance.getNTasks(); // n + int nMachines = m_instance.getNMachines(); // m + const int *profit = m_instance.getProfit(); + int nCols = nTasks * nMachines; + //--- + //--- Construct the objective function (the original problem is + //--- a maximization, so we flip the sign to make it minimization). + //--- + m_objective = new double[nCols]; + assert(m_objective); + + if (!m_objective) { + return GAPStatusOutOfMemory; + } + + for (i = 0; i < nCols; i++) { + m_objective[i] = profit[i]; + } + + //--- + //--- A'[i] for i=1..m: m independent knapsacks + //--- sum{j in 1..n} w[i,j] x[i,j] <= b[i] + //--- x[i,j] in {0,1}, i in 1..m, j in 1..n + //--- + //--- A'': + //--- sum{i in 1..m} x[i,j] = 1, j in 1..n + //--- + //--- Example structure: m=3, n=4 + //--- A'[i=1]: + //--- xxxx <= b[i=1] + //--- A'[i=2]: + //--- xxxx <= b[i=2] + //--- A'[i=3]: + //--- xxxx <= b[i=3] + //--- + //--- A'': + //--- x x x = 1 [j=1] + //--- x x x = 1 [j=2] + //--- x x x = 1 [j=3] + //--- x x x = 1 [j=4] + //--- + setModelObjective(m_objective, nCols); + DecompConstraintSet *modelCore = new DecompConstraintSet(); + status = createModelPartAP(modelCore); + + if (status) { + return status; + } + + setModelCore(modelCore, "AP"); + m_models.push_back(modelCore); + + for (i = 0; i < nMachines; i++) { + DecompConstraintSet *modelRelax = new DecompConstraintSet(); + status = createModelPartKP(modelRelax, i); + modelName = "KP" + UtilIntToStr(i); + setModelRelax(modelRelax, modelName, i); + m_models.push_back(modelRelax); + } + + UtilPrintFuncEnd(m_osLog, m_classTag, "createModels()", m_appParam.LogLevel, + 2); + return status; } //--------------------------------------------------------------------- // -void GAP_DecompApp::printOriginalColumn(const int index, - ostream* os) const -{ - pair p = m_instance.getIndexInv(index); - (*os) << "x[ " << index << " : " << p.first << " , " << p.second << " ]"; +void GAP_DecompApp::printOriginalColumn(const int index, ostream *os) const { + pair p = m_instance.getIndexInv(index); + (*os) << "x[ " << index << " : " << p.first << " , " << p.second << " ]"; } diff --git a/Dip/examples/GAP/GAP_DecompApp4.cpp b/Dip/examples/GAP/GAP_DecompApp4.cpp index ff04619d..9fcd6833 100644 --- a/Dip/examples/GAP/GAP_DecompApp4.cpp +++ b/Dip/examples/GAP/GAP_DecompApp4.cpp @@ -13,365 +13,355 @@ //===========================================================================// //===========================================================================// -#include "GAP_Status.h" #include "GAP_DecompApp3.h" +#include "GAP_Status.h" //===========================================================================// #include "DecompVar.h" //===========================================================================// -void GAP_DecompApp::initializeApp(UtilParameters& utilParam) -{ - UtilPrintFuncBegin(m_osLog, m_classTag, - "initializeApp()", m_appParam.LogLevel, 2); - //--- - //--- get application parameters - //--- - m_appParam.getSettings(utilParam); +void GAP_DecompApp::initializeApp(UtilParameters &utilParam) { + UtilPrintFuncBegin(m_osLog, m_classTag, "initializeApp()", + m_appParam.LogLevel, 2); + //--- + //--- get application parameters + //--- + m_appParam.getSettings(utilParam); - if (m_appParam.LogLevel >= 1) { - m_appParam.dumpSettings(m_osLog); - } + if (m_appParam.LogLevel >= 1) { + m_appParam.dumpSettings(m_osLog); + } - //--- - //--- read instance - // - string instanceFile = m_appParam.DataDir - + UtilDirSlash() + 
m_appParam.Instance; - m_instance.readInstance(instanceFile); - //--- - //--- read best known lb/ub (for debugging) - //--- - string bestKnownFile = m_appParam.DataDir + UtilDirSlash() + "gap.opt"; - m_instance.readBestKnown(bestKnownFile, m_appParam.Instance); - setBestKnownLB(m_instance.getBestKnownLB()); - setBestKnownUB(m_instance.getBestKnownUB()); - //--- - //--- create models - //--- - createModels(); - UtilPrintFuncEnd(m_osLog, m_classTag, - "initializeApp()", m_appParam.LogLevel, 2); + //--- + //--- read instance + // + string instanceFile = + m_appParam.DataDir + UtilDirSlash() + m_appParam.Instance; + m_instance.readInstance(instanceFile); + //--- + //--- read best known lb/ub (for debugging) + //--- + string bestKnownFile = m_appParam.DataDir + UtilDirSlash() + "gap.opt"; + m_instance.readBestKnown(bestKnownFile, m_appParam.Instance); + setBestKnownLB(m_instance.getBestKnownLB()); + setBestKnownUB(m_instance.getBestKnownUB()); + //--- + //--- create models + //--- + createModels(); + UtilPrintFuncEnd(m_osLog, m_classTag, "initializeApp()", m_appParam.LogLevel, + 2); } // --------------------------------------------------------------------- // -int GAP_DecompApp::createModelPartAP(DecompConstraintSet* model) -{ - int i, j, colIndex; - int status = GAPStatusOk; - int nTasks = m_instance.getNTasks(); //n - int nMachines = m_instance.getNMachines(); //m - int nCols = nTasks * nMachines; - int nRows = nTasks; - UtilPrintFuncBegin(m_osLog, m_classTag, - "createModelPartAP()", m_appParam.LogLevel, 2); - //--- - //--- Build the core model constraints (AP = assignment problem). - //--- - //--- m is number of machines (index i) - //--- n is number of tasks (index j) - //--- - //--- sum{i in 1..m} x[i,j] = 1, j in 1..n - //--- - //--- Example structure: m=3, n=4 - //--- x x x = 1 [j=1] - //--- x x x = 1 [j=2] - //--- x x x = 1 [j=3] - //--- x x x = 1 [j=4] - //--- - //--- - //--- Allocate an empty row-ordered CoinPackedMatrix. Since we plan - //--- to add rows, set the column dimension and let the row dimension - //--- be set dynamically. - //--- - model->M = new CoinPackedMatrix(false, 0.0, 0.0); - CoinAssertHint(model->M, "Error: Out of Memory"); - model->M->setDimensions(0, nCols); - //--- - //--- we know the sizes needed, so reserve space for them (for efficiency) - //--- - model->reserve(nRows, nCols); +int GAP_DecompApp::createModelPartAP(DecompConstraintSet *model) { + int i, j, colIndex; + int status = GAPStatusOk; + int nTasks = m_instance.getNTasks(); // n + int nMachines = m_instance.getNMachines(); // m + int nCols = nTasks * nMachines; + int nRows = nTasks; + UtilPrintFuncBegin(m_osLog, m_classTag, "createModelPartAP()", + m_appParam.LogLevel, 2); + //--- + //--- Build the core model constraints (AP = assignment problem). + //--- + //--- m is number of machines (index i) + //--- n is number of tasks (index j) + //--- + //--- sum{i in 1..m} x[i,j] = 1, j in 1..n + //--- + //--- Example structure: m=3, n=4 + //--- x x x = 1 [j=1] + //--- x x x = 1 [j=2] + //--- x x x = 1 [j=3] + //--- x x x = 1 [j=4] + //--- + //--- + //--- Allocate an empty row-ordered CoinPackedMatrix. Since we plan + //--- to add rows, set the column dimension and let the row dimension + //--- be set dynamically. 
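  // Orientation note (summarizing the code in this file relative to
  // GAP_DecompApp3.cpp): the assignment-problem core built here looks the
  // same in both versions; the difference appears in createModelPartKP()
  // below, where each knapsack block is built as a *sparse* model holding
  // only that block's nTasks columns (via setSparse() and pushCol()), with
  // getMapOrigToSparse() translating dense column indices into the block's
  // local indices.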
+ //--- + model->M = new CoinPackedMatrix(false, 0.0, 0.0); + CoinAssertHint(model->M, "Error: Out of Memory"); + model->M->setDimensions(0, nCols); + //--- + //--- we know the sizes needed, so reserve space for them (for efficiency) + //--- + model->reserve(nRows, nCols); - //--- - //--- create one row per task - //--- rowNames are not needed, they are used for debugging - //--- - for (j = 0; j < nTasks; j++) { - CoinPackedVector row; - string rowName = "a(j_" + UtilIntToStr(j) + ")"; + //--- + //--- create one row per task + //--- rowNames are not needed, they are used for debugging + //--- + for (j = 0; j < nTasks; j++) { + CoinPackedVector row; + string rowName = "a(j_" + UtilIntToStr(j) + ")"; - for (i = 0; i < nMachines; i++) { - colIndex = getIndexIJ(i, j); - row.insert(colIndex, 1.0); - } + for (i = 0; i < nMachines; i++) { + colIndex = getIndexIJ(i, j); + row.insert(colIndex, 1.0); + } - model->appendRow(row, 1.0, 1.0, rowName); - } + model->appendRow(row, 1.0, 1.0, rowName); + } - //--- - //--- set the col upper and lower bounds (all in [0,1]) - //--- - UtilFillN(model->colLB, nCols, 0.0); - UtilFillN(model->colUB, nCols, 1.0); - //--- - //--- set the indices of the integer variables of model - //--- (all vars are binary) - //--- - UtilIotaN(model->integerVars, nCols, 0); - //--- - //--- set column names for debugging - //--- - colIndex = 0; + //--- + //--- set the col upper and lower bounds (all in [0,1]) + //--- + UtilFillN(model->colLB, nCols, 0.0); + UtilFillN(model->colUB, nCols, 1.0); + //--- + //--- set the indices of the integer variables of model + //--- (all vars are binary) + //--- + UtilIotaN(model->integerVars, nCols, 0); + //--- + //--- set column names for debugging + //--- + colIndex = 0; - for (i = 0; i < nMachines; i++) { - for (j = 0; j < nTasks; j++) { - string colName = "x(" - + UtilIntToStr(colIndex) + "_" - + UtilIntToStr(i) + "," + UtilIntToStr(j) + ")"; - model->colNames.push_back(colName); - colIndex++; - } - } + for (i = 0; i < nMachines; i++) { + for (j = 0; j < nTasks; j++) { + string colName = "x(" + UtilIntToStr(colIndex) + "_" + UtilIntToStr(i) + + "," + UtilIntToStr(j) + ")"; + model->colNames.push_back(colName); + colIndex++; + } + } - UtilPrintFuncEnd(m_osLog, m_classTag, - "createModelPartAP()", m_appParam.LogLevel, 2); - return status; + UtilPrintFuncEnd(m_osLog, m_classTag, "createModelPartAP()", + m_appParam.LogLevel, 2); + return status; } //===========================================================================// -int GAP_DecompApp::createModelPartKP(DecompConstraintSet* model) -{ - //--- - //--- helper method - create model with nMachines KPs - //--- - vector whichKnaps; - int nMachines = m_instance.getNMachines(); - UtilIotaN(whichKnaps, nMachines, 0); - return createModelPartKP(model, whichKnaps); +int GAP_DecompApp::createModelPartKP(DecompConstraintSet *model) { + //--- + //--- helper method - create model with nMachines KPs + //--- + vector whichKnaps; + int nMachines = m_instance.getNMachines(); + UtilIotaN(whichKnaps, nMachines, 0); + return createModelPartKP(model, whichKnaps); } //===========================================================================// -int GAP_DecompApp::createModelPartKP(DecompConstraintSet* model, - int whichKnap) -{ - //--- - //--- helper method - create model with one (whichKnap) KP - //--- - vector whichKnaps; - whichKnaps.push_back(whichKnap); - return createModelPartKP(model, whichKnaps); +int GAP_DecompApp::createModelPartKP(DecompConstraintSet *model, + int whichKnap) { + //--- + //--- helper 
method - create model with one (whichKnap) KP + //--- + vector whichKnaps; + whichKnaps.push_back(whichKnap); + return createModelPartKP(model, whichKnaps); } //===========================================================================// -int GAP_DecompApp::createModelPartKP(DecompConstraintSet* model, - vector& whichKnaps) -{ - int i, j, b, colIndex; - int status = GAPStatusOk; - int nTasks = m_instance.getNTasks(); //n - int nMachines = m_instance.getNMachines(); //m - int nKnaps = static_cast(whichKnaps.size()); - const int* weight = m_instance.getWeight(); - const int* capacity = m_instance.getCapacity(); - int nCols = nTasks * nKnaps; - int nRows = nKnaps; - UtilPrintFuncBegin(m_osLog, m_classTag, - "createModelPartKP()", m_appParam.LogLevel, 2); - //--- - //--- Build the relax model constraints (KP = assignment problem). - //--- - //--- m is number of machines (index i) - //--- n is number of tasks (index j) - //--- - //--- sum{j in 1..n} w[i,j] x[i,j] <= b[i], i in 1..m - //--- x[i,j] in {0,1}, i in 1..m, j in 1..n - //--- - //--- Example structure: m=3, n=4 - //--- xxxx <= b[i=1] - //--- xxxx <= b[i=2] - //--- xxxx <= b[i=3] - //--- - //--- - //--- Allocate an empty row-ordered CoinPackedMatrix. Since we plan - //--- to add rows, set the column dimension and let the row dimension - //--- be set dynamically. - //--- NOTE: this matrix is sparse version. So, nCols is just the number - //--- of active columns. That is, if we are generating one KP per block, - //--- then there are only nTasks columns in this model. - //--- - model->M = new CoinPackedMatrix(false, 0.0, 0.0); - CoinAssertHint(model->M, "Error: Out of Memory"); - model->M->setDimensions(0, nCols); - //--- - //--- we know the sizes needed, so reserve space for them (for efficiency) - //--- - model->reserve(nRows, nCols); - //--- - //--- tell the solver that this block is represented sparsely - //--- as an argument, tell the method how many columns are - //--- in the original compact formulation - //--- - model->setSparse(nTasks * nMachines); - //--- - //--- create the columns using the pushCol interface - //--- and setup the mapping between sparse and dense models - //--- - //--- pushCol( - //--- column lower bound - //--- column upper bound - //--- is it integer? - //--- index for the original compact formulation ) - //--- - vector::iterator it; +int GAP_DecompApp::createModelPartKP(DecompConstraintSet *model, + vector &whichKnaps) { + int i, j, b, colIndex; + int status = GAPStatusOk; + int nTasks = m_instance.getNTasks(); // n + int nMachines = m_instance.getNMachines(); // m + int nKnaps = static_cast(whichKnaps.size()); + const int *weight = m_instance.getWeight(); + const int *capacity = m_instance.getCapacity(); + int nCols = nTasks * nKnaps; + int nRows = nKnaps; + UtilPrintFuncBegin(m_osLog, m_classTag, "createModelPartKP()", + m_appParam.LogLevel, 2); + //--- + //--- Build the relax model constraints (KP = assignment problem). + //--- + //--- m is number of machines (index i) + //--- n is number of tasks (index j) + //--- + //--- sum{j in 1..n} w[i,j] x[i,j] <= b[i], i in 1..m + //--- x[i,j] in {0,1}, i in 1..m, j in 1..n + //--- + //--- Example structure: m=3, n=4 + //--- xxxx <= b[i=1] + //--- xxxx <= b[i=2] + //--- xxxx <= b[i=3] + //--- + //--- + //--- Allocate an empty row-ordered CoinPackedMatrix. Since we plan + //--- to add rows, set the column dimension and let the row dimension + //--- be set dynamically. + //--- NOTE: this matrix is sparse version. 
So, nCols is just the number + //--- of active columns. That is, if we are generating one KP per block, + //--- then there are only nTasks columns in this model. + //--- + model->M = new CoinPackedMatrix(false, 0.0, 0.0); + CoinAssertHint(model->M, "Error: Out of Memory"); + model->M->setDimensions(0, nCols); + //--- + //--- we know the sizes needed, so reserve space for them (for efficiency) + //--- + model->reserve(nRows, nCols); + //--- + //--- tell the solver that this block is represented sparsely + //--- as an argument, tell the method how many columns are + //--- in the original compact formulation + //--- + model->setSparse(nTasks * nMachines); + //--- + //--- create the columns using the pushCol interface + //--- and setup the mapping between sparse and dense models + //--- + //--- pushCol( + //--- column lower bound + //--- column upper bound + //--- is it integer? + //--- index for the original compact formulation ) + //--- + vector::iterator it; - for (it = whichKnaps.begin(); it != whichKnaps.end(); it++) { - b = *it; + for (it = whichKnaps.begin(); it != whichKnaps.end(); it++) { + b = *it; - for (i = 0; i < nMachines; i++) - for (j = 0; j < nTasks; j++) - if (i == b) { - //--- - //--- set column names for debugging - //--- - string colName = "x(" - + UtilIntToStr(getIndexIJ(i, j)) + "_" - + UtilIntToStr(i) + "," + UtilIntToStr(j) + ")"; - model->colNames.push_back(colName); - model->pushCol(0.0, 1.0, true, getIndexIJ(i, j)); - } - } + for (i = 0; i < nMachines; i++) + for (j = 0; j < nTasks; j++) + if (i == b) { + //--- + //--- set column names for debugging + //--- + string colName = "x(" + UtilIntToStr(getIndexIJ(i, j)) + "_" + + UtilIntToStr(i) + "," + UtilIntToStr(j) + ")"; + model->colNames.push_back(colName); + model->pushCol(0.0, 1.0, true, getIndexIJ(i, j)); + } + } - //--- - //--- create one row per knapsack - //---- this is a sparse matrix, so use mapping between original - //--- rowNames are not needed, they are used for debugging - //--- - const map& origToSparse = model->getMapOrigToSparse(); - map::const_iterator mit; + //--- + //--- create one row per knapsack + //---- this is a sparse matrix, so use mapping between original + //--- rowNames are not needed, they are used for debugging + //--- + const map &origToSparse = model->getMapOrigToSparse(); + map::const_iterator mit; - for (it = whichKnaps.begin(); it != whichKnaps.end(); it++) { - i = *it; - CoinPackedVector row; - string rowName = "k(i_" + UtilIntToStr(i) + ")"; + for (it = whichKnaps.begin(); it != whichKnaps.end(); it++) { + i = *it; + CoinPackedVector row; + string rowName = "k(i_" + UtilIntToStr(i) + ")"; - for (j = 0; j < nTasks; j++) { - colIndex = getIndexIJ(i, j); //dense - mit = origToSparse.find(colIndex); - assert(mit != origToSparse.end()); - row.insert(mit->second, weight[colIndex]); - } + for (j = 0; j < nTasks; j++) { + colIndex = getIndexIJ(i, j); // dense + mit = origToSparse.find(colIndex); + assert(mit != origToSparse.end()); + row.insert(mit->second, weight[colIndex]); + } - model->appendRow(row, -m_infinity, capacity[i], rowName); - } + model->appendRow(row, -m_infinity, capacity[i], rowName); + } - UtilPrintFuncEnd(m_osLog, m_classTag, - "createModelPartKP()", m_appParam.LogLevel, 2); - return status; + UtilPrintFuncEnd(m_osLog, m_classTag, "createModelPartKP()", + m_appParam.LogLevel, 2); + return status; } // --------------------------------------------------------------------- // -int GAP_DecompApp::createModels() -{ - //--- - //--- This function does the work to create the 
different models - //--- that will be used. This memory is owned by the user. It will - //--- be passed to the application interface and used by the algorithms. - //--- - UtilPrintFuncBegin(m_osLog, m_classTag, - "createModels()", m_appParam.LogLevel, 2); - //--- - //--- Generalized Assignment Problem (GAP) - //--- m is number of machines (index i) - //--- n is number of tasks (index j) - //--- - //--- min sum{i in 1..m, j in 1..n} p[i,j] x[i,j] - //--- s.t. sum{ j in 1..n} w[i,j] x[i,j] <= b[i], i in 1..m - //--- sum{i in 1..m } x[i,j] = 1 , j in 1..n - //--- x[i,j] in {0,1}, i in 1..m, j in 1..n - //--- - //--- Example structure: m=3, n=4 - //--- xxxx <= b[i=1] - //--- xxxx <= b[i=2] - //--- xxxx <= b[i=3] - //--- x x x = 1 [j=1] - //--- x x x = 1 [j=2] - //--- x x x = 1 [j=3] - //--- x x x = 1 [j=4] - //--- - //--- - //--- Get information about this problem instance. - //-- - int i; - string modelName; - int status = GAPStatusOk; - int nTasks = m_instance.getNTasks(); //n - int nMachines = m_instance.getNMachines(); //m - const int* profit = m_instance.getProfit(); - int nCols = nTasks * nMachines; - //--- - //--- Construct the objective function (the original problem is - //--- a maximization, so we flip the sign to make it minimization). - //--- - m_objective = new double[nCols]; - assert(m_objective); +int GAP_DecompApp::createModels() { + //--- + //--- This function does the work to create the different models + //--- that will be used. This memory is owned by the user. It will + //--- be passed to the application interface and used by the algorithms. + //--- + UtilPrintFuncBegin(m_osLog, m_classTag, "createModels()", m_appParam.LogLevel, + 2); + //--- + //--- Generalized Assignment Problem (GAP) + //--- m is number of machines (index i) + //--- n is number of tasks (index j) + //--- + //--- min sum{i in 1..m, j in 1..n} p[i,j] x[i,j] + //--- s.t. sum{ j in 1..n} w[i,j] x[i,j] <= b[i], i in 1..m + //--- sum{i in 1..m } x[i,j] = 1 , j in 1..n + //--- x[i,j] in {0,1}, i in 1..m, j in 1..n + //--- + //--- Example structure: m=3, n=4 + //--- xxxx <= b[i=1] + //--- xxxx <= b[i=2] + //--- xxxx <= b[i=3] + //--- x x x = 1 [j=1] + //--- x x x = 1 [j=2] + //--- x x x = 1 [j=3] + //--- x x x = 1 [j=4] + //--- + //--- + //--- Get information about this problem instance. + //-- + int i; + string modelName; + int status = GAPStatusOk; + int nTasks = m_instance.getNTasks(); // n + int nMachines = m_instance.getNMachines(); // m + const int *profit = m_instance.getProfit(); + int nCols = nTasks * nMachines; + //--- + //--- Construct the objective function (the original problem is + //--- a maximization, so we flip the sign to make it minimization). 
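  // Caveat on the comment above: the loop below copies profit[i] into
  // m_objective[i] without negating it. That is consistent with the reader in
  // GAP_Instance.cpp, whose "TODO: bad name - since cost" note suggests the
  // values stored in m_profit are really allocation costs, so the model is
  // already a minimization and no sign flip is actually applied here.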
+ //--- + m_objective = new double[nCols]; + assert(m_objective); - if (!m_objective) { - return GAPStatusOutOfMemory; - } + if (!m_objective) { + return GAPStatusOutOfMemory; + } - for (i = 0; i < nCols; i++) { - m_objective[i] = profit[i]; - } + for (i = 0; i < nCols; i++) { + m_objective[i] = profit[i]; + } - //--- - //--- A'[i] for i=1..m: m independent knapsacks - //--- sum{j in 1..n} w[i,j] x[i,j] <= b[i] - //--- x[i,j] in {0,1}, i in 1..m, j in 1..n - //--- - //--- A'': - //--- sum{i in 1..m} x[i,j] = 1, j in 1..n - //--- - //--- Example structure: m=3, n=4 - //--- A'[i=1]: - //--- xxxx <= b[i=1] - //--- A'[i=2]: - //--- xxxx <= b[i=2] - //--- A'[i=3]: - //--- xxxx <= b[i=3] - //--- - //--- A'': - //--- x x x = 1 [j=1] - //--- x x x = 1 [j=2] - //--- x x x = 1 [j=3] - //--- x x x = 1 [j=4] - //--- - setModelObjective(m_objective, nCols); - DecompConstraintSet* modelCore = new DecompConstraintSet(); - status = createModelPartAP(modelCore); + //--- + //--- A'[i] for i=1..m: m independent knapsacks + //--- sum{j in 1..n} w[i,j] x[i,j] <= b[i] + //--- x[i,j] in {0,1}, i in 1..m, j in 1..n + //--- + //--- A'': + //--- sum{i in 1..m} x[i,j] = 1, j in 1..n + //--- + //--- Example structure: m=3, n=4 + //--- A'[i=1]: + //--- xxxx <= b[i=1] + //--- A'[i=2]: + //--- xxxx <= b[i=2] + //--- A'[i=3]: + //--- xxxx <= b[i=3] + //--- + //--- A'': + //--- x x x = 1 [j=1] + //--- x x x = 1 [j=2] + //--- x x x = 1 [j=3] + //--- x x x = 1 [j=4] + //--- + setModelObjective(m_objective, nCols); + DecompConstraintSet *modelCore = new DecompConstraintSet(); + status = createModelPartAP(modelCore); - if (status) { - return status; - } + if (status) { + return status; + } - setModelCore(modelCore, "AP"); - m_models.push_back(modelCore); + setModelCore(modelCore, "AP"); + m_models.push_back(modelCore); - for (i = 0; i < nMachines; i++) { - DecompConstraintSet* modelRelax = new DecompConstraintSet(); - status = createModelPartKP(modelRelax, i); - modelName = "KP" + UtilIntToStr(i); - setModelRelax(modelRelax, modelName, i); - m_models.push_back(modelRelax); - } + for (i = 0; i < nMachines; i++) { + DecompConstraintSet *modelRelax = new DecompConstraintSet(); + status = createModelPartKP(modelRelax, i); + modelName = "KP" + UtilIntToStr(i); + setModelRelax(modelRelax, modelName, i); + m_models.push_back(modelRelax); + } - UtilPrintFuncEnd(m_osLog, m_classTag, - "createModels()", m_appParam.LogLevel, 2); - return status; + UtilPrintFuncEnd(m_osLog, m_classTag, "createModels()", m_appParam.LogLevel, + 2); + return status; } //--------------------------------------------------------------------- // -void GAP_DecompApp::printOriginalColumn(const int index, - ostream* os) const -{ - pair p = m_instance.getIndexInv(index); - (*os) << "x[ " << index << " : " << p.first << " , " << p.second << " ]"; +void GAP_DecompApp::printOriginalColumn(const int index, ostream *os) const { + pair p = m_instance.getIndexInv(index); + (*os) << "x[ " << index << " : " << p.first << " , " << p.second << " ]"; } diff --git a/Dip/examples/GAP/GAP_Instance.cpp b/Dip/examples/GAP/GAP_Instance.cpp index 15f16130..212fc195 100644 --- a/Dip/examples/GAP/GAP_Instance.cpp +++ b/Dip/examples/GAP/GAP_Instance.cpp @@ -17,90 +17,86 @@ #include "UtilMacrosDecomp.h" //===========================================================================// -void GAP_Instance::readInstance(string& fileName) -{ - int i, j, n_ij, indexIJ; - ifstream is; - //--- - //--- File format (.../Decomp/data/GAP) - //--- - //--- agents = machines (m, i index) - //--- jobs = 
tasks (n, j index) - //--- - //--- number of machines (m), number of tasks (n) - //--- for each machine i (i=1,...,m) in turn: - //--- cost of allocating task j to machine i (j=1,...,n) - //--- for each machine i (i=1,...,m) in turn: - //--- resource consumed in allocating task j to machine i (j=1,...,n) - //--- resource capacity of machine j (j=1,...,m) - //--- - UtilOpenFile(is, fileName.c_str()); - is >> m_nMachines - >> m_nTasks; - //--- - //--- allocate memory for capacity, value and weight - //--- - n_ij = m_nMachines * m_nTasks; - m_capacity = new int[m_nMachines]; - m_profit = new int[n_ij]; - m_weight = new int[n_ij]; +void GAP_Instance::readInstance(string &fileName) { + int i, j, n_ij, indexIJ; + ifstream is; + //--- + //--- File format (.../Decomp/data/GAP) + //--- + //--- agents = machines (m, i index) + //--- jobs = tasks (n, j index) + //--- + //--- number of machines (m), number of tasks (n) + //--- for each machine i (i=1,...,m) in turn: + //--- cost of allocating task j to machine i (j=1,...,n) + //--- for each machine i (i=1,...,m) in turn: + //--- resource consumed in allocating task j to machine i (j=1,...,n) + //--- resource capacity of machine j (j=1,...,m) + //--- + UtilOpenFile(is, fileName.c_str()); + is >> m_nMachines >> m_nTasks; + //--- + //--- allocate memory for capacity, value and weight + //--- + n_ij = m_nMachines * m_nTasks; + m_capacity = new int[m_nMachines]; + m_profit = new int[n_ij]; + m_weight = new int[n_ij]; - if (!(m_capacity && m_profit && m_weight)) { - throw UtilExceptionMemory("readInstance", "GAP_Instance"); - } + if (!(m_capacity && m_profit && m_weight)) { + throw UtilExceptionMemory("readInstance", "GAP_Instance"); + } - indexIJ = 0; + indexIJ = 0; - for (i = 0; i < m_nMachines; i++) { - for (j = 0; j < m_nTasks; j++) { - is >> m_profit[indexIJ++];//TODO: bad name - since cost - } - } + for (i = 0; i < m_nMachines; i++) { + for (j = 0; j < m_nTasks; j++) { + is >> m_profit[indexIJ++]; // TODO: bad name - since cost + } + } - indexIJ = 0; + indexIJ = 0; - for (i = 0; i < m_nMachines; i++) { - for (j = 0; j < m_nTasks; j++) { - is >> m_weight[indexIJ++]; - } - } + for (i = 0; i < m_nMachines; i++) { + for (j = 0; j < m_nTasks; j++) { + is >> m_weight[indexIJ++]; + } + } - for (j = 0; j < m_nMachines; j++) { - is >> m_capacity[j]; - } + for (j = 0; j < m_nMachines; j++) { + is >> m_capacity[j]; + } - is.close(); + is.close(); } //===========================================================================// -void GAP_Instance::readBestKnown(string& fileName, - string& instanceName) -{ - ifstream is; - string instance; - double bestUpperBound; - bool isProvenOptimal; - int status = 0; - status = UtilOpenFile(is, fileName); +void GAP_Instance::readBestKnown(string &fileName, string &instanceName) { + ifstream is; + string instance; + double bestUpperBound; + bool isProvenOptimal; + int status = 0; + status = UtilOpenFile(is, fileName); - if (status) - throw UtilException("Failed to best-known file", - "readBestKnown", "GAP_Instance"); + if (status) + throw UtilException("Failed to best-known file", "readBestKnown", + "GAP_Instance"); - while (!is.eof()) { - is >> instance >> bestUpperBound >> isProvenOptimal; - instance = UtilStrTrim(instance); + while (!is.eof()) { + is >> instance >> bestUpperBound >> isProvenOptimal; + instance = UtilStrTrim(instance); - if (instance == instanceName) { - if (isProvenOptimal) { - m_bestKnownLB = bestUpperBound; - } else { - m_bestKnownLB = -COIN_DBL_MAX; - } - - m_bestKnownUB = bestUpperBound; - 
m_isProvenOptimal = isProvenOptimal; - break; + if (instance == instanceName) { + if (isProvenOptimal) { + m_bestKnownLB = bestUpperBound; + } else { + m_bestKnownLB = -COIN_DBL_MAX; } - } + + m_bestKnownUB = bestUpperBound; + m_isProvenOptimal = isProvenOptimal; + break; + } + } } diff --git a/Dip/examples/GAP/GAP_Main.cpp b/Dip/examples/GAP/GAP_Main.cpp index 70dfaa03..f195b05f 100644 --- a/Dip/examples/GAP/GAP_Main.cpp +++ b/Dip/examples/GAP/GAP_Main.cpp @@ -29,130 +29,121 @@ #include "UtilTimer.h" //===========================================================================// -int main(int argc, char** argv) -{ - try { - //--- - //--- create the utility class for parsing parameters - //--- - UtilParameters utilParam(argc, argv); +int main(int argc, char **argv) { + try { + //--- + //--- create the utility class for parsing parameters + //--- + UtilParameters utilParam(argc, argv); + + utilParam.Add("DECOMP", "BranchEnforceInMaster", "1"); + utilParam.Add("DECOMP", "BranchEnforceInSubProb", "0"); + + bool doCut = utilParam.GetSetting("doCut", true); + bool doPriceCut = utilParam.GetSetting("doPriceCut", false); + bool doDirect = utilParam.GetSetting("doDirect", false); - utilParam.Add("DECOMP", "BranchEnforceInMaster", "1"); - utilParam.Add("DECOMP", "BranchEnforceInSubProb", "0"); + UtilTimer timer; + double timeSetupReal = 0.0; + double timeSetupCpu = 0.0; + double timeSolveReal = 0.0; + double timeSolveCpu = 0.0; + //--- + //--- start overall timer + //--- + timer.start(); + //--- + //--- create the user application (a DecompApp) + //--- + GAP_DecompApp gap(utilParam); + //--- + //--- create the algorithm (a DecompAlgo) + //--- + DecompAlgo *algo = NULL; + assert(doCut + doPriceCut == 1); - bool doCut = utilParam.GetSetting("doCut", true); - bool doPriceCut = utilParam.GetSetting("doPriceCut", false); - bool doDirect = utilParam.GetSetting("doDirect", false); + //--- + //--- create the CPM algorithm object + //--- + if (doCut) { + algo = new DecompAlgoC(&gap, utilParam); + } - UtilTimer timer; - double timeSetupReal = 0.0; - double timeSetupCpu = 0.0; - double timeSolveReal = 0.0; - double timeSolveCpu = 0.0; + //--- + //--- create the PC algorithm object + //--- + if (doPriceCut) { + algo = new DecompAlgoPC(&gap, utilParam); + } + + if (doCut && doDirect) { + timer.stop(); + timeSetupCpu = timer.getCpuTime(); + timeSetupReal = timer.getRealTime(); //--- - //--- start overall timer + //--- solve //--- timer.start(); + algo->solveDirect(); + timer.stop(); + timeSolveCpu = timer.getCpuTime(); + timeSolveReal = timer.getRealTime(); + } else { + timer.stop(); + timeSetupCpu = timer.getCpuTime(); + timeSetupReal = timer.getRealTime(); //--- - //--- create the user application (a DecompApp) - //--- - GAP_DecompApp gap(utilParam); + //--- create the driver AlpsDecomp model //--- - //--- create the algorithm (a DecompAlgo) - //--- - DecompAlgo* algo = NULL; - assert(doCut + doPriceCut == 1); - + int status = 0; + AlpsDecompModel alpsModel(utilParam, algo); //--- - //--- create the CPM algorithm object + //--- solve //--- - if (doCut) { - algo = new DecompAlgoC(&gap, utilParam); - } - + timer.start(); + status = alpsModel.solve(); + timer.stop(); + timeSolveCpu = timer.getCpuTime(); + timeSolveReal = timer.getRealTime(); //--- - //--- create the PC algorithm object + //--- sanity check //--- - if (doPriceCut) { - algo = new DecompAlgoPC(&gap, utilParam); - } + cout << setiosflags(ios::fixed | ios::showpoint); + cout << "Status= " << status << " BestLB= " << setw(10) + << 
UtilDblToStr(alpsModel.getGlobalLB(), 5) + << " BestUB= " << setw(10) + << UtilDblToStr(alpsModel.getGlobalUB(), 5) << " Nodes= " << setw(6) + << alpsModel.getNumNodesProcessed() << " SetupCPU= " << timeSetupCpu + << " SolveCPU= " << timeSolveCpu + << " TotalCPU= " << timeSetupCpu + timeSolveCpu + << " SetupReal= " << timeSetupReal << " SetupReal= " << timeSolveReal + << " TotalReal= " << timeSetupReal + timeSetupReal << endl; - if (doCut && doDirect) { - timer.stop(); - timeSetupCpu = timer.getCpuTime(); - timeSetupReal = timer.getRealTime(); - //--- - //--- solve - //--- - timer.start(); - algo->solveDirect(); - timer.stop(); - timeSolveCpu = timer.getCpuTime(); - timeSolveReal = timer.getRealTime(); - } else { - timer.stop(); - timeSetupCpu = timer.getCpuTime(); - timeSetupReal = timer.getRealTime(); - //--- - //--- create the driver AlpsDecomp model - //--- - int status = 0; - AlpsDecompModel alpsModel(utilParam, algo); - //--- - //--- solve - //--- - timer.start(); - status = alpsModel.solve(); - timer.stop(); - timeSolveCpu = timer.getCpuTime(); - timeSolveReal = timer.getRealTime(); - //--- - //--- sanity check - //--- - cout << setiosflags(ios::fixed | ios::showpoint); - cout << "Status= " << status - << " BestLB= " << setw(10) - << UtilDblToStr(alpsModel.getGlobalLB(), 5) - << " BestUB= " << setw(10) - << UtilDblToStr(alpsModel.getGlobalUB(), 5) - << " Nodes= " << setw(6) - << alpsModel.getNumNodesProcessed() - << " SetupCPU= " << timeSetupCpu - << " SolveCPU= " << timeSolveCpu - << " TotalCPU= " << timeSetupCpu + timeSolveCpu - << " SetupReal= " << timeSetupReal - << " SetupReal= " << timeSolveReal - << " TotalReal= " << timeSetupReal + timeSetupReal - << endl; + if (status == AlpsExitStatusOptimal && gap.getBestKnownUB() < 1.0e50) { + //--- + //--- the assumption here is that the BestKnownLB/UB is optimal + //--- + double diff = fabs(gap.getBestKnownUB() - alpsModel.getGlobalUB()); - if (status == AlpsExitStatusOptimal && gap.getBestKnownUB() < 1.0e50) { - //--- - //--- the assumption here is that the BestKnownLB/UB is optimal - //--- - double diff - = fabs(gap.getBestKnownUB() - alpsModel.getGlobalUB()); - - if (diff > 1.0e-4) { - cerr << "ERROR. BestKnownUB= " << gap.getBestKnownUB() - << " but DECOMP claims GlobalUB= " - << alpsModel.getGlobalUB() << endl; - throw UtilException("Invalid claim of optimal.", - "main", "DECOMP"); - } - } + if (diff > 1.0e-4) { + cerr << "ERROR. BestKnownUB= " << gap.getBestKnownUB() + << " but DECOMP claims GlobalUB= " << alpsModel.getGlobalUB() + << endl; + throw UtilException("Invalid claim of optimal.", "main", "DECOMP"); + } } + } - //--- - //--- free local memory - //--- - delete algo; - } catch (CoinError& ex) { - cerr << "COIN Exception [ " << ex.message() << " ]" - << " at " << ex.fileName() << ":L" << ex.lineNumber() - << " in " << ex.className() << "::" << ex.methodName() << endl; - return 1; - } + //--- + //--- free local memory + //--- + delete algo; + } catch (CoinError &ex) { + cerr << "COIN Exception [ " << ex.message() << " ]" + << " at " << ex.fileName() << ":L" << ex.lineNumber() << " in " + << ex.className() << "::" << ex.methodName() << endl; + return 1; + } - return 0; + return 0; } - diff --git a/Dip/examples/MAD/MAD_DecompApp.cpp b/Dip/examples/MAD/MAD_DecompApp.cpp index 66b4e643..826a812e 100644 --- a/Dip/examples/MAD/MAD_DecompApp.cpp +++ b/Dip/examples/MAD/MAD_DecompApp.cpp @@ -12,913 +12,821 @@ // All Rights Reserved. 
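// Orientation note for this example (paraphrasing the model comments further
// down in this file): MAD assigns each of the m original matrix rows to one
// of beta blocks of capacity kappa, maximizing the number of assigned rows;
// rows left unassigned form the border, so nBorder = nRows - z, and since
// DECOMP minimizes, the code works with -z (see the best-known bound
// adjustment in initializeApp()).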
// //===========================================================================// -#include "DecompCutOsi.h" #include "MAD_DecompApp.h" +#include "DecompCutOsi.h" -#include "CoinMpsIO.hpp" #include "CoinModel.hpp" +#include "CoinMpsIO.hpp" #define USE_QUALEX -//TODO: should be easy to write heuristics to get ip feasible points +// TODO: should be easy to write heuristics to get ip feasible points // from subproblem points - since capacity is only extra constraint -// this should be point out as another advantage of Decomp methods... +// this should be point out as another advantage of Decomp methods... // --------------------------------------------------------------------- // -int MAD_DecompApp::isOrtho(const int * rowInd1, - const int * rowInd2, - const int rowLen1, - const int rowLen2){ - - int i1, i2; - - /* - --- - --- CAREFUL: this is only correct if the input is sorted - --- - */ - - i1 = 0; - i2 = 0; - - while((i1 < rowLen1) && (i2 < rowLen2)){ - /* - --- - --- if there is a row in common, then the rows are not orthogonal - --- - */ - if(rowInd1[i1] == rowInd2[i2]) - return 0; - - /* - --- - --- advance the row pointer for the lesser valued row index - --- - */ - if(rowInd1[i1] < rowInd2[i2]) - ++i1; - else - ++i2; - } - return 1; +int MAD_DecompApp::isOrtho(const int *rowInd1, const int *rowInd2, + const int rowLen1, const int rowLen2) { + + int i1, i2; + + /* + --- + --- CAREFUL: this is only correct if the input is sorted + --- + */ + + i1 = 0; + i2 = 0; + + while ((i1 < rowLen1) && (i2 < rowLen2)) { + /* + --- + --- if there is a row in common, then the rows are not orthogonal + --- + */ + if (rowInd1[i1] == rowInd2[i2]) + return 0; + + /* + --- + --- advance the row pointer for the lesser valued row index + --- + */ + if (rowInd1[i1] < rowInd2[i2]) + ++i1; + else + ++i2; + } + return 1; } // --------------------------------------------------------------------- // -void MAD_DecompApp::initializeApp(UtilParameters & utilParam) { - - UtilPrintFuncBegin(m_osLog, m_classTag, - "initializeApp()", m_param.LogDebugLevel, 2); - - - //--- - //--- get application parameters - //--- - m_appParam.getSettings(utilParam); - m_appParam.dumpSettings(m_osLog); //use message handler - - //--- - //--- read instance from lp file (from MADLIB) - //--- http://elib.zib.de/pub/mp-testdata/madlib/index.html - //--- - string lpFile = m_appParam.DataDir + UtilDirSlash(); - lpFile += m_appParam.Instance; - if(m_appParam.DataSubDir == "miplib"){ - lpFile += ".p.lp"; - } else if(m_appParam.DataSubDir == "netlib"){ - lpFile += ".ob4"; - } - m_instance.readLp(lpFile.c_str()); - - m_nOrigRows = m_instance.getNumRows(); - m_beta = m_appParam.NumBlocks; - - //--- - //--- read best known lb/ub - //--- - string bestKnownFile = m_appParam.DataDir + UtilDirSlash(); - bestKnownFile += "madlib." 
+ m_appParam.DataSubDir + ".opt"; - { - ifstream is; - string instanceName; - double bestLB, bestUB; - UtilOpenFile(is, bestKnownFile); - while(!is.eof()){ - //--- - //--- these are the number of rows in the border (less is better) - //--- - is >> instanceName >> bestLB >> bestUB; - //printf("Instance = %15s bestLB = %6g bestUB = %6g\n", - // instanceName.c_str(), bestLB, bestUB); - - instanceName = UtilStrTrim(instanceName); - if(instanceName == m_appParam.Instance){ - //--- - //--- the paper solves z = max sum x, - //--- where x is an assignment to a block - //--- so, nBorder = nRows - z - //--- or, z = nRows - nBorder - //--- - //--- but we only do min, so we solve -z = min sum (-x) - //---- so, -z = nBorder - nRows - //--- - m_bestKnownLB = bestLB - m_nOrigRows; - m_bestKnownUB = bestUB - m_nOrigRows; - break; - } +void MAD_DecompApp::initializeApp(UtilParameters &utilParam) { + + UtilPrintFuncBegin(m_osLog, m_classTag, "initializeApp()", + m_param.LogDebugLevel, 2); + + //--- + //--- get application parameters + //--- + m_appParam.getSettings(utilParam); + m_appParam.dumpSettings(m_osLog); // use message handler + + //--- + //--- read instance from lp file (from MADLIB) + //--- http://elib.zib.de/pub/mp-testdata/madlib/index.html + //--- + string lpFile = m_appParam.DataDir + UtilDirSlash(); + lpFile += m_appParam.Instance; + if (m_appParam.DataSubDir == "miplib") { + lpFile += ".p.lp"; + } else if (m_appParam.DataSubDir == "netlib") { + lpFile += ".ob4"; + } + m_instance.readLp(lpFile.c_str()); + + m_nOrigRows = m_instance.getNumRows(); + m_beta = m_appParam.NumBlocks; + + //--- + //--- read best known lb/ub + //--- + string bestKnownFile = m_appParam.DataDir + UtilDirSlash(); + bestKnownFile += "madlib." + m_appParam.DataSubDir + ".opt"; + { + ifstream is; + string instanceName; + double bestLB, bestUB; + UtilOpenFile(is, bestKnownFile); + while (!is.eof()) { + //--- + //--- these are the number of rows in the border (less is better) + //--- + is >> instanceName >> bestLB >> bestUB; + // printf("Instance = %15s bestLB = %6g bestUB = %6g\n", + // instanceName.c_str(), bestLB, bestUB); + + instanceName = UtilStrTrim(instanceName); + if (instanceName == m_appParam.Instance) { + //--- + //--- the paper solves z = max sum x, + //--- where x is an assignment to a block + //--- so, nBorder = nRows - z + //--- or, z = nRows - nBorder + //--- + //--- but we only do min, so we solve -z = min sum (-x) + //---- so, -z = nBorder - nRows + //--- + m_bestKnownLB = bestLB - m_nOrigRows; + m_bestKnownUB = bestUB - m_nOrigRows; + break; } - } + } + } + + //--- + //--- set capacity based on MADLIB study (if it is not set): + //--- http://elib.zib.de/pub/mp-testdata/madlib/index.en.html + //--- + if (m_appParam.Capacity != -1) { + m_kappa = m_appParam.Capacity; + } else { + m_kappa = static_cast(ceil(static_cast(m_nOrigRows) / m_beta)); + if (m_appParam.DataSubDir == "netlib" || + m_appParam.DataSubDir == "equipart") { + m_kappa = + static_cast(ceil(static_cast(m_nOrigRows) / m_beta)); + } else if (m_appParam.DataSubDir == "miplib" || + m_appParam.DataSubDir == "miplibT") { + m_kappa = static_cast(ceil(1.05 * m_nOrigRows / m_beta)); + } else if (m_appParam.DataSubDir == "steiner") { + if (m_appParam.Instance[0] == 'g') { + m_kappa = 30; + } else if (m_appParam.Instance[0] == 'd') { + m_kappa = 50; + } + } + } + UTIL_DEBUG(m_param.LogDebugLevel, 1, + (*m_osLog) << "Instance = " << m_appParam.Instance << endl + << " nRows = " << m_nOrigRows << endl + << " bestLB = " << m_bestKnownLB << endl + << " bestUB 
= " << m_bestKnownUB << endl + << " Beta = " << m_beta << endl + << " Kappa = " << m_kappa << endl;); - //--- - //--- set capacity based on MADLIB study (if it is not set): - //--- http://elib.zib.de/pub/mp-testdata/madlib/index.en.html - //--- - if(m_appParam.Capacity != -1){ - m_kappa = m_appParam.Capacity; - } - else{ - m_kappa - = static_cast(ceil(static_cast(m_nOrigRows)/m_beta)); - if(m_appParam.DataSubDir == "netlib" || - m_appParam.DataSubDir == "equipart"){ - m_kappa - = static_cast(ceil(static_cast(m_nOrigRows)/m_beta)); - } else if(m_appParam.DataSubDir == "miplib" || - m_appParam.DataSubDir == "miplibT"){ - m_kappa = static_cast(ceil( 1.05 * m_nOrigRows / m_beta) ); - } else if(m_appParam.DataSubDir == "steiner"){ - if(m_appParam.Instance[0] == 'g'){ - m_kappa = 30; - } else if (m_appParam.Instance[0] == 'd'){ - m_kappa = 50; - } - } - } + int n_cols = m_nOrigRows * m_beta; - UTIL_DEBUG(m_param.LogDebugLevel, 1, - (*m_osLog) - << "Instance = " << m_appParam.Instance << endl - << " nRows = " << m_nOrigRows << endl - << " bestLB = " << m_bestKnownLB << endl - << " bestUB = " << m_bestKnownUB << endl - << " Beta = " << m_beta << endl - << " Kappa = " << m_kappa << endl; - ); - - int n_cols = m_nOrigRows * m_beta; - #ifdef __MAD_USE_CLIQUER__ - m_cliquer = new MAD_Cliquer(n_cols); -#endif + m_cliquer = new MAD_Cliquer(n_cols); +#endif #ifdef __MAD_USE_QUALEX__ - m_qualex = new MAD_Qualex(n_cols); + m_qualex = new MAD_Qualex(n_cols); #endif - //TODO: not the best name - maybe column intersection graph? - m_conflictGraph = graph_new(m_nOrigRows); - m_auxMemPool.allocateMemory(m_nOrigRows, n_cols, m_beta); + // TODO: not the best name - maybe column intersection graph? + m_conflictGraph = graph_new(m_nOrigRows); + m_auxMemPool.allocateMemory(m_nOrigRows, n_cols, m_beta); - UtilPrintFuncEnd(m_osLog, m_classTag, - "initializeApp()", m_param.LogDebugLevel, 2); + UtilPrintFuncEnd(m_osLog, m_classTag, "initializeApp()", + m_param.LogDebugLevel, 2); } #if 1 // --------------------------------------------------------------------- // -void -MAD_DecompApp::APPcreateModel(double *& objCoeff, - map & modelCore, - map > & modelRelax) { - - //--- - //--- createModel is a pure virtual method of DecompApp and must - //--- be derived by the application class to define the partitioning - //--- of constraints into [A,b] = [A',b'] union [A'', b''] - //--- - UtilPrintFuncBegin(m_osLog, m_classTag, - "APPcreateModel()", m_param.LogDebugLevel, 2); - - //--- - //--- x[i,b] in {0,1}, 1 = row i is assigned to block b - //--- i in M = {1, ..., m} - //--- b in B = {1, ..., beta} - //--- beta <= m (possibly an input = number of processors available) - //--- - //--- max sum{i in M, b in B} x[i,b] <==> - //--- min sum{i in M, b in B} -x[i,b] - //--- s.t. - //--- (1) sum{b in B} x[i,b] <= 1, for i in M - //--- (2) sum{i in M} x[i,b] <= k, for b in B - //--- (3) x[i,b] + x[j,b'] <= 1, for b,b' in B, b != b', - //--- i,j in M, i != j, such that - //--- a[i,k] != 0 != a[j,k], for some k - //--- (4) x[i,b] in {0,1} - //--- - //--- (1),(3), and (4) forms a (big) clique problem [modelRelax] - //--- (2) forms the core [modelCore ] - //--- - int n_cols = m_nOrigRows * m_beta; - - //--- - //--- open memory for the objective coefficients of modelCore - //--- who is responsible to open this memory? MAD_DecompApp - //--- who is responsible to free this memory? 
DecompAlgo - //--- - objCoeff = new double[n_cols]; - CoinFillN(objCoeff, n_cols, -1.0); - - //--- - //--- set the constraint matrix of modelCore - //--- who is responsible to open this memory? MAD_DecompApp - //--- who is responsible to free this memory? DecompAlgo - //--- - //--- (2) sum{i in M} x[i,b] <= k, for b in B - //--- - DecompConstraintSet * modelCoreCl = new DecompConstraintSet(); - modelCoreCl->M = new CoinPackedMatrix(false, 0.0, 0.0); - CoinAssertHint(modelCoreCl->M, "Error: Out of Memory"); - modelCoreCl->M->setDimensions(0, n_cols); - modelCoreCl->M->reserve(m_beta, m_beta * m_nOrigRows); - - //TODO: for speed - do in blocks - int b1, b2, i, j, b; - for(b = 0; b < m_beta; b++){ - CoinPackedVector row; - for(i = 0; i < m_nOrigRows; i++){ - row.insert(xIndex(i,b), 1.0); - } - modelCoreCl->M->appendRow(row); - } - - //--- - //--- set the row upper and lower bounds - //--- set the col upper and lower bounds - //--- - UtilFillN(modelCoreCl->rowLB, m_beta, -m_infinity); - UtilFillN(modelCoreCl->rowUB, m_beta, static_cast(m_kappa)); - UtilFillN(modelCoreCl->colLB, n_cols, 0.0); - UtilFillN(modelCoreCl->colUB, n_cols, 1.0); - - - - //--- - //--- set the constraint matrix of modelRelax - //--- who is responsible to open this memory? MAD_DecompApp - //--- who is responsible to free this memory? DecompAlgo - //--- - //--- (1) sum{b in B} x[i,b] <= 1, for i in M - //--- (3) x[i,b] + x[j,b'] <= 1, for b,b' in B, b != b', - //--- i,j in M, i != j, such that - //--- a[i,k] != 0 != a[j,k], for some k - //--- (4) x[i,b] in {0,1} - //--- - DecompConstraintSet * modelRelaxCl = new DecompConstraintSet(); - modelRelaxCl->M = new CoinPackedMatrix(false, 0.0, 0.0); - CoinAssertHint(modelRelaxCl->M, "Error: Out of Memory"); - modelRelaxCl->M->setDimensions(0, n_cols); - - //how to efficiently construct the complement of conflict graph??? - int n_vertices = m_nOrigRows * m_beta;; +void MAD_DecompApp::APPcreateModel( + double *&objCoeff, map &modelCore, + map> &modelRelax) { + + //--- + //--- createModel is a pure virtual method of DecompApp and must + //--- be derived by the application class to define the partitioning + //--- of constraints into [A,b] = [A',b'] union [A'', b''] + //--- + UtilPrintFuncBegin(m_osLog, m_classTag, "APPcreateModel()", + m_param.LogDebugLevel, 2); + + //--- + //--- x[i,b] in {0,1}, 1 = row i is assigned to block b + //--- i in M = {1, ..., m} + //--- b in B = {1, ..., beta} + //--- beta <= m (possibly an input = number of processors available) + //--- + //--- max sum{i in M, b in B} x[i,b] <==> + //--- min sum{i in M, b in B} -x[i,b] + //--- s.t. + //--- (1) sum{b in B} x[i,b] <= 1, for i in M + //--- (2) sum{i in M} x[i,b] <= k, for b in B + //--- (3) x[i,b] + x[j,b'] <= 1, for b,b' in B, b != b', + //--- i,j in M, i != j, such that + //--- a[i,k] != 0 != a[j,k], for some k + //--- (4) x[i,b] in {0,1} + //--- + //--- (1),(3), and (4) forms a (big) clique problem [modelRelax] + //--- (2) forms the core [modelCore ] + //--- + int n_cols = m_nOrigRows * m_beta; + + //--- + //--- open memory for the objective coefficients of modelCore + //--- who is responsible to open this memory? MAD_DecompApp + //--- who is responsible to free this memory? DecompAlgo + //--- + objCoeff = new double[n_cols]; + CoinFillN(objCoeff, n_cols, -1.0); + + //--- + //--- set the constraint matrix of modelCore + //--- who is responsible to open this memory? MAD_DecompApp + //--- who is responsible to free this memory? 
DecompAlgo + //--- + //--- (2) sum{i in M} x[i,b] <= k, for b in B + //--- + DecompConstraintSet *modelCoreCl = new DecompConstraintSet(); + modelCoreCl->M = new CoinPackedMatrix(false, 0.0, 0.0); + CoinAssertHint(modelCoreCl->M, "Error: Out of Memory"); + modelCoreCl->M->setDimensions(0, n_cols); + modelCoreCl->M->reserve(m_beta, m_beta * m_nOrigRows); + + // TODO: for speed - do in blocks + int b1, b2, i, j, b; + for (b = 0; b < m_beta; b++) { + CoinPackedVector row; + for (i = 0; i < m_nOrigRows; i++) { + row.insert(xIndex(i, b), 1.0); + } + modelCoreCl->M->appendRow(row); + } + + //--- + //--- set the row upper and lower bounds + //--- set the col upper and lower bounds + //--- + UtilFillN(modelCoreCl->rowLB, m_beta, -m_infinity); + UtilFillN(modelCoreCl->rowUB, m_beta, static_cast(m_kappa)); + UtilFillN(modelCoreCl->colLB, n_cols, 0.0); + UtilFillN(modelCoreCl->colUB, n_cols, 1.0); + + //--- + //--- set the constraint matrix of modelRelax + //--- who is responsible to open this memory? MAD_DecompApp + //--- who is responsible to free this memory? DecompAlgo + //--- + //--- (1) sum{b in B} x[i,b] <= 1, for i in M + //--- (3) x[i,b] + x[j,b'] <= 1, for b,b' in B, b != b', + //--- i,j in M, i != j, such that + //--- a[i,k] != 0 != a[j,k], for some k + //--- (4) x[i,b] in {0,1} + //--- + DecompConstraintSet *modelRelaxCl = new DecompConstraintSet(); + modelRelaxCl->M = new CoinPackedMatrix(false, 0.0, 0.0); + CoinAssertHint(modelRelaxCl->M, "Error: Out of Memory"); + modelRelaxCl->M->setDimensions(0, n_cols); + + // how to efficiently construct the complement of conflict graph??? + int n_vertices = m_nOrigRows * m_beta; + ; #ifdef __MAD_USE_CLIQUER__ - //construct complete graph -- then delete -- UGH!! TEMP - for(i = 0; i < n_vertices ; i++){ - for(j = (i+1); j < n_vertices; j++){ - m_cliquer->addEdge(m_cliquer->m_g, i, j); - } - } + // construct complete graph -- then delete -- UGH!! TEMP + for (i = 0; i < n_vertices; i++) { + for (j = (i + 1); j < n_vertices; j++) { + m_cliquer->addEdge(m_cliquer->m_g, i, j); + } + } #endif #ifdef __MAD_USE_QUALEX__ - for(i = 0; i < n_vertices ; i++){ - for(j = (i+1); j < n_vertices; j++){ - m_qualex->addEdge(i, j); - } - } + for (i = 0; i < n_vertices; i++) { + for (j = (i + 1); j < n_vertices; j++) { + m_qualex->addEdge(i, j); + } + } #endif - UTIL_DEBUG(m_param.LogDebugLevel, 4, - m_cliquer->printGraph(m_cliquer->m_g); - ); - - //--- - //--- for speed, use CoinModel - //--- - + UTIL_DEBUG(m_param.LogDebugLevel, 4, m_cliquer->printGraph(m_cliquer->m_g);); + + //--- + //--- for speed, use CoinModel + //--- + + // TODO: consider not adding (1) to subproblem, + // at least for solving exact version.... + + //--- + //--- (1) sum{b in B} x[i,b] <= 1, for i in M + //--- + CoinModel cModel; + for (i = 0; i < m_nOrigRows; i++) { + CoinPackedVector row; + for (b = 0; b < m_beta; b++) { + row.insert(xIndex(i, b), 1.0); + } + cModel.addRow(row.getNumElements(), row.getIndices(), row.getElements()); - //TODO: consider not adding (1) to subproblem, - // at least for solving exact version.... 
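For reference, the block-assignment model that the comments above spell out in ASCII can be restated compactly; this is only a transcription of those comments, using kappa = m_kappa, beta = |B| = m_beta, M = original rows, and B = blocks:

\[
\begin{aligned}
\max\;& \sum_{i \in M}\sum_{b \in B} x_{ib}
\;\Longleftrightarrow\; \min\; \sum_{i \in M}\sum_{b \in B} -x_{ib} \\
\text{s.t.}\quad (1)\;& \sum_{b \in B} x_{ib} \le 1 && \forall\, i \in M \\
(2)\;& \sum_{i \in M} x_{ib} \le \kappa && \forall\, b \in B \\
(3)\;& x_{ib} + x_{jb'} \le 1 && \forall\, b \ne b',\; i \ne j \text{ with } a_{ik} \ne 0 \ne a_{jk} \text{ for some } k \\
(4)\;& x_{ib} \in \{0,1\}
\end{aligned}
\]

As the comments note, (1), (3), and (4) form the (clique) relaxation [modelRelax] and (2) forms the core [modelCore].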
- - //--- - //--- (1) sum{b in B} x[i,b] <= 1, for i in M - //--- - CoinModel cModel; - for(i = 0; i < m_nOrigRows; i++){ - CoinPackedVector row; - for(b = 0; b < m_beta; b++){ - row.insert(xIndex(i, b), 1.0); - } - cModel.addRow(row.getNumElements(), - row.getIndices(), row.getElements()); - #ifdef __MAD_USE_CLIQUER__ - for(b1 = 0; b1 < m_beta; b1++){ - for(b2 = 0; b2 < m_beta; b2++){ - if(b1 == b2) - continue; - m_cliquer->delEdge(m_cliquer->m_g, - xIndex(i, b1), xIndex(i, b2)); - } + for (b1 = 0; b1 < m_beta; b1++) { + for (b2 = 0; b2 < m_beta; b2++) { + if (b1 == b2) + continue; + m_cliquer->delEdge(m_cliquer->m_g, xIndex(i, b1), xIndex(i, b2)); } -#endif + } +#endif #ifdef __MAD_USE_QUALEX__ - for(b1 = 0; b1 < m_beta; b1++){ - for(b2 = 0; b2 < m_beta; b2++){ - if(b1 == b2) - continue; - m_qualex->removeEdge(xIndex(i, b1), xIndex(i, b2)); - } + for (b1 = 0; b1 < m_beta; b1++) { + for (b2 = 0; b2 < m_beta; b2++) { + if (b1 == b2) + continue; + m_qualex->removeEdge(xIndex(i, b1), xIndex(i, b2)); } + } #endif - } - UTIL_DEBUG(m_param.LogDebugLevel, 4, - m_cliquer->printGraph(m_cliquer->m_g); - ); - - //--- - //--- In order to do this part fast, construct a row-ordered - //--- matrix which is sorted on column indices. - //--- (3) x[i,b] + x[j,b'] <= 1, for b,b' in B, b != b', - //--- i,j in M, i != j, such that - //--- a[i,k] != 0 != a[j,k], for some k - //--- - CoinPackedMatrix Morder(*m_instance.getMatrixByRow()); - Morder.orderMatrix(); - - for(i = 0; i < m_nOrigRows; i++){ - for(j = (i+1); j < m_nOrigRows; j++){ - CoinShallowPackedVector rowI = Morder.getVector(i); - CoinShallowPackedVector rowJ = Morder.getVector(j); - if(!isOrtho(rowI.getIndices(), - rowJ.getIndices(), - rowI.getNumElements(), - rowJ.getNumElements())){ - //--- - //--- add a constraint to the model for every pair of blocks - //--- - for(b1 = 0; b1 < m_beta; b1++){ - for(b2 = 0; b2 < m_beta; b2++){ - if(b1 == b2) - continue; - - CoinPackedVector row; - row.insert(xIndex(i, b1), 1.0); - row.insert(xIndex(j, b2), 1.0); - - cModel.addRow(row.getNumElements(), - row.getIndices(), row.getElements()); + } + UTIL_DEBUG(m_param.LogDebugLevel, 4, m_cliquer->printGraph(m_cliquer->m_g);); + + //--- + //--- In order to do this part fast, construct a row-ordered + //--- matrix which is sorted on column indices. 
+ //--- (3) x[i,b] + x[j,b'] <= 1, for b,b' in B, b != b', + //--- i,j in M, i != j, such that + //--- a[i,k] != 0 != a[j,k], for some k + //--- + CoinPackedMatrix Morder(*m_instance.getMatrixByRow()); + Morder.orderMatrix(); + + for (i = 0; i < m_nOrigRows; i++) { + for (j = (i + 1); j < m_nOrigRows; j++) { + CoinShallowPackedVector rowI = Morder.getVector(i); + CoinShallowPackedVector rowJ = Morder.getVector(j); + if (!isOrtho(rowI.getIndices(), rowJ.getIndices(), rowI.getNumElements(), + rowJ.getNumElements())) { + //--- + //--- add a constraint to the model for every pair of blocks + //--- + for (b1 = 0; b1 < m_beta; b1++) { + for (b2 = 0; b2 < m_beta; b2++) { + if (b1 == b2) + continue; + + CoinPackedVector row; + row.insert(xIndex(i, b1), 1.0); + row.insert(xIndex(j, b2), 1.0); + + cModel.addRow(row.getNumElements(), row.getIndices(), + row.getElements()); #ifdef __MAD_USE_CLIQUER__ - m_cliquer->delEdge(m_cliquer->m_g, - xIndex(i, b1), xIndex(j, b2)); - m_cliquer->delEdge(m_cliquer->m_g, - xIndex(j, b1), xIndex(i, b2)); + m_cliquer->delEdge(m_cliquer->m_g, xIndex(i, b1), xIndex(j, b2)); + m_cliquer->delEdge(m_cliquer->m_g, xIndex(j, b1), xIndex(i, b2)); #endif #ifdef __MAD_USE_QUALEX__ - m_qualex->removeEdge(xIndex(i, b1), xIndex(j, b2)); - m_qualex->removeEdge(xIndex(j, b1), xIndex(i, b2)); -#endif - } - } - //if doing this way, then need to add an interface - //class for this graph object - fudge - do we use - //cliquer just for the graph object!? - //or should we have a graph object independent of all of - //of qualex/cliquer so we don't force user - boost? - //but then user must have boost/graph - GRAPH_ADD_EDGE(m_conflictGraph, i, j); - } + m_qualex->removeEdge(xIndex(i, b1), xIndex(j, b2)); + m_qualex->removeEdge(xIndex(j, b1), xIndex(i, b2)); +#endif + } + } + // if doing this way, then need to add an interface + // class for this graph object - fudge - do we use + // cliquer just for the graph object!? + // or should we have a graph object independent of all of + // of qualex/cliquer so we don't force user - boost? 
+ // but then user must have boost/graph + GRAPH_ADD_EDGE(m_conflictGraph, i, j); } - } - UTIL_DEBUG(m_param.LogDebugLevel, 4, - m_cliquer->printGraph(m_cliquer->m_g); - ); - cModel.createPackedMatrix(*modelRelaxCl->M, cModel.associatedArray()); - - //--- - //--- set the row upper and lower bounds - //--- set the col upper and lower bounds - //--- - UtilFillN(modelRelaxCl->rowLB, modelRelaxCl->M->getNumRows(), -m_infinity); - UtilFillN(modelRelaxCl->rowUB, modelRelaxCl->M->getNumRows(), 1.0); - UtilFillN(modelRelaxCl->colLB, n_cols, 0.0); - UtilFillN(modelRelaxCl->colUB, n_cols, 1.0); - - //--- - //--- set the indices of the integer variables of modelRelax - //--- - UtilIotaN(modelRelaxCl->integerVars, n_cols, 0); - - //--- - //--- push core and relaxed into application object - //--- - vector< DecompConstraintSet* > modelRelaxV; - modelRelaxV.push_back(modelRelaxCl); - modelCore.insert(make_pair(MODEL_CLIQUE, modelCoreCl)); - modelRelax.insert(make_pair(MODEL_CLIQUE, modelRelaxV)); - - - UTIL_DEBUG(m_param.LogDebugLevel, 3, - (*m_osLog) << "\nCONFLICT GRAPH:\n"; - graph_print(m_conflictGraph); - ); - - UtilPrintFuncEnd(m_osLog, m_classTag, - "APPcreateModel()", m_param.LogDebugLevel, 2); - + } + } + UTIL_DEBUG(m_param.LogDebugLevel, 4, m_cliquer->printGraph(m_cliquer->m_g);); + cModel.createPackedMatrix(*modelRelaxCl->M, cModel.associatedArray()); + + //--- + //--- set the row upper and lower bounds + //--- set the col upper and lower bounds + //--- + UtilFillN(modelRelaxCl->rowLB, modelRelaxCl->M->getNumRows(), -m_infinity); + UtilFillN(modelRelaxCl->rowUB, modelRelaxCl->M->getNumRows(), 1.0); + UtilFillN(modelRelaxCl->colLB, n_cols, 0.0); + UtilFillN(modelRelaxCl->colUB, n_cols, 1.0); + + //--- + //--- set the indices of the integer variables of modelRelax + //--- + UtilIotaN(modelRelaxCl->integerVars, n_cols, 0); + + //--- + //--- push core and relaxed into application object + //--- + vector modelRelaxV; + modelRelaxV.push_back(modelRelaxCl); + modelCore.insert(make_pair(MODEL_CLIQUE, modelCoreCl)); + modelRelax.insert(make_pair(MODEL_CLIQUE, modelRelaxV)); + + UTIL_DEBUG(m_param.LogDebugLevel, 3, (*m_osLog) << "\nCONFLICT GRAPH:\n"; + graph_print(m_conflictGraph);); + + UtilPrintFuncEnd(m_osLog, m_classTag, "APPcreateModel()", + m_param.LogDebugLevel, 2); } #else // --------------------------------------------------------------------- // -void -MAD_DecompApp::APPcreateModel(double *& objCoeff, - map & modelCore, - map & modelRelax) { - - //--- - //--- createModel is a pure virtual method of DecompApp and must - //--- be derived by the application class to define the partitioning - //--- of constraints into [A,b] = [A',b'] union [A'', b''] - //--- - - UtilPrintFuncBegin(m_osLog, m_classTag, - "APPcreateModel()", m_param.LogDebugLevel, 2); - - //--- x[i,b] in {0,1}, 1 = row i is assigned to block b - //--- i in M = {1, ..., m} - //--- b in B = {1, ..., beta} - //--- beta <= m (possibly an input = number of processors available) - //--- - //--- max sum{i in M, b in B} x[i,b] <==> - //--- min sum{i in M, b in B} -x[i,b] - //--- s.t. 
- //--- (1) sum{b in B} x[i,b] <= 1, for i in M - //--- (2) sum{i in M} x[i,b] <= k, for b in B - //--- (3) x[i,b] + x[j,b'] <= 1, for b,b' in B, b != b', - //--- i,j in M, i != j, such that - //--- a[i,k] != 0 != a[j,k], for some k - //--- (4) x[i,b] in {0,1} - //--- - //--- (3),(4) form the clique problem [modelRelax] - //--- (1),(2) form the core [modelCore ] - //--- - int n_cols = m_nOrigRows * m_beta; - - //--- - //--- open memory for the objective coefficients of modelCore - //--- who is responsible to open this memory? MAD_DecompApp - //--- who is responsible to free this memory? DecompAlgo - //--- - objCoeff = new double[n_cols]; - CoinFillN(objCoeff, n_cols, -1.0); - - //--- - //--- set the constraint matrix of modelCore - //--- who is responsible to open this memory? MAD_DecompApp - //--- who is responsible to free this memory? DecompAlgo - //--- - //--- (1) sum{b in B} x[i,b] <= 1, for i in M - //--- (2) sum{i in M} x[i,b] <= k, for b in B - //--- - DecompConstraintSet * modelCoreCl = new DecompConstraintSet(); - modelCoreCl->M = new CoinPackedMatrix(false, 0.0, 0.0); - CoinAssertHint(modelCoreCl->M, "Error: Out of Memory"); - modelCoreCl->M->setDimensions(0, n_cols); - modelCoreCl->M->reserve(m_nOrigRows + m_beta, - 2 * m_beta * m_nOrigRows); - - //TODO: for speed - do in blocks - int b1, b2, i, j, b; - - //TODO: make this optional to add to subproblem or not - //--- - //--- (1) sum{b in B} x[i,b] <= 1, for i in M - //--- - for(i = 0; i < m_nOrigRows; i++){ - CoinPackedVector row; - for(b = 0; b < m_beta; b++){ - row.insert(xIndex(i, b), 1.0); - } - modelCoreCl->M->appendRow(row); - } - - //--- - //--- (2) sum{i in M} x[i,b] <= k, for b in B - //--- - for(b = 0; b < m_beta; b++){ - CoinPackedVector row; - for(i = 0; i < m_nOrigRows; i++){ - row.insert(xIndex(i,b), 1.0); - } - modelCoreCl->M->appendRow(row); - } - - //--- - //--- set the row upper and lower bounds - //--- set the col upper and lower bounds - //--- - double kappa = static_cast(m_kappa); - UtilFillN(modelCoreCl->rowLB, m_nOrigRows, -m_infinity); - UtilFillN(modelCoreCl->rowUB, m_nOrigRows, 1.0); - UtilFillN(modelCoreCl->rowLB, m_beta, -m_infinity); - UtilFillN(modelCoreCl->rowUB, m_beta, kappa); - UtilFillN(modelCoreCl->colLB, n_cols, 0.0); - UtilFillN(modelCoreCl->colUB, n_cols, 1.0); - - //--- - //--- set the constraint matrix of modelRelax - //--- who is responsible to open this memory? MAD_DecompApp - //--- who is responsible to free this memory? DecompAlgo - //--- - //--- (3) x[i,b] + x[j,b'] <= 1, for b,b' in B, b != b', - //--- i,j in M, i != j, such that - //--- a[i,k] != 0 != a[j,k], for some k - //--- (4) x[i,b] in {0,1} - //--- - DecompConstraintSet * modelRelaxCl = new DecompConstraintSet(); - modelRelaxCl->M = new CoinPackedMatrix(false, 0.0, 0.0); - CoinAssertHint(modelRelaxCl->M, "Error: Out of Memory"); - modelRelaxCl->M->setDimensions(0, n_cols); - - //how to efficiently construct the complement of conflict graph??? 
- int n_vertices = m_nOrigRows * m_beta;; +void MAD_DecompApp::APPcreateModel( + double *&objCoeff, map &modelCore, + map &modelRelax) { + + //--- + //--- createModel is a pure virtual method of DecompApp and must + //--- be derived by the application class to define the partitioning + //--- of constraints into [A,b] = [A',b'] union [A'', b''] + //--- + + UtilPrintFuncBegin(m_osLog, m_classTag, "APPcreateModel()", + m_param.LogDebugLevel, 2); + + //--- x[i,b] in {0,1}, 1 = row i is assigned to block b + //--- i in M = {1, ..., m} + //--- b in B = {1, ..., beta} + //--- beta <= m (possibly an input = number of processors available) + //--- + //--- max sum{i in M, b in B} x[i,b] <==> + //--- min sum{i in M, b in B} -x[i,b] + //--- s.t. + //--- (1) sum{b in B} x[i,b] <= 1, for i in M + //--- (2) sum{i in M} x[i,b] <= k, for b in B + //--- (3) x[i,b] + x[j,b'] <= 1, for b,b' in B, b != b', + //--- i,j in M, i != j, such that + //--- a[i,k] != 0 != a[j,k], for some k + //--- (4) x[i,b] in {0,1} + //--- + //--- (3),(4) form the clique problem [modelRelax] + //--- (1),(2) form the core [modelCore ] + //--- + int n_cols = m_nOrigRows * m_beta; + + //--- + //--- open memory for the objective coefficients of modelCore + //--- who is responsible to open this memory? MAD_DecompApp + //--- who is responsible to free this memory? DecompAlgo + //--- + objCoeff = new double[n_cols]; + CoinFillN(objCoeff, n_cols, -1.0); + + //--- + //--- set the constraint matrix of modelCore + //--- who is responsible to open this memory? MAD_DecompApp + //--- who is responsible to free this memory? DecompAlgo + //--- + //--- (1) sum{b in B} x[i,b] <= 1, for i in M + //--- (2) sum{i in M} x[i,b] <= k, for b in B + //--- + DecompConstraintSet *modelCoreCl = new DecompConstraintSet(); + modelCoreCl->M = new CoinPackedMatrix(false, 0.0, 0.0); + CoinAssertHint(modelCoreCl->M, "Error: Out of Memory"); + modelCoreCl->M->setDimensions(0, n_cols); + modelCoreCl->M->reserve(m_nOrigRows + m_beta, 2 * m_beta * m_nOrigRows); + + // TODO: for speed - do in blocks + int b1, b2, i, j, b; + + // TODO: make this optional to add to subproblem or not + //--- + //--- (1) sum{b in B} x[i,b] <= 1, for i in M + //--- + for (i = 0; i < m_nOrigRows; i++) { + CoinPackedVector row; + for (b = 0; b < m_beta; b++) { + row.insert(xIndex(i, b), 1.0); + } + modelCoreCl->M->appendRow(row); + } + + //--- + //--- (2) sum{i in M} x[i,b] <= k, for b in B + //--- + for (b = 0; b < m_beta; b++) { + CoinPackedVector row; + for (i = 0; i < m_nOrigRows; i++) { + row.insert(xIndex(i, b), 1.0); + } + modelCoreCl->M->appendRow(row); + } + + //--- + //--- set the row upper and lower bounds + //--- set the col upper and lower bounds + //--- + double kappa = static_cast(m_kappa); + UtilFillN(modelCoreCl->rowLB, m_nOrigRows, -m_infinity); + UtilFillN(modelCoreCl->rowUB, m_nOrigRows, 1.0); + UtilFillN(modelCoreCl->rowLB, m_beta, -m_infinity); + UtilFillN(modelCoreCl->rowUB, m_beta, kappa); + UtilFillN(modelCoreCl->colLB, n_cols, 0.0); + UtilFillN(modelCoreCl->colUB, n_cols, 1.0); + + //--- + //--- set the constraint matrix of modelRelax + //--- who is responsible to open this memory? MAD_DecompApp + //--- who is responsible to free this memory? 
DecompAlgo + //--- + //--- (3) x[i,b] + x[j,b'] <= 1, for b,b' in B, b != b', + //--- i,j in M, i != j, such that + //--- a[i,k] != 0 != a[j,k], for some k + //--- (4) x[i,b] in {0,1} + //--- + DecompConstraintSet *modelRelaxCl = new DecompConstraintSet(); + modelRelaxCl->M = new CoinPackedMatrix(false, 0.0, 0.0); + CoinAssertHint(modelRelaxCl->M, "Error: Out of Memory"); + modelRelaxCl->M->setDimensions(0, n_cols); + + // how to efficiently construct the complement of conflict graph??? + int n_vertices = m_nOrigRows * m_beta; + ; #ifdef __MAD_USE_CLIQUER__ - //construct complete graph -- then delete -- UGH!! TEMP - for(i = 0; i < n_vertices ; i++){ - for(j = (i+1); j < n_vertices; j++){ - m_cliquer->addEdge(m_cliquer->m_g, i, j); - } - } - UTIL_DEBUG(m_param.LogDebugLevel, 4, - (*m_osLog) << "m_cliquer complete graph:\n"; - m_cliquer->printGraph(m_cliquer->m_g); - ); + // construct complete graph -- then delete -- UGH!! TEMP + for (i = 0; i < n_vertices; i++) { + for (j = (i + 1); j < n_vertices; j++) { + m_cliquer->addEdge(m_cliquer->m_g, i, j); + } + } + UTIL_DEBUG(m_param.LogDebugLevel, 4, + (*m_osLog) << "m_cliquer complete graph:\n"; + m_cliquer->printGraph(m_cliquer->m_g);); #endif #ifdef __MAD_USE_QUALEX__ - for(i = 0; i < n_vertices ; i++){ - for(j = (i+1); j < n_vertices; j++){ - m_qualex->addEdge(i, j); - } - } - UTIL_DEBUG(m_param.LogDebugLevel, 4, - (*m_osLog) << "m_qualex complete graph:\n"; - m_qualex->printGraph(m_qualex->m_graphOrig); - ); + for (i = 0; i < n_vertices; i++) { + for (j = (i + 1); j < n_vertices; j++) { + m_qualex->addEdge(i, j); + } + } + UTIL_DEBUG(m_param.LogDebugLevel, 4, + (*m_osLog) << "m_qualex complete graph:\n"; + m_qualex->printGraph(m_qualex->m_graphOrig);); #endif - //--- - //--- for speed, use CoinModel - //--- - CoinModel cModel; - - //--- - //--- In order to do this part fast, construct a row-ordered - //--- matrix which is sorted on column indices. - //--- (3) x[i,b] + x[j,b'] <= 1, for b,b' in B, b != b', - //--- i,j in M, i != j, such that - //--- a[i,k] != 0 != a[j,k], for some k - //--- - //--- Example: - //--- i:0 j:12, B={0,1,2} - //--- i:12 i:0 , B={0,1,2} - //--- no need to duplicate, can do just i < j - //--- x[12,0] + x[0,1] <= 1 - //--- x[12,0] + x[0,2] <= 1 - //--- x[12,1] + x[0,0] <= 1 - //--- x[12,1] + x[0,2] <= 1 - //--- x[12,2] + x[0,0] <= 1 - //--- x[12,2] + x[0,1] <= 1 - //--- + //--- + //--- for speed, use CoinModel + //--- + CoinModel cModel; + + //--- + //--- In order to do this part fast, construct a row-ordered + //--- matrix which is sorted on column indices. 
+ //--- (3) x[i,b] + x[j,b'] <= 1, for b,b' in B, b != b', + //--- i,j in M, i != j, such that + //--- a[i,k] != 0 != a[j,k], for some k + //--- + //--- Example: + //--- i:0 j:12, B={0,1,2} + //--- i:12 i:0 , B={0,1,2} + //--- no need to duplicate, can do just i < j + //--- x[12,0] + x[0,1] <= 1 + //--- x[12,0] + x[0,2] <= 1 + //--- x[12,1] + x[0,0] <= 1 + //--- x[12,1] + x[0,2] <= 1 + //--- x[12,2] + x[0,0] <= 1 + //--- x[12,2] + x[0,1] <= 1 + //--- + + CoinPackedMatrix Morder(*m_instance.getMatrixByRow()); + Morder.orderMatrix(); + + for (i = 0; i < m_nOrigRows; i++) { + for (j = (i + 1); j < m_nOrigRows; j++) { + CoinShallowPackedVector rowI = Morder.getVector(i); + CoinShallowPackedVector rowJ = Morder.getVector(j); + if (!isOrtho(rowI.getIndices(), rowJ.getIndices(), rowI.getNumElements(), + rowJ.getNumElements())) { + + //--- + //--- add a constraint to the model for every pair of blocks + //--- + for (b1 = 0; b1 < m_beta; b1++) { + for (b2 = 0; b2 < m_beta; b2++) { + if (b1 == b2) + continue; + + CoinPackedVector row; + row.insert(xIndex(i, b1), 1.0); + row.insert(xIndex(j, b2), 1.0); + + UTIL_DEBUG(m_param.LogDebugLevel, 5, + (*m_osLog) + << "Row " << cModel.numberRows() << " (" << i << "," + << b1 << ")->" << xIndex(i, b1) << " (" << j << "," + << b2 << ")->" << xIndex(j, b2) << endl;); + + cModel.addRow(row.getNumElements(), row.getIndices(), + row.getElements()); - CoinPackedMatrix Morder(*m_instance.getMatrixByRow()); - Morder.orderMatrix(); - - for(i = 0; i < m_nOrigRows; i++){ - for(j = (i+1); j < m_nOrigRows; j++){ - CoinShallowPackedVector rowI = Morder.getVector(i); - CoinShallowPackedVector rowJ = Morder.getVector(j); - if(!isOrtho(rowI.getIndices(), - rowJ.getIndices(), - rowI.getNumElements(), - rowJ.getNumElements())){ - - //--- - //--- add a constraint to the model for every pair of blocks - //--- - for(b1 = 0; b1 < m_beta; b1++){ - for(b2 = 0; b2 < m_beta; b2++){ - if(b1 == b2) - continue; - - CoinPackedVector row; - row.insert(xIndex(i, b1), 1.0); - row.insert(xIndex(j, b2), 1.0); - - UTIL_DEBUG(m_param.LogDebugLevel, 5, - (*m_osLog) - << "Row " << cModel.numberRows() - << " (" << i << "," << b1 << ")->" - << xIndex(i, b1) - << " (" << j << "," << b2 << ")->" - << xIndex(j, b2) << endl; - ); - - cModel.addRow(row.getNumElements(), - row.getIndices(), row.getElements()); - -#ifdef __MAD_USE_CLIQUER__ - m_cliquer->delEdge(m_cliquer->m_g, - xIndex(i, b1), xIndex(j, b2)); - m_cliquer->delEdge(m_cliquer->m_g, - xIndex(j, b1), xIndex(i, b2)); +#ifdef __MAD_USE_CLIQUER__ + m_cliquer->delEdge(m_cliquer->m_g, xIndex(i, b1), xIndex(j, b2)); + m_cliquer->delEdge(m_cliquer->m_g, xIndex(j, b1), xIndex(i, b2)); #endif - + #ifdef __MAD_USE_QUALEX__ - m_qualex->removeEdge(xIndex(i, b1), xIndex(j, b2)); - m_qualex->removeEdge(xIndex(j, b1), xIndex(i, b2)); + m_qualex->removeEdge(xIndex(i, b1), xIndex(j, b2)); + m_qualex->removeEdge(xIndex(j, b1), xIndex(i, b2)); #endif - - - - //if doing this way, then need to add an interface - //class for this graph object - fudge - do we use - //cliquer just for the graph object!? - //or should we have a graph object independent of all of - //of qualex/cliquer so we don't force user - boost? - //but then user must have boost/graph - GRAPH_ADD_EDGE(m_conflictGraph, i, j); - - } - } - } + + // if doing this way, then need to add an interface + // class for this graph object - fudge - do we use + // cliquer just for the graph object!? + // or should we have a graph object independent of all of + // of qualex/cliquer so we don't force user - boost? 
+ // but then user must have boost/graph + GRAPH_ADD_EDGE(m_conflictGraph, i, j); + } + } } - } + } + } #ifdef __MAD_USE_CLIQUER__ - UTIL_DEBUG(m_param.LogDebugLevel, 4, - (*m_osLog) << "m_cliquer graph:\n"; - m_cliquer->printGraph(m_cliquer->m_g); - ); + UTIL_DEBUG(m_param.LogDebugLevel, 4, (*m_osLog) << "m_cliquer graph:\n"; + m_cliquer->printGraph(m_cliquer->m_g);); #endif #ifdef __MAD_USE_QUALEX__ - UTIL_DEBUG(m_param.LogDebugLevel, 4, - (*m_osLog) << "m_qualex complete graph:\n"; - m_qualex->printGraph(m_qualex->m_graphOrig); - ); + UTIL_DEBUG(m_param.LogDebugLevel, 4, + (*m_osLog) << "m_qualex complete graph:\n"; + m_qualex->printGraph(m_qualex->m_graphOrig);); #endif - - cModel.createPackedMatrix(*modelRelaxCl->M, cModel.associatedArray()); - - //--- - //--- set the row upper and lower bounds - //--- set the col upper and lower bounds - //--- - UtilFillN(modelRelaxCl->rowLB, modelRelaxCl->M->getNumRows(), -m_infinity); - UtilFillN(modelRelaxCl->rowUB, modelRelaxCl->M->getNumRows(), 1.0); - UtilFillN(modelRelaxCl->colLB, n_cols, 0.0); - UtilFillN(modelRelaxCl->colUB, n_cols, 1.0); - - //--- - //--- set the indices of the integer variables of modelRelax - //--- - UtilIotaN(modelRelaxCl->integerVars, n_cols, 0); - - //--- - //--- push core and relaxed into application object - //--- - modelCore.insert(make_pair(MODEL_CLIQUE, modelCoreCl)); - modelRelax.insert(make_pair(MODEL_CLIQUE, modelRelaxCl)); - - - UTIL_DEBUG(m_param.LogDebugLevel, 4, - (*m_osLog) << "\nCONFLICT GRAPH:\n"; - graph_print(m_conflictGraph); - ); - - UtilPrintFuncEnd(m_osLog, m_classTag, - "APPcreateModel()", m_param.LogDebugLevel, 2); + cModel.createPackedMatrix(*modelRelaxCl->M, cModel.associatedArray()); + + //--- + //--- set the row upper and lower bounds + //--- set the col upper and lower bounds + //--- + UtilFillN(modelRelaxCl->rowLB, modelRelaxCl->M->getNumRows(), -m_infinity); + UtilFillN(modelRelaxCl->rowUB, modelRelaxCl->M->getNumRows(), 1.0); + UtilFillN(modelRelaxCl->colLB, n_cols, 0.0); + UtilFillN(modelRelaxCl->colUB, n_cols, 1.0); + + //--- + //--- set the indices of the integer variables of modelRelax + //--- + UtilIotaN(modelRelaxCl->integerVars, n_cols, 0); + + //--- + //--- push core and relaxed into application object + //--- + modelCore.insert(make_pair(MODEL_CLIQUE, modelCoreCl)); + modelRelax.insert(make_pair(MODEL_CLIQUE, modelRelaxCl)); + + UTIL_DEBUG(m_param.LogDebugLevel, 4, (*m_osLog) << "\nCONFLICT GRAPH:\n"; + graph_print(m_conflictGraph);); + + UtilPrintFuncEnd(m_osLog, m_classTag, "APPcreateModel()", + m_param.LogDebugLevel, 2); } #endif //--------------------------------------------------------------------- // -//too many args - fix this -DecompStatus MAD_DecompApp::APPsolveRelaxed(const int whichModel, - const double * redCostX, - const double * origCost, - const double alpha, - const int n_origCols, - const bool checkRC, - const bool checkDup, - bool & isExact, - OsiSolverInterface * m_subprobSI, - list & vars){ - - - //TODO: are we using (1) or just (3) in clique definition? - //--- - //--- This can be considered an independent set problem on - //--- the conflict graph defined by these constraints. - //--- - //--- (1) sum{b in B} x[i,b] <= 1, for i in M - //--- (3) x[i,b] + x[j,b'] <= 1, for b,b' in B, b != b', - //--- i,j in M, i != j, such that - //--- a[i,k] != 0 != a[j,k], for some k - //--- (4) x[i,b] in {0,1} - //--- - //--- Note: Finding maximal cliques in a graph is equivalent to - //--- finding maximal independent sets in the complement of that graph. 
- //--- - //--- We are using cliquer, so : - //---- (1) we need to look at complement of conflict graph - //--- (2) cliquer assumes maximization, but we want "least" reduced - //--- cost - so, we need to flip the reduced cost. - //--- (3) cliquer expects integral weights, so we need to scale - //--- (4) cliquer only accepts positive vertex weights - //--- - - UtilPrintFuncBegin(m_osLog, m_classTag, - "APPsolveRelaxed()", m_param.LogDebugLevel, 2); - - - CoinTimer thisTimer; - thisTimer.restart(); - - DecompStatus status = STAT_FEASIBLE; - isExact = false; - - - //--- - //--- try to generate a column with small reduced cost - //--- that is feasible to original problem using primal heuristic - //--- - int i; - int nGreedyPts = 0; - int nGreedyPtsKept = 0; - vector greedyPoints; - nGreedyPts = heuristicGreedy(greedyPoints, - INCREASING, redCostX, origCost, redCostX); - for(i = 0; i < nGreedyPts; i++){ - GreedyPoint & gp = greedyPoints[i]; - - //--- - //--- create a DecompVar from the greedySol - //--- - UTIL_DEBUG(m_param.LogDebugLevel, 3, - (*m_osLog) - << "Greedy DecompVar origCost = " << gp.solValueOrigCost - << " redCost = " << gp.solValueRedCost + alpha << endl; - printOriginalSolution(n_origCols, gp.solution, &cout); - ); - //TODO: might be more efficient if return sparse... greedySol - - //TODO: -alpha???? check sign see what changed for MMKP - - if(!checkRC || ((gp.solValueRedCost + alpha) < -DecompEpsilon)){ - vars.push_back(new DecompVar(n_origCols, - gp.solution, - gp.solValueRedCost + alpha, - gp.solValueOrigCost)); - nGreedyPtsKept++; - } - } - - return STAT_FEASIBLE; - - - - - - +// too many args - fix this +DecompStatus MAD_DecompApp::APPsolveRelaxed( + const int whichModel, const double *redCostX, const double *origCost, + const double alpha, const int n_origCols, const bool checkRC, + const bool checkDup, bool &isExact, OsiSolverInterface *m_subprobSI, + list &vars) { + + // TODO: are we using (1) or just (3) in clique definition? + //--- + //--- This can be considered an independent set problem on + //--- the conflict graph defined by these constraints. + //--- + //--- (1) sum{b in B} x[i,b] <= 1, for i in M + //--- (3) x[i,b] + x[j,b'] <= 1, for b,b' in B, b != b', + //--- i,j in M, i != j, such that + //--- a[i,k] != 0 != a[j,k], for some k + //--- (4) x[i,b] in {0,1} + //--- + //--- Note: Finding maximal cliques in a graph is equivalent to + //--- finding maximal independent sets in the complement of that graph. + //--- + //--- We are using cliquer, so : + //---- (1) we need to look at complement of conflict graph + //--- (2) cliquer assumes maximization, but we want "least" reduced + //--- cost - so, we need to flip the reduced cost. 
+ //--- (3) cliquer expects integral weights, so we need to scale + //--- (4) cliquer only accepts positive vertex weights + //--- + + UtilPrintFuncBegin(m_osLog, m_classTag, "APPsolveRelaxed()", + m_param.LogDebugLevel, 2); + + CoinTimer thisTimer; + thisTimer.restart(); + + DecompStatus status = STAT_FEASIBLE; + isExact = false; + + //--- + //--- try to generate a column with small reduced cost + //--- that is feasible to original problem using primal heuristic + //--- + int i; + int nGreedyPts = 0; + int nGreedyPtsKept = 0; + vector greedyPoints; + nGreedyPts = + heuristicGreedy(greedyPoints, INCREASING, redCostX, origCost, redCostX); + for (i = 0; i < nGreedyPts; i++) { + GreedyPoint &gp = greedyPoints[i]; + + //--- + //--- create a DecompVar from the greedySol + //--- + UTIL_DEBUG( + m_param.LogDebugLevel, 3, + (*m_osLog) << "Greedy DecompVar origCost = " << gp.solValueOrigCost + << " redCost = " << gp.solValueRedCost + alpha << endl; + printOriginalSolution(n_origCols, gp.solution, &cout);); + // TODO: might be more efficient if return sparse... greedySol + + // TODO: -alpha???? check sign see what changed for MMKP + + if (!checkRC || ((gp.solValueRedCost + alpha) < -DecompEpsilon)) { + vars.push_back(new DecompVar(n_origCols, gp.solution, + gp.solValueRedCost + alpha, + gp.solValueOrigCost)); + nGreedyPtsKept++; + } + } + + return STAT_FEASIBLE; + + // stupid object if pass in graph each time + int n_verts = m_cliquer->getNumVertices(m_cliquer->m_g); + + UTIL_DEBUG(m_param.LogDebugLevel, 4, printf("\nGRAPH m_g:\n"); + m_cliquer->printGraph(m_cliquer->m_g);); + + // if use IP solver over IS and min uA>=0, will pick all 0s + // there is no incentive + + int b; + //--- + //--- the goal is to find a subgraph with negative reduced cost + //--- therefore, it will never make sense to include a node with + //--- non-negative cost + //--- + // how do we compile without cliquer? + graph_t *m_gStar = m_cliquer->graphNew(n_verts); + m_cliquer->copyGraphNonPos(m_gStar, m_cliquer->m_g, redCostX); + printf("timer after copy graph = %12.10f\n", thisTimer.timeElapsed()); + + UTIL_DEBUG( + m_param.LogDebugLevel, 4, printf("\nGRAPH m_gStar:\n"); + m_cliquer->printGraph(m_gStar); + + for (i = 0; i < m_nOrigRows; i++) { + for (b = 0; b < m_beta; b++) { + cout << "x[ " << i << "," << b << " -> " << xIndex(i, b) + << " ] : " << redCostX[xIndex(i, b)] << endl; + } + }); + + if (graph_edge_count(m_gStar) <= 0) { + m_cliquer->graphFree(m_gStar); + return STAT_FEASIBLE; // think + } - - - - - - - - - - - - - - //stupid object if pass in graph each time - int n_verts = m_cliquer->getNumVertices(m_cliquer->m_g); - - UTIL_DEBUG(m_param.LogDebugLevel, 4, - printf("\nGRAPH m_g:\n"); - m_cliquer->printGraph(m_cliquer->m_g); - ); - - - - //if use IP solver over IS and min uA>=0, will pick all 0s - //there is no incentive - - int b; - //--- - //--- the goal is to find a subgraph with negative reduced cost - //--- therefore, it will never make sense to include a node with - //--- non-negative cost - //--- - //how do we compile without cliquer? 
- graph_t * m_gStar = m_cliquer->graphNew(n_verts); - m_cliquer->copyGraphNonPos(m_gStar, - m_cliquer->m_g, redCostX); - printf("timer after copy graph = %12.10f\n", - thisTimer.timeElapsed()); - - UTIL_DEBUG(m_param.LogDebugLevel, 4, - printf("\nGRAPH m_gStar:\n"); - m_cliquer->printGraph(m_gStar); - - for(i = 0; i < m_nOrigRows; i++){ - for(b = 0; b < m_beta; b++){ - cout << "x[ " << i << "," << b << " -> " << xIndex(i,b) - << " ] : " << redCostX[xIndex(i,b)] << endl; - } - } - ); - - if(graph_edge_count(m_gStar) <= 0){ - m_cliquer->graphFree(m_gStar); - return STAT_FEASIBLE; //think - } - - - - #ifdef USE_QUALEX - //negative weights are bad -- qualex takes sqrt of weights - //negative weights ok? or do we need an offset? - //print to dimacs format - //list apInd; - //m_cliquer->printGraphDimacs(m_gStar); - - double * redCostXNegOffset = m_auxMemPool.dblArrNCoreCols; - memcpy(redCostXNegOffset, redCostX, n_origCols * sizeof(double)); - - //--- - //--- flip reduced costs (max c == min -c) - //--- - UtilNegateArr(n_origCols, redCostXNegOffset); - - //--- - //--- add a constant so that all vertex weights are positive, inc alpha - //--- - double offset = 0.0; - double minrc = *min_element(redCostXNegOffset, - redCostXNegOffset + n_origCols); - if(minrc <= 0){ - offset = -minrc + 1; - UtilAddOffsetArr(n_origCols, offset, redCostXNegOffset); - } - - //--- - //--- if for initial vars, perturb the costs slightly, - //--- finding max clique with all weights equal is harder - //--- - if(!checkRC){ - //make srand a Util func - in case change random function? - UtilPerturbCost(n_origCols, n_origCols, 0.0, 1.0, redCostXNegOffset); - } - - - //m_cliquer->printWeightDimacs(m_gStar->n, redCostXNegOffset); - //m_cliquer->printGraphDimacs(m_cliquer->m_g); - //m_cliquer->printWeightDimacs(m_cliquer->m_g->n, redCostX); - - - - UTIL_DEBUG(m_param.LogDebugLevel, 4, - for(i = 0; i < m_nOrigRows; i++){ - for(b = 0; b < m_beta; b++){ - cout << "x[ " << i << "," << b << " -> " << xIndex(i,b) - << " ] : " << redCostXNegOffset[xIndex(i,b)] << endl; - } - } - ); - - - - m_qualex->setUpForSolve(redCostXNegOffset); - - //--- - //--- we need to remove non-negative vertices from the graph - //--- data structure used by qualex - we are trying to find cliques - //--- so the easiest thing to do, is to remove all edges to each - //--- non-negative vertex - //--- - Graph * m_graph = m_qualex->m_graph; - int j; + // negative weights are bad -- qualex takes sqrt of weights + // negative weights ok? or do we need an offset? + // print to dimacs format + // list apInd; + // m_cliquer->printGraphDimacs(m_gStar); + + double *redCostXNegOffset = m_auxMemPool.dblArrNCoreCols; + memcpy(redCostXNegOffset, redCostX, n_origCols * sizeof(double)); + + //--- + //--- flip reduced costs (max c == min -c) + //--- + UtilNegateArr(n_origCols, redCostXNegOffset); + + //--- + //--- add a constant so that all vertex weights are positive, inc alpha + //--- + double offset = 0.0; + double minrc = + *min_element(redCostXNegOffset, redCostXNegOffset + n_origCols); + if (minrc <= 0) { + offset = -minrc + 1; + UtilAddOffsetArr(n_origCols, offset, redCostXNegOffset); + } + + //--- + //--- if for initial vars, perturb the costs slightly, + //--- finding max clique with all weights equal is harder + //--- + if (!checkRC) { + // make srand a Util func - in case change random function? 
+ UtilPerturbCost(n_origCols, n_origCols, 0.0, 1.0, redCostXNegOffset); + } + + // m_cliquer->printWeightDimacs(m_gStar->n, redCostXNegOffset); + // m_cliquer->printGraphDimacs(m_cliquer->m_g); + // m_cliquer->printWeightDimacs(m_cliquer->m_g->n, redCostX); + + UTIL_DEBUG( + m_param.LogDebugLevel, 4, for (i = 0; i < m_nOrigRows; i++) { + for (b = 0; b < m_beta; b++) { + cout << "x[ " << i << "," << b << " -> " << xIndex(i, b) + << " ] : " << redCostXNegOffset[xIndex(i, b)] << endl; + } + }); + + m_qualex->setUpForSolve(redCostXNegOffset); + + //--- + //--- we need to remove non-negative vertices from the graph + //--- data structure used by qualex - we are trying to find cliques + //--- so the easiest thing to do, is to remove all edges to each + //--- non-negative vertex + //--- + Graph *m_graph = m_qualex->m_graph; + int j; #if 0 for(i = 0; i < m_graph->n; i++){ printf("\n%d: [%g] ", i, m_graph->weights[i]); @@ -930,7 +838,7 @@ DecompStatus MAD_DecompApp::APPsolveRelaxed(const int whichModel, } #endif - m_qualex->removeNonNegVertices(redCostX); + m_qualex->removeNonNegVertices(redCostX); #if 0 for(i = 0; i < m_graph->n; i++){ @@ -943,311 +851,271 @@ DecompStatus MAD_DecompApp::APPsolveRelaxed(const int whichModel, } #endif - printf("timer before greedy call = %12.10f\n", - thisTimer.timeElapsed()); - m_qualex->findMaxIndSetGreedy(redCostXNegOffset); - printf("timer after greedy call = %12.10f\n", - thisTimer.timeElapsed()); - + printf("timer before greedy call = %12.10f\n", thisTimer.timeElapsed()); + m_qualex->findMaxIndSetGreedy(redCostXNegOffset); + printf("timer after greedy call = %12.10f\n", thisTimer.timeElapsed()); + + double varRedCost; + double varOrigCost; + varRedCost = 0.0; + list::iterator it; + for (it = m_qualex->m_clique.begin(); it != m_qualex->m_clique.end(); it++) { + varRedCost += redCostX[*it]; + UTIL_DEBUG(m_param.LogDebugLevel, 4, + printf("\nGreedy ind: %d redCostX: %g varRedCost: %g", *it, + redCostX[*it], varRedCost);); + } + UTIL_DEBUG(m_param.LogDebugLevel, 4, + printf("\nvarRC = %g, alpha = %g", varRedCost, alpha);); + if (!checkRC || ((varRedCost + alpha) < -DecompEpsilon)) { + // then no need to get into qualex + UTIL_DEBUG(m_param.LogDebugLevel, 4, + (*m_osLog) << "\nGreedy is good enough rc = " + << varRedCost + alpha << endl;); + } else { + // status = STAT_UNKNOWN; + // m_qualex->findMaxIndSetQualexMS(); //keeps failing use cbc + } + + //--- + //--- store the solution as a DecompVar and push into list + //--- + + // if(status != STAT_UNKNOWN){ + if (!checkRC || ((varRedCost + alpha) < -DecompEpsilon)) { + + CoinAssert(m_qualex->m_clique.size() > 0); + varRedCost = 0.0; + varOrigCost = 0.0; + vector apInd(m_qualex->m_clique.begin(), m_qualex->m_clique.end()); + vector apEls(m_qualex->m_clique.size(), 1.0); + + for (it = m_qualex->m_clique.begin(); it != m_qualex->m_clique.end(); + it++) { + varRedCost += redCostX[*it]; + varOrigCost += origCost[*it]; - double varRedCost; - double varOrigCost; - varRedCost = 0.0; - list::iterator it; - for(it = m_qualex->m_clique.begin(); - it != m_qualex->m_clique.end(); it++){ - varRedCost += redCostX[*it]; UTIL_DEBUG(m_param.LogDebugLevel, 4, - printf("\nGreedy ind: %d redCostX: %g varRedCost: %g", - *it, redCostX[*it], varRedCost); - ); - } - UTIL_DEBUG(m_param.LogDebugLevel, 4, - printf("\nvarRC = %g, alpha = %g", varRedCost, alpha); - ); - if(!checkRC || ((varRedCost + alpha) < -DecompEpsilon)){ - //then no need to get into qualex + printf("\nind: %d redCostX: %g varRedCost: %g", *it, + redCostX[*it], 
varRedCost);); + } + UTIL_DEBUG(m_param.LogDebugLevel, 4, + printf("\nvarRC = %g, alpha = %g", varRedCost, alpha);); + if (!checkRC || ((varRedCost + alpha) < -DecompEpsilon)) { UTIL_DEBUG(m_param.LogDebugLevel, 4, - (*m_osLog) - << "\nGreedy is good enough rc = " - << varRedCost+alpha << endl; - ); - } - else{ - //status = STAT_UNKNOWN; - //m_qualex->findMaxIndSetQualexMS(); //keeps failing use cbc - } - - - //--- - //--- store the solution as a DecompVar and push into list - //--- - - //if(status != STAT_UNKNOWN){ - if(!checkRC || ((varRedCost + alpha) < -DecompEpsilon)){ - - CoinAssert(m_qualex->m_clique.size() > 0); - varRedCost = 0.0; - varOrigCost = 0.0; - vector apInd(m_qualex->m_clique.begin(), - m_qualex->m_clique.end()); - vector apEls(m_qualex->m_clique.size(), 1.0); - - for(it = m_qualex->m_clique.begin(); - it != m_qualex->m_clique.end(); it++){ - varRedCost += redCostX[*it]; - varOrigCost += origCost[*it]; - - UTIL_DEBUG(m_param.LogDebugLevel, 4, - printf("\nind: %d redCostX: %g varRedCost: %g", - *it, redCostX[*it], varRedCost); - ); - } - UTIL_DEBUG(m_param.LogDebugLevel, 4, - printf("\nvarRC = %g, alpha = %g", varRedCost, alpha); - ); - if(!checkRC || ((varRedCost + alpha) < -DecompEpsilon)){ - UTIL_DEBUG(m_param.LogDebugLevel, 4, - printf("\nPUSH var with RC = %g", varRedCost + alpha); - ); - DecompVar * var = new DecompVar(apInd, apEls, - varRedCost + alpha, varOrigCost); - vars.push_back(var); - - //double tmpSol[100000]; - //var->fillDenseArr(n_origCols, tmpSol); - //printOriginalSolution(n_origCols, tmpSol, &cout); - } - } - printf("timer after setupvars = %12.10f\n", - thisTimer.timeElapsed()); + printf("\nPUSH var with RC = %g", varRedCost + alpha);); + DecompVar *var = + new DecompVar(apInd, apEls, varRedCost + alpha, varOrigCost); + vars.push_back(var); + + // double tmpSol[100000]; + // var->fillDenseArr(n_origCols, tmpSol); + // printOriginalSolution(n_origCols, tmpSol, &cout); + } + } + printf("timer after setupvars = %12.10f\n", thisTimer.timeElapsed()); + +#else // this is for cliquer + + int *redCostXInt = m_auxMemPool.intArrNCoreCols; + int alphaInt = 0; + + //--- + //--- scale reduced costs (include alpha) to integers + //--- + int scaleFactor = + UtilScaleDblToIntArr(n_origCols, redCostX, redCostXInt, alpha, &alphaInt); + + UTIL_DEBUG( + m_param.LogDebugLevel, 4, for (i = 0; i < m_nOrigRows; i++) { + for (b = 0; b < m_beta; b++) { + cout << "x[ " << i << "," << b << " -> " << xIndex(i, b) + << " ] : " << redCostXInt[xIndex(i, b)] << endl; + } + }); + + //--- + //--- flip reduced costs (max c == min -c) + //--- + UtilNegateArr(n_origCols, redCostXInt); + + UTIL_DEBUG( + m_param.LogDebugLevel, 4, for (i = 0; i < m_nOrigRows; i++) { + for (b = 0; b < m_beta; b++) { + cout << "x[ " << i << "," << b << " -> " << xIndex(i, b) + << " ] : " << redCostXInt[xIndex(i, b)] << endl; + } + }); + + //--- + //--- add a constant so that all vertex weights are positive, inc alpha + //--- + int offset = 0; + int minrc = *min_element(redCostXInt, redCostXInt + n_origCols); + if (alphaInt < minrc) + minrc = alphaInt; + if (minrc <= 0) { + offset = -minrc + 1; + UtilAddOffsetArr(n_origCols, offset, redCostXInt); + } + + // the issue is "maximal"... 
you are finding the maximum clique == + // the maximum IS (in complemented graph), to find the minimum reduced cost + // + + //--- + //--- set the vertex weights in the cliquer object + //--- + m_cliquer->setVertWeight(m_gStar, redCostXInt); + + UTIL_DEBUG( + m_param.LogDebugLevel, 4, for (i = 0; i < m_nOrigRows; i++) { + for (b = 0; b < m_beta; b++) { + cout << "x[ " << i << "," << b << " -> " << xIndex(i, b) + << " ] : " << redCostX[xIndex(i, b)] << "\t" + << redCostXInt[xIndex(i, b)] << endl; + } + } m_cliquer->printGraph(m_gStar);); + + if (checkRC) { + + //--- + //--- we will be looking for cliques that have negative reduced cost + //--- sum{rc[i] s[i]} + alpha < 0, + //--- + //--- since cliquer is solving for maximal weighed clique, + //--- we flipped the vertex weights + //--- sum{-rc[i] s[i]} - alpha > 0, + //--- sum{-rcInt[i] s[i]} - alphaInt > 0, + //--- + //--- so, the min_weight we will accept is alphaInt + epsilon + //--- + + // this won't work - might have to switch to find one... + // m_cliquer->cliqueFindAll(alphaInt + 2*offset, 0, 1);//THINK + + // this should find the max + // m_cliquer->cliqueFindOne(0, 0, 1); + m_cliquer->cliqueFindOne(m_gStar, 0, 0, 0); + } else { + // in general this will cosntruct all the maximal cliques - which is + // bad for any large problems - need to limit, or just generate one + // or use heuristic to generate a few... + // if !checkRC, we can assume is coming from genInit, just gen one + // m_cliquer->cliqueUnweightedFindAll(2, m_kappa, 0); + + // need to figure out a trivial ub on clique size based on costs sent + // in? we need "size" < kappa + // think - prune cliquer for restricted length? should be easy? + // prune by weight and length - but then, does that solve all of + // MAD with cliquer? + + // if we could generate all maximal ISs of size < kappa, + // we'd be done - right? 
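As a reading aid, the net effect of the weight preparation described above (scale the reduced costs to integers, flip the sign because cliquer maximizes, then shift so every vertex weight is strictly positive) can be sketched as below. This is not the Util* helpers used in the patch: the fixed scale factor is an arbitrary illustration and the alpha/alphaInt bookkeeping is omitted.

// Minimal sketch, under the assumptions stated above, of preparing
// positive integer vertex weights for a maximizing clique solver from
// real-valued reduced costs: negate (max c == min -c), round to
// integers, then shift so all weights are > 0.
#include <algorithm>
#include <cmath>
#include <vector>

std::vector<int> cliquerWeightsSketch(const std::vector<double> &redCost,
                                      double scale = 1000.0) {
  std::vector<int> w(redCost.size());
  for (std::size_t k = 0; k < redCost.size(); ++k)
    w[k] = static_cast<int>(std::lround(-redCost[k] * scale)); // flip sign
  if (!w.empty()) {
    int minw = *std::min_element(w.begin(), w.end());
    if (minw <= 0) // solver expects strictly positive integer weights
      for (int &wk : w)
        wk += -minw + 1;
  }
  return w;
}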
+ m_cliquer->cliqueFindOne(m_gStar, 0, 0, 0); + // m_cliquer->cliqueFindAll(0, 0, 1);//THINK + } + + UTIL_DEBUG(m_param.LogDebugLevel, 4, m_cliquer->cliquePrintAll(m_gStar);); + + //--- + //--- store the solution as a DecompVar and push into list + //--- + double varRedCost; + double varOrigCost; + vector::iterator it; + for (i = 0; i < m_cliquer->m_clique_count; i++) { + vector apInd; + m_cliquer->cliquePopulateVector(i, apInd); + + varRedCost = 0.0; + varOrigCost = 0.0; + vector apEls(apInd.size(), 1.0); + for (it = apInd.begin(); it != apInd.end(); it++) { + varRedCost += redCostX[*it]; + varOrigCost += origCost[*it]; - - -#else //this is for cliquer - - int * redCostXInt = m_auxMemPool.intArrNCoreCols; - int alphaInt = 0; - - //--- - //--- scale reduced costs (include alpha) to integers - //--- - int scaleFactor = UtilScaleDblToIntArr(n_origCols, - redCostX, redCostXInt, - alpha, &alphaInt); - - UTIL_DEBUG(m_param.LogDebugLevel, 4, - for(i = 0; i < m_nOrigRows; i++){ - for(b = 0; b < m_beta; b++){ - cout << "x[ " << i << "," << b << " -> " << xIndex(i,b) - << " ] : " << redCostXInt[xIndex(i,b)] << endl; - } - } - ); - - - //--- - //--- flip reduced costs (max c == min -c) - //--- - UtilNegateArr(n_origCols, redCostXInt); - - UTIL_DEBUG(m_param.LogDebugLevel, 4, - for(i = 0; i < m_nOrigRows; i++){ - for(b = 0; b < m_beta; b++){ - cout << "x[ " << i << "," << b << " -> " << xIndex(i,b) - << " ] : " << redCostXInt[xIndex(i,b)] << endl; - } - } - ); - - //--- - //--- add a constant so that all vertex weights are positive, inc alpha - //--- - int offset = 0; - int minrc = *min_element(redCostXInt, redCostXInt + n_origCols); - if(alphaInt < minrc) - minrc = alphaInt; - if(minrc <= 0){ - offset = -minrc + 1; - UtilAddOffsetArr(n_origCols, offset, redCostXInt); - } - - - //the issue is "maximal"... you are finding the maximum clique == - //the maximum IS (in complemented graph), to find the minimum reduced cost - // - - //--- - //--- set the vertex weights in the cliquer object - //--- - m_cliquer->setVertWeight(m_gStar, redCostXInt); - - UTIL_DEBUG(m_param.LogDebugLevel, 4, - for(i = 0; i < m_nOrigRows; i++){ - for(b = 0; b < m_beta; b++){ - cout << "x[ " << i << "," << b << " -> " << xIndex(i,b) - << " ] : " << redCostX[xIndex(i,b)] << "\t" - << redCostXInt[xIndex(i,b)] << endl; - } - } - m_cliquer->printGraph(m_gStar); - ); - - if(checkRC){ - - //--- - //--- we will be looking for cliques that have negative reduced cost - //--- sum{rc[i] s[i]} + alpha < 0, - //--- - //--- since cliquer is solving for maximal weighed clique, - //--- we flipped the vertex weights - //--- sum{-rc[i] s[i]} - alpha > 0, - //--- sum{-rcInt[i] s[i]} - alphaInt > 0, - //--- - //--- so, the min_weight we will accept is alphaInt + epsilon - //--- - - //this won't work - might have to switch to find one... - //m_cliquer->cliqueFindAll(alphaInt + 2*offset, 0, 1);//THINK - - //this should find the max - //m_cliquer->cliqueFindOne(0, 0, 1); - m_cliquer->cliqueFindOne(m_gStar, 0, 0, 0); - } - else{ - //in general this will cosntruct all the maximal cliques - which is - //bad for any large problems - need to limit, or just generate one - //or use heuristic to generate a few... - //if !checkRC, we can assume is coming from genInit, just gen one - //m_cliquer->cliqueUnweightedFindAll(2, m_kappa, 0); - - //need to figure out a trivial ub on clique size based on costs sent - //in? we need "size" < kappa - //think - prune cliquer for restricted length? should be easy? 
- //prune by weight and length - but then, does that solve all of - //MAD with cliquer? - - //if we could generate all maximal ISs of size < kappa, - //we'd be done - right? - m_cliquer->cliqueFindOne(m_gStar, 0, 0, 0); - //m_cliquer->cliqueFindAll(0, 0, 1);//THINK - } - - UTIL_DEBUG(m_param.LogDebugLevel, 4, - m_cliquer->cliquePrintAll(m_gStar); - ); - - //--- - //--- store the solution as a DecompVar and push into list - //--- - double varRedCost; - double varOrigCost; - vector::iterator it; - for(i = 0; i < m_cliquer->m_clique_count; i++) { - vector apInd; - m_cliquer->cliquePopulateVector(i, apInd); - - varRedCost = 0.0; - varOrigCost = 0.0; - vector apEls(apInd.size(), 1.0); - for(it = apInd.begin(); it != apInd.end(); it++){ - varRedCost += redCostX[*it]; - varOrigCost += origCost[*it]; - - UTIL_DEBUG(m_param.LogDebugLevel, 4, - printf("\nind: %d redCostX: %g varRedCost: %g", - *it, redCostX[*it], varRedCost); - ); - - } - UTIL_DEBUG(m_param.LogDebugLevel, 4, - printf("\nvarRC = %g, alpha = %g", varRedCost, alpha); - ); - if(!checkRC || ((varRedCost + alpha) < -DecompEpsilon)){ - UTIL_DEBUG(m_param.LogDebugLevel, 4, - printf("\nPUSH var with RC = %g\n", varRedCost + alpha); - ); - vars.push_back(new DecompVar(apInd, apEls, - varRedCost + alpha, varOrigCost)); - } - } - - //--- - //--- now, we have to clean out the clique_list for the next pass - //--- - m_cliquer->cliqueFreeMemory(); + UTIL_DEBUG(m_param.LogDebugLevel, 4, + printf("\nind: %d redCostX: %g varRedCost: %g", *it, + redCostX[*it], varRedCost);); + } + UTIL_DEBUG(m_param.LogDebugLevel, 4, + printf("\nvarRC = %g, alpha = %g", varRedCost, alpha);); + if (!checkRC || ((varRedCost + alpha) < -DecompEpsilon)) { + UTIL_DEBUG(m_param.LogDebugLevel, 4, + printf("\nPUSH var with RC = %g\n", varRedCost + alpha);); + vars.push_back( + new DecompVar(apInd, apEls, varRedCost + alpha, varOrigCost)); + } + } + + //--- + //--- now, we have to clean out the clique_list for the next pass + //--- + m_cliquer->cliqueFreeMemory(); #endif - - m_cliquer->graphFree(m_gStar); - - UtilPrintFuncEnd(m_osLog, m_classTag, - "APPsolveRelaxed()", m_param.LogDebugLevel, 2); + m_cliquer->graphFree(m_gStar); + UtilPrintFuncEnd(m_osLog, m_classTag, "APPsolveRelaxed()", + m_param.LogDebugLevel, 2); - - return status; //think + return status; // think } //--------------------------------------------------------------------- // -int MAD_DecompApp::generateInitVars(DecompVarList & initVars){ - - //--- - //--- x[i,b] in {0,1}, 1 = row i is assigned to block b - //--- i in M = {1, ..., m} - //--- b in B = {1, ..., beta} - //--- beta <= m (possibly an input = number of processors available) - //--- - //--- min sum{i in M, b in B} -x[i,b] - //--- s.t. - //--- (1) sum{b in B} x[i,b] <= 1, for i in M - //--- (2) sum{i in M} x[i,b] <= k, for b in B - //--- (3) x[i,b] + x[j,b'] <= 1, for b,b' in B, b != b', - //--- i,j in M, i != j, such that - //--- a[i,k] != 0 != a[j,k], for some k - //--- (4) x[i,b] in {0,1} - //--- - - UtilPrintFuncBegin(m_osLog, m_classTag, - "generateInitVars()", m_param.LogDebugLevel, 2); - - //TODO: we should do some perturbation? - const double * origCost = m_model.objCoeff; //all -1.0 - - //note - if we do random perturbation and APPsolveRelaxed - //starts calling heuristics, then just let base class do this work? 
- - //since all costs are -1.0, the sort would be arbitrary - - - - - //--- - //--- try to generate a column with small reduced cost - //--- that is feasible to original problem using primal heuristic - //--- - int i; - int nGreedyPts = 0; - int nOrigCols = m_nOrigRows * m_beta; - vector greedyPoints; - nGreedyPts = heuristicGreedy(greedyPoints, INCREASING, origCost, origCost); - for(i = 0; i < nGreedyPts; i++){ - GreedyPoint & gp = greedyPoints[i]; - - //--- - //--- create a DecompVar from the greedySol - //--- - UTIL_DEBUG(m_param.LogDebugLevel, 4, - (*m_osLog) - << "Greedy DecompVar origCost = " << gp.solValueOrigCost; - printOriginalSolution(nOrigCols, gp.solution, &cout); - ); - //TODO: might be more efficient if return sparse... greedySol - initVars.push_back(new DecompVar(nOrigCols, - gp.solution, - 0, - gp.solValueOrigCost)); - } +int MAD_DecompApp::generateInitVars(DecompVarList &initVars) { + + //--- + //--- x[i,b] in {0,1}, 1 = row i is assigned to block b + //--- i in M = {1, ..., m} + //--- b in B = {1, ..., beta} + //--- beta <= m (possibly an input = number of processors available) + //--- + //--- min sum{i in M, b in B} -x[i,b] + //--- s.t. + //--- (1) sum{b in B} x[i,b] <= 1, for i in M + //--- (2) sum{i in M} x[i,b] <= k, for b in B + //--- (3) x[i,b] + x[j,b'] <= 1, for b,b' in B, b != b', + //--- i,j in M, i != j, such that + //--- a[i,k] != 0 != a[j,k], for some k + //--- (4) x[i,b] in {0,1} + //--- + + UtilPrintFuncBegin(m_osLog, m_classTag, "generateInitVars()", + m_param.LogDebugLevel, 2); + + // TODO: we should do some perturbation? + const double *origCost = m_model.objCoeff; // all -1.0 + + // note - if we do random perturbation and APPsolveRelaxed + // starts calling heuristics, then just let base class do this work? + + // since all costs are -1.0, the sort would be arbitrary + + //--- + //--- try to generate a column with small reduced cost + //--- that is feasible to original problem using primal heuristic + //--- + int i; + int nGreedyPts = 0; + int nOrigCols = m_nOrigRows * m_beta; + vector greedyPoints; + nGreedyPts = heuristicGreedy(greedyPoints, INCREASING, origCost, origCost); + for (i = 0; i < nGreedyPts; i++) { + GreedyPoint &gp = greedyPoints[i]; + + //--- + //--- create a DecompVar from the greedySol + //--- + UTIL_DEBUG(m_param.LogDebugLevel, 4, + (*m_osLog) << "Greedy DecompVar origCost = " + << gp.solValueOrigCost; + printOriginalSolution(nOrigCols, gp.solution, &cout);); + // TODO: might be more efficient if return sparse... 
greedySol + initVars.push_back( + new DecompVar(nOrigCols, gp.solution, 0, gp.solValueOrigCost)); + } #if 0 @@ -1339,116 +1207,109 @@ int MAD_DecompApp::generateInitVars(DecompVarList & initVars){ UTIL_DELARR(blocks); #endif - - UtilPrintFuncEnd(m_osLog, m_classTag, - "generateInitVars()", m_param.LogDebugLevel, 2); + UtilPrintFuncEnd(m_osLog, m_classTag, "generateInitVars()", + m_param.LogDebugLevel, 2); - - - return static_cast(initVars.size()); + return static_cast(initVars.size()); } - //--------------------------------------------------------------------- // -void MAD_DecompApp::printOriginalColumn(const int index, - ostream * os) const { - pair p = xIndexInv(index); - (*os) << "x[ " << p.first << " , " << p.second << " ]"; +void MAD_DecompApp::printOriginalColumn(const int index, ostream *os) const { + pair p = xIndexInv(index); + (*os) << "x[ " << p.first << " , " << p.second << " ]"; } /*-------------------------------------------------------------------------*/ -void MAD_DecompApp::printOriginalSolution(const int n_cols, - const double * solution, - ostream * os) const{ - int i, j, b; - int border_size; - double xj; - bool isIntegral = true; - - DecompApp::printOriginalSolution(n_cols, solution, os); - - isIntegral = UtilIsIntegral(solution, n_cols); - if(isIntegral){ - (*os) << "\nBlock Decomposition:"; - vector border(m_nOrigRows, true); - for(b = 0; b < m_beta; b++){ - (*os) << "\nBLOCK " << b << ":\t"; - for(i = 0; i < m_nOrigRows; i++){ - xj = solution[xIndex(i,b)]; - CoinAssertDebug(UtilIsIntegral(xj)); - CoinAssertDebug(xj < (1.0 + DecompEpsilon)); - CoinAssertDebug(xj > ( - DecompEpsilon)); - if(xj > 0.5){ - (*os) << i << " "; - border[i] = false; - } - } - } - border_size = count(border.begin(), border.end(), true); - (*os) << "\nBORDER :\t"; - for(i = 0; i < m_nOrigRows; i++){ - if(!border[i]) - continue; - (*os) << i << " "; - } - (*os) << "\nBORDER Size = " << border_size << "\n"; - - - const CoinPackedMatrix * M = m_instance.getMatrixByRow(); - for(b = 0; b < m_beta; b++){ - (*os) << "\nBLOCK " << b << "\n"; - for(i = 0; i < m_nOrigRows; i++){ - xj = solution[xIndex(i,b)]; - if(xj > 0.5){ - CoinShallowPackedVector row = M->getVector(i); - const int rowLen = row.getNumElements(); - const int * rowInd = row.getIndices(); - (*os) << "Row i: " << i << "\t"; - for(j = 0; j < rowLen; j++){ - (*os) << rowInd[j] << " "; - } - (*os) << endl; - } - } +void MAD_DecompApp::printOriginalSolution(const int n_cols, + const double *solution, + ostream *os) const { + int i, j, b; + int border_size; + double xj; + bool isIntegral = true; + + DecompApp::printOriginalSolution(n_cols, solution, os); + + isIntegral = UtilIsIntegral(solution, n_cols); + if (isIntegral) { + (*os) << "\nBlock Decomposition:"; + vector border(m_nOrigRows, true); + for (b = 0; b < m_beta; b++) { + (*os) << "\nBLOCK " << b << ":\t"; + for (i = 0; i < m_nOrigRows; i++) { + xj = solution[xIndex(i, b)]; + CoinAssertDebug(UtilIsIntegral(xj)); + CoinAssertDebug(xj < (1.0 + DecompEpsilon)); + CoinAssertDebug(xj > (-DecompEpsilon)); + if (xj > 0.5) { + (*os) << i << " "; + border[i] = false; + } } - (*os) << "\nBORDER\n"; - for(i = 0; i < m_nOrigRows; i++){ - if(!border[i]) - continue; - CoinShallowPackedVector row = M->getVector(i); - const int rowLen = row.getNumElements(); - const int * rowInd = row.getIndices(); - (*os) << "Row i: " << i << "\t"; - for(j = 0; j < rowLen; j++){ - (*os) << rowInd[j] << " "; - } - (*os) << endl; + } + border_size = count(border.begin(), border.end(), true); + (*os) << "\nBORDER :\t"; 
+ for (i = 0; i < m_nOrigRows; i++) { + if (!border[i]) + continue; + (*os) << i << " "; + } + (*os) << "\nBORDER Size = " << border_size << "\n"; + + const CoinPackedMatrix *M = m_instance.getMatrixByRow(); + for (b = 0; b < m_beta; b++) { + (*os) << "\nBLOCK " << b << "\n"; + for (i = 0; i < m_nOrigRows; i++) { + xj = solution[xIndex(i, b)]; + if (xj > 0.5) { + CoinShallowPackedVector row = M->getVector(i); + const int rowLen = row.getNumElements(); + const int *rowInd = row.getIndices(); + (*os) << "Row i: " << i << "\t"; + for (j = 0; j < rowLen; j++) { + (*os) << rowInd[j] << " "; + } + (*os) << endl; + } } - - for(b = 0; b < m_beta; b++){ - (*os) << "\nBLOCK " << b << "\n"; - for(i = 0; i < m_nOrigRows; i++){ - xj = solution[xIndex(i,b)]; - if(xj > 0.5){ - CoinShallowPackedVector row = M->getVector(i); - const int rowLen = row.getNumElements(); - const int * rowInd = row.getIndices(); - printRowMarks(rowInd, rowLen); - } - } + } + (*os) << "\nBORDER\n"; + for (i = 0; i < m_nOrigRows; i++) { + if (!border[i]) + continue; + CoinShallowPackedVector row = M->getVector(i); + const int rowLen = row.getNumElements(); + const int *rowInd = row.getIndices(); + (*os) << "Row i: " << i << "\t"; + for (j = 0; j < rowLen; j++) { + (*os) << rowInd[j] << " "; } - (*os) << "\nBORDER\n"; - for(i = 0; i < m_nOrigRows; i++){ - if(!border[i]) - continue; - CoinShallowPackedVector row = M->getVector(i); - const int rowLen = row.getNumElements(); - const int * rowInd = row.getIndices(); - printRowMarks(rowInd, rowLen); + (*os) << endl; + } + + for (b = 0; b < m_beta; b++) { + (*os) << "\nBLOCK " << b << "\n"; + for (i = 0; i < m_nOrigRows; i++) { + xj = solution[xIndex(i, b)]; + if (xj > 0.5) { + CoinShallowPackedVector row = M->getVector(i); + const int rowLen = row.getNumElements(); + const int *rowInd = row.getIndices(); + printRowMarks(rowInd, rowLen); + } } - - } + } + (*os) << "\nBORDER\n"; + for (i = 0; i < m_nOrigRows; i++) { + if (!border[i]) + continue; + CoinShallowPackedVector row = M->getVector(i); + const int rowLen = row.getNumElements(); + const int *rowInd = row.getIndices(); + printRowMarks(rowInd, rowLen); + } + } } -//TODO: visualization tool -//TODO: sanity check that really is feasible for MAD +// TODO: visualization tool +// TODO: sanity check that really is feasible for MAD diff --git a/Dip/examples/MAD/MAD_DecompDebug.cpp b/Dip/examples/MAD/MAD_DecompDebug.cpp index d87b1575..80fd4d03 100644 --- a/Dip/examples/MAD/MAD_DecompDebug.cpp +++ b/Dip/examples/MAD/MAD_DecompDebug.cpp @@ -15,14 +15,13 @@ #include "MAD_DecompApp.h" // --------------------------------------------------------------------- // -void MAD_DecompApp::printRowMarks(const int * rowInd, - const int rowLen) const{ +void MAD_DecompApp::printRowMarks(const int *rowInd, const int rowLen) const { - int i; - const char mark = '*'; - string str(m_instance.getNumCols(),' '); - for(i = 0; i < rowLen; i++){ - str[rowInd[i]] = mark; - } - (*m_osLog) << str << endl; + int i; + const char mark = '*'; + string str(m_instance.getNumCols(), ' '); + for (i = 0; i < rowLen; i++) { + str[rowInd[i]] = mark; + } + (*m_osLog) << str << endl; } diff --git a/Dip/examples/MAD/MAD_Heuristic.cpp b/Dip/examples/MAD/MAD_Heuristic.cpp old mode 100755 new mode 100644 index 96f82256..e74edaf8 --- a/Dip/examples/MAD/MAD_Heuristic.cpp +++ b/Dip/examples/MAD/MAD_Heuristic.cpp @@ -4,9 +4,9 @@ // Decomp is distributed under the Common Public License as part of the // // COIN-OR repository (http://www.coin-or.org). 
// // // -// Authors: Matthew Galati, SAS Institute Inc. (matthew.galati@sas.com) // -// Ted Ralphs, Lehigh University (ted@lehigh.edu) // -// Jiadong Wang, Lehigh University (jiw408@lehigh.edu) // +// Authors: Matthew Galati, SAS Institute Inc. (matthew.galati@sas.com) // +// Ted Ralphs, Lehigh University (ted@lehigh.edu) // +// Jiadong Wang, Lehigh University (jiw408@lehigh.edu) // // // // Copyright (C) 2002-2019, Lehigh University, Matthew Galati, and Ted Ralphs// // All Rights Reserved. // @@ -15,183 +15,175 @@ #include "MAD_DecompApp.h" #include "MAD_DecompSolution.h" - // --------------------------------------------------------------------- // -//name? -int MAD_DecompApp::APPheuristics(const double * xhat, - const double * origCost, - vector & xhatIPFeas){ - - int nVars = 0; - nVars += heuristicGreedy(DECREASING, xhat, origCost, xhatIPFeas); - return nVars; +// name? +int MAD_DecompApp::APPheuristics(const double *xhat, const double *origCost, + vector &xhatIPFeas) { + + int nVars = 0; + nVars += heuristicGreedy(DECREASING, xhat, origCost, xhatIPFeas); + return nVars; } // --------------------------------------------------------------------- // -//TODO: this revisits an old question about vector vs vector?? -int MAD_DecompApp::heuristicGreedy(vector & greedyPoints, - const MAD_HeurSortDir sortDir, - const double * sortValues, - const double * origCost, - const double * redCost){ - - UtilPrintFuncBegin(m_osLog, m_classTag, - "heuristicGreedy()", m_param.LogDebugLevel, 2); - - //--- - //--- One example of sortDir = DECREASING - //--- order non-increasing based on xhat (current LP point) - //--- so, we take the values that are at 1.0 first, etc... - //--- - //--- One example of sortDir = INCREASING - //--- order non-dercreasing based on costs - //--- so, we take the cheapest first - //--- - pair ib; - int k, i, b, bp; - int nGreedyPts = 0; - const int n_origCols = m_nOrigRows * m_beta; - pair * colSort = m_auxMemPool.pIntDblArrNCoreCols; - for(i = 0; i < n_origCols; i++){ - colSort[i].first = i; - colSort[i].second = sortValues[i]; - } - if(sortDir == INCREASING) - sort(colSort, colSort + n_origCols, UtilIsLessThan()); - else - sort(colSort, colSort + n_origCols, UtilIsGreaterThan()); - - //--- - //--- residual capacity for blocks - //--- - double * blockRes = m_auxMemPool.dblArrNBlocks; - UtilFillN(blockRes, m_beta, static_cast(m_kappa)); - - //--- - //--- for marking if a row has already been assigned - //--- - int * isRowAssigned = m_auxMemPool.intArrNOrigRows; - UtilFillN(isRowAssigned, m_nOrigRows, 0); - - //--- - //--- greedily assign rows to blocks - //--- checking conflicts and capacity - //--- - //--- for xhat[i,b], - //--- if block b has residual capacity - //--- and row i has not already been assigned - //--- and for all b' != b - //--- for all j in b', !isEdge(i,j) - //--- then put i in b - //--- - //TODO: use mem pool? 
- vector * blocks = new vector[m_beta]; - CoinAssertHint(blocks, "Error: Out of Memory"); - - bool assignOk; - vector::iterator vit; - for(k = 0; k < n_origCols; k++){ - ib = xIndexInv(colSort[k].first); - i = ib.first; - b = ib.second; - if(isRowAssigned[i] || (blockRes[b] < DecompEpsilon)) - continue; - - assignOk = true; - for(bp = 0; bp < m_beta; bp++){ - if(bp == b) - continue; - for(vit = blocks[bp].begin(); vit != blocks[bp].end(); vit++){ - if(GRAPH_IS_EDGE_FAST(m_conflictGraph, i, *vit)){ - assignOk = false; - break; - } - } - if(!assignOk) - break; - } - if(!assignOk) - continue; - - blockRes[b] -= 1.0; - isRowAssigned[i] = 1; - blocks[b].push_back(i); - } - - //--- - //--- place to store the greedy solution - //--- - int xInd; - double solValueOrigCost = 0.0; - double solValueRedCost = 0.0; - double * greedySol = m_auxMemPool.dblArrNCoreCols; - UtilFillN(greedySol, n_origCols, 0.0); - for(b = 0; b < m_beta; b++){ - for(vit = blocks[b].begin(); vit != blocks[b].end(); vit++){ - xInd = xIndex(*vit,b); - greedySol[xInd] = 1.0; - solValueOrigCost += origCost[xInd]; +// TODO: this revisits an old question about vector vs vector?? +int MAD_DecompApp::heuristicGreedy(vector &greedyPoints, + const MAD_HeurSortDir sortDir, + const double *sortValues, + const double *origCost, + const double *redCost) { + + UtilPrintFuncBegin(m_osLog, m_classTag, "heuristicGreedy()", + m_param.LogDebugLevel, 2); + + //--- + //--- One example of sortDir = DECREASING + //--- order non-increasing based on xhat (current LP point) + //--- so, we take the values that are at 1.0 first, etc... + //--- + //--- One example of sortDir = INCREASING + //--- order non-dercreasing based on costs + //--- so, we take the cheapest first + //--- + pair ib; + int k, i, b, bp; + int nGreedyPts = 0; + const int n_origCols = m_nOrigRows * m_beta; + pair *colSort = m_auxMemPool.pIntDblArrNCoreCols; + for (i = 0; i < n_origCols; i++) { + colSort[i].first = i; + colSort[i].second = sortValues[i]; + } + if (sortDir == INCREASING) + sort(colSort, colSort + n_origCols, UtilIsLessThan()); + else + sort(colSort, colSort + n_origCols, UtilIsGreaterThan()); + + //--- + //--- residual capacity for blocks + //--- + double *blockRes = m_auxMemPool.dblArrNBlocks; + UtilFillN(blockRes, m_beta, static_cast(m_kappa)); + + //--- + //--- for marking if a row has already been assigned + //--- + int *isRowAssigned = m_auxMemPool.intArrNOrigRows; + UtilFillN(isRowAssigned, m_nOrigRows, 0); + + //--- + //--- greedily assign rows to blocks + //--- checking conflicts and capacity + //--- + //--- for xhat[i,b], + //--- if block b has residual capacity + //--- and row i has not already been assigned + //--- and for all b' != b + //--- for all j in b', !isEdge(i,j) + //--- then put i in b + //--- + // TODO: use mem pool? 
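// --- Editorial aside (illustration only, not part of this patch) -----------
// The greedy rule described in the comments above can be read in isolation.
// Below is a minimal, self-contained sketch of the same assignment logic,
// assuming a plain adjacency-set conflict graph and standard containers
// instead of the m_conflictGraph / mem-pool arrays used by the real
// implementation; every name in the sketch is hypothetical.
#include <cstddef>
#include <set>
#include <utility>
#include <vector>

// order: (row, block) candidates, already sorted by the chosen criterion
std::vector<std::vector<int> >
greedyAssign(const std::vector<std::pair<int, int> > &order,
             const std::vector<std::set<int> > &conflict, int nRows,
             int nBlocks, int capacity) {
  std::vector<std::vector<int> > blocks(nBlocks);
  std::vector<bool> assigned(nRows, false);
  for (size_t k = 0; k < order.size(); k++) {
    int i = order[k].first;
    int b = order[k].second;
    // skip if row i is already placed or block b has no residual capacity
    if (assigned[i] || static_cast<int>(blocks[b].size()) >= capacity)
      continue;
    // reject if row i conflicts with any row already placed in another block
    bool ok = true;
    for (int bp = 0; bp < nBlocks && ok; bp++) {
      if (bp == b)
        continue;
      for (size_t j = 0; j < blocks[bp].size(); j++) {
        if (conflict[i].count(blocks[bp][j])) {
          ok = false;
          break;
        }
      }
    }
    if (!ok)
      continue;
    assigned[i] = true;
    blocks[b].push_back(i);
  }
  return blocks;
}
// ---------------------------------------------------------------------------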
+ vector *blocks = new vector[m_beta]; + CoinAssertHint(blocks, "Error: Out of Memory"); + + bool assignOk; + vector::iterator vit; + for (k = 0; k < n_origCols; k++) { + ib = xIndexInv(colSort[k].first); + i = ib.first; + b = ib.second; + if (isRowAssigned[i] || (blockRes[b] < DecompEpsilon)) + continue; + + assignOk = true; + for (bp = 0; bp < m_beta; bp++) { + if (bp == b) + continue; + for (vit = blocks[bp].begin(); vit != blocks[bp].end(); vit++) { + if (GRAPH_IS_EDGE_FAST(m_conflictGraph, i, *vit)) { + assignOk = false; + break; + } } - } - if(redCost){ - for(b = 0; b < m_beta; b++){ - for(vit = blocks[b].begin(); vit != blocks[b].end(); vit++){ - solValueRedCost += redCost[xIndex(*vit,b)]; - } + if (!assignOk) + break; + } + if (!assignOk) + continue; + + blockRes[b] -= 1.0; + isRowAssigned[i] = 1; + blocks[b].push_back(i); + } + + //--- + //--- place to store the greedy solution + //--- + int xInd; + double solValueOrigCost = 0.0; + double solValueRedCost = 0.0; + double *greedySol = m_auxMemPool.dblArrNCoreCols; + UtilFillN(greedySol, n_origCols, 0.0); + for (b = 0; b < m_beta; b++) { + for (vit = blocks[b].begin(); vit != blocks[b].end(); vit++) { + xInd = xIndex(*vit, b); + greedySol[xInd] = 1.0; + solValueOrigCost += origCost[xInd]; + } + } + if (redCost) { + for (b = 0; b < m_beta; b++) { + for (vit = blocks[b].begin(); vit != blocks[b].end(); vit++) { + solValueRedCost += redCost[xIndex(*vit, b)]; } - } - - //--- - //--- create a GreedyPoint and set solution ptr - //--- - GreedyPoint gp; - gp.solValueOrigCost = solValueOrigCost; - gp.solValueRedCost = solValueRedCost; - gp.solution = greedySol; - greedyPoints.push_back(gp); - - nGreedyPts++; - - UTIL_DELARR(blocks); - UtilPrintFuncEnd(m_osLog, m_classTag, - "heuristicGreedy()", m_param.LogDebugLevel, 2); - - return nGreedyPts; + } + } + + //--- + //--- create a GreedyPoint and set solution ptr + //--- + GreedyPoint gp; + gp.solValueOrigCost = solValueOrigCost; + gp.solValueRedCost = solValueRedCost; + gp.solution = greedySol; + greedyPoints.push_back(gp); + + nGreedyPts++; + + UTIL_DELARR(blocks); + UtilPrintFuncEnd(m_osLog, m_classTag, "heuristicGreedy()", + m_param.LogDebugLevel, 2); + + return nGreedyPts; } // --------------------------------------------------------------------- // -int MAD_DecompApp::heuristicGreedy(const MAD_HeurSortDir sortDir, - const double * sortValues, - const double * origCost, - vector & solVec){ - - - int i; - int nGreedyPts = 0; - const int n_origCols = m_nOrigRows * m_beta; - - vector greedyPoints; - nGreedyPts = heuristicGreedy(greedyPoints, sortDir, sortValues, origCost); - - for(i = 0; i < nGreedyPts; i++){ - GreedyPoint & gp = greedyPoints[i]; - - //--- - //--- create a DecompSolution from the greedySol - //--- - //why not also use these as columns?? - UTIL_DEBUG(m_param.LogDebugLevel, 4, - (*m_osLog) - << "Greedy DecompSolution cost = " << gp.solValueOrigCost - << endl; - printOriginalSolution(n_origCols, gp.solution, &cout); - ); - //TODO: for use in app solved - might not want this... 
- solVec.push_back(new MAD_DecompSolution(this, - n_origCols, - gp.solution, - gp.solValueOrigCost) - ); - } - return nGreedyPts; +int MAD_DecompApp::heuristicGreedy(const MAD_HeurSortDir sortDir, + const double *sortValues, + const double *origCost, + vector &solVec) { + + int i; + int nGreedyPts = 0; + const int n_origCols = m_nOrigRows * m_beta; + + vector greedyPoints; + nGreedyPts = heuristicGreedy(greedyPoints, sortDir, sortValues, origCost); + + for (i = 0; i < nGreedyPts; i++) { + GreedyPoint &gp = greedyPoints[i]; + + //--- + //--- create a DecompSolution from the greedySol + //--- + // why not also use these as columns?? + UTIL_DEBUG(m_param.LogDebugLevel, 4, + (*m_osLog) << "Greedy DecompSolution cost = " + << gp.solValueOrigCost << endl; + printOriginalSolution(n_origCols, gp.solution, &cout);); + // TODO: for use in app solved - might not want this... + solVec.push_back(new MAD_DecompSolution(this, n_origCols, gp.solution, + gp.solValueOrigCost)); + } + return nGreedyPts; } diff --git a/Dip/examples/MAD/MAD_Main.cpp b/Dip/examples/MAD/MAD_Main.cpp old mode 100755 new mode 100644 index b4d9ef0d..a302fe58 --- a/Dip/examples/MAD/MAD_Main.cpp +++ b/Dip/examples/MAD/MAD_Main.cpp @@ -13,8 +13,8 @@ //===========================================================================// //===========================================================================// -#include "UtilParameters.h" #include "MAD_DecompApp.h" +#include "UtilParameters.h" #include "AlpsDecompModel.h" #include "AlpsKnowledgeBroker.h" @@ -22,135 +22,133 @@ #include "DecompAlgoPC2.h" #include "DecompAlgoRC.h" -#include "CoinError.hpp" #include "AlpsTime.h" +#include "CoinError.hpp" //===========================================================================// //#define CREATE_FULL //===========================================================================// -int main(int argc, char ** argv){ - try{ - +int main(int argc, char **argv) { + try { + + //--- + //--- create the utility class for parsing parameters + //--- + UtilParameters utilParam(argc, argv); + + bool useAlps = utilParam.GetSetting("useAlps", true); + + bool doCut = utilParam.GetSetting("doCut", true); + bool doPrice = utilParam.GetSetting("doPrice", false); + bool doPriceCut = utilParam.GetSetting("doPriceCut", false); + bool doRelaxCut = utilParam.GetSetting("doRelaxCut", false); + + string Instance = utilParam.GetSetting("Instance", ".", "MAD"); + + AlpsTimer timer; + double timeSetupReal = 0.0; + double timeSetupCpu = 0.0; + double timeSolveReal = 0.0; + double timeSolveCpu = 0.0; + + timer.start(); + + //--- + //--- create the user application (a DecompApp) + //--- + MAD_DecompApp mad(utilParam); + mad.createModel(); + +#ifdef CREATE_FULL + { + // create full mps file for debugging + string DataSubDir = utilParam.GetSetting("DataSubDir", ".", "MAD"); + string::size_type pos = Instance.find_first_of("."); + + DecompAlgoC2 cutMps(&mad, &utilParam); + cutMps.initSetup(&utilParam, "MAD"); + + string fileName = DataSubDir + "_" + Instance.substr(0, pos); + cutMps.createFullMps(fileName); + exit(1); + } +#endif + + //--- + //--- create the algorithm(s) (a DecompAlgo) + //--- + DecompAlgoC2 *cut = NULL; + DecompAlgoPC2 *price = NULL; + DecompAlgoPC2 *pc = NULL; + DecompAlgoRC *rc = NULL; + + if (doCut) { + cut = new DecompAlgoC2(&mad, &utilParam); + } + if (doPrice) { + price = new DecompAlgoPC2(&mad, &utilParam, "PRICE"); + } + if (doPriceCut) { + pc = new DecompAlgoPC2(&mad, &utilParam); + } + if (doRelaxCut) { + rc = new DecompAlgoRC(&mad, &utilParam); + 
} + + if (useAlps) { //--- - //--- create the utility class for parsing parameters + //--- create the driver AlpsDecomp model //--- - UtilParameters utilParam(argc, argv); - - bool useAlps = utilParam.GetSetting("useAlps", true); - - bool doCut = utilParam.GetSetting("doCut", true); - bool doPrice = utilParam.GetSetting("doPrice", false); - bool doPriceCut = utilParam.GetSetting("doPriceCut", false); - bool doRelaxCut = utilParam.GetSetting("doRelaxCut", false); - - string Instance = utilParam.GetSetting("Instance", ".", "MAD"); - - AlpsTimer timer; - double timeSetupReal = 0.0; - double timeSetupCpu = 0.0; - double timeSolveReal = 0.0; - double timeSolveCpu = 0.0; + AlpsDecompModel alpsModel(utilParam); + if (cut) + alpsModel.addDecompAlgo(cut); + if (price) + alpsModel.addDecompAlgo(price); + if (pc) + alpsModel.addDecompAlgo(pc); + if (rc) + alpsModel.addDecompAlgo(rc); + timer.stop(); + timeSetupCpu = timer.getCpuTime(); + timeSetupReal = timer.getWallClock(); timer.start(); + alpsModel.solve(); + timer.stop(); + timeSolveCpu = timer.getCpuTime(); + timeSolveReal = timer.getWallClock(); //--- - //--- create the user application (a DecompApp) + //--- sanity check //--- - MAD_DecompApp mad(utilParam); - mad.createModel(); - -#ifdef CREATE_FULL - { - //create full mps file for debugging - string DataSubDir = utilParam.GetSetting("DataSubDir", ".", "MAD"); - string::size_type pos = Instance.find_first_of("."); - - DecompAlgoC2 cutMps(&mad, &utilParam); - cutMps.initSetup(&utilParam, "MAD"); - - string fileName = DataSubDir + "_" + Instance.substr(0,pos); - cutMps.createFullMps(fileName); - exit(1); - } -#endif - + cout << "Instance = " << Instance << " NRows = " << mad.getNOrigRows() + << " Border = " << mad.getNOrigRows() + alpsModel.getBestObj() + << " Solution = " << alpsModel.getBestObj() << " [ " + << mad.getBestKnownLB() << " , " << mad.getBestKnownUB() << " ]" + << " SetupCPU = " << timeSetupCpu << " SolveCPU = " << timeSolveCpu + << endl; + // double diff = alpsModel.getBestObj() - mad.getKnownOptimalBound(); + // CoinAssert(UtilIsZero(diff)); - + } else { //--- - //--- create the algorithm(s) (a DecompAlgo) + //--- just solve the bounding problem (root node) //--- - DecompAlgoC2 * cut = NULL; - DecompAlgoPC2 * price = NULL; - DecompAlgoPC2 * pc = NULL; - DecompAlgoRC * rc = NULL; - - if(doCut){ - cut = new DecompAlgoC2(&mad, &utilParam); - } - if(doPrice){ - price = new DecompAlgoPC2(&mad, &utilParam, "PRICE"); - } - if(doPriceCut){ - pc = new DecompAlgoPC2(&mad, &utilParam); - } - if(doRelaxCut){ - rc = new DecompAlgoRC(&mad, &utilParam); - } - - if(useAlps){ - //--- - //--- create the driver AlpsDecomp model - //--- - AlpsDecompModel alpsModel(utilParam); - if(cut) - alpsModel.addDecompAlgo(cut); - if(price) - alpsModel.addDecompAlgo(price); - if(pc) - alpsModel.addDecompAlgo(pc); - if(rc) - alpsModel.addDecompAlgo(rc); - timer.stop(); - timeSetupCpu = timer.getCpuTime(); - timeSetupReal = timer.getWallClock(); - - timer.start(); - alpsModel.solve(); - timer.stop(); - timeSolveCpu = timer.getCpuTime(); - timeSolveReal = timer.getWallClock(); - - //--- - //--- sanity check - //--- - cout << "Instance = " << Instance - << " NRows = " << mad.getNOrigRows() - << " Border = " << mad.getNOrigRows() + alpsModel.getBestObj() - << " Solution = " << alpsModel.getBestObj() - << " [ " << mad.getBestKnownLB() - << " , " << mad.getBestKnownUB() << " ]" - << " SetupCPU = " << timeSetupCpu - << " SolveCPU = " << timeSolveCpu << endl; - - //double diff = alpsModel.getBestObj() - 
mad.getKnownOptimalBound(); - //CoinAssert(UtilIsZero(diff)); - - }else{ - //--- - //--- just solve the bounding problem (root node) - //--- - } - - if(cut) delete cut; - if(price) delete price; - if(pc) delete pc; - if(rc) delete rc; - } - catch(CoinError & ex){ - cerr << "COIN Exception:" << ex.message() << endl - << " from method " << ex.methodName() << endl - << " from class " << ex.className() << endl; - } -} + } + + if (cut) + delete cut; + if (price) + delete price; + if (pc) + delete pc; + if (rc) + delete rc; + } catch (CoinError &ex) { + cerr << "COIN Exception:" << ex.message() << endl + << " from method " << ex.methodName() << endl + << " from class " << ex.className() << endl; + } +} diff --git a/Dip/examples/MCF/MCF_DecompApp.cpp b/Dip/examples/MCF/MCF_DecompApp.cpp index 989adf47..6b4c7b6e 100644 --- a/Dip/examples/MCF/MCF_DecompApp.cpp +++ b/Dip/examples/MCF/MCF_DecompApp.cpp @@ -13,412 +13,396 @@ //===========================================================================// //===========================================================================// -#include "DecompVar.h" #include "MCF_DecompApp.h" +#include "DecompVar.h" //===========================================================================// -void MCF_DecompApp::initializeApp() -{ - UtilPrintFuncBegin(m_osLog, m_classTag, - "initializeApp()", m_appParam.LogLevel, 2); - - //--- - //--- read problem instance - //--- - string instanceFile = m_appParam.DataDir - + UtilDirSlash() + m_appParam.Instance; - int rc = m_instance.readInstance(instanceFile); - - if (rc) - throw UtilException("Error in readInstance", - "initializeApp", "MCF_DecompApp"); - - //--- - //--- create models - //--- - createModels(); - UtilPrintFuncEnd(m_osLog, m_classTag, - "initializeApp()", m_appParam.LogLevel, 2); -} - -//===========================================================================// -void MCF_DecompApp::createModels() -{ - //--- - //--- This function does the work to create the different models - //--- that will be used. This memory is owned by the user. It will - //--- be passed to the application interface and used by the algorithms. - //--- - UtilPrintFuncBegin(m_osLog, m_classTag, - "createModels()", m_appParam.LogLevel, 2); - //--- - //--- (Integer) Multi-Commodity Flow Problem (MCF). - //--- - //--- We are given: - //--- (1) a directed graph G=(N,A), - //--- (2) a set of commodities K, where each commodity is - //--- a source-sink pair. - //--- - //--- min sum{k in K} sum{(i,j) in A} w[i,j] x[k,i,j] - //--- s.t. sum{(j,i) in A} x[k,i,j] - - //--- sum{(i,j) in A} x[k,i,j] = d[i,k], for all i in N, k in K - //--- sum{k in K} x[k,i,j] >= l[i,j], for all (i,j) in A - //--- sum{k in K} x[k,i,j] <= u[i,j], for all (i,j) in A - //--- x[k,i,j] integer >= l[i,j] <= u[i,j], for all (i,j) in A - //--- For k=(s,t) in K, - //--- d[i,k] = -d[k] if i=s - //--- = d[k] if i=t - //--- = 0, otherwise - //--- - //--- NOTE: to make sure the problem is always feasible, dummy arcs - //--- have been added between all source-sink commodity pairs and have - //--- been given a 'big' weight. 
- //--- - //--- - //--- The decomposition is formed as: - //--- - //--- MASTER (A''): - //--- sum{k in K} x[k,i,j] >= l[i,j], for all (i,j) in A - //--- sum{k in K} x[k,i,j] <= u[i,j], for all (i,j) in A - //--- x[k,i,j] integer >= l[i,j] <= u[i,j], for all (i,j) in A - //--- - //--- SUBPROBLEM (A'): (one block for each k in K) - //--- sum{(j,i) in A} x[k,i,j] - - //--- sum{(i,j) in A} x[k,i,j] = d[i,k], for all i in N - //--- x[k,i,j] integer >= l[i,j] <= u[i,j], for all (i,j) in A - //--- - //--- - //--- Get information about this problem instance. - //--- - int k, a, colIndex; - int numCommodities = m_instance.m_numCommodities; - int numArcs = m_instance.m_numArcs; - int numCols = numCommodities * numArcs; - MCF_Instance::arc* arcs = m_instance.m_arcs; - //--- - //--- Construct the objective function and set it - //--- columns indexed as [k,a]= k*numArcs + a - //--- - objective = new double[numCols]; - - if (!objective) { - throw UtilExceptionMemory("createModels", "MCF_DecompApp"); - } - - colIndex = 0; - - for (k = 0; k < numCommodities; k++) - for (a = 0; a < numArcs; a++) { - objective[colIndex++] = arcs[a].weight; - } - - //--- - //--- set the objective - //--- - setModelObjective(objective, numCols); - //--- - //--- create the core/master model and set it - //--- - modelCore = new DecompConstraintSet(); - createModelCore(modelCore); - setModelCore(modelCore, "core"); - - //--- - //--- create the relaxed/subproblem models and set them - //--- - for (k = 0; k < numCommodities; k++) { - modelRelax = new DecompConstraintSet(); - string modelName = "relax" + UtilIntToStr(k); - - if (m_appParam.UseSparse) { - createModelRelaxSparse(modelRelax, k); - } else { - createModelRelax(modelRelax, k); - } - - setModelRelax(modelRelax, modelName, k); - m_models.push_back(modelRelax); - } - - UtilPrintFuncEnd(m_osLog, m_classTag, - "createModels()", m_appParam.LogLevel, 2); +void MCF_DecompApp::initializeApp() { + UtilPrintFuncBegin(m_osLog, m_classTag, "initializeApp()", + m_appParam.LogLevel, 2); + + //--- + //--- read problem instance + //--- + string instanceFile = + m_appParam.DataDir + UtilDirSlash() + m_appParam.Instance; + int rc = m_instance.readInstance(instanceFile); + + if (rc) + throw UtilException("Error in readInstance", "initializeApp", + "MCF_DecompApp"); + + //--- + //--- create models + //--- + createModels(); + UtilPrintFuncEnd(m_osLog, m_classTag, "initializeApp()", m_appParam.LogLevel, + 2); } //===========================================================================// -void MCF_DecompApp::createModelCore(DecompConstraintSet* model) -{ - //--- - //--- MASTER (A''): - //--- sum{k in K} x[k,i,j] >= l[i,j], for all (i,j) in A - //--- sum{k in K} x[k,i,j] <= u[i,j], for all (i,j) in A - //--- x[k,i,j] integer >= l[i,j] <= u[i,j], for all (i,j) in A - //--- - int k, a, colIndex; - int numCommodities = m_instance.m_numCommodities; - int numArcs = m_instance.m_numArcs; - int numCols = numCommodities * numArcs; - int numRows = 2 * numArcs; - MCF_Instance::arc* arcs = m_instance.m_arcs; - UtilPrintFuncBegin(m_osLog, m_classTag, - "createModelCore()", m_appParam.LogLevel, 2); - //--- - //--- create space for the model matrix (row-majored) - //--- - model->M = new CoinPackedMatrix(false, 0.0, 0.0); - - if (!model->M) { - throw UtilExceptionMemory("createModelCore", "MCF_DecompApp"); - } - - model->M->setDimensions(0, numCols); - model->reserve(numRows, numCols); - //--- - //--- create the rows and set the col/row bounds - //--- - UtilFillN(model->colLB, numCols, 0.0); - 
UtilFillN(model->colUB, numCols, m_infinity); - - for (a = 0; a < numArcs; a++) { - CoinPackedVector row; - double arcLB = arcs[a].lb; - double arcUB = arcs[a].ub; - - for (k = 0; k < numCommodities; k++) { - colIndex = k * numArcs + a; - model->colLB[colIndex] = arcLB; - model->colUB[colIndex] = arcUB; - row.insert(colIndex, 1.0); - } - - //TODO: any issue with range constraints? - model->appendRow(row, -m_infinity, arcUB); - string rowNameUB = "capUB(" + - UtilIntToStr(a) + "_" + - UtilIntToStr(arcs[a].tail) + "," + - UtilIntToStr(arcs[a].head) + ")"; - model->rowNames.push_back(rowNameUB); - model->appendRow(row, arcLB, m_infinity); - string rowNameLB = "capLB(" + - UtilIntToStr(a) + "_" + - UtilIntToStr(arcs[a].tail) + "," + - UtilIntToStr(arcs[a].head) + ")"; - model->rowNames.push_back(rowNameLB); - } - - //--- - //--- create column names (helps with debugging) - //--- - for (k = 0; k < numCommodities; k++) { - for (a = 0; a < numArcs; a++) { - string colName = "x(comm_" + UtilIntToStr(k) + "," + - UtilIntToStr(a) + "_" + - UtilIntToStr(arcs[a].tail) + "," + - UtilIntToStr(arcs[a].head) + ")"; - model->colNames.push_back(colName); - } - } - - //--- - //--- set the indices of the integer variables of model - //--- - UtilIotaN(model->integerVars, numCols, 0); - UtilPrintFuncEnd(m_osLog, m_classTag, - "createModelCore()", m_appParam.LogLevel, 2); +void MCF_DecompApp::createModels() { + //--- + //--- This function does the work to create the different models + //--- that will be used. This memory is owned by the user. It will + //--- be passed to the application interface and used by the algorithms. + //--- + UtilPrintFuncBegin(m_osLog, m_classTag, "createModels()", m_appParam.LogLevel, + 2); + //--- + //--- (Integer) Multi-Commodity Flow Problem (MCF). + //--- + //--- We are given: + //--- (1) a directed graph G=(N,A), + //--- (2) a set of commodities K, where each commodity is + //--- a source-sink pair. + //--- + //--- min sum{k in K} sum{(i,j) in A} w[i,j] x[k,i,j] + //--- s.t. sum{(j,i) in A} x[k,i,j] - + //--- sum{(i,j) in A} x[k,i,j] = d[i,k], for all i in N, k in K + //--- sum{k in K} x[k,i,j] >= l[i,j], for all (i,j) in A + //--- sum{k in K} x[k,i,j] <= u[i,j], for all (i,j) in A + //--- x[k,i,j] integer >= l[i,j] <= u[i,j], for all (i,j) in A + //--- For k=(s,t) in K, + //--- d[i,k] = -d[k] if i=s + //--- = d[k] if i=t + //--- = 0, otherwise + //--- + //--- NOTE: to make sure the problem is always feasible, dummy arcs + //--- have been added between all source-sink commodity pairs and have + //--- been given a 'big' weight. + //--- + //--- + //--- The decomposition is formed as: + //--- + //--- MASTER (A''): + //--- sum{k in K} x[k,i,j] >= l[i,j], for all (i,j) in A + //--- sum{k in K} x[k,i,j] <= u[i,j], for all (i,j) in A + //--- x[k,i,j] integer >= l[i,j] <= u[i,j], for all (i,j) in A + //--- + //--- SUBPROBLEM (A'): (one block for each k in K) + //--- sum{(j,i) in A} x[k,i,j] - + //--- sum{(i,j) in A} x[k,i,j] = d[i,k], for all i in N + //--- x[k,i,j] integer >= l[i,j] <= u[i,j], for all (i,j) in A + //--- + //--- + //--- Get information about this problem instance. 
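// --- Editorial aside (illustration only, not part of this patch) -----------
// The code below flattens the x[k,(i,j)] variables of the formulation above
// into one column per (commodity, arc) pair. A minimal sketch of that
// mapping and its inverse; the inverse helper is hypothetical, the real code
// only needs the forward direction:
#include <utility>

inline int flatIndex(int k, int a, int numArcs) { return k * numArcs + a; }

inline std::pair<int, int> flatIndexInv(int index, int numArcs) {
  return std::make_pair(index / numArcs, index % numArcs); // (k, a)
}
// ---------------------------------------------------------------------------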
+ //--- + int k, a, colIndex; + int numCommodities = m_instance.m_numCommodities; + int numArcs = m_instance.m_numArcs; + int numCols = numCommodities * numArcs; + MCF_Instance::arc *arcs = m_instance.m_arcs; + //--- + //--- Construct the objective function and set it + //--- columns indexed as [k,a]= k*numArcs + a + //--- + objective = new double[numCols]; + + if (!objective) { + throw UtilExceptionMemory("createModels", "MCF_DecompApp"); + } + + colIndex = 0; + + for (k = 0; k < numCommodities; k++) + for (a = 0; a < numArcs; a++) { + objective[colIndex++] = arcs[a].weight; + } + + //--- + //--- set the objective + //--- + setModelObjective(objective, numCols); + //--- + //--- create the core/master model and set it + //--- + modelCore = new DecompConstraintSet(); + createModelCore(modelCore); + setModelCore(modelCore, "core"); + + //--- + //--- create the relaxed/subproblem models and set them + //--- + for (k = 0; k < numCommodities; k++) { + modelRelax = new DecompConstraintSet(); + string modelName = "relax" + UtilIntToStr(k); + + if (m_appParam.UseSparse) { + createModelRelaxSparse(modelRelax, k); + } else { + createModelRelax(modelRelax, k); + } + + setModelRelax(modelRelax, modelName, k); + m_models.push_back(modelRelax); + } + + UtilPrintFuncEnd(m_osLog, m_classTag, "createModels()", m_appParam.LogLevel, + 2); } - //===========================================================================// -void MCF_DecompApp::createModelRelax(DecompConstraintSet* model, - int commId) -{ - //--- - //--- SUBPROBLEM (A'): (one block for each k in K) - //--- sum{(j,i) in A} x[k,i,j] - - //--- sum{(i,j) in A} x[k,i,j] = d[i,k], for all i in N - //--- x[k,i,j] integer >= l[i,j] <= u[i,j], for all (i,j) in A - //--- For k=(s,t) in K, - //--- d[i,k] = -d[k] if i=s - //--- = d[k] if i=t - //--- = 0, otherwise - //--- - int a, i, head, tail, colIndex, source, sink; - int numCommodities = m_instance.m_numCommodities; - int numArcs = m_instance.m_numArcs; - int numNodes = m_instance.m_numNodes; - int numCols = numCommodities * numArcs; - int numRows = numNodes; - MCF_Instance::arc* arcs = m_instance.m_arcs; - MCF_Instance::commodity* commodities = m_instance.m_commodities; - UtilPrintFuncBegin(m_osLog, m_classTag, - "createModelRelax()", m_appParam.LogLevel, 2); - //--- - //--- create space for the model matrix (row-majored) - //--- - model->M = new CoinPackedMatrix(false, 0.0, 0.0); - - if (!model->M) { - throw UtilExceptionMemory("createModelCore", "MCF_DecompApp"); - } - - model->M->setDimensions(0, numCols); - model->reserve(numRows, numCols); - //--- - //--- get this commodity's source and sink node - //--- - source = commodities[commId].source; - sink = commodities[commId].sink; - - //--- - //--- create the rows - //--- NOTE: this is somewhat inefficient (but simple) - //--- - for (i = 0; i < numNodes; i++) { - CoinPackedVector row; - - for (a = 0; a < numArcs; a++) { - tail = arcs[a].tail; - head = arcs[a].head; - - if (head == i) { - colIndex = commId * numArcs + a; - row.insert(colIndex, 1.0); - } else if (tail == i) { - colIndex = commId * numArcs + a; - row.insert(colIndex, -1.0); - } - } - - if (i == source) - model->appendRow(row, - -commodities[commId].demand, - -commodities[commId].demand); - else if (i == sink) - model->appendRow(row, - commodities[commId].demand, - commodities[commId].demand); - else { - model->appendRow(row, 0.0, 0.0); - } - - string rowName = "flow(" + - UtilIntToStr(commId) + "_" + - UtilIntToStr(i) + "_" + - UtilIntToStr(source) + "," + - UtilIntToStr(sink) + ")"; - 
model->rowNames.push_back(rowName); - } - - //--- - //--- create a list of the "active" columns (those related - //--- to this commmodity) all other columns are fixed to 0 - //--- - UtilFillN(model->colLB, numCols, 0.0); - UtilFillN(model->colUB, numCols, 0.0); - colIndex = commId * numArcs; - - for (a = 0; a < numArcs; a++) { - double arcLB = arcs[a].lb; - double arcUB = arcs[a].ub; +void MCF_DecompApp::createModelCore(DecompConstraintSet *model) { + //--- + //--- MASTER (A''): + //--- sum{k in K} x[k,i,j] >= l[i,j], for all (i,j) in A + //--- sum{k in K} x[k,i,j] <= u[i,j], for all (i,j) in A + //--- x[k,i,j] integer >= l[i,j] <= u[i,j], for all (i,j) in A + //--- + int k, a, colIndex; + int numCommodities = m_instance.m_numCommodities; + int numArcs = m_instance.m_numArcs; + int numCols = numCommodities * numArcs; + int numRows = 2 * numArcs; + MCF_Instance::arc *arcs = m_instance.m_arcs; + UtilPrintFuncBegin(m_osLog, m_classTag, "createModelCore()", + m_appParam.LogLevel, 2); + //--- + //--- create space for the model matrix (row-majored) + //--- + model->M = new CoinPackedMatrix(false, 0.0, 0.0); + + if (!model->M) { + throw UtilExceptionMemory("createModelCore", "MCF_DecompApp"); + } + + model->M->setDimensions(0, numCols); + model->reserve(numRows, numCols); + //--- + //--- create the rows and set the col/row bounds + //--- + UtilFillN(model->colLB, numCols, 0.0); + UtilFillN(model->colUB, numCols, m_infinity); + + for (a = 0; a < numArcs; a++) { + CoinPackedVector row; + double arcLB = arcs[a].lb; + double arcUB = arcs[a].ub; + + for (k = 0; k < numCommodities; k++) { + colIndex = k * numArcs + a; model->colLB[colIndex] = arcLB; model->colUB[colIndex] = arcUB; - model->activeColumns.push_back(colIndex); - colIndex++; - } - - //--- - //--- set the indices of the integer variables of model - //--- - UtilIotaN(model->integerVars, numCols, 0); - UtilPrintFuncEnd(m_osLog, m_classTag, - "createModelRelax()", m_appParam.LogLevel, 2); + row.insert(colIndex, 1.0); + } + + // TODO: any issue with range constraints? 
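// --- Editorial aside (illustration only, not part of this patch) -----------
// Regarding the TODO above: the two one-sided rows appended below encode the
// range l[i,j] <= sum{k} x[k,i,j] <= u[i,j] as a separate capUB and capLB
// row per arc. If a single range row were preferred, the appendRow overload
// already used elsewhere in this file with two finite bounds (for example
// appendRow(row, 0.0, 0.0) in createModelRelax) could express it in one
// call; this is only a sketch of the alternative, not a suggested change:
//
//   model->appendRow(row, arcLB, arcUB);  // capacity range for arc a
//   model->rowNames.push_back("cap(" + UtilIntToStr(a) + ")");
// ---------------------------------------------------------------------------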
+ model->appendRow(row, -m_infinity, arcUB); + string rowNameUB = "capUB(" + UtilIntToStr(a) + "_" + + UtilIntToStr(arcs[a].tail) + "," + + UtilIntToStr(arcs[a].head) + ")"; + model->rowNames.push_back(rowNameUB); + model->appendRow(row, arcLB, m_infinity); + string rowNameLB = "capLB(" + UtilIntToStr(a) + "_" + + UtilIntToStr(arcs[a].tail) + "," + + UtilIntToStr(arcs[a].head) + ")"; + model->rowNames.push_back(rowNameLB); + } + + //--- + //--- create column names (helps with debugging) + //--- + for (k = 0; k < numCommodities; k++) { + for (a = 0; a < numArcs; a++) { + string colName = "x(comm_" + UtilIntToStr(k) + "," + UtilIntToStr(a) + + "_" + UtilIntToStr(arcs[a].tail) + "," + + UtilIntToStr(arcs[a].head) + ")"; + model->colNames.push_back(colName); + } + } + + //--- + //--- set the indices of the integer variables of model + //--- + UtilIotaN(model->integerVars, numCols, 0); + UtilPrintFuncEnd(m_osLog, m_classTag, "createModelCore()", + m_appParam.LogLevel, 2); } //===========================================================================// -void MCF_DecompApp::createModelRelaxSparse(DecompConstraintSet* model, - int commId) -{ - //--- - //--- SUBPROBLEM (A'): (one block for each k in K) - //--- sum{(j,i) in A} x[k,i,j] - - //--- sum{(i,j) in A} x[k,i,j] = d[i,k], for all i in N - //--- x[k,i,j] integer >= l[i,j] <= u[i,j], for all (i,j) in A - //--- For k=(s,t) in K, - //--- d[i,k] = -d[k] if i=s - //--- = d[k] if i=t - //--- = 0, otherwise - //--- - int a, i, head, tail, origColIndex, source, sink; - int numArcs = m_instance.m_numArcs; - int numNodes = m_instance.m_numNodes; - int numCommodities = m_instance.m_numCommodities; - int numCols = numArcs; - int numRows = numNodes; - int numColsOrig = numArcs * numCommodities; - MCF_Instance::arc* arcs = m_instance.m_arcs; - MCF_Instance::commodity* commodities = m_instance.m_commodities; - UtilPrintFuncBegin(m_osLog, m_classTag, - "createModelRelaxSparse()", m_appParam.LogLevel, 2); - //--- - //--- create space for the model matrix (row-majored) - //--- - model->M = new CoinPackedMatrix(false, 0.0, 0.0); - - if (!model->M) { - throw UtilExceptionMemory("createModelCore", "MCF_DecompApp"); - } - - model->M->setDimensions(0, numCols); - model->reserve(numRows, numCols); - model->setSparse(numColsOrig); - //--- - //--- get this commodity's source and sink node - //--- - source = commodities[commId].source; - sink = commodities[commId].sink; - - //--- - //--- create the rows - //--- NOTE: this is somewhat inefficient (but simple) - //--- - for (i = 0; i < numNodes; i++) { - CoinPackedVector row; - - for (a = 0; a < numArcs; a++) { - tail = arcs[a].tail; - head = arcs[a].head; - - if (head == i) { - row.insert(a, 1.0); - } else if (tail == i) { - row.insert(a, -1.0); - } +void MCF_DecompApp::createModelRelax(DecompConstraintSet *model, int commId) { + //--- + //--- SUBPROBLEM (A'): (one block for each k in K) + //--- sum{(j,i) in A} x[k,i,j] - + //--- sum{(i,j) in A} x[k,i,j] = d[i,k], for all i in N + //--- x[k,i,j] integer >= l[i,j] <= u[i,j], for all (i,j) in A + //--- For k=(s,t) in K, + //--- d[i,k] = -d[k] if i=s + //--- = d[k] if i=t + //--- = 0, otherwise + //--- + int a, i, head, tail, colIndex, source, sink; + int numCommodities = m_instance.m_numCommodities; + int numArcs = m_instance.m_numArcs; + int numNodes = m_instance.m_numNodes; + int numCols = numCommodities * numArcs; + int numRows = numNodes; + MCF_Instance::arc *arcs = m_instance.m_arcs; + MCF_Instance::commodity *commodities = m_instance.m_commodities; + 
UtilPrintFuncBegin(m_osLog, m_classTag, "createModelRelax()", + m_appParam.LogLevel, 2); + //--- + //--- create space for the model matrix (row-majored) + //--- + model->M = new CoinPackedMatrix(false, 0.0, 0.0); + + if (!model->M) { + throw UtilExceptionMemory("createModelCore", "MCF_DecompApp"); + } + + model->M->setDimensions(0, numCols); + model->reserve(numRows, numCols); + //--- + //--- get this commodity's source and sink node + //--- + source = commodities[commId].source; + sink = commodities[commId].sink; + + //--- + //--- create the rows + //--- NOTE: this is somewhat inefficient (but simple) + //--- + for (i = 0; i < numNodes; i++) { + CoinPackedVector row; + + for (a = 0; a < numArcs; a++) { + tail = arcs[a].tail; + head = arcs[a].head; + + if (head == i) { + colIndex = commId * numArcs + a; + row.insert(colIndex, 1.0); + } else if (tail == i) { + colIndex = commId * numArcs + a; + row.insert(colIndex, -1.0); } + } + + if (i == source) + model->appendRow(row, -commodities[commId].demand, + -commodities[commId].demand); + else if (i == sink) + model->appendRow(row, commodities[commId].demand, + commodities[commId].demand); + else { + model->appendRow(row, 0.0, 0.0); + } + + string rowName = "flow(" + UtilIntToStr(commId) + "_" + UtilIntToStr(i) + + "_" + UtilIntToStr(source) + "," + UtilIntToStr(sink) + + ")"; + model->rowNames.push_back(rowName); + } + + //--- + //--- create a list of the "active" columns (those related + //--- to this commmodity) all other columns are fixed to 0 + //--- + UtilFillN(model->colLB, numCols, 0.0); + UtilFillN(model->colUB, numCols, 0.0); + colIndex = commId * numArcs; + + for (a = 0; a < numArcs; a++) { + double arcLB = arcs[a].lb; + double arcUB = arcs[a].ub; + model->colLB[colIndex] = arcLB; + model->colUB[colIndex] = arcUB; + model->activeColumns.push_back(colIndex); + colIndex++; + } + + //--- + //--- set the indices of the integer variables of model + //--- + UtilIotaN(model->integerVars, numCols, 0); + UtilPrintFuncEnd(m_osLog, m_classTag, "createModelRelax()", + m_appParam.LogLevel, 2); +} - if (i == source) - model->appendRow(row, - -commodities[commId].demand, - -commodities[commId].demand); - else if (i == sink) - model->appendRow(row, - commodities[commId].demand, - commodities[commId].demand); - else { - model->appendRow(row, 0.0, 0.0); +//===========================================================================// +void MCF_DecompApp::createModelRelaxSparse(DecompConstraintSet *model, + int commId) { + //--- + //--- SUBPROBLEM (A'): (one block for each k in K) + //--- sum{(j,i) in A} x[k,i,j] - + //--- sum{(i,j) in A} x[k,i,j] = d[i,k], for all i in N + //--- x[k,i,j] integer >= l[i,j] <= u[i,j], for all (i,j) in A + //--- For k=(s,t) in K, + //--- d[i,k] = -d[k] if i=s + //--- = d[k] if i=t + //--- = 0, otherwise + //--- + int a, i, head, tail, origColIndex, source, sink; + int numArcs = m_instance.m_numArcs; + int numNodes = m_instance.m_numNodes; + int numCommodities = m_instance.m_numCommodities; + int numCols = numArcs; + int numRows = numNodes; + int numColsOrig = numArcs * numCommodities; + MCF_Instance::arc *arcs = m_instance.m_arcs; + MCF_Instance::commodity *commodities = m_instance.m_commodities; + UtilPrintFuncBegin(m_osLog, m_classTag, "createModelRelaxSparse()", + m_appParam.LogLevel, 2); + //--- + //--- create space for the model matrix (row-majored) + //--- + model->M = new CoinPackedMatrix(false, 0.0, 0.0); + + if (!model->M) { + throw UtilExceptionMemory("createModelCore", "MCF_DecompApp"); + } + + 
model->M->setDimensions(0, numCols); + model->reserve(numRows, numCols); + model->setSparse(numColsOrig); + //--- + //--- get this commodity's source and sink node + //--- + source = commodities[commId].source; + sink = commodities[commId].sink; + + //--- + //--- create the rows + //--- NOTE: this is somewhat inefficient (but simple) + //--- + for (i = 0; i < numNodes; i++) { + CoinPackedVector row; + + for (a = 0; a < numArcs; a++) { + tail = arcs[a].tail; + head = arcs[a].head; + + if (head == i) { + row.insert(a, 1.0); + } else if (tail == i) { + row.insert(a, -1.0); } - } - - //--- - //--- set the colLB, colUB, integerVars and sparse mapping - //--- - origColIndex = commId * numArcs; - - for (a = 0; a < numArcs; a++) { - double arcLB = arcs[a].lb; - double arcUB = arcs[a].ub; - model->pushCol(arcLB, arcUB, true, origColIndex); - origColIndex++; - } - - UtilPrintFuncEnd(m_osLog, m_classTag, - "createModelRelaxSparse()", m_appParam.LogLevel, 2); + } + + if (i == source) + model->appendRow(row, -commodities[commId].demand, + -commodities[commId].demand); + else if (i == sink) + model->appendRow(row, commodities[commId].demand, + commodities[commId].demand); + else { + model->appendRow(row, 0.0, 0.0); + } + } + + //--- + //--- set the colLB, colUB, integerVars and sparse mapping + //--- + origColIndex = commId * numArcs; + + for (a = 0; a < numArcs; a++) { + double arcLB = arcs[a].lb; + double arcUB = arcs[a].ub; + model->pushCol(arcLB, arcUB, true, origColIndex); + origColIndex++; + } + + UtilPrintFuncEnd(m_osLog, m_classTag, "createModelRelaxSparse()", + m_appParam.LogLevel, 2); } diff --git a/Dip/examples/MCF/MCF_Instance.cpp b/Dip/examples/MCF/MCF_Instance.cpp index 09718097..b804fc16 100644 --- a/Dip/examples/MCF/MCF_Instance.cpp +++ b/Dip/examples/MCF/MCF_Instance.cpp @@ -18,118 +18,109 @@ //===========================================================================// //===========================================================================// -int MCF_Instance::readInstance(string& fileName, - bool addDummyArcs) -{ - ifstream is; - int status = UtilOpenFile(is, fileName.c_str()); - - if (status) - throw UtilException("Failed to read instance", - "readInstance", "MCF_Instance"); - - double sumweight = 0; - bool size_read = true; - int arcs_read = 0; - int commodities_read = 0;; - char line[1000]; - char name[1000]; - - while (is.good()) { - is.getline(line, 1000); - - if (is.gcount() >= 999) { - cerr << "ERROR: Input file is incorrect. " - << "A line more than 1000 characters is found." << endl; - return 1; +int MCF_Instance::readInstance(string &fileName, bool addDummyArcs) { + ifstream is; + int status = UtilOpenFile(is, fileName.c_str()); + + if (status) + throw UtilException("Failed to read instance", "readInstance", + "MCF_Instance"); + + double sumweight = 0; + bool size_read = true; + int arcs_read = 0; + int commodities_read = 0; + ; + char line[1000]; + char name[1000]; + + while (is.good()) { + is.getline(line, 1000); + + if (is.gcount() >= 999) { + cerr << "ERROR: Input file is incorrect. " + << "A line more than 1000 characters is found." << endl; + return 1; + } + + switch (line[0]) { + case 'p': + if (sscanf(line, "p%s%i%i%i", name, &m_numNodes, &m_numArcs, + &m_numCommodities) != 4) { + cerr << "ERROR: Input file is incorrect. (p line)" << endl; + return 1; } - switch (line[0]) { - case 'p': - if (sscanf(line, "p%s%i%i%i", - name, &m_numNodes, &m_numArcs, &m_numCommodities) != 4) { - cerr << "ERROR: Input file is incorrect. 
(p line)" << endl; - return 1; - } - - m_problemName = name; - m_arcs = new arc[m_numArcs + - (addDummyArcs ? m_numCommodities : 0)]; - - if (!m_arcs) { - throw UtilExceptionMemory("readInstance", "MCF_DecompApp"); - } - - m_commodities = new commodity[m_numCommodities]; - - if (!m_commodities) { - throw UtilExceptionMemory("readInstance", "MCF_DecompApp"); - } - - break; - - case 'c': - break; - - case 'd': - if (sscanf(line, "d%i%i%i", - &m_commodities[commodities_read].source, - &m_commodities[commodities_read].sink, - &m_commodities[commodities_read].demand) != 3) { - cerr << "ERROR: Input file is incorrect. (d line)" << endl; - return 1; - } - - ++commodities_read; - break; - - case 'a': - if (sscanf(line, "a%i%i%i%i%lf", - &m_arcs[arcs_read].tail, - &m_arcs[arcs_read].head, - &m_arcs[arcs_read].lb, - &m_arcs[arcs_read].ub, - &m_arcs[arcs_read].weight) != 5) { - cerr << "Input file is incorrect. (a line)" << endl; - return 1; - } - - sumweight += fabs(m_arcs[arcs_read].weight); - ++arcs_read; - break; - - default: - if (sscanf(line + 1, "%s", name) <= 0) { - cerr << "Input file is incorrect. (non-recognizable line)" << endl; - return 1; - } - - break; + m_problemName = name; + m_arcs = new arc[m_numArcs + (addDummyArcs ? m_numCommodities : 0)]; + + if (!m_arcs) { + throw UtilExceptionMemory("readInstance", "MCF_DecompApp"); } - } - - if (!size_read || - arcs_read != m_numArcs || - commodities_read != m_numCommodities) { - cerr << "Input file is incorrect." - << " size_read=" << size_read - << " arcs_read=" << arcs_read - << " commodities_read=" << commodities_read << endl; - return 1; - } - - if (addDummyArcs) { - for (int i = 0; i < m_numCommodities; ++i) { - m_arcs[m_numArcs].tail = m_commodities[i].source; - m_arcs[m_numArcs].head = m_commodities[i].sink; - m_arcs[m_numArcs].lb = 0; - m_arcs[m_numArcs].ub = m_commodities[i].demand; - m_arcs[m_numArcs].weight = sumweight + 1; - ++m_numArcs; + + m_commodities = new commodity[m_numCommodities]; + + if (!m_commodities) { + throw UtilExceptionMemory("readInstance", "MCF_DecompApp"); } - } - is.close(); - return 0; -} + break; + + case 'c': + break; + + case 'd': + if (sscanf(line, "d%i%i%i", &m_commodities[commodities_read].source, + &m_commodities[commodities_read].sink, + &m_commodities[commodities_read].demand) != 3) { + cerr << "ERROR: Input file is incorrect. (d line)" << endl; + return 1; + } + + ++commodities_read; + break; + + case 'a': + if (sscanf(line, "a%i%i%i%i%lf", &m_arcs[arcs_read].tail, + &m_arcs[arcs_read].head, &m_arcs[arcs_read].lb, + &m_arcs[arcs_read].ub, &m_arcs[arcs_read].weight) != 5) { + cerr << "Input file is incorrect. (a line)" << endl; + return 1; + } + sumweight += fabs(m_arcs[arcs_read].weight); + ++arcs_read; + break; + + default: + if (sscanf(line + 1, "%s", name) <= 0) { + cerr << "Input file is incorrect. (non-recognizable line)" << endl; + return 1; + } + + break; + } + } + + if (!size_read || arcs_read != m_numArcs || + commodities_read != m_numCommodities) { + cerr << "Input file is incorrect." 
+ << " size_read=" << size_read << " arcs_read=" << arcs_read + << " commodities_read=" << commodities_read << endl; + return 1; + } + + if (addDummyArcs) { + for (int i = 0; i < m_numCommodities; ++i) { + m_arcs[m_numArcs].tail = m_commodities[i].source; + m_arcs[m_numArcs].head = m_commodities[i].sink; + m_arcs[m_numArcs].lb = 0; + m_arcs[m_numArcs].ub = m_commodities[i].demand; + m_arcs[m_numArcs].weight = sumweight + 1; + ++m_numArcs; + } + } + + is.close(); + return 0; +} diff --git a/Dip/examples/MCF/MCF_Main.cpp b/Dip/examples/MCF/MCF_Main.cpp index b8bbe793..a70eab8f 100644 --- a/Dip/examples/MCF/MCF_Main.cpp +++ b/Dip/examples/MCF/MCF_Main.cpp @@ -26,108 +26,101 @@ #include "UtilTimer.h" //===========================================================================// -int main(int argc, char** argv) -{ - try { - //--- - //--- create the utility class for parsing parameters - //--- - UtilParameters utilParam(argc, argv); - bool doCut = utilParam.GetSetting("doCut", true); - bool doPriceCut = utilParam.GetSetting("doPriceCut", false); - bool doDirect = utilParam.GetSetting("doDirect", false); - UtilTimer timer; - double timeSetupReal = 0.0; - double timeSetupCpu = 0.0; - double timeSolveReal = 0.0; - double timeSolveCpu = 0.0; +int main(int argc, char **argv) { + try { + //--- + //--- create the utility class for parsing parameters + //--- + UtilParameters utilParam(argc, argv); + bool doCut = utilParam.GetSetting("doCut", true); + bool doPriceCut = utilParam.GetSetting("doPriceCut", false); + bool doDirect = utilParam.GetSetting("doDirect", false); + UtilTimer timer; + double timeSetupReal = 0.0; + double timeSetupCpu = 0.0; + double timeSolveReal = 0.0; + double timeSolveCpu = 0.0; + //--- + //--- start overall timer + //--- + timer.start(); + //--- + //--- create the user application (a DecompApp) + //--- + MCF_DecompApp mmkp(utilParam); + //--- + //--- create the algorithm (a DecompAlgo) + //--- + DecompAlgo *algo = NULL; + assert(doCut + doPriceCut == 1); + + //--- + //--- create the CPM algorithm object + //--- + if (doCut) { + algo = new DecompAlgoC(&mmkp, utilParam); + } + + //--- + //--- create the PC algorithm object + //--- + if (doPriceCut) { + algo = new DecompAlgoPC(&mmkp, utilParam); + } + + if (doCut && doDirect) { + timer.stop(); + timeSetupCpu = timer.getCpuTime(); + timeSetupReal = timer.getRealTime(); //--- - //--- start overall timer + //--- solve //--- timer.start(); + algo->solveDirect(); + timer.stop(); + timeSolveCpu = timer.getCpuTime(); + timeSolveReal = timer.getRealTime(); + } else { //--- - //--- create the user application (a DecompApp) + //--- create the driver AlpsDecomp model //--- - MCF_DecompApp mmkp(utilParam); + int status = 0; + AlpsDecompModel alpsModel(utilParam, algo); + timer.stop(); + timeSetupCpu = timer.getCpuTime(); + timeSetupReal = timer.getRealTime(); //--- - //--- create the algorithm (a DecompAlgo) + //--- solve //--- - DecompAlgo* algo = NULL; - assert(doCut + doPriceCut == 1); - + timer.start(); + status = alpsModel.solve(); + timer.stop(); + timeSolveCpu = timer.getCpuTime(); + timeSolveReal = timer.getRealTime(); //--- - //--- create the CPM algorithm object + //--- sanity check //--- - if (doCut) { - algo = new DecompAlgoC(&mmkp, utilParam); - } - + cout << setiosflags(ios::fixed | ios::showpoint); + cout << "Status= " << status << " BestLB= " << setw(10) + << UtilDblToStr(alpsModel.getGlobalLB(), 5) + << " BestUB= " << setw(10) + << UtilDblToStr(alpsModel.getGlobalUB(), 5) << " Nodes= " << setw(6) + << 
alpsModel.getNumNodesProcessed() << " SetupCPU= " << timeSetupCpu + << " SolveCPU= " << timeSolveCpu + << " TotalCPU= " << timeSetupCpu + timeSolveCpu + << " SetupReal= " << timeSetupReal << " SolveReal= " << timeSolveReal + << " TotalReal= " << timeSetupReal + timeSolveReal << endl; //--- - //--- create the PC algorithm object + //--- free local memory //--- - if (doPriceCut) { - algo = new DecompAlgoPC(&mmkp, utilParam); - } + delete algo; + } + } catch (CoinError &ex) { + cerr << "COIN Exception [ " << ex.message() << " ]" + << " at " << ex.fileName() << ":L" << ex.lineNumber() << " in " + << ex.className() << "::" << ex.methodName() << endl; + return 1; + } - if (doCut && doDirect) { - timer.stop(); - timeSetupCpu = timer.getCpuTime(); - timeSetupReal = timer.getRealTime(); - //--- - //--- solve - //--- - timer.start(); - algo->solveDirect(); - timer.stop(); - timeSolveCpu = timer.getCpuTime(); - timeSolveReal = timer.getRealTime(); - } else { - //--- - //--- create the driver AlpsDecomp model - //--- - int status = 0; - AlpsDecompModel alpsModel(utilParam, algo); - timer.stop(); - timeSetupCpu = timer.getCpuTime(); - timeSetupReal = timer.getRealTime(); - //--- - //--- solve - //--- - timer.start(); - status = alpsModel.solve(); - timer.stop(); - timeSolveCpu = timer.getCpuTime(); - timeSolveReal = timer.getRealTime(); - //--- - //--- sanity check - //--- - cout << setiosflags(ios::fixed | ios::showpoint); - cout << "Status= " << status - << " BestLB= " << setw(10) - << UtilDblToStr(alpsModel.getGlobalLB(), 5) - << " BestUB= " << setw(10) - << UtilDblToStr(alpsModel.getGlobalUB(), 5) - << " Nodes= " << setw(6) - << alpsModel.getNumNodesProcessed() - << " SetupCPU= " << timeSetupCpu - << " SolveCPU= " << timeSolveCpu - << " TotalCPU= " << timeSetupCpu + timeSolveCpu - << " SetupReal= " << timeSetupReal - << " SolveReal= " << timeSolveReal - << " TotalReal= " << timeSetupReal + timeSolveReal - << endl; - //--- - //--- free local memory - //--- - delete algo; - } - } catch (CoinError& ex) { - cerr << "COIN Exception [ " << ex.message() << " ]" - << " at " << ex.fileName() << ":L" << ex.lineNumber() - << " in " << ex.className() << "::" << ex.methodName() << endl; - return 1; - } - - return 0; + return 0; } - diff --git a/Dip/examples/MILP/MILP_DecompApp.cpp b/Dip/examples/MILP/MILP_DecompApp.cpp index 77c0cd40..d3d45813 100644 --- a/Dip/examples/MILP/MILP_DecompApp.cpp +++ b/Dip/examples/MILP/MILP_DecompApp.cpp @@ -16,221 +16,210 @@ #include "MILP_DecompApp.h" //===========================================================================// -void MILP_DecompApp::initializeApp(UtilParameters & utilParam) { - - UtilPrintFuncBegin(m_osLog, m_classTag, - "initializeApp()", m_appParam.LogLevel, 2); - - //--- - //--- get application parameters - //--- - m_appParam.getSettings(utilParam); - if(m_appParam.LogLevel >= 1) - m_appParam.dumpSettings(); - - //--- - //--- read MILP instance (mps format) - //--- - string fileName; - if (m_appParam.DataDir != "") { - fileName = m_appParam.DataDir + UtilDirSlash() + m_param.Instance; - } else { - fileName = m_appParam.Instance; - } - - m_mpsIO.messageHandler()->setLogLevel(m_param.LogLpLevel); - - int rstatus = m_mpsIO.readMps(fileName.c_str()); - if(rstatus < 0){ - cerr << "Error: Filename = " << fileName << " failed to open." 
<< endl; - throw UtilException("I/O Error.", "initalizeApp", "MILP_DecompApp"); - } - if(m_appParam.LogLevel >= 2) - (*m_osLog) << "Objective Offset = " - << UtilDblToStr(m_mpsIO.objectiveOffset()) << endl; - - //--- - //--- set best known lb/ub - //--- - double offset = m_mpsIO.objectiveOffset(); - setBestKnownLB(m_appParam.BestKnownLB + offset); - setBestKnownUB(m_appParam.BestKnownUB + offset); - - //--- - //--- create models - //--- - createModels(); - - UtilPrintFuncEnd(m_osLog, m_classTag, - "initializeApp()", m_appParam.LogLevel, 2); +void MILP_DecompApp::initializeApp(UtilParameters &utilParam) { + + UtilPrintFuncBegin(m_osLog, m_classTag, "initializeApp()", + m_appParam.LogLevel, 2); + + //--- + //--- get application parameters + //--- + m_appParam.getSettings(utilParam); + if (m_appParam.LogLevel >= 1) + m_appParam.dumpSettings(); + + //--- + //--- read MILP instance (mps format) + //--- + string fileName; + if (m_appParam.DataDir != "") { + fileName = m_appParam.DataDir + UtilDirSlash() + m_param.Instance; + } else { + fileName = m_appParam.Instance; + } + + m_mpsIO.messageHandler()->setLogLevel(m_param.LogLpLevel); + + int rstatus = m_mpsIO.readMps(fileName.c_str()); + if (rstatus < 0) { + cerr << "Error: Filename = " << fileName << " failed to open." << endl; + throw UtilException("I/O Error.", "initalizeApp", "MILP_DecompApp"); + } + if (m_appParam.LogLevel >= 2) + (*m_osLog) << "Objective Offset = " + << UtilDblToStr(m_mpsIO.objectiveOffset()) << endl; + + //--- + //--- set best known lb/ub + //--- + double offset = m_mpsIO.objectiveOffset(); + setBestKnownLB(m_appParam.BestKnownLB + offset); + setBestKnownUB(m_appParam.BestKnownUB + offset); + + //--- + //--- create models + //--- + createModels(); + + UtilPrintFuncEnd(m_osLog, m_classTag, "initializeApp()", m_appParam.LogLevel, + 2); } //===========================================================================// -void MILP_DecompApp::createModels(){ - - UtilPrintFuncBegin(m_osLog, m_classTag, - "createModels()", m_appParam.LogLevel, 2); - - - //--- - //--- seed random number generator - //--- - srand(m_appParam.RandomSeed); - - //--- - //--- how many rows to put into relaxation - //--- - int i, c, nRowsRelax, nRowsCore; - const int nRows = m_mpsIO.getNumRows(); - const int nCols = m_mpsIO.getNumCols(); - nRowsRelax = static_cast(ceil(nRows * m_appParam.RelaxPercent)); - nRowsRelax = min(nRows-1, nRowsRelax); - nRowsCore = nRows - nRowsRelax; - - UTIL_MSG(m_appParam.LogLevel, 2, - (*m_osLog) << "Instance = " << m_appParam.Instance << endl; - (*m_osLog) << " nRows = " << nRows << endl; - (*m_osLog) << " nCols = " << nCols << endl; - (*m_osLog) << " nRowsCore = " << nRowsCore << endl; - (*m_osLog) << " nRowsRelax = " << nRowsRelax - << " [ " << 100*nRowsRelax/nRows << " % ]" << endl; - ); - - - //--- - //--- pick nRowsRelax random rows - //--- - set relaxRows; - while(static_cast(relaxRows.size()) < nRowsRelax) - relaxRows.insert(UtilURand(0,nRows-1)); - assert(static_cast(relaxRows.size()) == nRowsRelax); - - //--- - //--- setup markers for core and relax rows - //--- - int * rowsMarker = new int[nRows]; - int * rowsCore = new int[nRowsCore]; - int * rowsRelax = new int[nRowsRelax]; - UtilFillN(rowsMarker, nRows, 0); - - int nRowsCoreTmp = 0; - int nRowsRelaxTmp = 0; - set::iterator it; - for(it = relaxRows.begin(); it != relaxRows.end(); it++) - rowsMarker[*it] = 1; - for(i = 0; i < nRows; i++){ - if(rowsMarker[i]) - rowsRelax[nRowsRelaxTmp++] = i; - else - rowsCore[nRowsCoreTmp++] = i; - } - assert((nRowsRelaxTmp + 
nRowsCoreTmp) == nRows); - - UTIL_MSG(m_appParam.LogLevel, 3, - (*m_osLog) << "Core Rows:"; - for(i = 0; i < nRowsCore; i++) - (*m_osLog) << rowsCore[i] << " "; - (*m_osLog) << "\nRelax Rows:"; - for(i = 0; i < nRowsRelax; i++) - (*m_osLog) << rowsRelax[i] << " "; - (*m_osLog) << "\n"; - ); - - //--- - //--- Construct the objective function. - //--- - m_objective = new double[nCols]; - if(!m_objective) - throw UtilExceptionMemory("createModels", "MMKP_DecompApp"); - memcpy(m_objective, - m_mpsIO.getObjCoefficients(), nCols * sizeof(double)); - setModelObjective(m_objective, nCols); - - //--- - //--- Construct the core matrix. - //--- - m_modelRandCore.M = new CoinPackedMatrix(false, 0.0, 0.0); - if(!m_modelRandCore.M) - throw UtilExceptionMemory("createModels", "MILP_DecompApp"); - m_modelRandCore.reserve(nRowsCore, nCols); - m_modelRandCore.M->submatrixOf(*m_mpsIO.getMatrixByRow(), - nRowsCore, rowsCore); - - //--- - //--- Construct the relaxation matrix. - //--- - m_modelRandRelax.M = new CoinPackedMatrix(false, 0.0, 0.0); - if(!m_modelRandRelax.M) - throw UtilExceptionMemory("createModels", "MILP_DecompApp"); - m_modelRandRelax.reserve(nRowsRelax, nCols); - m_modelRandRelax.M->submatrixOf(*m_mpsIO.getMatrixByRow(), - nRowsRelax, rowsRelax); - - //--- - //--- set the row upper and lower bounds - //--- set the col upper and lower bounds - //--- - const double * rowLB = m_mpsIO.getRowLower(); - const double * rowUB = m_mpsIO.getRowUpper(); - const double * colLB = m_mpsIO.getColLower(); - const double * colUB = m_mpsIO.getColUpper(); - for(i = 0; i < nRowsCore; i++){ - m_modelRandCore.rowLB.push_back(rowLB[rowsCore[i]]); - m_modelRandCore.rowUB.push_back(rowUB[rowsCore[i]]); - } - for(i = 0; i < nRowsRelax; i++){ - m_modelRandRelax.rowLB.push_back(rowLB[rowsRelax[i]]); - m_modelRandRelax.rowUB.push_back(rowUB[rowsRelax[i]]); - } - copy(colLB, colLB + nCols, back_inserter( m_modelRandCore.colLB) ); - copy(colUB, colUB + nCols, back_inserter( m_modelRandCore.colUB) ); - copy(colLB, colLB + nCols, back_inserter( m_modelRandRelax.colLB) ); - copy(colUB, colUB + nCols, back_inserter( m_modelRandRelax.colUB) ); - - //--- - //--- big fat hack... 
we don't deal with dual rays yet, - //--- so, we assume subproblems are bounded - //--- - //--- NOTE: might also need to tighten LBs - //--- - for(c = 0; c < nCols; c++){ - //printf("c: %5d lb: %8.5f ub:%8.5f\n", c, colLB[c], colUB[c]); - if(colUB[c] > 1.0e15){ - printf("colUB[%d]: %g\n", c, colUB[c]); - m_modelRandRelax.colUB[c] = 1000; - m_modelRandCore.colUB[c] = 1000; +void MILP_DecompApp::createModels() { + + UtilPrintFuncBegin(m_osLog, m_classTag, "createModels()", m_appParam.LogLevel, + 2); + + //--- + //--- seed random number generator + //--- + srand(m_appParam.RandomSeed); + + //--- + //--- how many rows to put into relaxation + //--- + int i, c, nRowsRelax, nRowsCore; + const int nRows = m_mpsIO.getNumRows(); + const int nCols = m_mpsIO.getNumCols(); + nRowsRelax = static_cast<int>(ceil(nRows * m_appParam.RelaxPercent)); + nRowsRelax = min(nRows - 1, nRowsRelax); + nRowsCore = nRows - nRowsRelax; + + UTIL_MSG(m_appParam.LogLevel, 2, + (*m_osLog) << "Instance = " << m_appParam.Instance << endl; + (*m_osLog) << " nRows = " << nRows << endl; + (*m_osLog) << " nCols = " << nCols << endl; + (*m_osLog) << " nRowsCore = " << nRowsCore << endl; + (*m_osLog) << " nRowsRelax = " << nRowsRelax << " [ " + << 100 * nRowsRelax / nRows << " % ]" << endl;); + + //--- + //--- pick nRowsRelax random rows + //--- + set<int> relaxRows; + while (static_cast<int>(relaxRows.size()) < nRowsRelax) + relaxRows.insert(UtilURand(0, nRows - 1)); + assert(static_cast<int>(relaxRows.size()) == nRowsRelax); + + //--- + //--- setup markers for core and relax rows + //--- + int *rowsMarker = new int[nRows]; + int *rowsCore = new int[nRowsCore]; + int *rowsRelax = new int[nRowsRelax]; + UtilFillN(rowsMarker, nRows, 0); + + int nRowsCoreTmp = 0; + int nRowsRelaxTmp = 0; + set<int>::iterator it; + for (it = relaxRows.begin(); it != relaxRows.end(); it++) + rowsMarker[*it] = 1; + for (i = 0; i < nRows; i++) { + if (rowsMarker[i]) + rowsRelax[nRowsRelaxTmp++] = i; + else + rowsCore[nRowsCoreTmp++] = i; + } + assert((nRowsRelaxTmp + nRowsCoreTmp) == nRows); + + UTIL_MSG(m_appParam.LogLevel, 3, (*m_osLog) << "Core Rows:"; + for (i = 0; i < nRowsCore; i++)(*m_osLog) << rowsCore[i] << " "; + (*m_osLog) << "\nRelax Rows:"; + for (i = 0; i < nRowsRelax; i++)(*m_osLog) << rowsRelax[i] << " "; + (*m_osLog) << "\n";); + + //--- + //--- Construct the objective function. + //--- + m_objective = new double[nCols]; + if (!m_objective) + throw UtilExceptionMemory("createModels", "MMKP_DecompApp"); + memcpy(m_objective, m_mpsIO.getObjCoefficients(), nCols * sizeof(double)); + setModelObjective(m_objective, nCols); + + //--- + //--- Construct the core matrix. + //--- + m_modelRandCore.M = new CoinPackedMatrix(false, 0.0, 0.0); + if (!m_modelRandCore.M) + throw UtilExceptionMemory("createModels", "MILP_DecompApp"); + m_modelRandCore.reserve(nRowsCore, nCols); + m_modelRandCore.M->submatrixOf(*m_mpsIO.getMatrixByRow(), nRowsCore, + rowsCore); + + //--- + //--- Construct the relaxation matrix.
+ //--- + m_modelRandRelax.M = new CoinPackedMatrix(false, 0.0, 0.0); + if (!m_modelRandRelax.M) + throw UtilExceptionMemory("createModels", "MILP_DecompApp"); + m_modelRandRelax.reserve(nRowsRelax, nCols); + m_modelRandRelax.M->submatrixOf(*m_mpsIO.getMatrixByRow(), nRowsRelax, + rowsRelax); + + //--- + //--- set the row upper and lower bounds + //--- set the col upper and lower bounds + //--- + const double *rowLB = m_mpsIO.getRowLower(); + const double *rowUB = m_mpsIO.getRowUpper(); + const double *colLB = m_mpsIO.getColLower(); + const double *colUB = m_mpsIO.getColUpper(); + for (i = 0; i < nRowsCore; i++) { + m_modelRandCore.rowLB.push_back(rowLB[rowsCore[i]]); + m_modelRandCore.rowUB.push_back(rowUB[rowsCore[i]]); + } + for (i = 0; i < nRowsRelax; i++) { + m_modelRandRelax.rowLB.push_back(rowLB[rowsRelax[i]]); + m_modelRandRelax.rowUB.push_back(rowUB[rowsRelax[i]]); + } + copy(colLB, colLB + nCols, back_inserter(m_modelRandCore.colLB)); + copy(colUB, colUB + nCols, back_inserter(m_modelRandCore.colUB)); + copy(colLB, colLB + nCols, back_inserter(m_modelRandRelax.colLB)); + copy(colUB, colUB + nCols, back_inserter(m_modelRandRelax.colUB)); + + //--- + //--- big fat hack... we don't deal with dual rays yet, + //--- so, we assume subproblems are bounded + //--- + //--- NOTE: might also need to tighten LBs + //--- + for (c = 0; c < nCols; c++) { + // printf("c: %5d lb: %8.5f ub:%8.5f\n", c, colLB[c], colUB[c]); + if (colUB[c] > 1.0e15) { + printf("colUB[%d]: %g\n", c, colUB[c]); + m_modelRandRelax.colUB[c] = 1000; + m_modelRandCore.colUB[c] = 1000; + } + } + + //--- + //--- set the indices of the integer variables of modelRelax + //--- + const char *integerVars = m_mpsIO.integerColumns(); + if (integerVars) { + for (c = 0; c < nCols; c++) { + if (integerVars[c]) { + m_modelRandCore.integerVars.push_back(c); + m_modelRandRelax.integerVars.push_back(c); } - } - - //--- - //--- set the indices of the integer variables of modelRelax - //--- - const char * integerVars = m_mpsIO.integerColumns(); - if(integerVars){ - for(c = 0; c < nCols; c++){ - if(integerVars[c]){ - m_modelRandCore.integerVars.push_back(c); - m_modelRandRelax.integerVars.push_back(c); - } - } - } - - //--- - //--- set core and relax systems in framework - //--- - setModelCore (&m_modelRandCore, "core"); - setModelRelax(&m_modelRandRelax, "relax"); - - - //--- - //--- free up local memory - //--- - UTIL_DELARR(rowsMarker); - UTIL_DELARR(rowsCore); - UTIL_DELARR(rowsRelax); - - UtilPrintFuncEnd(m_osLog, m_classTag, - "createModels()", m_appParam.LogLevel, 2); + } + } + + //--- + //--- set core and relax systems in framework + //--- + setModelCore(&m_modelRandCore, "core"); + setModelRelax(&m_modelRandRelax, "relax"); + + //--- + //--- free up local memory + //--- + UTIL_DELARR(rowsMarker); + UTIL_DELARR(rowsCore); + UTIL_DELARR(rowsRelax); + + UtilPrintFuncEnd(m_osLog, m_classTag, "createModels()", m_appParam.LogLevel, + 2); } - - diff --git a/Dip/examples/MILP/MILP_Main.cpp b/Dip/examples/MILP/MILP_Main.cpp index 93f00f32..b31b564a 100644 --- a/Dip/examples/MILP/MILP_Main.cpp +++ b/Dip/examples/MILP/MILP_Main.cpp @@ -26,137 +26,128 @@ #include "UtilTimer.h" //===========================================================================// -int main(int argc, char ** argv){ - try{ - - //--- - //--- create the utility class for parsing parameters - //--- - UtilParameters utilParam(argc, argv); - - bool doCut = utilParam.GetSetting("doCut", true); - bool doPriceCut = utilParam.GetSetting("doPriceCut", false); - bool doDirect = 
utilParam.GetSetting("doDirect", false); - - UtilTimer timer; - double timeSetupReal = 0.0; - double timeSetupCpu = 0.0; - double timeSolveReal = 0.0; - double timeSolveCpu = 0.0; +int main(int argc, char **argv) { + try { + + //--- + //--- create the utility class for parsing parameters + //--- + UtilParameters utilParam(argc, argv); + + bool doCut = utilParam.GetSetting("doCut", true); + bool doPriceCut = utilParam.GetSetting("doPriceCut", false); + bool doDirect = utilParam.GetSetting("doDirect", false); + + UtilTimer timer; + double timeSetupReal = 0.0; + double timeSetupCpu = 0.0; + double timeSolveReal = 0.0; + double timeSolveCpu = 0.0; + + //--- + //--- start overall timer + //--- + timer.start(); + + //--- + //--- create the user application (a DecompApp) + //--- + MILP_DecompApp milp(utilParam); + + //--- + //--- create the algorithm (a DecompAlgo) + //--- + DecompAlgo *algo = NULL; + assert(doCut + doPriceCut == 1); + + //--- + //--- create the CPM algorithm object + //--- + if (doCut) + algo = new DecompAlgoC(&milp, utilParam); + + //--- + //--- create the PC algorithm object + //--- + if (doPriceCut) + algo = new DecompAlgoPC(&milp, utilParam); + + if (doCut && doDirect) { + timer.stop(); + timeSetupCpu = timer.getCpuTime(); + timeSetupReal = timer.getRealTime(); //--- - //--- start overall timer + //--- solve //--- timer.start(); - + algo->solveDirect(); + timer.stop(); + timeSolveCpu = timer.getCpuTime(); + timeSolveReal = timer.getRealTime(); + } else { //--- - //--- create the user application (a DecompApp) + //--- create the driver AlpsDecomp model //--- - MILP_DecompApp milp(utilParam); + AlpsDecompModel alpsModel(utilParam, algo); + + timer.stop(); + timeSetupCpu = timer.getCpuTime(); + timeSetupReal = timer.getRealTime(); //--- - //--- create the algorithm (a DecompAlgo) + //--- solve //--- - DecompAlgo * algo = NULL; - assert(doCut + doPriceCut == 1); + timer.start(); + alpsModel.solve(); + timer.stop(); + timeSolveCpu = timer.getCpuTime(); + timeSolveReal = timer.getRealTime(); //--- - //--- create the CPM algorithm object - //--- - if(doCut) - algo = new DecompAlgoC(&milp, utilParam); + //--- sanity check + //--- + cout << setiosflags(ios::fixed | ios::showpoint); + cout << "Status= " << alpsModel.getSolStatus() << " BestLB= " << setw(10) + << UtilDblToStr(alpsModel.getGlobalLB(), 5) + << " BestUB= " << setw(10) + << UtilDblToStr(alpsModel.getGlobalUB(), 5) << " Nodes= " << setw(6) + << alpsModel.getNumNodesProcessed() << " SetupCPU= " << timeSetupCpu + << " SolveCPU= " << timeSolveCpu + << " TotalCPU= " << timeSetupCpu + timeSolveCpu + << " SetupReal= " << timeSetupReal << " SolveReal= " << timeSolveReal + << " TotalReal= " << timeSetupReal + timeSolveReal << endl; //--- - //--- create the PC algorithm object + //--- sanity check + //--- if user defines bestLB==bestUB (i.e., known optimal) + //--- and solved claims we have optimal, check that they match //--- - if(doPriceCut) - algo = new DecompAlgoPC(&milp, utilParam); - - if(doCut && doDirect){ - timer.stop(); - timeSetupCpu = timer.getCpuTime(); - timeSetupReal = timer.getRealTime(); - - //--- - //--- solve - //--- - timer.start(); - algo->solveDirect(); - timer.stop(); - timeSolveCpu = timer.getCpuTime(); - timeSolveReal = timer.getRealTime(); + double epsilon = 1.0e-5; + double userLB = milp.getBestKnownLB(); + double userUB = milp.getBestKnownUB(); + double userDiff = fabs(userUB - userLB); + if (alpsModel.getSolStatus() == AlpsExitStatusOptimal && + userDiff < epsilon) { + double diff = 
fabs(alpsModel.getGlobalUB() - userUB); + if (diff > epsilon) { + cerr << "ERROR. BestKnownLB/UB= " << userUB + << " but DIP claims GlobalUB= " << alpsModel.getGlobalUB() + << endl; + throw UtilException("Invalid claim of optimal.", "main", "MILP"); + } } - else{ - //--- - //--- create the driver AlpsDecomp model - //--- - AlpsDecompModel alpsModel(utilParam, algo); - - timer.stop(); - timeSetupCpu = timer.getCpuTime(); - timeSetupReal = timer.getRealTime(); - - //--- - //--- solve - //--- - timer.start(); - alpsModel.solve(); - timer.stop(); - timeSolveCpu = timer.getCpuTime(); - timeSolveReal = timer.getRealTime(); - - //--- - //--- sanity check - //--- - cout << setiosflags(ios::fixed|ios::showpoint); - cout << "Status= " << alpsModel.getSolStatus() - << " BestLB= " << setw(10) - << UtilDblToStr(alpsModel.getGlobalLB(),5) - << " BestUB= " << setw(10) - << UtilDblToStr(alpsModel.getGlobalUB(),5) - << " Nodes= " << setw(6) - << alpsModel.getNumNodesProcessed() - << " SetupCPU= " << timeSetupCpu - << " SolveCPU= " << timeSolveCpu - << " TotalCPU= " << timeSetupCpu + timeSolveCpu - << " SetupReal= " << timeSetupReal - << " SolveReal= " << timeSolveReal - << " TotalReal= " << timeSetupReal + timeSolveReal - << endl; - - - //--- - //--- sanity check - //--- if user defines bestLB==bestUB (i.e., known optimal) - //--- and solved claims we have optimal, check that they match - //--- - double epsilon = 1.0e-5; - double userLB = milp.getBestKnownLB(); - double userUB = milp.getBestKnownUB(); - double userDiff = fabs(userUB - userLB); - if(alpsModel.getSolStatus() == AlpsExitStatusOptimal && - userDiff < epsilon){ - double diff = fabs(alpsModel.getGlobalUB() - userUB); - if(diff > epsilon){ - cerr << "ERROR. BestKnownLB/UB= " - << userUB << " but DIP claims GlobalUB= " - << alpsModel.getGlobalUB() << endl; - throw UtilException("Invalid claim of optimal.", - "main", "MILP"); - } - } - - //--- - //--- free local memory - //--- - delete algo; - } - } - catch(CoinError & ex){ - cerr << "COIN Exception [ " << ex.message() << " ]" - << " at " << ex.fileName() << ":L" << ex.lineNumber() - << " in " << ex.className() << "::" << ex.methodName() << endl; - return 1; - } - return 0; -} + + //--- + //--- free local memory + //--- + delete algo; + } + } catch (CoinError &ex) { + cerr << "COIN Exception [ " << ex.message() << " ]" + << " at " << ex.fileName() << ":L" << ex.lineNumber() << " in " + << ex.className() << "::" << ex.methodName() << endl; + return 1; + } + return 0; +} diff --git a/Dip/examples/MILPBlock/MILPBlock_DecompApp.cpp b/Dip/examples/MILPBlock/MILPBlock_DecompApp.cpp index c2103fe8..46196a50 100644 --- a/Dip/examples/MILPBlock/MILPBlock_DecompApp.cpp +++ b/Dip/examples/MILPBlock/MILPBlock_DecompApp.cpp @@ -13,403 +13,395 @@ //===========================================================================// //===========================================================================// -#include "DecompAlgo.h" #include "MILPBlock_DecompApp.h" +#include "DecompAlgo.h" //===========================================================================// -void MILPBlock_DecompApp::initializeApp() { - - UtilPrintFuncBegin(m_osLog, m_classTag, - "initializeApp()", m_appParam.LogLevel, 2); - - //--- - //--- read MILPBlock instance (mps format) - //--- - string fileName; - if (m_appParam.DataDir != "") { - fileName = m_appParam.DataDir + UtilDirSlash() + m_param.Instance; - } else { - fileName = m_appParam.Instance; - } - - m_mpsIO.messageHandler()->setLogLevel(m_param.LogLpLevel); - - int rstatus = 
m_mpsIO.readMps(fileName.c_str()); - if(rstatus < 0){ - cerr << "Error: Filename = " << fileName << " failed to open." << endl; - throw UtilException("I/O Error.", "initalizeApp", "MILPBlock_DecompApp"); - } - if(m_appParam.LogLevel >= 2) - (*m_osLog) << "Objective Offset = " - << UtilDblToStr(m_mpsIO.objectiveOffset()) << endl; - - //--- - //--- set best known lb/ub - //--- - double offset = m_mpsIO.objectiveOffset(); - setBestKnownLB(m_appParam.BestKnownLB + offset); - setBestKnownUB(m_appParam.BestKnownUB + offset); - - //--- - //--- read block file - //--- - readBlockFile(); - - //--- - //--- create models - //--- - createModels(); - - UtilPrintFuncEnd(m_osLog, m_classTag, - "initializeApp()", m_appParam.LogLevel, 2); +void MILPBlock_DecompApp::initializeApp() { + + UtilPrintFuncBegin(m_osLog, m_classTag, "initializeApp()", + m_appParam.LogLevel, 2); + + //--- + //--- read MILPBlock instance (mps format) + //--- + string fileName; + if (m_appParam.DataDir != "") { + fileName = m_appParam.DataDir + UtilDirSlash() + m_param.Instance; + } else { + fileName = m_appParam.Instance; + } + + m_mpsIO.messageHandler()->setLogLevel(m_param.LogLpLevel); + + int rstatus = m_mpsIO.readMps(fileName.c_str()); + if (rstatus < 0) { + cerr << "Error: Filename = " << fileName << " failed to open." << endl; + throw UtilException("I/O Error.", "initalizeApp", "MILPBlock_DecompApp"); + } + if (m_appParam.LogLevel >= 2) + (*m_osLog) << "Objective Offset = " + << UtilDblToStr(m_mpsIO.objectiveOffset()) << endl; + + //--- + //--- set best known lb/ub + //--- + double offset = m_mpsIO.objectiveOffset(); + setBestKnownLB(m_appParam.BestKnownLB + offset); + setBestKnownUB(m_appParam.BestKnownUB + offset); + + //--- + //--- read block file + //--- + readBlockFile(); + + //--- + //--- create models + //--- + createModels(); + + UtilPrintFuncEnd(m_osLog, m_classTag, "initializeApp()", m_appParam.LogLevel, + 2); } //===========================================================================// -void MILPBlock_DecompApp::readBlockFile(){ - - ifstream is; - string fileName = m_appParam.DataDir - + UtilDirSlash() + m_appParam.BlockFile; - - //--- - //--- is there a permutation file? - //--- this file just remaps the row ids - //--- (for use in submission of atm to MIPLIB2010 and debugging) - //--- - map permute; - map::iterator mit; - string fileNameP = m_appParam.DataDir - + UtilDirSlash() + m_appParam.PermuteFile; - - if(m_appParam.PermuteFile.size() > 0){ - ifstream isP; - int rowIdOld, rowIdNew; - //--- - //--- open file streams - //--- - UtilOpenFile(isP, fileName.c_str()); - while(!isP.eof()){ - if(isP.eof()) break; - isP >> rowIdOld >> rowIdNew; - permute.insert(make_pair(rowIdOld, rowIdNew)); +void MILPBlock_DecompApp::readBlockFile() { + + ifstream is; + string fileName = m_appParam.DataDir + UtilDirSlash() + m_appParam.BlockFile; + + //--- + //--- is there a permutation file? 
+ //--- this file just remaps the row ids + //--- (for use in submission of atm to MIPLIB2010 and debugging) + //--- + map permute; + map::iterator mit; + string fileNameP = + m_appParam.DataDir + UtilDirSlash() + m_appParam.PermuteFile; + + if (m_appParam.PermuteFile.size() > 0) { + ifstream isP; + int rowIdOld, rowIdNew; + //--- + //--- open file streams + //--- + UtilOpenFile(isP, fileName.c_str()); + while (!isP.eof()) { + if (isP.eof()) + break; + isP >> rowIdOld >> rowIdNew; + permute.insert(make_pair(rowIdOld, rowIdNew)); + } + isP.close(); + } + + //--- + //--- open file streams + //--- + UtilOpenFile(is, fileName.c_str()); + + int i, rowId, rowIdP, numRowsInBlock, blockId; + if (m_appParam.LogLevel >= 1) + (*m_osLog) << "Reading " << fileName << endl; + + map> blocks; + map>::iterator blocksIt; + if (m_appParam.BlockFileFormat == "List" || + m_appParam.BlockFileFormat == "LIST") { + + //--- + //--- The block file defines those rows in each block. + //--- + //--- + //--- + //--- + //--- + while (!is.eof()) { + is >> blockId; + is >> numRowsInBlock; + if (is.eof()) + break; + vector rowsInBlock; + for (i = 0; i < numRowsInBlock; i++) { + is >> rowId; + mit = permute.find(rowId); + if (mit != permute.end()) + rowsInBlock.push_back(mit->second); + else + rowsInBlock.push_back(rowId); } - isP.close(); - } - - - //--- - //--- open file streams - //--- - UtilOpenFile(is, fileName.c_str()); - - int i, rowId, rowIdP, numRowsInBlock, blockId; - if(m_appParam.LogLevel >= 1) - (*m_osLog) << "Reading " << fileName << endl; - - map > blocks; - map >::iterator blocksIt; - if(m_appParam.BlockFileFormat == "List" || - m_appParam.BlockFileFormat == "LIST"){ - - //--- - //--- The block file defines those rows in each block. - //--- - //--- - //--- - //--- - //--- - while(!is.eof()){ - is >> blockId; - is >> numRowsInBlock; - if(is.eof()) break; - vector rowsInBlock; - for(i = 0; i < numRowsInBlock; i++){ - is >> rowId; - mit = permute.find(rowId); - if(mit != permute.end()) - rowsInBlock.push_back(mit->second); - else - rowsInBlock.push_back(rowId); - } - blocks.insert(make_pair(blockId, rowsInBlock)); - if(is.eof()) break; + blocks.insert(make_pair(blockId, rowsInBlock)); + if (is.eof()) + break; + } + } else if (m_appParam.BlockFileFormat == "Pair" || + m_appParam.BlockFileFormat == "PAIR") { + //--- + //--- + //--- ... + //--- + is >> blockId; + while (!is.eof()) { + is >> rowId; + mit = permute.find(rowId); + if (mit != permute.end()) + rowIdP = mit->second; + else + rowIdP = rowId; + blocksIt = blocks.find(blockId); + if (blocksIt != blocks.end()) + blocksIt->second.push_back(rowIdP); + else { + vector rowsInBlocks; + rowsInBlocks.push_back(rowIdP); + blocks.insert(make_pair(blockId, rowsInBlocks)); } - } - else if(m_appParam.BlockFileFormat == "Pair" || - m_appParam.BlockFileFormat == "PAIR"){ - //--- - //--- - //--- ... - //--- is >> blockId; - while(!is.eof()){ - is >> rowId; - mit = permute.find(rowId); - if(mit != permute.end()) - rowIdP = mit->second; - else - rowIdP = rowId; - blocksIt = blocks.find(blockId); - if(blocksIt != blocks.end()) - blocksIt->second.push_back(rowIdP); - else{ - vector rowsInBlocks; - rowsInBlocks.push_back(rowIdP); - blocks.insert(make_pair(blockId, rowsInBlocks)); - } - is >> blockId; - if(is.eof()) break; - - } - } else if(m_appParam.BlockFileFormat == "PairName" || - m_appParam.BlockFileFormat == "PAIRNAME"){ - //--- - //--- - //--- ... 
- //--- - - //--- - //--- first create a map from row name to row id from mps - //--- CHECK: mps to OSI guaranteed to keep order of rows? - //--- - map rowNameToId; - map::iterator rowNameToIdIt; - for(i = 0; i < m_mpsIO.getNumRows(); i++){ - rowNameToId.insert(make_pair(m_mpsIO.rowName(i), i)); + if (is.eof()) + break; + } + } else if (m_appParam.BlockFileFormat == "PairName" || + m_appParam.BlockFileFormat == "PAIRNAME") { + //--- + //--- + //--- ... + //--- + + //--- + //--- first create a map from row name to row id from mps + //--- CHECK: mps to OSI guaranteed to keep order of rows? + //--- + map rowNameToId; + map::iterator rowNameToIdIt; + for (i = 0; i < m_mpsIO.getNumRows(); i++) { + rowNameToId.insert(make_pair(m_mpsIO.rowName(i), i)); + } + + string rowName = ""; + is >> blockId; + while (!is.eof()) { + is >> rowName; + if (is.eof()) + break; + rowNameToIdIt = rowNameToId.find(rowName); + if (rowNameToIdIt != rowNameToId.end()) { + rowId = rowNameToIdIt->second; + // printf("rowName=%s rowId=%d\n", rowName.c_str(), rowId); + } else { + //--- + //--- NOTE: this can happen if we use a presolved mps file + //--- with an original blocks file + //--- + if (m_appParam.LogLevel >= 3) { + (*m_osLog) << "Warning: Row name (" << rowName << " in block file " + << "is not found in mps file" << endl; + } + // throw UtilException("Invalid Input.", + // "readBlockFile", "MILPBlock_DecompApp"); + rowId = -1; } - - string rowName = ""; - is >> blockId; - while(!is.eof()){ - is >> rowName; - if(is.eof()) - break; - rowNameToIdIt = rowNameToId.find(rowName); - if(rowNameToIdIt != rowNameToId.end()){ - rowId = rowNameToIdIt->second; - //printf("rowName=%s rowId=%d\n", rowName.c_str(), rowId); - } - else{ - //--- - //--- NOTE: this can happen if we use a presolved mps file - //--- with an original blocks file - //--- - if(m_appParam.LogLevel >= 3){ - (*m_osLog) << "Warning: Row name (" - << rowName << " in block file " - << "is not found in mps file" << endl; - } - //throw UtilException("Invalid Input.", - // "readBlockFile", "MILPBlock_DecompApp"); - rowId = -1; - } - if(rowId != -1){ - mit = permute.find(rowId); - if(mit != permute.end()) - rowIdP = mit->second; - else - rowIdP = rowId; - blocksIt = blocks.find(blockId); - if(blocksIt != blocks.end()) - blocksIt->second.push_back(rowIdP); - else{ - vector rowsInBlocks; - rowsInBlocks.push_back(rowIdP); - blocks.insert(make_pair(blockId, rowsInBlocks)); - } - } - is >> blockId; - if(is.eof()) - break; - } - } else{ - cerr << "Error: BlockFileFormat = " - << m_appParam.BlockFileFormat - << " is an invalid type. Valid types = (List,Pair,PairName)." 
- << endl; - throw UtilException("Invalid Parameter.", - "readBlockFile", "MILPBlock_DecompApp"); - } - - //--- - //--- after presolve, some blocks might have been completely - //--- removed - renumber the block ids - it is arbitrary anyway - //--- and copy into class object m_blocks - //--- - blockId = 0; - for(blocksIt = blocks.begin(); blocksIt != blocks.end(); blocksIt++){ - m_blocks.insert(make_pair(blockId, blocksIt->second)); - blockId++; - } - - if(m_appParam.LogLevel >= 3){ - map >::iterator mit; - vector ::iterator vit; - for(mit = m_blocks.begin(); mit != m_blocks.end(); mit++){ - (*m_osLog) << "Block " << (*mit).first << " : "; - for(vit = (*mit).second.begin(); vit != (*mit).second.end(); vit++) - (*m_osLog) << (*vit) << " "; - (*m_osLog) << endl; + if (rowId != -1) { + mit = permute.find(rowId); + if (mit != permute.end()) + rowIdP = mit->second; + else + rowIdP = rowId; + blocksIt = blocks.find(blockId); + if (blocksIt != blocks.end()) + blocksIt->second.push_back(rowIdP); + else { + vector rowsInBlocks; + rowsInBlocks.push_back(rowIdP); + blocks.insert(make_pair(blockId, rowsInBlocks)); + } } - } - //exit(1); - is.close(); + is >> blockId; + if (is.eof()) + break; + } + } else { + cerr << "Error: BlockFileFormat = " << m_appParam.BlockFileFormat + << " is an invalid type. Valid types = (List,Pair,PairName)." << endl; + throw UtilException("Invalid Parameter.", "readBlockFile", + "MILPBlock_DecompApp"); + } + + //--- + //--- after presolve, some blocks might have been completely + //--- removed - renumber the block ids - it is arbitrary anyway + //--- and copy into class object m_blocks + //--- + blockId = 0; + for (blocksIt = blocks.begin(); blocksIt != blocks.end(); blocksIt++) { + m_blocks.insert(make_pair(blockId, blocksIt->second)); + blockId++; + } + + if (m_appParam.LogLevel >= 3) { + map>::iterator mit; + vector::iterator vit; + for (mit = m_blocks.begin(); mit != m_blocks.end(); mit++) { + (*m_osLog) << "Block " << (*mit).first << " : "; + for (vit = (*mit).second.begin(); vit != (*mit).second.end(); vit++) + (*m_osLog) << (*vit) << " "; + (*m_osLog) << endl; + } + } + // exit(1); + is.close(); } //===========================================================================// -void MILPBlock_DecompApp::readInitSolutionFile(DecompVarList & initVars){ - - //TODO: is this ok for sparse? - - ifstream is; - string fileName = m_appParam.DataDir - + UtilDirSlash() + m_appParam.InitSolutionFile; - if(m_appParam.InitSolutionFile == "") - return; - - //--- - //--- create map from col name to col index - //--- - int i; - map colNameToIndex; - const vector & colNames = m_modelC->getColNames(); - for(i = 0; i < m_modelC->getNumCols(); i++) - colNameToIndex.insert(make_pair(colNames[i], i)); - - //--- - //--- create a map from col index to block index - //--- - map colIndexToBlockIndex; - map::iterator mit; - const double * colLB = m_modelC->getColLB(); - const double * colUB = m_modelC->getColUB(); - for(mit = m_modelR.begin(); mit != m_modelR.end(); mit++){ - int blockIndex = mit->first; - DecompConstraintSet * model = mit->second; - /* - if(model->m_masterOnly){ - colIndexToBlockIndex.insert(make_pair(model->m_masterOnlyIndex, - blockIndex)); +void MILPBlock_DecompApp::readInitSolutionFile(DecompVarList &initVars) { + + // TODO: is this ok for sparse? 
+ + ifstream is; + string fileName = + m_appParam.DataDir + UtilDirSlash() + m_appParam.InitSolutionFile; + if (m_appParam.InitSolutionFile == "") + return; + + //--- + //--- create map from col name to col index + //--- + int i; + map colNameToIndex; + const vector &colNames = m_modelC->getColNames(); + for (i = 0; i < m_modelC->getNumCols(); i++) + colNameToIndex.insert(make_pair(colNames[i], i)); + + //--- + //--- create a map from col index to block index + //--- + map colIndexToBlockIndex; + map::iterator mit; + const double *colLB = m_modelC->getColLB(); + const double *colUB = m_modelC->getColUB(); + for (mit = m_modelR.begin(); mit != m_modelR.end(); mit++) { + int blockIndex = mit->first; + DecompConstraintSet *model = mit->second; + /* + if(model->m_masterOnly){ + colIndexToBlockIndex.insert(make_pair(model->m_masterOnlyIndex, + blockIndex)); + } + else + */ + { + const vector &activeColumns = model->getActiveColumns(); + vector::const_iterator vit; + for (vit = activeColumns.begin(); vit != activeColumns.end(); vit++) { + colIndexToBlockIndex.insert(make_pair(*vit, blockIndex)); } - else - */{ - const vector & activeColumns = model->getActiveColumns(); - vector::const_iterator vit; - for(vit = activeColumns.begin(); vit != activeColumns.end(); vit++){ - colIndexToBlockIndex.insert(make_pair(*vit, blockIndex)); - } - } - } - - //--- - //--- open file streams - //--- - UtilOpenFile(is, fileName.c_str()); - if(m_appParam.LogLevel >= 1) - (*m_osLog) << "Reading " << fileName << endl; - - //--- - //--- create variables for each block of each solution - //--- - int solutionIndex, colIndex, blockIndex; - string colName; - double colValue; - char line[1000]; - map< pair, pair< vector,vector > > varTemp; - map< pair, pair< vector,vector > >::iterator it; - is.getline(line, 1000); - - - //TODO? master-only - // 1. if user gives lb, then add lb only - // if 0, add 0-col? or just let it take care of from PI? - // 2. if user gives ub, then add ub only - // 3. 
if user gives betwen bounds, then add lb and ub - // unless it is general integer - while(!is.eof()){ - is >> solutionIndex >> colName >> colValue; - if(is.eof()) break; - colIndex = colNameToIndex[colName]; - blockIndex = colIndexToBlockIndex[colIndex]; - DecompConstraintSet * model = m_modelR[blockIndex]; - - /* if(model->m_masterOnly){ - printf("MasterOnly col=%s value=%g lb=%g ub=%g", - colName.c_str(), colValue, colLB[colIndex], colUB[colIndex]); - if(colValue < (colUB[colIndex]-1.0e-5) && - colValue > (colLB[colIndex]+1.0e-5)){ - printf(" --> in between bounds"); - //TODO: if so, should add both lb and ub - } - printf("\n"); - }*/ - - pair p = make_pair(solutionIndex, blockIndex); - it = varTemp.find(p); - if(it == varTemp.end()){ - vector ind; - vector els; - ind.push_back(colIndex); - els.push_back(colValue); - varTemp.insert(make_pair(p, make_pair(ind, els))); - } - else{ - vector & ind = it->second.first; - vector & els = it->second.second; - ind.push_back(colIndex); - els.push_back(colValue); - } - } - - //--- - //--- create DecompVar's from varTemp - //--- - for(it = varTemp.begin(); it != varTemp.end(); it++){ - const pair & indexPair = it->first; - pair< vector, vector > & columnPair = it->second; - double origCost = 0.0; - for(i = 0; i < static_cast(columnPair.first.size()); i++){ - origCost += columnPair.second[i] * - m_objective[columnPair.first[i]]; - } - DecompVar * var = new DecompVar(columnPair.first, - columnPair.second, - -1.0, - origCost); - var->setBlockId(indexPair.second); - - var->print(m_infinity, m_osLog, colNames); - - initVars.push_back(var); - printf("Adding initial variable with origCost = %g\n", origCost); - } - - is.close(); + } + } + + //--- + //--- open file streams + //--- + UtilOpenFile(is, fileName.c_str()); + if (m_appParam.LogLevel >= 1) + (*m_osLog) << "Reading " << fileName << endl; + + //--- + //--- create variables for each block of each solution + //--- + int solutionIndex, colIndex, blockIndex; + string colName; + double colValue; + char line[1000]; + map, pair, vector>> varTemp; + map, pair, vector>>::iterator it; + is.getline(line, 1000); + + // TODO? master-only + // 1. if user gives lb, then add lb only + // if 0, add 0-col? or just let it take care of from PI? + // 2. if user gives ub, then add ub only + // 3. 
if user gives betwen bounds, then add lb and ub + // unless it is general integer + while (!is.eof()) { + is >> solutionIndex >> colName >> colValue; + if (is.eof()) + break; + colIndex = colNameToIndex[colName]; + blockIndex = colIndexToBlockIndex[colIndex]; + DecompConstraintSet *model = m_modelR[blockIndex]; + + /* if(model->m_masterOnly){ + printf("MasterOnly col=%s value=%g lb=%g ub=%g", + colName.c_str(), colValue, colLB[colIndex], colUB[colIndex]); + if(colValue < (colUB[colIndex]-1.0e-5) && + colValue > (colLB[colIndex]+1.0e-5)){ + printf(" --> in between bounds"); + //TODO: if so, should add both lb and ub + } + printf("\n"); + }*/ + + pair<int, int> p = make_pair(solutionIndex, blockIndex); + it = varTemp.find(p); + if (it == varTemp.end()) { + vector<int> ind; + vector<double> els; + ind.push_back(colIndex); + els.push_back(colValue); + varTemp.insert(make_pair(p, make_pair(ind, els))); + } else { + vector<int> &ind = it->second.first; + vector<double> &els = it->second.second; + ind.push_back(colIndex); + els.push_back(colValue); + } + } + + //--- + //--- create DecompVar's from varTemp + //--- + for (it = varTemp.begin(); it != varTemp.end(); it++) { + const pair<int, int> &indexPair = it->first; + pair<vector<int>, vector<double>> &columnPair = it->second; + double origCost = 0.0; + for (i = 0; i < static_cast<int>(columnPair.first.size()); i++) { + origCost += columnPair.second[i] * m_objective[columnPair.first[i]]; + } + DecompVar *var = + new DecompVar(columnPair.first, columnPair.second, -1.0, origCost); + var->setBlockId(indexPair.second); + + var->print(m_infinity, m_osLog, colNames); + + initVars.push_back(var); + printf("Adding initial variable with origCost = %g\n", origCost); + } + + is.close(); } //===========================================================================// -void -MILPBlock_DecompApp::findActiveColumns(const vector<int> & rowsPart, - set<int> & activeColsSet){ - - const CoinPackedMatrix * M = m_mpsIO.getMatrixByRow(); - const int * ind = M->getIndices(); - const int * beg = M->getVectorStarts(); - const int * len = M->getVectorLengths(); - const int * indR = NULL; - - //--- - //--- which columns are present in this part's rows - //--- - int k, r; - vector<int>::const_iterator it; - for(it = rowsPart.begin(); it != rowsPart.end(); it++){ - r = *it; - indR = ind + beg[r]; - for(k = 0; k < len[r]; k++){ - activeColsSet.insert(indR[k]); - } - } +void MILPBlock_DecompApp::findActiveColumns(const vector<int> &rowsPart, + set<int> &activeColsSet) { + + const CoinPackedMatrix *M = m_mpsIO.getMatrixByRow(); + const int *ind = M->getIndices(); + const int *beg = M->getVectorStarts(); + const int *len = M->getVectorLengths(); + const int *indR = NULL; + + //--- + //--- which columns are present in this part's rows + //--- + int k, r; + vector<int>::const_iterator it; + for (it = rowsPart.begin(); it != rowsPart.end(); it++) { + r = *it; + indR = ind + beg[r]; + for (k = 0; k < len[r]; k++) { + activeColsSet.insert(indR[k]); + } + } } //===========================================================================// @@ -441,432 +433,417 @@ MILPBlock_DecompApp::createModelMasterOnlys(vector<int> & masterOnlyCols){ //THINK: // what-if master-only var is integer and bound is not at integer - + DecompConstraintSet * model = new DecompConstraintSet(); model->m_masterOnly = true; model->m_masterOnlyIndex = i; model->m_masterOnlyLB = colLB[i]; model->m_masterOnlyUB = colUB[i]; //0=cont, 1=integer - model->m_masterOnlyIsInt = + model->m_masterOnlyIsInt = (integerVars && integerVars[i]) ? 
true : false; if(colUB[i] > 1.0e15 && - m_appParam.ColumnUB >= 1.0e15) - (*m_osLog) << "WARNING: Master-only column " << i - << " has unbounded upper bound. DIP does not" - << " yet support extreme rays. Please bound all" - << " variables or use the ColumnUB parameter." << endl; - if(colLB[i] < -1.0e15 && - m_appParam.ColumnLB <= -1.0e15) - (*m_osLog) << "WARNING: Master-only column " << i - << " has unbounded lower bound. DIP does not" - << " yet support extreme rays. Please bound all" - << " variables or use the ColumnLB parameter." << endl; + m_appParam.ColumnUB >= 1.0e15) + (*m_osLog) << "WARNING: Master-only column " << i + << " has unbounded upper bound. DIP does not" + << " yet support extreme rays. Please bound all" + << " variables or use the ColumnUB parameter." << endl; + if(colLB[i] < -1.0e15 && + m_appParam.ColumnLB <= -1.0e15) + (*m_osLog) << "WARNING: Master-only column " << i + << " has unbounded lower bound. DIP does not" + << " yet support extreme rays. Please bound all" + << " variables or use the ColumnLB parameter." << endl; if(m_appParam.ColumnUB < 1.0e15) - if(colUB[i] > 1.0e15) - model->m_masterOnlyUB = m_appParam.ColumnUB; + if(colUB[i] > 1.0e15) + model->m_masterOnlyUB = m_appParam.ColumnUB; if(m_appParam.ColumnLB > -1.0e15) - if(colLB[i] < -1.0e15) - model->m_masterOnlyLB = m_appParam.ColumnLB; + if(colLB[i] < -1.0e15) + model->m_masterOnlyLB = m_appParam.ColumnLB; m_modelR.insert(make_pair(nBlocks, model)); - setModelRelax(model, + setModelRelax(model, "master_only" + UtilIntToStr(i), nBlocks); nBlocks++; } - return; + return; } */ //===========================================================================// -void -MILPBlock_DecompApp::createModelPart(DecompConstraintSet * model, - const int nRowsPart, - const int * rowsPart){ - - const int nCols = m_mpsIO.getNumCols(); - const double * rowLB = m_mpsIO.getRowLower(); - const double * rowUB = m_mpsIO.getRowUpper(); - const double * colLB = m_mpsIO.getColLower(); - const double * colUB = m_mpsIO.getColUpper(); - const char * integerVars = m_mpsIO.integerColumns(); - - model->M = new CoinPackedMatrix(false, 0.0, 0.0); - if(!model->M) - throw UtilExceptionMemory("createModels", "MILPBlock_DecompApp"); - model->reserve(nRowsPart, nCols); - model->M->submatrixOf(*m_mpsIO.getMatrixByRow(), nRowsPart, rowsPart); - - //--- - //--- set the row upper and lower bounds - //--- set the col upper and lower bounds - //--- - int i, r; - for(i = 0; i < nRowsPart; i++){ - r = rowsPart[i]; - if(m_appParam.UseNames){ - const char * rowName = m_mpsIO.rowName(r); - if(rowName) - model->rowNames.push_back(rowName); +void MILPBlock_DecompApp::createModelPart(DecompConstraintSet *model, + const int nRowsPart, + const int *rowsPart) { + + const int nCols = m_mpsIO.getNumCols(); + const double *rowLB = m_mpsIO.getRowLower(); + const double *rowUB = m_mpsIO.getRowUpper(); + const double *colLB = m_mpsIO.getColLower(); + const double *colUB = m_mpsIO.getColUpper(); + const char *integerVars = m_mpsIO.integerColumns(); + + model->M = new CoinPackedMatrix(false, 0.0, 0.0); + if (!model->M) + throw UtilExceptionMemory("createModels", "MILPBlock_DecompApp"); + model->reserve(nRowsPart, nCols); + model->M->submatrixOf(*m_mpsIO.getMatrixByRow(), nRowsPart, rowsPart); + + //--- + //--- set the row upper and lower bounds + //--- set the col upper and lower bounds + //--- + int i, r; + for (i = 0; i < nRowsPart; i++) { + r = rowsPart[i]; + if (m_appParam.UseNames) { + const char *rowName = m_mpsIO.rowName(r); + if (rowName) + 
model->rowNames.push_back(rowName); + } + model->rowLB.push_back(rowLB[r]); + model->rowUB.push_back(rowUB[r]); + } + copy(colLB, colLB + nCols, back_inserter(model->colLB)); + copy(colUB, colUB + nCols, back_inserter(model->colUB)); + + //--- + //--- big fat hack... we don't deal with dual rays yet, + //--- so, we assume subproblems are bounded + //--- + //--- NOTE: might also need to tighten LBs + //--- + //--- Too small - ATM infeasible! + //--- Too big - round off issues with big coeffs in + //--- master-only vars + //--- + //--- TODO: need extreme rays or bounded subproblems from user + //--- + if (m_appParam.ColumnUB < 1.0e15) { + for (i = 0; i < nCols; i++) { + if (colUB[i] > 1.0e15) { + model->colUB[i] = m_appParam.ColumnUB; } - model->rowLB.push_back(rowLB[r]); - model->rowUB.push_back(rowUB[r]); - } - copy(colLB, colLB + nCols, back_inserter( model->colLB) ); - copy(colUB, colUB + nCols, back_inserter( model->colUB) ); - - //--- - //--- big fat hack... we don't deal with dual rays yet, - //--- so, we assume subproblems are bounded - //--- - //--- NOTE: might also need to tighten LBs - //--- - //--- Too small - ATM infeasible! - //--- Too big - round off issues with big coeffs in - //--- master-only vars - //--- - //--- TODO: need extreme rays or bounded subproblems from user - //--- - if(m_appParam.ColumnUB < 1.0e15){ - for(i = 0; i < nCols; i++){ - if(colUB[i] > 1.0e15){ - model->colUB[i] = m_appParam.ColumnUB; - } - } - } - if(m_appParam.ColumnLB > -1.0e15){ - for(i = 0; i < nCols; i++){ - if(colLB[i] < -1.0e15){ - model->colLB[i] = m_appParam.ColumnLB; - } + } + } + if (m_appParam.ColumnLB > -1.0e15) { + for (i = 0; i < nCols; i++) { + if (colLB[i] < -1.0e15) { + model->colLB[i] = m_appParam.ColumnLB; } - } - - //--- - //--- set the indices of the integer variables of modelRelax - //--- also set the column names, if they exist - //--- - for(i = 0; i < nCols; i++){ - if(m_appParam.UseNames){ - const char * colName = m_mpsIO.columnName(i); - if(colName) - model->colNames.push_back(colName); - } - if(integerVars && integerVars[i]){ - model->integerVars.push_back(i); - } - } + } + } + + //--- + //--- set the indices of the integer variables of modelRelax + //--- also set the column names, if they exist + //--- + for (i = 0; i < nCols; i++) { + if (m_appParam.UseNames) { + const char *colName = m_mpsIO.columnName(i); + if (colName) + model->colNames.push_back(colName); + } + if (integerVars && integerVars[i]) { + model->integerVars.push_back(i); + } + } } //===========================================================================// -void -MILPBlock_DecompApp::createModelPartSparse(DecompConstraintSet * model, - const int nRowsPart, - const int * rowsPart){ - - const int nColsOrig = m_mpsIO.getNumCols(); - const double * rowLB = m_mpsIO.getRowLower(); - const double * rowUB = m_mpsIO.getRowUpper(); - const double * colLB = m_mpsIO.getColLower(); - const double * colUB = m_mpsIO.getColUpper(); - const char * integerVars = m_mpsIO.integerColumns(); - - //--- - //--- set model as sparse - //--- - model->setSparse(nColsOrig); - - bool isInteger; - int nCols, origIndex, newIndex; - vector::iterator vit; - newIndex = 0; - for(vit = model->activeColumns.begin(); - vit != model->activeColumns.end(); vit++){ - origIndex = *vit; - if(integerVars && integerVars[origIndex]) - isInteger = true; - else - isInteger = false; - model->pushCol(colLB[origIndex], - colUB[origIndex], - isInteger, - origIndex); - - //--- - //--- big fat hack... 
we don't deal with dual rays yet, - //--- so, we assume subproblems are bounded - //--- - if(m_appParam.ColumnUB < 1.0e15){ - if(colUB[origIndex] > 1.0e15){ - model->colUB[newIndex] = m_appParam.ColumnUB; - } +void MILPBlock_DecompApp::createModelPartSparse(DecompConstraintSet *model, + const int nRowsPart, + const int *rowsPart) { + + const int nColsOrig = m_mpsIO.getNumCols(); + const double *rowLB = m_mpsIO.getRowLower(); + const double *rowUB = m_mpsIO.getRowUpper(); + const double *colLB = m_mpsIO.getColLower(); + const double *colUB = m_mpsIO.getColUpper(); + const char *integerVars = m_mpsIO.integerColumns(); + + //--- + //--- set model as sparse + //--- + model->setSparse(nColsOrig); + + bool isInteger; + int nCols, origIndex, newIndex; + vector::iterator vit; + newIndex = 0; + for (vit = model->activeColumns.begin(); vit != model->activeColumns.end(); + vit++) { + origIndex = *vit; + if (integerVars && integerVars[origIndex]) + isInteger = true; + else + isInteger = false; + model->pushCol(colLB[origIndex], colUB[origIndex], isInteger, origIndex); + + //--- + //--- big fat hack... we don't deal with dual rays yet, + //--- so, we assume subproblems are bounded + //--- + if (m_appParam.ColumnUB < 1.0e15) { + if (colUB[origIndex] > 1.0e15) { + model->colUB[newIndex] = m_appParam.ColumnUB; } - if(m_appParam.ColumnLB > -1.0e15){ - if(colLB[origIndex] < -1.0e15){ - model->colLB[newIndex] = m_appParam.ColumnLB; - } + } + if (m_appParam.ColumnLB > -1.0e15) { + if (colLB[origIndex] < -1.0e15) { + model->colLB[newIndex] = m_appParam.ColumnLB; } - - if(m_appParam.UseNames){ - const char * colName = m_mpsIO.columnName(origIndex); - if(colName) - model->colNames.push_back(colName); - } - newIndex++; - } - - nCols = static_cast(model->activeColumns.size()); - assert(static_cast(model->colLB.size()) == nCols); - assert(static_cast(model->colUB.size()) == nCols); - - model->M = new CoinPackedMatrix(false, 0.0, 0.0); - if(!model->M) - throw UtilExceptionMemory("createModels", "MILPBlock_DecompApp"); - model->M->setDimensions(0, nCols); - model->reserve(nRowsPart, nCols); - - //--- - //--- for each row in rowsPart, create the row using sparse mapping - //--- - int i, k, r, begInd; - const map & origToSparse = model->getMapOrigToSparse(); - const CoinPackedMatrix * M = m_mpsIO.getMatrixByRow(); - const int * matInd = M->getIndices(); - const CoinBigIndex * matBeg = M->getVectorStarts(); - const int * matLen = M->getVectorLengths(); - const double * matVal = M->getElements(); - const int * matIndI = NULL; - const double * matValI = NULL; - - vector & rowBeg = model->m_rowBeg;//used as temp - vector & rowInd = model->m_rowInd;//used as temp - vector & rowVal = model->m_rowVal;//used as temp - map::const_iterator mit; - - begInd = 0; - rowBeg.push_back(0); - for(i = 0; i < nRowsPart; i++){ - r = rowsPart[i]; - if(m_appParam.UseNames){ - const char * rowName = m_mpsIO.rowName(r); - if(rowName) - model->rowNames.push_back(rowName); - } - model->rowLB.push_back(rowLB[r]); - model->rowUB.push_back(rowUB[r]); - - matIndI = matInd + matBeg[r]; - matValI = matVal + matBeg[r]; - for(k = 0; k < matLen[r]; k++){ - origIndex = matIndI[k]; - mit = origToSparse.find(origIndex); - assert(mit != origToSparse.end()); - rowInd.push_back(mit->second); - rowVal.push_back(matValI[k]); - } - begInd += matLen[r]; - rowBeg.push_back(begInd); - } - model->M->appendRows(nRowsPart, - &rowBeg[0], - &rowInd[0], - &rowVal[0]); - rowBeg.clear(); - rowInd.clear(); - rowVal.clear(); + } + + if (m_appParam.UseNames) { + const char 
*colName = m_mpsIO.columnName(origIndex); + if (colName) + model->colNames.push_back(colName); + } + newIndex++; + } + + nCols = static_cast(model->activeColumns.size()); + assert(static_cast(model->colLB.size()) == nCols); + assert(static_cast(model->colUB.size()) == nCols); + + model->M = new CoinPackedMatrix(false, 0.0, 0.0); + if (!model->M) + throw UtilExceptionMemory("createModels", "MILPBlock_DecompApp"); + model->M->setDimensions(0, nCols); + model->reserve(nRowsPart, nCols); + + //--- + //--- for each row in rowsPart, create the row using sparse mapping + //--- + int i, k, r, begInd; + const map &origToSparse = model->getMapOrigToSparse(); + const CoinPackedMatrix *M = m_mpsIO.getMatrixByRow(); + const int *matInd = M->getIndices(); + const CoinBigIndex *matBeg = M->getVectorStarts(); + const int *matLen = M->getVectorLengths(); + const double *matVal = M->getElements(); + const int *matIndI = NULL; + const double *matValI = NULL; + + vector &rowBeg = model->m_rowBeg; // used as temp + vector &rowInd = model->m_rowInd; // used as temp + vector &rowVal = model->m_rowVal; // used as temp + map::const_iterator mit; + + begInd = 0; + rowBeg.push_back(0); + for (i = 0; i < nRowsPart; i++) { + r = rowsPart[i]; + if (m_appParam.UseNames) { + const char *rowName = m_mpsIO.rowName(r); + if (rowName) + model->rowNames.push_back(rowName); + } + model->rowLB.push_back(rowLB[r]); + model->rowUB.push_back(rowUB[r]); + + matIndI = matInd + matBeg[r]; + matValI = matVal + matBeg[r]; + for (k = 0; k < matLen[r]; k++) { + origIndex = matIndI[k]; + mit = origToSparse.find(origIndex); + assert(mit != origToSparse.end()); + rowInd.push_back(mit->second); + rowVal.push_back(matValI[k]); + } + begInd += matLen[r]; + rowBeg.push_back(begInd); + } + model->M->appendRows(nRowsPart, &rowBeg[0], &rowInd[0], &rowVal[0]); + rowBeg.clear(); + rowInd.clear(); + rowVal.clear(); } //===========================================================================// -void MILPBlock_DecompApp::createModels(){ - - UtilPrintFuncBegin(m_osLog, m_classTag, - "createModels()", m_appParam.LogLevel, 2); - - //--- - //--- how many rows to put into relaxation - //--- - int i, nRowsRelax, nRowsCore; - const int nRows = m_mpsIO.getNumRows(); - const int nCols = m_mpsIO.getNumCols(); - int nBlocks = static_cast(m_blocks.size()); - - map >::iterator mit; - nRowsRelax = 0; - for(mit = m_blocks.begin(); mit != m_blocks.end(); mit++) - nRowsRelax += static_cast((*mit).second.size()); - nRowsCore = nRows - nRowsRelax; - - UTIL_MSG(m_appParam.LogLevel, 2, - (*m_osLog) << "Instance = " << m_appParam.Instance << endl; - (*m_osLog) << " nRows = " << nRows << endl; - (*m_osLog) << " nCols = " << nCols << endl; - (*m_osLog) << " nBlocks = " << nBlocks << endl; - (*m_osLog) << " nRowsCore = " << nRowsCore << endl; - (*m_osLog) << " nRowsRelax = " << nRowsRelax - << " [ " << 100*nRowsRelax/nRows << " % ]" << endl; - ); - - //--- - //--- setup markers for core and relax rows - //--- - int * rowsMarker = new int[nRows]; - int * rowsCore = new int[nRowsCore]; - UtilFillN(rowsMarker, nRows, -1);//-1 will mark core rows - - for(mit = m_blocks.begin(); mit != m_blocks.end(); mit++){ - vector & rowsRelax = (*mit).second; - vector::iterator vit; - for(vit = rowsRelax.begin(); vit != rowsRelax.end(); vit++) - rowsMarker[*vit] = (*mit).first; - } - - int nRowsCoreTmp = 0; - for(i = 0; i < nRows; i++){ - if(rowsMarker[i] == -1) - rowsCore[nRowsCoreTmp++] = i; - } - assert(nRowsCoreTmp == nRowsCore); - - UTIL_MSG(m_appParam.LogLevel, 3, - (*m_osLog) << "Core 
Rows:"; - for(i = 0; i < nRowsCore; i++) - (*m_osLog) << rowsCore[i] << " "; - (*m_osLog) << "\n"; - ); - - //--- - //--- Construct the objective function. - //--- - m_objective = new double[nCols]; - if(!m_objective) - throw UtilExceptionMemory("createModels", "MILPBlock_DecompApp"); - memcpy(m_objective, - m_mpsIO.getObjCoefficients(), nCols * sizeof(double)); - if(m_appParam.ObjectiveSense == -1){ - for(i = 0; i < nCols; i++) - m_objective[i] *= -1; - } - setModelObjective(m_objective, nCols); - - //--- - //--- Construct the core matrix. - //--- - DecompConstraintSet * modelCore = new DecompConstraintSet(); - createModelPart(modelCore, nRowsCore, rowsCore); - - //--- - //--- save a pointer so we can delete it later - //--- - m_modelC = modelCore; - - //--- - //--- Construct the relaxation matrices. - //--- - for(mit = m_blocks.begin(); mit != m_blocks.end(); mit++){ - vector & rowsRelax = (*mit).second; - int nRowsRelax = static_cast(rowsRelax.size()); - - if(m_appParam.LogLevel >= 1) - (*m_osLog) << "Create model part nRowsRelax = " - << nRowsRelax << " (Block=" << (*mit).first << ")" << endl; - - DecompConstraintSet * modelRelax = new DecompConstraintSet(); - CoinAssertHint(modelRelax, "Error: Out of Memory"); - - //--- - //--- find and set active columns +void MILPBlock_DecompApp::createModels() { + + UtilPrintFuncBegin(m_osLog, m_classTag, "createModels()", m_appParam.LogLevel, + 2); + + //--- + //--- how many rows to put into relaxation + //--- + int i, nRowsRelax, nRowsCore; + const int nRows = m_mpsIO.getNumRows(); + const int nCols = m_mpsIO.getNumCols(); + int nBlocks = static_cast(m_blocks.size()); + + map>::iterator mit; + nRowsRelax = 0; + for (mit = m_blocks.begin(); mit != m_blocks.end(); mit++) + nRowsRelax += static_cast((*mit).second.size()); + nRowsCore = nRows - nRowsRelax; + + UTIL_MSG(m_appParam.LogLevel, 2, + (*m_osLog) << "Instance = " << m_appParam.Instance << endl; + (*m_osLog) << " nRows = " << nRows << endl; + (*m_osLog) << " nCols = " << nCols << endl; + (*m_osLog) << " nBlocks = " << nBlocks << endl; + (*m_osLog) << " nRowsCore = " << nRowsCore << endl; + (*m_osLog) << " nRowsRelax = " << nRowsRelax << " [ " + << 100 * nRowsRelax / nRows << " % ]" << endl;); + + //--- + //--- setup markers for core and relax rows + //--- + int *rowsMarker = new int[nRows]; + int *rowsCore = new int[nRowsCore]; + UtilFillN(rowsMarker, nRows, -1); //-1 will mark core rows + + for (mit = m_blocks.begin(); mit != m_blocks.end(); mit++) { + vector &rowsRelax = (*mit).second; + vector::iterator vit; + for (vit = rowsRelax.begin(); vit != rowsRelax.end(); vit++) + rowsMarker[*vit] = (*mit).first; + } + + int nRowsCoreTmp = 0; + for (i = 0; i < nRows; i++) { + if (rowsMarker[i] == -1) + rowsCore[nRowsCoreTmp++] = i; + } + assert(nRowsCoreTmp == nRowsCore); + + UTIL_MSG(m_appParam.LogLevel, 3, (*m_osLog) << "Core Rows:"; + for (i = 0; i < nRowsCore; i++)(*m_osLog) << rowsCore[i] << " "; + (*m_osLog) << "\n";); + + //--- + //--- Construct the objective function. + //--- + m_objective = new double[nCols]; + if (!m_objective) + throw UtilExceptionMemory("createModels", "MILPBlock_DecompApp"); + memcpy(m_objective, m_mpsIO.getObjCoefficients(), nCols * sizeof(double)); + if (m_appParam.ObjectiveSense == -1) { + for (i = 0; i < nCols; i++) + m_objective[i] *= -1; + } + setModelObjective(m_objective, nCols); + + //--- + //--- Construct the core matrix. 
+ //--- + DecompConstraintSet *modelCore = new DecompConstraintSet(); + createModelPart(modelCore, nRowsCore, rowsCore); + + //--- + //--- save a pointer so we can delete it later + //--- + m_modelC = modelCore; + + //--- + //--- Construct the relaxation matrices. + //--- + for (mit = m_blocks.begin(); mit != m_blocks.end(); mit++) { + vector &rowsRelax = (*mit).second; + int nRowsRelax = static_cast(rowsRelax.size()); + + if (m_appParam.LogLevel >= 1) + (*m_osLog) << "Create model part nRowsRelax = " << nRowsRelax + << " (Block=" << (*mit).first << ")" << endl; + + DecompConstraintSet *modelRelax = new DecompConstraintSet(); + CoinAssertHint(modelRelax, "Error: Out of Memory"); + + //--- + //--- find and set active columns + //--- + set::iterator sit; + set activeColsSet; + findActiveColumns(rowsRelax, activeColsSet); + for (sit = activeColsSet.begin(); sit != activeColsSet.end(); sit++) + modelRelax->activeColumns.push_back(*sit); + + if (m_appParam.UseSparse) { //--- - set::iterator sit; - set activeColsSet; - findActiveColumns(rowsRelax, activeColsSet); - for(sit = activeColsSet.begin(); sit != activeColsSet.end(); sit++) - modelRelax->activeColumns.push_back(*sit); - - if(m_appParam.UseSparse){ - //--- - //--- create model part (using sparse API) - //--- - createModelPartSparse(modelRelax, nRowsRelax, &rowsRelax[0]); - } - else{ - //--- - //--- create model part (using dense API) - //--- - createModelPart(modelRelax, nRowsRelax, &rowsRelax[0]); - } - + //--- create model part (using sparse API) //--- - //--- save a pointer so we can delete it later - //--- - m_modelR.insert(make_pair((*mit).first, modelRelax)); - } - - //--- - //--- figure out which columns are not active in any subprobs - //--- we refer to these as "master-only" variables - //--- - int * colMarker = new int[nCols]; - if(!colMarker) - throw UtilExceptionMemory("createModels", "MILPBlock_DecompApp"); - UtilFillN(colMarker, nCols, 0); - - vector ::iterator vi; - map ::iterator mdi; - for(mdi = m_modelR.begin(); mdi != m_modelR.end(); mdi++){ - vector & activeColumns = (*mdi).second->activeColumns; - for(vi = activeColumns.begin(); vi != activeColumns.end(); vi++){ - colMarker[*vi] = 1; - } - } - - for(i = 0; i < nCols; i++){ - if(!colMarker[i]){ - if(m_appParam.LogLevel >= 3){ - if(modelCore->getColNames().size() > 0) - (*m_osLog) << "Column " << setw(5) << i << " -> " - << setw(25) << modelCore->colNames[i] - << " is not in union of blocks." 
<< endl; - } - modelCore->masterOnlyCols.push_back(i); - } - } - if(m_appParam.LogLevel >= 3){ - (*m_osLog) << "Master only columns:" << endl; - UtilPrintVector(modelCore->masterOnlyCols, m_osLog); - if(modelCore->getColNames().size() > 0) - UtilPrintVector(modelCore->masterOnlyCols, - modelCore->getColNames(), m_osLog); - } - - //--- - //--- set core and system in framework - //--- - setModelCore(modelCore, "core"); - - for(mdi = m_modelR.begin(); mdi != m_modelR.end(); mdi++){ - DecompConstraintSet * modelRelax = (*mdi).second; + createModelPartSparse(modelRelax, nRowsRelax, &rowsRelax[0]); + } else { //--- - //--- set system in framework + //--- create model part (using dense API) //--- - setModelRelax((*mdi).second, - "relax" + UtilIntToStr((*mdi).first), - (*mdi).first); - - if(m_appParam.LogLevel >= 3){ - (*m_osLog) << "Active Columns:" << endl; - UtilPrintVector(modelRelax->activeColumns, m_osLog); - if(modelCore->getColNames().size() > 0) - UtilPrintVector(modelRelax->activeColumns, - modelCore->getColNames(), m_osLog); + createModelPart(modelRelax, nRowsRelax, &rowsRelax[0]); + } + + //--- + //--- save a pointer so we can delete it later + //--- + m_modelR.insert(make_pair((*mit).first, modelRelax)); + } + + //--- + //--- figure out which columns are not active in any subprobs + //--- we refer to these as "master-only" variables + //--- + int *colMarker = new int[nCols]; + if (!colMarker) + throw UtilExceptionMemory("createModels", "MILPBlock_DecompApp"); + UtilFillN(colMarker, nCols, 0); + + vector::iterator vi; + map::iterator mdi; + for (mdi = m_modelR.begin(); mdi != m_modelR.end(); mdi++) { + vector &activeColumns = (*mdi).second->activeColumns; + for (vi = activeColumns.begin(); vi != activeColumns.end(); vi++) { + colMarker[*vi] = 1; + } + } + + for (i = 0; i < nCols; i++) { + if (!colMarker[i]) { + if (m_appParam.LogLevel >= 3) { + if (modelCore->getColNames().size() > 0) + (*m_osLog) << "Column " << setw(5) << i << " -> " << setw(25) + << modelCore->colNames[i] << " is not in union of blocks." 
+ << endl; } - } - - //--- - //--- create an extra "empty" block for the master-only vars - //--- since I don't know what OSI will do with empty problem - //--- we will make column bounds explicity rows - //--- - ///////////STOP - don't need anymore if DECOMP_MASTERONLY_DIRECT + modelCore->masterOnlyCols.push_back(i); + } + } + if (m_appParam.LogLevel >= 3) { + (*m_osLog) << "Master only columns:" << endl; + UtilPrintVector(modelCore->masterOnlyCols, m_osLog); + if (modelCore->getColNames().size() > 0) + UtilPrintVector(modelCore->masterOnlyCols, modelCore->getColNames(), + m_osLog); + } + + //--- + //--- set core and system in framework + //--- + setModelCore(modelCore, "core"); + + for (mdi = m_modelR.begin(); mdi != m_modelR.end(); mdi++) { + DecompConstraintSet *modelRelax = (*mdi).second; + //--- + //--- set system in framework + //--- + setModelRelax((*mdi).second, "relax" + UtilIntToStr((*mdi).first), + (*mdi).first); + + if (m_appParam.LogLevel >= 3) { + (*m_osLog) << "Active Columns:" << endl; + UtilPrintVector(modelRelax->activeColumns, m_osLog); + if (modelCore->getColNames().size() > 0) + UtilPrintVector(modelRelax->activeColumns, modelCore->getColNames(), + m_osLog); + } + } + + //--- + //--- create an extra "empty" block for the master-only vars + //--- since I don't know what OSI will do with empty problem + //--- we will make column bounds explicity rows + //--- + ///////////STOP - don't need anymore if DECOMP_MASTERONLY_DIRECT #if 0 int nMasterOnlyCols = static_cast(modelCore->masterOnlyCols.size()); if(nMasterOnlyCols){ @@ -875,41 +852,40 @@ void MILPBlock_DecompApp::createModels(){ createModelMasterOnlys(modelCore->masterOnlyCols); } #endif - - //--- - //--- free up local memory - //--- - UTIL_DELARR(rowsMarker); - UTIL_DELARR(rowsCore); - UTIL_DELARR(colMarker); - - UtilPrintFuncEnd(m_osLog, m_classTag, - "createModels()", m_appParam.LogLevel, 2); - //exit(1); -} + //--- + //--- free up local memory + //--- + UTIL_DELARR(rowsMarker); + UTIL_DELARR(rowsCore); + UTIL_DELARR(colMarker); + + UtilPrintFuncEnd(m_osLog, m_classTag, "createModels()", m_appParam.LogLevel, + 2); + // exit(1); +} //===========================================================================// -int MILPBlock_DecompApp::generateInitVars(DecompVarList & initVars){ - UtilPrintFuncBegin(m_osLog, m_classTag, - "generateInitVars()", m_appParam.LogLevel, 2); +int MILPBlock_DecompApp::generateInitVars(DecompVarList &initVars) { + UtilPrintFuncBegin(m_osLog, m_classTag, "generateInitVars()", + m_appParam.LogLevel, 2); - readInitSolutionFile(initVars); - - UtilPrintFuncEnd(m_osLog, m_classTag, - "generateInitVars()", m_appParam.LogLevel, 2); - return static_cast(initVars.size()); + readInitSolutionFile(initVars); + + UtilPrintFuncEnd(m_osLog, m_classTag, "generateInitVars()", + m_appParam.LogLevel, 2); + return static_cast(initVars.size()); } /* #if 0 //===========================================================================// -DecompSolverStatus +DecompSolverStatus MILPBlock_DecompApp::solveRelaxedNest(const int whichBlock, - const double * redCostX, - const double convexDual, - DecompVarList & varList){ - + const double * redCostX, + const double convexDual, + DecompVarList & varList){ + //--- //--- solve full model heuristically as IP //--- if get incumbent, break them out into approriate blocks @@ -918,12 +894,12 @@ MILPBlock_DecompApp::solveRelaxedNest(const int whichBlock, UtilPrintFuncBegin(m_osLog, m_classTag, - "solveRelaxedNest()", m_appParam.LogLevel, 2); - + "solveRelaxedNest()", 
m_appParam.LogLevel, 2); + /////STOP //--- - //--- this allows user direct access to access methods in - //--- algorithm interface (in case they want to use any + //--- this allows user direct access to access methods in + //--- algorithm interface (in case they want to use any //--- of its data) //--- //--- for example, if the user wants to enforce the branching @@ -937,7 +913,7 @@ MILPBlock_DecompApp::solveRelaxedNest(const int whichBlock, //--- //--- in the case where oracle=MCKP0, we have a specialized solver //--- in the case where oracle=MDKP, we do not have a specialized solver - //--- so, we just return with no solution and the framework will + //--- so, we just return with no solution and the framework will //--- attempt to use the built-in MILP solver //--- DecompSolverStatus solverStatus = DecompSolStatNoSolution; @@ -951,25 +927,25 @@ MILPBlock_DecompApp::solveRelaxedNest(const int whichBlock, mcknapK->solveMCKnap(redCostX, m_objective, solInd, solEls, varRedCost, varOrigCost); assert(static_cast(solInd.size()) == m_instance.getNGroupRows()); - + UTIL_DEBUG(m_param.LogDebugLevel, 4, - printf("PUSH var with k = %d RC = %g origCost = %g\n", + printf("PUSH var with k = %d RC = %g origCost = %g\n", whichBlock, varRedCost - convexDual, varOrigCost); ); - + //the user should not have to calculate orig cost too // the framework can do this... in fact the framework shoudl // calculate red-cost too... but the user might want to check this stuff - + //this is way too confusing for user to remember they need -alpha! // let framework do that - also setting the block id - framework! - DecompVar * var = new DecompVar(solInd, solEls, + DecompVar * var = new DecompVar(solInd, solEls, varRedCost - convexDual, varOrigCost); var->setBlockId(whichBlock); - varList.push_back(var); + varList.push_back(var); solverStatus = DecompSolStatOptimal; } - + UtilPrintFuncEnd(m_osLog, m_classTag, "solveRelaxedNest()", m_appParam.LogLevel, 2); return solverStatus; diff --git a/Dip/examples/MILPBlock/MILPBlock_Main.cpp b/Dip/examples/MILPBlock/MILPBlock_Main.cpp index 157a8daa..f60aa97c 100644 --- a/Dip/examples/MILPBlock/MILPBlock_Main.cpp +++ b/Dip/examples/MILPBlock/MILPBlock_Main.cpp @@ -26,156 +26,145 @@ #include "UtilTimer.h" //===========================================================================// -int main(int argc, char ** argv){ - try{ - - //--- - //--- create the utility class for parsing parameters - //--- - UtilParameters utilParam(argc, argv); - - bool doCut = utilParam.GetSetting("doCut", true); - bool doPriceCut = utilParam.GetSetting("doPriceCut", false); - bool doDirect = utilParam.GetSetting("doDirect", false); - - UtilTimer timer; - double timeSetupReal = 0.0; - double timeSetupCpu = 0.0; - double timeSolveReal = 0.0; - double timeSolveCpu = 0.0; +int main(int argc, char **argv) { + try { + + //--- + //--- create the utility class for parsing parameters + //--- + UtilParameters utilParam(argc, argv); + + bool doCut = utilParam.GetSetting("doCut", true); + bool doPriceCut = utilParam.GetSetting("doPriceCut", false); + bool doDirect = utilParam.GetSetting("doDirect", false); + + UtilTimer timer; + double timeSetupReal = 0.0; + double timeSetupCpu = 0.0; + double timeSolveReal = 0.0; + double timeSolveCpu = 0.0; + + //--- + //--- start overall timer + //--- + timer.start(); + + //--- + //--- create the user application (a DecompApp) + //--- + MILPBlock_DecompApp milp(utilParam); + + //--- + //--- create the algorithm (a DecompAlgo) + //--- + DecompAlgo *algo = NULL; + if ((doCut + 
doPriceCut) != 1) + throw UtilException("doCut or doPriceCut must be set", "main", "main"); + // assert(doCut + doPriceCut == 1); + + //--- + //--- create the CPM algorithm object + //--- + if (doCut) + algo = new DecompAlgoC(&milp, utilParam); + + //--- + //--- create the PC algorithm object + //--- + if (doPriceCut) + algo = new DecompAlgoPC(&milp, utilParam); + + if (doCut && doDirect) { + timer.stop(); + timeSetupCpu = timer.getCpuTime(); + timeSetupReal = timer.getRealTime(); //--- - //--- start overall timer + //--- solve //--- timer.start(); - + algo->solveDirect(); + timer.stop(); + timeSolveCpu = timer.getCpuTime(); + timeSolveReal = timer.getRealTime(); + } else { //--- - //--- create the user application (a DecompApp) + //--- create the driver AlpsDecomp model //--- - MILPBlock_DecompApp milp(utilParam); + AlpsDecompModel alpsModel(utilParam, algo); + + timer.stop(); + timeSetupCpu = timer.getCpuTime(); + timeSetupReal = timer.getRealTime(); //--- - //--- create the algorithm (a DecompAlgo) + //--- solve //--- - DecompAlgo * algo = NULL; - if((doCut + doPriceCut) != 1) - throw UtilException("doCut or doPriceCut must be set", - "main", "main"); - //assert(doCut + doPriceCut == 1); + timer.start(); + alpsModel.solve(); + timer.stop(); + timeSolveCpu = timer.getCpuTime(); + timeSolveReal = timer.getRealTime(); //--- - //--- create the CPM algorithm object - //--- - if(doCut) - algo = new DecompAlgoC(&milp, utilParam); - + //--- sanity check //--- - //--- create the PC algorithm object + cout << setiosflags(ios::fixed | ios::showpoint); + cout << "Status= " << alpsModel.getSolStatus() << " BestLB= " << setw(10) + << UtilDblToStr(alpsModel.getGlobalLB(), 5) + << " BestUB= " << setw(10) + << UtilDblToStr(alpsModel.getGlobalUB(), 5) << " Nodes= " << setw(6) + << alpsModel.getNumNodesProcessed() << " SetupCPU= " << timeSetupCpu + << " SolveCPU= " << timeSolveCpu + << " TotalCPU= " << timeSetupCpu + timeSolveCpu + << " SetupReal= " << timeSetupReal << " SolveReal= " << timeSolveReal + << " TotalReal= " << timeSetupReal + timeSolveReal << endl; + //--- - if(doPriceCut) - algo = new DecompAlgoPC(&milp, utilParam); - - if(doCut && doDirect){ - timer.stop(); - timeSetupCpu = timer.getCpuTime(); - timeSetupReal = timer.getRealTime(); - - //--- - //--- solve - //--- - timer.start(); - algo->solveDirect(); - timer.stop(); - timeSolveCpu = timer.getCpuTime(); - timeSolveReal = timer.getRealTime(); + //--- sanity check + //--- if user defines bestLB==bestUB (i.e., known optimal) + //--- and solved claims we have optimal, check that they match + //--- + double epsilon = 0.01; // 1% + double userLB = milp.getBestKnownLB(); + double userUB = milp.getBestKnownUB(); + double userDiff = fabs(userUB - userLB); + if (alpsModel.getSolStatus() == AlpsExitStatusOptimal && + userDiff < epsilon) { + double diff = fabs(alpsModel.getGlobalUB() - userUB); + double diffPer = userUB == 0 ? diff : diff / userUB; + if (diffPer > epsilon) { + cerr << setiosflags(ios::fixed | ios::showpoint); + cerr << "ERROR. 
BestKnownLB/UB= " << UtilDblToStr(userUB, 5) + << " but DIP claims GlobalUB= " + << UtilDblToStr(alpsModel.getGlobalUB(), 5) << endl; + throw UtilException("Invalid claim of optimal.", "main", "MILPBlock"); + } } - else{ - //--- - //--- create the driver AlpsDecomp model - //--- - AlpsDecompModel alpsModel(utilParam, algo); - - timer.stop(); - timeSetupCpu = timer.getCpuTime(); - timeSetupReal = timer.getRealTime(); - - //--- - //--- solve - //--- - timer.start(); - alpsModel.solve(); - timer.stop(); - timeSolveCpu = timer.getCpuTime(); - timeSolveReal = timer.getRealTime(); - - //--- - //--- sanity check - //--- - cout << setiosflags(ios::fixed|ios::showpoint); - cout << "Status= " << alpsModel.getSolStatus() - << " BestLB= " << setw(10) - << UtilDblToStr(alpsModel.getGlobalLB(),5) - << " BestUB= " << setw(10) - << UtilDblToStr(alpsModel.getGlobalUB(),5) - << " Nodes= " << setw(6) - << alpsModel.getNumNodesProcessed() - << " SetupCPU= " << timeSetupCpu - << " SolveCPU= " << timeSolveCpu - << " TotalCPU= " << timeSetupCpu + timeSolveCpu - << " SetupReal= " << timeSetupReal - << " SolveReal= " << timeSolveReal - << " TotalReal= " << timeSetupReal + timeSolveReal - << endl; - - //--- - //--- sanity check - //--- if user defines bestLB==bestUB (i.e., known optimal) - //--- and solved claims we have optimal, check that they match - //--- - double epsilon = 0.01; //1% - double userLB = milp.getBestKnownLB(); - double userUB = milp.getBestKnownUB(); - double userDiff = fabs(userUB - userLB); - if(alpsModel.getSolStatus() == AlpsExitStatusOptimal && - userDiff < epsilon){ - double diff = fabs(alpsModel.getGlobalUB() - userUB); - double diffPer= userUB == 0 ? diff : diff / userUB; - if(diffPer > epsilon){ - cerr << setiosflags(ios::fixed|ios::showpoint); - cerr << "ERROR. 
BestKnownLB/UB= " - << UtilDblToStr(userUB,5) - << " but DIP claims GlobalUB= " - << UtilDblToStr(alpsModel.getGlobalUB(),5) - << endl; - throw UtilException("Invalid claim of optimal.", - "main", "MILPBlock"); - } - } - - //--- - //--- get optimal solution - //--- - if(alpsModel.getSolStatus() == AlpsExitStatusOptimal){ - string solutionFile = milp.getInstanceName() + ".sol"; - ofstream osSolution(solutionFile.c_str()); - const DecompSolution * solution = alpsModel.getBestSolution(); - const vector & colNames = alpsModel.getColNames(); - cout << "Optimal Solution" << endl; - solution->print(colNames, 8, osSolution); - osSolution.close(); - } - - //--- - //--- free local memory - //--- - delete algo; + + //--- + //--- get optimal solution + //--- + if (alpsModel.getSolStatus() == AlpsExitStatusOptimal) { + string solutionFile = milp.getInstanceName() + ".sol"; + ofstream osSolution(solutionFile.c_str()); + const DecompSolution *solution = alpsModel.getBestSolution(); + const vector &colNames = alpsModel.getColNames(); + cout << "Optimal Solution" << endl; + solution->print(colNames, 8, osSolution); + osSolution.close(); } - } - catch(CoinError & ex){ - cerr << "COIN Exception [ " << ex.message() << " ]" - << " at " << ex.fileName() << ":L" << ex.lineNumber() - << " in " << ex.className() << "::" << ex.methodName() << endl; - return 1; - } - return 0; -} + + //--- + //--- free local memory + //--- + delete algo; + } + } catch (CoinError &ex) { + cerr << "COIN Exception [ " << ex.message() << " ]" + << " at " << ex.fileName() << ":L" << ex.lineNumber() << " in " + << ex.className() << "::" << ex.methodName() << endl; + return 1; + } + return 0; +} diff --git a/Dip/examples/MMKP/MMKP_DecompApp.cpp b/Dip/examples/MMKP/MMKP_DecompApp.cpp index 21ec90c4..9b1a976b 100644 --- a/Dip/examples/MMKP/MMKP_DecompApp.cpp +++ b/Dip/examples/MMKP/MMKP_DecompApp.cpp @@ -13,554 +13,545 @@ //===========================================================================// //===========================================================================// -#include "DecompVar.h" #include "MMKP_DecompApp.h" +#include "DecompVar.h" //===========================================================================// void MMKP_DecompApp::initializeApp() { - - UtilPrintFuncBegin(m_osLog, m_classTag, - "initializeApp()", m_appParam.LogLevel, 2); - - //--- - //--- read problem instance - //--- - string instanceFile = m_appParam.DataDir - + UtilDirSlash() + m_appParam.Instance; - string dataFormat = m_appParam.DataFormat; - if(dataFormat == "khan" || dataFormat == "hifi") - m_instance.readInstance(instanceFile, dataFormat); - else if(dataFormat == "gsimon") - m_instance.readInstanceSimon(instanceFile); - else - throw UtilException("Unknown data format", - "initializeApp", "MMKP_DecompApp"); - - //--- - //--- read best known lb/ub - //--- - if(dataFormat == "khan"){ - string bestKnownFile = m_appParam.DataDir + UtilDirSlash() + "mmkp.opt"; - m_instance.readBestKnown(bestKnownFile, m_appParam.Instance); - setBestKnownLB(m_instance.getBestKnownLB()); - setBestKnownUB(m_instance.getBestKnownUB()); - } - - //--- - //--- open space for MMKP_MCKnap objects - //--- - int nKnapRows = m_instance.getNKnapRows(); - int nGroupRows = m_instance.getNGroupRows(); - int nGroupCols = m_instance.getNGroupCols(); - const double * capacity = m_instance.getCapacity(); - const double * const * weight = m_instance.getWeight(); - MMKP_MCKnap * mcknapK = NULL; - - int k; - m_mcknap.reserve(nKnapRows); - for(k = 0; k < nKnapRows; k++){ - mcknapK = new 
MMKP_MCKnap(nGroupRows, nGroupCols); - mcknapK->setMCKnapData(capacity[k], weight[k]); - m_mcknap.push_back(mcknapK); - } - - //--- - //--- open memory for auxiliary memory pool - need? - //--- - m_auxMemPool.allocateMemory(nGroupRows * nGroupCols); - - //--- - //--- create models - //--- - createModels(); - - UtilPrintFuncEnd(m_osLog, m_classTag, - "initializeApp()", m_appParam.LogLevel, 2); + + UtilPrintFuncBegin(m_osLog, m_classTag, "initializeApp()", + m_appParam.LogLevel, 2); + + //--- + //--- read problem instance + //--- + string instanceFile = + m_appParam.DataDir + UtilDirSlash() + m_appParam.Instance; + string dataFormat = m_appParam.DataFormat; + if (dataFormat == "khan" || dataFormat == "hifi") + m_instance.readInstance(instanceFile, dataFormat); + else if (dataFormat == "gsimon") + m_instance.readInstanceSimon(instanceFile); + else + throw UtilException("Unknown data format", "initializeApp", + "MMKP_DecompApp"); + + //--- + //--- read best known lb/ub + //--- + if (dataFormat == "khan") { + string bestKnownFile = m_appParam.DataDir + UtilDirSlash() + "mmkp.opt"; + m_instance.readBestKnown(bestKnownFile, m_appParam.Instance); + setBestKnownLB(m_instance.getBestKnownLB()); + setBestKnownUB(m_instance.getBestKnownUB()); + } + + //--- + //--- open space for MMKP_MCKnap objects + //--- + int nKnapRows = m_instance.getNKnapRows(); + int nGroupRows = m_instance.getNGroupRows(); + int nGroupCols = m_instance.getNGroupCols(); + const double *capacity = m_instance.getCapacity(); + const double *const *weight = m_instance.getWeight(); + MMKP_MCKnap *mcknapK = NULL; + + int k; + m_mcknap.reserve(nKnapRows); + for (k = 0; k < nKnapRows; k++) { + mcknapK = new MMKP_MCKnap(nGroupRows, nGroupCols); + mcknapK->setMCKnapData(capacity[k], weight[k]); + m_mcknap.push_back(mcknapK); + } + + //--- + //--- open memory for auxiliary memory pool - need? + //--- + m_auxMemPool.allocateMemory(nGroupRows * nGroupCols); + + //--- + //--- create models + //--- + createModels(); + + UtilPrintFuncEnd(m_osLog, m_classTag, "initializeApp()", m_appParam.LogLevel, + 2); } //===========================================================================// -void MMKP_DecompApp::createModels(){ - - //--- - //--- This function does the work to create the different models - //--- that will be used. This memory is owned by the user. It will - //--- be passed to the application interface and used by the algorithms. - //--- - - //--- - //--- Get information about this problem instance. - //--- - int i; - const int nGroupRows = m_instance.getNGroupRows(); - const int nGroupCols = m_instance.getNGroupCols(); - const int nKnapRows = m_instance.getNKnapRows(); - const double * value = m_instance.getValue(); - const int numCols = nGroupRows * nGroupCols; - - //--- - //--- Multi-Dimensional Multi-Choice Knapsack Problem (MMKP). - //--- - //--- max sum{i in 1..n, j in 1..l[i]} v[i,j] x[i,j] <==> - //--- min sum{i in 1..n, j in 1..l[i]} -v[i,j] x[i,j] - //--- s.t. sum{i in 1..n, j in 1..l[i]} r[k,i,j] x[i,j] <= b[k], k in 1..m - //--- sum{j in 1..l[i]} x[i,j] = 1 , i in 1..n - //--- x[i,j] in {0,1}, i in 1..n, j in 1..l[i] - //--- - //--- Multi-Choice Knapsack Polytope (MCKP) [for a fixed k] - //--- s.t. sum{i in 1..n, j in 1..l[i]} r[k,i,j] x[i,j] <= b[k] - //--- sum{j in 1..l[i]} x[i,j] = 1 , i in 1..n - //--- x[i,j] in {0,1}, i in 1..n, j in 1..l[i] - //--- - //--- Multi-Dimensional Knapsack Polytope (MDKP) - //--- s.t. 
sum{i in 1..n, j in 1..l[i]} r[k,i,j] x[i,j] <= b[k], k in 1..m - //--- x[i,j] in {0,1}, i in 1..n, j in 1..l[i] - //--- - - //--- - //--- MMKP example structure (n=4, m=3) - //--- xxxx <= b[1] - //--- xxxx <= b[2] - //--- xxxx <= b[3] - //--- x = 1 (i=1) - //--- x = 1 (i=2) - //--- x = 1 (i=3) - //--- x = 1 (i=4) - //--- - //--- MDKP example structure - //--- xxxx <= b[1] - //--- xxxx <= b[2] - //--- xxxx <= b[3] - //--- - //--- MCKP example structure - //--- xxxx <= b[1] - //--- x = 1 (i=1) - //--- x = 1 (i=2) - //--- x = 1 (i=3) - //--- x = 1 (i=4) - //--- - //--- MC2KP example structure - //--- xxxx <= b[1] - //--- xxxx <= b[2] - //--- x = 1 (i=1) - //--- x = 1 (i=2) - //--- x = 1 (i=3) - //--- x = 1 (i=4) - //--- - - UtilPrintFuncBegin(m_osLog, m_classTag, - "createModels()", m_appParam.LogLevel, 2); - - //--- - //--- Construct the objective function (the original problem is - //--- a maximization, so we flip the sign to make it minimization). - //--- - m_objective = new double[numCols]; - if(!m_objective) - throw UtilExceptionMemory("createModels", "MMKP_DecompApp"); - for(i = 0; i < numCols; i++) - m_objective[i] = -value[i]; - setModelObjective(m_objective, numCols); - - //--- - //--- Model - //--- relax = MCKP (with knapsack constraint 1 ) - //--- core = MDKP (with knapsack constraints 2..m) - //--- - //--- A' (relax) = MCKP - //--- sum{i in 1..n, j in 1..l[i]} r[1,i,j] x[i,j] <= b[1] - //--- sum{j in 1..l[i]} x[i,j] = 1 , i in 1..n - //--- x[i,j] in {0,1}, i in 1..n, j in 1..l[i] - //--- A'' (core) - //--- sum{i in 1..n, j in 1..l[i]} r[k,i,j] x[i,j] <= b[k], k in 2..m - //--- x[i,j] in [0,1], i in 1..n, j in 1..l[i] - //--- - if(m_appParam.ModelNameCore == "MDKP0"){ - DecompConstraintSet * modelCore = new DecompConstraintSet(); - createModelPartMDKPCompl(modelCore); - m_models.push_back(modelCore); - setModelCore(modelCore, m_appParam.ModelNameCore); - } - if(m_appParam.ModelNameRelax == "MCKP0"){ - DecompConstraintSet * modelRelax = new DecompConstraintSet(); - createModelPartMCKP(modelRelax); - m_models.push_back(modelRelax); - setModelRelax(modelRelax, m_appParam.ModelNameRelax); - } - - //--- - //--- Model (nested oracles) - //--- relax = MCKP (with knapsack constraint 1 ) - //--- core = MDKP (with knapsack constraints 2..m) - //--- relax*[b] = MCKP (with knapsack constraints 1,b ), b=2..m - //--- - //--- A'[b] (relax*) = MC2KP0 - //--- sum{i in 1..n, j in 1..l[i]} r[1,i,j] x[i,j] <= b[k], k in {1,b} - //--- sum{j in 1..l[i]} x[i,j] = 1 , i in 1..n - //--- x[i,j] in {0,1}, i in 1..n, j in 1..l[i] - //--- - if(m_appParam.ModelNameRelaxNest == "MC2KP0"){ - for(i = 1; i < nKnapRows; i++){ - DecompConstraintSet * modelRelax = new DecompConstraintSet(); - createModelPartMC2KP(modelRelax, 0, i); - m_models.push_back(modelRelax); - setModelRelaxNest(modelRelax, - m_appParam.ModelNameRelaxNest + UtilIntToStr(i)); - } - } - - //--- - //--- Model - //--- relax = MDKP (with knapsack constraints 1..m) - //--- core = MCP (with choice constraints) - //--- - //--- A' (relax) = MDKP - //--- sum{i in 1..n, j in 1..l[i]} r[k,i,j] x[i,j] <= b[k], k in 1..m - //--- x[i,j] in {0,1}, i in 1..n, j in 1..l[i] - //--- A'' (master) - //--- sum{j in 1..l[i]} x[i,j] = 1 , i in 1..n - //--- x[i,j] in [0,1], i in 1..n, j in 1..l[i] - //--- - if(m_appParam.ModelNameCore == "MCP"){ - DecompConstraintSet * modelCore = new DecompConstraintSet(); - createModelPartMCP(modelCore); - m_models.push_back(modelCore); - setModelCore(modelCore, m_appParam.ModelNameCore); - } - if(m_appParam.ModelNameRelax == "MDKP"){ 
- DecompConstraintSet * modelRelax = new DecompConstraintSet(); - createModelPartMDKP(modelRelax); - m_models.push_back(modelRelax); - setModelRelax(modelRelax, m_appParam.ModelNameRelax); - } - - //--- - //--- Model - //--- relax = MDKP (with knapsack constraints 1..floor(m/2)) - //--- relax = MDKP (with knapsack constraints 1..m) -> nested - //--- core = MCP (with choice constraints) - //--- - //--- A' (relax) = MDKP - //--- sum{i in 1..n, j in 1..l[i]} r[k,i,j] x[i,j] <= b[k], k in 1..m - //--- x[i,j] in {0,1}, i in 1..n, j in 1..l[i] - //--- A'' (master) + missing from MDKP - //--- sum{j in 1..l[i]} x[i,j] = 1 , i in 1..n - //--- x[i,j] in [0,1], i in 1..n, j in 1..l[i] - //--- - //--- which half? those with tight knapsack - //--- - if(m_appParam.ModelNameCore == "MMKPHalf"){ - DecompConstraintSet * modelCore = new DecompConstraintSet(); - createModelPartMMKPHalf(modelCore); - m_models.push_back(modelCore); - setModelCore(modelCore, m_appParam.ModelNameCore); - } - if(m_appParam.ModelNameRelax == "MDKPHalf"){ - DecompConstraintSet * modelRelax = new DecompConstraintSet(); - createModelPartMDKPHalf(modelRelax); +void MMKP_DecompApp::createModels() { + + //--- + //--- This function does the work to create the different models + //--- that will be used. This memory is owned by the user. It will + //--- be passed to the application interface and used by the algorithms. + //--- + + //--- + //--- Get information about this problem instance. + //--- + int i; + const int nGroupRows = m_instance.getNGroupRows(); + const int nGroupCols = m_instance.getNGroupCols(); + const int nKnapRows = m_instance.getNKnapRows(); + const double *value = m_instance.getValue(); + const int numCols = nGroupRows * nGroupCols; + + //--- + //--- Multi-Dimensional Multi-Choice Knapsack Problem (MMKP). + //--- + //--- max sum{i in 1..n, j in 1..l[i]} v[i,j] x[i,j] <==> + //--- min sum{i in 1..n, j in 1..l[i]} -v[i,j] x[i,j] + //--- s.t. sum{i in 1..n, j in 1..l[i]} r[k,i,j] x[i,j] <= b[k], k in 1..m + //--- sum{j in 1..l[i]} x[i,j] = 1 , i in 1..n + //--- x[i,j] in {0,1}, i in 1..n, j in 1..l[i] + //--- + //--- Multi-Choice Knapsack Polytope (MCKP) [for a fixed k] + //--- s.t. sum{i in 1..n, j in 1..l[i]} r[k,i,j] x[i,j] <= b[k] + //--- sum{j in 1..l[i]} x[i,j] = 1 , i in 1..n + //--- x[i,j] in {0,1}, i in 1..n, j in 1..l[i] + //--- + //--- Multi-Dimensional Knapsack Polytope (MDKP) + //--- s.t. sum{i in 1..n, j in 1..l[i]} r[k,i,j] x[i,j] <= b[k], k in 1..m + //--- x[i,j] in {0,1}, i in 1..n, j in 1..l[i] + //--- + + //--- + //--- MMKP example structure (n=4, m=3) + //--- xxxx <= b[1] + //--- xxxx <= b[2] + //--- xxxx <= b[3] + //--- x = 1 (i=1) + //--- x = 1 (i=2) + //--- x = 1 (i=3) + //--- x = 1 (i=4) + //--- + //--- MDKP example structure + //--- xxxx <= b[1] + //--- xxxx <= b[2] + //--- xxxx <= b[3] + //--- + //--- MCKP example structure + //--- xxxx <= b[1] + //--- x = 1 (i=1) + //--- x = 1 (i=2) + //--- x = 1 (i=3) + //--- x = 1 (i=4) + //--- + //--- MC2KP example structure + //--- xxxx <= b[1] + //--- xxxx <= b[2] + //--- x = 1 (i=1) + //--- x = 1 (i=2) + //--- x = 1 (i=3) + //--- x = 1 (i=4) + //--- + + UtilPrintFuncBegin(m_osLog, m_classTag, "createModels()", m_appParam.LogLevel, + 2); + + //--- + //--- Construct the objective function (the original problem is + //--- a maximization, so we flip the sign to make it minimization). 
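//--- (Illustration with hypothetical numbers: values v = {10, 7, 3} are loaded
//--- as -v = {-10, -7, -3}; a minimizer then reports -10, and negating that
//--- recovers the true maximum of 10.)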
+ //--- + m_objective = new double[numCols]; + if (!m_objective) + throw UtilExceptionMemory("createModels", "MMKP_DecompApp"); + for (i = 0; i < numCols; i++) + m_objective[i] = -value[i]; + setModelObjective(m_objective, numCols); + + //--- + //--- Model + //--- relax = MCKP (with knapsack constraint 1 ) + //--- core = MDKP (with knapsack constraints 2..m) + //--- + //--- A' (relax) = MCKP + //--- sum{i in 1..n, j in 1..l[i]} r[1,i,j] x[i,j] <= b[1] + //--- sum{j in 1..l[i]} x[i,j] = 1 , i in 1..n + //--- x[i,j] in {0,1}, i in 1..n, j in 1..l[i] + //--- A'' (core) + //--- sum{i in 1..n, j in 1..l[i]} r[k,i,j] x[i,j] <= b[k], k in 2..m + //--- x[i,j] in [0,1], i in 1..n, j in 1..l[i] + //--- + if (m_appParam.ModelNameCore == "MDKP0") { + DecompConstraintSet *modelCore = new DecompConstraintSet(); + createModelPartMDKPCompl(modelCore); + m_models.push_back(modelCore); + setModelCore(modelCore, m_appParam.ModelNameCore); + } + if (m_appParam.ModelNameRelax == "MCKP0") { + DecompConstraintSet *modelRelax = new DecompConstraintSet(); + createModelPartMCKP(modelRelax); + m_models.push_back(modelRelax); + setModelRelax(modelRelax, m_appParam.ModelNameRelax); + } + + //--- + //--- Model (nested oracles) + //--- relax = MCKP (with knapsack constraint 1 ) + //--- core = MDKP (with knapsack constraints 2..m) + //--- relax*[b] = MCKP (with knapsack constraints 1,b ), b=2..m + //--- + //--- A'[b] (relax*) = MC2KP0 + //--- sum{i in 1..n, j in 1..l[i]} r[1,i,j] x[i,j] <= b[k], k in {1,b} + //--- sum{j in 1..l[i]} x[i,j] = 1 , i in 1..n + //--- x[i,j] in {0,1}, i in 1..n, j in 1..l[i] + //--- + if (m_appParam.ModelNameRelaxNest == "MC2KP0") { + for (i = 1; i < nKnapRows; i++) { + DecompConstraintSet *modelRelax = new DecompConstraintSet(); + createModelPartMC2KP(modelRelax, 0, i); m_models.push_back(modelRelax); - setModelRelax(modelRelax, m_appParam.ModelNameRelax); - } - if(m_appParam.ModelNameRelaxNest == "MDKP"){ - DecompConstraintSet * modelRelax = new DecompConstraintSet(); - createModelPartMDKP(modelRelax); - m_models.push_back(modelRelax); - setModelRelaxNest(modelRelax, m_appParam.ModelNameRelax); - } - if(m_appParam.ModelNameRelaxNest == "MMKP"){ - DecompConstraintSet * modelRelax = new DecompConstraintSet(); - createModelPartMMKP(modelRelax); - m_models.push_back(modelRelax); - setModelRelaxNest(modelRelax, m_appParam.ModelNameRelax); - } - UtilPrintFuncEnd(m_osLog, m_classTag, - "createModels()", m_appParam.LogLevel, 2); + setModelRelaxNest(modelRelax, + m_appParam.ModelNameRelaxNest + UtilIntToStr(i)); + } + } + + //--- + //--- Model + //--- relax = MDKP (with knapsack constraints 1..m) + //--- core = MCP (with choice constraints) + //--- + //--- A' (relax) = MDKP + //--- sum{i in 1..n, j in 1..l[i]} r[k,i,j] x[i,j] <= b[k], k in 1..m + //--- x[i,j] in {0,1}, i in 1..n, j in 1..l[i] + //--- A'' (master) + //--- sum{j in 1..l[i]} x[i,j] = 1 , i in 1..n + //--- x[i,j] in [0,1], i in 1..n, j in 1..l[i] + //--- + if (m_appParam.ModelNameCore == "MCP") { + DecompConstraintSet *modelCore = new DecompConstraintSet(); + createModelPartMCP(modelCore); + m_models.push_back(modelCore); + setModelCore(modelCore, m_appParam.ModelNameCore); + } + if (m_appParam.ModelNameRelax == "MDKP") { + DecompConstraintSet *modelRelax = new DecompConstraintSet(); + createModelPartMDKP(modelRelax); + m_models.push_back(modelRelax); + setModelRelax(modelRelax, m_appParam.ModelNameRelax); + } + + //--- + //--- Model + //--- relax = MDKP (with knapsack constraints 1..floor(m/2)) + //--- relax = MDKP (with knapsack 
constraints 1..m) -> nested + //--- core = MCP (with choice constraints) + //--- + //--- A' (relax) = MDKP + //--- sum{i in 1..n, j in 1..l[i]} r[k,i,j] x[i,j] <= b[k], k in 1..m + //--- x[i,j] in {0,1}, i in 1..n, j in 1..l[i] + //--- A'' (master) + missing from MDKP + //--- sum{j in 1..l[i]} x[i,j] = 1 , i in 1..n + //--- x[i,j] in [0,1], i in 1..n, j in 1..l[i] + //--- + //--- which half? those with tight knapsack + //--- + if (m_appParam.ModelNameCore == "MMKPHalf") { + DecompConstraintSet *modelCore = new DecompConstraintSet(); + createModelPartMMKPHalf(modelCore); + m_models.push_back(modelCore); + setModelCore(modelCore, m_appParam.ModelNameCore); + } + if (m_appParam.ModelNameRelax == "MDKPHalf") { + DecompConstraintSet *modelRelax = new DecompConstraintSet(); + createModelPartMDKPHalf(modelRelax); + m_models.push_back(modelRelax); + setModelRelax(modelRelax, m_appParam.ModelNameRelax); + } + if (m_appParam.ModelNameRelaxNest == "MDKP") { + DecompConstraintSet *modelRelax = new DecompConstraintSet(); + createModelPartMDKP(modelRelax); + m_models.push_back(modelRelax); + setModelRelaxNest(modelRelax, m_appParam.ModelNameRelax); + } + if (m_appParam.ModelNameRelaxNest == "MMKP") { + DecompConstraintSet *modelRelax = new DecompConstraintSet(); + createModelPartMMKP(modelRelax); + m_models.push_back(modelRelax); + setModelRelaxNest(modelRelax, m_appParam.ModelNameRelax); + } + UtilPrintFuncEnd(m_osLog, m_classTag, "createModels()", m_appParam.LogLevel, + 2); } //===========================================================================// -void MMKP_DecompApp::createModelPartMDKP(DecompConstraintSet * model){ - vector whichKnaps; - int nKnapRows = m_instance.getNKnapRows(); - UtilIotaN(whichKnaps, nKnapRows, 0); - createModelPartMDKP(model, whichKnaps); +void MMKP_DecompApp::createModelPartMDKP(DecompConstraintSet *model) { + vector whichKnaps; + int nKnapRows = m_instance.getNKnapRows(); + UtilIotaN(whichKnaps, nKnapRows, 0); + createModelPartMDKP(model, whichKnaps); } //===========================================================================// -void MMKP_DecompApp::createModelPartMDKPCompl(DecompConstraintSet * model, - int whichKnap){ - int i; - vector whichKnaps; - const int nKnapRows = m_instance.getNKnapRows(); - for(i = 0; i < nKnapRows; i++){ - if(i == whichKnap) - continue; - whichKnaps.push_back(i); - } - createModelPartMDKP(model, whichKnaps); +void MMKP_DecompApp::createModelPartMDKPCompl(DecompConstraintSet *model, + int whichKnap) { + int i; + vector whichKnaps; + const int nKnapRows = m_instance.getNKnapRows(); + for (i = 0; i < nKnapRows; i++) { + if (i == whichKnap) + continue; + whichKnaps.push_back(i); + } + createModelPartMDKP(model, whichKnaps); } //===========================================================================// -void MMKP_DecompApp::createModelPartMDKPHalf(DecompConstraintSet * model){ - int i; - vector whichKnaps; - const int nKnapRows = m_instance.getNKnapRows(); - const int nHalfRows = static_cast(std::floor(nKnapRows/2.0)); - for(i = 0; i < nHalfRows; i++){ - whichKnaps.push_back(i); - } - createModelPartMDKP(model, whichKnaps); +void MMKP_DecompApp::createModelPartMDKPHalf(DecompConstraintSet *model) { + int i; + vector whichKnaps; + const int nKnapRows = m_instance.getNKnapRows(); + const int nHalfRows = static_cast(std::floor(nKnapRows / 2.0)); + for (i = 0; i < nHalfRows; i++) { + whichKnaps.push_back(i); + } + createModelPartMDKP(model, whichKnaps); } - //===========================================================================// -void 
MMKP_DecompApp::createModelPartMDKP(DecompConstraintSet * model, - vector & whichKnaps){ - - //--- - //--- Multi-Dimensional Knapsack Polytope - //--- sum{i in 1..n, j in 1..l[i]} r[k,i,j] x[i,j] <= b[k], k in K - //--- - int i, j, colIndex; - int nGroupRows = m_instance.getNGroupRows(); - int nGroupCols = m_instance.getNGroupCols(); - const double * capacity = m_instance.getCapacity(); - const double * const * weight = m_instance.getWeight(); - int nKnapRows = static_cast(whichKnaps.size()); - int numCols = nGroupRows * nGroupCols; - int numRows = nKnapRows; - - UtilPrintFuncBegin(m_osLog, m_classTag, - "createModelPartMDKP()", m_appParam.LogLevel, 2); - - model->M = new CoinPackedMatrix(false, 0.0, 0.0); - if(!model->M) - throw UtilExceptionMemory("createModelPartMDKP", "MMKP_DecompApp"); - model->M->setDimensions(0, numCols); - model->reserve(numRows, numCols); - - //--- - //--- sum{i in 1..n, j in 1..l[i]} r[k,i,j] x[i,j] <= b[k], k in K - //--- - vector::const_iterator vi; - for(vi = whichKnaps.begin(); vi != whichKnaps.end(); vi++){ - CoinPackedVector rowK; - const double * weightK = weight[*vi]; - colIndex = 0; - for(i = 0; i < nGroupRows; i++){ - for(j = 0; j < nGroupCols; j++){ - rowK.insert(colIndex, weightK[colIndex]); - colIndex++; - } +void MMKP_DecompApp::createModelPartMDKP(DecompConstraintSet *model, + vector &whichKnaps) { + + //--- + //--- Multi-Dimensional Knapsack Polytope + //--- sum{i in 1..n, j in 1..l[i]} r[k,i,j] x[i,j] <= b[k], k in K + //--- + int i, j, colIndex; + int nGroupRows = m_instance.getNGroupRows(); + int nGroupCols = m_instance.getNGroupCols(); + const double *capacity = m_instance.getCapacity(); + const double *const *weight = m_instance.getWeight(); + int nKnapRows = static_cast(whichKnaps.size()); + int numCols = nGroupRows * nGroupCols; + int numRows = nKnapRows; + + UtilPrintFuncBegin(m_osLog, m_classTag, "createModelPartMDKP()", + m_appParam.LogLevel, 2); + + model->M = new CoinPackedMatrix(false, 0.0, 0.0); + if (!model->M) + throw UtilExceptionMemory("createModelPartMDKP", "MMKP_DecompApp"); + model->M->setDimensions(0, numCols); + model->reserve(numRows, numCols); + + //--- + //--- sum{i in 1..n, j in 1..l[i]} r[k,i,j] x[i,j] <= b[k], k in K + //--- + vector::const_iterator vi; + for (vi = whichKnaps.begin(); vi != whichKnaps.end(); vi++) { + CoinPackedVector rowK; + const double *weightK = weight[*vi]; + colIndex = 0; + for (i = 0; i < nGroupRows; i++) { + for (j = 0; j < nGroupCols; j++) { + rowK.insert(colIndex, weightK[colIndex]); + colIndex++; } - model->appendRow(rowK, -m_infinity, capacity[*vi]); - } - - //--- - //--- set the col upper and lower bounds - //--- - UtilFillN(model->colLB, numCols, 0.0); - UtilFillN(model->colUB, numCols, 1.0); - - //--- - //--- set the indices of the integer variables of model - //--- - UtilIotaN(model->integerVars, numCols, 0); - - UtilPrintFuncEnd(m_osLog, m_classTag, - "createModelPartMDKP()", m_appParam.LogLevel, 2); + } + model->appendRow(rowK, -m_infinity, capacity[*vi]); + } + + //--- + //--- set the col upper and lower bounds + //--- + UtilFillN(model->colLB, numCols, 0.0); + UtilFillN(model->colUB, numCols, 1.0); + + //--- + //--- set the indices of the integer variables of model + //--- + UtilIotaN(model->integerVars, numCols, 0); + + UtilPrintFuncEnd(m_osLog, m_classTag, "createModelPartMDKP()", + m_appParam.LogLevel, 2); } //===========================================================================// -void MMKP_DecompApp::createModelPartMCP(DecompConstraintSet * model){ - vector whichKnaps; - 
createModelPartMCKP(model, whichKnaps); +void MMKP_DecompApp::createModelPartMCP(DecompConstraintSet *model) { + vector whichKnaps; + createModelPartMCKP(model, whichKnaps); } //===========================================================================// -void MMKP_DecompApp::createModelPartMCKP(DecompConstraintSet * model, - int whichKnap){ - vector whichKnaps; - whichKnaps.push_back(whichKnap); - createModelPartMCKP(model, whichKnaps); +void MMKP_DecompApp::createModelPartMCKP(DecompConstraintSet *model, + int whichKnap) { + vector whichKnaps; + whichKnaps.push_back(whichKnap); + createModelPartMCKP(model, whichKnaps); } //===========================================================================// -void MMKP_DecompApp::createModelPartMMKPHalf(DecompConstraintSet * model){ - vector whichKnaps; - int i; - const int nKnapRows = m_instance.getNKnapRows(); - const int nHalfRows = static_cast(std::floor(nKnapRows/2.0)); - for(i = nHalfRows; i < nKnapRows; i++) - whichKnaps.push_back(i); - createModelPartMCKP(model, whichKnaps); +void MMKP_DecompApp::createModelPartMMKPHalf(DecompConstraintSet *model) { + vector whichKnaps; + int i; + const int nKnapRows = m_instance.getNKnapRows(); + const int nHalfRows = static_cast(std::floor(nKnapRows / 2.0)); + for (i = nHalfRows; i < nKnapRows; i++) + whichKnaps.push_back(i); + createModelPartMCKP(model, whichKnaps); } //===========================================================================// -void MMKP_DecompApp::createModelPartMMKP(DecompConstraintSet * model){ - vector whichKnaps; - int i; - const int nKnapRows = m_instance.getNKnapRows(); - for(i = 0; i < nKnapRows; i++) - whichKnaps.push_back(i); - createModelPartMCKP(model, whichKnaps); +void MMKP_DecompApp::createModelPartMMKP(DecompConstraintSet *model) { + vector whichKnaps; + int i; + const int nKnapRows = m_instance.getNKnapRows(); + for (i = 0; i < nKnapRows; i++) + whichKnaps.push_back(i); + createModelPartMCKP(model, whichKnaps); } //===========================================================================// -void MMKP_DecompApp::createModelPartMC2KP(DecompConstraintSet * model, - int whichKnap1, - int whichKnap2){ - vector whichKnaps; - whichKnaps.push_back(whichKnap1); - whichKnaps.push_back(whichKnap2); - createModelPartMCKP(model, whichKnaps); +void MMKP_DecompApp::createModelPartMC2KP(DecompConstraintSet *model, + int whichKnap1, int whichKnap2) { + vector whichKnaps; + whichKnaps.push_back(whichKnap1); + whichKnaps.push_back(whichKnap2); + createModelPartMCKP(model, whichKnaps); } //===========================================================================// -void MMKP_DecompApp::createModelPartMCKP(DecompConstraintSet * model, - vector & whichKnaps){ - - - //--- - //--- Multi-Choice Knapsack Polytope [for a fixed k] (Subproblem[k]): - //--- sum{i in 1..n, j in 1..l[i]} r[k,i,j] x[i,j] <= b[k] - //--- sum{j in 1..l[i]} x[i,j] = 1 , i in 1..n - //--- - UtilPrintFuncBegin(m_osLog, m_classTag, - "createModelPartMCKP()", m_appParam.LogLevel, 2); - - - int i, j, colIndex; - int nGroupRows = m_instance.getNGroupRows(); - int nGroupCols = m_instance.getNGroupCols(); - const double * capacity = m_instance.getCapacity(); - const double * const * weight = m_instance.getWeight(); - int nKnapRows = static_cast(whichKnaps.size()); - int numCols = nGroupRows * nGroupCols; - int numRows = nGroupRows + nKnapRows; - - //TODO: should this all be more opaque? 
- model->M = new CoinPackedMatrix(false, 0.0, 0.0); - if(!model->M) - throw UtilExceptionMemory("createModelPartMDKP", "MMKP_DecompApp"); - model->M->setDimensions(0, numCols); - model->reserve(numRows, numCols); - - //--- - //--- s.t. sum{i in 1..n, j in 1..l[i]} r[k,i,j] x[i,j] <= b[k] - //--- - vector::const_iterator vi; - for(vi = whichKnaps.begin(); vi != whichKnaps.end(); vi++){ - CoinPackedVector rowK; - const double * weightK = weight[*vi]; - colIndex = 0; - for(i = 0; i < nGroupRows; i++){ - for(j = 0; j < nGroupCols; j++){ - rowK.insert(colIndex, weightK[colIndex]); - colIndex++; - } +void MMKP_DecompApp::createModelPartMCKP(DecompConstraintSet *model, + vector &whichKnaps) { + + //--- + //--- Multi-Choice Knapsack Polytope [for a fixed k] (Subproblem[k]): + //--- sum{i in 1..n, j in 1..l[i]} r[k,i,j] x[i,j] <= b[k] + //--- sum{j in 1..l[i]} x[i,j] = 1 , i in 1..n + //--- + UtilPrintFuncBegin(m_osLog, m_classTag, "createModelPartMCKP()", + m_appParam.LogLevel, 2); + + int i, j, colIndex; + int nGroupRows = m_instance.getNGroupRows(); + int nGroupCols = m_instance.getNGroupCols(); + const double *capacity = m_instance.getCapacity(); + const double *const *weight = m_instance.getWeight(); + int nKnapRows = static_cast(whichKnaps.size()); + int numCols = nGroupRows * nGroupCols; + int numRows = nGroupRows + nKnapRows; + + // TODO: should this all be more opaque? + model->M = new CoinPackedMatrix(false, 0.0, 0.0); + if (!model->M) + throw UtilExceptionMemory("createModelPartMDKP", "MMKP_DecompApp"); + model->M->setDimensions(0, numCols); + model->reserve(numRows, numCols); + + //--- + //--- s.t. sum{i in 1..n, j in 1..l[i]} r[k,i,j] x[i,j] <= b[k] + //--- + vector::const_iterator vi; + for (vi = whichKnaps.begin(); vi != whichKnaps.end(); vi++) { + CoinPackedVector rowK; + const double *weightK = weight[*vi]; + colIndex = 0; + for (i = 0; i < nGroupRows; i++) { + for (j = 0; j < nGroupCols; j++) { + rowK.insert(colIndex, weightK[colIndex]); + colIndex++; } - model->appendRow(rowK, -m_infinity, capacity[*vi]); - } - - //--- - //--- sum{j in 1..l[i]} x[i,j] = 1, i in 1..n - //--- - colIndex = 0; - for(i = 0; i < nGroupRows; i++){ - CoinPackedVector row; - for(j = 0; j < nGroupCols; j++){ - row.insert(colIndex, 1.0); - colIndex++; - } - model->appendRow(row, 1.0, 1.0); - } - - //--- - //--- set the col upper and lower bounds - //--- - UtilFillN(model->colLB, numCols, 0.0); - UtilFillN(model->colUB, numCols, 1.0); - - //--- - //--- set the indices of the integer variables of model - //--- - UtilIotaN(model->integerVars, numCols, 0); - - UtilPrintFuncEnd(m_osLog, m_classTag, - "createModelPartMCKP()", m_appParam.LogLevel, 2); + } + model->appendRow(rowK, -m_infinity, capacity[*vi]); + } + + //--- + //--- sum{j in 1..l[i]} x[i,j] = 1, i in 1..n + //--- + colIndex = 0; + for (i = 0; i < nGroupRows; i++) { + CoinPackedVector row; + for (j = 0; j < nGroupCols; j++) { + row.insert(colIndex, 1.0); + colIndex++; + } + model->appendRow(row, 1.0, 1.0); + } + + //--- + //--- set the col upper and lower bounds + //--- + UtilFillN(model->colLB, numCols, 0.0); + UtilFillN(model->colUB, numCols, 1.0); + + //--- + //--- set the indices of the integer variables of model + //--- + UtilIotaN(model->integerVars, numCols, 0); + + UtilPrintFuncEnd(m_osLog, m_classTag, "createModelPartMCKP()", + m_appParam.LogLevel, 2); } //===========================================================================// -DecompSolverStatus -MMKP_DecompApp::solveRelaxed(const int whichBlock, - const double * redCostX, - const 
double convexDual, - DecompVarList & varList){ - - if(!m_appParam.UsePisinger) - return DecompSolStatNoSolution; - - UtilPrintFuncBegin(m_osLog, m_classTag, - "solveRelaxed()", m_appParam.LogLevel, 2); - - //--- - //--- this allows user direct access to access methods in - //--- algorithm interface (in case they want to use any - //--- of its data) - //--- - //--- for example, if the user wants to enforce the branching - //--- decisions in the oracle - //--- TODO: can this be done using mcknap solver? - //--- - //const DecompAlgo * decompAlgo = getDecompAlgo(); - //const double * colLBNode = decompAlgo->getColLBNode(); - //const double * colUBNode = decompAlgo->getColUBNode(); - - //--- - //--- in the case where oracle=MCKP0, we have a specialized solver - //--- in the case where oracle=MDKP, we do not have a specialized solver - //--- so, we just return with no solution and the framework will - //--- attempt to use the built-in MILP solver - //--- - DecompSolverStatus solverStatus = DecompSolStatNoSolution; - if(m_appParam.ModelNameRelax == "MCKP0"){ - vector solInd; - vector solEls; - double varRedCost = 0.0; - double varOrigCost = 0.0; - MMKP_MCKnap * mcknapK = m_mcknap[whichBlock]; - //TODO: check status return codes here - mcknapK->solveMCKnap(redCostX, m_objective, - solInd, solEls, varRedCost, varOrigCost); - assert(static_cast(solInd.size()) == m_instance.getNGroupRows()); - - UTIL_DEBUG(m_param.LogDebugLevel, 4, - printf("PUSH var with k = %d RC = %g origCost = %g\n", - whichBlock, varRedCost - convexDual, varOrigCost); - ); - - //the user should not have to calculate orig cost too - // the framework can do this... in fact the framework shoudl - // calculate red-cost too... but the user might want to check this stuff - - //this is way too confusing for user to remember they need -alpha! - // let framework do that - also setting the block id - framework! - DecompVar * var = new DecompVar(solInd, solEls, - varRedCost - convexDual, varOrigCost); - var->setBlockId(whichBlock); - varList.push_back(var); - solverStatus = DecompSolStatOptimal; - } - - UtilPrintFuncEnd(m_osLog, m_classTag, - "solveRelaxed()", m_appParam.LogLevel, 2); - return solverStatus; +DecompSolverStatus MMKP_DecompApp::solveRelaxed(const int whichBlock, + const double *redCostX, + const double convexDual, + DecompVarList &varList) { + + if (!m_appParam.UsePisinger) + return DecompSolStatNoSolution; + + UtilPrintFuncBegin(m_osLog, m_classTag, "solveRelaxed()", m_appParam.LogLevel, + 2); + + //--- + //--- this allows user direct access to access methods in + //--- algorithm interface (in case they want to use any + //--- of its data) + //--- + //--- for example, if the user wants to enforce the branching + //--- decisions in the oracle + //--- TODO: can this be done using mcknap solver? 
+ //--- + // const DecompAlgo * decompAlgo = getDecompAlgo(); + // const double * colLBNode = decompAlgo->getColLBNode(); + // const double * colUBNode = decompAlgo->getColUBNode(); + + //--- + //--- in the case where oracle=MCKP0, we have a specialized solver + //--- in the case where oracle=MDKP, we do not have a specialized solver + //--- so, we just return with no solution and the framework will + //--- attempt to use the built-in MILP solver + //--- + DecompSolverStatus solverStatus = DecompSolStatNoSolution; + if (m_appParam.ModelNameRelax == "MCKP0") { + vector solInd; + vector solEls; + double varRedCost = 0.0; + double varOrigCost = 0.0; + MMKP_MCKnap *mcknapK = m_mcknap[whichBlock]; + // TODO: check status return codes here + mcknapK->solveMCKnap(redCostX, m_objective, solInd, solEls, varRedCost, + varOrigCost); + assert(static_cast(solInd.size()) == m_instance.getNGroupRows()); + + UTIL_DEBUG(m_param.LogDebugLevel, 4, + printf("PUSH var with k = %d RC = %g origCost = %g\n", + whichBlock, varRedCost - convexDual, varOrigCost);); + + // the user should not have to calculate orig cost too + // the framework can do this... in fact the framework shoudl + // calculate red-cost too... but the user might want to check this stuff + + // this is way too confusing for user to remember they need -alpha! + // let framework do that - also setting the block id - framework! + DecompVar *var = + new DecompVar(solInd, solEls, varRedCost - convexDual, varOrigCost); + var->setBlockId(whichBlock); + varList.push_back(var); + solverStatus = DecompSolStatOptimal; + } + + UtilPrintFuncEnd(m_osLog, m_classTag, "solveRelaxed()", m_appParam.LogLevel, + 2); + return solverStatus; } - //===========================================================================// -void MMKP_DecompApp::printOriginalColumn(const int index, - ostream * os) const { - pair p = m_instance.getIndexInv(index); - (*os) << "x[ " << index << " : " << p.first << " , " << p.second << " ]"; +void MMKP_DecompApp::printOriginalColumn(const int index, ostream *os) const { + pair p = m_instance.getIndexInv(index); + (*os) << "x[ " << index << " : " << p.first << " , " << p.second << " ]"; } - diff --git a/Dip/examples/MMKP/MMKP_Instance.cpp b/Dip/examples/MMKP/MMKP_Instance.cpp index fe005024..0d8c435d 100644 --- a/Dip/examples/MMKP/MMKP_Instance.cpp +++ b/Dip/examples/MMKP/MMKP_Instance.cpp @@ -18,182 +18,175 @@ //===========================================================================// //===========================================================================// -void MMKP_Instance::readInstanceSimon(string & fileName){ - - int k, i, j, ij, numIJ, dummy; - int status = 0; - string dummyStr; - char dummyChr; - char dummyChrA[1000]; - ifstream is; - - - /* - # MMKP instances generated from definition file: tmp2.xml - # serie_name rp_hep_hec_strong - # num_instances 1 - instance_id 1 - num_class 10 - num_dimension 5 - # class_id 1 - num_item 5 - 1 2 2 3 3 3 - 3 4 4 4 4 5 - 5 6 6 6 6 6 - 7 8 8 8 8 8 - 9 10 10 10 10 10 - # class_id 2 - num_item 5 - ... 
- c: 518 524 531 540 553 - */ - status = UtilOpenFile(is, fileName.c_str()); - if(status) - throw UtilException("Failed to read instance", - "readInstance", "MMKP_Instance"); - is.getline(dummyChrA, 1000); - is.getline(dummyChrA, 1000); - is.getline(dummyChrA, 1000); - is.getline(dummyChrA, 1000); - is >> dummyStr >> m_nGroupRows //num_class - >> dummyStr >> m_nKnapRows; //num_dimension - is.getline(dummyChrA, 1000); - is.getline(dummyChrA, 1000); - is >> dummyStr >> m_nGroupCols; - printf("dummyStr = %s\n", dummyStr.c_str()); - printf("nGroupRows = %d\n", m_nGroupRows); - printf("nKnapRows = %d\n", m_nKnapRows); - printf("nGroupCols = %d\n", m_nGroupCols); - fflush(stdout); - - //--- - //--- allocate memory for capacity, value and weight - //--- - numIJ = m_nGroupCols * m_nGroupRows; - m_capacity = new double[m_nKnapRows]; - m_value = new double[numIJ]; - m_weight = new double*[m_nKnapRows]; - if(!(m_capacity && m_value && m_weight)) +void MMKP_Instance::readInstanceSimon(string &fileName) { + + int k, i, j, ij, numIJ, dummy; + int status = 0; + string dummyStr; + char dummyChr; + char dummyChrA[1000]; + ifstream is; + + /* + # MMKP instances generated from definition file: tmp2.xml + # serie_name rp_hep_hec_strong + # num_instances 1 + instance_id 1 + num_class 10 + num_dimension 5 + # class_id 1 + num_item 5 + 1 2 2 3 3 3 + 3 4 4 4 4 5 + 5 6 6 6 6 6 + 7 8 8 8 8 8 + 9 10 10 10 10 10 + # class_id 2 + num_item 5 + ... + c: 518 524 531 540 553 + */ + status = UtilOpenFile(is, fileName.c_str()); + if (status) + throw UtilException("Failed to read instance", "readInstance", + "MMKP_Instance"); + is.getline(dummyChrA, 1000); + is.getline(dummyChrA, 1000); + is.getline(dummyChrA, 1000); + is.getline(dummyChrA, 1000); + is >> dummyStr >> m_nGroupRows // num_class + >> dummyStr >> m_nKnapRows; // num_dimension + is.getline(dummyChrA, 1000); + is.getline(dummyChrA, 1000); + is >> dummyStr >> m_nGroupCols; + printf("dummyStr = %s\n", dummyStr.c_str()); + printf("nGroupRows = %d\n", m_nGroupRows); + printf("nKnapRows = %d\n", m_nKnapRows); + printf("nGroupCols = %d\n", m_nGroupCols); + fflush(stdout); + + //--- + //--- allocate memory for capacity, value and weight + //--- + numIJ = m_nGroupCols * m_nGroupRows; + m_capacity = new double[m_nKnapRows]; + m_value = new double[numIJ]; + m_weight = new double *[m_nKnapRows]; + if (!(m_capacity && m_value && m_weight)) + throw UtilExceptionMemory("readInstance", "MMKP_Instance"); + + for (k = 0; k < m_nKnapRows; k++) { + m_weight[k] = new double[numIJ]; + if (!m_weight[k]) throw UtilExceptionMemory("readInstance", "MMKP_Instance"); + } - for(k = 0; k < m_nKnapRows; k++){ - m_weight[k] = new double[numIJ]; - if(!m_weight[k]) - throw UtilExceptionMemory("readInstance", "MMKP_Instance"); - } - - for(i = 0; i < m_nGroupRows; i++){ - for(j = 0; j < m_nGroupCols; j++){ - ij = getIndexIJ(i,j); - is >> m_value[ij]; - printf("value[%d]: %g\n", ij, m_value[ij]); - fflush(stdout); - for(k = 0; k < m_nKnapRows; k++){ - is >> m_weight[k][ij]; - printf("weight[%d][%d]: %g\n", k, ij, m_weight[k][ij]); - fflush(stdout); - } + for (i = 0; i < m_nGroupRows; i++) { + for (j = 0; j < m_nGroupCols; j++) { + ij = getIndexIJ(i, j); + is >> m_value[ij]; + printf("value[%d]: %g\n", ij, m_value[ij]); + fflush(stdout); + for (k = 0; k < m_nKnapRows; k++) { + is >> m_weight[k][ij]; + printf("weight[%d][%d]: %g\n", k, ij, m_weight[k][ij]); + fflush(stdout); } + } + is >> dummyChr; + if (dummyChr == '#') { + is.getline(dummyChrA, 1000); + is.getline(dummyChrA, 1000); + 
printf("dummyChrA = %s\n", dummyChrA); + } else { is >> dummyChr; - if(dummyChr == '#'){ - is.getline(dummyChrA, 1000); - is.getline(dummyChrA, 1000); - printf("dummyChrA = %s\n", dummyChrA); - } - else{ - is >> dummyChr; - } - } - - - printf("dummyChr = %c\n", dummyChr); - for(k = 0; k < m_nKnapRows; k++){ - m_weight[k] = new double[numIJ]; - if(!m_weight[k]) - throw UtilExceptionMemory("readInstance", "MMKP_Instance"); - is >> m_capacity[k]; - printf("cap[k=%d]: %g\n", k, m_capacity[k]); - } - - is.close(); + } + } + + printf("dummyChr = %c\n", dummyChr); + for (k = 0; k < m_nKnapRows; k++) { + m_weight[k] = new double[numIJ]; + if (!m_weight[k]) + throw UtilExceptionMemory("readInstance", "MMKP_Instance"); + is >> m_capacity[k]; + printf("cap[k=%d]: %g\n", k, m_capacity[k]); + } + + is.close(); } //===========================================================================// -void MMKP_Instance::readInstance(string & fileName, - string & dataFormat){ - - int k, i, j, ij, numIJ, dummy; - int status = 0; - ifstream is; - - //--- - //--- ftp://cermsem.univ-paris1.fr/pub/CERMSEM/hifi/MMKP/MMKP.html - //--- NOTE: l[i] = l, for all i - //--- - status = UtilOpenFile(is, fileName.c_str()); - if(status) - throw UtilException("Failed to read instance", - "readInstance", "MMKP_Instance"); - is >> m_nGroupRows - >> m_nGroupCols - >> m_nKnapRows; - - //--- - //--- allocate memory for capacity, value and weight - //--- - numIJ = m_nGroupCols * m_nGroupRows; - m_capacity = new double[m_nKnapRows]; - m_value = new double[numIJ]; - m_weight = new double*[m_nKnapRows]; - if(!(m_capacity && m_value && m_weight)) +void MMKP_Instance::readInstance(string &fileName, string &dataFormat) { + + int k, i, j, ij, numIJ, dummy; + int status = 0; + ifstream is; + + //--- + //--- ftp://cermsem.univ-paris1.fr/pub/CERMSEM/hifi/MMKP/MMKP.html + //--- NOTE: l[i] = l, for all i + //--- + status = UtilOpenFile(is, fileName.c_str()); + if (status) + throw UtilException("Failed to read instance", "readInstance", + "MMKP_Instance"); + is >> m_nGroupRows >> m_nGroupCols >> m_nKnapRows; + + //--- + //--- allocate memory for capacity, value and weight + //--- + numIJ = m_nGroupCols * m_nGroupRows; + m_capacity = new double[m_nKnapRows]; + m_value = new double[numIJ]; + m_weight = new double *[m_nKnapRows]; + if (!(m_capacity && m_value && m_weight)) + throw UtilExceptionMemory("readInstance", "MMKP_Instance"); + + for (k = 0; k < m_nKnapRows; k++) { + m_weight[k] = new double[numIJ]; + if (!m_weight[k]) throw UtilExceptionMemory("readInstance", "MMKP_Instance"); - - for(k = 0; k < m_nKnapRows; k++){ - m_weight[k] = new double[numIJ]; - if(!m_weight[k]) - throw UtilExceptionMemory("readInstance", "MMKP_Instance"); - is >> m_capacity[k]; - } - - for(i = 0; i < m_nGroupRows; i++){ - if(dataFormat == "khan") - is >> dummy; - for(j = 0; j < m_nGroupCols; j++){ - ij = getIndexIJ(i,j); - is >> m_value[ij]; - for(k = 0; k < m_nKnapRows; k++){ - is >> m_weight[k][ij]; - } + is >> m_capacity[k]; + } + + for (i = 0; i < m_nGroupRows; i++) { + if (dataFormat == "khan") + is >> dummy; + for (j = 0; j < m_nGroupCols; j++) { + ij = getIndexIJ(i, j); + is >> m_value[ij]; + for (k = 0; k < m_nKnapRows; k++) { + is >> m_weight[k][ij]; } - } - - is.close(); + } + } + + is.close(); } //===========================================================================// -void MMKP_Instance::readBestKnown(string & fileName, - string & instanceName){ - - ifstream is; - string instance; - double bestUpperBound; - bool isProvenOptimal; - int status = 0; - 
status = UtilOpenFile(is, fileName); - if(status) - throw UtilException("Failed to best-known file", - "readBestKnown", "MMKP_Instance"); - while(!is.eof()){ - is >> instance >> bestUpperBound >> isProvenOptimal; - instance = UtilStrTrim(instance); - if(instance == instanceName){ - if(isProvenOptimal) - m_bestKnownLB = bestUpperBound; - else - m_bestKnownLB = COIN_DBL_MAX; - m_bestKnownUB = bestUpperBound; - m_isProvenOptimal = isProvenOptimal; - break; - } - } +void MMKP_Instance::readBestKnown(string &fileName, string &instanceName) { + + ifstream is; + string instance; + double bestUpperBound; + bool isProvenOptimal; + int status = 0; + status = UtilOpenFile(is, fileName); + if (status) + throw UtilException("Failed to best-known file", "readBestKnown", + "MMKP_Instance"); + while (!is.eof()) { + is >> instance >> bestUpperBound >> isProvenOptimal; + instance = UtilStrTrim(instance); + if (instance == instanceName) { + if (isProvenOptimal) + m_bestKnownLB = bestUpperBound; + else + m_bestKnownLB = COIN_DBL_MAX; + m_bestKnownUB = bestUpperBound; + m_isProvenOptimal = isProvenOptimal; + break; + } + } } diff --git a/Dip/examples/MMKP/MMKP_MCKnap.cpp b/Dip/examples/MMKP/MMKP_MCKnap.cpp old mode 100755 new mode 100644 index 91ffd4da..820ff35e --- a/Dip/examples/MMKP/MMKP_MCKnap.cpp +++ b/Dip/examples/MMKP/MMKP_MCKnap.cpp @@ -25,359 +25,341 @@ //--- sum{j in 1..l[i]} x[i,j] = 1 , i in 1..n //--- - // --------------------------------------------------------------------- // #include "MMKP_MCKnap.h" #include "UtilMacrosDecomp.h" -extern "C"{ +extern "C" { #include "mcknap.h" } // --------------------------------------------------------------------- // //#define MCKP_EPSILON 1.0e-4 //still causing overflow -#define MCKP_EPSILON 1.0e-3 +#define MCKP_EPSILON 1.0e-3 //#define MMKP_MCKNAP_DEBUG // --------------------------------------------------------------------- // #include "UtilMacros.h" // --------------------------------------------------------------------- // -void MMKP_MCKnap::solveTrivialMaxSum(const double * redCost, - const double * origCost, - vector & solInd, - double & varRedCost, - double & varOrigCost){ - - double minRedCost; - int i, j, minRedCostInd, ijIndex, minWeight, totalWeight; - - //--- - //--- Pisinger's code breaks on this trivial case. - //--- - //--- In the case where maxwsum <= c, then we can trivially - //--- pick the max profit / min cost element from each group - //--- - //--- We have to be careful here because there might be ties to deal - //--- with. The max profit might be p* for two different choices - //--- but only one of those gave a weight that was under capacity. - //--- So, we have to find the alternative choice with lowest weight. - //--- - //--- NOTE: this algorithm was added because mcknap algorithm seems - //--- to crash on this trivial case. It would be better if we could - //--- get this case fixed. TODO: send test case to Pisinger. - //--- - varRedCost = 0.0; - varOrigCost = 0.0; - solInd.reserve(m_nGroupRows); - - ijIndex = 0; - totalWeight = 0; - for(i = 0; i < m_nGroupRows; i++){ - ijIndex = getIndexIJ(i, 0); - minRedCostInd = ijIndex; - minRedCost = redCost[ijIndex]; - minWeight = m_weight[ijIndex];//need if we have ties +void MMKP_MCKnap::solveTrivialMaxSum(const double *redCost, + const double *origCost, + vector &solInd, double &varRedCost, + double &varOrigCost) { + + double minRedCost; + int i, j, minRedCostInd, ijIndex, minWeight, totalWeight; + + //--- + //--- Pisinger's code breaks on this trivial case. 
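// A minimal, self-contained sketch (not part of MMKP_MCKnap) of the selection
// rule described in the surrounding comments: when every group's items all fit,
// take the item with the smallest reduced cost from each group and break ties
// by the smaller weight.  The names nGroups/nItems and the row-major index
// arithmetic are illustrative assumptions, not the class's actual members.
#include <cmath>
#include <vector>

inline std::vector<int> pickCheapestPerGroup(const std::vector<double> &redCost,
                                             const std::vector<int> &weight,
                                             int nGroups, int nItems,
                                             double eps = 1.0e-3) {
  std::vector<int> chosen;
  chosen.reserve(nGroups);
  for (int i = 0; i < nGroups; i++) {
    int best = i * nItems; // first item of group i (row-major assumption)
    for (int j = 1; j < nItems; j++) {
      int c = i * nItems + j;
      if (redCost[c] - redCost[best] < -eps ||          // strictly cheaper, or
          (std::fabs(redCost[c] - redCost[best]) <= eps //   a tie ...
           && weight[c] < weight[best]))                //   ... broken by weight
        best = c;
    }
    chosen.push_back(best);
  }
  return chosen;
}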
+ //--- + //--- In the case where maxwsum <= c, then we can trivially + //--- pick the max profit / min cost element from each group + //--- + //--- We have to be careful here because there might be ties to deal + //--- with. The max profit might be p* for two different choices + //--- but only one of those gave a weight that was under capacity. + //--- So, we have to find the alternative choice with lowest weight. + //--- + //--- NOTE: this algorithm was added because mcknap algorithm seems + //--- to crash on this trivial case. It would be better if we could + //--- get this case fixed. TODO: send test case to Pisinger. + //--- + varRedCost = 0.0; + varOrigCost = 0.0; + solInd.reserve(m_nGroupRows); + + ijIndex = 0; + totalWeight = 0; + for (i = 0; i < m_nGroupRows; i++) { + ijIndex = getIndexIJ(i, 0); + minRedCostInd = ijIndex; + minRedCost = redCost[ijIndex]; + minWeight = m_weight[ijIndex]; // need if we have ties #ifdef MMKP_MCKNAP_DEBUG - printf("i:%d j:%d redCost:%g minRedCost:%g wt:%d minWeight:%d\n", - i, 0, redCost[ijIndex], minRedCost, m_weight[ijIndex], - minWeight); + printf("i:%d j:%d redCost:%g minRedCost:%g wt:%d minWeight:%d\n", i, 0, + redCost[ijIndex], minRedCost, m_weight[ijIndex], minWeight); #endif - ijIndex++; - for(j = 1; j < m_nGroupCols; j++){ - if((redCost[ijIndex] - minRedCost) < -MCKP_EPSILON){ - minRedCost = redCost[ijIndex]; - minWeight = m_weight[ijIndex]; - minRedCostInd = ijIndex; - }else if( UtilIsZero(redCost[ijIndex] - minRedCost, MCKP_EPSILON) ){ - //--- - //--- break ties with element with least weight - //--- - if(minWeight > m_weight[ijIndex]){ - minWeight = m_weight[ijIndex]; - minRedCostInd = ijIndex; - } - } + ijIndex++; + for (j = 1; j < m_nGroupCols; j++) { + if ((redCost[ijIndex] - minRedCost) < -MCKP_EPSILON) { + minRedCost = redCost[ijIndex]; + minWeight = m_weight[ijIndex]; + minRedCostInd = ijIndex; + } else if (UtilIsZero(redCost[ijIndex] - minRedCost, MCKP_EPSILON)) { + //--- + //--- break ties with element with least weight + //--- + if (minWeight > m_weight[ijIndex]) { + minWeight = m_weight[ijIndex]; + minRedCostInd = ijIndex; + } + } #ifdef MMKP_MCKNAP_DEBUG - printf("i:%d j:%d redCost:%g minRedCost:%g wt:%d minWeight:%d\n", - i, j, redCost[ijIndex], minRedCost, m_weight[ijIndex], - minWeight); + printf("i:%d j:%d redCost:%g minRedCost:%g wt:%d minWeight:%d\n", i, j, + redCost[ijIndex], minRedCost, m_weight[ijIndex], minWeight); #endif - ijIndex++; - } - assert((minRedCostInd >= 0) && (minRedCostInd < m_nCols)); - totalWeight += m_weight[minRedCostInd]; - solInd.push_back(minRedCostInd); + ijIndex++; + } + assert((minRedCostInd >= 0) && (minRedCostInd < m_nCols)); + totalWeight += m_weight[minRedCostInd]; + solInd.push_back(minRedCostInd); #ifdef MMKP_MCKNAP_DEBUG - printf("i:%d totalWeight:%d cap:%d mincostInd:%d redCost:%g origCost:%g\n", - i, totalWeight, m_capacity, minRedCostInd, minRedCost, - origCost[minRedCostInd]);fflush(stdout); + printf("i:%d totalWeight:%d cap:%d mincostInd:%d redCost:%g origCost:%g\n", + i, totalWeight, m_capacity, minRedCostInd, minRedCost, + origCost[minRedCostInd]); + fflush(stdout); #endif - assert(totalWeight <= m_capacity); - varRedCost += minRedCost; - varOrigCost += origCost[minRedCostInd]; - } + assert(totalWeight <= m_capacity); + varRedCost += minRedCost; + varOrigCost += origCost[minRedCostInd]; + } } // --------------------------------------------------------------------- // -void MMKP_MCKnap::setMCKnapData(const double capacity, - const double * weight){ - - int i, j, colIndex; - itemset * 
setPtr = NULL; - itemrec * recPtr = NULL; - - //TODO: allow pass in arrFrac to this util function, for speed - //TODO: really need to use malloc here? - - //--- - //--- Pisinger's code assume cost/weight is integer type. - //--- So, we need to scale the cost/weight to nearest integer first. - //--- - m_wscale = UtilScaleDblToIntArr(m_nCols, - weight, m_weight, - capacity, &m_capacity, MCKP_EPSILON); - - m_setset = (isetset*) malloc(sizeof(isetset)); - assert(m_setset); - - m_setset->size = m_nGroupRows; - m_setset->fset = (itemset*) malloc(m_setset->size * sizeof(itemset)); - assert(m_setset->fset); - - setPtr = m_setset->fset; - colIndex = 0; - for(i = 0; i < m_setset->size; i++){ - setPtr->size = m_nGroupCols; - setPtr->fset = (itemrec*) malloc(setPtr->size * sizeof(itemrec)); - assert(m_setset->fset); - - recPtr = setPtr->fset; - for(j = 0; j < setPtr->size; j++){ - recPtr->i = i; - recPtr->j = j; - recPtr->wsum = m_weight[colIndex]; - recPtr->psum = m_cost[colIndex]; - recPtr++; - colIndex++; - } - setPtr->lset = setPtr->fset + setPtr->size - 1; - setPtr++; - } - m_setset->lset = m_setset->fset + m_setset->size - 1; +void MMKP_MCKnap::setMCKnapData(const double capacity, const double *weight) { + + int i, j, colIndex; + itemset *setPtr = NULL; + itemrec *recPtr = NULL; + + // TODO: allow pass in arrFrac to this util function, for speed + // TODO: really need to use malloc here? + + //--- + //--- Pisinger's code assume cost/weight is integer type. + //--- So, we need to scale the cost/weight to nearest integer first. + //--- + m_wscale = UtilScaleDblToIntArr(m_nCols, weight, m_weight, capacity, + &m_capacity, MCKP_EPSILON); + + m_setset = (isetset *)malloc(sizeof(isetset)); + assert(m_setset); + + m_setset->size = m_nGroupRows; + m_setset->fset = (itemset *)malloc(m_setset->size * sizeof(itemset)); + assert(m_setset->fset); + + setPtr = m_setset->fset; + colIndex = 0; + for (i = 0; i < m_setset->size; i++) { + setPtr->size = m_nGroupCols; + setPtr->fset = (itemrec *)malloc(setPtr->size * sizeof(itemrec)); + assert(m_setset->fset); + + recPtr = setPtr->fset; + for (j = 0; j < setPtr->size; j++) { + recPtr->i = i; + recPtr->j = j; + recPtr->wsum = m_weight[colIndex]; + recPtr->psum = m_cost[colIndex]; + recPtr++; + colIndex++; + } + setPtr->lset = setPtr->fset + setPtr->size - 1; + setPtr++; + } + m_setset->lset = m_setset->fset + m_setset->size - 1; } // --------------------------------------------------------------------- // -void MMKP_MCKnap::solveMCKnap(const double * redCost, - const double * origCost, - vector & solInd, - vector & solEls, - double & varRedCost, - double & varOrigCost){ - - int i, j, colIndex; - - //--- - //--- Pisinger's code (mcknap) solves a max problem. And, it assumes - //--- all positive costs/profits and weights. - //--- - memcpy(m_costDbl, redCost, m_nCols * sizeof(double)); +void MMKP_MCKnap::solveMCKnap(const double *redCost, const double *origCost, + vector &solInd, vector &solEls, + double &varRedCost, double &varOrigCost) { + + int i, j, colIndex; + + //--- + //--- Pisinger's code (mcknap) solves a max problem. And, it assumes + //--- all positive costs/profits and weights. 
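// A compact sketch, separate from the class, of the cost transformation used in
// solveMCKnap and undone after the solve: minimizing over reduced costs becomes
// maximizing positive integer profits by negating, shifting by an offset so all
// values are >= 1, and scaling to integers.  In the real code the scale factor
// is chosen by UtilScaleDblToIntArr; here it is passed in, and all names are
// illustrative.
#include <algorithm>
#include <cstddef>
#include <vector>

inline std::vector<long> toMaxProfits(std::vector<double> cost, double &offset,
                                      double scale) {
  for (double &v : cost)
    v = -v; // max c == min -c
  double minv = *std::min_element(cost.begin(), cost.end());
  offset = (minv <= 0.0) ? (-minv + 1.0) : 0.0; // make every profit positive
  std::vector<long> profit(cost.size());
  for (std::size_t k = 0; k < cost.size(); k++)
    profit[k] = static_cast<long>((cost[k] + offset) * scale + 0.5);
  return profit;
}

// Undo the transformation on the objective value: one offset was added per
// group, so the original minimization value is -(maxObj / scale - offset * n).
inline double toMinObjective(double maxObj, double offset, double scale,
                             int nGroups) {
  return -(maxObj / scale - offset * nGroups);
}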
+ //--- + memcpy(m_costDbl, redCost, m_nCols * sizeof(double)); #ifdef MMKP_MCKNAP_DEBUG - for(i = 0; i < m_nCols; i++){ - pair ij = getIndexInv(i); - printf("\ncostDbl[%d: %d, %d]: %g", - i, ij.first, ij.second, m_costDbl[i]); - } + for (i = 0; i < m_nCols; i++) { + pair ij = getIndexInv(i); + printf("\ncostDbl[%d: %d, %d]: %g", i, ij.first, ij.second, m_costDbl[i]); + } #endif - - //--- - //--- flip reduced costs (max c == min -c) - //--- - UtilNegateArr(m_nCols, m_costDbl); - - //--- - //--- add a constant so that all vertex weights are positive, inc alpha - //--- - double offset = 0.0; - double minrc = *min_element(m_costDbl, m_costDbl + m_nCols); + //--- + //--- flip reduced costs (max c == min -c) + //--- + UtilNegateArr(m_nCols, m_costDbl); + + //--- + //--- add a constant so that all vertex weights are positive, inc alpha + //--- + double offset = 0.0; + double minrc = *min_element(m_costDbl, m_costDbl + m_nCols); #ifdef MMKP_MCKNAP_DEBUG - printf("\nminrc = %g", minrc); -#endif - if(minrc <= 0){ - offset = -minrc + 1; - UtilAddOffsetArr(m_nCols, offset, m_costDbl); - } - - //--- - //--- now scale the double array to an integer array - //--- - //TODO: magic number - have to be careful of overflow... - m_cscale = UtilScaleDblToIntArr(m_nCols, m_costDbl, m_cost, MCKP_EPSILON); + printf("\nminrc = %g", minrc); +#endif + if (minrc <= 0) { + offset = -minrc + 1; + UtilAddOffsetArr(m_nCols, offset, m_costDbl); + } + + //--- + //--- now scale the double array to an integer array + //--- + // TODO: magic number - have to be careful of overflow... + m_cscale = UtilScaleDblToIntArr(m_nCols, m_costDbl, m_cost, MCKP_EPSILON); #ifdef MMKP_MCKNAP_DEBUG - double diff; - printf("\noffset = %g", offset); - printf("\nm_cscale = %d", m_cscale); - printf("\nm_wscale = %d", m_wscale); - printf("\ncapacity = %d", m_capacity); - for(i = 0; i < m_nCols; i++){ - pair ij = getIndexInv(i); - diff = fabs((m_costDbl[i]*m_cscale) - m_cost[i]); - printf("\n[%d: %d, %d]: dbl-> %12.5f int-> %8d diff-> %12.5f", - i, ij.first, ij.second, m_costDbl[i], m_cost[i], diff); - assert( diff < 0.99 ); - } + double diff; + printf("\noffset = %g", offset); + printf("\nm_cscale = %d", m_cscale); + printf("\nm_wscale = %d", m_wscale); + printf("\ncapacity = %d", m_capacity); + for (i = 0; i < m_nCols; i++) { + pair ij = getIndexInv(i); + diff = fabs((m_costDbl[i] * m_cscale) - m_cost[i]); + printf("\n[%d: %d, %d]: dbl-> %12.5f int-> %8d diff-> %12.5f", i, ij.first, + ij.second, m_costDbl[i], m_cost[i], diff); + assert(diff < 0.99); + } #endif - //--- - //--- sanity check - //--- if any cost value becomes negative that - //--- denotes an overflow happened - //--- TODO: not sure how to do this scaling safely and - //--- accurately - //--- - for(i = 0; i < m_nCols; i++){ - if(m_cost[i] < 0){ - throw UtilException("negative cost value", - "solveMCKnap", "MMKP_MCKnap"); - } - } - - //--- - //--- setup the data structures for mcknap - //--- - itemset * setPtr = m_setset->fset; - itemrec * recPtr = NULL; - itemrec * recSolPtr = NULL; - - //THINK: reset - assume memory is still there - m_setset->size = m_nGroupRows; - setPtr = m_setset->fset; - for(i = 0; i < m_setset->size; i++){ - setPtr->size = m_nGroupCols; - recPtr = setPtr->fset; - setPtr->lset = setPtr->fset + setPtr->size - 1; - setPtr++; - } - m_setset->lset = m_setset->fset + m_setset->size - 1; - - - colIndex = 0; - setPtr = m_setset->fset; - for(i = 0; i < m_setset->size; i++){ - recPtr = setPtr->fset; - for(j = 0; j < setPtr->size; j++){ - recPtr->i = i; - recPtr->j = j; - 
recPtr->psum = m_cost[colIndex]; - recPtr->wsum = m_weight[colIndex]; + //--- + //--- sanity check + //--- if any cost value becomes negative that + //--- denotes an overflow happened + //--- TODO: not sure how to do this scaling safely and + //--- accurately + //--- + for (i = 0; i < m_nCols; i++) { + if (m_cost[i] < 0) { + throw UtilException("negative cost value", "solveMCKnap", "MMKP_MCKnap"); + } + } + + //--- + //--- setup the data structures for mcknap + //--- + itemset *setPtr = m_setset->fset; + itemrec *recPtr = NULL; + itemrec *recSolPtr = NULL; + + // THINK: reset - assume memory is still there + m_setset->size = m_nGroupRows; + setPtr = m_setset->fset; + for (i = 0; i < m_setset->size; i++) { + setPtr->size = m_nGroupCols; + recPtr = setPtr->fset; + setPtr->lset = setPtr->fset + setPtr->size - 1; + setPtr++; + } + m_setset->lset = m_setset->fset + m_setset->size - 1; + + colIndex = 0; + setPtr = m_setset->fset; + for (i = 0; i < m_setset->size; i++) { + recPtr = setPtr->fset; + for (j = 0; j < setPtr->size; j++) { + recPtr->i = i; + recPtr->j = j; + recPtr->psum = m_cost[colIndex]; + recPtr->wsum = m_weight[colIndex]; #ifdef MMKP_MCKNAP_DEBUG - printf("\ncolIndex: %d i: %d, j: %d, p: %d, w: %d", - colIndex, i, j, recPtr->psum, recPtr->wsum); + printf("\ncolIndex: %d i: %d, j: %d, p: %d, w: %d", colIndex, i, j, + recPtr->psum, recPtr->wsum); #endif - recPtr++; - colIndex++; - } - setPtr++; - } - - itemrec * solRec = new itemrec[m_setset->size]; - - double minObj = 99999; -// long minObj = 99999; - int status = minmcknapSolve(m_capacity, m_setset, solRec, &minObj); - - solInd.reserve(m_nGroupRows); - solEls.reserve(m_nGroupRows); - UtilFillN(solEls, m_nGroupRows, 1.0); - varRedCost = 0.0; - varOrigCost = 0.0; - - //--- - //--- TODO: - //--- this is painful to get optimal assignments - //--- wrote Dr. Pisinger for help (7/4/07) - //--- NOTE: optsol is NOT reentrant - //--- - //CoinAssert(optsol.size == 1); //TODO + recPtr++; + colIndex++; + } + setPtr++; + } + + itemrec *solRec = new itemrec[m_setset->size]; + + double minObj = 99999; + // long minObj = 99999; + int status = minmcknapSolve(m_capacity, m_setset, solRec, &minObj); + + solInd.reserve(m_nGroupRows); + solEls.reserve(m_nGroupRows); + UtilFillN(solEls, m_nGroupRows, 1.0); + varRedCost = 0.0; + varOrigCost = 0.0; + + //--- + //--- TODO: + //--- this is painful to get optimal assignments + //--- wrote Dr. 
Pisinger for help (7/4/07) + //--- NOTE: optsol is NOT reentrant + //--- + // CoinAssert(optsol.size == 1); //TODO #ifdef MMKP_MCKNAP_DEBUG - printf("\nstatus=%d minObj=%g\n", status, minObj); + printf("\nstatus=%d minObj=%g\n", status, minObj); #endif - switch(status){ - case MCKNAP_RC_INF: - assert(status != MCKNAP_RC_INF); - break; - case MCKNAP_RC_OK: - { - //--- - //--- need to unravel: - //--- s * ((-x) + offset) - //--- - - double solObj = minObj / static_cast(m_cscale); - solObj -= (offset * m_setset->size); - solObj = -solObj; - + switch (status) { + case MCKNAP_RC_INF: + assert(status != MCKNAP_RC_INF); + break; + case MCKNAP_RC_OK: { + //--- + //--- need to unravel: + //--- s * ((-x) + offset) + //--- + + double solObj = minObj / static_cast(m_cscale); + solObj -= (offset * m_setset->size); + solObj = -solObj; + #ifdef MMKP_MCKNAP_DEBUG - printf("\nminObj = %g, solObj = %g", minObj, solObj); + printf("\nminObj = %g, solObj = %g", minObj, solObj); #endif - - - int c, i, j, g; - for(g = 0; g < m_setset->size; g++){ - recSolPtr = &solRec[g]; - i = recSolPtr->i;//was missing - STOP - j = recSolPtr->j;//was missing - c = getIndexIJ(i, j); - solInd.push_back(c); - varRedCost += redCost[c]; - varOrigCost += origCost[c]; -#ifdef MMKP_MCKNAP_DEBUG - printf("\nc: %d = (%d,%d) redCost: %g cost: %d wt: %d", - c, i, j, - redCost[c], m_cost[c], m_weight[c]); + + int c, i, j, g; + for (g = 0; g < m_setset->size; g++) { + recSolPtr = &solRec[g]; + i = recSolPtr->i; // was missing - STOP + j = recSolPtr->j; // was missing + c = getIndexIJ(i, j); + solInd.push_back(c); + varRedCost += redCost[c]; + varOrigCost += origCost[c]; +#ifdef MMKP_MCKNAP_DEBUG + printf("\nc: %d = (%d,%d) redCost: %g cost: %d wt: %d", c, i, j, + redCost[c], m_cost[c], m_weight[c]); #endif - /*for(c = 0; c < m_nCols; c++){ - //but could have more than one equal in p and w! - //this is NOT the right way to get back optimal - if((m_cost[c] == recSolPtr->psum) && - (m_weight[c] == recSolPtr->wsum)){ - //TODO: why should the user have to calc origCost? - //framework should probably do that - solInd.push_back(c); - varRedCost += redCost[c]; - varOrigCost += origCost[c]; - #ifdef MMKP_MCKNAP_DEBUG - pair ij = getIndexInv(c); - printf("\nc: %d = (%d,%d) redCost: %g cost: %d wt: %d", - c, - ij.first, ij.second, - redCost[c], m_cost[c], m_weight[c]); - #endif - break; - } - }*/ - } - //UtilPrintVector(solInd); - break; + /*for(c = 0; c < m_nCols; c++){ + //but could have more than one equal in p and w! + //this is NOT the right way to get back optimal + if((m_cost[c] == recSolPtr->psum) && + (m_weight[c] == recSolPtr->wsum)){ + //TODO: why should the user have to calc origCost? 
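// A small sketch of what the TODO just above asks for: given only the chosen
// column indices, both costs can be accumulated generically, so the oracle
// would not need to track the original cost itself.  solInd/redCost/origCost
// carry the same meaning they have in solveMCKnap; the helper is hypothetical.
#include <vector>

inline void accumulateCosts(const std::vector<int> &solInd,
                            const double *redCost, const double *origCost,
                            double &varRedCost, double &varOrigCost) {
  varRedCost = 0.0;
  varOrigCost = 0.0;
  for (int c : solInd) { // one chosen column per group
    varRedCost += redCost[c];
    varOrigCost += origCost[c];
  }
}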
+ //framework should probably do that + solInd.push_back(c); + varRedCost += redCost[c]; + varOrigCost += origCost[c]; + #ifdef MMKP_MCKNAP_DEBUG + pair ij = getIndexInv(c); + printf("\nc: %d = (%d,%d) redCost: %g cost: %d wt: %d", + c, + ij.first, ij.second, + redCost[c], m_cost[c], m_weight[c]); + #endif + break; } - case MCKNAP_RC_TRIVIAL_MAXSUM: - fflush(stdout); - //maxwsum <= c, so just pick the max elements from each group - solveTrivialMaxSum(redCost, origCost, solInd, - varRedCost, varOrigCost); -#ifdef MMKP_MCKNAP_DEBUG - printf("trivial sum varRedCost=%g varOrigCost=%g\n", - varRedCost, varOrigCost); + }*/ + } + // UtilPrintVector(solInd); + break; + } + case MCKNAP_RC_TRIVIAL_MAXSUM: + fflush(stdout); + // maxwsum <= c, so just pick the max elements from each group + solveTrivialMaxSum(redCost, origCost, solInd, varRedCost, varOrigCost); +#ifdef MMKP_MCKNAP_DEBUG + printf("trivial sum varRedCost=%g varOrigCost=%g\n", varRedCost, + varOrigCost); #endif - break; - default: - assert(0); - } - UTIL_DELARR(solRec); + break; + default: + assert(0); + } + UTIL_DELARR(solRec); } - - diff --git a/Dip/examples/MMKP/MMKP_Main.cpp b/Dip/examples/MMKP/MMKP_Main.cpp index 15bc8ed6..f3f9bfd7 100644 --- a/Dip/examples/MMKP/MMKP_Main.cpp +++ b/Dip/examples/MMKP/MMKP_Main.cpp @@ -28,133 +28,122 @@ using namespace std; //===========================================================================// -int main(int argc, char ** argv){ - try { +int main(int argc, char **argv) { + try { - //--- - //--- create the utility class for parsing parameters - //--- - UtilParameters utilParam(argc, argv); - - bool doCut = utilParam.GetSetting("doCut", true); - bool doPriceCut = utilParam.GetSetting("doPriceCut", false); - bool doDirect = utilParam.GetSetting("doDirect", false); - - UtilTimer timer; - double timeSetupReal = 0.0; - double timeSetupCpu = 0.0; - double timeSolveReal = 0.0; - double timeSolveCpu = 0.0; + //--- + //--- create the utility class for parsing parameters + //--- + UtilParameters utilParam(argc, argv); + + bool doCut = utilParam.GetSetting("doCut", true); + bool doPriceCut = utilParam.GetSetting("doPriceCut", false); + bool doDirect = utilParam.GetSetting("doDirect", false); + + UtilTimer timer; + double timeSetupReal = 0.0; + double timeSetupCpu = 0.0; + double timeSolveReal = 0.0; + double timeSolveCpu = 0.0; + + //--- + //--- start overall timer + //--- + timer.start(); + + //--- + //--- create the user application (a DecompApp) + //--- + MMKP_DecompApp mmkp(utilParam); + + //--- + //--- create the algorithm (a DecompAlgo) + //--- + DecompAlgo *algo = NULL; + assert(doCut + doPriceCut == 1); + + //--- + //--- create the CPM algorithm object + //--- + if (doCut) + algo = new DecompAlgoC(&mmkp, utilParam); + + //--- + //--- create the PC algorithm object + //--- + if (doPriceCut) + algo = new DecompAlgoPC(&mmkp, utilParam); + + if (doCut && doDirect) { + timer.stop(); + timeSetupCpu = timer.getCpuTime(); + timeSetupReal = timer.getRealTime(); //--- - //--- start overall timer + //--- solve //--- timer.start(); - + algo->solveDirect(); + timer.stop(); + timeSolveCpu = timer.getCpuTime(); + timeSolveReal = timer.getRealTime(); + } else { + //--- + //--- create the driver AlpsDecomp model //--- - //--- create the user application (a DecompApp) - //--- - MMKP_DecompApp mmkp(utilParam); + int status = 0; + AlpsDecompModel alpsModel(utilParam, algo); + + timer.stop(); + timeSetupCpu = timer.getCpuTime(); + timeSetupReal = timer.getRealTime(); //--- - //--- create the algorithm (a DecompAlgo) + 
//--- solve //--- - DecompAlgo * algo = NULL; - assert(doCut + doPriceCut == 1); + timer.start(); + status = alpsModel.solve(); + timer.stop(); + timeSolveCpu = timer.getCpuTime(); + timeSolveReal = timer.getRealTime(); //--- - //--- create the CPM algorithm object - //--- - if(doCut) - algo = new DecompAlgoC(&mmkp, utilParam); + //--- sanity check + //--- + cout << setiosflags(ios::fixed | ios::showpoint); + cout << "Status= " << status << " BestLB= " << setw(10) + << UtilDblToStr(alpsModel.getGlobalLB(), 5) + << " BestUB= " << setw(10) + << UtilDblToStr(alpsModel.getGlobalUB(), 5) << " Nodes= " << setw(6) + << alpsModel.getNumNodesProcessed() << " SetupCPU= " << timeSetupCpu + << " SolveCPU= " << timeSolveCpu + << " TotalCPU= " << timeSetupCpu + timeSolveCpu + << " SetupReal= " << timeSetupReal << " SolveReal= " << timeSolveReal + << " TotalReal= " << timeSetupReal + timeSolveReal << endl; + + if (status == AlpsExitStatusOptimal && mmkp.getBestKnownUB() < 1.0e50) { + //--- + //--- the assumption here is that the BestKnownLB/UB is optimal + //--- + double diff = fabs(mmkp.getBestKnownUB() - alpsModel.getGlobalUB()); + if (diff > 1.0e-4) { + cerr << "ERROR. BestKnownUB= " << mmkp.getBestKnownUB() + << " but DECOMP claims GlobalUB= " << alpsModel.getGlobalUB() + << endl; + throw UtilException("Invalid claim of optimal.", "main", "DECOMP"); + } + } //--- - //--- create the PC algorithm object + //--- free local memory //--- - if(doPriceCut) - algo = new DecompAlgoPC(&mmkp, utilParam); - - - if(doCut && doDirect){ - timer.stop(); - timeSetupCpu = timer.getCpuTime(); - timeSetupReal = timer.getRealTime(); - - //--- - //--- solve - //--- - timer.start(); - algo->solveDirect(); - timer.stop(); - timeSolveCpu = timer.getCpuTime(); - timeSolveReal = timer.getRealTime(); - } - else{ - //--- - //--- create the driver AlpsDecomp model - //--- - int status = 0; - AlpsDecompModel alpsModel(utilParam, algo); - - timer.stop(); - timeSetupCpu = timer.getCpuTime(); - timeSetupReal = timer.getRealTime(); - - //--- - //--- solve - //--- - timer.start(); - status = alpsModel.solve(); - timer.stop(); - timeSolveCpu = timer.getCpuTime(); - timeSolveReal = timer.getRealTime(); - - //--- - //--- sanity check - //--- - cout << setiosflags(ios::fixed|ios::showpoint); - cout << "Status= " << status - << " BestLB= " << setw(10) - << UtilDblToStr(alpsModel.getGlobalLB(),5) - << " BestUB= " << setw(10) - << UtilDblToStr(alpsModel.getGlobalUB(),5) - << " Nodes= " << setw(6) - << alpsModel.getNumNodesProcessed() - << " SetupCPU= " << timeSetupCpu - << " SolveCPU= " << timeSolveCpu - << " TotalCPU= " << timeSetupCpu + timeSolveCpu - << " SetupReal= " << timeSetupReal - << " SolveReal= " << timeSolveReal - << " TotalReal= " << timeSetupReal + timeSolveReal - << endl; - - if(status == AlpsExitStatusOptimal && mmkp.getBestKnownUB() < 1.0e50){ - //--- - //--- the assumption here is that the BestKnownLB/UB is optimal - //--- - double diff - = fabs(mmkp.getBestKnownUB() - alpsModel.getGlobalUB()); - if(diff > 1.0e-4){ - cerr << "ERROR. 
BestKnownUB= " << mmkp.getBestKnownUB() - << " but DECOMP claims GlobalUB= " - << alpsModel.getGlobalUB() << endl; - throw UtilException("Invalid claim of optimal.", - "main", "DECOMP"); - } - } - - //--- - //--- free local memory - //--- - delete algo; - } - } - catch(CoinError & ex){ - cerr << "COIN Exception [ " << ex.message() << " ]" - << " at " << ex.fileName() << ":L" << ex.lineNumber() - << " in " << ex.className() << "::" << ex.methodName() << endl; - return 1; - } - return 0; + delete algo; + } + } catch (CoinError &ex) { + cerr << "COIN Exception [ " << ex.message() << " ]" + << " at " << ex.fileName() << ":L" << ex.lineNumber() << " in " + << ex.className() << "::" << ex.methodName() << endl; + return 1; + } + return 0; } - diff --git a/Dip/examples/MMKP/old/old.cpp b/Dip/examples/MMKP/old/old.cpp index 3ede2647..f7618680 100644 --- a/Dip/examples/MMKP/old/old.cpp +++ b/Dip/examples/MMKP/old/old.cpp @@ -1,69 +1,66 @@ //#if 0 //--------------------------------------------------------------------- // -//for debugging -bool MMKP_DecompApp::APPisUserFeasible(const double * x, - const int n_cols, - const double tolZero){ +// for debugging +bool MMKP_DecompApp::APPisUserFeasible(const double *x, const int n_cols, + const double tolZero) { - //--- - //--- Assume: it is already integeral. - //--- s.t. sum{i in 1..n, j in 1..l[i]} r[k,i,j] x[i,j] <= b[k], k in 1..m - //--- sum{j in 1..l[i]} x[i,j] = 1 , i in 1..n - //--- x[i,j] in {0,1}, i in 1..n, j in 1..l[i] - //--- - const MMKP_Param & appParam = m_model->getParam(); - UtilPrintFuncBegin(m_osLog, m_classTag, - "APPisUserFeasible()", appParam.LogLevel, 2); + //--- + //--- Assume: it is already integeral. + //--- s.t. sum{i in 1..n, j in 1..l[i]} r[k,i,j] x[i,j] <= b[k], k in 1..m + //--- sum{j in 1..l[i]} x[i,j] = 1 , i in 1..n + //--- x[i,j] in {0,1}, i in 1..n, j in 1..l[i] + //--- + const MMKP_Param &appParam = m_model->getParam(); + UtilPrintFuncBegin(m_osLog, m_classTag, "APPisUserFeasible()", + appParam.LogLevel, 2); + int c, i, j, k, ij; + bool isFeasible = true; + const MMKP_Instance &instance = m_model->getInstance(); + int nGroupRows = instance.getNGroupRows(); + int nKnapRows = instance.getNKnapRows(); + const double *capacity = instance.getCapacity(); + const double *const *weight = instance.getWeight(); + vector numInGroup(nGroupRows, 0); + vector weightInKnap(nKnapRows, 0.0); - int c, i, j, k, ij; - bool isFeasible = true; - const MMKP_Instance & instance = m_model->getInstance(); - int nGroupRows = instance.getNGroupRows(); - int nKnapRows = instance.getNKnapRows(); - const double * capacity = instance.getCapacity(); - const double * const * weight = instance.getWeight(); - vector numInGroup(nGroupRows, 0); - vector weightInKnap(nKnapRows, 0.0); - - for(c = 0; c < n_cols; c++){ - CoinAssertDebug(UtilIsIntegral(x[c], tolZero)); - CoinAssertDebug(x[c] > (0.0 - tolZero)); - CoinAssertDebug(x[c] < (1.0 + tolZero)); - if(x[c] > 0.5){ - pair p = instance.getIndexInv(c); - i = p.first; - j = p.second; - ij = instance.getIndexIJ(i,j); - numInGroup[i]++; - for(k = 0; k < nKnapRows; k++){ - weightInKnap[k] += weight[k][ij]; - } - } - } - for(i = 0; i < nGroupRows; i++){ - //printf("APPisUserFeasible numInGroup[%d]: %d", i, numInGroup[i]); - if(numInGroup[i] != 1){ - //printf(" --> NOT FEASIBLE"); - isFeasible = false; + for (c = 0; c < n_cols; c++) { + CoinAssertDebug(UtilIsIntegral(x[c], tolZero)); + CoinAssertDebug(x[c] > (0.0 - tolZero)); + CoinAssertDebug(x[c] < (1.0 + tolZero)); + if (x[c] > 0.5) { + pair p = 
instance.getIndexInv(c); + i = p.first; + j = p.second; + ij = instance.getIndexIJ(i, j); + numInGroup[i]++; + for (k = 0; k < nKnapRows; k++) { + weightInKnap[k] += weight[k][ij]; } - //printf("\n"); - } - for(k = 0; k < nKnapRows; k++){ - //printf("APPisUserFeasible weightInKnap[%d]: %g, cap: %g", - // k, weightInKnap[k], capacity[k]); - if(weightInKnap[k] >= (capacity[k] + tolZero)){ - //printf(" --> NOT FEASIBLE"); - isFeasible = false; - } - //printf("\n"); - } + } + } + for (i = 0; i < nGroupRows; i++) { + // printf("APPisUserFeasible numInGroup[%d]: %d", i, numInGroup[i]); + if (numInGroup[i] != 1) { + // printf(" --> NOT FEASIBLE"); + isFeasible = false; + } + // printf("\n"); + } + for (k = 0; k < nKnapRows; k++) { + // printf("APPisUserFeasible weightInKnap[%d]: %g, cap: %g", + // k, weightInKnap[k], capacity[k]); + if (weightInKnap[k] >= (capacity[k] + tolZero)) { + // printf(" --> NOT FEASIBLE"); + isFeasible = false; + } + // printf("\n"); + } - //printf("APPisUserFeasible = %d\n", isFeasible); - UtilPrintFuncEnd(m_osLog, m_classTag, - "APPisUserFeasible()", appParam.LogLevel, 2); + // printf("APPisUserFeasible = %d\n", isFeasible); + UtilPrintFuncEnd(m_osLog, m_classTag, "APPisUserFeasible()", + appParam.LogLevel, 2); - return isFeasible; + return isFeasible; } //#endif - diff --git a/Dip/examples/SDPUC/SDPUC_DecompApp.cpp b/Dip/examples/SDPUC/SDPUC_DecompApp.cpp index 7abbc542..59d66681 100644 --- a/Dip/examples/SDPUC/SDPUC_DecompApp.cpp +++ b/Dip/examples/SDPUC/SDPUC_DecompApp.cpp @@ -15,785 +15,816 @@ //===========================================================================// // SWITCHED DISPATCH PROBLEM WITH UNIT COMMITMENT //===========================================================================// -#include "DecompVar.h" #include "SDPUC_DecompApp.h" +#include "DecompVar.h" //===========================================================================// void SDPUC_DecompApp::initializeApp() { - - UtilPrintFuncBegin(m_osLog, m_classTag, - "initializeApp()", m_appParam.LogLevel, 2); - - //--- - //--- read problem instance - //--- - string instanceFile = m_appParam.DataDir - + UtilDirSlash() + m_appParam.Instance; - int rc = m_instance.readInstance(instanceFile, false); - if(rc) - throw UtilException("Error in readInstance", - "initializeApp", "MCF_DecompApp"); - //--- - //--- create models - //--- - createModels(); - - UtilPrintFuncEnd(m_osLog, m_classTag, - "initializeApp()", m_appParam.LogLevel, 2); + + UtilPrintFuncBegin(m_osLog, m_classTag, "initializeApp()", + m_appParam.LogLevel, 2); + + //--- + //--- read problem instance + //--- + string instanceFile = + m_appParam.DataDir + UtilDirSlash() + m_appParam.Instance; + int rc = m_instance.readInstance(instanceFile, false); + if (rc) + throw UtilException("Error in readInstance", "initializeApp", + "MCF_DecompApp"); + //--- + //--- create models + //--- + createModels(); + + UtilPrintFuncEnd(m_osLog, m_classTag, "initializeApp()", m_appParam.LogLevel, + 2); } //===========================================================================// -void SDPUC_DecompApp::createModels(){ - - //--- - //--- This function does the work to create the different models - //--- that will be used. This memory is owned by the user. It will - //--- be passed to the application interface and used by the algorithms. - //--- - UtilPrintFuncBegin(m_osLog, m_classTag, - "createModels()", m_appParam.LogLevel, 2); - - //--- - //--- Switched Dispatch Problem with Unit Commitment (SDPUC). 
- //--- - //--- We are given: - //--- (1) a directed graph G=(N,A), - //--- (2) a set of time periods T, - //--- - //--- min sum{(i,j) in A} f1[i,j] y1[i,j] - //--- + sum{t in T} sum{(i,j) in A} f2[i,j] y2[i,j,t] + c[i,j,t] x[i,j,t] - //--- s.t. sum{(j,i) in A} x[i,j,t] - - //--- sum{(i,j) in A} x[i,j,t] = d[i,t], for all i in N, t in T - //--- x[i,j,t] >= l[i,j,t] z[i,j,t], for all (i,j) in A, t in T - //--- x[i,j,t] <= u[i,j,t] z[i,j,t], for all (i,j) in A, t in T - //--- r[i,j] x[i,j,t] - theta[j] + theta[i] <= M (1 - z[i,j,t]) for all i,j,t - //--- r[i,j] x[i,j,t] - theta[j] + theta[i] >= -M (1 - z[i,j,t]) for all i,j,t - //--- z[i,j,t] <= y1[i,j] for all (i,j) in A, t in T //arc-investment - //--- z[i,j,t] - z[i,j,t-1] <= y2[i,j,t] for all (i,j) in A, t in T //arc(unit) commitment - //--- y[i,j] binary for all (i,j) in A - //--- - //--- NOTE: to make sure the problem is always feasible, - //---- demand may have to be modelled as arcs with large negative costs - //--- - //--- - //--- The decomposition is formed as: - //--- - //--- MASTER (A''): - //--- z[i,j,t] <= y1[i,j] for all (i,j) in A, t in T //arc-investment - //--- z[i,j,t] - z[i,j,t-1] <= y2[i,j,t] for all (i,j) in A, t in T //arc(unit) commitment - //--- y[i,j] binary for all (i,j) in A - //--- - //--- SUBPROBLEM (A'): (one block for each t in T) - //--- sum{(j,i) in A} x[i,j,t] - - //--- sum{(i,j) in A} x[i,j,t] = d[i,t], for all i in N - //--- x[i,j,t] >= l[i,j,t] z[i,j,t], for all (i,j) in A - //--- x[i,j,t] <= u[i,j,t] z[i,j,t], for all (i,j) in A - //--- r[i,j] x[i,j,t] - theta[j] + theta[i] <= M (1 - z[i,j,t]) for all i,j - //--- r[i,j] x[i,j,t] - theta[j] + theta[i] >= -M (1 - z[i,j,t]) for all i,j - - //--- - - //--- - //--- Get information about this problem instance. - //--- - int i, t, a, colIndex; - int numTimeperiods = m_instance.m_numTimeperiods; - int numArcs = m_instance.m_numArcs; - int numNodes = m_instance.m_numNodes; - int numCols = numArcs //y1-vars - + 3 * numTimeperiods * numArcs //y2-, z-, and x-vars - + numTimeperiods * numNodes; //theta-vars - SDPUC_Instance::arc * arcs = m_instance.m_arcs; - SDPUC_Instance::timeseries * ts = m_instance.m_timeseries; - cout << "\nnumCols=" << numCols << " numTimePeriods=" << numTimeperiods; - cout << "numNodes=" << numNodes << " numArcs=" << numArcs << endl; - - //--- - //--- Construct the objective function and set it - //--- y1-var columns indexed as [a] = a in [0 ; numArcs-1] - //--- y2-var columns indexed as [a,t] = a + numArcs in [numArcs ; numArcs * (1 + numTimeperiods) - 1] - //--- z-var columns indexed as [a,t] = t*numArcs + a + numArcs * (1 + numTimeperiods) - //--- in [numArcs*(1+numTimeperiods); numArcs*(1 + 2*numTimeperiods) - 1] - //--- x-var columns indexed as [a,t] = t*numArcs + a + numArcs*(1 + 2*numTimeperiods) , - //--- in [numArcs*(1 + 2*numTimeperiods) ; numArcs*(1 + 3*numTimeperiods) - 1] - //--- theta-var columns indexed as [i,t] = t*numNodes + i + numArcs*(1 + 3*numTimeperiods) , - //--- in [numArcs*(1 + 3*numTimeperiods) ; numArcs*(1 + 3*numTimeperiods) + numNodes*numTimeperiods - 1] - //-- - m_objective = new double[numCols]; - //initialise to 0 - for(i = 0; i < numCols; i++){ - m_objective[i] = 0; - } - if(!m_objective) - throw UtilExceptionMemory("createModels", "MCF_DecompApp"); - colIndex = 0; - for(a = 0; a < numArcs; a++) { - m_objective[colIndex++] = arcs[a].fcost1; //fixed arc investment cost - } - for(t = 0; t < numTimeperiods; t++) { - for(a = 0; a < numArcs; a++) { - m_objective[colIndex++] = arcs[a].fcost2; //fixed arc "start-up" 
cost - } - } - colIndex = numArcs*(1 + 2*numTimeperiods); //start-index for x-vars - for(t = 0; t < numTimeperiods; t++) { - for(a = 0; a < numArcs; a++) { - m_objective[colIndex++] = arcs[a].mcost * ts[0].values[t] ; //arc cost * probability (assume ts[0] indicate timeperiod probabilities) - } - } - //--- - //--- set the objective - //--- - setModelObjective(m_objective, numCols); - /*cout << "obj = " ; - for(i = 0; i < numCols; i++){ - cout << m_objective[i] << " "; - } - cout << endl;*/ - - //--- - //--- create the core/master model and set it - //--- - DecompConstraintSet * modelCore = new DecompConstraintSet(); - createModelCore(modelCore); - setModelCore(modelCore, "core"); - - //--- - //--- create the relaxed/subproblem models and set them - //--- - for(t = 0; t < numTimeperiods; t++){ - DecompConstraintSet * modelRelax = new DecompConstraintSet(); - string modelName = "relax" + UtilIntToStr(t); - if(m_appParam.UseSparse) - createModelRelaxSparse(modelRelax, t); - else - createModelRelax(modelRelax, t); - - setModelRelax(modelRelax, modelName, t); - } - - //--- - //--- create an extra "empty" block for the master-only vars - //--- since I don't know what OSI will do with empty problem - //--- we will make column bounds explicity rows - //--- - - UtilPrintFuncEnd(m_osLog, m_classTag, - "createModels()", m_appParam.LogLevel, 2); +void SDPUC_DecompApp::createModels() { + + //--- + //--- This function does the work to create the different models + //--- that will be used. This memory is owned by the user. It will + //--- be passed to the application interface and used by the algorithms. + //--- + UtilPrintFuncBegin(m_osLog, m_classTag, "createModels()", m_appParam.LogLevel, + 2); + + //--- + //--- Switched Dispatch Problem with Unit Commitment (SDPUC). + //--- + //--- We are given: + //--- (1) a directed graph G=(N,A), + //--- (2) a set of time periods T, + //--- + //--- min sum{(i,j) in A} f1[i,j] y1[i,j] + //--- + sum{t in T} sum{(i,j) in A} f2[i,j] y2[i,j,t] + c[i,j,t] + // x[i,j,t] + //--- s.t. 
sum{(j,i) in A} x[i,j,t] - + //--- sum{(i,j) in A} x[i,j,t] = d[i,t], for all i in N, t in T + //--- x[i,j,t] >= l[i,j,t] z[i,j,t], for all (i,j) in A, t in T + //--- x[i,j,t] <= u[i,j,t] z[i,j,t], for all (i,j) in A, t in T + //--- r[i,j] x[i,j,t] - theta[j] + theta[i] <= M (1 - z[i,j,t]) for all + // i,j,t + //--- r[i,j] x[i,j,t] - theta[j] + theta[i] >= -M (1 - z[i,j,t]) for all + // i,j,t + //--- z[i,j,t] <= y1[i,j] for all (i,j) in A, t in T + ////arc-investment + //--- z[i,j,t] - z[i,j,t-1] <= y2[i,j,t] for all (i,j) in A, t in T + ////arc(unit) commitment + //--- y[i,j] binary for all (i,j) in A + //--- + //--- NOTE: to make sure the problem is always feasible, + //---- demand may have to be modelled as arcs with large negative costs + //--- + //--- + //--- The decomposition is formed as: + //--- + //--- MASTER (A''): + //--- z[i,j,t] <= y1[i,j] for all (i,j) in A, t in T + ////arc-investment + //--- z[i,j,t] - z[i,j,t-1] <= y2[i,j,t] for all (i,j) in A, t in T + ////arc(unit) commitment + //--- y[i,j] binary for all (i,j) in A + //--- + //--- SUBPROBLEM (A'): (one block for each t in T) + //--- sum{(j,i) in A} x[i,j,t] - + //--- sum{(i,j) in A} x[i,j,t] = d[i,t], for all i in N + //--- x[i,j,t] >= l[i,j,t] z[i,j,t], for all (i,j) in A + //--- x[i,j,t] <= u[i,j,t] z[i,j,t], for all (i,j) in A + //--- r[i,j] x[i,j,t] - theta[j] + theta[i] <= M (1 - z[i,j,t]) for all i,j + //--- r[i,j] x[i,j,t] - theta[j] + theta[i] >= -M (1 - z[i,j,t]) for all i,j + + //--- + + //--- + //--- Get information about this problem instance. + //--- + int i, t, a, colIndex; + int numTimeperiods = m_instance.m_numTimeperiods; + int numArcs = m_instance.m_numArcs; + int numNodes = m_instance.m_numNodes; + int numCols = numArcs // y1-vars + + 3 * numTimeperiods * numArcs // y2-, z-, and x-vars + + numTimeperiods * numNodes; // theta-vars + SDPUC_Instance::arc *arcs = m_instance.m_arcs; + SDPUC_Instance::timeseries *ts = m_instance.m_timeseries; + cout << "\nnumCols=" << numCols << " numTimePeriods=" << numTimeperiods; + cout << "numNodes=" << numNodes << " numArcs=" << numArcs << endl; + + //--- + //--- Construct the objective function and set it + //--- y1-var columns indexed as [a] = a in [0 ; numArcs-1] + //--- y2-var columns indexed as [a,t] = a + numArcs in [numArcs ; numArcs + //* (1 + numTimeperiods) - 1] + //--- z-var columns indexed as [a,t] = t*numArcs + a + numArcs * (1 + + // numTimeperiods) + //--- in [numArcs*(1+numTimeperiods); numArcs*(1 + 2*numTimeperiods) - 1] + //--- x-var columns indexed as [a,t] = t*numArcs + a + numArcs*(1 + + // 2*numTimeperiods) , + //---in + //[numArcs*(1 + //+ 2*numTimeperiods) ; numArcs*(1 + 3*numTimeperiods) - 1] + //--- theta-var columns indexed as [i,t] = t*numNodes + i + numArcs*(1 + + // 3*numTimeperiods) , + //---in + //[numArcs*(1 + //+ 3*numTimeperiods) ; numArcs*(1 + 3*numTimeperiods) + + // numNodes*numTimeperiods + //-1] + //-- + m_objective = new double[numCols]; + // initialise to 0 + for (i = 0; i < numCols; i++) { + m_objective[i] = 0; + } + if (!m_objective) + throw UtilExceptionMemory("createModels", "MCF_DecompApp"); + colIndex = 0; + for (a = 0; a < numArcs; a++) { + m_objective[colIndex++] = arcs[a].fcost1; // fixed arc investment cost + } + for (t = 0; t < numTimeperiods; t++) { + for (a = 0; a < numArcs; a++) { + m_objective[colIndex++] = arcs[a].fcost2; // fixed arc "start-up" cost + } + } + colIndex = numArcs * (1 + 2 * numTimeperiods); // start-index for x-vars + for (t = 0; t < numTimeperiods; t++) { + for (a = 0; a < numArcs; a++) { + 
m_objective[colIndex++] = + arcs[a].mcost * + ts[0].values[t]; // arc cost * probability (assume ts[0] indicate + // timeperiod probabilities) + } + } + //--- + //--- set the objective + //--- + setModelObjective(m_objective, numCols); + /*cout << "obj = " ; + for(i = 0; i < numCols; i++){ + cout << m_objective[i] << " "; + } + cout << endl;*/ + + //--- + //--- create the core/master model and set it + //--- + DecompConstraintSet *modelCore = new DecompConstraintSet(); + createModelCore(modelCore); + setModelCore(modelCore, "core"); + + //--- + //--- create the relaxed/subproblem models and set them + //--- + for (t = 0; t < numTimeperiods; t++) { + DecompConstraintSet *modelRelax = new DecompConstraintSet(); + string modelName = "relax" + UtilIntToStr(t); + if (m_appParam.UseSparse) + createModelRelaxSparse(modelRelax, t); + else + createModelRelax(modelRelax, t); + + setModelRelax(modelRelax, modelName, t); + } + + //--- + //--- create an extra "empty" block for the master-only vars + //--- since I don't know what OSI will do with empty problem + //--- we will make column bounds explicity rows + //--- + + UtilPrintFuncEnd(m_osLog, m_classTag, "createModels()", m_appParam.LogLevel, + 2); } //===========================================================================// -void SDPUC_DecompApp::createModelCore(DecompConstraintSet * model){ - - //--- - //--- MASTER (A''): - //--- z[i,j,t] <= y1[i,j] for all (i,j) in A, t in T //arc-investment - //--- z[i,j,t] - z[i,j,t-1] <= y2[i,j,t] for all (i,j) in A, t in T //arc(unit) commitment - //--- y[i,j] binary for all (i,j) in A - //--- - int i, t, a, colIndex; - int numTimeperiods = m_instance.m_numTimeperiods; - int numArcs = m_instance.m_numArcs; - int numNodes = m_instance.m_numNodes; - int numCols = numArcs //y1-vars - + 3 * numTimeperiods * numArcs //y2-, z-, and x-vars - + numTimeperiods * numNodes; //theta-vars - int numRows = numArcs * numTimeperiods; - int col_yStartIndex = 0; - int col_zStartIndex = numArcs*(1+numTimeperiods); - int col_xStartIndex = numArcs * (1 + 2*numTimeperiods); - int col_thetaStartIndex = numArcs*(1 + 3*numTimeperiods) ; - - SDPUC_Instance::arc * arcs = m_instance.m_arcs; - - UtilPrintFuncBegin(m_osLog, m_classTag, - "createModelCore()", m_appParam.LogLevel, 2); - - //--- - //--- create space for the model matrix (row-majored) - //--- - model->M = new CoinPackedMatrix(false, 0.0, 0.0); - if(!model->M) - throw UtilExceptionMemory("createModelCore", "SDPUC_DecompApp"); - model->M->setDimensions(0, numCols); - model->reserve(numRows, numCols); - - //--- - //--- create the rows and set the col/row bounds - //--- - UtilFillN(model->colLB, numCols, -m_infinity); - UtilFillN(model->colUB, numCols, m_infinity); - - for(a = 0; a < numArcs; a++){ - colIndex = a; - //add y1-vars +void SDPUC_DecompApp::createModelCore(DecompConstraintSet *model) { + + //--- + //--- MASTER (A''): + //--- z[i,j,t] <= y1[i,j] for all (i,j) in A, t in T + ////arc-investment + //--- z[i,j,t] - z[i,j,t-1] <= y2[i,j,t] for all (i,j) in A, t in T + ////arc(unit) commitment + //--- y[i,j] binary for all (i,j) in A + //--- + int i, t, a, colIndex; + int numTimeperiods = m_instance.m_numTimeperiods; + int numArcs = m_instance.m_numArcs; + int numNodes = m_instance.m_numNodes; + int numCols = numArcs // y1-vars + + 3 * numTimeperiods * numArcs // y2-, z-, and x-vars + + numTimeperiods * numNodes; // theta-vars + int numRows = numArcs * numTimeperiods; + int col_yStartIndex = 0; + int col_zStartIndex = numArcs * (1 + numTimeperiods); + int col_xStartIndex 
= numArcs * (1 + 2 * numTimeperiods); + int col_thetaStartIndex = numArcs * (1 + 3 * numTimeperiods); + + SDPUC_Instance::arc *arcs = m_instance.m_arcs; + + UtilPrintFuncBegin(m_osLog, m_classTag, "createModelCore()", + m_appParam.LogLevel, 2); + + //--- + //--- create space for the model matrix (row-majored) + //--- + model->M = new CoinPackedMatrix(false, 0.0, 0.0); + if (!model->M) + throw UtilExceptionMemory("createModelCore", "SDPUC_DecompApp"); + model->M->setDimensions(0, numCols); + model->reserve(numRows, numCols); + + //--- + //--- create the rows and set the col/row bounds + //--- + UtilFillN(model->colLB, numCols, -m_infinity); + UtilFillN(model->colUB, numCols, m_infinity); + + for (a = 0; a < numArcs; a++) { + colIndex = a; + // add y1-vars + model->colLB[colIndex] = 0; + model->colUB[colIndex] = 1; + + for (t = 0; t < numTimeperiods; t++) { + int t_prev = 0; + if (t == 0) { + t_prev = numTimeperiods - 1; + } else { + t_prev = t - 1; + } + + colIndex = a + t * numArcs + numArcs; + // add y2-vars model->colLB[colIndex] = 0; model->colUB[colIndex] = 1; - - for(t = 0; t < numTimeperiods; t++){ - int t_prev = 0; - if(t == 0) { - t_prev = numTimeperiods - 1; - } - else { - t_prev = t - 1; - } - - colIndex = a + t * numArcs + numArcs; - //add y2-vars - model->colLB[colIndex] = 0; - model->colUB[colIndex] = 1; - - CoinPackedVector row1; // 0 <= y1[i,j] - z[i,j,t] for all (i,j) in A, t in T - CoinPackedVector row2; // 0 <= y2[i,j,t] - z[i,j,t] + z[i,j,t-1] for all (i,j) in A, t in T - CoinPackedVector rowFix_y1; //fix y1=1 - CoinPackedVector y2upper1; // y2(t) <= z(t) : if off in t then we dont start-up - CoinPackedVector y2upper2; // y2(t) <= 1-z(t-1) : if on in t-1 then dont start up in t - - //insert y1-var coefficient - row1.insert(a, 1.0); - rowFix_y1.insert(a, 1.0); - //insert y2-var coefficient - colIndex = t * numArcs + a + numArcs; - row2.insert(colIndex, 1.0); - y2upper1.insert(colIndex, -1.0); - y2upper2.insert(colIndex, -1.0); - //add z-vars - colIndex = t * numArcs + a + col_zStartIndex; - model->colLB[colIndex] = 0; - model->colUB[colIndex] = 1; - //insert z-var coefficient - row1.insert(colIndex, -1.0); - row2.insert(colIndex, -1.0); - y2upper1.insert(colIndex, 1.0); - colIndex = t_prev * numArcs + a + col_zStartIndex; - row2.insert(colIndex, 1.0); - y2upper2.insert(colIndex, -1.0); - - std::string rowName1_1 = "MP1_1_" + UtilIntToStr(a) + "_" + UtilIntToStr(t); - std::string rowName1_2 = "MP1_2_" + UtilIntToStr(a) + "_" + UtilIntToStr(t); - std::string rowName2_1 = "MP2_1_" + UtilIntToStr(a) + "_" + UtilIntToStr(t); - std::string rowName2_2 = "MP2_2_" + UtilIntToStr(a) + "_" + UtilIntToStr(t); - std::string rowNameFix = "fix_y1_" + UtilIntToStr(a) + "_" + UtilIntToStr(t); - - //TODO: any issue with range constraint - model->appendRow(row1, 0.0, m_infinity, rowName1_1); //add MP1_constraints (arc investments) - - model->appendRow(row1, -m_infinity, 1.0, rowName1_2); //add MP1_constraints (arc investments) - if(arcs[a].tail == 0) { //ONLY for supply arcs (!!) 
- model->appendRow(row2, 0.0, m_infinity, rowName2_1); //add MP2_constraints (arc commitment) - model->appendRow(row2, -m_infinity, 1.0, rowName2_2); //add MP2_constraints (arc commitment) - } - model->appendRow(rowFix_y1, 1.0, m_infinity, rowNameFix); //add fix y1 vars - //model->appendRow(y2upper1, 0.0, m_infinity, std::string("y2-upperbound-1")); //add upperbounds on y2 - //model->appendRow(y2upper2, -1.0, m_infinity, std::string("y2-upperbound-2")); //..to strengthen formulation + + CoinPackedVector + row1; // 0 <= y1[i,j] - z[i,j,t] for all (i,j) in A, t in T + CoinPackedVector row2; // 0 <= y2[i,j,t] - z[i,j,t] + z[i,j,t-1] for all + // (i,j) in A, t in T + CoinPackedVector rowFix_y1; // fix y1=1 + CoinPackedVector + y2upper1; // y2(t) <= z(t) : if off in t then we dont start-up + CoinPackedVector + y2upper2; // y2(t) <= 1-z(t-1) : if on in t-1 then dont start up in t + + // insert y1-var coefficient + row1.insert(a, 1.0); + rowFix_y1.insert(a, 1.0); + // insert y2-var coefficient + colIndex = t * numArcs + a + numArcs; + row2.insert(colIndex, 1.0); + y2upper1.insert(colIndex, -1.0); + y2upper2.insert(colIndex, -1.0); + // add z-vars + colIndex = t * numArcs + a + col_zStartIndex; + model->colLB[colIndex] = 0; + model->colUB[colIndex] = 1; + // insert z-var coefficient + row1.insert(colIndex, -1.0); + row2.insert(colIndex, -1.0); + y2upper1.insert(colIndex, 1.0); + colIndex = t_prev * numArcs + a + col_zStartIndex; + row2.insert(colIndex, 1.0); + y2upper2.insert(colIndex, -1.0); + + std::string rowName1_1 = + "MP1_1_" + UtilIntToStr(a) + "_" + UtilIntToStr(t); + std::string rowName1_2 = + "MP1_2_" + UtilIntToStr(a) + "_" + UtilIntToStr(t); + std::string rowName2_1 = + "MP2_1_" + UtilIntToStr(a) + "_" + UtilIntToStr(t); + std::string rowName2_2 = + "MP2_2_" + UtilIntToStr(a) + "_" + UtilIntToStr(t); + std::string rowNameFix = + "fix_y1_" + UtilIntToStr(a) + "_" + UtilIntToStr(t); + + // TODO: any issue with range constraint + model->appendRow(row1, 0.0, m_infinity, + rowName1_1); // add MP1_constraints (arc investments) + + model->appendRow(row1, -m_infinity, 1.0, + rowName1_2); // add MP1_constraints (arc investments) + if (arcs[a].tail == 0) { // ONLY for supply arcs (!!) 
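// The two single-sided MP2 rows appended just below encode
//   0 <= y2[a,t] - z[a,t] + z[a,t-1]   and   y2[a,t] - z[a,t] + z[a,t-1] <= 1.
// If ranged rows are safe here (the "TODO: any issue with range constraint"
// above leaves that open), the same pair could be written as one ranged row;
// a sketch only, not part of this change:
//   model->appendRow(row2, 0.0, 1.0, rowName2_1);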
+ model->appendRow(row2, 0.0, m_infinity, + rowName2_1); // add MP2_constraints (arc commitment) + model->appendRow(row2, -m_infinity, 1.0, + rowName2_2); // add MP2_constraints (arc commitment) } - } - - //--- - //--- create column names (helps with debugging) - //--- - //y-vars - for(a = 0; a < numArcs; a++){ - std::string colName = "y1(a" + UtilIntToStr(a) + "(" + - UtilIntToStr(arcs[a].tail) + "," + - UtilIntToStr(arcs[a].head) + "))"; + model->appendRow(rowFix_y1, 1.0, m_infinity, + rowNameFix); // add fix y1 vars + // model->appendRow(y2upper1, 0.0, m_infinity, + // std::string("y2-upperbound-1")); //add upperbounds on y2 + // model->appendRow(y2upper2, -1.0, m_infinity, + // std::string("y2-upperbound-2")); //..to strengthen formulation + } + } + + //--- + //--- create column names (helps with debugging) + //--- + // y-vars + for (a = 0; a < numArcs; a++) { + std::string colName = "y1(a" + UtilIntToStr(a) + "(" + + UtilIntToStr(arcs[a].tail) + "," + + UtilIntToStr(arcs[a].head) + "))"; + model->colNames.push_back(colName); + } + for (t = 0; t < numTimeperiods; t++) { + for (a = 0; a < numArcs; a++) { + std::string colName = "y2(t" + UtilIntToStr(t) + ",a" + UtilIntToStr(a) + + "(" + UtilIntToStr(arcs[a].tail) + "," + + UtilIntToStr(arcs[a].head) + "))"; model->colNames.push_back(colName); - } - for(t = 0; t < numTimeperiods; t++){ - for(a = 0; a < numArcs; a++){ - std::string colName = "y2(t" + UtilIntToStr(t) + ",a" + UtilIntToStr(a) + "(" + - UtilIntToStr(arcs[a].tail) + "," + - UtilIntToStr(arcs[a].head) + "))"; - model->colNames.push_back(colName); - } - } - //z-vars - for(t = 0; t < numTimeperiods; t++){ - for(a = 0; a < numArcs; a++){ - std::string colName = "z(t" + UtilIntToStr(t) + ",a" + UtilIntToStr(a) + "(" + - UtilIntToStr(arcs[a].tail) + "," + - UtilIntToStr(arcs[a].head) + "))"; - model->colNames.push_back(colName); - } - } - //x-vars - for(t = 0; t < numTimeperiods; t++){ - for(a = 0; a < numArcs; a++){ - std::string colName = "x(t" + UtilIntToStr(t) + ",a" + UtilIntToStr(a) + "(" + - UtilIntToStr(arcs[a].tail) + "," + - UtilIntToStr(arcs[a].head) + "))"; - model->colNames.push_back(colName); - } - } - //theta-vars - for(t = 0; t < numTimeperiods; t++){ - for(i = 0; i < numNodes; i++){ - std::string colName = "theta(t" + UtilIntToStr(t) + ",n" + UtilIntToStr(i) + ")"; - model->colNames.push_back(colName); - } - } - - //--- - //--- create a list of the "active" columns (those related - //--- to this commmodity) all other columns are fixed to 0 - //--- - UtilFillN(model->colLB, numCols, 0.0); - UtilFillN(model->colUB, numCols, 0.0); - colIndex = 0; - for(a = 0; a < numArcs; a++){ - //set y-columns active - - //model->colLB[colIndex] = 0; - //model->colUB[colIndex] = 1; - //model->activeColumns.push_back(colIndex); - //set y-columns as master-only columns - colIndex = col_yStartIndex + a; - model->masterOnlyCols.push_back(colIndex); //y1-vars - for(t = 0; t < numTimeperiods; t++){ - colIndex = col_yStartIndex + t * numArcs + a + numArcs; - model->masterOnlyCols.push_back(colIndex); //y2-vars - } - } - - if(m_appParam.LogLevel >= 3){ - (*m_osLog) << "Master only columns:" << endl; - UtilPrintVector(model->masterOnlyCols, m_osLog); - if(model->getColNames().size() > 0) - UtilPrintVector(model->masterOnlyCols, - model->getColNames(), m_osLog); - } - - //--- - //--- set the indices of the integer variables of model - //--- - UtilIotaN(model->integerVars, col_xStartIndex, col_yStartIndex); - UtilPrintFuncEnd(m_osLog, m_classTag, - "createModelCore()", m_appParam.LogLevel, 2); 
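// The flat column layout used throughout createModelCore packs, in order, the
// y1, y2, z, x and theta variables.  A self-contained sketch of the index
// arithmetic implied by the comments and loops above; the struct and its names
// are illustrative only (the real code keeps the offsets in
// col_yStartIndex / col_zStartIndex / col_xStartIndex / col_thetaStartIndex):
struct SdpucColumnLayout {
  int numArcs, numNodes, numT;
  int y1(int a) const { return a; }
  int y2(int a, int t) const { return numArcs + t * numArcs + a; }
  int z(int a, int t) const { return numArcs * (1 + numT) + t * numArcs + a; }
  int x(int a, int t) const {
    return numArcs * (1 + 2 * numT) + t * numArcs + a;
  }
  int theta(int n, int t) const {
    return numArcs * (1 + 3 * numT) + t * numNodes + n;
  }
  int totalCols() const { return numArcs * (1 + 3 * numT) + numNodes * numT; }
};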
- //--- - //--- display problem - //--- - //int j = 0; - //cout << "find: \nActive cols: "; - //std::vector::const_iterator it; - //for(it = model->getActiveColumns().begin(); it != model->getActiveColumns().end(); it++){ - //cout << *it << " "; - //} - // - //cout << "\ns.t.\n"; - //for(j = 0; j < model->getNumRows(); j++){ - //cout << model->rowNames[j] << " : \t\t"; - //cout << model->rowLB[j] << " \t <= \t" ; - //for (i = 0; i < model->getNumCols(); i++){ - ////cout << "numCols=" << model->getNumCols() << endl; - //if(model->getMatrix()->getCoefficient(j,i) != 0) { - ////cout << "i" << i << " "; - //cout << " " << model->M->getCoefficient(j,i) << " " ; - //cout << model->getColNames()[i] ; - //} - //else {cout << "" ;} - //} - //cout << " \t <= \t " << model->rowUB[j] << endl ; - // - //} - + } + } + // z-vars + for (t = 0; t < numTimeperiods; t++) { + for (a = 0; a < numArcs; a++) { + std::string colName = "z(t" + UtilIntToStr(t) + ",a" + UtilIntToStr(a) + + "(" + UtilIntToStr(arcs[a].tail) + "," + + UtilIntToStr(arcs[a].head) + "))"; + model->colNames.push_back(colName); + } + } + // x-vars + for (t = 0; t < numTimeperiods; t++) { + for (a = 0; a < numArcs; a++) { + std::string colName = "x(t" + UtilIntToStr(t) + ",a" + UtilIntToStr(a) + + "(" + UtilIntToStr(arcs[a].tail) + "," + + UtilIntToStr(arcs[a].head) + "))"; + model->colNames.push_back(colName); + } + } + // theta-vars + for (t = 0; t < numTimeperiods; t++) { + for (i = 0; i < numNodes; i++) { + std::string colName = + "theta(t" + UtilIntToStr(t) + ",n" + UtilIntToStr(i) + ")"; + model->colNames.push_back(colName); + } + } + + //--- + //--- create a list of the "active" columns (those related + //--- to this commmodity) all other columns are fixed to 0 + //--- + UtilFillN(model->colLB, numCols, 0.0); + UtilFillN(model->colUB, numCols, 0.0); + colIndex = 0; + for (a = 0; a < numArcs; a++) { + // set y-columns active + + // model->colLB[colIndex] = 0; + // model->colUB[colIndex] = 1; + // model->activeColumns.push_back(colIndex); + // set y-columns as master-only columns + colIndex = col_yStartIndex + a; + model->masterOnlyCols.push_back(colIndex); // y1-vars + for (t = 0; t < numTimeperiods; t++) { + colIndex = col_yStartIndex + t * numArcs + a + numArcs; + model->masterOnlyCols.push_back(colIndex); // y2-vars + } + } + + if (m_appParam.LogLevel >= 3) { + (*m_osLog) << "Master only columns:" << endl; + UtilPrintVector(model->masterOnlyCols, m_osLog); + if (model->getColNames().size() > 0) + UtilPrintVector(model->masterOnlyCols, model->getColNames(), m_osLog); + } + + //--- + //--- set the indices of the integer variables of model + //--- + UtilIotaN(model->integerVars, col_xStartIndex, col_yStartIndex); + UtilPrintFuncEnd(m_osLog, m_classTag, "createModelCore()", + m_appParam.LogLevel, 2); + //--- + //--- display problem + //--- + // int j = 0; + // cout << "find: \nActive cols: "; + // std::vector::const_iterator it; + // for(it = model->getActiveColumns().begin(); it != + // model->getActiveColumns().end(); it++){ cout << *it << " "; + //} + // + // cout << "\ns.t.\n"; + // for(j = 0; j < model->getNumRows(); j++){ + // cout << model->rowNames[j] << " : \t\t"; + // cout << model->rowLB[j] << " \t <= \t" ; + // for (i = 0; i < model->getNumCols(); i++){ + ////cout << "numCols=" << model->getNumCols() << endl; + // if(model->getMatrix()->getCoefficient(j,i) != 0) { + ////cout << "i" << i << " "; + // cout << " " << model->M->getCoefficient(j,i) << " " ; + // cout << model->getColNames()[i] ; + //} + // else {cout << "" ;} + 
//} + // cout << " \t <= \t " << model->rowUB[j] << endl ; + // + //} } - //===========================================================================// -void SDPUC_DecompApp::createModelRelax(DecompConstraintSet * model, - int tpId){ - - //--- - //--- SUBPROBLEM (A'): (one block for each k in K) - //--- sum{(j,i) in A} x[k,i,j] - - //--- sum{(i,j) in A} x[k,i,j] = d[i,k], for all i in N - //--- x[k,i,j] integer >= l[i,j] <= u[i,j], for all (i,j) in A - //--- For k=(s,t) in K, - //--- d[i,k] = -d[k] if i=s - //--- = d[k] if i=t - //--- = 0, otherwise - - //--- SUBPROBLEM (A'): (one block for each t in T) - //--- sum{(j,i) in A} x[i,j,t] - - //--- sum{(i,j) in A} x[i,j,t] = d[i,t], for all i in N\{s}, where s is the super source - //--- x[i,j,t] >= l[i,j,t] z[i,j,t], for all (i,j) in A - //--- x[i,j,t] <= u[i,j,t] z[i,j,t], for all (i,j) in A - //--- r[i,j] x[i,j,t] - theta[j] + theta[i] <= M (1 - z[i,j,t]) for all i,j in A - //--- r[i,j] x[i,j,t] - theta[j] + theta[i] >= -M (1 - z[i,j,t]) for all i,j in A - //--- - int a, i, j, head, tail, colIndex, source; - int numTimeperiods = m_instance.m_numTimeperiods; - int numArcs = m_instance.m_numArcs; - int numSwitchings = m_instance.m_numSwitchings; - int numNodes = m_instance.m_numNodes; - int numCols = numArcs //y1-vars - + 3 * numTimeperiods * numArcs //y2-, z-, and x-vars - + numTimeperiods * numNodes; //theta-vars - - SDPUC_Instance::arc * arcs = m_instance.m_arcs; - SDPUC_Instance::node * nodes = m_instance.m_nodes; - SDPUC_Instance::timeseries * ts = m_instance.m_timeseries; - - int numACArcs = 0; - for(a = 0; a < numArcs; a++){ - if(arcs[a].acline == 1) { numACArcs++; } - } - - int numRows = numNodes - 1 // balance - + 2 * numArcs // capacity - + 2 * numACArcs // and kirchoffs constraints - + 1; // max. allowed no. 
of switches employed sum{z} <= k - - int col_yStartIndex = 0; - int col_zStartIndex = numArcs*(1+numTimeperiods); - int col_xStartIndex = numArcs * (1 + 2*numTimeperiods); - int col_thetaStartIndex = numArcs*(1 + 3*numTimeperiods) ; - - double bigM = 100; - - - - - UtilPrintFuncBegin(m_osLog, m_classTag, - "createModelRelax()", m_appParam.LogLevel, 2); - - - - - //--- - //--- create space for the model matrix (row-majored) - //--- - model->M = new CoinPackedMatrix(false, 0.0, 0.0); - if(!model->M) - throw UtilExceptionMemory("createModelCore", "MCF_DecompApp"); - model->M->setDimensions(0, numCols); - model->reserve(numRows, numCols); - - //--- - //--- set super source node - //--- - source = 0; - - //--- - //--- create the rows - //--- NOTE: this is somewhat inefficient (but simple) - //--- - int hej = 0; - cout << "Generating sub-problem " << tpId << endl; hej++; - //cout << "y_index = " << col_yStartIndex << endl; - //cout << "z_index = " << col_zStartIndex << endl; - //cout << "x_index = " << col_xStartIndex << endl; - //cout << "theta_index = " << col_thetaStartIndex << endl; - //--- create balance constraints - for(i = 0; i < numNodes; i++){ - CoinPackedVector row; - for(a = 0; a < numArcs; a++){ - tail = arcs[a].tail; - head = arcs[a].head; - if(head == i){ - colIndex = col_xStartIndex + tpId * numArcs + a; - row.insert(colIndex, 1.0); - } - else if(tail == i){ - colIndex = col_xStartIndex + tpId * numArcs + a; - row.insert(colIndex, -1.0); - } - } - - //set demand d - double d = nodes[i].demand * ts[nodes[i].tsdemand].values[tpId]; - std::string rowName = "balance_" + UtilIntToStr(i) + "_" + UtilIntToStr(tpId); - if(i == source) { - model->appendRow(row, -m_infinity, 0.0, rowName); - } - else { - model->appendRow(row, d, d, rowName); - } - } - - //--- create capacity constraints and kirchoffs constraints - for(a = 0; a < numArcs; a++){ - CoinPackedVector rowLB, rowUB; //lower-, and upperbound - //for(a = 0; a < numArcs; a++){ +void SDPUC_DecompApp::createModelRelax(DecompConstraintSet *model, int tpId) { + + //--- + //--- SUBPROBLEM (A'): (one block for each k in K) + //--- sum{(j,i) in A} x[k,i,j] - + //--- sum{(i,j) in A} x[k,i,j] = d[i,k], for all i in N + //--- x[k,i,j] integer >= l[i,j] <= u[i,j], for all (i,j) in A + //--- For k=(s,t) in K, + //--- d[i,k] = -d[k] if i=s + //--- = d[k] if i=t + //--- = 0, otherwise + + //--- SUBPROBLEM (A'): (one block for each t in T) + //--- sum{(j,i) in A} x[i,j,t] - + //--- sum{(i,j) in A} x[i,j,t] = d[i,t], for all i in N\{s}, where s + // is the super source + //--- x[i,j,t] >= l[i,j,t] z[i,j,t], for all (i,j) in A + //--- x[i,j,t] <= u[i,j,t] z[i,j,t], for all (i,j) in A + //--- r[i,j] x[i,j,t] - theta[j] + theta[i] <= M (1 - z[i,j,t]) for all i,j + // in A + //--- r[i,j] x[i,j,t] - theta[j] + theta[i] >= -M (1 - z[i,j,t]) for all i,j + // in A + //--- + int a, i, j, head, tail, colIndex, source; + int numTimeperiods = m_instance.m_numTimeperiods; + int numArcs = m_instance.m_numArcs; + int numSwitchings = m_instance.m_numSwitchings; + int numNodes = m_instance.m_numNodes; + int numCols = numArcs // y1-vars + + 3 * numTimeperiods * numArcs // y2-, z-, and x-vars + + numTimeperiods * numNodes; // theta-vars + + SDPUC_Instance::arc *arcs = m_instance.m_arcs; + SDPUC_Instance::node *nodes = m_instance.m_nodes; + SDPUC_Instance::timeseries *ts = m_instance.m_timeseries; + + int numACArcs = 0; + for (a = 0; a < numArcs; a++) { + if (arcs[a].acline == 1) { + numACArcs++; + } + } + + int numRows = numNodes - 1 // balance + + 2 * numArcs // 
capacity + + 2 * numACArcs // and kirchoffs constraints + + 1; // max. allowed no. of switches employed sum{z} <= k + + int col_yStartIndex = 0; + int col_zStartIndex = numArcs * (1 + numTimeperiods); + int col_xStartIndex = numArcs * (1 + 2 * numTimeperiods); + int col_thetaStartIndex = numArcs * (1 + 3 * numTimeperiods); + + double bigM = 100; + + UtilPrintFuncBegin(m_osLog, m_classTag, "createModelRelax()", + m_appParam.LogLevel, 2); + + //--- + //--- create space for the model matrix (row-majored) + //--- + model->M = new CoinPackedMatrix(false, 0.0, 0.0); + if (!model->M) + throw UtilExceptionMemory("createModelCore", "MCF_DecompApp"); + model->M->setDimensions(0, numCols); + model->reserve(numRows, numCols); + + //--- + //--- set super source node + //--- + source = 0; + + //--- + //--- create the rows + //--- NOTE: this is somewhat inefficient (but simple) + //--- + int hej = 0; + cout << "Generating sub-problem " << tpId << endl; + hej++; + // cout << "y_index = " << col_yStartIndex << endl; + // cout << "z_index = " << col_zStartIndex << endl; + // cout << "x_index = " << col_xStartIndex << endl; + // cout << "theta_index = " << col_thetaStartIndex << endl; + //--- create balance constraints + for (i = 0; i < numNodes; i++) { + CoinPackedVector row; + for (a = 0; a < numArcs; a++) { tail = arcs[a].tail; head = arcs[a].head; - double lb = arcs[a].lb * ts[arcs[a].tscap].values[tpId]; - double ub = arcs[a].ub * ts[arcs[a].tscap].values[tpId]; - double reactance = arcs[a].weight; - colIndex = col_zStartIndex + tpId * numArcs + a; - rowLB.insert(colIndex, -lb); - rowUB.insert(colIndex, -ub); - - colIndex = col_xStartIndex + tpId * numArcs + a; - rowLB.insert(colIndex, 1.0); - rowUB.insert(colIndex, 1.0); - - - //set flow lower and upperbound - std::string rowNameLB = "lb_" + UtilIntToStr(a) + "_" + UtilIntToStr(tpId); - std::string rowNameUB = "ub_" + UtilIntToStr(a) + "_" + UtilIntToStr(tpId); - model->appendRow(rowLB, 0.0, m_infinity, rowNameLB); - model->appendRow(rowUB, -m_infinity, 0.0, rowNameUB); - - //set kirchoffs voltage constraints for ac-arcs - if(arcs[a].acline == 1) { - CoinPackedVector rowK1, rowK2; //Kirchoff1, and Kirchoff2 - colIndex = col_zStartIndex + tpId * numArcs + a; - rowK1.insert(colIndex, bigM); - rowK2.insert(colIndex, -bigM); - colIndex = col_xStartIndex + tpId * numArcs + a; - rowK1.insert(colIndex, reactance); - rowK2.insert(colIndex, reactance); - for(i = 0; i < numNodes; i++){ - if(head == i){ - colIndex = col_thetaStartIndex + tpId * numNodes + i; - rowK1.insert(colIndex, -1.0); - rowK2.insert(colIndex, -1.0); - } - else if(tail == i){ - colIndex = col_thetaStartIndex + tpId * numNodes + i; - rowK1.insert(colIndex, 1.0); - rowK2.insert(colIndex, 1.0); - } - } - std::string rowNameK1 = "k1_" + UtilIntToStr(a) + "_" + UtilIntToStr(tpId); - std::string rowNameK2 = "k2_" + UtilIntToStr(a) + "_" + UtilIntToStr(tpId); - model->appendRow(rowK1, -m_infinity, bigM, rowNameK1); - model->appendRow(rowK2, -bigM, m_infinity, rowNameK2); - } - - } - - //--- create max. no. 
of switchings-constraint - CoinPackedVector row; - for(a = 0; a < numArcs; a++){ - colIndex = col_zStartIndex + tpId * numArcs + a; - if(arcs[a].acline == 1) { //only for arcs with voltage constraints - row.insert(colIndex, 1.0); + if (head == i) { + colIndex = col_xStartIndex + tpId * numArcs + a; + row.insert(colIndex, 1.0); + } else if (tail == i) { + colIndex = col_xStartIndex + tpId * numArcs + a; + row.insert(colIndex, -1.0); } - } - //model->appendRow(row, numACArcs-numSwitchings, numACArcs, std::string("sum{1-z}<=k")); - std::string rowNameSumK = "sumk_" + UtilIntToStr(tpId); - model->appendRow(row, numACArcs-numSwitchings, numACArcs, rowNameSumK); - - - //--- - //--- create a list of the "active" columns (those related - //--- to this commmodity) all other columns are fixed to 0 - //--- - UtilFillN(model->colLB, numCols, 0.0); - UtilFillN(model->colUB, numCols, 0.0); - colIndex = 0; - for(a = 0; a < numArcs; a++){ - //set y-columns active - //if(tpId == 0) { - //colIndex = col_yStartIndex + a; - // model->colLB[colIndex] = 0; - // model->colUB[colIndex] = 1; - // model->activeColumns.push_back(colIndex); - //cout << "" << colIndex << ", " ; - //} - //set z-columns active + } + + // set demand d + double d = nodes[i].demand * ts[nodes[i].tsdemand].values[tpId]; + std::string rowName = + "balance_" + UtilIntToStr(i) + "_" + UtilIntToStr(tpId); + if (i == source) { + model->appendRow(row, -m_infinity, 0.0, rowName); + } else { + model->appendRow(row, d, d, rowName); + } + } + + //--- create capacity constraints and kirchoffs constraints + for (a = 0; a < numArcs; a++) { + CoinPackedVector rowLB, rowUB; // lower-, and upperbound + // for(a = 0; a < numArcs; a++){ + tail = arcs[a].tail; + head = arcs[a].head; + double lb = arcs[a].lb * ts[arcs[a].tscap].values[tpId]; + double ub = arcs[a].ub * ts[arcs[a].tscap].values[tpId]; + double reactance = arcs[a].weight; + colIndex = col_zStartIndex + tpId * numArcs + a; + rowLB.insert(colIndex, -lb); + rowUB.insert(colIndex, -ub); + + colIndex = col_xStartIndex + tpId * numArcs + a; + rowLB.insert(colIndex, 1.0); + rowUB.insert(colIndex, 1.0); + + // set flow lower and upperbound + std::string rowNameLB = "lb_" + UtilIntToStr(a) + "_" + UtilIntToStr(tpId); + std::string rowNameUB = "ub_" + UtilIntToStr(a) + "_" + UtilIntToStr(tpId); + model->appendRow(rowLB, 0.0, m_infinity, rowNameLB); + model->appendRow(rowUB, -m_infinity, 0.0, rowNameUB); + + // set kirchoffs voltage constraints for ac-arcs + if (arcs[a].acline == 1) { + CoinPackedVector rowK1, rowK2; // Kirchoff1, and Kirchoff2 colIndex = col_zStartIndex + tpId * numArcs + a; - model->colLB[colIndex] = 0; //1 - arcs[a].switchable; //if arc not switchable then fix z=1 - model->colUB[colIndex] = 1; - model->activeColumns.push_back(colIndex); - //set x-columns active + rowK1.insert(colIndex, bigM); + rowK2.insert(colIndex, -bigM); colIndex = col_xStartIndex + tpId * numArcs + a; - double arcLB = min(0.0, arcs[a].lb * ts[arcs[a].tscap].values[tpId]); //!!**** - double arcUB = arcs[a].ub * ts[arcs[a].tscap].values[tpId]; - model->colLB[colIndex] = arcLB; - model->colUB[colIndex] = arcUB; - model->activeColumns.push_back(colIndex); - } - //set theta-columns active - for(i = 0; i < numNodes; i++){ - colIndex = col_thetaStartIndex + tpId * numNodes + i; - model->colLB[colIndex] = -m_infinity; - model->colUB[colIndex] = m_infinity; - model->activeColumns.push_back(colIndex); - } - - //--- - //--- set the indices of the integer variables of model - //--- - UtilIotaN(model->integerVars, 
col_xStartIndex, col_yStartIndex); - - ////Display problem - //cout << "find: \nActive cols: "; - //std::vector::const_iterator it; - //for(it = model->getActiveColumns().begin(); it < model->getActiveColumns().end(); it++){ - //cout << *it << " "; - //} - //cout << "\ns.t.\n"; - //for(j = 0; j < model->getNumRows(); j++){ - //cout << model->rowNames[j] << " : \t\t"; - //cout << model->rowLB[j] << " \t <= \t" ; - //for (i = 0; i < model->getNumCols(); i++){ - ////cout << "numCols=" << model->getNumCols() << endl; - //if(model->getMatrix()->getCoefficient(j,i) != 0) { - ////cout << "i" << i << " "; - //cout << " " << model->M->getCoefficient(j,i) << " (" << i << ") + "; - ////cout << model->getColNames()[i] ; - //} - //else {cout << "" ;} - //} - //cout << " \t <= \t " << model->rowUB[j] << endl ; - // - //} - - UtilPrintFuncEnd(m_osLog, m_classTag, - "createModelRelax()", m_appParam.LogLevel, 2); + rowK1.insert(colIndex, reactance); + rowK2.insert(colIndex, reactance); + for (i = 0; i < numNodes; i++) { + if (head == i) { + colIndex = col_thetaStartIndex + tpId * numNodes + i; + rowK1.insert(colIndex, -1.0); + rowK2.insert(colIndex, -1.0); + } else if (tail == i) { + colIndex = col_thetaStartIndex + tpId * numNodes + i; + rowK1.insert(colIndex, 1.0); + rowK2.insert(colIndex, 1.0); + } + } + std::string rowNameK1 = + "k1_" + UtilIntToStr(a) + "_" + UtilIntToStr(tpId); + std::string rowNameK2 = + "k2_" + UtilIntToStr(a) + "_" + UtilIntToStr(tpId); + model->appendRow(rowK1, -m_infinity, bigM, rowNameK1); + model->appendRow(rowK2, -bigM, m_infinity, rowNameK2); + } + } + + //--- create max. no. of switchings-constraint + CoinPackedVector row; + for (a = 0; a < numArcs; a++) { + colIndex = col_zStartIndex + tpId * numArcs + a; + if (arcs[a].acline == 1) { // only for arcs with voltage constraints + row.insert(colIndex, 1.0); + } + } + // model->appendRow(row, numACArcs-numSwitchings, numACArcs, + // std::string("sum{1-z}<=k")); + std::string rowNameSumK = "sumk_" + UtilIntToStr(tpId); + model->appendRow(row, numACArcs - numSwitchings, numACArcs, rowNameSumK); + + //--- + //--- create a list of the "active" columns (those related + //--- to this commmodity) all other columns are fixed to 0 + //--- + UtilFillN(model->colLB, numCols, 0.0); + UtilFillN(model->colUB, numCols, 0.0); + colIndex = 0; + for (a = 0; a < numArcs; a++) { + // set y-columns active + // if(tpId == 0) { + // colIndex = col_yStartIndex + a; + // model->colLB[colIndex] = 0; + // model->colUB[colIndex] = 1; + // model->activeColumns.push_back(colIndex); + // cout << "" << colIndex << ", " ; + //} + // set z-columns active + colIndex = col_zStartIndex + tpId * numArcs + a; + model->colLB[colIndex] = + 0; // 1 - arcs[a].switchable; //if arc not switchable then fix z=1 + model->colUB[colIndex] = 1; + model->activeColumns.push_back(colIndex); + // set x-columns active + colIndex = col_xStartIndex + tpId * numArcs + a; + double arcLB = + min(0.0, arcs[a].lb * ts[arcs[a].tscap].values[tpId]); //!!**** + double arcUB = arcs[a].ub * ts[arcs[a].tscap].values[tpId]; + model->colLB[colIndex] = arcLB; + model->colUB[colIndex] = arcUB; + model->activeColumns.push_back(colIndex); + } + // set theta-columns active + for (i = 0; i < numNodes; i++) { + colIndex = col_thetaStartIndex + tpId * numNodes + i; + model->colLB[colIndex] = -m_infinity; + model->colUB[colIndex] = m_infinity; + model->activeColumns.push_back(colIndex); + } + + //--- + //--- set the indices of the integer variables of model + //--- + UtilIotaN(model->integerVars, 
col_xStartIndex, col_yStartIndex); + + ////Display problem + // cout << "find: \nActive cols: "; + // std::vector::const_iterator it; + // for(it = model->getActiveColumns().begin(); it < + // model->getActiveColumns().end(); it++){ cout << *it << " "; + //} + // cout << "\ns.t.\n"; + // for(j = 0; j < model->getNumRows(); j++){ + // cout << model->rowNames[j] << " : \t\t"; + // cout << model->rowLB[j] << " \t <= \t" ; + // for (i = 0; i < model->getNumCols(); i++){ + ////cout << "numCols=" << model->getNumCols() << endl; + // if(model->getMatrix()->getCoefficient(j,i) != 0) { + ////cout << "i" << i << " "; + // cout << " " << model->M->getCoefficient(j,i) << " (" << i << ") + "; + ////cout << model->getColNames()[i] ; + //} + // else {cout << "" ;} + //} + // cout << " \t <= \t " << model->rowUB[j] << endl ; + // + //} + + UtilPrintFuncEnd(m_osLog, m_classTag, "createModelRelax()", + m_appParam.LogLevel, 2); } //===========================================================================// -void SDPUC_DecompApp::createModelRelaxSparse(DecompConstraintSet * model, - int commId){ - - ////--- - ////--- SUBPROBLEM (A'): (one block for each k in K) - ////--- sum{(j,i) in A} x[k,i,j] - - ////--- sum{(i,j) in A} x[k,i,j] = d[i,k], for all i in N - ////--- x[k,i,j] integer >= l[i,j] <= u[i,j], for all (i,j) in A - ////--- For k=(s,t) in K, - ////--- d[i,k] = -d[k] if i=s - ////--- = d[k] if i=t - ////--- = 0, otherwise - ////--- - //int a, i, head, tail, origColIndex, source, sink; - //int numArcs = m_instance.m_numArcs; - //int numNodes = m_instance.m_numNodes; - //int numCommodities = m_instance.m_numCommodities; - //int numCols = numArcs; - //int numRows = numNodes; - //int numColsOrig = numArcs * numCommodities; - //SILCEP_Instance::arc * arcs = m_instance.m_arcs; - //SILCEP_Instance::commodity * commodities = m_instance.m_commodities; - - //UtilPrintFuncBegin(m_osLog, m_classTag, - // "createModelRelaxSparse()", m_appParam.LogLevel, 2); - - ////--- - ////--- create space for the model matrix (row-majored) - ////--- - //model->M = new CoinPackedMatrix(false, 0.0, 0.0); - //if(!model->M) - // throw UtilExceptionMemory("createModelCore", "MCF_DecompApp"); - //model->M->setDimensions(0, numCols); - //model->reserve(numRows, numCols); - //model->setSparse(numColsOrig); - - ////--- - ////--- get this commodity's source and sink node - ////--- - //source = commodities[commId].source; - //sink = commodities[commId].sink; - // - ////--- - ////--- create the rows - ////--- NOTE: this is somewhat inefficient (but simple) - ////--- - //for(i = 0; i < numNodes; i++){ - // CoinPackedVector row; - // for(a = 0; a < numArcs; a++){ - // tail = arcs[a].tail; - // head = arcs[a].head; - // if(head == i) - // row.insert(a, 1.0); - // else if(tail == i) - // row.insert(a, -1.0); - // } - // if(i == source) - // model->appendRow(row, - // -commodities[commId].demand, - // -commodities[commId].demand); - // else if(i == sink) - // model->appendRow(row, - // commodities[commId].demand, - // commodities[commId].demand); - // else - // model->appendRow(row, 0.0, 0.0); - //} - - ////--- - ////--- set the colLB, colUB, integerVars and sparse mapping - ////--- - //origColIndex = commId * numArcs; - //for(a = 0; a < numArcs; a++){ - // double arcLB = arcs[a].lb; - // double arcUB = arcs[a].ub; - // model->pushCol(arcLB, arcUB, true, origColIndex); - // origColIndex++; - //} - // - //UtilPrintFuncEnd(m_osLog, m_classTag, - // "createModelRelaxSparse()", m_appParam.LogLevel, 2); +void 
SDPUC_DecompApp::createModelRelaxSparse(DecompConstraintSet *model, + int commId) { + + ////--- + ////--- SUBPROBLEM (A'): (one block for each k in K) + ////--- sum{(j,i) in A} x[k,i,j] - + ////--- sum{(i,j) in A} x[k,i,j] = d[i,k], for all i in N + ////--- x[k,i,j] integer >= l[i,j] <= u[i,j], for all (i,j) in A + ////--- For k=(s,t) in K, + ////--- d[i,k] = -d[k] if i=s + ////--- = d[k] if i=t + ////--- = 0, otherwise + ////--- + // int a, i, head, tail, origColIndex, source, sink; + // int numArcs = m_instance.m_numArcs; + // int numNodes = m_instance.m_numNodes; + // int numCommodities = m_instance.m_numCommodities; + // int numCols = numArcs; + // int numRows = numNodes; + // int numColsOrig = numArcs * numCommodities; + // SILCEP_Instance::arc * arcs = m_instance.m_arcs; + // SILCEP_Instance::commodity * commodities = m_instance.m_commodities; + + // UtilPrintFuncBegin(m_osLog, m_classTag, + // "createModelRelaxSparse()", m_appParam.LogLevel, 2); + + ////--- + ////--- create space for the model matrix (row-majored) + ////--- + // model->M = new CoinPackedMatrix(false, 0.0, 0.0); + // if(!model->M) + // throw UtilExceptionMemory("createModelCore", "MCF_DecompApp"); + // model->M->setDimensions(0, numCols); + // model->reserve(numRows, numCols); + // model->setSparse(numColsOrig); + + ////--- + ////--- get this commodity's source and sink node + ////--- + // source = commodities[commId].source; + // sink = commodities[commId].sink; + // + ////--- + ////--- create the rows + ////--- NOTE: this is somewhat inefficient (but simple) + ////--- + // for(i = 0; i < numNodes; i++){ + // CoinPackedVector row; + // for(a = 0; a < numArcs; a++){ + // tail = arcs[a].tail; + // head = arcs[a].head; + // if(head == i) + // row.insert(a, 1.0); + // else if(tail == i) + // row.insert(a, -1.0); + // } + // if(i == source) + // model->appendRow(row, + // -commodities[commId].demand, + // -commodities[commId].demand); + // else if(i == sink) + // model->appendRow(row, + // commodities[commId].demand, + // commodities[commId].demand); + // else + // model->appendRow(row, 0.0, 0.0); + //} + + ////--- + ////--- set the colLB, colUB, integerVars and sparse mapping + ////--- + // origColIndex = commId * numArcs; + // for(a = 0; a < numArcs; a++){ + // double arcLB = arcs[a].lb; + // double arcUB = arcs[a].ub; + // model->pushCol(arcLB, arcUB, true, origColIndex); + // origColIndex++; + //} + // + // UtilPrintFuncEnd(m_osLog, m_classTag, + // "createModelRelaxSparse()", m_appParam.LogLevel, 2); } //===========================================================================// - //===========================================================================// -DecompSolution -SDPUC_DecompApp::createInitialSolution(){ - - - - int a, i, t, colIndex; - int numTimeperiods = m_instance.m_numTimeperiods; - int numArcs = m_instance.m_numArcs; - int numSwitchings = m_instance.m_numSwitchings; - int numNodes = m_instance.m_numNodes; - int numCols = numArcs //y1-vars - + 3 * numTimeperiods * numArcs //y2-, z-, and x-vars - + numTimeperiods * numNodes; //theta-vars - - SDPUC_Instance::arc * arcs = m_instance.m_arcs; - SDPUC_Instance::node * nodes = m_instance.m_nodes; - SDPUC_Instance::timeseries * ts = m_instance.m_timeseries; - - int numACArcs = 0; - for(a = 0; a < numArcs; a++){ - if(arcs[a].acline == 1) { numACArcs++; } - } - - int numRows = numNodes - 1 //balance - + 2 * numArcs // capacity - + 2 * numACArcs // and kirchoffs constraints - + 1; // max. allowed no. 
of switches employed sum{z} <= k - - int col_yStartIndex = 0; - int col_zStartIndex = numArcs*(1+numTimeperiods); - int col_xStartIndex = numArcs * (1 + 2*numTimeperiods); - int col_thetaStartIndex = numArcs*(1 + 3*numTimeperiods) ; - double bigM = 100; - - const int size = numCols; - double values_[100000]; - const double quality = 1e75; - //const double * cost[size]; - - //double my_non_const_array[100000]; - //int dummy = read_array_from_file(my_non_const_array); - //double const (&array)[100000] = my_non_const_array; - - - //initialise all values to 0 - for(i = 0; i < numCols; i++){ - values_[i] = 0; - } - // set z = 1 - for(a = 0; a < numArcs; a++){ - for(t = 0; t < numTimeperiods; t++){ - colIndex = col_zStartIndex + t * numArcs + a; - values_[colIndex] = 1.0; - } - } - - double const (&values)[100000] = values_; - - DecompSolution sol(size, values, quality); - - return sol; +DecompSolution SDPUC_DecompApp::createInitialSolution() { + + int a, i, t, colIndex; + int numTimeperiods = m_instance.m_numTimeperiods; + int numArcs = m_instance.m_numArcs; + int numSwitchings = m_instance.m_numSwitchings; + int numNodes = m_instance.m_numNodes; + int numCols = numArcs // y1-vars + + 3 * numTimeperiods * numArcs // y2-, z-, and x-vars + + numTimeperiods * numNodes; // theta-vars + + SDPUC_Instance::arc *arcs = m_instance.m_arcs; + SDPUC_Instance::node *nodes = m_instance.m_nodes; + SDPUC_Instance::timeseries *ts = m_instance.m_timeseries; + + int numACArcs = 0; + for (a = 0; a < numArcs; a++) { + if (arcs[a].acline == 1) { + numACArcs++; + } + } + + int numRows = numNodes - 1 // balance + + 2 * numArcs // capacity + + 2 * numACArcs // and kirchoffs constraints + + 1; // max. allowed no. of switches employed sum{z} <= k + + int col_yStartIndex = 0; + int col_zStartIndex = numArcs * (1 + numTimeperiods); + int col_xStartIndex = numArcs * (1 + 2 * numTimeperiods); + int col_thetaStartIndex = numArcs * (1 + 3 * numTimeperiods); + double bigM = 100; + + const int size = numCols; + double values_[100000]; + const double quality = 1e75; + // const double * cost[size]; + + // double my_non_const_array[100000]; + // int dummy = read_array_from_file(my_non_const_array); + // double const (&array)[100000] = my_non_const_array; + + // initialise all values to 0 + for (i = 0; i < numCols; i++) { + values_[i] = 0; + } + // set z = 1 + for (a = 0; a < numArcs; a++) { + for (t = 0; t < numTimeperiods; t++) { + colIndex = col_zStartIndex + t * numArcs + a; + values_[colIndex] = 1.0; + } + } + + double const(&values)[100000] = values_; + + DecompSolution sol(size, values, quality); + + return sol; } diff --git a/Dip/examples/SDPUC/SDPUC_Instance.cpp b/Dip/examples/SDPUC/SDPUC_Instance.cpp index 2665b521..95574994 100644 --- a/Dip/examples/SDPUC/SDPUC_Instance.cpp +++ b/Dip/examples/SDPUC/SDPUC_Instance.cpp @@ -18,153 +18,136 @@ //===========================================================================// //===========================================================================// -int SDPUC_Instance::readInstance(string & fileName, - bool addDummyArcs){ - - ifstream is; - int status = UtilOpenFile(is, fileName.c_str()); - if(status) - throw UtilException("Failed to read instance", - "readInstance", "MCF_Instance"); - - double sumweight = 0; - bool size_read = true; - int arcs_read = 0; - int nodes_read = 0; - int ts_read = 0; - int nt = 0; - char line[1000]; - char name[1000]; - while(is.good()) { - is.getline(line, 1000); - if (is.gcount() >= 999) { - cerr << "ERROR: Input file is incorrect. 
" - << "A line more than 1000 characters is found." << endl; - return 1; - } - switch (line[0]) { - case 'p': - if (sscanf(line, "p%s%i%i%i%i%i", - name, &m_numNodes, &m_numArcs, &m_numSwitchings, &m_numTimeseries, &m_numTimeperiods) != 6) { - cerr << "ERROR: Input file is incorrect. (p line)" << endl; - return 1; - } - m_problemName = name; - m_arcs = new arc[m_numArcs + (addDummyArcs ? 0 : 0)]; - if(!m_arcs) - throw UtilExceptionMemory("readInstance", "MCF_DecompApp"); - m_nodes = new node[m_numNodes]; - if(!m_nodes) - throw UtilExceptionMemory("readInstance", "MCF_DecompApp"); - m_timeseries = new timeseries[m_numTimeseries]; - if(!m_timeseries) - throw UtilExceptionMemory("readInstance", "MCF_DecompApp"); +int SDPUC_Instance::readInstance(string &fileName, bool addDummyArcs) { + + ifstream is; + int status = UtilOpenFile(is, fileName.c_str()); + if (status) + throw UtilException("Failed to read instance", "readInstance", + "MCF_Instance"); - break; - case 'c': - break; - case '#': - break; - case 'n': - if (sscanf(line, "n%i%lf%i", - &m_nodes[nodes_read].id, - &m_nodes[nodes_read].demand, - &m_nodes[nodes_read].tsdemand) != 3) { - cerr << "ERROR: Input file is incorrect. (n line)" << endl; - return 1; - } - ++nodes_read; - break; - case 'a': - if (sscanf(line, "a%i%i%lf%lf%lf%lf%lf%lf%i%i%i%i", - &m_arcs[arcs_read].tail, - &m_arcs[arcs_read].head, - &m_arcs[arcs_read].lb, - &m_arcs[arcs_read].ub, - &m_arcs[arcs_read].weight, - &m_arcs[arcs_read].mcost, - &m_arcs[arcs_read].fcost1, - &m_arcs[arcs_read].fcost2, - &m_arcs[arcs_read].tscap, - &m_arcs[arcs_read].tscost, - &m_arcs[arcs_read].acline, - &m_arcs[arcs_read].switchable - ) != 12) { - cerr << "Input file is incorrect. (a line)" << endl; - return 1; - } - sumweight += fabs(m_arcs[arcs_read].mcost); - ++arcs_read; - break; - case 't': - //cout << "ts_read=" << ts_read ; - //cout << " numTimeperiods=" << m_numTimeperiods << endl; - m_timeseries[ts_read].values = new double[m_numTimeperiods]; - /* if (sscanf(line, "t%i%lf%lf%lf%lf", - &m_timeseries[ts_read].id, - &m_timeseries[ts_read].values[0], - &m_timeseries[ts_read].values[1], - &m_timeseries[ts_read].values[2], - &m_timeseries[ts_read].values[3] - ) != 5) { - cerr << "ERROR: Input file is incorrect. (t line) << " << line << endl; - return 1; - }*/ - - nt = 0; - char * pch; - //printf ("Splitting string \"%s\" into tokens:\n",line); - pch = strtok (line,"\t"); //stripping the initial 't' - //printf ("%s ",pch); - pch = strtok (NULL, "\t"); //timeseries id - m_timeseries[ts_read].id = atoi(pch); - //printf ("%s\n",pch); - while (pch != NULL && nt < m_numTimeperiods) - { - - pch = strtok (NULL, "\t"); - m_timeseries[ts_read].values[nt] = atof(pch); - //printf ("%s\n",pch); - nt++; - } + double sumweight = 0; + bool size_read = true; + int arcs_read = 0; + int nodes_read = 0; + int ts_read = 0; + int nt = 0; + char line[1000]; + char name[1000]; + while (is.good()) { + is.getline(line, 1000); + if (is.gcount() >= 999) { + cerr << "ERROR: Input file is incorrect. " + << "A line more than 1000 characters is found." << endl; + return 1; + } + switch (line[0]) { + case 'p': + if (sscanf(line, "p%s%i%i%i%i%i", name, &m_numNodes, &m_numArcs, + &m_numSwitchings, &m_numTimeseries, &m_numTimeperiods) != 6) { + cerr << "ERROR: Input file is incorrect. (p line)" << endl; + return 1; + } + m_problemName = name; + m_arcs = new arc[m_numArcs + (addDummyArcs ? 
0 : 0)]; + if (!m_arcs) + throw UtilExceptionMemory("readInstance", "MCF_DecompApp"); + m_nodes = new node[m_numNodes]; + if (!m_nodes) + throw UtilExceptionMemory("readInstance", "MCF_DecompApp"); + m_timeseries = new timeseries[m_numTimeseries]; + if (!m_timeseries) + throw UtilExceptionMemory("readInstance", "MCF_DecompApp"); + break; + case 'c': + break; + case '#': + break; + case 'n': + if (sscanf(line, "n%i%lf%i", &m_nodes[nodes_read].id, + &m_nodes[nodes_read].demand, + &m_nodes[nodes_read].tsdemand) != 3) { + cerr << "ERROR: Input file is incorrect. (n line)" << endl; + return 1; + } + ++nodes_read; + break; + case 'a': + if (sscanf(line, "a%i%i%lf%lf%lf%lf%lf%lf%i%i%i%i", + &m_arcs[arcs_read].tail, &m_arcs[arcs_read].head, + &m_arcs[arcs_read].lb, &m_arcs[arcs_read].ub, + &m_arcs[arcs_read].weight, &m_arcs[arcs_read].mcost, + &m_arcs[arcs_read].fcost1, &m_arcs[arcs_read].fcost2, + &m_arcs[arcs_read].tscap, &m_arcs[arcs_read].tscost, + &m_arcs[arcs_read].acline, + &m_arcs[arcs_read].switchable) != 12) { + cerr << "Input file is incorrect. (a line)" << endl; + return 1; + } + sumweight += fabs(m_arcs[arcs_read].mcost); + ++arcs_read; + break; + case 't': + // cout << "ts_read=" << ts_read ; + // cout << " numTimeperiods=" << m_numTimeperiods << endl; + m_timeseries[ts_read].values = new double[m_numTimeperiods]; + /* if (sscanf(line, "t%i%lf%lf%lf%lf", + &m_timeseries[ts_read].id, + &m_timeseries[ts_read].values[0], + &m_timeseries[ts_read].values[1], + &m_timeseries[ts_read].values[2], + &m_timeseries[ts_read].values[3] + ) != 5) { + cerr << "ERROR: Input file is incorrect. (t line) << " << line << endl; + return 1; +}*/ + nt = 0; + char *pch; + // printf ("Splitting string \"%s\" into tokens:\n",line); + pch = strtok(line, "\t"); // stripping the initial 't' + // printf ("%s ",pch); + pch = strtok(NULL, "\t"); // timeseries id + m_timeseries[ts_read].id = atoi(pch); + // printf ("%s\n",pch); + while (pch != NULL && nt < m_numTimeperiods) { - ++ts_read; - break; - default: - if (sscanf(line+1, "%s", name) <= 0) { - cerr << "Input file is incorrect. (non-recognizable line)" << endl; - return 1; - } - break; + pch = strtok(NULL, "\t"); + m_timeseries[ts_read].values[nt] = atof(pch); + // printf ("%s\n",pch); + nt++; } - } - - if (!size_read || - arcs_read != m_numArcs || - nodes_read != m_numNodes || - ts_read != m_numTimeseries - ) { - cerr << "Input file is incorrect." - << " size_read=" << size_read - << " arcs_read=" << arcs_read - << " nodes_read=" << nodes_read - << " ts_read=" << ts_read << endl; - return 1; - } - - /*if (addDummyArcs) { - for (int i = 0; i < m_numCommodities; ++i) { - m_arcs[m_numArcs].tail = m_commodities[i].source; - m_arcs[m_numArcs].head = m_commodities[i].sink; - m_arcs[m_numArcs].lb = 0; - m_arcs[m_numArcs].ub = m_commodities[i].demand; - m_arcs[m_numArcs].weight = sumweight+1; - ++m_numArcs; + + ++ts_read; + break; + default: + if (sscanf(line + 1, "%s", name) <= 0) { + cerr << "Input file is incorrect. (non-recognizable line)" << endl; + return 1; } - }*/ - is.close(); - return 0; -} + break; + } + } + if (!size_read || arcs_read != m_numArcs || nodes_read != m_numNodes || + ts_read != m_numTimeseries) { + cerr << "Input file is incorrect." 
+ << " size_read=" << size_read << " arcs_read=" << arcs_read + << " nodes_read=" << nodes_read << " ts_read=" << ts_read << endl; + return 1; + } + + /*if (addDummyArcs) { + for (int i = 0; i < m_numCommodities; ++i) { + m_arcs[m_numArcs].tail = m_commodities[i].source; + m_arcs[m_numArcs].head = m_commodities[i].sink; + m_arcs[m_numArcs].lb = 0; + m_arcs[m_numArcs].ub = m_commodities[i].demand; + m_arcs[m_numArcs].weight = sumweight+1; + ++m_numArcs; + } + }*/ + is.close(); + return 0; +} diff --git a/Dip/examples/SDPUC/SDPUC_Main.cpp b/Dip/examples/SDPUC/SDPUC_Main.cpp index 11730a9f..a0ea6d15 100644 --- a/Dip/examples/SDPUC/SDPUC_Main.cpp +++ b/Dip/examples/SDPUC/SDPUC_Main.cpp @@ -26,168 +26,155 @@ #include "UtilTimer.h" //===========================================================================// -int main(int argc, char ** argv){ - try { +int main(int argc, char **argv) { + try { + + //--- + //--- create the utility class for parsing parameters + //--- + UtilParameters utilParam(argc, argv); + bool doCut = utilParam.GetSetting("doCut", true); + bool doPriceCut = utilParam.GetSetting("doPriceCut", false); + bool doDirect = utilParam.GetSetting("doDirect", false); + int timeLimit = utilParam.GetSetting("timeLimit", 60); + + UtilTimer timer; + double timeSetupReal = 0.0; + double timeSetupCpu = 0.0; + double timeSolveReal = 0.0; + double timeSolveCpu = 0.0; + //--- + //--- start overall timer + //--- + timer.start(); + //--- + //--- create the user application (a DecompApp) + //--- + SDPUC_DecompApp mmkp(utilParam); + //--- + //--- create the algorithm (a DecompAlgo) + //--- + DecompAlgo *algo = NULL; + assert(doCut + doPriceCut == 1); + + //--- + //--- create the CPM algorithm object + //--- + if (doCut) + algo = new DecompAlgoC(&mmkp, utilParam); + //--- + //--- create the PC algorithm object + //--- + if (doPriceCut) + algo = new DecompAlgoPC(&mmkp, utilParam); + + if (doCut && doDirect) { + timer.stop(); + timeSetupCpu = timer.getCpuTime(); + timeSetupReal = timer.getRealTime(); + + //--- create initial solution + // DecompSolution sol = SILCEP_DecompApp::createInitialSolution(); //--- - //--- create the utility class for parsing parameters + //--- solve //--- - UtilParameters utilParam(argc, argv); - bool doCut = utilParam.GetSetting("doCut", true); - bool doPriceCut = utilParam.GetSetting("doPriceCut", false); - bool doDirect = utilParam.GetSetting("doDirect", false); - int timeLimit = utilParam.GetSetting("timeLimit", 60); - - UtilTimer timer; - double timeSetupReal = 0.0; - double timeSetupCpu = 0.0; - double timeSolveReal = 0.0; - double timeSolveCpu = 0.0; + timer.start(); + algo->solveDirect(); + + timer.stop(); + timeSolveCpu = timer.getCpuTime(); + timeSolveReal = timer.getRealTime(); + } else { //--- - //--- start overall timer + //--- create the driver AlpsDecomp model //--- - timer.start(); + int status = 0; + AlpsDecompModel alpsModel(utilParam, algo); + + timer.stop(); + timeSetupCpu = timer.getCpuTime(); + timeSetupReal = timer.getRealTime(); + + //--- + //--- solve //--- - //--- create the user application (a DecompApp) - //--- - SDPUC_DecompApp mmkp(utilParam); + timer.start(); + status = alpsModel.solve(); + timer.stop(); + timeSolveCpu = timer.getCpuTime(); + timeSolveReal = timer.getRealTime(); + //--- - //--- create the algorithm (a DecompAlgo) + //--- Display solution //--- - DecompAlgo * algo = NULL; - assert(doCut + doPriceCut == 1); + SDPUC_Instance &inst = mmkp.getInstance(); + int i = 0; + + int numTimeperiods = inst.m_numTimeperiods; + int numArcs = 
inst.m_numArcs; + int numNodes = inst.m_numNodes; + int numCols = numArcs // y1-vars + + 3 * numTimeperiods * numArcs // y2-, z-, and x-vars + + numTimeperiods * numNodes; // theta-vars + int numRows = numArcs * numTimeperiods; + int col_yStartIndex = 0; + int col_zStartIndex = numArcs * (1 + numTimeperiods); + int col_xStartIndex = numArcs * (1 + 2 * numTimeperiods); + int col_thetaStartIndex = numArcs * (1 + 3 * numTimeperiods); + double total_op_cost = 0; + double total_fix1_cost = 0; + double total_fix2_cost = 0; + const double *values = (alpsModel.getBestSolution()->getValues()); + // cout << "__BEGIN SOL__\n"; + for (i = 0; i < numCols; i++) { + // cout << values[i] << " "; + if (i < numArcs) { + total_fix1_cost = + total_fix1_cost + values[i] * mmkp.getObjective()[i]; + } else if (i < col_zStartIndex) { + total_fix2_cost = + total_fix2_cost + values[i] * mmkp.getObjective()[i]; + } else { // if(i > col_xStartIndex && i < col_zStartIndex ) { + total_op_cost = total_op_cost + values[i] * mmkp.getObjective()[i]; + } + } + cout << " - - - - \n"; + cout << "Total fix1 cost = " << total_fix1_cost << endl; + cout << "Total fix2 cost = " << total_fix2_cost << endl; + cout << "Total operational cost = " << total_op_cost << endl; + cout << "Number of columns in Core : " << alpsModel.getNumCoreCols() + << endl; + cout << " - - - - " << endl; //--- - //--- create the CPM algorithm object - //--- - if(doCut) - algo = new DecompAlgoC(&mmkp, utilParam); + //--- - //--- create the PC algorithm object + //--- sanity check //--- - if(doPriceCut) - algo = new DecompAlgoPC(&mmkp, utilParam); - - - if(doCut && doDirect){ - timer.stop(); - timeSetupCpu = timer.getCpuTime(); - timeSetupReal = timer.getRealTime(); - - //--- create initial solution - //DecompSolution sol = SILCEP_DecompApp::createInitialSolution(); - - //--- - //--- solve - //--- - timer.start(); - algo->solveDirect(); - - timer.stop(); - timeSolveCpu = timer.getCpuTime(); - timeSolveReal = timer.getRealTime(); - } - else{ - //--- - //--- create the driver AlpsDecomp model - //--- - int status = 0; - AlpsDecompModel alpsModel(utilParam, algo); - - timer.stop(); - timeSetupCpu = timer.getCpuTime(); - timeSetupReal = timer.getRealTime(); - - //--- - //--- solve - //--- - timer.start(); - status = alpsModel.solve(); - timer.stop(); - timeSolveCpu = timer.getCpuTime(); - timeSolveReal = timer.getRealTime(); - - //--- - //--- Display solution - //--- - - SDPUC_Instance & inst = mmkp.getInstance(); - int i = 0; - - int numTimeperiods = inst.m_numTimeperiods; - int numArcs = inst.m_numArcs; - int numNodes = inst.m_numNodes; - int numCols = numArcs //y1-vars - + 3 * numTimeperiods * numArcs //y2-, z-, and x-vars - + numTimeperiods * numNodes; //theta-vars - int numRows = numArcs * numTimeperiods; - int col_yStartIndex = 0; - int col_zStartIndex = numArcs*(1+numTimeperiods); - int col_xStartIndex = numArcs * (1 + 2*numTimeperiods); - int col_thetaStartIndex = numArcs*(1 + 3*numTimeperiods) ; - double total_op_cost = 0; - double total_fix1_cost = 0; - double total_fix2_cost = 0; - const double * values = (alpsModel.getBestSolution()->getValues()); - //cout << "__BEGIN SOL__\n"; - for(i = 0; i < numCols; i++){ - //cout << values[i] << " "; - - if(i < numArcs) { - total_fix1_cost = total_fix1_cost + values[i] * mmkp.getObjective()[i]; - } - else if(i < col_zStartIndex ) { - total_fix2_cost = total_fix2_cost + values[i] * mmkp.getObjective()[i]; - } - else { //if(i > col_xStartIndex && i < col_zStartIndex ) { - total_op_cost = total_op_cost + values[i] 
* mmkp.getObjective()[i]; - } - } - - cout << " - - - - \n"; - cout << "Total fix1 cost = " << total_fix1_cost << endl; - cout << "Total fix2 cost = " << total_fix2_cost << endl; - cout << "Total operational cost = " << total_op_cost << endl; - cout << "Number of columns in Core : " << alpsModel.getNumCoreCols() << endl; - cout << " - - - - " << endl; - //--- - - //--- - //--- sanity check - //--- - cout << setiosflags(ios::fixed|ios::showpoint); - cout << "Status= " << status - << " BestLB= " << setw(10) - << UtilDblToStr(alpsModel.getGlobalLB(),5) - << " BestUB= " << setw(10) - << UtilDblToStr(alpsModel.getGlobalUB(),5) - << " Nodes= " << setw(6) - << alpsModel.getNumNodesProcessed() - << " SetupCPU= " << timeSetupCpu - << " SolveCPU= " << timeSolveCpu - << " TotalCPU= " << timeSetupCpu + timeSolveCpu - << " SetupReal= " << timeSetupReal - << " SolveReal= " << timeSolveReal - << " TotalReal= " << timeSetupReal + timeSolveReal - << endl; - - - - - //--- - //--- free local memory - //--- - delete algo; - } - } - catch(CoinError & ex){ - cerr << "COIN Exception [ " << ex.message() << " ]" - << " at " << ex.fileName() << ":L" << ex.lineNumber() - << " in " << ex.className() << "::" << ex.methodName() << endl; - return 1; - } - return 0; -} + cout << setiosflags(ios::fixed | ios::showpoint); + cout << "Status= " << status << " BestLB= " << setw(10) + << UtilDblToStr(alpsModel.getGlobalLB(), 5) + << " BestUB= " << setw(10) + << UtilDblToStr(alpsModel.getGlobalUB(), 5) << " Nodes= " << setw(6) + << alpsModel.getNumNodesProcessed() << " SetupCPU= " << timeSetupCpu + << " SolveCPU= " << timeSolveCpu + << " TotalCPU= " << timeSetupCpu + timeSolveCpu + << " SetupReal= " << timeSetupReal << " SolveReal= " << timeSolveReal + << " TotalReal= " << timeSetupReal + timeSolveReal << endl; + //--- + //--- free local memory + //--- + delete algo; + } + } catch (CoinError &ex) { + cerr << "COIN Exception [ " << ex.message() << " ]" + << " at " << ex.fileName() << ":L" << ex.lineNumber() << " in " + << ex.className() << "::" << ex.methodName() << endl; + return 1; + } + return 0; +} diff --git a/Dip/examples/SmallIP/SmallIP_DecompApp.cpp b/Dip/examples/SmallIP/SmallIP_DecompApp.cpp index a25345a4..d1759dde 100644 --- a/Dip/examples/SmallIP/SmallIP_DecompApp.cpp +++ b/Dip/examples/SmallIP/SmallIP_DecompApp.cpp @@ -13,185 +13,179 @@ //===========================================================================// //===========================================================================// -#include "DecompVar.h" #include "SmallIP_DecompApp.h" +#include "DecompVar.h" //===========================================================================// -//parameters -const int LogLevel = 1; -const int whichRelax = 1; //1 matches book chapter and thesis +// parameters +const int LogLevel = 1; +const int whichRelax = 1; // 1 matches book chapter and thesis //===========================================================================// -void SmallIP_DecompApp::createModels(){ - - //--- - //--- This function does the work to create the different models - //--- that will be used. This memory is owned by the user. It will - //--- be passed to the application interface and used by the algorithms. - //--- - UtilPrintFuncBegin(m_osLog, m_classTag, "createModels()", LogLevel, 2); - - //--- - //--- Small Integer Program from: - //--- "Integer Programming: Theory and Practice", - //--- "Decomposition in Integer Linear Programming", Chapter 4 - //--- - //--- min 1 x[0] - //--- - //--- s.t. 
7 x[0] - 1 x[1] >= 13 (4.5) - //--- 1 x[1] >= 1 (4.6) - //--- -1 x[0] + 1 x[1] >= -3 (4.7) - //--- -4 x[0] - 1 x[1] >= -27 (4.8) - //--- - 1 x[1] >= -5 (4.9) - //--- 0.2 x[0] - 1 x[1] >= -4 (4.10) - //--- - //--- -1 x[0] - 1 x[1] >= -8 (4.11) - //--- -0.4 x[0] + 1 x[1] >= 0.3 (4.12) - //--- 1 x[0] + 1 x[1] >= 4.5 (4.13) - //--- 3 x[0] + 1 x[1] >= 9.5 (4.14) - //--- 0.25 x[0] - 1 x[1] >= -3 (4.15) - //--- x[0], x[1] >=0, <= 6 (4.16) - //--- x[0], x[1] integer (4.17) - //--- - //--- Model 1 - //--- Q' = { x in R^2 | x satisfies (4.5-10, 16, 17} modelRelax - //--- Q'' = { x in R^2 | x satisfies (4.11-16 } modelCore - //--- - //--- Model 2 - //--- Q' = { x in R^2 | x satisfies (4.11-17) } modelRelax - //--- Q'' = { x in R^2 | x satisfies (4.5-10, 16) } modelCore - //--- - - //--- - //--- Construct the objective function (the original problem is - //--- a maximization, so we flip the sign to make it minimization). - //--- - const int numCols = 2; - m_objective = new double[numCols]; - if(!m_objective) - throw UtilExceptionMemory("createModels", "SmallIP_DecompApp"); - m_objective[0] = 1.0; - m_objective[1] = 0.0; - setModelObjective(m_objective, numCols); - - //--- - //--- build matrix part 1 (4.5 -10,16) - //--- build matrix part 2 (4.11-15,16) - //--- - const int numNzs1 = 10; - const int numNzs2 = 10; - const int numRows1 = 6; - const int numRows2 = 5; - bool isRowOrdered = false; - - int rowIndices1[numNzs1] = {0,0,1,2,2,3,3,4,5,5}; - int colIndices1[numNzs1] = {0,1,1,0,1,0,1,1,0,1}; - double elements1 [numNzs1] = { 7.0, -1.0, 1.0, -1.0, 1.0, - -4.0, -1.0, -1.0, 0.2, -1.0}; - - int rowIndices2[numNzs2] = {0,0,1,1,2,2,3,3,4,4}; - int colIndices2[numNzs2] = {0,1,0,1,0,1,0,1,0,1}; - double elements2 [numNzs2] = {-1.0, -1.0, -0.4, 1.0, 1.0, - 1.0 , 3.0, 1.0, 0.25, -1.0}; - - m_modelPart1.M = new CoinPackedMatrix(isRowOrdered, - rowIndices1, colIndices1, - elements1, numNzs1); - m_modelPart2.M = new CoinPackedMatrix(isRowOrdered, - rowIndices2, colIndices2, - elements2, numNzs2); - - //--- - //--- set the row upper and lower bounds of part 1 - //--- - double rowLB1[numRows1] = {13.0, 1.0, -3.0, -27.0, -5.0, -4.0}; - std::fill_n(back_inserter(m_modelPart1.rowUB), numRows1, m_infinity); - std::copy (rowLB1, rowLB1 + numRows1, back_inserter(m_modelPart1.rowLB)); - - //--- - //--- set the column upper and lower bounds of part 1 - //--- - std::fill_n(back_inserter(m_modelPart1.colLB), numCols, 0.0); - std::fill_n(back_inserter(m_modelPart1.colUB), numCols, 6.0); - - //--- - //--- set the integer variables for part 1 - //--- - m_modelPart1.integerVars.push_back(0); - m_modelPart1.integerVars.push_back(1); - - //--- - //--- set the row upper and lower bounds of part 2 - //--- - double rowLB2[numRows2] = {-8.0, 0.3, 4.5, 9.5, -3.0}; - std::fill_n(back_inserter(m_modelPart2.rowUB), numRows2, m_infinity); - std::copy (rowLB2, rowLB2 + numRows2, back_inserter(m_modelPart2.rowLB)); - - //--- - //--- set the column upper and lower bounds of part 2 - //--- - std::fill_n(back_inserter(m_modelPart2.colLB), numCols, 0.0); - std::fill_n(back_inserter(m_modelPart2.colUB), numCols, 6.0); - - //--- - //--- set the integer variables for part 2 - //--- - m_modelPart2.integerVars.push_back(0); - m_modelPart2.integerVars.push_back(1); - - switch(whichRelax){ - case 1: - { - //--- - //--- Set Model 1 - //--- Q' = { x in R^2 | x satisfies (4.5-10, 16, 17} modelRelax - //--- Q'' = { x in R^2 | x satisfies (4.11-16 } modelCore - //--- - setModelRelax(&m_modelPart1, "RELAX1"); - setModelCore (&m_modelPart2, "CORE2"); - } - 
break; - case 2: - { - //--- - //--- Set Model 2 - //--- Q' = { x in R^2 | x satisfies (4.11-17) } modelRelax - //--- Q'' = { x in R^2 | x satisfies (4.5-10, 16) } modelCore - //--- - setModelRelax(&m_modelPart2, "RELAX2"); - setModelCore (&m_modelPart1, "CORE1"); - } - break; - } - UtilPrintFuncEnd(m_osLog, m_classTag, "createModels()", LogLevel, 2); +void SmallIP_DecompApp::createModels() { + + //--- + //--- This function does the work to create the different models + //--- that will be used. This memory is owned by the user. It will + //--- be passed to the application interface and used by the algorithms. + //--- + UtilPrintFuncBegin(m_osLog, m_classTag, "createModels()", LogLevel, 2); + + //--- + //--- Small Integer Program from: + //--- "Integer Programming: Theory and Practice", + //--- "Decomposition in Integer Linear Programming", Chapter 4 + //--- + //--- min 1 x[0] + //--- + //--- s.t. 7 x[0] - 1 x[1] >= 13 (4.5) + //--- 1 x[1] >= 1 (4.6) + //--- -1 x[0] + 1 x[1] >= -3 (4.7) + //--- -4 x[0] - 1 x[1] >= -27 (4.8) + //--- - 1 x[1] >= -5 (4.9) + //--- 0.2 x[0] - 1 x[1] >= -4 (4.10) + //--- + //--- -1 x[0] - 1 x[1] >= -8 (4.11) + //--- -0.4 x[0] + 1 x[1] >= 0.3 (4.12) + //--- 1 x[0] + 1 x[1] >= 4.5 (4.13) + //--- 3 x[0] + 1 x[1] >= 9.5 (4.14) + //--- 0.25 x[0] - 1 x[1] >= -3 (4.15) + //--- x[0], x[1] >=0, <= 6 (4.16) + //--- x[0], x[1] integer (4.17) + //--- + //--- Model 1 + //--- Q' = { x in R^2 | x satisfies (4.5-10, 16, 17} modelRelax + //--- Q'' = { x in R^2 | x satisfies (4.11-16 } modelCore + //--- + //--- Model 2 + //--- Q' = { x in R^2 | x satisfies (4.11-17) } modelRelax + //--- Q'' = { x in R^2 | x satisfies (4.5-10, 16) } modelCore + //--- + + //--- + //--- Construct the objective function (the original problem is + //--- a maximization, so we flip the sign to make it minimization). 
+ //--- + const int numCols = 2; + m_objective = new double[numCols]; + if (!m_objective) + throw UtilExceptionMemory("createModels", "SmallIP_DecompApp"); + m_objective[0] = 1.0; + m_objective[1] = 0.0; + setModelObjective(m_objective, numCols); + + //--- + //--- build matrix part 1 (4.5 -10,16) + //--- build matrix part 2 (4.11-15,16) + //--- + const int numNzs1 = 10; + const int numNzs2 = 10; + const int numRows1 = 6; + const int numRows2 = 5; + bool isRowOrdered = false; + + int rowIndices1[numNzs1] = {0, 0, 1, 2, 2, 3, 3, 4, 5, 5}; + int colIndices1[numNzs1] = {0, 1, 1, 0, 1, 0, 1, 1, 0, 1}; + double elements1[numNzs1] = {7.0, -1.0, 1.0, -1.0, 1.0, + -4.0, -1.0, -1.0, 0.2, -1.0}; + + int rowIndices2[numNzs2] = {0, 0, 1, 1, 2, 2, 3, 3, 4, 4}; + int colIndices2[numNzs2] = {0, 1, 0, 1, 0, 1, 0, 1, 0, 1}; + double elements2[numNzs2] = {-1.0, -1.0, -0.4, 1.0, 1.0, + 1.0, 3.0, 1.0, 0.25, -1.0}; + + m_modelPart1.M = new CoinPackedMatrix(isRowOrdered, rowIndices1, colIndices1, + elements1, numNzs1); + m_modelPart2.M = new CoinPackedMatrix(isRowOrdered, rowIndices2, colIndices2, + elements2, numNzs2); + + //--- + //--- set the row upper and lower bounds of part 1 + //--- + double rowLB1[numRows1] = {13.0, 1.0, -3.0, -27.0, -5.0, -4.0}; + std::fill_n(back_inserter(m_modelPart1.rowUB), numRows1, m_infinity); + std::copy(rowLB1, rowLB1 + numRows1, back_inserter(m_modelPart1.rowLB)); + + //--- + //--- set the column upper and lower bounds of part 1 + //--- + std::fill_n(back_inserter(m_modelPart1.colLB), numCols, 0.0); + std::fill_n(back_inserter(m_modelPart1.colUB), numCols, 6.0); + + //--- + //--- set the integer variables for part 1 + //--- + m_modelPart1.integerVars.push_back(0); + m_modelPart1.integerVars.push_back(1); + + //--- + //--- set the row upper and lower bounds of part 2 + //--- + double rowLB2[numRows2] = {-8.0, 0.3, 4.5, 9.5, -3.0}; + std::fill_n(back_inserter(m_modelPart2.rowUB), numRows2, m_infinity); + std::copy(rowLB2, rowLB2 + numRows2, back_inserter(m_modelPart2.rowLB)); + + //--- + //--- set the column upper and lower bounds of part 2 + //--- + std::fill_n(back_inserter(m_modelPart2.colLB), numCols, 0.0); + std::fill_n(back_inserter(m_modelPart2.colUB), numCols, 6.0); + + //--- + //--- set the integer variables for part 2 + //--- + m_modelPart2.integerVars.push_back(0); + m_modelPart2.integerVars.push_back(1); + + switch (whichRelax) { + case 1: { + //--- + //--- Set Model 1 + //--- Q' = { x in R^2 | x satisfies (4.5-10, 16, 17} modelRelax + //--- Q'' = { x in R^2 | x satisfies (4.11-16 } modelCore + //--- + setModelRelax(&m_modelPart1, "RELAX1"); + setModelCore(&m_modelPart2, "CORE2"); + } break; + case 2: { + //--- + //--- Set Model 2 + //--- Q' = { x in R^2 | x satisfies (4.11-17) } modelRelax + //--- Q'' = { x in R^2 | x satisfies (4.5-10, 16) } modelCore + //--- + setModelRelax(&m_modelPart2, "RELAX2"); + setModelCore(&m_modelPart1, "CORE1"); + } break; + } + UtilPrintFuncEnd(m_osLog, m_classTag, "createModels()", LogLevel, 2); } //--------------------------------------------------------------------- // -int SmallIP_DecompApp::generateInitVars(DecompVarList & initVars){ - - //--- - //--- generateInitVars is a virtual method and can be overriden - //--- if the user has some idea how to initialize the list of - //--- initial variables (columns in the DW master) - //--- - - UtilPrintFuncBegin(m_osLog, m_classTag, "generateInitVars()", LogLevel, 2); - - //--- - //--- To follow the example in the chapter: (4,1) and (5,5) - //--- - int ind [2] = {0,1}; - double els1[2] = 
{4.0,1.0}; - double els2[2] = {5.0,5.0}; - initVars.push_back(new DecompVar(2, ind, els1, 4.0)); - initVars.push_back(new DecompVar(2, ind, els2, 5.0)); - - UtilPrintFuncEnd(m_osLog, m_classTag, "generateInitVars()", LogLevel, 2); - return static_cast(initVars.size()); +int SmallIP_DecompApp::generateInitVars(DecompVarList &initVars) { + + //--- + //--- generateInitVars is a virtual method and can be overriden + //--- if the user has some idea how to initialize the list of + //--- initial variables (columns in the DW master) + //--- + + UtilPrintFuncBegin(m_osLog, m_classTag, "generateInitVars()", LogLevel, 2); + + //--- + //--- To follow the example in the chapter: (4,1) and (5,5) + //--- + int ind[2] = {0, 1}; + double els1[2] = {4.0, 1.0}; + double els2[2] = {5.0, 5.0}; + initVars.push_back(new DecompVar(2, ind, els1, 4.0)); + initVars.push_back(new DecompVar(2, ind, els2, 5.0)); + + UtilPrintFuncEnd(m_osLog, m_classTag, "generateInitVars()", LogLevel, 2); + return static_cast(initVars.size()); } /* //--------------------------------------------------------------------- // -int SmallIP_DecompApp::generateCuts(const double * x, +int SmallIP_DecompApp::generateCuts(const double * x, const DecompConstraintSet & modelCore, const DecompConstraintSet & modelRelax, DecompCutList & newCuts){ @@ -211,45 +205,45 @@ int SmallIP_DecompApp::generateCuts(const double * x, //--- const static double cuts[5][2] = {{ 1, -1}, - { 1, 0}, - { 0, 1}, - {-1, 1}, - {-1, -1}}; + { 1, 0}, + { 0, 1}, + {-1, 1}, + {-1, -1}}; const static double rhs[5] = {0,3,2,-2,-8}; UtilPrintFuncBegin(m_osLog, m_classTag, - "generateCuts()", m_param.LogDebugLevel, 2); + "generateCuts()", m_param.LogDebugLevel, 2); int r; for(r = 0; r < 5; r++){ if( ((x[0] * cuts[r][0]) + (x[1] * cuts[r][1])) < (rhs[r] - 0.00001)){ - CoinPackedVector cut; - cut.insert(0, cuts[r][0]); - cut.insert(1, cuts[r][1]); - - OsiRowCut rc; - rc.setRow(cut); - rc.setLb(rhs[r]); - rc.setUb(m_infinity); - - DecompCutOsi * decompCut = new DecompCutOsi(rc); - //the user should not have to do this hash - decompalgo should be doing this - decompCut->setStringHash();//TEST - - UTIL_DEBUG(m_param.LogDebugLevel, 3, - (*m_osLog) << "Found violated cut:"; - decompCut->print(m_osLog); - ); - - newCuts.push_back(decompCut); + CoinPackedVector cut; + cut.insert(0, cuts[r][0]); + cut.insert(1, cuts[r][1]); + + OsiRowCut rc; + rc.setRow(cut); + rc.setLb(rhs[r]); + rc.setUb(m_infinity); + + DecompCutOsi * decompCut = new DecompCutOsi(rc); + //the user should not have to do this hash - decompalgo should be doing +this decompCut->setStringHash();//TEST + + UTIL_DEBUG(m_param.LogDebugLevel, 3, + (*m_osLog) << "Found violated cut:"; + decompCut->print(m_osLog); + ); + + newCuts.push_back(decompCut); } } UtilPrintFuncEnd(m_osLog, m_classTag, - "generateCuts()", m_param.LogDebugLevel, 2); + "generateCuts()", m_param.LogDebugLevel, 2); + - return static_cast(newCuts.size()); } */ diff --git a/Dip/examples/SmallIP/SmallIP_DecompApp2.cpp b/Dip/examples/SmallIP/SmallIP_DecompApp2.cpp index 255e0a23..1bb9a0d1 100644 --- a/Dip/examples/SmallIP/SmallIP_DecompApp2.cpp +++ b/Dip/examples/SmallIP/SmallIP_DecompApp2.cpp @@ -13,11 +13,11 @@ //===========================================================================// //===========================================================================// -#include "DecompVar.h" #include "SmallIP_DecompApp2.h" +#include "DecompVar.h" //===========================================================================// -//parameters -const int LogLevel = 
1; +// parameters +const int LogLevel = 1; //===========================================================================// //--- @@ -26,261 +26,252 @@ const int LogLevel = 1; //--- //===========================================================================// -void SmallIP_DecompApp::createModels(){ - - //--- - //--- This function does the work to create the different models - //--- that will be used. This memory is owned by the user. It will - //--- be passed to the application interface and used by the algorithms. - //--- - UtilPrintFuncBegin(m_osLog, m_classTag, "createModels()", LogLevel, 2); - - //--- - //--- Small Integer Program from: - //--- "Integer Programming: Theory and Practice", - //--- "Decomposition in Integer Linear Programming", Chapter 4 - //--- - //--- min 1 x[0] - //--- - //--- s.t. 7 x[0] - 1 x[1] >= 13 (4.5) - //--- 1 x[1] >= 1 (4.6) - //--- -1 x[0] + 1 x[1] >= -3 (4.7) - //--- -4 x[0] - 1 x[1] >= -27 (4.8) - //--- - 1 x[1] >= -5 (4.9) - //--- 0.2 x[0] - 1 x[1] >= -4 (4.10) - //--- - //--- -1 x[0] - 1 x[1] >= -8 (4.11) - //--- -0.4 x[0] + 1 x[1] >= 0.3 (4.12) - //--- 1 x[0] + 1 x[1] >= 4.5 (4.13) - //--- 3 x[0] + 1 x[1] >= 9.5 (4.14) - //--- 0.25 x[0] - 1 x[1] >= -3 (4.15) - //--- x[0], x[1] >=0, <= 6 (4.16) - //--- x[0], x[1] integer (4.17) - //--- - //--- Model 1 - //--- Q' = { x in R^2 | x satisfies (4.5-10, 16, 17} modelRelax - //--- Q'' = { x in R^2 | x satisfies (4.11-16 } modelCore - //--- - //--- Model 2 - //--- Q' = { x in R^2 | x satisfies (4.11-17) } modelRelax - //--- Q'' = { x in R^2 | x satisfies (4.5-10, 16) } modelCore - //--- - - //--- - //--- Construct the objective function (the original problem is - //--- a maximization, so we flip the sign to make it minimization). - //--- - const int numCols = 2; - m_objective = new double[numCols]; - if(!m_objective) - throw UtilExceptionMemory("createModels", "SmallIP_DecompApp"); - m_objective[0] = 1.0; - m_objective[1] = 0.0; - setModelObjective(m_objective); - - //--- - //--- build matrix part 1 (4.5 -10,16) - //--- build matrix part 2 (4.11-15,16) - //--- - const int numNzs1 = 10; - const int numNzs2 = 10; - const int numRows1 = 6; - const int numRows2 = 5; - bool isRowOrdered = false; - - int rowIndices1[numNzs1] = {0,0,1,2,2,3,3,4,5,5}; - int colIndices1[numNzs1] = {0,1,1,0,1,0,1,1,0,1}; - double elements1 [numNzs1] = { 7.0, -1.0, 1.0, -1.0, 1.0, - -4.0, -1.0, -1.0, 0.2, -1.0}; - - int rowIndices2[numNzs2] = {0,0,1,1,2,2,3,3,4,4}; - int colIndices2[numNzs2] = {0,1,0,1,0,1,0,1,0,1}; - double elements2 [numNzs2] = {-1.0, -1.0, -0.4, 1.0, 1.0, - 1.0 , 3.0, 1.0, 0.25, -1.0}; - - m_modelPart1.M = new CoinPackedMatrix(isRowOrdered, - rowIndices1, colIndices1, - elements1, numNzs1); - m_modelPart2.M = new CoinPackedMatrix(isRowOrdered, - rowIndices2, colIndices2, - elements2, numNzs2); - - //--- - //--- set the row upper and lower bounds of part 1 - //--- - double rowLB1[numRows1] = {13.0, 1.0, -3.0, -27.0, -5.0, -4.0}; - std::fill_n(back_inserter(m_modelPart1.rowUB), numRows1, DecompInf); - std::copy (rowLB1, rowLB1 + numRows1, back_inserter(m_modelPart1.rowLB)); - - //--- - //--- set the column upper and lower bounds of part 1 - //--- - std::fill_n(back_inserter(m_modelPart1.colLB), numCols, 0.0); - std::fill_n(back_inserter(m_modelPart1.colUB), numCols, 6.0); - - //--- - //--- set the integer variables for part 1 - //--- - m_modelPart1.integerVars.push_back(0); - m_modelPart1.integerVars.push_back(1); - - //--- - //--- set the row upper and lower bounds of part 2 - //--- - double rowLB2[numRows2] = {-8.0, 
0.3, 4.5, 9.5, -3.0}; - std::fill_n(back_inserter(m_modelPart2.rowUB), numRows2, DecompInf); - std::copy (rowLB2, rowLB2 + numRows2, back_inserter(m_modelPart2.rowLB)); - - //--- - //--- set the column upper and lower bounds of part 2 - //--- - std::fill_n(back_inserter(m_modelPart2.colLB), numCols, 0.0); - std::fill_n(back_inserter(m_modelPart2.colUB), numCols, 6.0); - - //--- - //--- set the integer variables for part 2 - //--- - m_modelPart2.integerVars.push_back(0); - m_modelPart2.integerVars.push_back(1); - - //--- - //--- set the model core - //--- Q' = { x in R^2 | x satisfies (4.5-10, 16, 17} modelRelax - //--- Q'' = { x in R^2 | x satisfies (4.11-16 } modelCore - //--- - setModelCore (&m_modelPart2, "CORE2"); - - //--- - //--- set the model relax to NULL, in this case - //--- solveRelaxed must be defined (for pricing algos) - //--- - setModelRelax(NULL); - - //--- - //--- load the OSI object to be used in solve relaxed - //--- - m_osi.messageHandler()->setLogLevel(0); - //m_osi.setHintParam(OsiDoReducePrint, true, OsiHintDo); -#ifdef __DECOMP_IP_CBC__ - m_osi.getModelPtr()->setLogLevel(0); +void SmallIP_DecompApp::createModels() { + + //--- + //--- This function does the work to create the different models + //--- that will be used. This memory is owned by the user. It will + //--- be passed to the application interface and used by the algorithms. + //--- + UtilPrintFuncBegin(m_osLog, m_classTag, "createModels()", LogLevel, 2); + + //--- + //--- Small Integer Program from: + //--- "Integer Programming: Theory and Practice", + //--- "Decomposition in Integer Linear Programming", Chapter 4 + //--- + //--- min 1 x[0] + //--- + //--- s.t. 7 x[0] - 1 x[1] >= 13 (4.5) + //--- 1 x[1] >= 1 (4.6) + //--- -1 x[0] + 1 x[1] >= -3 (4.7) + //--- -4 x[0] - 1 x[1] >= -27 (4.8) + //--- - 1 x[1] >= -5 (4.9) + //--- 0.2 x[0] - 1 x[1] >= -4 (4.10) + //--- + //--- -1 x[0] - 1 x[1] >= -8 (4.11) + //--- -0.4 x[0] + 1 x[1] >= 0.3 (4.12) + //--- 1 x[0] + 1 x[1] >= 4.5 (4.13) + //--- 3 x[0] + 1 x[1] >= 9.5 (4.14) + //--- 0.25 x[0] - 1 x[1] >= -3 (4.15) + //--- x[0], x[1] >=0, <= 6 (4.16) + //--- x[0], x[1] integer (4.17) + //--- + //--- Model 1 + //--- Q' = { x in R^2 | x satisfies (4.5-10, 16, 17} modelRelax + //--- Q'' = { x in R^2 | x satisfies (4.11-16 } modelCore + //--- + //--- Model 2 + //--- Q' = { x in R^2 | x satisfies (4.11-17) } modelRelax + //--- Q'' = { x in R^2 | x satisfies (4.5-10, 16) } modelCore + //--- + + //--- + //--- Construct the objective function (the original problem is + //--- a maximization, so we flip the sign to make it minimization). 
+ //--- + const int numCols = 2; + m_objective = new double[numCols]; + if (!m_objective) + throw UtilExceptionMemory("createModels", "SmallIP_DecompApp"); + m_objective[0] = 1.0; + m_objective[1] = 0.0; + setModelObjective(m_objective); + + //--- + //--- build matrix part 1 (4.5 -10,16) + //--- build matrix part 2 (4.11-15,16) + //--- + const int numNzs1 = 10; + const int numNzs2 = 10; + const int numRows1 = 6; + const int numRows2 = 5; + bool isRowOrdered = false; + + int rowIndices1[numNzs1] = {0, 0, 1, 2, 2, 3, 3, 4, 5, 5}; + int colIndices1[numNzs1] = {0, 1, 1, 0, 1, 0, 1, 1, 0, 1}; + double elements1[numNzs1] = {7.0, -1.0, 1.0, -1.0, 1.0, + -4.0, -1.0, -1.0, 0.2, -1.0}; + + int rowIndices2[numNzs2] = {0, 0, 1, 1, 2, 2, 3, 3, 4, 4}; + int colIndices2[numNzs2] = {0, 1, 0, 1, 0, 1, 0, 1, 0, 1}; + double elements2[numNzs2] = {-1.0, -1.0, -0.4, 1.0, 1.0, + 1.0, 3.0, 1.0, 0.25, -1.0}; + + m_modelPart1.M = new CoinPackedMatrix(isRowOrdered, rowIndices1, colIndices1, + elements1, numNzs1); + m_modelPart2.M = new CoinPackedMatrix(isRowOrdered, rowIndices2, colIndices2, + elements2, numNzs2); + + //--- + //--- set the row upper and lower bounds of part 1 + //--- + double rowLB1[numRows1] = {13.0, 1.0, -3.0, -27.0, -5.0, -4.0}; + std::fill_n(back_inserter(m_modelPart1.rowUB), numRows1, DecompInf); + std::copy(rowLB1, rowLB1 + numRows1, back_inserter(m_modelPart1.rowLB)); + + //--- + //--- set the column upper and lower bounds of part 1 + //--- + std::fill_n(back_inserter(m_modelPart1.colLB), numCols, 0.0); + std::fill_n(back_inserter(m_modelPart1.colUB), numCols, 6.0); + + //--- + //--- set the integer variables for part 1 + //--- + m_modelPart1.integerVars.push_back(0); + m_modelPart1.integerVars.push_back(1); + + //--- + //--- set the row upper and lower bounds of part 2 + //--- + double rowLB2[numRows2] = {-8.0, 0.3, 4.5, 9.5, -3.0}; + std::fill_n(back_inserter(m_modelPart2.rowUB), numRows2, DecompInf); + std::copy(rowLB2, rowLB2 + numRows2, back_inserter(m_modelPart2.rowLB)); + + //--- + //--- set the column upper and lower bounds of part 2 + //--- + std::fill_n(back_inserter(m_modelPart2.colLB), numCols, 0.0); + std::fill_n(back_inserter(m_modelPart2.colUB), numCols, 6.0); + + //--- + //--- set the integer variables for part 2 + //--- + m_modelPart2.integerVars.push_back(0); + m_modelPart2.integerVars.push_back(1); + + //--- + //--- set the model core + //--- Q' = { x in R^2 | x satisfies (4.5-10, 16, 17} modelRelax + //--- Q'' = { x in R^2 | x satisfies (4.11-16 } modelCore + //--- + setModelCore(&m_modelPart2, "CORE2"); + + //--- + //--- set the model relax to NULL, in this case + //--- solveRelaxed must be defined (for pricing algos) + //--- + setModelRelax(NULL); + + //--- + //--- load the OSI object to be used in solve relaxed + //--- + m_osi.messageHandler()->setLogLevel(0); + // m_osi.setHintParam(OsiDoReducePrint, true, OsiHintDo); +#ifdef __DECOMP_IP_CBC__ + m_osi.getModelPtr()->setLogLevel(0); #endif - m_osi.loadProblem(*m_modelPart1.getMatrix(), - m_modelPart1.getColLB(), - m_modelPart1.getColUB(), - NULL, - m_modelPart1.getRowLB(), - m_modelPart1.getRowUB()); - m_osi.setInteger(m_modelPart1.getIntegerVars(), - m_modelPart1.getNumInts()); - - UtilPrintFuncEnd(m_osLog, m_classTag, "createModels()", LogLevel, 2); + m_osi.loadProblem(*m_modelPart1.getMatrix(), m_modelPart1.getColLB(), + m_modelPart1.getColUB(), NULL, m_modelPart1.getRowLB(), + m_modelPart1.getRowUB()); + m_osi.setInteger(m_modelPart1.getIntegerVars(), m_modelPart1.getNumInts()); + + UtilPrintFuncEnd(m_osLog, 
m_classTag, "createModels()", LogLevel, 2); } //===========================================================================// -int SmallIP_DecompApp::generateInitVars(DecompVarList & initVars){ - - //--- - //--- generateInitVars is a virtual method and can be overriden - //--- if the user has some idea how to initialize the list of - //--- initial variables (columns in the DW master) - //--- - - UtilPrintFuncBegin(m_osLog, m_classTag, "generateInitVars()", LogLevel, 2); - - //--- - //--- To follow the example in the chapter: (4,1) and (5,5) - //--- - int ind [2] = {0,1}; - double els1[2] = {4.0,1.0}; - double els2[2] = {5.0,5.0}; - initVars.push_back(new DecompVar(2, ind, els1, 4.0)); - initVars.push_back(new DecompVar(2, ind, els2, 5.0)); - - UtilPrintFuncEnd(m_osLog, m_classTag, "generateInitVars()", LogLevel, 2); - return static_cast(initVars.size()); +int SmallIP_DecompApp::generateInitVars(DecompVarList &initVars) { + + //--- + //--- generateInitVars is a virtual method and can be overriden + //--- if the user has some idea how to initialize the list of + //--- initial variables (columns in the DW master) + //--- + + UtilPrintFuncBegin(m_osLog, m_classTag, "generateInitVars()", LogLevel, 2); + + //--- + //--- To follow the example in the chapter: (4,1) and (5,5) + //--- + int ind[2] = {0, 1}; + double els1[2] = {4.0, 1.0}; + double els2[2] = {5.0, 5.0}; + initVars.push_back(new DecompVar(2, ind, els1, 4.0)); + initVars.push_back(new DecompVar(2, ind, els2, 5.0)); + + UtilPrintFuncEnd(m_osLog, m_classTag, "generateInitVars()", LogLevel, 2); + return static_cast(initVars.size()); } //===========================================================================// -DecompSolverStatus -SmallIP_DecompApp::solveRelaxed(const int whichBlock, - const double * redCostX, - const double convexDual, - DecompVarList & varList){ - - //--- - //--- solveRelaxed is a virtual method and can be overriden - //--- if the user wants to solve the subproblem using their own - //--- solver, rather than the built-in solver - //--- - //--- if the user does not define and set the the relaxation model - //--- then this method must be defined - //--- - UtilPrintFuncBegin(m_osLog, m_classTag, "solveRelaxed()", LogLevel, 2); - - DecompSolverStatus status = DecompSolStatNoSolution; - - //--- - //--- set the objective function of the subproblem to the current - //--- reduced cost vector - //--- - m_osi.setObjective(redCostX); - -#ifdef __DECOMP_IP_CBC__ - //--- - //--- because OsiCbc does not keep original column bounds - //--- we must reset each time - //--- inside DIP, we avoid this by using Cbc directly, not OsiCbc - //--- - m_osi.setColLower(0, 0.0); - m_osi.setColLower(1, 0.0); - m_osi.setColUpper(0, 6.0); - m_osi.setColUpper(1, 6.0); - m_osi.getModelPtr()->resetModel(); +DecompSolverStatus SmallIP_DecompApp::solveRelaxed(const int whichBlock, + const double *redCostX, + const double convexDual, + DecompVarList &varList) { + + //--- + //--- solveRelaxed is a virtual method and can be overriden + //--- if the user wants to solve the subproblem using their own + //--- solver, rather than the built-in solver + //--- + //--- if the user does not define and set the the relaxation model + //--- then this method must be defined + //--- + UtilPrintFuncBegin(m_osLog, m_classTag, "solveRelaxed()", LogLevel, 2); + + DecompSolverStatus status = DecompSolStatNoSolution; + + //--- + //--- set the objective function of the subproblem to the current + //--- reduced cost vector + //--- + m_osi.setObjective(redCostX); + +#ifdef 
__DECOMP_IP_CBC__ + //--- + //--- because OsiCbc does not keep original column bounds + //--- we must reset each time + //--- inside DIP, we avoid this by using Cbc directly, not OsiCbc + //--- + m_osi.setColLower(0, 0.0); + m_osi.setColLower(1, 0.0); + m_osi.setColUpper(0, 6.0); + m_osi.setColUpper(1, 6.0); + m_osi.getModelPtr()->resetModel(); #endif - //const double * colLB = m_osi.getColLower(); - //const double * colUB = m_osi.getColUpper(); - //for(int i = 0; i < m_osi.getNumCols(); i++) - //printf("B i:%d lb:%g ub:%g\n", i, colLB[i], colUB[i]); - - //--- - //--- solve with OSI milp solver - //--- - m_osi.branchAndBound(); - - //for(int i = 0; i < m_osi.getNumCols(); i++) - //printf("A i:%d lb:%g ub:%g\n", i, colLB[i], colUB[i]); - - //--- - //--- check that found optimal - //--- - assert(!m_osi.isProvenPrimalInfeasible()); - assert(m_osi.isProvenOptimal()); - if(!m_osi.isProvenOptimal()) - return DecompSolStatNoSolution; - else - status = DecompSolStatOptimal; - - //TODO: - //this is way too confusing for user to remember they need -alpha! - // let framework do that - also setting the block id - framework! - - //--- - //--- create a variable object from the optimal solution - //--- - int i; - int nOrigCols = m_osi.getNumCols(); - double varRedCost = m_osi.getObjValue() - convexDual; - double varOrigCost = 0.0; - const double * colSolution = m_osi.getColSolution(); - for(i = 0; i < m_osi.getNumCols(); i++) - varOrigCost += colSolution[i] * m_objective[i]; - DecompVar * var = new DecompVar(nOrigCols, - colSolution, - varRedCost, - varOrigCost); - varList.push_back(var); - - UtilPrintFuncEnd(m_osLog, m_classTag, "solveRelaxed()", LogLevel, 2); - return status; + // const double * colLB = m_osi.getColLower(); + // const double * colUB = m_osi.getColUpper(); + // for(int i = 0; i < m_osi.getNumCols(); i++) + // printf("B i:%d lb:%g ub:%g\n", i, colLB[i], colUB[i]); + + //--- + //--- solve with OSI milp solver + //--- + m_osi.branchAndBound(); + + // for(int i = 0; i < m_osi.getNumCols(); i++) + // printf("A i:%d lb:%g ub:%g\n", i, colLB[i], colUB[i]); + + //--- + //--- check that found optimal + //--- + assert(!m_osi.isProvenPrimalInfeasible()); + assert(m_osi.isProvenOptimal()); + if (!m_osi.isProvenOptimal()) + return DecompSolStatNoSolution; + else + status = DecompSolStatOptimal; + + // TODO: + // this is way too confusing for user to remember they need -alpha! + // let framework do that - also setting the block id - framework! 
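// Illustrative note on the "-alpha" TODO above: in Dantzig-Wolfe pricing, the
// column built from the subproblem optimum xhat prices out with
//     reduced cost = (redCostX . xhat) - convexDual,
// where convexDual (the "alpha") is the dual of the master's convexity row,
// while its cost in the original objective is (m_objective . xhat). Because
// the subproblem objective was set to redCostX above, m_osi.getObjValue()
// already equals (redCostX . xhat), which is how varRedCost and varOrigCost
// are computed below.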
+ + //--- + //--- create a variable object from the optimal solution + //--- + int i; + int nOrigCols = m_osi.getNumCols(); + double varRedCost = m_osi.getObjValue() - convexDual; + double varOrigCost = 0.0; + const double *colSolution = m_osi.getColSolution(); + for (i = 0; i < m_osi.getNumCols(); i++) + varOrigCost += colSolution[i] * m_objective[i]; + DecompVar *var = + new DecompVar(nOrigCols, colSolution, varRedCost, varOrigCost); + varList.push_back(var); + + UtilPrintFuncEnd(m_osLog, m_classTag, "solveRelaxed()", LogLevel, 2); + return status; } diff --git a/Dip/examples/SmallIP/SmallIP_Main.cpp b/Dip/examples/SmallIP/SmallIP_Main.cpp old mode 100755 new mode 100644 index 392269cf..cb9ce772 --- a/Dip/examples/SmallIP/SmallIP_Main.cpp +++ b/Dip/examples/SmallIP/SmallIP_Main.cpp @@ -17,7 +17,7 @@ //===========================================================================// #if defined(VERSION1) #include "SmallIP_DecompApp.h" -#elif defined (VERSION2) +#elif defined(VERSION2) #include "SmallIP_DecompApp2.h" #endif //===========================================================================// @@ -28,88 +28,87 @@ #include "DecompAlgoRC.h" //===========================================================================// -int main(int argc, char ** argv){ - try{ - - //--- - //--- create the utility class for parsing parameters - //--- - UtilParameters utilParam(argc, argv); +int main(int argc, char **argv) { + try { - bool doCut = utilParam.GetSetting("doCut", false); - bool doPriceCut = utilParam.GetSetting("doPriceCut", false); - bool doRelaxCut = utilParam.GetSetting("doRelaxCut", false); + //--- + //--- create the utility class for parsing parameters + //--- + UtilParameters utilParam(argc, argv); - //--- - //--- create the user application (a DecompApp) - //--- - SmallIP_DecompApp sip(utilParam); - - //--- - //--- create the algorithm (a DecompAlgo) - //--- - DecompAlgo * algo = NULL; + bool doCut = utilParam.GetSetting("doCut", false); + bool doPriceCut = utilParam.GetSetting("doPriceCut", false); + bool doRelaxCut = utilParam.GetSetting("doRelaxCut", false); - //--- - //--- create the CPM algorithm object - //--- - if(doCut) - algo = new DecompAlgoC(&sip, utilParam); + //--- + //--- create the user application (a DecompApp) + //--- + SmallIP_DecompApp sip(utilParam); - //--- - //--- create the PC algorithm object - //--- - if(doPriceCut) - algo = new DecompAlgoPC(&sip, utilParam); + //--- + //--- create the algorithm (a DecompAlgo) + //--- + DecompAlgo *algo = NULL; - //--- - //--- create the PC algorithm object - //--- - if(doRelaxCut) - algo = new DecompAlgoRC(&sip, utilParam); - - //--- - //--- create the driver AlpsDecomp model - //--- - AlpsDecompModel alpsModel(utilParam, algo); - - //--- - //--- solve - //--- - alpsModel.solve(); + //--- + //--- create the CPM algorithm object + //--- + if (doCut) + algo = new DecompAlgoC(&sip, utilParam); - //--- - //--- sanity check that optimal solution is 3.0 - //--- - double epsilon = 1.0e-5; - double optimalUB = 3.0; - double diffUB = alpsModel.getGlobalUB() - optimalUB; - if(alpsModel.getSolStatus() != AlpsExitStatusOptimal || - fabs(diffUB) > epsilon){ - throw UtilException("SmallIP bad solution.", "main", "SmallIP"); - } + //--- + //--- create the PC algorithm object + //--- + if (doPriceCut) + algo = new DecompAlgoPC(&sip, utilParam); - //--- - //--- get optimal solution - //--- - const DecompSolution * solution = alpsModel.getBestSolution(); - cout << "Optimal Solution" << endl; - solution->print(); - 
if(fabs(solution->getQuality() - alpsModel.getGlobalUB()) > epsilon){ - throw UtilException("Best bound and best solution not matching.", - "main", "SmallIP"); - } + //--- + //--- create the PC algorithm object + //--- + if (doRelaxCut) + algo = new DecompAlgoRC(&sip, utilParam); - //--- - //--- free local memory - //--- - delete algo; - } - catch(CoinError & ex){ - cerr << "COIN Exception [ " << ex.message() << " ]" - << " at " << ex.fileName() << ":L" << ex.lineNumber() - << " in " << ex.className() << "::" << ex.methodName() << endl; - return 1; - } - return 0; -} + //--- + //--- create the driver AlpsDecomp model + //--- + AlpsDecompModel alpsModel(utilParam, algo); + + //--- + //--- solve + //--- + alpsModel.solve(); + + //--- + //--- sanity check that optimal solution is 3.0 + //--- + double epsilon = 1.0e-5; + double optimalUB = 3.0; + double diffUB = alpsModel.getGlobalUB() - optimalUB; + if (alpsModel.getSolStatus() != AlpsExitStatusOptimal || + fabs(diffUB) > epsilon) { + throw UtilException("SmallIP bad solution.", "main", "SmallIP"); + } + + //--- + //--- get optimal solution + //--- + const DecompSolution *solution = alpsModel.getBestSolution(); + cout << "Optimal Solution" << endl; + solution->print(); + if (fabs(solution->getQuality() - alpsModel.getGlobalUB()) > epsilon) { + throw UtilException("Best bound and best solution not matching.", "main", + "SmallIP"); + } + + //--- + //--- free local memory + //--- + delete algo; + } catch (CoinError &ex) { + cerr << "COIN Exception [ " << ex.message() << " ]" + << " at " << ex.fileName() << ":L" << ex.lineNumber() << " in " + << ex.className() << "::" << ex.methodName() << endl; + return 1; + } + return 0; +} diff --git a/Dip/examples/TSP/TSP_CutGen.cpp b/Dip/examples/TSP/TSP_CutGen.cpp old mode 100755 new mode 100644 index f1acfbe4..bb2121d8 --- a/Dip/examples/TSP/TSP_CutGen.cpp +++ b/Dip/examples/TSP/TSP_CutGen.cpp @@ -18,43 +18,39 @@ #include "TSP_SubtourCut.h" /*--------------------------------------------------------------------------*/ -int TSP_DecompApp::generateCutsSubtour(DecompCutList & newCuts){ - - UtilPrintFuncBegin(m_osLog, m_classTag, - "generateCutsSubtour()", m_param.LogDebugLevel, 2); - - //TODO: use access methods - TSP_Concorde & tspConcorde = m_tsp.m_concorde; - - vector subtourCuts; - - int c; - int n_subtour = tspConcorde.generateCutsSubtour(subtourCuts); - int n_prevcuts = static_cast(newCuts.size()); - - for(c = 0; c < n_subtour; c++){ - vector & S = subtourCuts[c].S; - vector & inS = subtourCuts[c].inS; - - if(S.size() >= 2){ - TSP_SubtourCut * sec_cut = new TSP_SubtourCut(inS, S, m_infinity); - - UTIL_DEBUG(m_param.LogDebugLevel, 3, - sec_cut->print(m_infinity); - ); - newCuts.push_back(sec_cut); - } - else{ - cout << "ERROR size of S < 2 (not adding)" << endl; - } - } +int TSP_DecompApp::generateCutsSubtour(DecompCutList &newCuts) { - UtilPrintFuncEnd(m_osLog, m_classTag, - "generateCutsSubtour()", m_param.LogDebugLevel, 2); + UtilPrintFuncBegin(m_osLog, m_classTag, "generateCutsSubtour()", + m_param.LogDebugLevel, 2); - return static_cast(newCuts.size()) - n_prevcuts; -} + // TODO: use access methods + TSP_Concorde &tspConcorde = m_tsp.m_concorde; + + vector subtourCuts; + + int c; + int n_subtour = tspConcorde.generateCutsSubtour(subtourCuts); + int n_prevcuts = static_cast(newCuts.size()); + for (c = 0; c < n_subtour; c++) { + vector &S = subtourCuts[c].S; + vector &inS = subtourCuts[c].inS; + + if (S.size() >= 2) { + TSP_SubtourCut *sec_cut = new TSP_SubtourCut(inS, S, m_infinity); + + 
UTIL_DEBUG(m_param.LogDebugLevel, 3, sec_cut->print(m_infinity);); + newCuts.push_back(sec_cut); + } else { + cout << "ERROR size of S < 2 (not adding)" << endl; + } + } + + UtilPrintFuncEnd(m_osLog, m_classTag, "generateCutsSubtour()", + m_param.LogDebugLevel, 2); + + return static_cast(newCuts.size()) - n_prevcuts; +} #if 0 //TODO diff --git a/Dip/examples/TSP/TSP_DecompApp.cpp b/Dip/examples/TSP/TSP_DecompApp.cpp old mode 100755 new mode 100644 index 15224e47..71672041 --- a/Dip/examples/TSP/TSP_DecompApp.cpp +++ b/Dip/examples/TSP/TSP_DecompApp.cpp @@ -14,481 +14,465 @@ //===========================================================================// #include "TSP_DecompApp.h" -#include "TSP_Concorde.h" #include "TSP_Boost.h" +#include "TSP_Concorde.h" //===========================================================================// #include "DecompVar.h" //===========================================================================// -void TSP_DecompApp::initializeApp() { - - UtilPrintFuncBegin(m_osLog, m_classTag, - "initializeApp()", m_appParam.LogLevel, 2); - - //--- - //--- read TSPLIB instance - //--- - string fileName = m_appParam.DataDir - + UtilDirSlash() + m_appParam.Instance + ".tsp"; - UtilGraphLib & graphLib = m_tsp.m_graphLib; - graphLib.read_data(fileName.c_str()); - - //--- - //--- read best known lb/ub - //--- - string bestKnownFile = m_appParam.DataDir + UtilDirSlash(); - bestKnownFile += ".." + UtilDirSlash(); - bestKnownFile += "TSPLIB_opt"; - - ifstream is; - string instanceName; - double bestBound, bestKnownLB, bestKnownUB; - UtilOpenFile(is, bestKnownFile); - while(!is.eof()){ - is >> instanceName >> bestBound; - instanceName = UtilStrTrim(instanceName); - if(instanceName == m_appParam.Instance){ - bestKnownLB = bestBound; - bestKnownUB = bestBound; - break; - } - } - setBestKnownLB(bestKnownLB); - setBestKnownUB(bestKnownUB); - - //--- - //--- build complete graph over V \ {m_vert} = m_boost.m_cgV - //--- for use with one-tree solver - //--- - m_tsp.m_boost.buildCompleteGraphMinusVert(m_tsp.m_vert, - graphLib.n_vertices); - - //--- - //--- create models - //--- - createModels(); - - UtilPrintFuncEnd(m_osLog, m_classTag, - "initializeApp()", m_appParam.LogLevel, 2); +void TSP_DecompApp::initializeApp() { + + UtilPrintFuncBegin(m_osLog, m_classTag, "initializeApp()", + m_appParam.LogLevel, 2); + + //--- + //--- read TSPLIB instance + //--- + string fileName = + m_appParam.DataDir + UtilDirSlash() + m_appParam.Instance + ".tsp"; + UtilGraphLib &graphLib = m_tsp.m_graphLib; + graphLib.read_data(fileName.c_str()); + + //--- + //--- read best known lb/ub + //--- + string bestKnownFile = m_appParam.DataDir + UtilDirSlash(); + bestKnownFile += ".." 
+ UtilDirSlash(); + bestKnownFile += "TSPLIB_opt"; + + ifstream is; + string instanceName; + double bestBound, bestKnownLB, bestKnownUB; + UtilOpenFile(is, bestKnownFile); + while (!is.eof()) { + is >> instanceName >> bestBound; + instanceName = UtilStrTrim(instanceName); + if (instanceName == m_appParam.Instance) { + bestKnownLB = bestBound; + bestKnownUB = bestBound; + break; + } + } + setBestKnownLB(bestKnownLB); + setBestKnownUB(bestKnownUB); + + //--- + //--- build complete graph over V \ {m_vert} = m_boost.m_cgV + //--- for use with one-tree solver + //--- + m_tsp.m_boost.buildCompleteGraphMinusVert(m_tsp.m_vert, graphLib.n_vertices); + + //--- + //--- create models + //--- + createModels(); + + UtilPrintFuncEnd(m_osLog, m_classTag, "initializeApp()", m_appParam.LogLevel, + 2); } //===========================================================================// -void TSP_DecompApp::createModels(){ - - //--- - //--- This function does the work to create the different models - //--- that will be used. This memory is owned by the user. It will - //--- be passed to the application interface and used by the algorithms. - //--- - UtilPrintFuncBegin(m_osLog, m_classTag, - "createModels()", m_appParam.LogLevel, 2); - - //--- - //--- Symmetric Travelling Salesman Problem (TSP) - //--- min sum{e in E} c_e x_e - //--- s.t. x(delta(v)) = 2, for v in V (1) - //--- x(delta(S)) >= 2, for S subseteq V (2) (gen-dynamic) - //--- x in {0,1}, for all e in E (3) - //--- - UtilGraphLib & graphLib = m_tsp.m_graphLib; - int numCols = graphLib.n_edges; - - //--- - //--- create the cost vector c - //--- - m_objective = new double[numCols]; - if(!m_objective) - throw UtilExceptionMemory("createModels", "TSP_DecompApp"); - copy(graphLib.edge_wt, graphLib.edge_wt + numCols, m_objective); - setModelObjective(m_objective, numCols); - - //--- - //--- Two matching relaxation. - //--- core = { some subset of (2) } - //--- relax = { (1) and (3) } => 2-matching relaxation - //--- NOTE: for validity, must generate rest of (2) - //--- - - //--- - //--- One tree relaxation. - //--- core = { 1 } - //--- relax = { one-tree TODO write algebra } => 1-tree relaxation - //--- NOTE: for validity, must generate rest of (2) - //--- - if(m_appParam.ModelNameCore == "SUBTOUR"){ - DecompConstraintSet * model = new DecompConstraintSet(); - createModelTrivialSEC(model); - m_models.push_back(model); - setModelCore(model, m_appParam.ModelNameCore); - } - if(m_appParam.ModelNameRelax == "2MATCH" || - m_appParam.ModelNameCore == "2MATCH"){ - DecompConstraintSet * model = new DecompConstraintSet(); - createModel2Match(model); - m_models.push_back(model); - if(m_appParam.ModelNameRelax == "2MATCH"){ - assert(m_appParam.ModelNameCore == "SUBTOUR"); - setModelRelax(model, m_appParam.ModelNameRelax); - } - else{ - assert(m_appParam.ModelNameRelax == "SUBTOUR"); - setModelRelax(NULL); - setModelCore (model, m_appParam.ModelNameCore); - } - } - UtilPrintFuncEnd(m_osLog, m_classTag, - "createModels()", m_appParam.LogLevel, 2); +void TSP_DecompApp::createModels() { + + //--- + //--- This function does the work to create the different models + //--- that will be used. This memory is owned by the user. It will + //--- be passed to the application interface and used by the algorithms. + //--- + UtilPrintFuncBegin(m_osLog, m_classTag, "createModels()", m_appParam.LogLevel, + 2); + + //--- + //--- Symmetric Travelling Salesman Problem (TSP) + //--- min sum{e in E} c_e x_e + //--- s.t. 
x(delta(v)) = 2, for v in V (1) + //--- x(delta(S)) >= 2, for S subseteq V (2) (gen-dynamic) + //--- x in {0,1}, for all e in E (3) + //--- + UtilGraphLib &graphLib = m_tsp.m_graphLib; + int numCols = graphLib.n_edges; + + //--- + //--- create the cost vector c + //--- + m_objective = new double[numCols]; + if (!m_objective) + throw UtilExceptionMemory("createModels", "TSP_DecompApp"); + copy(graphLib.edge_wt, graphLib.edge_wt + numCols, m_objective); + setModelObjective(m_objective, numCols); + + //--- + //--- Two matching relaxation. + //--- core = { some subset of (2) } + //--- relax = { (1) and (3) } => 2-matching relaxation + //--- NOTE: for validity, must generate rest of (2) + //--- + + //--- + //--- One tree relaxation. + //--- core = { 1 } + //--- relax = { one-tree TODO write algebra } => 1-tree relaxation + //--- NOTE: for validity, must generate rest of (2) + //--- + if (m_appParam.ModelNameCore == "SUBTOUR") { + DecompConstraintSet *model = new DecompConstraintSet(); + createModelTrivialSEC(model); + m_models.push_back(model); + setModelCore(model, m_appParam.ModelNameCore); + } + if (m_appParam.ModelNameRelax == "2MATCH" || + m_appParam.ModelNameCore == "2MATCH") { + DecompConstraintSet *model = new DecompConstraintSet(); + createModel2Match(model); + m_models.push_back(model); + if (m_appParam.ModelNameRelax == "2MATCH") { + assert(m_appParam.ModelNameCore == "SUBTOUR"); + setModelRelax(model, m_appParam.ModelNameRelax); + } else { + assert(m_appParam.ModelNameRelax == "SUBTOUR"); + setModelRelax(NULL); + setModelCore(model, m_appParam.ModelNameCore); + } + } + UtilPrintFuncEnd(m_osLog, m_classTag, "createModels()", m_appParam.LogLevel, + 2); } //===========================================================================// -void TSP_DecompApp::createModel2Match(DecompConstraintSet * modelCS){ - - //--- - //--- s.t. x(delta(v)) = 2, for v in V (1) [A', b'] - //--- x in {0,1}, for all e = E (3) [A', b'] - //--- - UtilPrintFuncBegin(m_osLog, m_classTag, - "createModel2Match()", m_appParam.LogLevel, 2); - - UtilGraphLib & graphLib = m_tsp.m_graphLib; - int n_cols = graphLib.n_edges; - int * rowIndices = new int[2 * n_cols]; - int * colIndices = new int[2 * n_cols]; - double * elements = new double[2 * n_cols]; - int edge_index = 0; - int triplet_index = 0; - for(int u = 1; u < graphLib.n_vertices; u++){ - for(int v = 0; v < u; v++){ - rowIndices[triplet_index] = u; - rowIndices[triplet_index + 1] = v; - colIndices[triplet_index] = edge_index; - colIndices[triplet_index + 1] = edge_index; - triplet_index += 2; - edge_index++; - } - } - UtilFillN(elements, 2 * n_cols, 1.0); - UtilFillN(modelCS->colLB, n_cols, 0.0); - UtilFillN(modelCS->colUB, n_cols, 1.0); - UtilFillN(modelCS->rowLB, graphLib.n_vertices, 2.0); - UtilFillN(modelCS->rowUB, graphLib.n_vertices, 2.0); - UtilIotaN(modelCS->integerVars, n_cols, 0); - - modelCS->M = new CoinPackedMatrix(true, rowIndices, colIndices, - elements, 2 * n_cols); - - UTIL_DELARR(rowIndices); - UTIL_DELARR(colIndices); - UTIL_DELARR(elements); - - UtilPrintFuncEnd(m_osLog, m_classTag, - "createModel2Match()", m_appParam.LogLevel, 2); +void TSP_DecompApp::createModel2Match(DecompConstraintSet *modelCS) { + + //--- + //--- s.t. 
x(delta(v)) = 2, for v in V (1) [A', b'] + //--- x in {0,1}, for all e = E (3) [A', b'] + //--- + UtilPrintFuncBegin(m_osLog, m_classTag, "createModel2Match()", + m_appParam.LogLevel, 2); + + UtilGraphLib &graphLib = m_tsp.m_graphLib; + int n_cols = graphLib.n_edges; + int *rowIndices = new int[2 * n_cols]; + int *colIndices = new int[2 * n_cols]; + double *elements = new double[2 * n_cols]; + int edge_index = 0; + int triplet_index = 0; + for (int u = 1; u < graphLib.n_vertices; u++) { + for (int v = 0; v < u; v++) { + rowIndices[triplet_index] = u; + rowIndices[triplet_index + 1] = v; + colIndices[triplet_index] = edge_index; + colIndices[triplet_index + 1] = edge_index; + triplet_index += 2; + edge_index++; + } + } + UtilFillN(elements, 2 * n_cols, 1.0); + UtilFillN(modelCS->colLB, n_cols, 0.0); + UtilFillN(modelCS->colUB, n_cols, 1.0); + UtilFillN(modelCS->rowLB, graphLib.n_vertices, 2.0); + UtilFillN(modelCS->rowUB, graphLib.n_vertices, 2.0); + UtilIotaN(modelCS->integerVars, n_cols, 0); + + modelCS->M = + new CoinPackedMatrix(true, rowIndices, colIndices, elements, 2 * n_cols); + + UTIL_DELARR(rowIndices); + UTIL_DELARR(colIndices); + UTIL_DELARR(elements); + + UtilPrintFuncEnd(m_osLog, m_classTag, "createModel2Match()", + m_appParam.LogLevel, 2); } //===========================================================================// -void TSP_DecompApp::createModelTrivialSEC(DecompConstraintSet * modelCS){ - - //--- - //--- Since we generate SECs (2) dynamically, we will simply start - //--- with some trivial SECs (|S| = 2). - //--- ?? todo - make it |S|=2, actually using redudant |S|=1 - //--- ?? what you have below is actually a relaxation of (1)!! >= - int u, v; - UtilGraphLib & graphLib = m_tsp.m_graphLib; - int n_cols = graphLib.n_edges; - - UtilPrintFuncBegin(m_osLog, m_classTag, - "createModelTrivialSEC()", m_appParam.LogLevel, 2); - - modelCS->M = new CoinPackedMatrix(false, 0, 0); - for(u = 0; u < graphLib.n_vertices; u++){ - CoinPackedVector row; - for(v = 0; v < graphLib.n_vertices; v++){ - if(u == v) - continue; - row.insert(UtilIndexU(u,v), 1.0); - } - modelCS->rowLB.push_back(2.0); - modelCS->rowUB.push_back(m_infinity); - modelCS->M->appendRow(row); - } - UtilFillN(modelCS->colLB, n_cols, 0.0); - UtilFillN(modelCS->colUB, n_cols, 1.0); - UtilIotaN(modelCS->integerVars, n_cols, 0); - - UtilPrintFuncEnd(m_osLog, m_classTag, - "createModelTrivialSEC()", m_appParam.LogLevel, 2); +void TSP_DecompApp::createModelTrivialSEC(DecompConstraintSet *modelCS) { + + //--- + //--- Since we generate SECs (2) dynamically, we will simply start + //--- with some trivial SECs (|S| = 2). + //--- ?? todo - make it |S|=2, actually using redudant |S|=1 + //--- ?? what you have below is actually a relaxation of (1)!! 
>= + int u, v; + UtilGraphLib &graphLib = m_tsp.m_graphLib; + int n_cols = graphLib.n_edges; + + UtilPrintFuncBegin(m_osLog, m_classTag, "createModelTrivialSEC()", + m_appParam.LogLevel, 2); + + modelCS->M = new CoinPackedMatrix(false, 0, 0); + for (u = 0; u < graphLib.n_vertices; u++) { + CoinPackedVector row; + for (v = 0; v < graphLib.n_vertices; v++) { + if (u == v) + continue; + row.insert(UtilIndexU(u, v), 1.0); + } + modelCS->rowLB.push_back(2.0); + modelCS->rowUB.push_back(m_infinity); + modelCS->M->appendRow(row); + } + UtilFillN(modelCS->colLB, n_cols, 0.0); + UtilFillN(modelCS->colUB, n_cols, 1.0); + UtilIotaN(modelCS->integerVars, n_cols, 0); + + UtilPrintFuncEnd(m_osLog, m_classTag, "createModelTrivialSEC()", + m_appParam.LogLevel, 2); } //===========================================================================// -int TSP_DecompApp::generateCuts(const double * x, - DecompCutList & newCuts){ - - - UtilPrintFuncBegin(m_osLog, m_classTag, - "generateCuts()", m_appParam.LogLevel, 2); - - int n_cuts = 0; - UtilGraphLib & graphLib = m_tsp.m_graphLib; - TSP_Concorde & tspConcorde = m_tsp.m_concorde; - tspConcorde.buildSubGraph(graphLib.n_vertices, - graphLib.n_edges, x); - - if(m_appParam.CutSubtoursX) - n_cuts += generateCutsSubtour(newCuts); - - UtilPrintFuncEnd(m_osLog, m_classTag, - "generateCuts()", m_appParam.LogLevel, 2); - return n_cuts; -} +int TSP_DecompApp::generateCuts(const double *x, DecompCutList &newCuts) { -//===========================================================================// -DecompSolverStatus -TSP_DecompApp::solveRelaxed(const int whichBlock, - const double * redCostX, - const double convexDual, - DecompVarList & varList){ - - UtilPrintFuncBegin(m_osLog, m_classTag, - "solveRelaxed()", m_appParam.LogLevel, 2); - - DecompSolverStatus solverStatus = DecompSolStatNoSolution; - UtilGraphLib & graphLib = m_tsp.m_graphLib; - Graph & cgV = m_tsp.m_boost.m_cgV; - int n_vertices = graphLib.n_vertices; - int u, index; - - //TODO: BranchEnforceInSubProb option? 
- - if(m_appParam.ModelNameRelax == "SUBTOUR"){ - vector< pair > edge_cost; - edge_cost.reserve(n_vertices); - for(u = 0; u < n_vertices; u++){ - if(u != m_tsp.m_vert){ - index = UtilIndexU(m_tsp.m_vert, u); - edge_cost.push_back(make_pair(index, redCostX[index])); - } - } - solveOneTree(redCostX, convexDual, edge_cost, varList, cgV); - solverStatus = DecompSolStatOptimal; - } - - UtilPrintFuncEnd(m_osLog, m_classTag, - "solveRelaxed()", m_appParam.LogLevel, 2); - return solverStatus; + UtilPrintFuncBegin(m_osLog, m_classTag, "generateCuts()", m_appParam.LogLevel, + 2); + + int n_cuts = 0; + UtilGraphLib &graphLib = m_tsp.m_graphLib; + TSP_Concorde &tspConcorde = m_tsp.m_concorde; + tspConcorde.buildSubGraph(graphLib.n_vertices, graphLib.n_edges, x); + + if (m_appParam.CutSubtoursX) + n_cuts += generateCutsSubtour(newCuts); + + UtilPrintFuncEnd(m_osLog, m_classTag, "generateCuts()", m_appParam.LogLevel, + 2); + return n_cuts; } //===========================================================================// -void TSP_DecompApp::solveOneTree(const double * cost, - const double alpha, - vector< pair > & edge_cost, - DecompVarList & vars, - Graph & g) { - - UtilPrintFuncBegin(m_osLog, m_classTag, - "solveOneTree()", m_appParam.LogLevel, 2); - - property_map::type e_index_g = get(edge_index, g); - property_map::type e_weight_g = get(edge_weight, g); - - UtilGraphLib & graphLib = m_tsp.m_graphLib; - const int max_exchanges = 4; - - //--- - //--- (partial) sort in increasing order - //--- - int sort_size = std::min(static_cast(edge_cost.size()), - 2 + max_exchanges); - partial_sort(edge_cost.begin(), - edge_cost.begin() + sort_size, - edge_cost.end(), UtilIsLessThan()); - - if(m_appParam.LogLevel > 4){ - (*m_osLog) << "Partial Sorted List [size = " << sort_size << "]" << endl; - for(vector< pair >::iterator tmp = edge_cost.begin(); - tmp != edge_cost.end(); tmp++){ - (*m_osLog) << "\nsorted edge_cost: " << (*tmp).first; - UtilPrintEdge((*tmp).first); - (*m_osLog) << " cost: " << (*tmp).second; - } - (*m_osLog) << "\n"; - } - - //--- - //--- update the edge weights in boost object - //--- - int index; - graph_traits::edge_iterator ei, ei_end; - for(tie(ei,ei_end) = edges(g); ei != ei_end; ei++){ - index = e_index_g(*ei); - e_weight_g[*ei] = cost[index]; - if(m_appParam.LogLevel > 4) - (*m_osLog) << "cost edge( " - << source(*ei,g) << "," << target(*ei,g) << "): " - << cost[index]; - assert(index == UtilIndexU(static_cast(source(*ei,g)), - static_cast(target(*ei,g)))); - } - - - - int n_edges = graphLib.n_edges; - int n_vertices = graphLib.n_vertices; - vector inMST(n_edges, false); - - vector inds; - vector els(n_vertices, 1.0); - double rc = 0.0; - double obj = 0.0; - inds.reserve(graphLib.n_vertices); - - //TODO: this should all be in boostGraphI - //--- - //--- find the minimum spanning tree - //--- - - //boost::print_graph(g); - - vector::edge_descriptor> spanning_tree; - vector::edge_descriptor>::iterator vei; - kruskal_minimum_spanning_tree(g, back_inserter(spanning_tree)); - - if(m_appParam.LogLevel > 4) - (*m_osLog) << "Spanning Tree:" << endl; - - int edge_index; - for (vei = spanning_tree.begin(); vei != spanning_tree.end(); ++vei) { - edge_index = e_index_g[*vei]; - rc += cost[edge_index]; - obj += graphLib.edge_wt[edge_index]; - inMST[edge_index] = true; - inds.push_back(edge_index); - - if(m_appParam.LogLevel > 4){ - UtilPrintEdge(edge_index); - (*m_osLog) << " -> " << cost[edge_index] << " rc : " << rc << endl; +DecompSolverStatus TSP_DecompApp::solveRelaxed(const int whichBlock, + 
const double *redCostX, + const double convexDual, + DecompVarList &varList) { + + UtilPrintFuncBegin(m_osLog, m_classTag, "solveRelaxed()", m_appParam.LogLevel, + 2); + + DecompSolverStatus solverStatus = DecompSolStatNoSolution; + UtilGraphLib &graphLib = m_tsp.m_graphLib; + Graph &cgV = m_tsp.m_boost.m_cgV; + int n_vertices = graphLib.n_vertices; + int u, index; + + // TODO: BranchEnforceInSubProb option? + + if (m_appParam.ModelNameRelax == "SUBTOUR") { + vector> edge_cost; + edge_cost.reserve(n_vertices); + for (u = 0; u < n_vertices; u++) { + if (u != m_tsp.m_vert) { + index = UtilIndexU(m_tsp.m_vert, u); + edge_cost.push_back(make_pair(index, redCostX[index])); } + } + solveOneTree(redCostX, convexDual, edge_cost, varList, cgV); + solverStatus = DecompSolStatOptimal; + } + + UtilPrintFuncEnd(m_osLog, m_classTag, "solveRelaxed()", m_appParam.LogLevel, + 2); + return solverStatus; +} - } - - const double bigM = m_infinity; - int exchange = 0; - vector< pair >::iterator vpi = edge_cost.begin(); - - //--- - //--- add the cheapest edge to vert - //--- - inds.push_back((*vpi).first); - rc += (*vpi).second; - obj += graphLib.edge_wt[(*vpi).first]; - - if(m_appParam.LogLevel > 4){ - (*m_osLog) << "Adding edge:" << endl; +//===========================================================================// +void TSP_DecompApp::solveOneTree(const double *cost, const double alpha, + vector> &edge_cost, + DecompVarList &vars, Graph &g) { + + UtilPrintFuncBegin(m_osLog, m_classTag, "solveOneTree()", m_appParam.LogLevel, + 2); + + property_map::type e_index_g = get(edge_index, g); + property_map::type e_weight_g = get(edge_weight, g); + + UtilGraphLib &graphLib = m_tsp.m_graphLib; + const int max_exchanges = 4; + + //--- + //--- (partial) sort in increasing order + //--- + int sort_size = + std::min(static_cast(edge_cost.size()), 2 + max_exchanges); + partial_sort(edge_cost.begin(), edge_cost.begin() + sort_size, + edge_cost.end(), UtilIsLessThan()); + + if (m_appParam.LogLevel > 4) { + (*m_osLog) << "Partial Sorted List [size = " << sort_size << "]" << endl; + for (vector>::iterator tmp = edge_cost.begin(); + tmp != edge_cost.end(); tmp++) { + (*m_osLog) << "\nsorted edge_cost: " << (*tmp).first; + UtilPrintEdge((*tmp).first); + (*m_osLog) << " cost: " << (*tmp).second; + } + (*m_osLog) << "\n"; + } + + //--- + //--- update the edge weights in boost object + //--- + int index; + graph_traits::edge_iterator ei, ei_end; + for (tie(ei, ei_end) = edges(g); ei != ei_end; ei++) { + index = e_index_g(*ei); + e_weight_g[*ei] = cost[index]; + if (m_appParam.LogLevel > 4) + (*m_osLog) << "cost edge( " << source(*ei, g) << "," << target(*ei, g) + << "): " << cost[index]; + assert(index == UtilIndexU(static_cast(source(*ei, g)), + static_cast(target(*ei, g)))); + } + + int n_edges = graphLib.n_edges; + int n_vertices = graphLib.n_vertices; + vector inMST(n_edges, false); + + vector inds; + vector els(n_vertices, 1.0); + double rc = 0.0; + double obj = 0.0; + inds.reserve(graphLib.n_vertices); + + // TODO: this should all be in boostGraphI + //--- + //--- find the minimum spanning tree + //--- + + // boost::print_graph(g); + + vector::edge_descriptor> spanning_tree; + vector::edge_descriptor>::iterator vei; + kruskal_minimum_spanning_tree(g, back_inserter(spanning_tree)); + + if (m_appParam.LogLevel > 4) + (*m_osLog) << "Spanning Tree:" << endl; + + int edge_index; + for (vei = spanning_tree.begin(); vei != spanning_tree.end(); ++vei) { + edge_index = e_index_g[*vei]; + rc += cost[edge_index]; + obj += 
graphLib.edge_wt[edge_index]; + inMST[edge_index] = true; + inds.push_back(edge_index); + + if (m_appParam.LogLevel > 4) { + UtilPrintEdge(edge_index); + (*m_osLog) << " -> " << cost[edge_index] << " rc : " << rc << endl; + } + } + + const double bigM = m_infinity; + int exchange = 0; + vector>::iterator vpi = edge_cost.begin(); + + //--- + //--- add the cheapest edge to vert + //--- + inds.push_back((*vpi).first); + rc += (*vpi).second; + obj += graphLib.edge_wt[(*vpi).first]; + + if (m_appParam.LogLevel > 4) { + (*m_osLog) << "Adding edge:" << endl; + UtilPrintEdge((*vpi).first); + (*m_osLog) << " -> " << cost[(*vpi).first] << " rc : " << rc << endl; + } + + vpi++; + for (; vpi != edge_cost.end(); vpi++) { + if (exchange >= max_exchanges) + break; + if (cost[(*vpi).first] >= bigM / 2.0) + break; + + //--- + //--- add the 2nd cheapest and keep exchanging this one + //--- + inds.push_back((*vpi).first); + rc += (*vpi).second; + obj += graphLib.edge_wt[(*vpi).first]; + + if (m_appParam.LogLevel > 4) { + (*m_osLog) << "Adding edges:" << endl; UtilPrintEdge((*vpi).first); (*m_osLog) << " -> " << cost[(*vpi).first] << " rc : " << rc << endl; - } - - vpi++; - for(; vpi != edge_cost.end(); vpi++){ - if(exchange >= max_exchanges) - break; - if(cost[(*vpi).first] >= bigM/2.0) - break; - - //--- - //--- add the 2nd cheapest and keep exchanging this one - //--- - inds.push_back((*vpi).first); - rc += (*vpi).second; - obj += graphLib.edge_wt[(*vpi).first]; - - if(m_appParam.LogLevel > 4){ - (*m_osLog) << "Adding edges:" << endl; - UtilPrintEdge((*vpi).first); - (*m_osLog) << " -> " << cost[(*vpi).first] << " rc : " << rc << endl; - (*m_osLog) << "Creating var with reduced = " << rc - alpha - << " obj = " << obj << endl; - } - - DecompVar * oneTree = new DecompVar(inds, els, rc - alpha, obj); - //oneTree->setBlockId(0);//this will happen by default - vars.push_back(oneTree); - - exchange++; - inds.pop_back(); - rc -= (*vpi).second; - obj -= graphLib.edge_wt[(*vpi).first]; - } - - UtilPrintFuncEnd(m_osLog, m_classTag, - "solveOneTree()", m_appParam.LogLevel, 2); + (*m_osLog) << "Creating var with reduced = " << rc - alpha + << " obj = " << obj << endl; + } + + DecompVar *oneTree = new DecompVar(inds, els, rc - alpha, obj); + // oneTree->setBlockId(0);//this will happen by default + vars.push_back(oneTree); + + exchange++; + inds.pop_back(); + rc -= (*vpi).second; + obj -= graphLib.edge_wt[(*vpi).first]; + } + + UtilPrintFuncEnd(m_osLog, m_classTag, "solveOneTree()", m_appParam.LogLevel, + 2); } //===========================================================================// -bool TSP_DecompApp::APPisUserFeasible(const double * x, - const int n_cols, - const double tolZero){ - - //--- - //--- A feasible TSP route: - //--- a.) is binary (assume it already is when it gets here) - //--- b.) is connected - //--- c.) all nodes have degree 2 - //--- - - UtilPrintFuncBegin(m_osLog, m_classTag, - "APPisUserFeasible()", m_appParam.LogLevel, 2); - - const int n_vertices = m_tsp.m_graphLib.n_vertices; - TSP_Boost & tspBoost = m_tsp.m_boost; - - //wipe out the old support graph - tspBoost.clearSubGraph(); - - //construct the current support graph - tspBoost.buildSubGraph(n_cols, x); - - //find the connected components of support_graph - vector component(n_vertices); - int n_comp = tspBoost.findConnectedComponents(component); - - - //STOP? is binary already? 
- //if (b) n_comp == 1, and (a) binary, then construct a feasible solution - if(n_comp == 1){ - - //(c) all nodes have degree 2 - // NOTE: for CPM with 2-matching constraints this should already - // be true already - for(int i = 0; i < n_vertices; i++){ - if(tspBoost.getDegree(i) != 2){ - return false; - } +bool TSP_DecompApp::APPisUserFeasible(const double *x, const int n_cols, + const double tolZero) { + + //--- + //--- A feasible TSP route: + //--- a.) is binary (assume it already is when it gets here) + //--- b.) is connected + //--- c.) all nodes have degree 2 + //--- + + UtilPrintFuncBegin(m_osLog, m_classTag, "APPisUserFeasible()", + m_appParam.LogLevel, 2); + + const int n_vertices = m_tsp.m_graphLib.n_vertices; + TSP_Boost &tspBoost = m_tsp.m_boost; + + // wipe out the old support graph + tspBoost.clearSubGraph(); + + // construct the current support graph + tspBoost.buildSubGraph(n_cols, x); + + // find the connected components of support_graph + vector component(n_vertices); + int n_comp = tspBoost.findConnectedComponents(component); + + // STOP? is binary already? + // if (b) n_comp == 1, and (a) binary, then construct a feasible solution + if (n_comp == 1) { + + //(c) all nodes have degree 2 + // NOTE: for CPM with 2-matching constraints this should already + // be true already + for (int i = 0; i < n_vertices; i++) { + if (tspBoost.getDegree(i) != 2) { + return false; } + } + return true; + + /* + if(isBinary(x, n_cols, tolZero)){ + constructRoute(m_sg); return true; - - /* - if(isBinary(x, n_cols, tolZero)){ - constructRoute(m_sg); - return true; - } - else{ - return false; //it is connected, but fractional - } - */ - } - - //TODO: feasibility cuts? - //(*m_osLog) << "Not Feasible: disconnected, n_comp : " << n_comp << endl; - - UtilPrintFuncEnd(m_osLog, m_classTag, - "APPisUserFeasible()", m_appParam.LogLevel, 2); - - return false; + } + else{ + return false; //it is connected, but fractional + } + */ + } + + // TODO: feasibility cuts? 
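// Aside on the TODO above (a standard TSP observation, not something this
// code currently does): if the support graph (edges with positive x) splits
// into n_comp > 1 components, then for each component S the subtour
// inequality x(delta(S)) >= 2 is violated, since no positive-x edge crosses
// delta(S). Cuts of exactly this form are what generateCutsSubtour() emits,
// so a disconnected point could presumably be cut off here rather than only
// rejected as infeasible.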
+ //(*m_osLog) << "Not Feasible: disconnected, n_comp : " << n_comp << endl; + + UtilPrintFuncEnd(m_osLog, m_classTag, "APPisUserFeasible()", + m_appParam.LogLevel, 2); + + return false; } //===========================================================================// -void TSP_DecompApp::printOriginalColumn(const int index, - ostream * os) const { - UtilPrintEdge(index, os); +void TSP_DecompApp::printOriginalColumn(const int index, ostream *os) const { + UtilPrintEdge(index, os); } diff --git a/Dip/examples/TSP/TSP_Main.cpp b/Dip/examples/TSP/TSP_Main.cpp old mode 100755 new mode 100644 index 10e8c8ac..d329b32b --- a/Dip/examples/TSP/TSP_Main.cpp +++ b/Dip/examples/TSP/TSP_Main.cpp @@ -26,115 +26,105 @@ #include "UtilTimer.h" //===========================================================================// -int main(int argc, char ** argv){ - try{ - //--- - //--- create the utility class for parsing parameters - //--- - UtilParameters utilParam(argc, argv); - - bool doCut = utilParam.GetSetting("doCut", true); - bool doPriceCut = utilParam.GetSetting("doPriceCut", false); - - UtilTimer timer; - double timeSetupReal = 0.0; - double timeSetupCpu = 0.0; - double timeSolveReal = 0.0; - double timeSolveCpu = 0.0; - - //--- - //--- start overall timer - //--- - timer.start(); - - //--- - //--- create the user application (a DecompApp) - //--- - TSP_DecompApp tsp(utilParam); +int main(int argc, char **argv) { + try { + //--- + //--- create the utility class for parsing parameters + //--- + UtilParameters utilParam(argc, argv); - //--- - //--- create the algorithm (a DecompAlgo) - //--- - DecompAlgo * algo = NULL; - assert(doCut + doPriceCut == 1); + bool doCut = utilParam.GetSetting("doCut", true); + bool doPriceCut = utilParam.GetSetting("doPriceCut", false); - //--- - //--- create the CPM algorithm object - //--- - if(doCut) - algo = new DecompAlgoC(&tsp, utilParam); + UtilTimer timer; + double timeSetupReal = 0.0; + double timeSetupCpu = 0.0; + double timeSolveReal = 0.0; + double timeSolveCpu = 0.0; - //--- - //--- create the PC algorithm object - //--- - if(doPriceCut) - algo = new DecompAlgoPC(&tsp, utilParam); + //--- + //--- start overall timer + //--- + timer.start(); - //--- - //--- create the driver AlpsDecomp model - //--- - int status = 0; - AlpsDecompModel alpsModel(utilParam, algo); + //--- + //--- create the user application (a DecompApp) + //--- + TSP_DecompApp tsp(utilParam); - timer.stop(); - timeSetupCpu = timer.getCpuTime(); - timeSetupReal = timer.getRealTime(); - - //--- - //--- solve - //--- - timer.start(); - status = alpsModel.solve(); - timer.stop(); - timeSolveCpu = timer.getCpuTime(); - timeSolveReal = timer.getRealTime(); + //--- + //--- create the algorithm (a DecompAlgo) + //--- + DecompAlgo *algo = NULL; + assert(doCut + doPriceCut == 1); + + //--- + //--- create the CPM algorithm object + //--- + if (doCut) + algo = new DecompAlgoC(&tsp, utilParam); + + //--- + //--- create the PC algorithm object + //--- + if (doPriceCut) + algo = new DecompAlgoPC(&tsp, utilParam); + + //--- + //--- create the driver AlpsDecomp model + //--- + int status = 0; + AlpsDecompModel alpsModel(utilParam, algo); + + timer.stop(); + timeSetupCpu = timer.getCpuTime(); + timeSetupReal = timer.getRealTime(); + //--- + //--- solve + //--- + timer.start(); + status = alpsModel.solve(); + timer.stop(); + timeSolveCpu = timer.getCpuTime(); + timeSolveReal = timer.getRealTime(); + + //--- + //--- sanity check + //--- + cout << setiosflags(ios::fixed | ios::showpoint); + cout << "Status= " << 
status << " BestLB= " << setw(10) + << UtilDblToStr(alpsModel.getGlobalLB(), 5) << " BestUB= " << setw(10) + << UtilDblToStr(alpsModel.getGlobalUB(), 5) << " Nodes= " << setw(6) + << alpsModel.getNumNodesProcessed() << " SetupCPU= " << timeSetupCpu + << " SolveCPU= " << timeSolveCpu + << " TotalCPU= " << timeSetupCpu + timeSolveCpu + << " SetupReal= " << timeSetupReal << " SolveReal= " << timeSolveReal + << " TotalReal= " << timeSetupReal + timeSolveReal << endl; + + if (status == AlpsExitStatusOptimal && tsp.getBestKnownUB() < 1.0e50) { //--- - //--- sanity check + //--- the assumption here is that the BestKnownLB/UB is optimal //--- - cout << setiosflags(ios::fixed|ios::showpoint); - cout << "Status= " << status - << " BestLB= " << setw(10) - << UtilDblToStr(alpsModel.getGlobalLB(),5) - << " BestUB= " << setw(10) - << UtilDblToStr(alpsModel.getGlobalUB(),5) - << " Nodes= " << setw(6) - << alpsModel.getNumNodesProcessed() - << " SetupCPU= " << timeSetupCpu - << " SolveCPU= " << timeSolveCpu - << " TotalCPU= " << timeSetupCpu + timeSolveCpu - << " SetupReal= " << timeSetupReal - << " SolveReal= " << timeSolveReal - << " TotalReal= " << timeSetupReal + timeSolveReal - << endl; - - if(status == AlpsExitStatusOptimal && tsp.getBestKnownUB() < 1.0e50){ - //--- - //--- the assumption here is that the BestKnownLB/UB is optimal - //--- - double diff - = fabs(tsp.getBestKnownUB() - alpsModel.getGlobalUB()); - if(diff > 1.0e-4){ - cerr << "ERROR. BestKnownUB= " << tsp.getBestKnownUB() - << " but DECOMP claims GlobalUB= " - << alpsModel.getGlobalUB() << endl; - throw UtilException("Invalid claim of optimal.", - "main", "DECOMP"); - } + double diff = fabs(tsp.getBestKnownUB() - alpsModel.getGlobalUB()); + if (diff > 1.0e-4) { + cerr << "ERROR. BestKnownUB= " << tsp.getBestKnownUB() + << " but DECOMP claims GlobalUB= " << alpsModel.getGlobalUB() + << endl; + throw UtilException("Invalid claim of optimal.", "main", "DECOMP"); } - - //--- - //--- free local memory - //--- - delete algo; - - } - catch(CoinError & ex){ - cerr << "COIN Exception [ " << ex.message() << " ]" - << " at " << ex.fileName() << ":L" << ex.lineNumber() - << " in " << ex.className() << "::" << ex.methodName() << endl; - return 1; - } - return 0; + } + + //--- + //--- free local memory + //--- + delete algo; + + } catch (CoinError &ex) { + cerr << "COIN Exception [ " << ex.message() << " ]" + << " at " << ex.fileName() << ":L" << ex.lineNumber() << " in " + << ex.className() << "::" << ex.methodName() << endl; + return 1; + } + return 0; } - diff --git a/Dip/examples/TSP/TSP_SubtourCut.cpp b/Dip/examples/TSP/TSP_SubtourCut.cpp old mode 100755 new mode 100644 index 18d28afd..0a66b801 --- a/Dip/examples/TSP/TSP_SubtourCut.cpp +++ b/Dip/examples/TSP/TSP_SubtourCut.cpp @@ -5,7 +5,7 @@ (c) Copyright 2004 Lehigh University. All Rights Reserved. - This software is licensed under the Common Public License. Please see + This software is licensed under the Common Public License. Please see accompanying file for terms. 
---------------------------------------------------------------------------*/ @@ -15,169 +15,158 @@ #include "UtilMacros.h" /*-------------------------------------------------------------------------*/ -void TSP_SubtourCut::init(){ - //setCutType(); - switch(m_storage){ - case VECTOR: - create_bitset(); - break; - case BITSET: - create_vector(); - break; - case BOTH: - break; - default: - //throw exception - assert(0); - return; - } +void TSP_SubtourCut::init() { + // setCutType(); + switch (m_storage) { + case VECTOR: + create_bitset(); + break; + case BITSET: + create_vector(); + break; + case BOTH: + break; + default: + // throw exception + assert(0); + return; + } } /*-------------------------------------------------------------------------*/ -bool TSP_SubtourCut::isSame(const DecompCut * cut) const{ - const TSP_SubtourCut * sec_cut = dynamic_cast(cut); - if(!sec_cut) - return false; - - if(m_type != sec_cut->m_type) - return false; - switch(m_storage){ - case VECTOR: - return m_S == sec_cut->m_S; - case BITSET: - case BOTH: - return m_inS == sec_cut->m_inS; - } - return false; +bool TSP_SubtourCut::isSame(const DecompCut *cut) const { + const TSP_SubtourCut *sec_cut = dynamic_cast(cut); + if (!sec_cut) + return false; + + if (m_type != sec_cut->m_type) + return false; + switch (m_storage) { + case VECTOR: + return m_S == sec_cut->m_S; + case BITSET: + case BOTH: + return m_inS == sec_cut->m_inS; + } + return false; } /*-------------------------------------------------------------------------*/ -void TSP_SubtourCut::setCutType(){ - //what does concorde do? - //sec_type = getSize() <= (n_vertices - 1)/2 ? SIDE : ACROSS; - m_type = ACROSS; +void TSP_SubtourCut::setCutType() { + // what does concorde do? + // sec_type = getSize() <= (n_vertices - 1)/2 ? SIDE : ACROSS; + m_type = ACROSS; } -//another function that probably belongs in a utility class +// another function that probably belongs in a utility class /*-------------------------------------------------------------------------*/ -void TSP_SubtourCut::create_bitset(){ - //create bitset from vector - for(int i = 0; i < m_nverts; i++) - m_inS.push_back(false);//?? - for(vector::iterator it = m_S.begin(); it != m_S.end(); it++) - m_inS[*it] = true; - m_storage = m_storage == VECTOR ? BOTH : BITSET; +void TSP_SubtourCut::create_bitset() { + // create bitset from vector + for (int i = 0; i < m_nverts; i++) + m_inS.push_back(false); //?? + for (vector::iterator it = m_S.begin(); it != m_S.end(); it++) + m_inS[*it] = true; + m_storage = m_storage == VECTOR ? BOTH : BITSET; } - -//another function that probably belongs in a utility class + +// another function that probably belongs in a utility class /*-------------------------------------------------------------------------*/ -void TSP_SubtourCut::create_vector(){ - //create vector from bistet - //m_S.reserve(m_inS.count());//is this worth it? or is count costly? - for(unsigned int u = 0; u < m_inS.size(); u++) - if(m_inS[u]) - m_S.push_back(u); - m_storage = m_storage == BITSET ? BOTH : VECTOR; +void TSP_SubtourCut::create_vector() { + // create vector from bistet + // m_S.reserve(m_inS.count());//is this worth it? or is count costly? + for (unsigned int u = 0; u < m_inS.size(); u++) + if (m_inS[u]) + m_S.push_back(u); + m_storage = m_storage == BITSET ? 
BOTH : VECTOR; } /*-------------------------------------------------------------------------*/ -void TSP_SubtourCut::setBounds(double infinity){ - switch(m_type){ - case ACROSS: - setLowerBound(2.0); - setUpperBound(infinity); - break; - case SIDE: - setLowerBound(-infinity); - setUpperBound(static_cast(m_S.size()) - 1.0); - break; - default: - assert(0); - } +void TSP_SubtourCut::setBounds(double infinity) { + switch (m_type) { + case ACROSS: + setLowerBound(2.0); + setUpperBound(infinity); + break; + case SIDE: + setLowerBound(-infinity); + setUpperBound(static_cast(m_S.size()) - 1.0); + break; + default: + assert(0); + } } /*-------------------------------------------------------------------------*/ -void TSP_SubtourCut::expandCutToRow(CoinPackedVector * row){ - vector indices; - vector elements; - - switch(m_type){ - case ACROSS: - { - for(unsigned int i = 0; i < m_S.size(); i++){ - for(int v = 0; v < m_nverts; v++){ - if(m_inS[v] || m_S[i] == v) - continue; - indices.push_back(UtilIndexU(m_S[i],v)); - } - } - fill_n(back_inserter(elements), indices.size(), 1.0); - } - break; - - case SIDE: - { - for(unsigned int i = 0; i < m_S.size(); i++) - for(unsigned int j = i + 1; j < m_S.size(); j++) - indices.push_back(UtilIndexU(m_S[i], m_S[j])); - fill_n(back_inserter(elements), indices.size(), 1.0); +void TSP_SubtourCut::expandCutToRow(CoinPackedVector *row) { + vector indices; + vector elements; + + switch (m_type) { + case ACROSS: { + for (unsigned int i = 0; i < m_S.size(); i++) { + for (int v = 0; v < m_nverts; v++) { + if (m_inS[v] || m_S[i] == v) + continue; + indices.push_back(UtilIndexU(m_S[i], v)); } - break; - default: - cerr << "ERROR expandCutToRow sec_type" << endl; - abort(); - } - row->setVector(static_cast(indices.size()), - &indices[0], &elements[0], false); + } + fill_n(back_inserter(elements), indices.size(), 1.0); + } break; + + case SIDE: { + for (unsigned int i = 0; i < m_S.size(); i++) + for (unsigned int j = i + 1; j < m_S.size(); j++) + indices.push_back(UtilIndexU(m_S[i], m_S[j])); + fill_n(back_inserter(elements), indices.size(), 1.0); + } break; + default: + cerr << "ERROR expandCutToRow sec_type" << endl; + abort(); + } + row->setVector(static_cast(indices.size()), &indices[0], &elements[0], + false); } /*-------------------------------------------------------------------------*/ -void TSP_SubtourCut::print(double infinity, ostream * os) const{ - double lb, ub; - switch(m_type){ - case ACROSS: - (*os) << "ACROSS "; - break; - case SIDE: - (*os) << "SIDE "; - break; - } - - switch(m_storage){ - case VECTOR: - { - (*os) << "S: "; - for(vector::const_iterator it = m_S.begin(); - it != m_S.end(); it++) - (*os) << *it << " "; - } - break; - case BITSET: - case BOTH: - { - (*os) << "S: "; - for(int i = 0; i < m_nverts; i++) - if(m_inS[i]) - (*os) << i << " "; - } - break; - default: - cerr << "ERROR in print - BAD cut storage_type" << endl; - abort(); - } - lb = getLowerBound(); - ub = getUpperBound(); - if(lb > -infinity){ - (*os) << "\tm_lb\t" << lb; - } - else{ - (*os) << "\tm_lb\t-INF"; - } - if(ub < infinity){ - (*os) << "\tm_ub\t" << ub; - } - else{ - (*os) << "\tm_ub\t INF"; - } - (*os) << endl; +void TSP_SubtourCut::print(double infinity, ostream *os) const { + double lb, ub; + switch (m_type) { + case ACROSS: + (*os) << "ACROSS "; + break; + case SIDE: + (*os) << "SIDE "; + break; + } + + switch (m_storage) { + case VECTOR: { + (*os) << "S: "; + for (vector::const_iterator it = m_S.begin(); it != m_S.end(); it++) + (*os) << *it << " "; + } break; + case 
BITSET: + case BOTH: { + (*os) << "S: "; + for (int i = 0; i < m_nverts; i++) + if (m_inS[i]) + (*os) << i << " "; + } break; + default: + cerr << "ERROR in print - BAD cut storage_type" << endl; + abort(); + } + lb = getLowerBound(); + ub = getUpperBound(); + if (lb > -infinity) { + (*os) << "\tm_lb\t" << lb; + } else { + (*os) << "\tm_lb\t-INF"; + } + if (ub < infinity) { + (*os) << "\tm_ub\t" << ub; + } else { + (*os) << "\tm_ub\t INF"; + } + (*os) << endl; } diff --git a/Dip/examples/VRP/VRP_DecompApp.cpp b/Dip/examples/VRP/VRP_DecompApp.cpp old mode 100755 new mode 100644 index a4f12c06..958ed1d6 --- a/Dip/examples/VRP/VRP_DecompApp.cpp +++ b/Dip/examples/VRP/VRP_DecompApp.cpp @@ -21,431 +21,410 @@ #include "DecompAlgo.h" //===========================================================================// -void VRP_DecompApp::initializeApp() { - - UtilPrintFuncBegin(m_osLog, m_classTag, - "initializeApp()", m_appParam.LogLevel, 2); - - //--- - //--- read VRPLIB instance - //--- - string fileName = m_appParam.DataDir - + UtilDirSlash() + m_appParam.Instance + ".vrp"; - UtilGraphLib & graphLib = m_vrp.m_graphLib; - graphLib.read_data(fileName.c_str()); - m_vrp.m_numRoutes = m_appParam.NumRoutes; - - //--- - //--- set pointer for CVRPsep object - //--- - m_cvrpSep.init(&m_vrp); - - //--- - //--- init Concorde object - //--- +void VRP_DecompApp::initializeApp() { + + UtilPrintFuncBegin(m_osLog, m_classTag, "initializeApp()", + m_appParam.LogLevel, 2); + + //--- + //--- read VRPLIB instance + //--- + string fileName = + m_appParam.DataDir + UtilDirSlash() + m_appParam.Instance + ".vrp"; + UtilGraphLib &graphLib = m_vrp.m_graphLib; + graphLib.read_data(fileName.c_str()); + m_vrp.m_numRoutes = m_appParam.NumRoutes; + + //--- + //--- set pointer for CVRPsep object + //--- + m_cvrpSep.init(&m_vrp); + + //--- + //--- init Concorde object + //--- #ifdef VRP_DECOMPAPP_USECONCORDE - m_concorde.init(&m_vrp); + m_concorde.init(&m_vrp); #endif - //--- - //--- read best known lb/ub - //--- - //--- Columns in vrplib.opt: - //--- 1. Problem Instance - //--- 2. # of Customers - //--- 3. # of Vehicles - //--- 4. Vehicle Capacity - //--- 5. Tightness (Demand/Capacity) - //--- 6. Gap % - //--- 7. Upper Bound - //--- - string bestKnownFile - = m_appParam.DataDir + UtilDirSlash() + "vrplib.opt"; - ifstream is; - string instanceName; - int numCustomers; - int numVehicles; - int capacity; - double tightness; - double gap; - double upperBound; - UtilOpenFile(is, bestKnownFile); - while(!is.eof()){ - is >> instanceName - >> numCustomers - >> numVehicles - >> capacity - >> tightness - >> gap - >> upperBound; - instanceName = UtilStrTrim(instanceName); - if(instanceName == m_appParam.Instance){ - //gap = 100*(ub-lb)/ub - //lb = ub - gap*ub/100 - m_bestKnownLB = upperBound - (gap * upperBound / 100.0); - m_bestKnownUB = upperBound; - printf("Instance = %s, BestLB = %g, BestUB = %g\n", - instanceName.c_str(), m_bestKnownLB, m_bestKnownUB); - break; - } - } - is.close(); + //--- + //--- read best known lb/ub + //--- + //--- Columns in vrplib.opt: + //--- 1. Problem Instance + //--- 2. # of Customers + //--- 3. # of Vehicles + //--- 4. Vehicle Capacity + //--- 5. Tightness (Demand/Capacity) + //--- 6. Gap % + //--- 7. 
Upper Bound + //--- + string bestKnownFile = m_appParam.DataDir + UtilDirSlash() + "vrplib.opt"; + ifstream is; + string instanceName; + int numCustomers; + int numVehicles; + int capacity; + double tightness; + double gap; + double upperBound; + UtilOpenFile(is, bestKnownFile); + while (!is.eof()) { + is >> instanceName >> numCustomers >> numVehicles >> capacity >> + tightness >> gap >> upperBound; + instanceName = UtilStrTrim(instanceName); + if (instanceName == m_appParam.Instance) { + // gap = 100*(ub-lb)/ub + // lb = ub - gap*ub/100 + m_bestKnownLB = upperBound - (gap * upperBound / 100.0); + m_bestKnownUB = upperBound; + printf("Instance = %s, BestLB = %g, BestUB = %g\n", instanceName.c_str(), + m_bestKnownLB, m_bestKnownUB); + break; + } + } + is.close(); - //--- - //--- create models - //--- - createModels(); + //--- + //--- create models + //--- + createModels(); - UtilPrintFuncEnd(m_osLog, m_classTag, - "initializeApp()", m_appParam.LogLevel, 2); + UtilPrintFuncEnd(m_osLog, m_classTag, "initializeApp()", m_appParam.LogLevel, + 2); } //===========================================================================// -void VRP_DecompApp::createModels(){ - //--- - //--- This function does the work to create the different models - //--- that will be used. This memory is owned by the user. It will - //--- be passed to the application interface and used by the algorithms. - //--- - - UtilPrintFuncBegin(m_osLog, m_classTag, - "createModels()", m_appParam.LogLevel, 2); - - //--- - //--- Vehicle Routing Problem (VRP) - //--- - //--- min sum{e in E} c_e x_e - //--- s.t. sum{e in delta(0)} x_e = 2k (1) - //--- sum{e in delta(i)} x_e = 2, forall i in V (2) - //--- sum{e in delta(S)} x_e >= 2b(S), forall S sub N, |S| > 1 (3) - //--- x_e = {0,1}, for all e = {i,j}, i,j!=0 (4) - //--- x_e = {0,2}, for all e = {0,j} (5) - //--- - //--- Some notes on conventions: - //--- a.) j = 0 is depot - //--- b.) |E| = V(V-1)/2 - //--- c.) order of edges: i = 1 -> V-1, j = 0 -> i-1 - //--- - UtilGraphLib & graphLib = m_vrp.m_graphLib; - int n_cols = graphLib.n_edges; - - //--- - //--- create the cost vector c - //--- - m_objective = new double[n_cols]; - if(!m_objective) - throw UtilExceptionMemory("createModels", "VRP_DecompApp"); - copy(graphLib.edge_wt, graphLib.edge_wt + n_cols, m_objective); - setModelObjective(m_objective, n_cols); - - //--- - //--- Possible decompositions: - //--- - //--- CPM: - //--- A'' = 2-degree (on customers) and 2k-degree on depot - //--- with GSECs generated dynamically. - //--- - //--- PC/RC: - //--- A' = Multiple Traveling Salesman Problem - //--- A'' = 2-degree (on customers) and 2k-degree on depot - //--- with GSECs generated dynamically. - //--- NOTE: In this case, 2-degree and 2k-degree is actually - //--- redundant with MTSP polytope. All we really have to do - //--- is generated violated GSECs. But, starting with nothing - //--- would cause a Catch-22 in framework. So, start with at - //--- least something. Maybe start with trivial GSECs? or just - //--- the 2k-degree constraint? - //--- - //TODO: k-tree, b-matching? - // mTSP is actually a nesting of k-tree? could be interesting... - // mTSP is also a nesting of b-matching... - // but what makes vrp hard is the GSECs... ? - // some relaxation we can try that includes GSECs? - // solve GSECs with relaxed =2 with Decomp called recursively?? 
- if(m_appParam.ModelNameCore == "2DEGREE"){ - DecompConstraintSet * model = new DecompConstraintSet(); - createModelTwoDegree(model); - m_models.push_back(model); - setModelCore(model, m_appParam.ModelNameCore); - } - if(m_appParam.ModelNameRelax == "2DEGREE"){ - DecompConstraintSet * model = new DecompConstraintSet(); - createModelTwoDegree(model); - m_models.push_back(model); - setModelRelax(model, m_appParam.ModelNameRelax); - } - - //current design - must set empty constraint set - if(m_appParam.ModelNameRelax == "MTSP"){ - DecompConstraintSet * model = new DecompConstraintSet(); - m_models.push_back(model); - setModelRelax(model, m_appParam.ModelNameRelax); - } +void VRP_DecompApp::createModels() { + //--- + //--- This function does the work to create the different models + //--- that will be used. This memory is owned by the user. It will + //--- be passed to the application interface and used by the algorithms. + //--- + + UtilPrintFuncBegin(m_osLog, m_classTag, "createModels()", m_appParam.LogLevel, + 2); + + //--- + //--- Vehicle Routing Problem (VRP) + //--- + //--- min sum{e in E} c_e x_e + //--- s.t. sum{e in delta(0)} x_e = 2k (1) + //--- sum{e in delta(i)} x_e = 2, forall i in V (2) + //--- sum{e in delta(S)} x_e >= 2b(S), forall S sub N, |S| > 1 (3) + //--- x_e = {0,1}, for all e = {i,j}, i,j!=0 (4) + //--- x_e = {0,2}, for all e = {0,j} (5) + //--- + //--- Some notes on conventions: + //--- a.) j = 0 is depot + //--- b.) |E| = V(V-1)/2 + //--- c.) order of edges: i = 1 -> V-1, j = 0 -> i-1 + //--- + UtilGraphLib &graphLib = m_vrp.m_graphLib; + int n_cols = graphLib.n_edges; + + //--- + //--- create the cost vector c + //--- + m_objective = new double[n_cols]; + if (!m_objective) + throw UtilExceptionMemory("createModels", "VRP_DecompApp"); + copy(graphLib.edge_wt, graphLib.edge_wt + n_cols, m_objective); + setModelObjective(m_objective, n_cols); + + //--- + //--- Possible decompositions: + //--- + //--- CPM: + //--- A'' = 2-degree (on customers) and 2k-degree on depot + //--- with GSECs generated dynamically. + //--- + //--- PC/RC: + //--- A' = Multiple Traveling Salesman Problem + //--- A'' = 2-degree (on customers) and 2k-degree on depot + //--- with GSECs generated dynamically. + //--- NOTE: In this case, 2-degree and 2k-degree is actually + //--- redundant with MTSP polytope. All we really have to do + //--- is generated violated GSECs. But, starting with nothing + //--- would cause a Catch-22 in framework. So, start with at + //--- least something. Maybe start with trivial GSECs? or just + //--- the 2k-degree constraint? + //--- + // TODO: k-tree, b-matching? + // mTSP is actually a nesting of k-tree? could be interesting... + // mTSP is also a nesting of b-matching... + // but what makes vrp hard is the GSECs... ? + // some relaxation we can try that includes GSECs? + // solve GSECs with relaxed =2 with Decomp called recursively?? 
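// The edge/column loops later in this file, and UtilIndexU throughout these
// examples, rely on the triangular edge ordering documented above
// (i = 1..V-1, j = 0..i-1, so |E| = V(V-1)/2). The sketch below is a
// hypothetical mirror of that mapping, shown only to make the column indices
// in createModelTwoDegree easier to follow; the real mapping is whatever
// UtilIndexU implements.
inline int edgeIndexLowerTriangular(int i, int j) {
  if (i < j) { int t = i; i = j; j = t; } // order endpoints so that i > j
  return (i * (i - 1)) / 2 + j;           // {1,0}->0, {2,0}->1, {2,1}->2, ...
}
// If UtilIndexU uses a different convention, only this arithmetic changes;
// the structure of the matrix-building loops stays the same.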
+ if (m_appParam.ModelNameCore == "2DEGREE") { + DecompConstraintSet *model = new DecompConstraintSet(); + createModelTwoDegree(model); + m_models.push_back(model); + setModelCore(model, m_appParam.ModelNameCore); + } + if (m_appParam.ModelNameRelax == "2DEGREE") { + DecompConstraintSet *model = new DecompConstraintSet(); + createModelTwoDegree(model); + m_models.push_back(model); + setModelRelax(model, m_appParam.ModelNameRelax); + } - if(m_appParam.ModelNameRelax == "ESPPRCC"){ - DecompConstraintSet * model = new DecompConstraintSet(); - createModelESPPCC(model); - m_models.push_back(model); - m_modelESPPRC = model; - setModelRelax(model, m_appParam.ModelNameRelax); - } - - UtilPrintFuncEnd(m_osLog, m_classTag, - "createModels()", m_appParam.LogLevel, 2); -} + // current design - must set empty constraint set + if (m_appParam.ModelNameRelax == "MTSP") { + DecompConstraintSet *model = new DecompConstraintSet(); + m_models.push_back(model); + setModelRelax(model, m_appParam.ModelNameRelax); + } -//===========================================================================// -void VRP_DecompApp::createModelTwoDegree(DecompConstraintSet * model){ - - //--- - //--- s.t. sum{e in delta(0)} x_e = 2k (1) - //--- sum{e in delta(i)} x_e = 2, forall i in V (2) - //--- - - UtilPrintFuncBegin(m_osLog, m_classTag, - "createModelTwoDegree()", m_appParam.LogLevel, 2); - - UtilGraphLib & graphLib = m_vrp.m_graphLib; - const int n_cols = graphLib.n_edges; - const int n_rows = graphLib.n_vertices; //includes depot - const int n_vertices = graphLib.n_vertices; - const int n_edges = graphLib.n_edges; - - //--- - //--- reserve some space for efficient fill-in of col and row bounds - //--- - model->colLB.reserve(n_cols); - model->colUB.reserve(n_cols); - model->rowLB.reserve(n_rows); - model->rowUB.reserve(n_rows); - - //--- - //--- set the column lower and upper bounds - //--- - UtilFillN(model->colLB, n_cols, 0.0); - UtilFillN(model->colUB, n_cols, 1.0); - - //--- - //--- edges coming from depot can have value 2 (adjust upper bounds) - //--- - int i, j; - int depot_index; - for(i = 1; i < n_vertices; i++){ - depot_index = UtilIndexU(0, i); - model->colUB[depot_index] = 2.0; - } + if (m_appParam.ModelNameRelax == "ESPPRCC") { + DecompConstraintSet *model = new DecompConstraintSet(); + createModelESPPCC(model); + m_models.push_back(model); + m_modelESPPRC = model; + setModelRelax(model, m_appParam.ModelNameRelax); + } - //--- - //--- set the row lower and upper bounds (lb=ub=2) - //--- the first row (vertex = 0) represents the depot (lb=ub=2k) - //--- - fill_n(back_inserter(model->rowLB), n_rows, 2.0); - fill_n(back_inserter(model->rowUB), n_rows, 2.0); - model->rowLB[0] = 2.0 * m_vrp.m_numRoutes; - model->rowUB[0] = 2.0 * m_vrp.m_numRoutes; - - //--- - //--- two row entries (coeff = 1.0) for each column (edge = (u,v)) - //--- at row u and row v - //--- - int nEdgesX2 = 2 * n_edges; - int * rowIndices = new int[nEdgesX2]; - int * colIndices = new int[nEdgesX2]; - double * elements = new double[nEdgesX2]; - CoinAssertHint(rowIndices && colIndices && elements, - "Error: Out of Memory"); - - int edge_index = 0; - int triplet_index = 0; - for(i = 1; i < n_vertices; i++){ - for(j = 0; j < i; j++){ - rowIndices[triplet_index] = i; - rowIndices[triplet_index + 1] = j; - colIndices[triplet_index] = edge_index; - colIndices[triplet_index + 1] = edge_index; - triplet_index += 2; - edge_index++; - } - } - UtilFillN(elements, nEdgesX2, 1.0); - - //--- - //--- create a column-ordered CoinPackedMatrix - //--- - model->M = 
new CoinPackedMatrix(true, - rowIndices, - colIndices, - elements, - nEdgesX2); - - //--- - //--- mark the integers - //--- - UtilIotaN(model->integerVars, n_cols, 0); - - //--- - //--- free local memory - //--- - UTIL_DELARR(rowIndices); - UTIL_DELARR(colIndices); - UTIL_DELARR(elements); - - UtilPrintFuncEnd(m_osLog, m_classTag, - "createModelTwoDegree()", m_appParam.LogLevel, 2); + UtilPrintFuncEnd(m_osLog, m_classTag, "createModels()", m_appParam.LogLevel, + 2); } //===========================================================================// -int VRP_DecompApp::generateCuts(const double * x, - DecompCutList & newCuts){ - - UtilPrintFuncBegin(m_osLog, m_classTag, - "generateCuts()", m_appParam.LogLevel, 2); - - UtilGraphLib & graphLib = m_vrp.m_graphLib; - const int nEdges = graphLib.n_edges; - - //--- - //--- calculate the number of nonzeros in vector x - //--- - //CAREFUL: tolerance used here must match - // tolerance use in buildLpSol! - int nNzs = UtilNumNonzeros(x, nEdges, DecompEpsilon); - - //--- - //--- build CVRPsep LP solution object - //--- - m_cvrpSep.buildLpSol(x, nNzs,DecompEpsilon); - - //--- - //--- separate capacity cuts - //--- - m_cvrpSep.sepCapacityCuts(); - - //--- - //--- create DecompCuts from CVRPsep cuts - //--- - m_cvrpSep.createVrpCuts(newCuts, m_infinity); - - UtilPrintFuncEnd(m_osLog, m_classTag, - "generateCuts()", m_appParam.LogLevel, 2); - - return newCuts.size(); +void VRP_DecompApp::createModelTwoDegree(DecompConstraintSet *model) { + + //--- + //--- s.t. sum{e in delta(0)} x_e = 2k (1) + //--- sum{e in delta(i)} x_e = 2, forall i in V (2) + //--- + + UtilPrintFuncBegin(m_osLog, m_classTag, "createModelTwoDegree()", + m_appParam.LogLevel, 2); + + UtilGraphLib &graphLib = m_vrp.m_graphLib; + const int n_cols = graphLib.n_edges; + const int n_rows = graphLib.n_vertices; // includes depot + const int n_vertices = graphLib.n_vertices; + const int n_edges = graphLib.n_edges; + + //--- + //--- reserve some space for efficient fill-in of col and row bounds + //--- + model->colLB.reserve(n_cols); + model->colUB.reserve(n_cols); + model->rowLB.reserve(n_rows); + model->rowUB.reserve(n_rows); + + //--- + //--- set the column lower and upper bounds + //--- + UtilFillN(model->colLB, n_cols, 0.0); + UtilFillN(model->colUB, n_cols, 1.0); + + //--- + //--- edges coming from depot can have value 2 (adjust upper bounds) + //--- + int i, j; + int depot_index; + for (i = 1; i < n_vertices; i++) { + depot_index = UtilIndexU(0, i); + model->colUB[depot_index] = 2.0; + } + + //--- + //--- set the row lower and upper bounds (lb=ub=2) + //--- the first row (vertex = 0) represents the depot (lb=ub=2k) + //--- + fill_n(back_inserter(model->rowLB), n_rows, 2.0); + fill_n(back_inserter(model->rowUB), n_rows, 2.0); + model->rowLB[0] = 2.0 * m_vrp.m_numRoutes; + model->rowUB[0] = 2.0 * m_vrp.m_numRoutes; + + //--- + //--- two row entries (coeff = 1.0) for each column (edge = (u,v)) + //--- at row u and row v + //--- + int nEdgesX2 = 2 * n_edges; + int *rowIndices = new int[nEdgesX2]; + int *colIndices = new int[nEdgesX2]; + double *elements = new double[nEdgesX2]; + CoinAssertHint(rowIndices && colIndices && elements, "Error: Out of Memory"); + + int edge_index = 0; + int triplet_index = 0; + for (i = 1; i < n_vertices; i++) { + for (j = 0; j < i; j++) { + rowIndices[triplet_index] = i; + rowIndices[triplet_index + 1] = j; + colIndices[triplet_index] = edge_index; + colIndices[triplet_index + 1] = edge_index; + triplet_index += 2; + edge_index++; + } + } + UtilFillN(elements, 
nEdgesX2, 1.0); + + //--- + //--- create a column-ordered CoinPackedMatrix + //--- + model->M = + new CoinPackedMatrix(true, rowIndices, colIndices, elements, nEdgesX2); + + //--- + //--- mark the integers + //--- + UtilIotaN(model->integerVars, n_cols, 0); + + //--- + //--- free local memory + //--- + UTIL_DELARR(rowIndices); + UTIL_DELARR(colIndices); + UTIL_DELARR(elements); + + UtilPrintFuncEnd(m_osLog, m_classTag, "createModelTwoDegree()", + m_appParam.LogLevel, 2); } //===========================================================================// -DecompSolverStatus VRP_DecompApp::solveRelaxed(const int whichBlock, - const double * redCostX, - const double convexDual, - DecompVarList & varList){ - - - UtilPrintFuncBegin(m_osLog, m_classTag, - "solveRelaxed()", m_appParam.LogLevel, 2); - - int i, status = 0; - double coeff; - int index; - vector vrpRouteInd; - vector vrpRouteEls; - - DecompSolverStatus solverStatus = DecompSolStatNoSolution; - if(m_appParam.ModelNameRelax == "MTSP"){ -#ifdef VRP_DECOMPAPP_USECONCORDE - //--- - //--- translate the reduced cost to the expanded graph for MTSP - //--- - - m_concorde.setExpandedCost(redCostX); - - //--- - //--- solve the TSP - //--- - status = m_concorde.solveTSP(vrpRouteInd, vrpRouteEls, m_infinity); - assert(!status); - - double varRedCost = 0.0; - double varOrigCost = 0.0; - for(i = 0; i < static_cast(vrpRouteInd.size()); i++){ - coeff = vrpRouteEls[i]; - index = vrpRouteInd[i]; - varRedCost += coeff * redCostX[index]; - varOrigCost += coeff * m_objective[index]; - } - UTIL_DEBUG(m_appParam.LogLevel, 5, - (*m_osLog) << "VAR varRedCost=" << varRedCost-convexDual; - (*m_osLog) << "varOrigCost=" << varOrigCost << endl; - ); - - DecompVar * var = new DecompVar(vrpRouteInd, - vrpRouteEls, - varRedCost - convexDual, - varOrigCost); - var->setBlockId(0); - varList.push_back(var); - solverStatus = DecompSolStatOptimal; -#endif - } +int VRP_DecompApp::generateCuts(const double *x, DecompCutList &newCuts) { + UtilPrintFuncBegin(m_osLog, m_classTag, "generateCuts()", m_appParam.LogLevel, + 2); - if(m_appParam.ModelNameRelax == "ESPPRCC"){ - const int blockId = 0; - const DecompAlgo * decompAlgo = getDecompAlgo(); - //DecompAlgoModel & decompAlgoModel - // = decompAlgo->getModelRelax(blockId); + UtilGraphLib &graphLib = m_vrp.m_graphLib; + const int nEdges = graphLib.n_edges; + //--- + //--- calculate the number of nonzeros in vector x + //--- + // CAREFUL: tolerance used here must match + // tolerance use in buildLpSol! 
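// The CAREFUL note above matters because this count sizes the data that the
// CVRPsep LP-solution object is packed with: if the packing step inside
// buildLpSol filtered with a smaller epsilon than the counting step, it would
// find more entries than were counted. A minimal sketch of the pattern with a
// single shared tolerance (the helper name is hypothetical; DecompEpsilon
// plays this role in the calls below):
static int countAboveTolerance(const double *x, int n, double tol) {
  int nz = 0;
  for (int i = 0; i < n; i++)
    if (std::fabs(x[i]) > tol) // the packing pass must apply this same test
      nz++;
  return nz;                   // std::fabs requires <cmath>
}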
+ int nNzs = UtilNumNonzeros(x, nEdges, DecompEpsilon); + //--- + //--- build CVRPsep LP solution object + //--- + m_cvrpSep.buildLpSol(x, nNzs, DecompEpsilon); + //--- + //--- separate capacity cuts + //--- + m_cvrpSep.sepCapacityCuts(); - //solve then map back and set to exact - cannot let - // built in solve it - exit(1); - } + //--- + //--- create DecompCuts from CVRPsep cuts + //--- + m_cvrpSep.createVrpCuts(newCuts, m_infinity); + + UtilPrintFuncEnd(m_osLog, m_classTag, "generateCuts()", m_appParam.LogLevel, + 2); + + return newCuts.size(); +} +//===========================================================================// +DecompSolverStatus VRP_DecompApp::solveRelaxed(const int whichBlock, + const double *redCostX, + const double convexDual, + DecompVarList &varList) { + + UtilPrintFuncBegin(m_osLog, m_classTag, "solveRelaxed()", m_appParam.LogLevel, + 2); + + int i, status = 0; + double coeff; + int index; + vector vrpRouteInd; + vector vrpRouteEls; + + DecompSolverStatus solverStatus = DecompSolStatNoSolution; + if (m_appParam.ModelNameRelax == "MTSP") { +#ifdef VRP_DECOMPAPP_USECONCORDE + //--- + //--- translate the reduced cost to the expanded graph for MTSP + //--- + + m_concorde.setExpandedCost(redCostX); + + //--- + //--- solve the TSP + //--- + status = m_concorde.solveTSP(vrpRouteInd, vrpRouteEls, m_infinity); + assert(!status); + + double varRedCost = 0.0; + double varOrigCost = 0.0; + for (i = 0; i < static_cast(vrpRouteInd.size()); i++) { + coeff = vrpRouteEls[i]; + index = vrpRouteInd[i]; + varRedCost += coeff * redCostX[index]; + varOrigCost += coeff * m_objective[index]; + } + UTIL_DEBUG(m_appParam.LogLevel, 5, + (*m_osLog) << "VAR varRedCost=" << varRedCost - convexDual; + (*m_osLog) << "varOrigCost=" << varOrigCost << endl;); + + DecompVar *var = new DecompVar(vrpRouteInd, vrpRouteEls, + varRedCost - convexDual, varOrigCost); + var->setBlockId(0); + varList.push_back(var); + solverStatus = DecompSolStatOptimal; +#endif + } + + if (m_appParam.ModelNameRelax == "ESPPRCC") { + const int blockId = 0; + const DecompAlgo *decompAlgo = getDecompAlgo(); + // DecompAlgoModel & decompAlgoModel + // = decompAlgo->getModelRelax(blockId); + + // solve then map back and set to exact - cannot let + // built in solve it + exit(1); + } - UtilPrintFuncEnd(m_osLog, m_classTag, - "solveRelaxed()", m_appParam.LogLevel, 2); - return solverStatus; + UtilPrintFuncEnd(m_osLog, m_classTag, "solveRelaxed()", m_appParam.LogLevel, + 2); + return solverStatus; } //===========================================================================// -bool VRP_DecompApp::APPisUserFeasible(const double * x, - const int n_cols, - const double tolZero){ - - //--- - //--- A feasible VRP solution: - //--- a.) integral (assume it already is when it gets here) - //--- b.) 2 degree at each node i in V - //--- c.) 2k degree at depot - //--- d.) remove the depot, then each component must satisfy capacity - //--- - //TODO: back to retuning cuts from here? disconnected gsecs... - - UtilPrintFuncBegin(m_osLog, m_classTag, - "APPisUserFeasible()", m_appParam.LogLevel, 2); - - UtilGraphLib & graphLib = m_vrp.m_graphLib; - bool feasible = true; - - //--- - //--- wipe out the old support graphs - //--- - m_boost.clearSubGraph(m_boost.m_sg); - m_boost.clearSubGraph(m_boost.m_sg0); - - //--- - //--- construct the current support graph to m_sg - //--- - m_boost.buildSubGraph(n_cols, x); - - //--- - //--- b.) 2 degree at each node i in V - //--- c.) 
2k degree at depot (since we can have single customer - //--- routes where x[i,0] = 2.0, we cannot just check degree - //--- of depot in support graph) - //--- - int v; - const int n_vertices = graphLib.n_vertices; +bool VRP_DecompApp::APPisUserFeasible(const double *x, const int n_cols, + const double tolZero) { + + //--- + //--- A feasible VRP solution: + //--- a.) integral (assume it already is when it gets here) + //--- b.) 2 degree at each node i in V + //--- c.) 2k degree at depot + //--- d.) remove the depot, then each component must satisfy capacity + //--- + // TODO: back to retuning cuts from here? disconnected gsecs... + + UtilPrintFuncBegin(m_osLog, m_classTag, "APPisUserFeasible()", + m_appParam.LogLevel, 2); + + UtilGraphLib &graphLib = m_vrp.m_graphLib; + bool feasible = true; + + //--- + //--- wipe out the old support graphs + //--- + m_boost.clearSubGraph(m_boost.m_sg); + m_boost.clearSubGraph(m_boost.m_sg0); + + //--- + //--- construct the current support graph to m_sg + //--- + m_boost.buildSubGraph(n_cols, x); + + //--- + //--- b.) 2 degree at each node i in V + //--- c.) 2k degree at depot (since we can have single customer + //--- routes where x[i,0] = 2.0, we cannot just check degree + //--- of depot in support graph) + //--- + int v; + const int n_vertices = graphLib.n_vertices; #if 0 //this is always true because always part of LP.... think... if(!UtilIsZero(m_boost.getDepotDegree() - (2 * m_vrp.m_numRoutes))){ @@ -465,120 +444,108 @@ bool VRP_DecompApp::APPisUserFeasible(const double * x, } #endif - //--- - //--- copy the support graph to m_sg0 and remove vertex 0 - //--- - m_boost.copyGraph(m_boost.m_sg, m_boost.m_sg0); - m_boost.clearVertex(0, m_boost.m_sg0); - - UTIL_DEBUG(m_appParam.LogLevel, 5, - (*m_osLog) << "m_sg:\n"; - m_boost.printGraph(m_boost.m_sg); - (*m_osLog) << "m_sg:\n"; - m_boost.printGraph(m_boost.m_sg0); - - string baseName = "isFeas"; - const DecompAlgo * decompAlgo = getDecompAlgo(); - string fileNameDot = baseName - + ".n" + UtilIntToStr(decompAlgo->getNodeIndex()) - + ".c" + UtilIntToStr(decompAlgo->getCutCallsTotal()) - + ".p" + UtilIntToStr(decompAlgo->getPriceCallsTotal()) + ".dot"; - m_boost.printDotFile(fileNameDot, - graphLib.vertex_wt, - m_boost.m_sg); - ); - - - //--- - //--- d.) 
remove the depot, then each component must satisfy capacity - //--- - vector component(n_vertices); - int c; - int n_comp - = m_boost.findConnectedComponents(m_boost.m_sg0, component); - - //--- - //--- NOTE: - //--- we might have the case that even though solution is integral - //--- and degree of all verts = 2 and depot degree = 2k but not all - //--- vertices have a path to depot (i.e., we have subtours) - //--- - //--- in this case, we will have more components than routes - //--- - //--- since we find components of sg0, 0 will be a component itself - //--- so we are looking for numRoutes+1 components - //--- - if(n_comp != (m_vrp.m_numRoutes+1)){ - UTIL_DEBUG(m_appParam.LogLevel, 2, - (*m_osLog) - << "not feasible, n_comp=" << n_comp - << " n_routes=" << m_vrp.m_numRoutes << endl; - ); - return false; - } + //--- + //--- copy the support graph to m_sg0 and remove vertex 0 + //--- + m_boost.copyGraph(m_boost.m_sg, m_boost.m_sg0); + m_boost.clearVertex(0, m_boost.m_sg0); + + UTIL_DEBUG( + m_appParam.LogLevel, 5, (*m_osLog) << "m_sg:\n"; + m_boost.printGraph(m_boost.m_sg); (*m_osLog) << "m_sg:\n"; + m_boost.printGraph(m_boost.m_sg0); + + string baseName = "isFeas"; + const DecompAlgo *decompAlgo = getDecompAlgo(); + string fileNameDot = + baseName + ".n" + UtilIntToStr(decompAlgo->getNodeIndex()) + ".c" + + UtilIntToStr(decompAlgo->getCutCallsTotal()) + ".p" + + UtilIntToStr(decompAlgo->getPriceCallsTotal()) + ".dot"; + m_boost.printDotFile(fileNameDot, graphLib.vertex_wt, m_boost.m_sg);); + + //--- + //--- d.) remove the depot, then each component must satisfy capacity + //--- + vector component(n_vertices); + int c; + int n_comp = m_boost.findConnectedComponents(m_boost.m_sg0, component); + + //--- + //--- NOTE: + //--- we might have the case that even though solution is integral + //--- and degree of all verts = 2 and depot degree = 2k but not all + //--- vertices have a path to depot (i.e., we have subtours) + //--- + //--- in this case, we will have more components than routes + //--- + //--- since we find components of sg0, 0 will be a component itself + //--- so we are looking for numRoutes+1 components + //--- + if (n_comp != (m_vrp.m_numRoutes + 1)) { + UTIL_DEBUG(m_appParam.LogLevel, 2, + (*m_osLog) << "not feasible, n_comp=" << n_comp + << " n_routes=" << m_vrp.m_numRoutes << endl;); + return false; + } - vector< vector > comp; - for(c = 0; c < n_comp; c++){ - vector compV(n_vertices, false); - comp.push_back(compV); - } + vector> comp; + for (c = 0; c < n_comp; c++) { + vector compV(n_vertices, false); + comp.push_back(compV); + } - vector comp_count(n_comp, 0); - vector comp_demand(n_comp, 0); - //vector comp_cut(n_comp, 0.0); - for (v = 1; v < n_vertices; ++v){ - UTIL_DEBUG(m_appParam.LogLevel, 2, - (*m_osLog) << "component[" << v << "] = " << component[v] - << " comp_demand = " << comp_demand[component[v]] - << " vrp.vertex_wt[" << v << "] = " << graphLib.vertex_wt[v] - << endl; - ); - c = component[v]; - comp[c][v] = true; - comp_demand[c] += graphLib.vertex_wt[v]; - comp_count[c] ++; - //comp_cut[c] += solution[INDEX_U(0,v)]; - } + vector comp_count(n_comp, 0); + vector comp_demand(n_comp, 0); + // vector comp_cut(n_comp, 0.0); + for (v = 1; v < n_vertices; ++v) { + UTIL_DEBUG(m_appParam.LogLevel, 2, + (*m_osLog) << "component[" << v << "] = " << component[v] + << " comp_demand = " << comp_demand[component[v]] + << " vrp.vertex_wt[" << v + << "] = " << graphLib.vertex_wt[v] << endl;); + c = component[v]; + comp[c][v] = true; + comp_demand[c] += graphLib.vertex_wt[v]; + 
comp_count[c]++; + // comp_cut[c] += solution[INDEX_U(0,v)]; + } - - for(c = 0; c < n_comp; c++){ - //each component's cut value must be either 0 or 2 since we are integral - //if the comp_cut = 0, then we have a violated SEC - //if the comp_demand > capacity, then we have a violated GSEC - - //--- - //--- this is ok, it just means a vehicle only serves one client - //--- TODO: sanity check, in this case should have x[0,j] = 2 - //--- - if(comp_count[c] <= 1) - continue; - - //this cannot happen given current core...? - //if(comp_cut[c] < m_param.tolerance){ - //new_cuts.push_back(new VRP_GSECCut(comp[c], m_vrp.vertex_wt, - //m_vrp.capacity)); - // feasible = false; - //} - - if(comp_demand[c] > (graphLib.capacity + tolZero)){ - //new_cuts.push_back(new VRP_GSECCut(comp[c], m_vrp.vertex_wt, - // m_vrp.capacity)); - UTIL_DEBUG(m_appParam.LogLevel, 2, - (*m_osLog) - << "demand of comp " << c << " is " << comp_demand[c] - << " and exceeds cap " << graphLib.capacity; - ); - UtilPrintFuncEnd(m_osLog, m_classTag, - "APPisUserFeasible()", m_appParam.LogLevel, 2); - return false; - } + for (c = 0; c < n_comp; c++) { + // each component's cut value must be either 0 or 2 since we are integral + // if the comp_cut = 0, then we have a violated SEC + // if the comp_demand > capacity, then we have a violated GSEC + + //--- + //--- this is ok, it just means a vehicle only serves one client + //--- TODO: sanity check, in this case should have x[0,j] = 2 + //--- + if (comp_count[c] <= 1) + continue; + + // this cannot happen given current core...? + // if(comp_cut[c] < m_param.tolerance){ + // new_cuts.push_back(new VRP_GSECCut(comp[c], m_vrp.vertex_wt, + // m_vrp.capacity)); + // feasible = false; + //} + + if (comp_demand[c] > (graphLib.capacity + tolZero)) { + // new_cuts.push_back(new VRP_GSECCut(comp[c], m_vrp.vertex_wt, + // m_vrp.capacity)); + UTIL_DEBUG(m_appParam.LogLevel, 2, + (*m_osLog) + << "demand of comp " << c << " is " << comp_demand[c] + << " and exceeds cap " << graphLib.capacity;); + UtilPrintFuncEnd(m_osLog, m_classTag, "APPisUserFeasible()", + m_appParam.LogLevel, 2); + return false; + } } return feasible; } //===========================================================================// -void VRP_DecompApp::printOriginalColumn(const int index, - ostream * os) const { - UtilPrintEdge(index, os); +void VRP_DecompApp::printOriginalColumn(const int index, ostream *os) const { + UtilPrintEdge(index, os); } diff --git a/Dip/examples/VRP/VRP_GSECCut.cpp b/Dip/examples/VRP/VRP_GSECCut.cpp old mode 100755 new mode 100644 index 0744480d..7fd3b3b8 --- a/Dip/examples/VRP/VRP_GSECCut.cpp +++ b/Dip/examples/VRP/VRP_GSECCut.cpp @@ -5,245 +5,232 @@ (c) Copyright 2004 Lehigh University. All Rights Reserved. - This software is licensed under the Common Public License. Please see + This software is licensed under the Common Public License. Please see accompanying file for terms. 
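// A worked instance (made-up numbers) of the GSEC right-hand sides that
// setBounds() computes further down in this file: take a customer set S with
// |S| = 5, total demand d(S) = 10, and vehicle capacity C = 4. Then
//
//   bin = ceil(d(S)/C) = ceil(10/4) = 3
//   ACROSS:     x(delta(S)) >= 2*bin      = 6
//   SIDE:       x(E(S))     <= |S| - bin  = 2
//   SIDE_COMPL: x(E(S_bar)) <= |S_bar| - bin
//
// Intuitively, at least ceil(d(S)/C) vehicles are needed to serve S, and each
// of them crosses the boundary of S twice, which gives the ACROSS form; the
// SIDE forms are the equivalent statement on the edges inside S (or inside
// its complement).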
---------------------------------------------------------------------------*/ #include "VRP_GSECCut.h" #include "UtilMacros.h" -//how to deal with logs +// how to deal with logs /*-------------------------------------------------------------------------*/ -void VRP_GSECCut::expandCutToRow(CoinPackedVector * row){ - //UtilPrintFuncBegin(&cout, m_classTag, - // "expandCutToRow()", 3, 2); - - vector indices; - vector elements; - switch(m_type){ - case ACROSS: - { - for(unsigned int i = 0; i < m_S.size(); i++){ - for(int v = 0; v < m_nverts; v++){ - if(m_inS[v] || m_S[i] == v) - continue; - indices.push_back(UtilIndexU(m_S[i],v)); - } - } - fill_n(back_inserter(elements), indices.size(), 1.0); +void VRP_GSECCut::expandCutToRow(CoinPackedVector *row) { + // UtilPrintFuncBegin(&cout, m_classTag, + // "expandCutToRow()", 3, 2); + + vector indices; + vector elements; + switch (m_type) { + case ACROSS: { + for (unsigned int i = 0; i < m_S.size(); i++) { + for (int v = 0; v < m_nverts; v++) { + if (m_inS[v] || m_S[i] == v) + continue; + indices.push_back(UtilIndexU(m_S[i], v)); } - break; - - case SIDE: - { - for(unsigned int i = 0; i < m_S.size(); i++) - for(unsigned int j = i + 1; j < m_S.size(); j++) - indices.push_back(UtilIndexU(m_S[i], m_S[j])); - fill_n(back_inserter(elements), indices.size(), 1.0); - } - break; - default: - cerr << "ERROR expandCutToRow sec_type" << endl; - abort(); - } - row->setVector(indices.size(), &indices[0], &elements[0], false); - - //UtilPrintVector(indices); - //UtilPrintVector(elements); - - //UtilPrintFuncEnd(&cout, m_classTag, - // "expandCutToRow()", 3, 2); - + } + fill_n(back_inserter(elements), indices.size(), 1.0); + } break; + + case SIDE: { + for (unsigned int i = 0; i < m_S.size(); i++) + for (unsigned int j = i + 1; j < m_S.size(); j++) + indices.push_back(UtilIndexU(m_S[i], m_S[j])); + fill_n(back_inserter(elements), indices.size(), 1.0); + } break; + default: + cerr << "ERROR expandCutToRow sec_type" << endl; + abort(); + } + row->setVector(indices.size(), &indices[0], &elements[0], false); + + // UtilPrintVector(indices); + // UtilPrintVector(elements); + + // UtilPrintFuncEnd(&cout, m_classTag, + // "expandCutToRow()", 3, 2); } /*-------------------------------------------------------------------------*/ -void VRP_GSECCut::print(ostream * os) const{ - //DecompCut::print(os); - switch(m_type){ - case ACROSS: - (*os) << "ACROSS "; - break; - case SIDE: - (*os) << "SIDE "; - break; - case SIDE_COMPL: - (*os) << "SIDE_COMPL "; - break; - } - - switch(m_storage){ - case VECTOR: - { - vector::const_iterator it; - (*os) << "S: "; - for(it = m_S.begin(); it != m_S.end(); it++) - (*os) << *it << " "; - } - break; - case BITSET: - case BOTH: - { - int i; - (*os) << "S: "; - for(i = 0; i < m_nverts; i++) - if(m_inS[i]) - (*os) << i << " "; - } - break; - case NONE: - default: - cerr << "ERROR in print - BAD cut storage_type" << endl; - abort(); - } - (*os) << endl; +void VRP_GSECCut::print(ostream *os) const { + // DecompCut::print(os); + switch (m_type) { + case ACROSS: + (*os) << "ACROSS "; + break; + case SIDE: + (*os) << "SIDE "; + break; + case SIDE_COMPL: + (*os) << "SIDE_COMPL "; + break; + } + + switch (m_storage) { + case VECTOR: { + vector::const_iterator it; + (*os) << "S: "; + for (it = m_S.begin(); it != m_S.end(); it++) + (*os) << *it << " "; + } break; + case BITSET: + case BOTH: { + int i; + (*os) << "S: "; + for (i = 0; i < m_nverts; i++) + if (m_inS[i]) + (*os) << i << " "; + } break; + case NONE: + default: + cerr << "ERROR in print - BAD cut 
storage_type" << endl; + abort(); + } + (*os) << endl; } /*-------------------------------------------------------------------------*/ -void VRP_GSECCut::setBounds(double infinity){ - - //(i) ACROSS: 2 * ceil( sum{i in S} d_i / C ) - //(ii) SIDE: |S| - ceil( sum{i in S} d_i / C ) - //(iii) SIDE_COMPL: |Shat| - ceil( sum{i in S} d_i / C ) - - //cout << "m_demandS: " << m_demandS << " m_cap: " << m_capacity << endl; - int bin = - static_cast(ceil( static_cast(m_demandS) / m_capacity )); - switch(m_type){ - case ACROSS: - setLowerBound(2.0 * bin); - setUpperBound(infinity); - break; - case SIDE: - setLowerBound(-infinity); - setUpperBound(getSize() - bin); - break; - case SIDE_COMPL: - setLowerBound(-infinity); - setUpperBound(((m_nverts - 1) - getSize()) - bin); - break; - default: - cerr << "ERROR in setBounds - BAD cut type" << endl; - abort(); - } - - /* - cout << "in setBounds LB: "; - if(getLowerBound() > -(infinity/2)) - cout << getLowerBound(); - else - cout << "-inf"; - cout << "in setBounds UB: "; - if(getUpperBound() < (infinity/2)) - cout << getUpperBound(); - else - cout << "inf"; - */ - +void VRP_GSECCut::setBounds(double infinity) { + + //(i) ACROSS: 2 * ceil( sum{i in S} d_i / C ) + //(ii) SIDE: |S| - ceil( sum{i in S} d_i / C ) + //(iii) SIDE_COMPL: |Shat| - ceil( sum{i in S} d_i / C ) + + // cout << "m_demandS: " << m_demandS << " m_cap: " << m_capacity << endl; + int bin = static_cast(ceil(static_cast(m_demandS) / m_capacity)); + switch (m_type) { + case ACROSS: + setLowerBound(2.0 * bin); + setUpperBound(infinity); + break; + case SIDE: + setLowerBound(-infinity); + setUpperBound(getSize() - bin); + break; + case SIDE_COMPL: + setLowerBound(-infinity); + setUpperBound(((m_nverts - 1) - getSize()) - bin); + break; + default: + cerr << "ERROR in setBounds - BAD cut type" << endl; + abort(); + } + + /* + cout << "in setBounds LB: "; + if(getLowerBound() > -(infinity/2)) + cout << getLowerBound(); + else + cout << "-inf"; + cout << "in setBounds UB: "; + if(getUpperBound() < (infinity/2)) + cout << getUpperBound(); + else + cout << "inf"; + */ } /*-------------------------------------------------------------------------*/ -bool VRP_GSECCut::isSame(const DecompCut * cut) const{ - const VRP_GSECCut * gsec_cut = dynamic_cast(cut); - if(!gsec_cut) - return false; - - if(m_type != gsec_cut->m_type) - return false; - switch(m_storage){ - case VECTOR: - return m_S == gsec_cut->m_S; - case BITSET: - case BOTH: - return m_inS == gsec_cut->m_inS; - case NONE: - return false; - } - return false; +bool VRP_GSECCut::isSame(const DecompCut *cut) const { + const VRP_GSECCut *gsec_cut = dynamic_cast(cut); + if (!gsec_cut) + return false; + + if (m_type != gsec_cut->m_type) + return false; + switch (m_storage) { + case VECTOR: + return m_S == gsec_cut->m_S; + case BITSET: + case BOTH: + return m_inS == gsec_cut->m_inS; + case NONE: + return false; + } + return false; } /*-------------------------------------------------------------------------*/ -void VRP_GSECCut::setStorage(){ - switch(m_storage){ - case VECTOR: - create_bitset(); - break; - case BITSET: - create_vector(); - break; - case BOTH: - break; - default: - //throw exception - m_storage = NONE; - return; - } +void VRP_GSECCut::setStorage() { + switch (m_storage) { + case VECTOR: + create_bitset(); + break; + case BITSET: + create_vector(); + break; + case BOTH: + break; + default: + // throw exception + m_storage = NONE; + return; + } } -//another function that probably belongs in a utility class +// another function that probably 
belongs in a utility class /*-------------------------------------------------------------------------*/ -void VRP_GSECCut::create_bitset(){ - //create bitset from vector - m_inS.resize(m_nverts); - for(vector::iterator it = m_S.begin(); it != m_S.end(); it++) - m_inS.set(*it); - m_storage = m_storage == VECTOR ? BOTH : BITSET; +void VRP_GSECCut::create_bitset() { + // create bitset from vector + m_inS.resize(m_nverts); + for (vector::iterator it = m_S.begin(); it != m_S.end(); it++) + m_inS.set(*it); + m_storage = m_storage == VECTOR ? BOTH : BITSET; } - -//another function that probably belongs in a utility class + +// another function that probably belongs in a utility class /*-------------------------------------------------------------------------*/ -void VRP_GSECCut::create_vector(){ - //create vector from bistet - m_S.reserve(m_inS.count());//is this worth it? or is count costly? - for(unsigned int u = 0; u < m_inS.size(); u++) - if(m_inS[u]) - m_S.push_back(u); - m_storage = m_storage == BITSET ? BOTH : VECTOR; +void VRP_GSECCut::create_vector() { + // create vector from bistet + m_S.reserve(m_inS.count()); // is this worth it? or is count costly? + for (unsigned int u = 0; u < m_inS.size(); u++) + if (m_inS[u]) + m_S.push_back(u); + m_storage = m_storage == BITSET ? BOTH : VECTOR; } /* SIDE_COMPL needs more thought... see Decomp's VRP_GSEC_cut */ /*-------------------------------------------------------------------------*/ /* TODO */ -void VRP_GSECCut::setCutType(){ - //m_type = ACROSS; - m_type = SIDE; +void VRP_GSECCut::setCutType() { + // m_type = ACROSS; + m_type = SIDE; } /*-------------------------------------------------------------------------*/ -void VRP_GSECCut::setDemand(const int * vertex_wt){ - if(m_demandS != 0) - return; - switch(m_type){ - case VECTOR: - case BOTH: - for(unsigned int i = 0; i < m_S.size(); i++){ - m_demandS += vertex_wt[m_S[i]]; - //printf("m_demandS=%d, i:%d m_S:%d vwt:%d\n", - // m_demandS, i, m_S[i], vertex_wt[m_S[i]]); - } - break; - case BITSET: - for(unsigned int u = 0; u < m_inS.size(); u++){ - if(m_inS[u]){ - m_demandS += vertex_wt[u]; - //printf("m_demandS=%d, u:%d vwt:%d\n", - // m_demandS, u, vertex_wt[u]); - } +void VRP_GSECCut::setDemand(const int *vertex_wt) { + if (m_demandS != 0) + return; + switch (m_type) { + case VECTOR: + case BOTH: + for (unsigned int i = 0; i < m_S.size(); i++) { + m_demandS += vertex_wt[m_S[i]]; + // printf("m_demandS=%d, i:%d m_S:%d vwt:%d\n", + // m_demandS, i, m_S[i], vertex_wt[m_S[i]]); + } + break; + case BITSET: + for (unsigned int u = 0; u < m_inS.size(); u++) { + if (m_inS[u]) { + m_demandS += vertex_wt[u]; + // printf("m_demandS=%d, u:%d vwt:%d\n", + // m_demandS, u, vertex_wt[u]); } - break; - } + } + break; + } } /*-------------------------------------------------------------------------*/ -const int VRP_GSECCut::getSize(){ - switch(m_type){ - case VECTOR: - case BOTH: - return m_S.size(); - case BITSET: - return m_inS.count(); - } - return 0; +const int VRP_GSECCut::getSize() { + switch (m_type) { + case VECTOR: + case BOTH: + return m_S.size(); + case BITSET: + return m_inS.count(); + } + return 0; } - - diff --git a/Dip/examples/VRP/VRP_Main.cpp b/Dip/examples/VRP/VRP_Main.cpp old mode 100755 new mode 100644 index 1c637717..ce7894cb --- a/Dip/examples/VRP/VRP_Main.cpp +++ b/Dip/examples/VRP/VRP_Main.cpp @@ -13,8 +13,8 @@ //===========================================================================// //===========================================================================// -#include 
"UtilTimer.h" #include "UtilParameters.h" +#include "UtilTimer.h" //===========================================================================// #include "VRP_DecompApp.h" //===========================================================================// @@ -26,114 +26,104 @@ //===========================================================================// //===========================================================================// -int main(int argc, char ** argv){ - try{ - //--- - //--- create the utility class for parsing parameters - //--- - UtilParameters utilParam(argc, argv); +int main(int argc, char **argv) { + try { + //--- + //--- create the utility class for parsing parameters + //--- + UtilParameters utilParam(argc, argv); - bool doCut = utilParam.GetSetting("doCut", true); - bool doPriceCut = utilParam.GetSetting("doPriceCut", false); - string Instance = utilParam.GetSetting("Instance", ".", "VRP"); - - UtilTimer timer; - double timeSetupReal = 0.0; - double timeSetupCpu = 0.0; - double timeSolveReal = 0.0; - double timeSolveCpu = 0.0; - - //--- - //--- start overall timer - //--- - timer.start(); - - //--- - //--- create the user application (a DecompApp) - //--- - VRP_DecompApp vrp(utilParam); - - //--- - //--- create the algorithm (a DecompAlgo) - //--- - DecompAlgo * algo = NULL; - assert(doCut + doPriceCut == 1); - - //--- - //--- create the CPM algorithm object - //--- - if(doCut) - algo = new DecompAlgoC(&vrp, utilParam); - - //--- - //--- create the PC algorithm object - //--- - if(doPriceCut) - algo = new DecompAlgoPC(&vrp, utilParam); + bool doCut = utilParam.GetSetting("doCut", true); + bool doPriceCut = utilParam.GetSetting("doPriceCut", false); + string Instance = utilParam.GetSetting("Instance", ".", "VRP"); - //--- - //--- create the driver AlpsDecomp model - //--- - int status = 0; - AlpsDecompModel alpsModel(utilParam, algo); - - timer.stop(); - timeSetupCpu = timer.getCpuTime(); - timeSetupReal = timer.getRealTime(); - - //--- - //--- solve - //--- - timer.start(); - status = alpsModel.solve(); - timer.stop(); - timeSolveCpu = timer.getCpuTime(); - timeSolveReal = timer.getRealTime(); + UtilTimer timer; + double timeSetupReal = 0.0; + double timeSetupCpu = 0.0; + double timeSolveReal = 0.0; + double timeSolveCpu = 0.0; + + //--- + //--- start overall timer + //--- + timer.start(); + + //--- + //--- create the user application (a DecompApp) + //--- + VRP_DecompApp vrp(utilParam); + + //--- + //--- create the algorithm (a DecompAlgo) + //--- + DecompAlgo *algo = NULL; + assert(doCut + doPriceCut == 1); + + //--- + //--- create the CPM algorithm object + //--- + if (doCut) + algo = new DecompAlgoC(&vrp, utilParam); + + //--- + //--- create the PC algorithm object + //--- + if (doPriceCut) + algo = new DecompAlgoPC(&vrp, utilParam); + //--- + //--- create the driver AlpsDecomp model + //--- + int status = 0; + AlpsDecompModel alpsModel(utilParam, algo); + + timer.stop(); + timeSetupCpu = timer.getCpuTime(); + timeSetupReal = timer.getRealTime(); + + //--- + //--- solve + //--- + timer.start(); + status = alpsModel.solve(); + timer.stop(); + timeSolveCpu = timer.getCpuTime(); + timeSolveReal = timer.getRealTime(); + + //--- + //--- sanity check + //--- + cout << setiosflags(ios::fixed | ios::showpoint); + cout << "Status= " << status << " BestLB= " << setw(10) + << UtilDblToStr(alpsModel.getGlobalLB(), 5) << " BestUB= " << setw(10) + << UtilDblToStr(alpsModel.getGlobalUB(), 5) << " Nodes= " << setw(6) + << alpsModel.getNumNodesProcessed() << " SetupCPU= " << 
timeSetupCpu + << " SolveCPU= " << timeSolveCpu + << " TotalCPU= " << timeSetupCpu + timeSolveCpu + << " SetupReal= " << timeSetupReal << " SolveReal= " << timeSolveReal + << " TotalReal= " << timeSetupReal + timeSolveReal << endl; + + if (status == AlpsExitStatusOptimal && vrp.getBestKnownUB() < 1.0e50) { //--- - //--- sanity check + //--- the assumption here is that the BestKnownLB/UB is optimal //--- - cout << setiosflags(ios::fixed|ios::showpoint); - cout << "Status= " << status - << " BestLB= " << setw(10) - << UtilDblToStr(alpsModel.getGlobalLB(),5) - << " BestUB= " << setw(10) - << UtilDblToStr(alpsModel.getGlobalUB(),5) - << " Nodes= " << setw(6) - << alpsModel.getNumNodesProcessed() - << " SetupCPU= " << timeSetupCpu - << " SolveCPU= " << timeSolveCpu - << " TotalCPU= " << timeSetupCpu + timeSolveCpu - << " SetupReal= " << timeSetupReal - << " SolveReal= " << timeSolveReal - << " TotalReal= " << timeSetupReal + timeSolveReal - << endl; - - if(status == AlpsExitStatusOptimal && vrp.getBestKnownUB() < 1.0e50){ - //--- - //--- the assumption here is that the BestKnownLB/UB is optimal - //--- - double diff - = fabs(vrp.getBestKnownUB() - alpsModel.getGlobalUB()); - if(diff > 1.0e-4){ - cerr << "ERROR. BestKnownUB= " << vrp.getBestKnownUB() - << " but DECOMP claims GlobalUB= " - << alpsModel.getGlobalUB() << endl; - throw UtilException("Invalid claim of optimal.", - "main", "DECOMP"); - } + double diff = fabs(vrp.getBestKnownUB() - alpsModel.getGlobalUB()); + if (diff > 1.0e-4) { + cerr << "ERROR. BestKnownUB= " << vrp.getBestKnownUB() + << " but DECOMP claims GlobalUB= " << alpsModel.getGlobalUB() + << endl; + throw UtilException("Invalid claim of optimal.", "main", "DECOMP"); } + } - //--- - //--- free local memory - //--- - delete algo; - } - catch(CoinError & ex){ - cerr << "COIN Exception [ " << ex.message() << " ]" - << " at " << ex.fileName() << ":L" << ex.lineNumber() - << " in " << ex.className() << "::" << ex.methodName() << endl; - } - return 0; + //--- + //--- free local memory + //--- + delete algo; + } catch (CoinError &ex) { + cerr << "COIN Exception [ " << ex.message() << " ]" + << " at " << ex.fileName() << ":L" << ex.lineNumber() << " in " + << ex.className() << "::" << ex.methodName() << endl; + } + return 0; } - diff --git a/Dip/examples/VRP/VRP_SPPRC.cpp b/Dip/examples/VRP/VRP_SPPRC.cpp index 27645803..951700b6 100644 --- a/Dip/examples/VRP/VRP_SPPRC.cpp +++ b/Dip/examples/VRP/VRP_SPPRC.cpp @@ -20,127 +20,125 @@ #include "DecompAlgo.h" //===========================================================================// -void VRP_DecompApp::createModelESPPCC(DecompConstraintSet * model){ - //--- - //--- We create the ESPPCC model as an IP just for the sake of - //--- debugging. The model is actually over a directed graph, so - //--- the variables are not actually in the space of the original - //--- model. So, we will not feed this model to the framework - but - //--- we will use it for debuggin in the solveRelaxed function. - //--- - //--- OR, we can feed it to framework to be called like others - //--- but also have a function to map from this space to original space - //--- that the user must fill-in. - //--- - - //--- - //--- TODO: enforce branching decisions - //--- +void VRP_DecompApp::createModelESPPCC(DecompConstraintSet *model) { + //--- + //--- We create the ESPPCC model as an IP just for the sake of + //--- debugging. The model is actually over a directed graph, so + //--- the variables are not actually in the space of the original + //--- model. 
So, we will not feed this model to the framework - but + //--- we will use it for debuggin in the solveRelaxed function. + //--- + //--- OR, we can feed it to framework to be called like others + //--- but also have a function to map from this space to original space + //--- that the user must fill-in. + //--- - //--- - //--- Elementary Shortest Path with Capacity Constraints - //--- depot = 0 (start) and n+1 (end) - //--- C = 1...n (customers) - //--- N = C union {0, n+1} - //--- - //--- Model with complete directed graph (|A| = |N|^2 edges). - //--- For simplicity of index scheme, just use full graph and - //-- fix edges to 0 that cannot exist (like self loops, etc). - //--- - //--- sum{i in C, j in N} d[i] x[i,j] <= q - //--- sum{j in N} x[0,j] = 1 - //--- sum{i in N} x[i,h] - sum{j in N} x[h,j] = 0, for h in C - //--- sum{i in N} x[i,n+1] = 1 - //--- x[i,j] = 0, for all {(i,j) in A : i=j or i=n+1 or j=0} - //--- x[i,j] in {0,1} in A - //--- - UtilPrintFuncBegin(m_osLog, m_classTag, - "createModelESPPCC()", m_appParam.LogLevel, 2); + //--- + //--- TODO: enforce branching decisions + //--- - UtilGraphLib & graphLib = m_vrp.m_graphLib; - const double capacity = graphLib.capacity; - const int numCustomers = graphLib.n_vertices - 1; - const int numVertices = numCustomers + 2; - const int numCols = numVertices * numVertices; - const int numRows = numVertices + 1; - int colIndex, i, j, h; - - model->M = new CoinPackedMatrix(false, 0.0, 0.0); - if(!model->M) - throw UtilExceptionMemory("createModelPartMDKP", "MMKP_DecompApp"); - model->M->setDimensions(0, numCols); - model->reserve(numRows, numCols); + //--- + //--- Elementary Shortest Path with Capacity Constraints + //--- depot = 0 (start) and n+1 (end) + //--- C = 1...n (customers) + //--- N = C union {0, n+1} + //--- + //--- Model with complete directed graph (|A| = |N|^2 edges). + //--- For simplicity of index scheme, just use full graph and + //-- fix edges to 0 that cannot exist (like self loops, etc). 
+ //--- + //--- sum{i in C, j in N} d[i] x[i,j] <= q + //--- sum{j in N} x[0,j] = 1 + //--- sum{i in N} x[i,h] - sum{j in N} x[h,j] = 0, for h in C + //--- sum{i in N} x[i,n+1] = 1 + //--- x[i,j] = 0, for all {(i,j) in A : i=j or i=n+1 or j=0} + //--- x[i,j] in {0,1} in A + //--- + UtilPrintFuncBegin(m_osLog, m_classTag, "createModelESPPCC()", + m_appParam.LogLevel, 2); - //--- - //--- sum{i in C, j in N} d[i] x[i,j] <= q - //--- - CoinPackedVector rowCap; - for(i = 1; i <= numCustomers; i++){ - colIndex = i * numVertices; - for(j = 0; j < numVertices; j++){ - rowCap.insert(colIndex++, 1.0); - } - } - model->appendRow(rowCap, -m_infinity, capacity); + UtilGraphLib &graphLib = m_vrp.m_graphLib; + const double capacity = graphLib.capacity; + const int numCustomers = graphLib.n_vertices - 1; + const int numVertices = numCustomers + 2; + const int numCols = numVertices * numVertices; + const int numRows = numVertices + 1; + int colIndex, i, j, h; - //--- - //--- sum{j in N} x[0,j] = 1 - //--- - CoinPackedVector rowFlowDepot1; - colIndex = 0; - for(j = 0; j < numVertices; j++) - rowFlowDepot1.insert(colIndex++, 1.0); - model->appendRow(rowFlowDepot1, 1.0, 1.0); - - //--- - //--- sum{i in N} x[i,n+1] = 1 - //--- - CoinPackedVector rowFlowDepot2; - colIndex = (numVertices-1) * numVertices; - for(j = 0; j < numVertices; j++) - rowFlowDepot2.insert(colIndex++, 1.0); - model->appendRow(rowFlowDepot2, 1.0, 1.0); + model->M = new CoinPackedMatrix(false, 0.0, 0.0); + if (!model->M) + throw UtilExceptionMemory("createModelPartMDKP", "MMKP_DecompApp"); + model->M->setDimensions(0, numCols); + model->reserve(numRows, numCols); - //--- - //--- sum{i in N} x[i,h] - sum{j in N} x[h,j] = 0, for h in C - //--- - for(h = 1; h <= numCustomers; h++){ - CoinPackedVector row; - for(j = 0; j < numVertices; j++){ - if(h==j) - continue; - row.insert(diGraphIndex(j,h,numVertices), 1.0); - row.insert(diGraphIndex(h,j,numVertices), -1.0); - } - model->appendRow(row, 0.0, 0.0); - } + //--- + //--- sum{i in C, j in N} d[i] x[i,j] <= q + //--- + CoinPackedVector rowCap; + for (i = 1; i <= numCustomers; i++) { + colIndex = i * numVertices; + for (j = 0; j < numVertices; j++) { + rowCap.insert(colIndex++, 1.0); + } + } + model->appendRow(rowCap, -m_infinity, capacity); + + //--- + //--- sum{j in N} x[0,j] = 1 + //--- + CoinPackedVector rowFlowDepot1; + colIndex = 0; + for (j = 0; j < numVertices; j++) + rowFlowDepot1.insert(colIndex++, 1.0); + model->appendRow(rowFlowDepot1, 1.0, 1.0); - //--- - //--- set the col upper and lower bounds - //--- - UtilFillN(model->colLB, numCols, 0.0); - UtilFillN(model->colUB, numCols, 1.0); + //--- + //--- sum{i in N} x[i,n+1] = 1 + //--- + CoinPackedVector rowFlowDepot2; + colIndex = (numVertices - 1) * numVertices; + for (j = 0; j < numVertices; j++) + rowFlowDepot2.insert(colIndex++, 1.0); + model->appendRow(rowFlowDepot2, 1.0, 1.0); - //--- - //--- x[i,j] = 0, for all {(i,j) in A : i=j or i=n+1 or j=0} - //--- - colIndex = 0; - for(i = 0; i < numVertices; i++){ - for(j = 0; j < numVertices; j++){ - if(i == j || - i == (numCustomers+1) || - j == 0){ - model->colUB[colIndex] = 0.0; - } - colIndex++; + //--- + //--- sum{i in N} x[i,h] - sum{j in N} x[h,j] = 0, for h in C + //--- + for (h = 1; h <= numCustomers; h++) { + CoinPackedVector row; + for (j = 0; j < numVertices; j++) { + if (h == j) + continue; + row.insert(diGraphIndex(j, h, numVertices), 1.0); + row.insert(diGraphIndex(h, j, numVertices), -1.0); + } + model->appendRow(row, 0.0, 0.0); + } + + //--- + //--- set the col upper and 
lower bounds + //--- + UtilFillN(model->colLB, numCols, 0.0); + UtilFillN(model->colUB, numCols, 1.0); + + //--- + //--- x[i,j] = 0, for all {(i,j) in A : i=j or i=n+1 or j=0} + //--- + colIndex = 0; + for (i = 0; i < numVertices; i++) { + for (j = 0; j < numVertices; j++) { + if (i == j || i == (numCustomers + 1) || j == 0) { + model->colUB[colIndex] = 0.0; } - } - - //--- - //--- set the indices of the integer variables of model - //--- - UtilIotaN(model->integerVars, numCols, 0); + colIndex++; + } + } + + //--- + //--- set the indices of the integer variables of model + //--- + UtilIotaN(model->integerVars, numCols, 0); - UtilPrintFuncBegin(m_osLog, m_classTag, - "createModelESPPCC()", m_appParam.LogLevel, 2); + UtilPrintFuncBegin(m_osLog, m_classTag, "createModelESPPCC()", + m_appParam.LogLevel, 2); } diff --git a/Dip/src/AlpsDecompModel.cpp b/Dip/src/AlpsDecompModel.cpp index bbe752e0..f1486b7c 100644 --- a/Dip/src/AlpsDecompModel.cpp +++ b/Dip/src/AlpsDecompModel.cpp @@ -21,160 +21,144 @@ using namespace std; //===========================================================================// -void AlpsDecompModel::setAlpsSettings() -{ - //TODO: use stream not cout - UtilPrintFuncBegin(&cout, m_classTag, - "setAlpsSettings()", m_param.msgLevel, 3); - AlpsPar()->setEntry(AlpsParams::logFileLevel, m_param.logFileLevel); - AlpsPar()->setEntry(AlpsParams::printSolution, m_param.printSolution); - AlpsPar()->setEntry(AlpsParams::checkMemory, m_param.checkMemory); - AlpsPar()->setEntry(AlpsParams::msgLevel, m_param.msgLevel); - AlpsPar()->setEntry(AlpsParams::nodeLimit, m_param.nodeLimit); - AlpsPar()->setEntry(AlpsParams::nodeLogInterval, m_param.nodeLogInterval); - double timeLimit = m_decompAlgo->getParam().TimeLimit; - AlpsPar()->setEntry(AlpsParams::timeLimit, timeLimit); - UtilPrintFuncEnd(&cout, m_classTag, - "setAlpsSettings()", m_param.msgLevel, 3); +void AlpsDecompModel::setAlpsSettings() { + // TODO: use stream not cout + UtilPrintFuncBegin(&cout, m_classTag, "setAlpsSettings()", m_param.msgLevel, + 3); + AlpsPar()->setEntry(AlpsParams::logFileLevel, m_param.logFileLevel); + AlpsPar()->setEntry(AlpsParams::printSolution, m_param.printSolution); + AlpsPar()->setEntry(AlpsParams::checkMemory, m_param.checkMemory); + AlpsPar()->setEntry(AlpsParams::msgLevel, m_param.msgLevel); + AlpsPar()->setEntry(AlpsParams::nodeLimit, m_param.nodeLimit); + AlpsPar()->setEntry(AlpsParams::nodeLogInterval, m_param.nodeLogInterval); + double timeLimit = m_decompAlgo->getParam().TimeLimit; + AlpsPar()->setEntry(AlpsParams::timeLimit, timeLimit); + UtilPrintFuncEnd(&cout, m_classTag, "setAlpsSettings()", m_param.msgLevel, 3); } - //===========================================================================// -AlpsTreeNode* AlpsDecompModel::createRoot() -{ - //--- - //--- Create the root node description and set explicit (no diff'ing) - //--- NOTE: Alps will delete this memory; - //--- - UtilPrintFuncBegin(&cout, m_classTag, - "createRoot()", m_param.msgLevel, 3); - AlpsDecompTreeNode* root = new AlpsDecompTreeNode(); - assert(root); - CoinAssert(m_decompAlgo); - const DecompSubModel& modelCore = m_decompAlgo->getModelCore(); - CoinAssert(modelCore.getModel()->getColLB()); - CoinAssert(modelCore.getModel()->getColUB()); - AlpsDecompNodeDesc* desc - = new AlpsDecompNodeDesc(this, - modelCore.getModel()->getColLB(), - modelCore.getModel()->getColUB()); - assert(desc); - root->setDesc(desc); - //root->setExplicit(1); - UtilPrintFuncEnd(&cout, m_classTag, - "setAlpsSettings()", m_param.msgLevel, 3); - return 
root; +AlpsTreeNode *AlpsDecompModel::createRoot() { + //--- + //--- Create the root node description and set explicit (no diff'ing) + //--- NOTE: Alps will delete this memory; + //--- + UtilPrintFuncBegin(&cout, m_classTag, "createRoot()", m_param.msgLevel, 3); + AlpsDecompTreeNode *root = new AlpsDecompTreeNode(); + assert(root); + CoinAssert(m_decompAlgo); + const DecompSubModel &modelCore = m_decompAlgo->getModelCore(); + CoinAssert(modelCore.getModel()->getColLB()); + CoinAssert(modelCore.getModel()->getColUB()); + AlpsDecompNodeDesc *desc = new AlpsDecompNodeDesc( + this, modelCore.getModel()->getColLB(), modelCore.getModel()->getColUB()); + assert(desc); + root->setDesc(desc); + // root->setExplicit(1); + UtilPrintFuncEnd(&cout, m_classTag, "setAlpsSettings()", m_param.msgLevel, 3); + return root; } //===========================================================================// -bool AlpsDecompModel::fathomAllNodes() -{ - double feasBound = ALPS_OBJ_MAX; - double relBound = ALPS_OBJ_MAX; - double gapVal = ALPS_OBJ_MAX; - double currAbsGap_ = ALPS_OBJ_MAX; - double currRelGap_ = ALPS_OBJ_MAX; - AlpsTreeNode* bestNode = NULL; - // Compute gap - feasBound = broker_->getIncumbentValue(); - bestNode = broker_->getBestNode(); - - //printf("feasBound= %12.10f\n", feasBound); - if (bestNode) { - relBound = bestNode->getQuality(); - m_bestLB = relBound; - //printf("bestNode m_bestLB= %12.10f\n", m_bestLB); - } else { - m_bestLB = getKnowledgeBroker()->getBestQuality(); - //printf("no bestNode m_bestLB= %12.10f\n", m_bestLB); - } - - if (relBound > ALPS_OBJ_MAX_LESS) { - currAbsGap_ = currRelGap_ = 0.0; - } else if (feasBound < ALPS_OBJ_MAX_LESS) { - gapVal = ALPS_MAX(0, feasBound - relBound); - currAbsGap_ = ALPS_MAX(0, gapVal); - currRelGap_ = 100.0 * UtilCalculateGap(relBound, feasBound, - m_decompAlgo->getInfinity()); - } - - //printf("+++ Process %d: currAbsGap_ %g, currRelGap_%g\n", - // broker_->getProcRank(), currAbsGap_, currRelGap_); - //TODO: make option - double optimalAbsGap_ = 1.0e-6; - double optimalRelGap_ = 0.01;//0.01% - //TODO: cutoffIncrement (currentUB-cutoffIncrement) - if ( (currAbsGap_ <= optimalAbsGap_ + ALPS_ZERO) || - (currRelGap_ <= optimalRelGap_ + ALPS_ZERO) ) { - m_bestLB = feasBound; - return true; - } else { - return false; - } +bool AlpsDecompModel::fathomAllNodes() { + double feasBound = ALPS_OBJ_MAX; + double relBound = ALPS_OBJ_MAX; + double gapVal = ALPS_OBJ_MAX; + double currAbsGap_ = ALPS_OBJ_MAX; + double currRelGap_ = ALPS_OBJ_MAX; + AlpsTreeNode *bestNode = NULL; + // Compute gap + feasBound = broker_->getIncumbentValue(); + bestNode = broker_->getBestNode(); + + // printf("feasBound= %12.10f\n", feasBound); + if (bestNode) { + relBound = bestNode->getQuality(); + m_bestLB = relBound; + // printf("bestNode m_bestLB= %12.10f\n", m_bestLB); + } else { + m_bestLB = getKnowledgeBroker()->getBestQuality(); + // printf("no bestNode m_bestLB= %12.10f\n", m_bestLB); + } + + if (relBound > ALPS_OBJ_MAX_LESS) { + currAbsGap_ = currRelGap_ = 0.0; + } else if (feasBound < ALPS_OBJ_MAX_LESS) { + gapVal = ALPS_MAX(0, feasBound - relBound); + currAbsGap_ = ALPS_MAX(0, gapVal); + currRelGap_ = 100.0 * UtilCalculateGap(relBound, feasBound, + m_decompAlgo->getInfinity()); + } + + // printf("+++ Process %d: currAbsGap_ %g, currRelGap_%g\n", + // broker_->getProcRank(), currAbsGap_, currRelGap_); + // TODO: make option + double optimalAbsGap_ = 1.0e-6; + double optimalRelGap_ = 0.01; // 0.01% + // TODO: cutoffIncrement (currentUB-cutoffIncrement) + if ((currAbsGap_ <= 
optimalAbsGap_ + ALPS_ZERO) || + (currRelGap_ <= optimalRelGap_ + ALPS_ZERO)) { + m_bestLB = feasBound; + return true; + } else { + return false; + } } - - - //===========================================================================// -AlpsExitStatus AlpsDecompModel::solve() -{ - /** \todo Parallel version. */ +AlpsExitStatus AlpsDecompModel::solve() { + /** \todo Parallel version. */ #ifdef UTIL_USE_TIMERS - globalTimer.reset(); + globalTimer.reset(); #endif - UtilPrintFuncBegin(&cout, m_classTag, - "solve()", m_param.msgLevel, 3); - //--- - //--- Since the setup phase for DECOMP includes generating initial - //--- columns and creating the master problem it could be - //--- a significant amount of time. So, we need to adjust the - //--- time limit for the residual. - //--- - DecompAlgo* decompAlgo = getDecompAlgo(); - DecompStats& decompStats = decompAlgo->getStats(); - DecompParam& decompParam = decompAlgo->getMutableParam(); - double timeLimit = decompParam.TimeLimit; - double timeLeft = timeLimit - decompStats.timerOverall.getRealTime(); - AlpsPar()->setEntry(AlpsParams::timeLimit, timeLeft); - //--- - //--- copy relevant parameters to DecompParam from AlpsParam - //--- - decompParam.NodeLimit = m_param.nodeLimit; - //--- - //--- declare an AlpsKnowledgeBroker for serial application - //--- - AlpsKnowledgeBrokerSerial alpsBroker(0, NULL, *this); - //--- - //--- search for the best solution - //--- - alpsBroker.search(this); - - if (m_param.msgLevel > 0) { - m_decompAlgo->getDecompStats().printOverallStats(); - } - - //--- - //--- store best LB/UB objective found - //--- - m_bestUB = alpsBroker.getBestQuality(); - m_nodesProcessed = alpsBroker.getNumNodesProcessed(); - - if (alpsBroker.getSolStatus() != AlpsExitStatusOptimal) { - AlpsTreeNode* bestNode = NULL; - //if stops on time, have the nodes been free'd? - bestNode = alpsBroker.getBestNode(); - - if (bestNode) { - m_bestLB = bestNode->getQuality(); - } else { - m_bestLB = -ALPS_OBJ_MAX; - } - } - - m_alpsStatus = alpsBroker.getSolStatus(); - UtilPrintFuncEnd(&cout, m_classTag, - "solve()", m_param.msgLevel, 3); - return alpsBroker.getSolStatus(); + UtilPrintFuncBegin(&cout, m_classTag, "solve()", m_param.msgLevel, 3); + //--- + //--- Since the setup phase for DECOMP includes generating initial + //--- columns and creating the master problem it could be + //--- a significant amount of time. So, we need to adjust the + //--- time limit for the residual. + //--- + DecompAlgo *decompAlgo = getDecompAlgo(); + DecompStats &decompStats = decompAlgo->getStats(); + DecompParam &decompParam = decompAlgo->getMutableParam(); + double timeLimit = decompParam.TimeLimit; + double timeLeft = timeLimit - decompStats.timerOverall.getRealTime(); + AlpsPar()->setEntry(AlpsParams::timeLimit, timeLeft); + //--- + //--- copy relevant parameters to DecompParam from AlpsParam + //--- + decompParam.NodeLimit = m_param.nodeLimit; + //--- + //--- declare an AlpsKnowledgeBroker for serial application + //--- + AlpsKnowledgeBrokerSerial alpsBroker(0, NULL, *this); + //--- + //--- search for the best solution + //--- + alpsBroker.search(this); + + if (m_param.msgLevel > 0) { + m_decompAlgo->getDecompStats().printOverallStats(); + } + + //--- + //--- store best LB/UB objective found + //--- + m_bestUB = alpsBroker.getBestQuality(); + m_nodesProcessed = alpsBroker.getNumNodesProcessed(); + + if (alpsBroker.getSolStatus() != AlpsExitStatusOptimal) { + AlpsTreeNode *bestNode = NULL; + // if stops on time, have the nodes been free'd? 
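// A minimal, self-contained sketch (plain C++, not the DIP/ALPS API) of the
// gap-based fathoming test used in fathomAllNodes() above: stop once either
// the absolute or the relative gap between the best node bound and the
// incumbent is small enough.  relativeGapPercent is a hypothetical stand-in
// for UtilCalculateGap, and the default tolerances copy the hard-coded
// optimalAbsGap_ = 1.0e-6 and optimalRelGap_ = 0.01 (0.01%) values that the
// TODO comment says should become parameters.
#include <algorithm>
#include <cmath>
#include <cstdio>

// Relative gap in percent, guarded against a zero denominator.
static double relativeGapPercent(double lb, double ub) {
  return 100.0 * std::fabs(ub - lb) / std::max(std::fabs(ub), 1.0e-10);
}

// Fathom the remaining nodes once either tolerance is met (minimization).
static bool canFathomAll(double lb, double ub, double absTol = 1.0e-6,
                         double relTolPct = 0.01) {
  double absGap = std::max(0.0, ub - lb);
  return absGap <= absTol || relativeGapPercent(lb, ub) <= relTolPct;
}

int main() {
  // lb = 99.995, ub = 100.0 -> relative gap 0.005% <= 0.01%, so fathom.
  std::printf("%d\n", canFathomAll(99.995, 100.0));
  return 0;
}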
+ bestNode = alpsBroker.getBestNode(); + + if (bestNode) { + m_bestLB = bestNode->getQuality(); + } else { + m_bestLB = -ALPS_OBJ_MAX; + } + } + + m_alpsStatus = alpsBroker.getSolStatus(); + UtilPrintFuncEnd(&cout, m_classTag, "solve()", m_param.msgLevel, 3); + return alpsBroker.getSolStatus(); } - diff --git a/Dip/src/AlpsDecompTreeNode.cpp b/Dip/src/AlpsDecompTreeNode.cpp index a9ce3dea..128efbe9 100644 --- a/Dip/src/AlpsDecompTreeNode.cpp +++ b/Dip/src/AlpsDecompTreeNode.cpp @@ -21,11 +21,11 @@ #include "DecompApp.h" //===========================================================================// -#include "AlpsKnowledgeBroker.h" -#include "AlpsDecompTreeNode.h" +#include "AlpsDecompModel.h" #include "AlpsDecompNodeDesc.h" #include "AlpsDecompSolution.h" -#include "AlpsDecompModel.h" +#include "AlpsDecompTreeNode.h" +#include "AlpsKnowledgeBroker.h" //===========================================================================// #include "CoinUtility.hpp" @@ -33,585 +33,542 @@ using namespace std; //===========================================================================// -AlpsTreeNode* -AlpsDecompTreeNode::createNewTreeNode(AlpsNodeDesc*& desc) const -{ - //--- - //--- Create a new tree node, set node description. - //--- NOTE: we are not using differencing, constructs node from scratch - //--- - AlpsDecompModel* model - = dynamic_cast(desc->getModel()); - AlpsDecompParam& param = model->getParam(); - UtilPrintFuncBegin(&cout, m_classTag, - "createNewTreeNode()", param.msgLevel, 3); - AlpsDecompTreeNode* node = new AlpsDecompTreeNode(); - node->desc_ = desc; - UtilPrintFuncEnd(&cout, m_classTag, - "createNewTreeNode()", param.msgLevel, 3); - return node; +AlpsTreeNode *AlpsDecompTreeNode::createNewTreeNode(AlpsNodeDesc *&desc) const { + //--- + //--- Create a new tree node, set node description. 
+ //--- NOTE: we are not using differencing, constructs node from scratch + //--- + AlpsDecompModel *model = dynamic_cast(desc->getModel()); + AlpsDecompParam ¶m = model->getParam(); + UtilPrintFuncBegin(&cout, m_classTag, "createNewTreeNode()", param.msgLevel, + 3); + AlpsDecompTreeNode *node = new AlpsDecompTreeNode(); + node->desc_ = desc; + UtilPrintFuncEnd(&cout, m_classTag, "createNewTreeNode()", param.msgLevel, 3); + return node; } //===========================================================================// -bool AlpsDecompTreeNode::checkIncumbent(AlpsDecompModel* model, - const DecompSolution* decompSol) -{ - DecompAlgo* decompAlgo = model->getDecompAlgo(); - //--- - //--- decompAlgo found an IP (and user) feasible point - //--- - double currentUB = getKnowledgeBroker()->getIncumbentValue(); - double candidateUB = decompSol->getQuality(); - UTIL_DEBUG(model->getParam().msgLevel, 3, - cout - << "DecompAlgo found IP incum = " - << UtilDblToStr(candidateUB) - << " currentUB " << UtilDblToStr(currentUB) << endl; - ); - - if (candidateUB < currentUB) { - //--- - //--- create a new solution and add to alps knowledge - //--- - AlpsDecompSolution* alpsDecompSol = - new AlpsDecompSolution(decompSol->getSize(), - decompSol->getValues(), - decompSol->getQuality(), - decompAlgo->getDecompApp(), - getIndex(), - getDepth()); - getKnowledgeBroker()->addKnowledge(AlpsKnowledgeTypeSolution, - alpsDecompSol, - candidateUB); - //--- - //--- print the new solution (if debugging) - //--- - UTIL_DEBUG(model->getParam().msgLevel, 3, - const DecompApp * app - = decompAlgo->getDecompApp(); - const DecompConstraintSet * modelCore - = decompAlgo->getModelCore().getModel(); - app->printOriginalSolution(decompSol->getSize(), - modelCore->getColNames(), - decompSol->getValues());); - return true; - } - - return false; +bool AlpsDecompTreeNode::checkIncumbent(AlpsDecompModel *model, + const DecompSolution *decompSol) { + DecompAlgo *decompAlgo = model->getDecompAlgo(); + //--- + //--- decompAlgo found an IP (and user) feasible point + //--- + double currentUB = getKnowledgeBroker()->getIncumbentValue(); + double candidateUB = decompSol->getQuality(); + UTIL_DEBUG(model->getParam().msgLevel, 3, + cout << "DecompAlgo found IP incum = " << UtilDblToStr(candidateUB) + << " currentUB " << UtilDblToStr(currentUB) << endl;); + + if (candidateUB < currentUB) { + //--- + //--- create a new solution and add to alps knowledge + //--- + AlpsDecompSolution *alpsDecompSol = new AlpsDecompSolution( + decompSol->getSize(), decompSol->getValues(), decompSol->getQuality(), + decompAlgo->getDecompApp(), getIndex(), getDepth()); + getKnowledgeBroker()->addKnowledge(AlpsKnowledgeTypeSolution, alpsDecompSol, + candidateUB); + //--- + //--- print the new solution (if debugging) + //--- + UTIL_DEBUG(model->getParam().msgLevel, 3, + const DecompApp *app = decompAlgo->getDecompApp(); + const DecompConstraintSet *modelCore = + decompAlgo->getModelCore().getModel(); + app->printOriginalSolution(decompSol->getSize(), + modelCore->getColNames(), + decompSol->getValues());); + return true; + } + + return false; } //===========================================================================// -int AlpsDecompTreeNode::process(bool isRoot, - bool rampUp) -{ - //--- - //--- get pointer / reference to model, node description, decomp algo(s) - //--- - AlpsDecompNodeDesc* desc - = dynamic_cast(desc_); - AlpsDecompModel* model - = dynamic_cast(desc->getModel()); - AlpsDecompParam& param = model->getParam(); - DecompAlgo* decompAlgo = 
model->getDecompAlgo(); - CoinAssertDebug(desc && model); - UtilPrintFuncBegin(&cout, m_classTag, - "process()", param.msgLevel, 3); - UTIL_DEBUG(param.msgLevel, 3, - cout - << "Start process of node: " << getIndex() - << " (parent = " << getParentIndex() << ")" << endl; - ); - int status = AlpsReturnStatusOk; - bool doFathom = false; - DecompStatus decompStatus = STAT_FEASIBLE; - double relTolerance = 0.0001; //0.01% means optimal (make param) - double gap; - //--- - //--- check if this can be fathomed based on parent by objective cutoff - //--- - double currentUB = getKnowledgeBroker()->getIncumbentValue(); - double parentObjValue = getQuality(); - double primalTolerance = 1.0e-6; - double globalLB = -decompAlgo->getInfinity(); - double globalUB = decompAlgo->getInfinity(); - double thisQuality; - AlpsTreeNode* bestNode = NULL; - const double* lbs = desc->lowerBounds_; - const double* ubs = desc->upperBounds_; - const DecompApp* app = decompAlgo->getDecompApp(); - DecompConstraintSet* modelCore = decompAlgo->getModelCore().getModel(); - const int n_cols = modelCore->getNumCols(); - //TODO: cutoffIncrement (currentUB-cutoffIncrement) - - /** \todo get primalTolerance from parameter */ - if ((parentObjValue - primalTolerance) > currentUB) { - doFathom = true; - UTIL_DEBUG(param.msgLevel, 3, - cout << "Fathom since parentObjValue=" - << setw(10) << UtilDblToStr(parentObjValue) - << " currentUB = " << setw(10) << UtilDblToStr(currentUB) << endl; - ); - goto TERM_PROCESS; - } - - //--- - //--- the destructor initializes quality_ = infinity - //--- we really want -infinity - //--- - if (isRoot) { - quality_ = -ALPS_OBJ_MAX; - } - - //--- - //--- reset user-currentUB (if none given, this will have no effect) - //--- - decompAlgo->setObjBoundIP(decompAlgo->getCutoffUB()); - - if (!isRoot) { - //--- - //--- set the master column bounds (for this node in tree) - //--- - //--- - //--- for debugging, print column bounds that differ from original - //--- - UTIL_MSG(param.msgLevel, 3, - int c; - double diffLB; - double diffUB; - vector& colLBCore = modelCore->colLB; - vector& colUBCore = modelCore->colUB; - - for (c = 0; c < n_cols; c++) { - diffLB = lbs[c] - colLBCore[c]; - diffUB = ubs[c] - colUBCore[c]; - - if (!UtilIsZero(diffLB) || !UtilIsZero(diffUB)) { +int AlpsDecompTreeNode::process(bool isRoot, bool rampUp) { + //--- + //--- get pointer / reference to model, node description, decomp algo(s) + //--- + AlpsDecompNodeDesc *desc = dynamic_cast(desc_); + AlpsDecompModel *model = dynamic_cast(desc->getModel()); + AlpsDecompParam ¶m = model->getParam(); + DecompAlgo *decompAlgo = model->getDecompAlgo(); + CoinAssertDebug(desc && model); + UtilPrintFuncBegin(&cout, m_classTag, "process()", param.msgLevel, 3); + UTIL_DEBUG(param.msgLevel, 3, + cout << "Start process of node: " << getIndex() + << " (parent = " << getParentIndex() << ")" << endl;); + int status = AlpsReturnStatusOk; + bool doFathom = false; + DecompStatus decompStatus = STAT_FEASIBLE; + double relTolerance = 0.0001; // 0.01% means optimal (make param) + double gap; + //--- + //--- check if this can be fathomed based on parent by objective cutoff + //--- + double currentUB = getKnowledgeBroker()->getIncumbentValue(); + double parentObjValue = getQuality(); + double primalTolerance = 1.0e-6; + double globalLB = -decompAlgo->getInfinity(); + double globalUB = decompAlgo->getInfinity(); + double thisQuality; + AlpsTreeNode *bestNode = NULL; + const double *lbs = desc->lowerBounds_; + const double *ubs = desc->upperBounds_; + const 
DecompApp *app = decompAlgo->getDecompApp(); + DecompConstraintSet *modelCore = decompAlgo->getModelCore().getModel(); + const int n_cols = modelCore->getNumCols(); + // TODO: cutoffIncrement (currentUB-cutoffIncrement) + + /** \todo get primalTolerance from parameter */ + if ((parentObjValue - primalTolerance) > currentUB) { + doFathom = true; + UTIL_DEBUG(param.msgLevel, 3, + cout << "Fathom since parentObjValue=" << setw(10) + << UtilDblToStr(parentObjValue) << " currentUB = " + << setw(10) << UtilDblToStr(currentUB) << endl;); + goto TERM_PROCESS; + } + + //--- + //--- the destructor initializes quality_ = infinity + //--- we really want -infinity + //--- + if (isRoot) { + quality_ = -ALPS_OBJ_MAX; + } + + //--- + //--- reset user-currentUB (if none given, this will have no effect) + //--- + decompAlgo->setObjBoundIP(decompAlgo->getCutoffUB()); + + if (!isRoot) { + //--- + //--- set the master column bounds (for this node in tree) + //--- + //--- + //--- for debugging, print column bounds that differ from original + //--- + UTIL_MSG( + param.msgLevel, 3, int c; double diffLB; double diffUB; + vector &colLBCore = modelCore->colLB; + vector &colUBCore = modelCore->colUB; + + for (c = 0; c < n_cols; c++) { + diffLB = lbs[c] - colLBCore[c]; + diffUB = ubs[c] - colUBCore[c]; + + if (!UtilIsZero(diffLB) || !UtilIsZero(diffUB)) { cout << "bound-diffs c: " << c << " -> "; app->printOriginalColumn(c, &cout); - cout << "\t(lb,ub): (" << colLBCore[c] << "," - << colUBCore[c] << ")\t->\t(" << lbs[c] - << "," << ubs[c] << ")" << endl; - } - } - ); - decompAlgo->setMasterBounds(lbs, ubs); - decompAlgo->setSubProbBounds(lbs, ubs); - } else { - //--- - //--- check to see if we got lucky in generating init vars - //--- - if (decompAlgo->getXhatIPBest()) { - checkIncumbent(model, decompAlgo->getXhatIPBest()); - } - - //--- - //--- This is a first attempt at a redesign of branching rows. - //--- We still have all of them explicitly defined, but we - //--- relax them and only explicitly enforce things as we branch. - //--- - //--- A more advanced attempt would treat branching rows as cuts - //--- and add them dynamically. - //--- - //--- In root node, set all branching rows to "free" by relaxing - //--- lb and ub. - //--- - //--- NOTE: this should also be done for all nodes except for the - //--- rows that represent bounds that we have branched on. - //--- - if (decompAlgo->getAlgo() == PRICE_AND_CUT) { - int c; - double* lbsInf = new double[n_cols]; - double* ubsInf = new double[n_cols]; - - for (c = 0; c < n_cols; c++) { - lbsInf[c] = -decompAlgo->getInfinity(); - ubsInf[c] = decompAlgo->getInfinity(); - //printf("root c:%d lb=%g ub=%g\n", - // c, lbs[c], ubs[c]); - } - - decompAlgo->setMasterBounds(lbsInf, ubsInf); - UTIL_DELARR(lbsInf); - UTIL_DELARR(ubsInf); - //actually, don't need to do this - these should already be set - decompAlgo->setSubProbBounds(lbs, ubs); - } - } - - //--- - //--- update the currentUB value for decomp algo - //--- - currentUB = getKnowledgeBroker()->getIncumbentValue(); - decompAlgo->setObjBoundIP(currentUB);//?? 
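// A small illustration (not the DIP API; names are made up) of the two bound
// tests that process() applies around the relaxation solve for a minimization
// problem: before solving, a node is fathomed if the bound inherited from its
// parent (minus a primal tolerance) already exceeds the incumbent; after
// solving, it is fathomed if its own best bound has reached the incumbent.
#include <cstdio>

// Pre-solve check: the parent's bound is valid for the whole subtree.
static bool fathomByParentBound(double parentBound, double incumbent,
                                double primalTol = 1.0e-6) {
  return (parentBound - primalTol) > incumbent;
}

// Post-solve check: the node's own bound cannot beat the incumbent.
static bool fathomByNodeBound(double nodeBound, double incumbent) {
  return nodeBound >= incumbent;
}

int main() {
  // Parent bound 105 vs incumbent 100 -> subtree cannot improve, fathom.
  std::printf("%d %d\n", fathomByParentBound(105.0, 100.0),
              fathomByNodeBound(100.0, 100.0));
  return 0;
}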
- gap = decompAlgo->getInfinity(); - globalUB = getKnowledgeBroker()->getIncumbentValue(); - - if (!isRoot) { - bestNode = getKnowledgeBroker()->getBestNode(); - globalLB = bestNode->getQuality(); - //--- - //--- if the overall gap is tight enough, fathom whatever is left - //--- - //TODO: cutoffIncrement (currentUB-cutoffIncrement) - gap = UtilCalculateGap(globalLB, globalUB, decompAlgo->getInfinity()); - - if (gap <= relTolerance) { - doFathom = true; - UTIL_MSG(param.msgLevel, 3, - cout << "Fathom Node " << getIndex() << " since globalLB= " - << setw(10) << UtilDblToStr(globalLB) - << " globalUB = " << setw(10) << UtilDblToStr(globalUB) - << " gap = " << setw(10) << UtilDblToStr(gap) << endl; - ); - goto TERM_PROCESS; - } - } - - //--- - //--- solve the bounding problem (DecompAlgo) - //--- - decompStatus = decompAlgo->processNode(this, globalLB, globalUB); - - //--- - //--- during processNode, did we find any IP feasible points? - //--- - if (decompAlgo->getXhatIPBest()) { - if (checkIncumbent(model, decompAlgo->getXhatIPBest())) { - decompStatus = STAT_IP_FEASIBLE; - } + cout << "\t(lb,ub): (" << colLBCore[c] << "," << colUBCore[c] + << ")\t->\t(" << lbs[c] << "," << ubs[c] << ")" << endl; + } + }); + decompAlgo->setMasterBounds(lbs, ubs); + decompAlgo->setSubProbBounds(lbs, ubs); + } else { + //--- + //--- check to see if we got lucky in generating init vars + //--- + if (decompAlgo->getXhatIPBest()) { + checkIncumbent(model, decompAlgo->getXhatIPBest()); + } + + //--- + //--- This is a first attempt at a redesign of branching rows. + //--- We still have all of them explicitly defined, but we + //--- relax them and only explicitly enforce things as we branch. + //--- + //--- A more advanced attempt would treat branching rows as cuts + //--- and add them dynamically. + //--- + //--- In root node, set all branching rows to "free" by relaxing + //--- lb and ub. + //--- + //--- NOTE: this should also be done for all nodes except for the + //--- rows that represent bounds that we have branched on. + //--- + if (decompAlgo->getAlgo() == PRICE_AND_CUT) { + int c; + double *lbsInf = new double[n_cols]; + double *ubsInf = new double[n_cols]; - //--- - //--- update the local currentUB value and the decomp global UB - //--- - currentUB = getKnowledgeBroker()->getIncumbentValue(); - decompAlgo->setObjBoundIP(currentUB); - } - - switch (decompStatus) { - case STAT_FEASIBLE: - case STAT_IP_FEASIBLE: - //--- - //--- the relaxation is feasible - //--- if the new bound is > current currentUB, fathom - //--- else , branch - //--- - thisQuality = decompAlgo->getObjBestBoundLB(); //LB (min) - currentUB = getKnowledgeBroker()->getIncumbentValue(); //UB (min) - - if (thisQuality > quality_) { - quality_ = thisQuality; - } - - //watch tolerance here... 
if quality is close enough, fathom it - gap = UtilCalculateGap(thisQuality, currentUB, decompAlgo->getInfinity()); - - //if(gap <= relTolerance){ - if (quality_ >= currentUB) { - doFathom = true; - UTIL_DEBUG(param.msgLevel, 3, - cout << "Fathom since thisQuality= " - << setw(10) << UtilDblToStr(thisQuality) - << " quality_= " << setw(10) << UtilDblToStr(quality_) - << " currentUB = " << setw(10) << UtilDblToStr(currentUB) - << " gap = " << setw(10) << UtilDblToStr(gap) << endl; - ); + for (c = 0; c < n_cols; c++) { + lbsInf[c] = -decompAlgo->getInfinity(); + ubsInf[c] = decompAlgo->getInfinity(); + // printf("root c:%d lb=%g ub=%g\n", + // c, lbs[c], ubs[c]); } + decompAlgo->setMasterBounds(lbsInf, ubsInf); + UTIL_DELARR(lbsInf); + UTIL_DELARR(ubsInf); + // actually, don't need to do this - these should already be set + decompAlgo->setSubProbBounds(lbs, ubs); + } + } + + //--- + //--- update the currentUB value for decomp algo + //--- + currentUB = getKnowledgeBroker()->getIncumbentValue(); + decompAlgo->setObjBoundIP(currentUB); //?? + gap = decompAlgo->getInfinity(); + globalUB = getKnowledgeBroker()->getIncumbentValue(); + + if (!isRoot) { + bestNode = getKnowledgeBroker()->getBestNode(); + globalLB = bestNode->getQuality(); + //--- + //--- if the overall gap is tight enough, fathom whatever is left + //--- + // TODO: cutoffIncrement (currentUB-cutoffIncrement) + gap = UtilCalculateGap(globalLB, globalUB, decompAlgo->getInfinity()); + + if (gap <= relTolerance) { + doFathom = true; UTIL_MSG(param.msgLevel, 3, - cout << "Node " << getIndex() - << " quality " << UtilDblToStr(quality_) - << " currentUB " << UtilDblToStr(currentUB) - << " doFathom " << doFathom << endl; - ); - break; - case STAT_INFEASIBLE: - //--- - //--- the relaxation is infeasible, fathom - //--- - thisQuality = -ALPS_OBJ_MAX; - doFathom = true; - UTIL_MSG(param.msgLevel, 3, - cout << "Fathom since node infeasible\n"; - ); - break; - default: - assert(0); - } - - //TODO: control by decomp log level? - UTIL_MSG(param.msgLevel, 3, - cout << "Node " << getIndex() - << " bestQuality " << UtilDblToStr(quality_) - << " bestFeasible " << UtilDblToStr(currentUB) << endl; - ); + cout << "Fathom Node " << getIndex() + << " since globalLB= " << setw(10) << UtilDblToStr(globalLB) + << " globalUB = " << setw(10) << UtilDblToStr(globalUB) + << " gap = " << setw(10) << UtilDblToStr(gap) << endl;); + goto TERM_PROCESS; + } + } + + //--- + //--- solve the bounding problem (DecompAlgo) + //--- + decompStatus = decompAlgo->processNode(this, globalLB, globalUB); + + //--- + //--- during processNode, did we find any IP feasible points? + //--- + if (decompAlgo->getXhatIPBest()) { + if (checkIncumbent(model, decompAlgo->getXhatIPBest())) { + decompStatus = STAT_IP_FEASIBLE; + } + + //--- + //--- update the local currentUB value and the decomp global UB + //--- + currentUB = getKnowledgeBroker()->getIncumbentValue(); + decompAlgo->setObjBoundIP(currentUB); + } + + switch (decompStatus) { + case STAT_FEASIBLE: + case STAT_IP_FEASIBLE: + //--- + //--- the relaxation is feasible + //--- if the new bound is > current currentUB, fathom + //--- else , branch + //--- + thisQuality = decompAlgo->getObjBestBoundLB(); // LB (min) + currentUB = getKnowledgeBroker()->getIncumbentValue(); // UB (min) + + if (thisQuality > quality_) { + quality_ = thisQuality; + } + + // watch tolerance here... 
if quality is close enough, fathom it + gap = UtilCalculateGap(thisQuality, currentUB, decompAlgo->getInfinity()); + + // if(gap <= relTolerance){ + if (quality_ >= currentUB) { + doFathom = true; + UTIL_DEBUG(param.msgLevel, 3, + cout << "Fathom since thisQuality= " << setw(10) + << UtilDblToStr(thisQuality) << " quality_= " << setw(10) + << UtilDblToStr(quality_) << " currentUB = " << setw(10) + << UtilDblToStr(currentUB) << " gap = " << setw(10) + << UtilDblToStr(gap) << endl;); + } + + UTIL_MSG(param.msgLevel, 3, + cout << "Node " << getIndex() << " quality " + << UtilDblToStr(quality_) << " currentUB " + << UtilDblToStr(currentUB) << " doFathom " << doFathom + << endl;); + break; + case STAT_INFEASIBLE: + //--- + //--- the relaxation is infeasible, fathom + //--- + thisQuality = -ALPS_OBJ_MAX; + doFathom = true; + UTIL_MSG(param.msgLevel, 3, cout << "Fathom since node infeasible\n";); + break; + default: + assert(0); + } + + // TODO: control by decomp log level? + UTIL_MSG(param.msgLevel, 3, + cout << "Node " << getIndex() << " bestQuality " + << UtilDblToStr(quality_) << " bestFeasible " + << UtilDblToStr(currentUB) << endl;); TERM_PROCESS: - //STOP: if do fathom when node limit hit, then it gives wrong LB - // what is the proper status setting if node limit is hit to stop - // but not fathom so as to lose the proper bound - //if(param.nodeLimit == 0) - // status = AlpsExitStatusNodeLimit; - //--- - //--- for nodeLimit == 0, we do not want it to look for - //--- branching candidates since in some cases we stop due to - //--- gap without a branching candidate and do not want to have to - //--- return (since we are not evaluating any more nodes anyway) - //--- so, we fake it by acting like a branching candidate was found - //--- - decompAlgo->postProcessNode(decompStatus); - - if (param.nodeLimit == 0) { - setStatus(AlpsNodeStatusPregnant); - } else if (doFathom) { // || param.nodeLimit == 0){ - setStatus(AlpsNodeStatusFathomed); - } else { - status = chooseBranchingObject(model); - if (getStatus() == AlpsNodeStatusPregnant){ - decompAlgo->postProcessBranch(decompStatus); - } - } - - UtilPrintFuncEnd(&cout, m_classTag, - "process()", param.msgLevel, 3); - return status; + // STOP: if do fathom when node limit hit, then it gives wrong LB + // what is the proper status setting if node limit is hit to stop + // but not fathom so as to lose the proper bound + // if(param.nodeLimit == 0) + // status = AlpsExitStatusNodeLimit; + //--- + //--- for nodeLimit == 0, we do not want it to look for + //--- branching candidates since in some cases we stop due to + //--- gap without a branching candidate and do not want to have to + //--- return (since we are not evaluating any more nodes anyway) + //--- so, we fake it by acting like a branching candidate was found + //--- + decompAlgo->postProcessNode(decompStatus); + + if (param.nodeLimit == 0) { + setStatus(AlpsNodeStatusPregnant); + } else if (doFathom) { // || param.nodeLimit == 0){ + setStatus(AlpsNodeStatusFathomed); + } else { + status = chooseBranchingObject(model); + if (getStatus() == AlpsNodeStatusPregnant) { + decompAlgo->postProcessBranch(decompStatus); + } + } + + UtilPrintFuncEnd(&cout, m_classTag, "process()", param.msgLevel, 3); + return status; } //===========================================================================// -int AlpsDecompTreeNode::chooseBranchingObject(AlpsModel* model) -{ - AlpsDecompNodeDesc* desc = - dynamic_cast(desc_); - AlpsDecompModel* m = dynamic_cast(desc->getModel()); - AlpsDecompParam& param = 
m->getParam(); - UtilPrintFuncBegin(&cout, m_classTag, "chooseBranchingObject()", - param.msgLevel, 3); - bool gotBranch = m->getDecompAlgo()->chooseBranchSet(downBranchLB_, - downBranchUB_, - upBranchLB_, - upBranchUB_); - - if (!gotBranch) { - setStatus(AlpsNodeStatusEvaluated); - //--- - //--- but if we can't branch on this and it DID finish pricing out - //--- that means DW_LB=DW_UB for that node, then we are done - //--- processing it and we should fathom(?) - //--- all we have to check is that LB=UB, since LB is updated - //--- despite the tailoff - so their should be a gap... - //--- the UB for this node, not the global UB... - //--- - //printf("BestLB at this Node = %g\n", decompAlgo->getObjBestBoundLB()); - //printf("BestLB at this Node = %g\n", decompAlgo->getObjBestBoundUB();) - } else { - //--- - //--- we can go ahead and branch on this variable - //--- meaning we will produce children (hence, the name pregnant) - //--- - setStatus(AlpsNodeStatusPregnant); - } - - UtilPrintFuncEnd(&cout, m_classTag, "chooseBranchingObject()", - param.msgLevel, 3); - return AlpsReturnStatusOk; +int AlpsDecompTreeNode::chooseBranchingObject(AlpsModel *model) { + AlpsDecompNodeDesc *desc = dynamic_cast(desc_); + AlpsDecompModel *m = dynamic_cast(desc->getModel()); + AlpsDecompParam ¶m = m->getParam(); + UtilPrintFuncBegin(&cout, m_classTag, "chooseBranchingObject()", + param.msgLevel, 3); + bool gotBranch = m->getDecompAlgo()->chooseBranchSet( + downBranchLB_, downBranchUB_, upBranchLB_, upBranchUB_); + + if (!gotBranch) { + setStatus(AlpsNodeStatusEvaluated); + //--- + //--- but if we can't branch on this and it DID finish pricing out + //--- that means DW_LB=DW_UB for that node, then we are done + //--- processing it and we should fathom(?) + //--- all we have to check is that LB=UB, since LB is updated + //--- despite the tailoff - so their should be a gap... + //--- the UB for this node, not the global UB... 
+ //--- + // printf("BestLB at this Node = %g\n", decompAlgo->getObjBestBoundLB()); + // printf("BestLB at this Node = %g\n", decompAlgo->getObjBestBoundUB();) + } else { + //--- + //--- we can go ahead and branch on this variable + //--- meaning we will produce children (hence, the name pregnant) + //--- + setStatus(AlpsNodeStatusPregnant); + } + + UtilPrintFuncEnd(&cout, m_classTag, "chooseBranchingObject()", param.msgLevel, + 3); + return AlpsReturnStatusOk; } //===========================================================================// -std::vector< CoinTriple > -AlpsDecompTreeNode::branch() -{ - AlpsDecompNodeDesc* desc - = dynamic_cast(desc_); - AlpsDecompModel* m - = dynamic_cast(desc->getModel()); - AlpsDecompParam& param = m->getParam(); - AlpsDecompNodeDesc* child = 0; - DecompAlgo* decompAlgo = m->getDecompAlgo(); - DecompParam& decompParam = decompAlgo->getMutableParam(); - UtilPrintFuncBegin(&cout, m_classTag, "branch()", param.msgLevel, 3); - //--- - //--- the return of the branch method expects a vector of triples - //--- that contain the following: - //--- (1) AlpsNodeDesc* - a ptr to the node description - //--- (2) AlpsNodeStatus - the inital status of the node (candidate) - //--- (3) double - the objective best lower bound - //--- - std::vector< CoinTriple > newNodes; - //--- - //--- get the current node's lb/ub in original space - //--- - double* oldLbs = desc->lowerBounds_; - double* oldUbs = desc->upperBounds_; - const int numCols = desc->numberCols_; - CoinAssert(oldLbs && oldUbs && numCols); - - //--- - //--- check to make sure the branching variables have been determined - //--- - if ((downBranchLB_.size() + downBranchUB_.size() == 0) || - (upBranchLB_.size() + upBranchUB_.size() == 0)) { - std::cout << "AlpsDecompError: " - << "downBranch_.size() = " - << downBranchLB_.size() + downBranchUB_.size() - << "; upBranch_.size() = " - << upBranchLB_.size() + upBranchUB_.size() - << "; index_ = " << index_ << std::endl; - throw CoinError("empty branch variable set(s)", - "branch", "AlpsDecompTreeNode"); - } - - //--- - //--- create space for the new bounds for the children - //--- - double* newLbs = new double[numCols]; - double* newUbs = new double[numCols]; - std::copy(oldLbs, oldLbs + numCols, newLbs); - std::copy(oldUbs, oldUbs + numCols, newUbs); - //--- - //--- the objective estimate of the new nodes are init'd to the - //--- current node's objective (the new node's parent's objective) - //--- - double objVal(getQuality()); - - //--- - //--- Branch down - //--- - for (unsigned i = 0; i < downBranchLB_.size(); i++) { - if ((downBranchLB_[i].first < 0) || - (downBranchLB_[i].first >= numCols)) { - std::cout << "AlpsDecompError: downBranchLB_[" << i << "] variable = " - << downBranchLB_[i].first << "; numCols = " - << numCols << "; index_ = " << index_ << std::endl; - throw CoinError("branch index is out of range", - "branch", "AlpsDecompTreeNode"); - } - - newLbs[downBranchLB_[i].first] = downBranchLB_[i].second; - } - - for (unsigned i = 0; i < downBranchUB_.size(); i++) { - if ((downBranchUB_[i].first < 0) || - (downBranchUB_[i].first >= numCols)) { - std::cout << "AlpsDecompError: downBranchUB_[" << i << "] variable = " - << downBranchUB_[i].first << "; numCols = " - << numCols << "; index_ = " << index_ << std::endl; - throw CoinError("branch index is out of range", - "branch", "AlpsDecompTreeNode"); - } - - newUbs[downBranchUB_[i].first] = downBranchUB_[i].second; - } - - assert(downBranchLB_.size() + downBranchUB_.size() > 0); - child = new 
AlpsDecompNodeDesc(m, newLbs, newUbs); - child->setBranchedDir(-1);//enum? - - if (decompParam.BranchStrongIter) { - double globalUB = getKnowledgeBroker()->getIncumbentValue(); - int solveMasterAsMip = decompParam.SolveMasterAsMip; - int limitTotalCutIters = decompParam.TotalCutItersLimit; - int limitTotalPriceIters = decompParam.TotalPriceItersLimit; - //--- - //--- calculate an estimate on the lower bound after branching - //--- - //decompParam.TotalCutItersLimit = decompParam.BranchStrongIter; - decompParam.TotalCutItersLimit = 0; - decompParam.TotalPriceItersLimit = decompParam.BranchStrongIter; - decompParam.SolveMasterAsMip = 0; - decompAlgo->setStrongBranchIter(true); - decompAlgo->setMasterBounds(newLbs, newUbs); - decompAlgo->setSubProbBounds(newLbs, newUbs); - decompAlgo->processNode(this, objVal, globalUB); - decompAlgo->setStrongBranchIter(false); - decompParam.TotalCutItersLimit = limitTotalCutIters; - decompParam.TotalPriceItersLimit = limitTotalPriceIters; - decompParam.SolveMasterAsMip = solveMasterAsMip; - //TOOD: what if it stops in Phase1 - //how will this work in CPM? - } - - newNodes.push_back(CoinMakeTriple(static_cast(child), - AlpsNodeStatusCandidate, - objVal)); - //--- - //--- Branch up - //--- - //TODO: this can be done more cheaply than a full copy - std::copy(oldLbs, oldLbs + numCols, newLbs); - std::copy(oldUbs, oldUbs + numCols, newUbs); - - for (unsigned i = 0; i < upBranchLB_.size(); i++) { - if ((upBranchLB_[i].first < 0) || - (upBranchLB_[i].first >= numCols)) { - std::cout << "AlpsDecompError: upBranchLB_[" << i << "] variable = " - << upBranchLB_[i].first << "; numCols = " - << numCols << "; index_ = " << index_ << std::endl; - throw CoinError("branch index is out of range", - "branch", "AlpsDecompTreeNode"); - } - - newLbs[upBranchLB_[i].first] = upBranchLB_[i].second; - } - - for (unsigned i = 0; i < upBranchUB_.size(); i++) { - if ((upBranchUB_[i].first < 0) || - (upBranchUB_[i].first >= numCols)) { - std::cout << "AlpsDecompError: upBranchUB_[" << i << "] variable = " - << upBranchUB_[i].first << "; numCols = " - << numCols << "; index_ = " << index_ << std::endl; - throw CoinError("branch index is out of range", - "branch", "AlpsDecompTreeNode"); - } - - newUbs[upBranchUB_[i].first] = upBranchUB_[i].second; - } - - assert(upBranchLB_.size() + upBranchUB_.size() > 0); - child = new AlpsDecompNodeDesc(m, newLbs, newUbs); - child->setBranchedDir(1);//enum? 
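// A compact sketch of the branching pattern used by branch(): each child is
// built from a copy of the parent's column bounds, and only the entries named
// in the chosen branching set are overwritten.  std::vector is used here
// instead of raw arrays, and the (index, value) pairs play the role of
// downBranchLB_/downBranchUB_ and upBranchLB_/upBranchUB_; this is an
// illustration, not the DIP implementation.
#include <cstdio>
#include <utility>
#include <vector>

using BranchSet = std::vector<std::pair<int, double>>;

// Overwrite the listed lower/upper bounds in place (indices assumed valid).
static void applyBranch(std::vector<double> &lbs, std::vector<double> &ubs,
                        const BranchSet &newLBs, const BranchSet &newUBs) {
  for (const auto &p : newLBs) lbs[p.first] = p.second;
  for (const auto &p : newUBs) ubs[p.first] = p.second;
}

int main() {
  std::vector<double> parentLB = {0, 0, 0}, parentUB = {1, 1, 1};
  // Branch on x1: the down child gets x1 <= 0, the up child gets x1 >= 1.
  std::vector<double> lbDown = parentLB, ubDown = parentUB;
  applyBranch(lbDown, ubDown, {}, {{1, 0.0}});
  std::vector<double> lbUp = parentLB, ubUp = parentUB;
  applyBranch(lbUp, ubUp, {{1, 1.0}}, {});
  std::printf("down: x1 in [%g,%g]  up: x1 in [%g,%g]\n", lbDown[1], ubDown[1],
              lbUp[1], ubUp[1]);
  return 0;
}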
- - if (decompParam.BranchStrongIter) { - double globalUB = getKnowledgeBroker()->getIncumbentValue(); - int solveMasterAsMip = decompParam.SolveMasterAsMip; - int limitTotalCutIters = decompParam.TotalCutItersLimit; - int limitTotalPriceIters = decompParam.TotalPriceItersLimit; - //--- - //--- calculate an estimate on the lower bound after branching - //--- - //decompParam.TotalCutItersLimit = decompParam.BranchStrongIter; - decompParam.TotalCutItersLimit = 0; - decompParam.TotalPriceItersLimit = decompParam.BranchStrongIter; - decompParam.SolveMasterAsMip = 0; - decompAlgo->setStrongBranchIter(true); - decompAlgo->setMasterBounds(newLbs, newUbs); - decompAlgo->setSubProbBounds(newLbs, newUbs); - decompAlgo->processNode(this, objVal, globalUB); - decompAlgo->setStrongBranchIter(false); - decompParam.TotalCutItersLimit = limitTotalCutIters; - decompParam.TotalPriceItersLimit = limitTotalPriceIters; - decompParam.SolveMasterAsMip = solveMasterAsMip; - } - - newNodes.push_back(CoinMakeTriple(static_cast(child), - AlpsNodeStatusCandidate, - objVal)); - - //--- - //--- clean-up - //--- - if (newLbs != 0) { - delete [] newLbs; - newLbs = 0; - } - - if (newUbs != 0) { - delete [] newUbs; - newUbs = 0; - } - - //--- - //--- change this node's status to branched - //--- - setStatus(AlpsNodeStatusBranched); - UtilPrintFuncEnd(&cout, m_classTag, "branch()", param.msgLevel, 3); - return newNodes; +std::vector> +AlpsDecompTreeNode::branch() { + AlpsDecompNodeDesc *desc = dynamic_cast(desc_); + AlpsDecompModel *m = dynamic_cast(desc->getModel()); + AlpsDecompParam ¶m = m->getParam(); + AlpsDecompNodeDesc *child = 0; + DecompAlgo *decompAlgo = m->getDecompAlgo(); + DecompParam &decompParam = decompAlgo->getMutableParam(); + UtilPrintFuncBegin(&cout, m_classTag, "branch()", param.msgLevel, 3); + //--- + //--- the return of the branch method expects a vector of triples + //--- that contain the following: + //--- (1) AlpsNodeDesc* - a ptr to the node description + //--- (2) AlpsNodeStatus - the inital status of the node (candidate) + //--- (3) double - the objective best lower bound + //--- + std::vector> newNodes; + //--- + //--- get the current node's lb/ub in original space + //--- + double *oldLbs = desc->lowerBounds_; + double *oldUbs = desc->upperBounds_; + const int numCols = desc->numberCols_; + CoinAssert(oldLbs && oldUbs && numCols); + + //--- + //--- check to make sure the branching variables have been determined + //--- + if ((downBranchLB_.size() + downBranchUB_.size() == 0) || + (upBranchLB_.size() + upBranchUB_.size() == 0)) { + std::cout << "AlpsDecompError: " + << "downBranch_.size() = " + << downBranchLB_.size() + downBranchUB_.size() + << "; upBranch_.size() = " + << upBranchLB_.size() + upBranchUB_.size() + << "; index_ = " << index_ << std::endl; + throw CoinError("empty branch variable set(s)", "branch", + "AlpsDecompTreeNode"); + } + + //--- + //--- create space for the new bounds for the children + //--- + double *newLbs = new double[numCols]; + double *newUbs = new double[numCols]; + std::copy(oldLbs, oldLbs + numCols, newLbs); + std::copy(oldUbs, oldUbs + numCols, newUbs); + //--- + //--- the objective estimate of the new nodes are init'd to the + //--- current node's objective (the new node's parent's objective) + //--- + double objVal(getQuality()); + + //--- + //--- Branch down + //--- + for (unsigned i = 0; i < downBranchLB_.size(); i++) { + if ((downBranchLB_[i].first < 0) || (downBranchLB_[i].first >= numCols)) { + std::cout << "AlpsDecompError: downBranchLB_[" << i + << "] 
variable = " << downBranchLB_[i].first + << "; numCols = " << numCols << "; index_ = " << index_ + << std::endl; + throw CoinError("branch index is out of range", "branch", + "AlpsDecompTreeNode"); + } + + newLbs[downBranchLB_[i].first] = downBranchLB_[i].second; + } + + for (unsigned i = 0; i < downBranchUB_.size(); i++) { + if ((downBranchUB_[i].first < 0) || (downBranchUB_[i].first >= numCols)) { + std::cout << "AlpsDecompError: downBranchUB_[" << i + << "] variable = " << downBranchUB_[i].first + << "; numCols = " << numCols << "; index_ = " << index_ + << std::endl; + throw CoinError("branch index is out of range", "branch", + "AlpsDecompTreeNode"); + } + + newUbs[downBranchUB_[i].first] = downBranchUB_[i].second; + } + + assert(downBranchLB_.size() + downBranchUB_.size() > 0); + child = new AlpsDecompNodeDesc(m, newLbs, newUbs); + child->setBranchedDir(-1); // enum? + + if (decompParam.BranchStrongIter) { + double globalUB = getKnowledgeBroker()->getIncumbentValue(); + int solveMasterAsMip = decompParam.SolveMasterAsMip; + int limitTotalCutIters = decompParam.TotalCutItersLimit; + int limitTotalPriceIters = decompParam.TotalPriceItersLimit; + //--- + //--- calculate an estimate on the lower bound after branching + //--- + // decompParam.TotalCutItersLimit = decompParam.BranchStrongIter; + decompParam.TotalCutItersLimit = 0; + decompParam.TotalPriceItersLimit = decompParam.BranchStrongIter; + decompParam.SolveMasterAsMip = 0; + decompAlgo->setStrongBranchIter(true); + decompAlgo->setMasterBounds(newLbs, newUbs); + decompAlgo->setSubProbBounds(newLbs, newUbs); + decompAlgo->processNode(this, objVal, globalUB); + decompAlgo->setStrongBranchIter(false); + decompParam.TotalCutItersLimit = limitTotalCutIters; + decompParam.TotalPriceItersLimit = limitTotalPriceIters; + decompParam.SolveMasterAsMip = solveMasterAsMip; + // TOOD: what if it stops in Phase1 + // how will this work in CPM? + } + + newNodes.push_back(CoinMakeTriple(static_cast(child), + AlpsNodeStatusCandidate, objVal)); + //--- + //--- Branch up + //--- + // TODO: this can be done more cheaply than a full copy + std::copy(oldLbs, oldLbs + numCols, newLbs); + std::copy(oldUbs, oldUbs + numCols, newUbs); + + for (unsigned i = 0; i < upBranchLB_.size(); i++) { + if ((upBranchLB_[i].first < 0) || (upBranchLB_[i].first >= numCols)) { + std::cout << "AlpsDecompError: upBranchLB_[" << i + << "] variable = " << upBranchLB_[i].first + << "; numCols = " << numCols << "; index_ = " << index_ + << std::endl; + throw CoinError("branch index is out of range", "branch", + "AlpsDecompTreeNode"); + } + + newLbs[upBranchLB_[i].first] = upBranchLB_[i].second; + } + + for (unsigned i = 0; i < upBranchUB_.size(); i++) { + if ((upBranchUB_[i].first < 0) || (upBranchUB_[i].first >= numCols)) { + std::cout << "AlpsDecompError: upBranchUB_[" << i + << "] variable = " << upBranchUB_[i].first + << "; numCols = " << numCols << "; index_ = " << index_ + << std::endl; + throw CoinError("branch index is out of range", "branch", + "AlpsDecompTreeNode"); + } + + newUbs[upBranchUB_[i].first] = upBranchUB_[i].second; + } + + assert(upBranchLB_.size() + upBranchUB_.size() > 0); + child = new AlpsDecompNodeDesc(m, newLbs, newUbs); + child->setBranchedDir(1); // enum? 
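// When BranchStrongIter is set, branch() temporarily rewrites several solver
// parameters (no cut rounds, a limited number of pricing iterations, no
// master-as-MIP solve), runs processNode() to get a quick bound estimate for
// the child, and then restores the original values by hand.  Below is a
// scope-guard sketch of the same save-and-restore idea; ProbeParams and its
// fields are illustrative stand-ins, not DecompParam.
#include <cstdio>

struct ProbeParams {                    // stand-in for the affected parameters
  int totalCutItersLimit = 100;
  int totalPriceItersLimit = 100;
  int solveMasterAsMip = 1;
};

class StrongBranchGuard {
  ProbeParams &p_;
  ProbeParams saved_;
public:
  explicit StrongBranchGuard(ProbeParams &p, int strongIters)
      : p_(p), saved_(p) {
    p_.totalCutItersLimit = 0;              // no cutting during the probe
    p_.totalPriceItersLimit = strongIters;  // only a few pricing iterations
    p_.solveMasterAsMip = 0;                // skip the master-as-MIP step
  }
  ~StrongBranchGuard() { p_ = saved_; }     // restore on scope exit
};

int main() {
  ProbeParams params;
  {
    StrongBranchGuard guard(params, 5);
    std::printf("probe:    cut=%d price=%d mip=%d\n", params.totalCutItersLimit,
                params.totalPriceItersLimit, params.solveMasterAsMip);
    // ... the limited-iteration bound estimate would be computed here ...
  }
  std::printf("restored: cut=%d price=%d mip=%d\n", params.totalCutItersLimit,
              params.totalPriceItersLimit, params.solveMasterAsMip);
  return 0;
}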
+ + if (decompParam.BranchStrongIter) { + double globalUB = getKnowledgeBroker()->getIncumbentValue(); + int solveMasterAsMip = decompParam.SolveMasterAsMip; + int limitTotalCutIters = decompParam.TotalCutItersLimit; + int limitTotalPriceIters = decompParam.TotalPriceItersLimit; + //--- + //--- calculate an estimate on the lower bound after branching + //--- + // decompParam.TotalCutItersLimit = decompParam.BranchStrongIter; + decompParam.TotalCutItersLimit = 0; + decompParam.TotalPriceItersLimit = decompParam.BranchStrongIter; + decompParam.SolveMasterAsMip = 0; + decompAlgo->setStrongBranchIter(true); + decompAlgo->setMasterBounds(newLbs, newUbs); + decompAlgo->setSubProbBounds(newLbs, newUbs); + decompAlgo->processNode(this, objVal, globalUB); + decompAlgo->setStrongBranchIter(false); + decompParam.TotalCutItersLimit = limitTotalCutIters; + decompParam.TotalPriceItersLimit = limitTotalPriceIters; + decompParam.SolveMasterAsMip = solveMasterAsMip; + } + + newNodes.push_back(CoinMakeTriple(static_cast(child), + AlpsNodeStatusCandidate, objVal)); + + //--- + //--- clean-up + //--- + if (newLbs != 0) { + delete[] newLbs; + newLbs = 0; + } + + if (newUbs != 0) { + delete[] newUbs; + newUbs = 0; + } + + //--- + //--- change this node's status to branched + //--- + setStatus(AlpsNodeStatusBranched); + UtilPrintFuncEnd(&cout, m_classTag, "branch()", param.msgLevel, 3); + return newNodes; } - diff --git a/Dip/src/DecompAlgo.cpp b/Dip/src/DecompAlgo.cpp index a961544d..f96335bc 100644 --- a/Dip/src/DecompAlgo.cpp +++ b/Dip/src/DecompAlgo.cpp @@ -13,12 +13,12 @@ //===========================================================================// //===========================================================================// -#include "DecompApp.h" #include "DecompAlgo.h" -#include "DecompAlgoD.h" #include "DecompAlgoC.h" -#include "DecompCutOsi.h" #include "DecompAlgoCGL.h" +#include "DecompAlgoD.h" +#include "DecompApp.h" +#include "DecompCutOsi.h" #include "DecompSolverResult.h" #ifdef _OPENMP @@ -30,4253 +30,3979 @@ //#define STAB_DUMERLE //===========================================================================// -#include "OsiClpSolverInterface.hpp" -#include "CglGomory.hpp" -#include "CglProbing.hpp" -#include "CglKnapsackCover.hpp" #include "CglClique.hpp" #include "CglFlowCover.hpp" +#include "CglGomory.hpp" +#include "CglKnapsackCover.hpp" #include "CglMixedIntegerRounding2.hpp" +#include "CglProbing.hpp" +#include "OsiClpSolverInterface.hpp" using namespace std; //===========================================================================// struct SolveRelaxedThreadArgs { - DecompAlgo* algo; - vector* subModel; - int nBaseCoreRows; - double* u; - double* redCostX; - const double* origCost; - int n_origCols; - bool checkDup; - bool doExact; - bool doCutoff; - DecompVarList* vars; + DecompAlgo *algo; + vector *subModel; + int nBaseCoreRows; + double *u; + double *redCostX; + const double *origCost; + int n_origCols; + bool checkDup; + bool doExact; + bool doCutoff; + DecompVarList *vars; }; - //===========================================================================// -void DecompAlgo::checkBlocksColumns() -{ - UtilPrintFuncBegin(m_osLog, m_classTag, - "checkBlocksColumns()", m_param.LogDebugLevel, 2); - - if (m_modelRelax.size() == 0) { - UtilPrintFuncEnd(m_osLog, m_classTag, - "checkBlocksColumns()", m_param.LogDebugLevel, 2); +void DecompAlgo::checkBlocksColumns() { + UtilPrintFuncBegin(m_osLog, m_classTag, "checkBlocksColumns()", + m_param.LogDebugLevel, 2); + + if 
(m_modelRelax.size() == 0) { + UtilPrintFuncEnd(m_osLog, m_classTag, "checkBlocksColumns()", + m_param.LogDebugLevel, 2); + return; + } + + //--- + //--- sanity check that the blocks are column disjoint + //--- + map::iterator mid1; + map::iterator mid2; + + for (mid1 = m_modelRelax.begin(); mid1 != m_modelRelax.end(); mid1++) { + DecompSubModel &modelRelax1 = (*mid1).second; + DecompConstraintSet *model = modelRelax1.getModel(); + + if (!model || !model->getMatrix()) { + UtilPrintFuncEnd(m_osLog, m_classTag, "checkBlocksColumns()", + m_param.LogDebugLevel, 2); return; - } - - //--- - //--- sanity check that the blocks are column disjoint - //--- - map::iterator mid1; - map::iterator mid2; - - for (mid1 = m_modelRelax.begin(); mid1 != m_modelRelax.end(); mid1++) { - DecompSubModel& modelRelax1 = (*mid1).second; - DecompConstraintSet* model = modelRelax1.getModel(); - - if (!model || !model->getMatrix()) { - UtilPrintFuncEnd(m_osLog, m_classTag, - "checkBlocksColumns()", m_param.LogDebugLevel, 2); - return; - } + } + + set &activeCols1 = modelRelax1.getModel()->activeColumnsS; + + for (mid2 = m_modelRelax.begin(); mid2 != m_modelRelax.end(); mid2++) { + if (mid1 == mid2) { + continue; + } + + DecompSubModel &modelRelax2 = (*mid2).second; + set &activeCols2 = modelRelax2.getModel()->activeColumnsS; + set activeCols1inter2; + // this is very expensive - can we improve? + set_intersection(activeCols1.begin(), activeCols1.end(), + activeCols2.begin(), activeCols2.end(), + inserter(activeCols1inter2, activeCols1inter2.begin())); + + if (activeCols1inter2.size() > 0) { + cerr << "NOTE: the columns in block " << modelRelax1.getBlockId() + << " -> " << modelRelax1.getModelName() << " and block " + << modelRelax2.getBlockId() << " -> " << modelRelax2.getModelName() + << " overlap." << endl; + set::iterator it; + + for (it = activeCols1inter2.begin(); it != activeCols1inter2.end(); + it++) { + (*m_osLog) << "Column " << setw(5) << *it << " -> "; + + if (modelRelax2.getModel()->colNames.size() > 0) + (*m_osLog) << setw(25) << modelRelax2.getModel()->colNames[*it]; + + (*m_osLog) << " is found in both blocks." 
<< endl; + } + + throw UtilException("Columns in some blocks overlap.", + "checkBlocksColumns", "DecompAlgo"); + } + } + } + + //--- + //--- sanity check that the union of active columns in blocks + //--- should cover all columns in core - if not, these are 'master-only' + //--- columns which can be dealt with using either LD or the using the + //--- ideas of Rob Pratt discussion (9/27/09), or defined explicitly + //--- by user + //--- + set activeColsUnion; + set::iterator sit; + + for (mid1 = m_modelRelax.begin(); mid1 != m_modelRelax.end(); mid1++) { + DecompSubModel &modelRelax = (*mid1).second; + DecompConstraintSet *model = modelRelax.getModel(); + assert(model); + set &activeCols = model->activeColumnsS; + set_union(activeCols.begin(), activeCols.end(), activeColsUnion.begin(), + activeColsUnion.end(), + inserter(activeColsUnion, activeColsUnion.begin())); + } + + const DecompSubModel &modelCore = getModelCore(); + + // add the master-only variables ot the set union + const vector &masterOnlyCols = modelCore.getModel()->getMasterOnlyCols(); + + set masterOnlyColsSet(masterOnlyCols.begin(), masterOnlyCols.end()); + + set_union(masterOnlyColsSet.begin(), masterOnlyColsSet.end(), + activeColsUnion.begin(), activeColsUnion.end(), + inserter(activeColsUnion, activeColsUnion.begin())); + + bool allColsCovered = true; + + for (int i = 0; i < modelCore.getModel()->getNumCols(); i++) { + sit = activeColsUnion.find(i); + + if (sit == activeColsUnion.end()) { + (*m_osLog) << "Column " << setw(5) << i << " -> " << setw(25) + << modelCore.getModel()->colNames[i] + << " is missing from union of blocks." << endl; + allColsCovered = false; + } + } + + if (!allColsCovered) + throw UtilException("Some columns not covered in blocks", + "checkBlocksColumns", "DecompAlgo"); + + UtilPrintFuncEnd(m_osLog, m_classTag, "checkBlocksColumns()", + m_param.LogDebugLevel, 2); +} - set& activeCols1 - = modelRelax1.getModel()->activeColumnsS; +//===========================================================================// +void DecompAlgo::initSetup() { + UTIL_MSG(m_param.LogLevel, 2, + (*m_osLog) << "Initial Algo Setup" + << " (algo = " << DecompAlgoStr[m_algo] << ")\n";); + UtilPrintFuncBegin(m_osLog, m_classTag, "initSetup()", m_param.LogDebugLevel, + 2); + + //--- + //--- create DecompSubModel objects from DecompModel objects + //--- these just store pointers to the models provided by user + //--- and will store pointers to the approriate OSI objects + //--- + getModelsFromApp(); + m_numConvexCon = static_cast(m_modelRelax.size()); + DecompConstraintSet *modelCore = m_modelCore.getModel(); + assert(modelCore); + UTIL_DEBUG( + m_param.LogDebugLevel, 1, + + if (modelCore) { + (*m_osLog) << "ModelCore cols: " << modelCore->getNumCols() + << " rows: " << modelCore->getNumRows() << "\n"; + } else { (*m_osLog) << "ModelCore is Empty.\n"; }); + //--- + //--- copy master-only columns from modelCore + //--- + const vector &masterOnlyCols = modelCore->getMasterOnlyCols(); + m_masterOnlyCols.clear(); + m_masterOnlyCols.reserve(UtilGetSize(masterOnlyCols)); + std::copy(masterOnlyCols.begin(), masterOnlyCols.end(), + std::back_inserter(m_masterOnlyCols)); + + //--- + //--- sanity checks on user input + //--- + if (m_param.DebugCheckBlocksColumns) { + checkBlocksColumns(); + } + + //--- + //--- if we have a core, allocate a pool of memory for re-use + //--- + if (modelCore) { + m_memPool.allocateMemory(modelCore->getNumCols(), modelCore->getNumRows()); + } + + //--- + //--- By default the relaxation can be solved using a 
generic IP solver. + //--- + //--- Here, for each relaxation, we initialize an OSI interface and load + //--- the problem data. + //--- + map::iterator mit; + map>::iterator mivt; + vector::iterator vit; + + for (mit = m_modelRelax.begin(); mit != m_modelRelax.end(); mit++) { + createOsiSubProblem((*mit).second); + } + + for (mivt = m_modelRelaxNest.begin(); mivt != m_modelRelaxNest.end(); + mivt++) { + for (vit = (*mivt).second.begin(); vit != (*mivt).second.end(); vit++) { + createOsiSubProblem((*vit)); + } + } + + // assert(m_numConvexCon >= 1); + UTIL_DEBUG( + m_param.LogDebugLevel, 1, + (*m_osLog) << "Number of Convexity Constraints: " << m_numConvexCon + << endl; - for (mid2 = m_modelRelax.begin(); mid2 != m_modelRelax.end(); mid2++) { - if (mid1 == mid2) { - continue; - } + for (mit = m_modelRelax.begin(); mit != m_modelRelax.end(); mit++) { + DecompConstraintSet *model = (*mit).second.getModel(); + + if (model && model->M) { + (*m_osLog) << "ModelRelax cols: " << model->getNumCols() + << " rows: " << model->getNumRows() << endl; + } + }); + //--- + //--- open memory to store the current solution (in terms of x) + //--- + const int nCols = modelCore->getNumCols(); + const double *colLB = modelCore->getColLB(); + const double *colUB = modelCore->getColUB(); + assert(nCols > 0); + m_xhat = new double[nCols]; + m_colLBNode = new double[nCols]; + m_colUBNode = new double[nCols]; + assert(m_xhat && m_colLBNode && m_colUBNode); + memcpy(m_colLBNode, colLB, nCols * sizeof(double)); + memcpy(m_colUBNode, colUB, nCols * sizeof(double)); + //--- + //--- PC: create an initial set of points F'[0] subseteq F' (c + eps) + //--- DC: create an initial set of points F'[0] subseteq F' (xhat + eps) + //--- RC: do nothing - DecompAlgo base?? WHY - need an shat to get going + //--- C: do nothing - DecompAlgo base + //--- + DecompVarList initVars; + m_nodeStats.varsThisCall += generateInitVars(initVars); + + //--- + //--- create the master OSI interface + //--- + + m_masterSI = getOsiLpSolverInterface(); + m_infinity = m_masterSI->getInfinity(); + + CoinAssertHint(m_masterSI, "Error: Out of Memory"); + m_masterSI->messageHandler()->setLogLevel(m_param.LogLpLevel); + + //--- + //--- init CGL object + //--- NOTE: do not allow PC gomory cuts for now + //--- + m_cgl = new DecompAlgoCGL(m_param.LogDebugLevel, m_algo); + m_cgl->setLogStream(m_osLog); + m_cgl->setLogLevel(m_param.LogDebugLevel); + m_cgl->initGenerators(m_param.CutCglClique, m_param.CutCglOddHole, + m_param.CutCglFlowC, m_param.CutCglKnapC, + m_param.CutCglMir, m_param.CutCglGomory); + //--- + //--- create master problem + //--- + createMasterProblem(initVars); + UTIL_MSG(m_param.LogLevel, 2, + (*m_osLog) << "Model core nCols= " << modelCore->getNumCols() + << " nRows = " << modelCore->getNumRows() << "\n";); + + //--- + //--- construct cutgen solver interface + //--- + if (m_param.CutCGL) { + m_cutgenSI = new OsiClpSolverInterface(); + assert(m_cutgenSI); + loadSIFromModel(m_cutgenSI, true); + + //--- + //--- add an objective cut to the cut generator LP + //--- obj >= globalLB + //--- + if (m_algo == PRICE_AND_CUT) { + //--- + //--- THINK: + //--- this is causing an issue later - because packs 0's in matrix + //--- once gets to cut generator + //--- + // CoinPackedVector objCut(nCols, getOrigObjective()); + CoinPackedVector objCut; + const double *objCoeff = getOrigObjective(); + int i; - DecompSubModel& modelRelax2 = (*mid2).second; - set& activeCols2 - = modelRelax2.getModel()->activeColumnsS; - set activeCols1inter2; - //this is very 
expensive - can we improve? - set_intersection(activeCols1.begin(), activeCols1.end(), - activeCols2.begin(), activeCols2.end(), - inserter(activeCols1inter2, - activeCols1inter2.begin())); - - if (activeCols1inter2.size() > 0) { - cerr << "NOTE: the columns in block " << modelRelax1.getBlockId() - << " -> " << modelRelax1.getModelName() << " and block " - << modelRelax2.getBlockId() - << " -> " << modelRelax2.getModelName() << " overlap." - << endl; - set::iterator it; + for (i = 0; i < m_cutgenSI->getNumCols(); i++) { + if (!UtilIsZero(objCoeff[i])) { + objCut.insert(i, objCoeff[i]); + } + } + + m_cutgenObjCutInd = m_cutgenSI->getNumRows(); + m_cutgenSI->addRow(objCut, -m_infinity, m_infinity); + } + } + + //--- + //--- construct auxillary compact lp interface + //--- + if (m_param.InitCompactSolve) { + // TODO: would be nice if we could utilize IP presolve here? + m_auxSI = getOsiLpSolverInterface(); + assert(m_auxSI); + loadSIFromModel(m_auxSI); + } + + /*#ifdef STAB_DUMERLE + //--- + //--- using the cut gen OSI, solve the initial LP + //--- for the compact formulation to get starting duals + //--- TODO: what if no CutCGL - need its own object + //--- + //--- we are only going to use the duals from core as estimates + //--- of the duals for master + //--- + assert(m_param.CutCGL); + m_cutgenSI->initialSolve(); + assert(m_cutgenSI->isProvenOptimal()); + + const double * dualSol = m_cutgenSI->getRowPrice(); + const vector & rowNames = m_cutgenSI->getRowNames(); + + int r; + for(r = 0; r < modelCore->getNumRows(); r++){ + if(fabs(dualSol[r]) > DecompEpsilon){ + if(r < static_cast(rowNames.size())){ + printf("INIT DUAL FOR CORE ROW[%6d -> %25s] = %12.10f\n", + r, rowNames[r].c_str(), dualSol[r]); + } + else + printf("INIT DUAL[%6d] = %12.10f\n", r, dualSol[r]); + } + } + #endif*/ + UtilPrintFuncEnd(m_osLog, m_classTag, "initSetup()", m_param.LogDebugLevel, + 2); +} - for (it = activeCols1inter2.begin(); - it != activeCols1inter2.end(); it++) { - (*m_osLog) << "Column " << setw(5) << *it << " -> "; +//===========================================================================// +void DecompAlgo::createOsiSubProblem(DecompSubModel &subModel) { + // TODO: design question, we are assuming that master solver is + // an LP solver and relaxed solver is an IP - it really should + // be a generic object and an LP or IP solver is just one option + // for a solver + OsiSolverInterface *subprobSI = NULL; + DecompConstraintSet *model = subModel.getModel(); + + if (!model || !model->M) { + //--- + //--- if not using built-in solver, make sure user has + //--- provided a solver function + //--- TODO: how? + //--- + // const DecompApp * app = getDecompApp(); + return; + } + + UtilPrintFuncBegin(m_osLog, m_classTag, "createOsiSubProblem()", + m_param.LogDebugLevel, 2); + int nInts = model->getNumInts(); + int nCols = model->getNumCols(); + int nRows = model->getNumRows(); + + subprobSI = getOsiIpSolverInterface(); + + assert(subprobSI); + subprobSI->messageHandler()->setLogLevel(m_param.LogLpLevel); + // TODO: use assign vs load? just pass pointers? 
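+  // On the TODO above (informal note): the practical difference is ownership.
+  //   loadProblem  (*M, clb, cub, obj, rlb, rub)  -> OSI copies the data
+  //   assignProblem ( M, clb, cub, obj, rlb, rub)  -> OSI takes the pointers
+  //                                                   and deletes them itself
+  // Copying is the safer default here because the DecompConstraintSet is
+  // owned by the application (initSetup only stores pointers to the user's
+  // models); assignProblem() is used in loadSIFromModel(), where throw-away
+  // copies are built specifically for the OSI object.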
+ subprobSI->loadProblem(*model->getMatrix(), model->getColLB(), + model->getColUB(), + NULL, // null objective + model->getRowLB(), model->getRowUB()); + + if (nInts > 0) { + subprobSI->setInteger(model->getIntegerVars(), nInts); + if (m_param.DecompIPSolver == "CPLEX" && + m_param.DecompLPSolver == "CPLEX") { +#ifdef DIP_HAS_CPX + OsiCpxSolverInterface *osiCpx = + dynamic_cast(subprobSI); + osiCpx->switchToMIP(); +#endif + } + } - if (modelRelax2.getModel()->colNames.size() > 0) - (*m_osLog) - << setw(25) << modelRelax2.getModel()->colNames[*it]; + //--- + //--- set column and row names (if they exist) + //--- + string objName = "objective"; + vector &colNames = model->colNames; + vector &rowNames = model->rowNames; + subprobSI->setIntParam(OsiNameDiscipline, 1); // 1=Lazy, 2=Full - (*m_osLog) << " is found in both blocks." << endl; - } + if (colNames.size()) { + subprobSI->setColNames(colNames, 0, nCols, 0); + } - throw UtilException("Columns in some blocks overlap.", - "checkBlocksColumns", "DecompAlgo"); - } - } - } - - //--- - //--- sanity check that the union of active columns in blocks - //--- should cover all columns in core - if not, these are 'master-only' - //--- columns which can be dealt with using either LD or the using the - //--- ideas of Rob Pratt discussion (9/27/09), or defined explicitly - //--- by user - //--- - set activeColsUnion; - set::iterator sit; - - for (mid1 = m_modelRelax.begin(); mid1 != m_modelRelax.end(); mid1++) { - DecompSubModel& modelRelax = (*mid1).second; - DecompConstraintSet* model = modelRelax.getModel(); - assert(model); - set& activeCols = model->activeColumnsS; - set_union(activeCols.begin(), activeCols.end(), - activeColsUnion.begin(), activeColsUnion.end(), - inserter(activeColsUnion, activeColsUnion.begin())); - } - - const DecompSubModel& modelCore = getModelCore(); - - // add the master-only variables ot the set union - const vector& masterOnlyCols = modelCore.getModel()->getMasterOnlyCols(); - - set masterOnlyColsSet(masterOnlyCols.begin(), masterOnlyCols.end()); - - set_union(masterOnlyColsSet.begin(), masterOnlyColsSet.end(), - activeColsUnion.begin(), activeColsUnion.end(), - inserter(activeColsUnion, activeColsUnion.begin())); - - bool allColsCovered = true; - - for (int i = 0; i < modelCore.getModel()->getNumCols(); i++) { - sit = activeColsUnion.find(i); - - if (sit == activeColsUnion.end()) { - (*m_osLog) << "Column " << setw(5) << i << " -> " - << setw(25) << modelCore.getModel()->colNames[i] - << " is missing from union of blocks." 
<< endl; - allColsCovered = false; - } - } + if (rowNames.size()) { + subprobSI->setRowNames(rowNames, 0, nRows, 0); + } - if (!allColsCovered) - throw UtilException("Some columns not covered in blocks", - "checkBlocksColumns", "DecompAlgo"); + subprobSI->setObjName(objName); + UTIL_DEBUG( + m_param.LogDebugLevel, 5, int i; - UtilPrintFuncEnd(m_osLog, m_classTag, - "checkBlocksColumns()", m_param.LogDebugLevel, 2); + for (i = 0; i < nCols; i++) { + (*m_osLog) << "User column name (" << i << ") = " << colNames[i] + << endl; + } for (i = 0; i < nCols; i++) { + (*m_osLog) << "OSI column name (" << i + << ") = " << subprobSI->getColName(i) << endl; + }); + //--- + //--- set subproblem pointer + //--- + subModel.setOsi(subprobSI); + UtilPrintFuncEnd(m_osLog, m_classTag, "createOsiSubProblem()", + m_param.LogDebugLevel, 2); } //===========================================================================// -void DecompAlgo::initSetup() -{ - UTIL_MSG(m_param.LogLevel, 2, - (*m_osLog) - << "Initial Algo Setup" - << " (algo = " << DecompAlgoStr[m_algo] << ")\n"; - ); - UtilPrintFuncBegin(m_osLog, m_classTag, - "initSetup()", m_param.LogDebugLevel, 2); - - //--- - //--- create DecompSubModel objects from DecompModel objects - //--- these just store pointers to the models provided by user - //--- and will store pointers to the approriate OSI objects - //--- - getModelsFromApp(); - m_numConvexCon = static_cast(m_modelRelax.size()); - DecompConstraintSet* modelCore = m_modelCore.getModel(); - assert(modelCore); - UTIL_DEBUG(m_param.LogDebugLevel, 1, - - if (modelCore) { - (*m_osLog) << "ModelCore cols: " << modelCore->getNumCols() - << " rows: " << modelCore->getNumRows() - << "\n"; - } else { - (*m_osLog) << "ModelCore is Empty.\n"; - } - ); - //--- - //--- copy master-only columns from modelCore - //--- - const vector& masterOnlyCols = modelCore->getMasterOnlyCols(); - m_masterOnlyCols.clear(); - m_masterOnlyCols.reserve(UtilGetSize(masterOnlyCols)); - std::copy(masterOnlyCols.begin(), masterOnlyCols.end(), - std::back_inserter(m_masterOnlyCols)); - - //--- - //--- sanity checks on user input - //--- - if (m_param.DebugCheckBlocksColumns) { - checkBlocksColumns(); - } - - //--- - //--- if we have a core, allocate a pool of memory for re-use - //--- - if (modelCore) { - m_memPool.allocateMemory(modelCore->getNumCols(), - modelCore->getNumRows()); - } - - //--- - //--- By default the relaxation can be solved using a generic IP solver. - //--- - //--- Here, for each relaxation, we initialize an OSI interface and load - //--- the problem data. 
- //--- - map ::iterator mit; - map >::iterator mivt; - vector ::iterator vit; - - for (mit = m_modelRelax.begin(); mit != m_modelRelax.end(); mit++) { - createOsiSubProblem((*mit).second); - } - - for (mivt = m_modelRelaxNest.begin(); - mivt != m_modelRelaxNest.end(); mivt++) { - for (vit = (*mivt).second.begin(); - vit != (*mivt).second.end(); vit++) { - createOsiSubProblem((*vit)); - } - } - - //assert(m_numConvexCon >= 1); - UTIL_DEBUG(m_param.LogDebugLevel, 1, - (*m_osLog) << "Number of Convexity Constraints: " - << m_numConvexCon << endl; - - for (mit = m_modelRelax.begin(); - mit != m_modelRelax.end(); mit++) { - DecompConstraintSet* model = (*mit).second.getModel(); - - if (model && model->M) { - (*m_osLog) - << "ModelRelax cols: " << model->getNumCols() - << " rows: " << model->getNumRows() - << endl; - } - } - ); - //--- - //--- open memory to store the current solution (in terms of x) - //--- - const int nCols = modelCore->getNumCols(); - const double* colLB = modelCore->getColLB(); - const double* colUB = modelCore->getColUB(); - assert(nCols > 0); - m_xhat = new double[nCols]; - m_colLBNode = new double[nCols]; - m_colUBNode = new double[nCols]; - assert(m_xhat && m_colLBNode && m_colUBNode); - memcpy(m_colLBNode, colLB, nCols * sizeof(double)); - memcpy(m_colUBNode, colUB, nCols * sizeof(double)); - //--- - //--- PC: create an initial set of points F'[0] subseteq F' (c + eps) - //--- DC: create an initial set of points F'[0] subseteq F' (xhat + eps) - //--- RC: do nothing - DecompAlgo base?? WHY - need an shat to get going - //--- C: do nothing - DecompAlgo base - //--- - DecompVarList initVars; - m_nodeStats.varsThisCall += generateInitVars(initVars); - - //--- - //--- create the master OSI interface - //--- - - m_masterSI = getOsiLpSolverInterface(); - m_infinity = m_masterSI->getInfinity(); - - CoinAssertHint(m_masterSI, "Error: Out of Memory"); - m_masterSI->messageHandler()->setLogLevel(m_param.LogLpLevel); - - //--- - //--- init CGL object - //--- NOTE: do not allow PC gomory cuts for now - //--- - m_cgl = new DecompAlgoCGL(m_param.LogDebugLevel, - m_algo); - m_cgl->setLogStream(m_osLog); - m_cgl->setLogLevel (m_param.LogDebugLevel); - m_cgl->initGenerators(m_param.CutCglClique, - m_param.CutCglOddHole, - m_param.CutCglFlowC, - m_param.CutCglKnapC, - m_param.CutCglMir, - m_param.CutCglGomory); - //--- - //--- create master problem - //--- - createMasterProblem(initVars); - UTIL_MSG(m_param.LogLevel, 2, - (*m_osLog) - << "Model core nCols= " << modelCore->getNumCols() - << " nRows = " << modelCore->getNumRows() << "\n"; - ); - - //--- - //--- construct cutgen solver interface - //--- - if (m_param.CutCGL) { - m_cutgenSI = new OsiClpSolverInterface(); - assert(m_cutgenSI); - loadSIFromModel(m_cutgenSI, true); - - //--- - //--- add an objective cut to the cut generator LP - //--- obj >= globalLB - //--- - if (m_algo == PRICE_AND_CUT) { - //--- - //--- THINK: - //--- this is causing an issue later - because packs 0's in matrix - //--- once gets to cut generator - //--- - //CoinPackedVector objCut(nCols, getOrigObjective()); - CoinPackedVector objCut; - const double* objCoeff = getOrigObjective(); - int i; - - for (i = 0; i < m_cutgenSI->getNumCols(); i++) { - if (!UtilIsZero(objCoeff[i])) { - objCut.insert(i, objCoeff[i]); - } - } - - m_cutgenObjCutInd = m_cutgenSI->getNumRows(); - m_cutgenSI->addRow(objCut, -m_infinity, m_infinity); - } - } - - //--- - //--- construct auxillary compact lp interface - //--- - if (m_param.InitCompactSolve) { - //TODO: would be nice if we 
could utilize IP presolve here? - m_auxSI = getOsiLpSolverInterface(); - assert(m_auxSI); - loadSIFromModel(m_auxSI); - } - - /*#ifdef STAB_DUMERLE - //--- - //--- using the cut gen OSI, solve the initial LP - //--- for the compact formulation to get starting duals - //--- TODO: what if no CutCGL - need its own object - //--- - //--- we are only going to use the duals from core as estimates - //--- of the duals for master - //--- - assert(m_param.CutCGL); - m_cutgenSI->initialSolve(); - assert(m_cutgenSI->isProvenOptimal()); - - const double * dualSol = m_cutgenSI->getRowPrice(); - const vector & rowNames = m_cutgenSI->getRowNames(); - - int r; - for(r = 0; r < modelCore->getNumRows(); r++){ - if(fabs(dualSol[r]) > DecompEpsilon){ - if(r < static_cast(rowNames.size())){ - printf("INIT DUAL FOR CORE ROW[%6d -> %25s] = %12.10f\n", - r, rowNames[r].c_str(), dualSol[r]); - } - else - printf("INIT DUAL[%6d] = %12.10f\n", r, dualSol[r]); - } - } - #endif*/ - UtilPrintFuncEnd(m_osLog, m_classTag, - "initSetup()", m_param.LogDebugLevel, 2); +void DecompAlgo::getModelsFromApp() { + UtilPrintFuncBegin(m_osLog, m_classTag, "getModelsFromApp()", + m_param.LogDebugLevel, 2); + + //--- + //--- check to make sure certain things are set + //--- + if (!m_app->m_objective) + throw UtilException("Application objective has not been set", + "getModelsFromApp", "DecompAlgo"); + + if (!m_app->m_modelCore.getModel()) + throw UtilException("Application core constraint set has not been set", + "getModelsFromApp", "DecompAlgo"); + + m_objective = m_app->m_objective; + m_modelCore = m_app->m_modelCore; + map::iterator mit; + map>::iterator mivt; + vector::iterator vit; + + for (mit = m_app->m_modelRelax.begin(); mit != m_app->m_modelRelax.end(); + mit++) { + //--- + //--- this constructs a DecompSubModel from a DecompModel + //--- + DecompSubModel subModel = (*mit).second; + m_modelRelax.insert(make_pair((*mit).first, subModel)); + } + + for (mivt = m_app->m_modelRelaxNest.begin(); + mivt != m_app->m_modelRelaxNest.end(); mivt++) { + vector v; + + for (vit = (*mivt).second.begin(); vit != (*mivt).second.end(); vit++) { + v.push_back(*vit); + } + + m_modelRelaxNest.insert(make_pair((*mivt).first, v)); + } + + UtilPrintFuncEnd(m_osLog, m_classTag, "getModelsFromApp()", + m_param.LogDebugLevel, 2); } //===========================================================================// -void DecompAlgo::createOsiSubProblem(DecompSubModel& subModel) -{ - //TODO: design question, we are assuming that master solver is - // an LP solver and relaxed solver is an IP - it really should - // be a generic object and an LP or IP solver is just one option - // for a solver - OsiSolverInterface* subprobSI = NULL; - DecompConstraintSet* model = subModel.getModel(); - - if (!model || !model->M) { - //--- - //--- if not using built-in solver, make sure user has - //--- provided a solver function - //--- TODO: how? - //--- - //const DecompApp * app = getDecompApp(); - return; - } - - UtilPrintFuncBegin(m_osLog, m_classTag, - "createOsiSubProblem()", m_param.LogDebugLevel, 2); - int nInts = model->getNumInts(); - int nCols = model->getNumCols(); - int nRows = model->getNumRows(); - - subprobSI = getOsiIpSolverInterface(); - - assert(subprobSI); - subprobSI->messageHandler()->setLogLevel(m_param.LogLpLevel); - //TODO: use assign vs load? just pass pointers? 
- subprobSI->loadProblem(*model->getMatrix(), - model->getColLB(), - model->getColUB(), - NULL, //null objective - model->getRowLB(), - model->getRowUB()); - - if (nInts > 0) { - subprobSI->setInteger(model->getIntegerVars(), nInts); - if (m_param.DecompIPSolver == "CPLEX" && m_param.DecompLPSolver == "CPLEX"){ -#ifdef DIP_HAS_CPX - OsiCpxSolverInterface* osiCpx - = dynamic_cast(subprobSI); - osiCpx->switchToMIP(); -#endif - } - } - - //--- - //--- set column and row names (if they exist) - //--- - string objName = "objective"; - vector& colNames = model->colNames; - vector& rowNames = model->rowNames; - subprobSI->setIntParam(OsiNameDiscipline, 1);//1=Lazy, 2=Full - - if (colNames.size()) { - subprobSI->setColNames(colNames, 0, nCols, 0); - } - - if (rowNames.size()) { - subprobSI->setRowNames(rowNames, 0, nRows, 0); - } - - subprobSI->setObjName(objName); - UTIL_DEBUG(m_param.LogDebugLevel, 5, - int i; - - for (i = 0; i < nCols; i++) { - (*m_osLog) << "User column name (" << i << ") = " - << colNames[i] << endl; - } - for (i = 0; i < nCols; i++) { - (*m_osLog) << "OSI column name (" << i << ") = " - << subprobSI->getColName(i) << endl; - } - ); - //--- - //--- set subproblem pointer - //--- - subModel.setOsi(subprobSI); - UtilPrintFuncEnd(m_osLog, m_classTag, - "createOsiSubProblem()", m_param.LogDebugLevel, 2); +void DecompAlgo::loadSIFromModel(OsiSolverInterface *si, bool doInt) { + //--- + //--- Initialize the solver interface. + //--- min c(x) + //--- A' x >= b' [optional] + //--- A''x >= b'' + //--- l <= x <= u + //--- + //--- relaxV contains [A', b' ] in terms of x (if explicit) + //--- core contains [A'', b''] in terms of x + //--- + UtilPrintFuncBegin(m_osLog, m_classTag, "loadSIFromModel()", + m_param.LogDebugLevel, 2); + DecompConstraintSet *core = m_modelCore.getModel(); + DecompConstraintSet *relax = NULL; + int nCols = core->getNumCols(); + int nRowsC = core->getNumRows(); + int nRowsR = 0; + int nRows = nRowsC; + //--- + //--- create matrix from core matrix + //--- + CoinPackedMatrix *M = new CoinPackedMatrix(*core->M); + assert(M); + //--- + //--- append to bottom the relax matrix/matrices + //--- create block file (for use in MILPBlock app) + //--- + ofstream os; + + if (m_param.LogDumpModel >= 2) { + os.open("blockFile.txt"); // or + } + + map::iterator mit; + + for (mit = m_modelRelax.begin(); mit != m_modelRelax.end(); mit++) { + relax = (*mit).second.getModel(); + + // TODO: for cut gen do we really want this model explicit?? 
+ // currently cannot do if sparse without alot of work + if (!relax || !relax->M) { + continue; + } + + const vector &rowNames = relax->getRowNames(); + + nRowsR = relax->getNumRows(); + + if (m_param.LogDumpModel >= 2) { + int r; + + // os << (*mit).second.getBlockId(); + // os << " " << nRowsR << endl; + // for(r = 0; r < nRowsR; r++){ + // os << nRows + r << " "; + //} + // os << endl; + for (r = 0; r < nRowsR; r++) { + os << (*mit).second.getBlockId() << " " << rowNames[r] << endl; + } + } + + nRows += nRowsR; + + if (relax->isSparse()) { + CoinPackedMatrix *MDense = relax->sparseToOrigMatrix(); + assert(MDense); + M->bottomAppendPackedMatrix(*MDense); + UTIL_DELPTR(MDense); + } else { + M->bottomAppendPackedMatrix(*relax->M); + } + } + + if (m_param.LogDumpModel >= 2) { + os.close(); + } + + //--- + //--- set column and row bounds and objective coeffs + //--- + double *colLB = new double[nCols]; + double *colUB = new double[nCols]; + double *objCoeff = new double[nCols]; + double *rowLB = new double[nRows]; + double *rowUB = new double[nRows]; + assert(colLB && colUB && objCoeff && rowLB && rowUB); + memcpy(colLB, core->getColLB(), nCols * sizeof(double)); + memcpy(colUB, core->getColUB(), nCols * sizeof(double)); + memcpy(objCoeff, getOrigObjective(), nCols * sizeof(double)); + memcpy(rowLB, core->getRowLB(), nRowsC * sizeof(double)); + memcpy(rowUB, core->getRowUB(), nRowsC * sizeof(double)); + int rowIndex = nRowsC; + + for (mit = m_modelRelax.begin(); mit != m_modelRelax.end(); mit++) { + relax = (*mit).second.getModel(); + + if (!relax || !relax->M) { + continue; + } + + nRowsR = relax->getNumRows(); + memcpy(rowLB + rowIndex, relax->getRowLB(), nRowsR * sizeof(double)); + memcpy(rowUB + rowIndex, relax->getRowUB(), nRowsR * sizeof(double)); + rowIndex += nRowsR; + } + + //--- + //--- assign problem pointers to OSI object (OSI will delete this memory) + //--- + assert(M->getNumRows() == nRows); + assert(M->getNumCols() == nCols); + si->assignProblem(M, colLB, colUB, objCoeff, rowLB, rowUB); + + //--- + //--- set integer variables + //--- + //--- Due to a design issue with OsiCpx/Xxx, if we declare + //--- integers in the master formulation, we have issues retrieving + //--- any information. So, the master formulation must be the + //--- continuous relaxation (even in the standard CPM case). + //--- + //--- However, the CGL framework expects integer arguments. So, + //--- even in the CPM case (like the PC case) we are going to need + //--- to carry around a second copy of the core in an OSI object + //--- we call the cutgenSI. This will have to be kept up to date in + //--- both cases CPM and PC. This also is a problem for gomory cuts + //--- or any cut generator that depends on the LP solver specific + //--- information (like basis for gomory). + //--- + //--- TODO: we might be able to get around gomory in future by + //--- setting the basis in cutgenSI (from masterSI) during CPM and + //--- taking current recomposed point and doing a crossover to basis + //--- in PC case. 
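+  //--- (Aside: initSetup() passes doInt = true when building the
+  //---  cut-generator copy, loadSIFromModel(m_cutgenSI, true), precisely so
+  //---  the CGL generators see the integrality markers, while the master LP
+  //---  itself is kept continuous.)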
+ //--- + if (doInt) { + int nInts = core->getNumInts(); + + if (nInts > 0) { + si->setInteger(&core->getIntegerVars()[0], nInts); + } + } + + //--- + //--- set column and row names + //--- + si->setIntParam(OsiNameDiscipline, 1); // 1=Lazy + string objName = "objective"; + vector &colNames = core->colNames; + vector &rowNamesC = core->rowNames; + + if (colNames.size()) { + si->setColNames(colNames, 0, nCols, 0); + } + + if (rowNamesC.size()) { + si->setRowNames(rowNamesC, 0, nRowsC, 0); + } + + si->setObjName(objName); + rowIndex = nRowsC; + + for (mit = m_modelRelax.begin(); mit != m_modelRelax.end(); mit++) { + relax = (*mit).second.getModel(); + + if (!relax || !relax->M) { + continue; + } + + vector &rowNamesR = relax->rowNames; + nRowsR = relax->getNumRows(); + + if (rowNamesR.size()) { + si->setRowNames(rowNamesR, 0, nRowsR, rowIndex); + } + + rowIndex += nRowsR; + } + + UtilPrintFuncEnd(m_osLog, m_classTag, "loadSIFromModel()", + m_param.LogDebugLevel, 2); } //===========================================================================// -void DecompAlgo::getModelsFromApp() -{ - UtilPrintFuncBegin(m_osLog, m_classTag, - "getModelsFromApp()", m_param.LogDebugLevel, 2); - - //--- - //--- check to make sure certain things are set - //--- - if (!m_app->m_objective) - throw UtilException("Application objective has not been set", - "getModelsFromApp", "DecompAlgo"); - - if (!m_app->m_modelCore.getModel()) - throw UtilException("Application core constraint set has not been set", - "getModelsFromApp", "DecompAlgo"); - - m_objective = m_app->m_objective; - m_modelCore = m_app->m_modelCore; - map ::iterator mit; - map >::iterator mivt; - vector ::iterator vit; - - for (mit = m_app->m_modelRelax.begin(); - mit != m_app->m_modelRelax.end(); mit++) { - //--- - //--- this constructs a DecompSubModel from a DecompModel - //--- - DecompSubModel subModel = (*mit).second; - m_modelRelax.insert(make_pair((*mit).first, subModel)); - } - - for (mivt = m_app->m_modelRelaxNest.begin(); - mivt != m_app->m_modelRelaxNest.end(); mivt++) { - vector v; +void DecompAlgo::createMasterProblem(DecompVarList &initVars) { + //--- + //--- Initialize the solver interface for the master problem. + //--- + //--- For the master constraint system: + //--- m_modelCore contains [A'', b''], in terms of x. + //--- m_modelRelax contains [A', b'], and might contiain multiple blocks. + //--- + //--- For each block we must add a convexity constraint. Let K be the set + //--- of blocks. + //--- + //--- Notation: + //--- n = orig number of vars + //--- m'' = orig number of rows in A'', b'' + //--- |K| = the number of blocks that defines [A' b'] + //--- s = a solution (e.p.) to the relaxed problem, size (1xn) + //--- c = original cost, size (1xn) + //--- F'[k] = the current set of relaxed e.p.'s for block k in K + //--- a''[i,j] = entry at row i, column j for A'' matrix + //--- C = original set of columns (n = |C|) + //--- R'' = original set of rows in A'' (m''=|R''|) + //--- + //--- The Dantzig-Wolfe LP: + //--- + //--- min sum{k in K, s in F'[k]} + //--- sum{j in C}(c[j] * s[j]) * lambda[k][s] + //--- s.t. sum{k in K, s in F'[k]} + //--- sum{j in C}(a''[i,j] * s[j])* lambda[k][s] ~ b''[i], i in R'' + //--- sum{s in F'[k]} lambda[k][s] = 1, k in K + //--- lambda[k][s] >= 0, k in K, s in F'[k] + //--- + //--- NOTE: if 0 is feasible to subproblem, we can relax convexity to <= 1 + //--- + //--- + //--- Change for Phase I model. 
+ //--- Add a slack and/or surplus variable to each master constraint + //--- including the bounds for branching?? THINK... + //--- + //--- THINK: + //--- Do we bother removing these vars once feasible? What about the + //--- fact that adding cuts could once again cause infeasible.... + //--- + //--- What do we do after a branching? jump back to Phase I? + //--- + //--- + //--- Phase I: + //--- min sum{i in R''} (splus[i] + sminus[i]) + //--- + //--- Phase II: + //--- min sum{k in K, s in F'[k]} + //--- sum{j in C}(c[j] * s[j]) * lambda[k][s] + //--- + //--- s.t. sum{k in K, s in F'[k]} + //--- sum{j in C}(a''[i,j] * s[j])* lambda[k][s] + + //--- splus[i] - sminus[i] ~ b''[i], i in R'' + //--- sum{s in F'[k]} lambda[k][s] = 1, k in K + //--- lambda[k][s] >= 0, k in K, s in F'[k] + //--- splus[i] >= 0, i in R'' + //--- sminus[i] >= 0, i in R'' + //--- + UtilPrintFuncBegin(m_osLog, m_classTag, "createMasterProblem()", + m_param.LogDebugLevel, 2); + DecompConstraintSet *modelCore = m_modelCore.getModel(); + assert(modelCore); // TODO - can core be empty? + int r, startRow, endRow; + int nColsCore = modelCore->getNumCols(); + int nRowsCore = modelCore->getNumRows(); + int nIntVars = modelCore->getNumInts(); + int nInitVars = static_cast(initVars.size()); + assert(initVars.size() > 0); // TODO: this should be OK + double *dblArrNCoreCols = new double[nColsCore]; + assert(dblArrNCoreCols); + //--- + //--- TODO: + //--- MO vars do not need an explicit row in master even if + //--- BranchEnforceInMaster (these are enforced directly by + //--- MO column bounds) + //--- + //--- + //--- set the row counts + //--- + m_nRowsOrig = nRowsCore; + + if (m_param.BranchEnforceInMaster) { + m_nRowsBranch = 2 * nIntVars; + } else { + m_nRowsBranch = 0; + } + + m_nRowsConvex = m_numConvexCon; + m_nRowsCuts = 0; + //--- + //--- set the row types of the rows + //--- original rows, branch rows, convexity rows + //--- + UtilFillN(m_masterRowType, m_nRowsOrig, DecompRow_Original); + UtilFillN(m_masterRowType, m_nRowsBranch, DecompRow_Branch); + UtilFillN(m_masterRowType, m_nRowsConvex, DecompRow_Convex); + + //--- + //--- In order to implement simple branching, we are going to + //--- treat all column bounds as explicit constraints. Then branching + //--- for DW can be done in the same way it is done for regular CPM. + //--- We want to add these directly to the core so as to facilitate + //--- operations to expand rows. Basically, we treat these just like cuts. + //--- + if (m_param.BranchEnforceInMaster) { + coreMatrixAppendColBounds(); + } + + nRowsCore = modelCore->getNumRows(); + // THINK - what need this for? 
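+  // (Small worked example, assuming BranchEnforceInMaster is on: a core with
+  //  10 rows and 3 integer columns gets 2 * 3 = 6 bound rows appended by
+  //  coreMatrixAppendColBounds() above, so nBaseRowsOrig below stays 10
+  //  while nBaseRows becomes 16.)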
+ // number of original core rows + modelCore->nBaseRowsOrig = modelCore->nBaseRows; + // number of original core rows plus branching rows + modelCore->nBaseRows = modelCore->getNumRows(); + //--- + //--- create a matrix for the master LP + //--- make room for original rows, branching rows and convexity rows + //--- + int nRows = m_nRowsOrig + m_nRowsConvex; + + if (m_param.BranchEnforceInMaster) { + nRows += m_nRowsBranch; + } + + int nMOVars = static_cast(m_masterOnlyCols.size()); + int nColsMax = + nInitVars + 2 * (m_nRowsOrig + m_nRowsBranch + m_nRowsConvex) + nMOVars; + double *colLB = new double[nColsMax]; + double *colUB = new double[nColsMax]; + double *objCoeff = new double[nColsMax]; + double *denseCol = new double[nRows]; + CoinPackedMatrix *masterM = new CoinPackedMatrix(true, 0, 0); + vector colNames; + assert(colLB && colUB && objCoeff && denseCol && masterM); + //--- + //--- set the number of rows, we will add columns + //--- + masterM->setDimensions(nRows, 0); + //--- + //--- create artifical columns in master LP for: + //--- original rows + //--- branching rows + //--- convexity rows + //--- + startRow = 0; + endRow = m_nRowsOrig; + masterMatrixAddArtCols(masterM, colLB, colUB, objCoeff, colNames, startRow, + endRow, DecompRow_Original); + // create columns for master only variables + masterMatrixAddMOCols(masterM, colLB, colUB, objCoeff, colNames); + + if (m_nRowsBranch > 0) { + startRow = m_nRowsOrig; + endRow = m_nRowsOrig + m_nRowsBranch; + masterMatrixAddArtCols(masterM, colLB, colUB, objCoeff, colNames, startRow, + endRow, DecompRow_Branch); + } + + startRow = m_nRowsOrig + m_nRowsBranch; + endRow = m_nRowsOrig + m_nRowsBranch + m_nRowsConvex; + masterMatrixAddArtCols(masterM, colLB, colUB, objCoeff, colNames, startRow, + endRow, DecompRow_Convex); + int colIndex = 0; + int blockIndex = 0; + DecompVarList::iterator li; + + // TODO: + // this should be calling a function to add var to lp so don't dup code + // TODO: + // check for duplicates in initVars + for (li = initVars.begin(); li != initVars.end(); li++) { + //--- + //--- appending these variables (lambda) to end of matrix + //--- after the artificials + //--- + colIndex = masterM->getNumCols(); + m_colIndexUnique = colIndex; + //--- + //--- store the col index for this var in the master LP + //--- NOTE: if we remove columns, this will be wrong + //--- + (*li)->setColMasterIndex(colIndex); + //--- + //--- we expect the user to define the block id in the var object + //--- + blockIndex = (*li)->getBlockId(); + //--- + //--- give the column a name + //--- + string colName; + + if ((*li)->getVarType() == DecompVar_Point) { + // std::cout << "The generated variable type is " + // << DecompVar_Point << std::endl; + colName = "lam(c_" + UtilIntToStr(m_colIndexUnique) + ",b_" + + UtilIntToStr(blockIndex) + ")"; + } else if ((*li)->getVarType() == DecompVar_Ray) { + // std::cout << "The generated variable type is " + // << DecompVar_Ray << std::endl; + colName = "theta(c_" + UtilIntToStr(m_colIndexUnique) + ",b_" + + UtilIntToStr(blockIndex) + ")"; + } + + colNames.push_back(colName); + UTIL_DEBUG(m_param.LogDebugLevel, 5, + (*li)->print(m_infinity, m_osLog, m_app);); + //--- + //--- get dense column = A''s, append convexity constraint on end + //--- this memory is re-used, so be sure to clear out + //--- + // STOP: see addVarsToPool - here init, so no cuts to deal with + // but we don't use dense array here - make a difference? 
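+  // What is built here (informal sketch): for an extreme point s of block k,
+  // the master column is [ A''s ; e_k ], i.e. denseCol gets A''s in its
+  // first nRowsCore entries and a single 1.0 in the convexity row of block k
+  // (0.0 instead if the variable is a ray), matching the Dantzig-Wolfe
+  // master written out in the createMasterProblem() header comment.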
+ // modelCore->M->times((*li)->m_s, denseCol); + (*li)->fillDenseArr(modelCore->getNumCols(), dblArrNCoreCols); + modelCore->M->times(dblArrNCoreCols, denseCol); + UtilFillN(denseCol + nRowsCore, m_numConvexCon, 0.0); + assert(blockIndex >= 0); + assert(blockIndex < m_numConvexCon); + denseCol[nRowsCore + blockIndex] = 1.0; + + if ((*li)->getVarType() == DecompVar_Ray) { + denseCol[nRowsCore + blockIndex] = 0.0; + } + + //--- + //--- create a sparse column from the dense column + //--- + // THINK: do i need a DecompCol? + // THINK: does this allocate memory for coinpackedvec twice? + CoinPackedVector *sparseCol = UtilPackedVectorFromDense( + nRowsCore + m_numConvexCon, denseCol, m_param.TolZero); + UTIL_DEBUG(m_param.LogDebugLevel, 5, (*m_osLog) << "\nSparse Col: \n"; + UtilPrintPackedVector(*sparseCol, m_osLog);); + // TODO: check for duplicates (against m_vars) + // or force initVars to be sent in with no dups? + //--- + //--- append the sparse column to the matrix + //--- + masterM->appendCol(*sparseCol); + colLB[colIndex] = 0.0; + colUB[colIndex] = m_infinity; + objCoeff[colIndex] = 0.0; // PHASE I + //--- + //--- set master column type + //--- + m_masterColType.push_back(DecompCol_Structural); + //--- + //--- clean-up + //--- + UTIL_DELPTR(sparseCol); + } // END: for(li = initVars.begin(); li != initVars.end(); li++) + + //--- + //--- insert the initial set of variables into the master variable list + //--- + // THINK: now doing in loop, so can check for dups + appendVars(initVars); + //--- + //--- THINK: do we want to adjust modelCore directly here? + //--- adjust row bounds for convexity constraint + //--- + // TODO: in memory + double *zeroSol = new double[nColsCore]; + assert(zeroSol); + UtilFillN(zeroSol, nColsCore, 0.0); + // TODO - REVISIT - that's not the right check + // needs to be feasible to subproblem? 
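+  // Context for the TODO above: relaxing the convexity rows to <= 1 is only
+  // valid when the zero solution is feasible for every block subproblem (see
+  // the NOTE in the createMasterProblem() header comment); instead of
+  // detecting that, the code currently takes the user-supplied parameter
+  // MasterConvexityLessThan below at face value.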
+ // TODO: DETECT THIS + // bool isZeroFeas = isIPFeasible(zeroSol); + int isZeroFeas = m_param.MasterConvexityLessThan; + UTIL_DEBUG(m_param.LogDebugLevel, 5, + + if (isZeroFeas)(*m_osLog) + << "Zero Sol is Feasible - relax convexity con.\n";) { + ; + } + + //--- + //--- row bounds from core including + //--- original rows + //--- branching rows + //--- + vector masterRowLB(modelCore->rowLB); + vector masterRowUB(modelCore->rowUB); + + //--- + //--- row bounds for convexity constraints + //--- + if (isZeroFeas) { + for (r = 0; r < m_numConvexCon; r++) { + masterRowLB.push_back(-m_infinity); + masterRowUB.push_back(1.0); + } + } else { + for (r = 0; r < m_numConvexCon; r++) { + masterRowLB.push_back(1.0); + masterRowUB.push_back(1.0); + } + } + + //--- + //--- load the problem into master's solver interface + //--- + assert(masterM->getNumRows() == static_cast(masterRowLB.size())); + assert(masterM->getNumRows() == static_cast(masterRowUB.size())); + assert(masterM->getNumRows() == static_cast(m_masterRowType.size())); + assert(masterM->getNumCols() == static_cast(m_masterColType.size())); + m_masterSI->loadProblem(*masterM, colLB, colUB, objCoeff, &masterRowLB[0], + &masterRowUB[0]); + //--- + //--- load column and row names to OSI + //--- + int nRowNames = static_cast(modelCore->rowNames.size()); + int nColNames = static_cast(colNames.size()); + + if (nRowNames || nColNames) { + m_masterSI->setIntParam(OsiNameDiscipline, 2); // Full-Names + } + + if (nRowNames > 0) { + assert(nRowNames == modelCore->getNumRows()); + m_masterSI->setRowNames(modelCore->rowNames, 0, nRowNames, 0); + vector conRowNames; + + for (r = 0; r < m_numConvexCon; r++) { + string rowName = "conv(b_" + UtilIntToStr(r) + ")"; + conRowNames.push_back(rowName); + } + + m_masterSI->setRowNames(conRowNames, 0, m_numConvexCon, nRowNames); + string objName = "objective"; + m_masterSI->setObjName(objName); + } + + if (nColNames > 0) { + m_masterSI->setColNames(colNames, 0, nColNames, 0); + } + + // TODO: make a function + UTIL_DEBUG( + m_param.LogDebugLevel, 4, + + for (r = 0; r < m_masterSI->getNumRows(); r++) { + const string rowN = m_masterSI->getRowName(r); + (*m_osLog) << "Row[" << setw(4) << r << "] Name: " << setw(30) << rowN + << " Type: " << setw(20) + << DecompRowTypeStr[m_masterRowType[r]] << endl; + } for (int c = 0; c < m_masterSI->getNumCols(); c++) { + const string colN = m_masterSI->getColName(c); + (*m_osLog) << "Col[" << setw(4) << c << "] Name: " << setw(30) << colN + << " Type: " << setw(20) + << DecompColTypeStr[m_masterColType[c]] << endl; + }); + //--- + //--- reset unique col index id + //--- + m_colIndexUnique = masterM->getNumCols(); + //--- + //--- free local memory + //--- + UTIL_DELPTR(masterM); + UTIL_DELARR(denseCol); + UTIL_DELARR(colLB); + UTIL_DELARR(colUB); + UTIL_DELARR(objCoeff); + UTIL_DELARR(zeroSol); + UTIL_DELARR(dblArrNCoreCols); + UtilPrintFuncEnd(m_osLog, m_classTag, "createMasterProblem()", + m_param.LogDebugLevel, 2); +} - for (vit = (*mivt).second.begin(); - vit != (*mivt).second.end(); vit++) { - v.push_back(*vit); - } +//===========================================================================// - m_modelRelaxNest.insert(make_pair((*mivt).first, v)); - } +void DecompAlgo::masterMatrixAddMOCols(CoinPackedMatrix *masterM, double *colLB, + double *colUB, double *objCoeff, + vector &colNames) { + int nMOVars = static_cast(m_masterOnlyCols.size()); + + if (nMOVars <= 0) { + return; + } + + DecompConstraintSet *modelCore = m_modelCore.getModel(); + assert(modelCore); + 
assert(!modelCore->isSparse()); + const double *colLBCore = modelCore->getColLB(); + const double *colUBCore = modelCore->getColUB(); + const vector &colNamesCore = modelCore->getColNames(); + //--- + //--- add the submatrix for core rows cross master-only columns + //--- to the master formulation (this will be a col-ordered matrix) + //--- + const CoinPackedMatrix *matrixCore = modelCore->getMatrix(); + CoinPackedMatrix matrixCoreTmp(*matrixCore); + + if (!matrixCoreTmp.isColOrdered()) { + matrixCoreTmp.reverseOrdering(); + } + + //////STOP + const CoinPackedVectorBase **colBlock = + new const CoinPackedVectorBase *[nMOVars]; + + for (int i = 0; i < nMOVars; i++) { + CoinShallowPackedVector colS = + matrixCoreTmp.getVector(modelCore->getMasterOnlyCols()[i]); + CoinPackedVector *col = new CoinPackedVector( + colS.getNumElements(), colS.getIndices(), colS.getElements()); + colBlock[i] = col; + /* + for(int j = 0 ; j < colS.getNumElements(); j++){ + + std::cout << "The column vector of masterOnly " + << j << " contains " << j << " th element is " + << col->getElements()[j] << std::endl; + std::cout << "The index is " << col->getIndices()[j] + << std::endl; + + } + */ + } + + // todo - use ptrs, allocate only if need transpose + // CoinPackedMatrix matrixMO(matrixCoreTmp); + // matrixMO.setDimensions(matrixCore->getNumRows(), 0); + // this won't work - wind up with 3x3 vs 3cols x all rows in core + // need to construct manually + // use appendRows + // matrixMO.submatrixOfWithDuplicates(matrixCoreTmp, + // nMOVars, &m_masterOnlyCols[0]); + // assert(matrixMO.isColOrdered()); + // assert(masterM->isColOrdered()); + // masterM->majorAppendSameOrdered(matrixMO); + masterM->appendCols(nMOVars, colBlock); + //--- + //--- set master-onlys: lb, ub, obj, names + //--- + int j, k; + int nMasterCols = masterM->getNumCols(); + + for (int i = 0; i < nMOVars; i++) { + k = nMasterCols + i - nMOVars; + j = m_masterOnlyCols[i]; + colLB[k] = colLBCore[j]; + colUB[k] = colUBCore[j]; + objCoeff[k] = 0; + colNames.push_back(colNamesCore[j]); + m_masterColType.push_back(DecompCol_MasterOnly); + // m_masterColType.push_back(DecompCol_Structural_NoDelete); + m_masterOnlyColsMap.insert(make_pair(j, k)); + } + + // free local memory + for (int i = 0; i < nMOVars; i++) { + UTIL_DELPTR(colBlock[i]); + } + + UTIL_DELARR(colBlock); +} - UtilPrintFuncEnd(m_osLog, m_classTag, - "getModelsFromApp()", m_param.LogDebugLevel, 2); +//===========================================================================// +void DecompAlgo::masterMatrixAddArtCol(vector &colBeg, + vector &colInd, + vector &colVal, char LorG, + int rowIndex, int colIndex, + DecompColType colType, double &colLB, + double &colUB, double &objCoeff) { + // CoinPackedVector artCol; + // if(LorG == 'L') + // artCol.insert(rowIndex, -1.0); + // else + // artCol.insert(rowIndex, 1.0); + // masterM->appendCol(artCol); + colInd.push_back(rowIndex); + + if (LorG == 'L') { + colVal.push_back(-1.0); + } else { + colVal.push_back(1.0); + } + + colBeg.push_back(static_cast(colBeg.size())); + colLB = 0.0; + colUB = m_infinity; + objCoeff = 1.0; + m_masterColType.push_back(colType); + m_masterArtCols.push_back(colIndex); } //===========================================================================// -void DecompAlgo::loadSIFromModel(OsiSolverInterface* si, - bool doInt) -{ - //--- - //--- Initialize the solver interface. 
- //--- min c(x) - //--- A' x >= b' [optional] - //--- A''x >= b'' - //--- l <= x <= u - //--- - //--- relaxV contains [A', b' ] in terms of x (if explicit) - //--- core contains [A'', b''] in terms of x - //--- - UtilPrintFuncBegin(m_osLog, m_classTag, - "loadSIFromModel()", m_param.LogDebugLevel, 2); - DecompConstraintSet* core = m_modelCore.getModel(); - DecompConstraintSet* relax = NULL; - int nCols = core->getNumCols(); - int nRowsC = core->getNumRows(); - int nRowsR = 0; - int nRows = nRowsC; - //--- - //--- create matrix from core matrix - //--- - CoinPackedMatrix* M = new CoinPackedMatrix(*core->M); - assert(M); - //--- - //--- append to bottom the relax matrix/matrices - //--- create block file (for use in MILPBlock app) - //--- - ofstream os; - - if (m_param.LogDumpModel >= 2) { - os.open("blockFile.txt"); // or - } - - map::iterator mit; - - for (mit = m_modelRelax.begin(); mit != m_modelRelax.end(); mit++) { - relax = (*mit).second.getModel(); - - //TODO: for cut gen do we really want this model explicit?? - // currently cannot do if sparse without alot of work - if (!relax || !relax->M) { - continue; +void DecompAlgo::masterMatrixAddArtCols(CoinPackedMatrix *masterM, + double *colLB, double *colUB, + double *objCoeff, + vector &colNames, int startRow, + int endRow, DecompRowType rowType) { + //--- + //--- min sp + sm + //--- + //--- ax = b --> ax + sp - sm = b, sp >= 0, sm >= 0 + //--- ax <= b --> ax - sm <= b, sm >= 0 + //--- ax >= b --> ax + sp >= b, sp >= 0 + //--- + DecompConstraintSet *modelCore = m_modelCore.getModel(); + vector &rowSense = modelCore->rowSense; + vector &rowNames = modelCore->rowNames; + int nCoreRows = modelCore->getNumRows(); + bool hasNames = rowNames.empty() ? false : true; + int r, colIndex; + string colName, strIndex, colNameL, colNameG; + DecompColType colTypeL, colTypeG; + + switch (rowType) { + case DecompRow_Original: + colNameL = "sOL(c_"; + colNameG = "sOG(c_"; + colTypeL = DecompCol_ArtForRowL; + colTypeG = DecompCol_ArtForRowG; + break; + case DecompRow_Branch: + colNameL = "sBL(c_"; + colNameG = "sBG(c_"; + colTypeL = DecompCol_ArtForBranchL; + colTypeG = DecompCol_ArtForBranchG; + break; + case DecompRow_Convex: + colNameL = "sCL(c_"; + colNameG = "sCG(c_"; + colTypeL = DecompCol_ArtForConvexL; + colTypeG = DecompCol_ArtForConvexG; + break; + default: + throw UtilException("Bad row type", "masterMatrixAddArtCols", "DecompAlgo"); + } + + string rowNameR; + char rowSenseR; + colIndex = masterM->getNumCols(); + vector colBeg; + vector colInd; + vector colVal; + colBeg.push_back(0); + + for (r = startRow; r < endRow; r++) { + if (hasNames) { + strIndex = UtilIntToStr(colIndex); + } + + if (rowType == DecompRow_Convex) { + rowSenseR = 'E'; // NOTE: what if <=? 
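+      // Forcing 'E' here means the switch below adds both artificials (sCL
+      // and sCG) for every convexity row; the NOTE above presumably refers
+      // to the relaxed (<= 1) convexity variant handled in
+      // createMasterProblem().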
+ rowNameR = "convex(b_" + UtilIntToStr(r - nCoreRows) + ")"; + } else { + rowSenseR = rowSense[r]; + rowNameR = rowNames[r]; + } + + // printf("rowSense[%d]=%c\n", r, rowSense[r]); + switch (rowSenseR) { + case 'L': + masterMatrixAddArtCol(colBeg, colInd, colVal, 'L', r, colIndex, colTypeL, + colLB[colIndex], colUB[colIndex], + objCoeff[colIndex]); + + if (hasNames) { + colName = colNameL + strIndex + "_" + rowNameR + ")"; + colNames.push_back(colName); } - const vector& rowNames = relax->getRowNames(); + m_artColIndToRowInd.insert(make_pair(colIndex, r)); + colIndex++; + break; + case 'G': + masterMatrixAddArtCol(colBeg, colInd, colVal, 'G', r, colIndex, colTypeG, + colLB[colIndex], colUB[colIndex], + objCoeff[colIndex]); - nRowsR = relax->getNumRows(); + if (hasNames) { + colName = colNameG + strIndex + "_" + rowNameR + ")"; + colNames.push_back(colName); + } - if (m_param.LogDumpModel >= 2) { - int r; + m_artColIndToRowInd.insert(make_pair(colIndex, r)); + colIndex++; + break; + case 'E': + masterMatrixAddArtCol(colBeg, colInd, colVal, 'L', r, colIndex, colTypeL, + colLB[colIndex], colUB[colIndex], + objCoeff[colIndex]); - //os << (*mit).second.getBlockId(); - //os << " " << nRowsR << endl; - //for(r = 0; r < nRowsR; r++){ - // os << nRows + r << " "; - //} - //os << endl; - for (r = 0; r < nRowsR; r++) { - os << (*mit).second.getBlockId() - << " " << rowNames[r] << endl; - } + if (hasNames) { + colName = colNameL + strIndex + "_" + rowNameR + ")"; + colNames.push_back(colName); } - nRows += nRowsR; + m_artColIndToRowInd.insert(make_pair(colIndex, r)); + colIndex++; + masterMatrixAddArtCol(colBeg, colInd, colVal, 'G', r, colIndex, colTypeG, + colLB[colIndex], colUB[colIndex], + objCoeff[colIndex]); - if (relax->isSparse()) { - CoinPackedMatrix* MDense = relax->sparseToOrigMatrix(); - assert(MDense); - M->bottomAppendPackedMatrix(*MDense); - UTIL_DELPTR(MDense); - } else { - M->bottomAppendPackedMatrix(*relax->M); - } - } - - if (m_param.LogDumpModel >= 2) { - os.close(); - } - - //--- - //--- set column and row bounds and objective coeffs - //--- - double* colLB = new double[nCols]; - double* colUB = new double[nCols]; - double* objCoeff = new double[nCols]; - double* rowLB = new double[nRows]; - double* rowUB = new double[nRows]; - assert(colLB && colUB && objCoeff && rowLB && rowUB); - memcpy(colLB, core->getColLB(), nCols * sizeof(double)); - memcpy(colUB, core->getColUB(), nCols * sizeof(double)); - memcpy(objCoeff, getOrigObjective(), nCols * sizeof(double)); - memcpy(rowLB, core->getRowLB(), nRowsC * sizeof(double)); - memcpy(rowUB, core->getRowUB(), nRowsC * sizeof(double)); - int rowIndex = nRowsC; - - for (mit = m_modelRelax.begin(); mit != m_modelRelax.end(); mit++) { - relax = (*mit).second.getModel(); - - if (!relax || !relax->M) { - continue; + if (hasNames) { + colName = colNameG + strIndex + "_" + rowNameR + ")"; + colNames.push_back(colName); } - nRowsR = relax->getNumRows(); - memcpy(rowLB + rowIndex, relax->getRowLB(), nRowsR * sizeof(double)); - memcpy(rowUB + rowIndex, relax->getRowUB(), nRowsR * sizeof(double)); - rowIndex += nRowsR; - } - - //--- - //--- assign problem pointers to OSI object (OSI will delete this memory) - //--- - assert(M->getNumRows() == nRows); - assert(M->getNumCols() == nCols); - si->assignProblem(M, colLB, colUB, objCoeff, rowLB, rowUB); - - //--- - //--- set integer variables - //--- - //--- Due to a design issue with OsiCpx/Xxx, if we declare - //--- integers in the master formulation, we have issues retrieving - //--- any information. 
So, the master formulation must be the - //--- continuous relaxation (even in the standard CPM case). - //--- - //--- However, the CGL framework expects integer arguments. So, - //--- even in the CPM case (like the PC case) we are going to need - //--- to carry around a second copy of the core in an OSI object - //--- we call the cutgenSI. This will have to be kept up to date in - //--- both cases CPM and PC. This also is a problem for gomory cuts - //--- or any cut generator that depends on the LP solver specific - //--- information (like basis for gomory). - //--- - //--- TODO: we might be able to get around gomory in future by - //--- setting the basis in cutgenSI (from masterSI) during CPM and - //--- taking current recomposed point and doing a crossover to basis - //--- in PC case. - //--- - if (doInt) { - int nInts = core->getNumInts(); - - if (nInts > 0) { - si->setInteger(&core->getIntegerVars()[0], nInts); - } - } + m_artColIndToRowInd.insert(make_pair(colIndex, r)); + colIndex++; + break; + default: + throw UtilException( + "Range constraints are not yet supported. Please break up your range " + "constraints into two constraints.", + "masterMatrixAddArtCols", "DecompAlgo"); + } + } + + masterM->appendCols(static_cast(colBeg.size()) - 1, &colBeg[0], + &colInd[0], &colVal[0]); +} + +//===========================================================================// +void DecompAlgo::coreMatrixAppendColBounds() { + //--- + //--- In order to implement simple branching, we are going to + //--- treat all column bounds as explicit constraints. Then branching + //--- for DW can be done in the same way it is done for regular CPM. + //--- + //--- THINK: this needs some investigation. In some cases, this is not a + //--- great idea for performance. But, the advantage is in ease of + //--- implementation. The user does not need to do any sort of specialzed + //--- branching for DW. + //--- + //--- NOTE: this idea won't work for identical subproblem case + //--- + int i, j; + char sense; + double rhs; + bool doNames = true; // TODO: make an option + DecompConstraintSet *modelCore = m_modelCore.getModel(); + const int nIntVars = modelCore->getNumInts(); + const double *colLBCore = modelCore->getColLB(); + const double *colUBCore = modelCore->getColUB(); + const int *integerVars = modelCore->getIntegerVars(); + vector &colNames = modelCore->getColNamesMutable(); + vector &rowNames = modelCore->getRowNamesMutable(); + // TODO: use mem pool? or just create block (identity) if doing PC? 
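+  // Shape of the appended block (informal sketch): one row per integer
+  // column and per bound, each with a single +1.0 coefficient. For integer
+  // columns {2, 5} the four appended rows would read
+  //    x2 <= u2,  x5 <= u5,  x2 >= l2,  x5 >= l5
+  // which is why numRows below is 2 * nIntVars.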
+ const int numRows = 2 * nIntVars; + int *rowStarts = new int[numRows + 1]; + int *rowInd = new int[numRows]; + double *rowEls = new double[numRows]; + assert(rowStarts && rowInd && rowEls); + //--- + //--- first nColsCore rows are x <= u + //--- second nColsCore rows are x >= l + //--- + rowStarts[0] = 0; + + for (i = 0; i < numRows; i++) { + if (i < nIntVars) { + j = integerVars[i]; + // x <= u + rowStarts[i + 1] = rowStarts[i] + 1; + rowInd[i] = j; + rowEls[i] = 1.0; + } else { + // x >= l + j = integerVars[i - nIntVars]; + rowStarts[i + 1] = rowStarts[i] + 1; + rowInd[i] = j; + rowEls[i] = 1.0; + } + } + + //--- + //--- append as actual rows to A'' (duals used in pricing) + //--- + modelCore->M->appendRows(numRows, rowStarts, rowInd, rowEls); + + //--- + //--- now convert to sense for hashing + //--- + for (i = 0; i < numRows; i++) { + if (i < nIntVars) { + // x <= u + j = modelCore->integerVars[i]; + modelCore->rowLB.push_back(-m_infinity); + modelCore->rowUB.push_back(colUBCore[j]); + sense = 'L'; + rhs = colUBCore[j]; + + if (doNames) { + string rowName = "ub(" + colNames[j] + ")"; + rowNames.push_back(rowName); + } + } else { + // x >= l + j = modelCore->integerVars[i - nIntVars]; + modelCore->rowLB.push_back(colLBCore[j]); + modelCore->rowUB.push_back(m_infinity); + sense = 'G'; + rhs = colLBCore[j]; + + if (doNames) { + string rowName = "lb(" + colNames[j] + ")"; + rowNames.push_back(rowName); + } + } + + modelCore->rowRhs.push_back(rhs); + modelCore->rowSense.push_back(sense); + assert(sense != 'R'); + assert(sense != 'N'); + string rowHash = + UtilCreateStringHash(1, rowInd + i, rowEls + i, sense, rhs, m_infinity); + modelCore->rowHash.push_back(rowHash); + } + + UTIL_DELARR(rowStarts); + UTIL_DELARR(rowInd); + UTIL_DELARR(rowEls); +} - //--- - //--- set column and row names - //--- - si->setIntParam(OsiNameDiscipline, 1);//1=Lazy - string objName = "objective"; - vector& colNames = core->colNames; - vector& rowNamesC = core->rowNames; +//===========================================================================// +void DecompAlgo::breakOutPartial(const double *xHat, DecompVarList &newVars, + const double intTol) { + if (m_numConvexCon <= 1) { + return; + } + + UtilPrintFuncBegin(m_osLog, m_classTag, "breakOutPartial()", + m_param.LogDebugLevel, 1); + // TODO: what if modelRelax is not defined? 
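+  // In short (summary of the loop below): for every block whose active
+  // integer columns are all integral in xHat, the nonzero entries of xHat
+  // restricted to that block's active columns are packaged as a new
+  // DecompVar for that block and collected in newVars as candidate master
+  // columns.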
+ // TODO: if lambda=1, don't bother, it means the partial + // is already there + DecompConstraintSet *modelCore = m_modelCore.getModel(); + const char *integerMark = modelCore->getIntegerMark(); + //--- + //--- for each block, check to see if active integer columns + //--- are integral - if so, use these as candidate columns + //--- + const double *objCoeff = getOrigObjective(); + map::iterator mit; + vector::const_iterator vit; + + for (mit = m_modelRelax.begin(); mit != m_modelRelax.end(); mit++) { + DecompSubModel &subModel = (*mit).second; + DecompConstraintSet *model = subModel.getModel(); + int b = subModel.getBlockId(); + const vector &activeCols = model->getActiveColumns(); + bool blockFeasible = true; + + for (vit = activeCols.begin(); vit != activeCols.end(); vit++) { + if (integerMark[*vit] != 'I') { + continue; + } + + if (!(UtilIsIntegral(xHat[*vit], intTol))) { + blockFeasible = false; + break; + } + } + + if (blockFeasible) { + vector ind; + vector els; + double origCost = 0.0; - if (colNames.size()) { - si->setColNames(colNames, 0, nCols, 0); - } + for (vit = activeCols.begin(); vit != activeCols.end(); vit++) { + if (!UtilIsZero(xHat[*vit])) { + ind.push_back(*vit); + els.push_back(xHat[*vit]); + origCost += objCoeff[*vit]; + } + } - if (rowNamesC.size()) { - si->setRowNames(rowNamesC, 0, nRowsC, 0); - } + if (ind.size() > 0) { // THINK: allow 0-cols?? + DecompVar *var = new DecompVar(ind, els, -1.0, origCost); + var->setBlockId(b); + newVars.push_back(var); + } + } + } - si->setObjName(objName); - rowIndex = nRowsC; + // printf("newVars = %d\n", newVars.size()); + UtilPrintFuncEnd(m_osLog, m_classTag, "breakOutPartial()", + m_param.LogDebugLevel, 1); +} - for (mit = m_modelRelax.begin(); mit != m_modelRelax.end(); mit++) { - relax = (*mit).second.getModel(); +//===========================================================================// +DecompStatus DecompAlgo::processNode(const AlpsDecompTreeNode *node, + const double globalLB, + const double globalUB) { + if (node == NULL) { + throw UtilException("NULL node being processed.", "processNode", + "DecompAlgo"); + } + + m_curNode = node; + int nodeIndex = node->getIndex(); + double mostNegRC = 0.0; + DecompConstraintSet *modelCore = m_modelCore.getModel(); + m_stabEpsilon = 0.0; + UtilPrintFuncBegin(m_osLog, m_classTag, "processNode()", + m_param.LogDebugLevel, 1); + + if (m_algo == RELAX_AND_CUT) { + throw UtilException( + "In this version of DIP, Relax and Cut is currently disabled.", + "processNode", "DecompAlgo"); + } + + //--- + //--- print the global gap + //--- + UTIL_MSG(m_param.LogLevel, 2, + double gap = UtilCalculateGap(globalLB, globalUB, m_infinity); + (*m_osLog) << "Process Node " << nodeIndex + << " (algo = " << DecompAlgoStr[m_algo] + << ", phaseLast = " << DecompPhaseStr[m_phaseLast] + << ") gLB = " << UtilDblToStr(globalLB) + << " gUB = " << UtilDblToStr(globalUB) + << " gap = " << UtilDblToStr(gap, 5) << " time = " + << UtilDblToStr(globalTimer.getRealTime(), 3) << endl;); + //--- + //--- init status + //--- + m_useInitLpDuals = true; + m_status = STAT_UNKNOWN; + m_globalLB = globalLB; + m_globalUB = globalUB; + + //--- + //--- check solveMasterAsMip setting + //--- on by default, but if only one block, turn off + //--- + if (m_numConvexCon == 1) { + m_param.SolveMasterAsMip = 0; + } + + //--- + //--- if problem is a pure LP, set MasterGapLimit = 1.e-8 + //--- + if (modelCore->integerVars.size() == 0) { + m_param.MasterGapLimit = 1.0e-8; + UTIL_MSG(m_param.LogLevel, 1, + (*m_osLog) << "Problem is an 
LP. Reset param MasterGapLimit = " + << m_param.MasterGapLimit << endl;); + } + + //--- + //--- init stats and timer + //--- + m_stats.timerDecomp.reset(); + m_nodeStats.init(); + m_nodeStats.nodeIndex = nodeIndex; + // NOTE: changed on 5/25/2010 + // if we use the parent LB, then stabilized won't + // move until much later + // does this change effect anything else? wrt to short + // cutting and fathoming - check this + // you also have to watch for tailoff - if you set to + // parent obj and it takes a while to get there, then + // it will look like it is tailing off and you might stop + // short + // m_nodeStats.objBest.first = globalLB; + // if(m_param.DualStab) + m_nodeStats.objBest.first = -m_infinity; + // else + // m_nodeStats.objBest.first = globalLB; + m_nodeStats.objBest.second = globalUB; + m_compressColsLastPrice = 0; + m_compressColsLastNumCols = m_masterSI->getNumCols(); + m_phaseIObj.clear(); + //--- + //--- get initial phase + //--- + //--- CPM <-- CUT + //--- PC (node = 0) <-- PRICEI + //--- (node > 0) <-- + //--- + m_firstPhase2Call = false; + phaseInit(m_phaseLast); + m_phase = m_phaseLast; + + //--- + //--- it is possible that phaseInit can find + //--- the node infeasible + //--- + if (m_phase == PHASE_DONE) { + m_status = STAT_INFEASIBLE; + } else { + // TODO: put sb candidate id in name of file + if (m_param.LogDumpModel > 1) { + string baseName = "masterProb"; - if (!relax || !relax->M) { - continue; + if (m_isStrongBranch) { + baseName += "_SB"; } - vector& rowNamesR = relax->rowNames; - nRowsR = relax->getNumRows(); + printCurrentProblem(m_masterSI, baseName, m_nodeStats.nodeIndex, + m_nodeStats.cutCallsTotal, + m_nodeStats.priceCallsTotal); + } + + //--- + //--- find the initial solution (dual and/or primal) + //--- + m_status = solutionUpdate(m_phase, true); + } + + if (m_status != STAT_INFEASIBLE) { + // for CPM, can't this just access from m_masterSI? + recomposeSolution(getMasterPrimalSolution(), m_xhat); + UTIL_DEBUG(m_param.LogDebugLevel, 4, + m_app->printOriginalSolution(modelCore->getNumCols(), + modelCore->getColNames(), m_xhat);); + + // TODO: solution pool? + // TODO: check if this is IP feasible + // make that a function + if (isIPFeasible(m_xhat)) { + if (m_app->APPisUserFeasible(m_xhat, modelCore->getNumCols(), + m_param.TolZero)) { + // printf("m_xhat is APP FEASIBLE, m_xhatIPFeas size = %d\n", + // (int)m_xhatIPFeas.size()); + // check for dup sol + bool isDup = m_xhatIPFeas.size() > 0 ? true : false; + vector::iterator vit; + + for (vit = m_xhatIPFeas.begin(); vit != m_xhatIPFeas.end(); vit++) { + const DecompSolution *xhatIPFeas = *vit; + const double *values = xhatIPFeas->getValues(); + + for (int c = 0; c < modelCore->getNumCols(); c++) { + if (!UtilIsZero(values[c] - m_xhat[c])) { + isDup = false; + break; + } + } + } + + if (isDup) { + // printf("IS DUP, not pushing\n"); + } else { + DecompSolution *decompSol = new DecompSolution( + modelCore->getNumCols(), m_xhat, getOrigObjective()); + // getMasterObjValue()); + // solution pool? 
+ m_xhatIPFeas.push_back(decompSol); + // printf("m_xhatIPFeas size = %d\n", + // (int)m_xhatIPFeas.size()); + } + } + + vector::iterator vi; + DecompSolution *viBest = NULL; + double bestBoundUB = m_nodeStats.objBest.second; - if (rowNamesR.size()) { - si->setRowNames(rowNamesR, 0, nRowsR, rowIndex); - } + for (vi = m_xhatIPFeas.begin(); vi != m_xhatIPFeas.end(); vi++) { + const DecompSolution *xhatIPFeas = *vi; + + if (xhatIPFeas->getQuality() <= bestBoundUB) { + bestBoundUB = xhatIPFeas->getQuality(); + viBest = *vi; + } + } + + if (viBest) { + // save the best + setObjBoundIP(bestBoundUB); + m_xhatIPBest = viBest; + } + } + + // for CPM, dont' we need to update obj lb here? in case that no cuts + // are found, then node is done and we need to update boundx + if (m_algo == CUT) { + updateObjBound(); + } + } + + //--- + //--- main processing loop + //--- + while (m_phase != PHASE_DONE) { + // TODO: LP only? + UTIL_MSG( + m_param.LogLevel, 2, double lpGap = getNodeLPGap(); + double ipGap = getNodeIPGap(); + int nHistorySize = static_cast(m_nodeStats.objHistoryBound.size()); + + if (nHistorySize > 0) { + DecompObjBound &objBound = + m_nodeStats.objHistoryBound[nHistorySize - 1]; + (*m_osLog) << setiosflags(ios::right); + (*m_osLog) << "Processing Node " << setw(3) << nodeIndex + << " algo= " << setw(13) << DecompAlgoStr[m_algo] + << " phase= " << setw(12) << DecompPhaseStr[m_phase] + << " c= " << setw(4) << m_nodeStats.cutCallsTotal + << " p= " << setw(4) << m_nodeStats.priceCallsTotal + << " LB= " << setw(10) + << UtilDblToStr(objBound.thisBound, 3) + << " UB= " << setw(10) + << UtilDblToStr(objBound.thisBoundUB, 3) + << " nodeLB= " << setw(10) + << UtilDblToStr(m_nodeStats.objBest.first, 3) + << " gLB= " << setw(10) << UtilDblToStr(m_globalLB, 3) + << " gUB= " << setw(10) + << UtilDblToStr(m_nodeStats.objBest.second, 3) + << " lpGap= " << setw(10) << UtilDblToStr(lpGap, 3) + << " ipGap= " << setw(10) << UtilDblToStr(ipGap, 3) + << " time= " << setw(10) + << UtilDblToStr(globalTimer.getCpuTime(), 2) << endl; + } else { + // TODO + }); + //--- + //--- update the phase based on parms, status and current phase + //--- + phaseUpdate(m_phase, m_status); + + //--- + //--- check if we have exceeded time + //--- THINK: should this check be in phaseUpdate? + //--- + if (m_stats.timerOverall.isPast(m_param.TimeLimit)) { + UTIL_MSG(m_param.LogLevel, 2, + (*m_osLog) << "Node " << nodeIndex + << " process stopping on time." << endl;); + m_stopCriteria = DecompStopTime; + m_phase = PHASE_DONE; + } + + //--- + //--- if the lower bound meets the global ub, we are done + //--- careful here - do NOT do this check in phase1 since + //--- ub is based on original objective while lb is based + //--- on phase 1 objective + //--- + //--- TOOD: seems confusing to store bounds from different objectives + //--- in the same structure - maybe should use m_nodeStats1/2 + //--- + //--- TKR (8/20/19): removed tolerance used on comparison below to be + //--- consistent with seemingly duplicative check in + //--- AlpsDecompTreeNode::process(). TODO: determine + //--- whether we need a tolerance here and whether the + //--- the duplicate checks can/should be eliminated. + if (m_phase != PHASE_PRICE1 && + (m_nodeStats.objBest.first >= (m_nodeStats.objBest.second))) { + UTIL_MSG( + m_param.LogLevel, 2, + (*m_osLog) << "Node " << nodeIndex << " process stopping on bound." + << " This LB= " << UtilDblToStr(m_nodeStats.objBest.first) + << " Global UB= " + << UtilDblToStr(m_nodeStats.objBest.second) << "." 
+ << endl;); + m_stopCriteria = DecompStopBound; + m_phase = PHASE_DONE; + } + + if (m_phase == PHASE_DONE) { + break; + } - rowIndex += nRowsR; - } + bool isGapTight = false; + DecompVarList newVars; + DecompCutList newCuts; - UtilPrintFuncEnd(m_osLog, m_classTag, - "loadSIFromModel()", m_param.LogDebugLevel, 2); -} + switch (m_phase) { + case PHASE_PRICE1: + case PHASE_PRICE2: { + m_nodeStats.priceCallsRound++; + m_nodeStats.priceCallsTotal++; -//===========================================================================// -void DecompAlgo::createMasterProblem(DecompVarList& initVars) -{ - //--- - //--- Initialize the solver interface for the master problem. - //--- - //--- For the master constraint system: - //--- m_modelCore contains [A'', b''], in terms of x. - //--- m_modelRelax contains [A', b'], and might contiain multiple blocks. - //--- - //--- For each block we must add a convexity constraint. Let K be the set - //--- of blocks. - //--- - //--- Notation: - //--- n = orig number of vars - //--- m'' = orig number of rows in A'', b'' - //--- |K| = the number of blocks that defines [A' b'] - //--- s = a solution (e.p.) to the relaxed problem, size (1xn) - //--- c = original cost, size (1xn) - //--- F'[k] = the current set of relaxed e.p.'s for block k in K - //--- a''[i,j] = entry at row i, column j for A'' matrix - //--- C = original set of columns (n = |C|) - //--- R'' = original set of rows in A'' (m''=|R''|) - //--- - //--- The Dantzig-Wolfe LP: - //--- - //--- min sum{k in K, s in F'[k]} - //--- sum{j in C}(c[j] * s[j]) * lambda[k][s] - //--- s.t. sum{k in K, s in F'[k]} - //--- sum{j in C}(a''[i,j] * s[j])* lambda[k][s] ~ b''[i], i in R'' - //--- sum{s in F'[k]} lambda[k][s] = 1, k in K - //--- lambda[k][s] >= 0, k in K, s in F'[k] - //--- - //--- NOTE: if 0 is feasible to subproblem, we can relax convexity to <= 1 - //--- - //--- - //--- Change for Phase I model. - //--- Add a slack and/or surplus variable to each master constraint - //--- including the bounds for branching?? THINK... - //--- - //--- THINK: - //--- Do we bother removing these vars once feasible? What about the - //--- fact that adding cuts could once again cause infeasible.... - //--- - //--- What do we do after a branching? jump back to Phase I? - //--- - //--- - //--- Phase I: - //--- min sum{i in R''} (splus[i] + sminus[i]) - //--- - //--- Phase II: - //--- min sum{k in K, s in F'[k]} - //--- sum{j in C}(c[j] * s[j]) * lambda[k][s] - //--- - //--- s.t. sum{k in K, s in F'[k]} - //--- sum{j in C}(a''[i,j] * s[j])* lambda[k][s] + - //--- splus[i] - sminus[i] ~ b''[i], i in R'' - //--- sum{s in F'[k]} lambda[k][s] = 1, k in K - //--- lambda[k][s] >= 0, k in K, s in F'[k] - //--- splus[i] >= 0, i in R'' - //--- sminus[i] >= 0, i in R'' - //--- - UtilPrintFuncBegin(m_osLog, m_classTag, - "createMasterProblem()", m_param.LogDebugLevel, 2); - DecompConstraintSet* modelCore = m_modelCore.getModel(); - assert(modelCore); //TODO - can core be empty? 
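// [Editorial sketch - not part of this patch or of DIP's sources.]
// With the Dantzig-Wolfe master described in the removed comment block above,
// the pricing phases (PHASE_PRICE1/2) ask each block k for a proposal s that
// minimizes the reduced cost (c - u'A'')s - alpha_k, where u are the duals of
// the coupling rows and alpha_k the dual of block k's convexity row. A dense,
// self-contained sketch of that evaluation follows; names are hypothetical,
// DIP itself does this inside generateVars and the block subproblem solvers.
#include <vector>

// Reduced cost of a proposal s from block k:
//   rc = sum_j (c[j] - sum_i u[i]*App[i][j]) * s[j] - alphaK
// (for a ray column the -alphaK term is dropped).
static double reducedCost(const std::vector<double>&              c,      // original objective
                          const std::vector<std::vector<double>>& App,    // coupling rows A'' (dense, m'' x n)
                          const std::vector<double>&              u,      // duals of coupling rows
                          double                                  alphaK, // dual of block k's convexity row
                          const std::vector<double>&              s)      // proposal from block k
{
   double rc = -alphaK;
   for (size_t j = 0; j < s.size(); ++j) {
      double redObj = c[j];
      for (size_t i = 0; i < App.size(); ++i) {
         redObj -= u[i] * App[i][j];
      }
      rc += redObj * s[j];
   }
   return rc;   // a column is worth adding when rc < 0 (minimization)
}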
- int r, startRow, endRow; - int nColsCore = modelCore->getNumCols(); - int nRowsCore = modelCore->getNumRows(); - int nIntVars = modelCore->getNumInts(); - int nInitVars = static_cast(initVars.size()); - assert(initVars.size() > 0);//TODO: this should be OK - double* dblArrNCoreCols = new double[nColsCore]; - assert(dblArrNCoreCols); - //--- - //--- TODO: - //--- MO vars do not need an explicit row in master even if - //--- BranchEnforceInMaster (these are enforced directly by - //--- MO column bounds) - //--- - //--- - //--- set the row counts - //--- - m_nRowsOrig = nRowsCore; - - if (m_param.BranchEnforceInMaster) { - m_nRowsBranch = 2 * nIntVars; - } else { - m_nRowsBranch = 0; - } - - m_nRowsConvex = m_numConvexCon; - m_nRowsCuts = 0; - //--- - //--- set the row types of the rows - //--- original rows, branch rows, convexity rows - //--- - UtilFillN(m_masterRowType, m_nRowsOrig, DecompRow_Original); - UtilFillN(m_masterRowType, m_nRowsBranch, DecompRow_Branch); - UtilFillN(m_masterRowType, m_nRowsConvex, DecompRow_Convex); - - //--- - //--- In order to implement simple branching, we are going to - //--- treat all column bounds as explicit constraints. Then branching - //--- for DW can be done in the same way it is done for regular CPM. - //--- We want to add these directly to the core so as to facilitate - //--- operations to expand rows. Basically, we treat these just like cuts. - //--- - if (m_param.BranchEnforceInMaster) { - coreMatrixAppendColBounds(); - } - - nRowsCore = modelCore->getNumRows(); - //THINK - what need this for? - //number of original core rows - modelCore->nBaseRowsOrig = modelCore->nBaseRows; - //number of original core rows plus branching rows - modelCore->nBaseRows = modelCore->getNumRows(); - //--- - //--- create a matrix for the master LP - //--- make room for original rows, branching rows and convexity rows - //--- - int nRows = m_nRowsOrig + m_nRowsConvex; - - if (m_param.BranchEnforceInMaster) { - nRows += m_nRowsBranch; - } - - int nMOVars = static_cast(m_masterOnlyCols.size()); - int nColsMax = nInitVars - + 2 * (m_nRowsOrig + m_nRowsBranch + m_nRowsConvex) - + nMOVars; - double* colLB = new double[nColsMax]; - double* colUB = new double[nColsMax]; - double* objCoeff = new double[nColsMax]; - double* denseCol = new double[nRows]; - CoinPackedMatrix* masterM = new CoinPackedMatrix(true, 0, 0); - vector colNames; - assert(colLB && colUB && objCoeff && denseCol && masterM); - //--- - //--- set the number of rows, we will add columns - //--- - masterM->setDimensions(nRows, 0); - //--- - //--- create artifical columns in master LP for: - //--- original rows - //--- branching rows - //--- convexity rows - //--- - startRow = 0; - endRow = m_nRowsOrig; - masterMatrixAddArtCols(masterM, - colLB, - colUB, - objCoeff, - colNames, - startRow, endRow, DecompRow_Original); - // create columns for master only variables - masterMatrixAddMOCols(masterM, - colLB, - colUB, - objCoeff, - colNames); - - if (m_nRowsBranch > 0) { - startRow = m_nRowsOrig; - endRow = m_nRowsOrig + m_nRowsBranch; - masterMatrixAddArtCols(masterM, - colLB, - colUB, - objCoeff, - colNames, - startRow, endRow, DecompRow_Branch); - } - - startRow = m_nRowsOrig + m_nRowsBranch; - endRow = m_nRowsOrig + m_nRowsBranch + m_nRowsConvex; - masterMatrixAddArtCols(masterM, - colLB, - colUB, - objCoeff, - colNames, - startRow, endRow, DecompRow_Convex); - int colIndex = 0; - int blockIndex = 0; - DecompVarList::iterator li; - - //TODO: - // this should be calling a function to add var to lp so don't dup 
code - //TODO: - // check for duplicates in initVars - for (li = initVars.begin(); li != initVars.end(); li++) { - //--- - //--- appending these variables (lambda) to end of matrix - //--- after the artificials - //--- - colIndex = masterM->getNumCols(); - m_colIndexUnique = colIndex; - //--- - //--- store the col index for this var in the master LP - //--- NOTE: if we remove columns, this will be wrong - //--- - (*li)->setColMasterIndex(colIndex); - //--- - //--- we expect the user to define the block id in the var object - //--- - blockIndex = (*li)->getBlockId(); //--- - //--- give the column a name + //--- after adding some rows, the columns in the var pool + //--- might no longer be valid, so we need to re-expand everything //--- - string colName; - - if ((*li)->getVarType() == DecompVar_Point) { - // std::cout << "The generated variable type is " - // << DecompVar_Point << std::endl; - colName = "lam(c_" + UtilIntToStr(m_colIndexUnique) - + ",b_" + UtilIntToStr(blockIndex) + ")"; - } else if ((*li)->getVarType() == DecompVar_Ray) { - // std::cout << "The generated variable type is " - // << DecompVar_Ray << std::endl; - colName = "theta(c_" + UtilIntToStr(m_colIndexUnique) - + ",b_" + UtilIntToStr(blockIndex) + ")"; + if (m_varpool.size() > 0) { + if (!m_varpool.colsAreValid()) { + UTIL_MSG(m_param.LogDebugLevel, 3, + (*m_osLog) << "EXPANDING varpool.\n";); + m_varpool.reExpand(*modelCore, m_param.TolZero); + } + + //--- + //--- THINK.... + //--- + if (m_status == STAT_FEASIBLE) { + m_varpool.setReducedCosts(getMasterDualSolution(), m_status); + } else { + // if doing RC, never called?? + const double *u = getDualRays(1)[0]; + m_varpool.setReducedCosts(u, m_status); + UTIL_DELARR(u); + } } - colNames.push_back(colName); - UTIL_DEBUG(m_param.LogDebugLevel, 5, - (*li)->print(m_infinity, m_osLog, m_app); - ); //--- - //--- get dense column = A''s, append convexity constraint on end - //--- this memory is re-used, so be sure to clear out + //--- attempt to generate some new variables with rc < 0 //--- - //STOP: see addVarsToPool - here init, so no cuts to deal with - // but we don't use dense array here - make a difference? 
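// [Editorial sketch - not part of this patch or of DIP's sources.]
// Each accepted proposal becomes a master column whose entries are A''*s for
// the coupling rows plus a single coefficient in its block's convexity row:
// 1 for an extreme point (the lam(...) columns named above), 0 for a ray
// (the theta(...) columns). A dense sketch of that construction follows;
// names are hypothetical, DIP builds the same thing with
// CoinPackedMatrix::times and a dense-to-sparse conversion.
#include <vector>

// Build the dense master column for proposal s of block 'blockId'.
// Layout: first the m'' coupling-row entries (A'' * s), then one entry per
// block for the convexity rows.
static std::vector<double> masterColumn(const std::vector<std::vector<double>>& App, // A'', m'' x n
                                        const std::vector<double>&              s,   // proposal, size n
                                        int  numBlocks,
                                        int  blockId,
                                        bool isRay)
{
   std::vector<double> col(App.size() + numBlocks, 0.0);
   for (size_t i = 0; i < App.size(); ++i) {
      for (size_t j = 0; j < s.size(); ++j) {
         col[i] += App[i][j] * s[j];                 // coupling-row coefficients
      }
   }
   if (!isRay) {
      col[App.size() + blockId] = 1.0;               // extreme point enters its convexity row
   }                                                 // rays keep 0 in the convexity row
   return col;
}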
- //modelCore->M->times((*li)->m_s, denseCol); - (*li)->fillDenseArr(modelCore->getNumCols(), - dblArrNCoreCols); - modelCore->M->times(dblArrNCoreCols, denseCol); - UtilFillN(denseCol + nRowsCore, m_numConvexCon, 0.0); - assert(blockIndex >= 0); - assert(blockIndex < m_numConvexCon); - denseCol[nRowsCore + blockIndex] = 1.0; + mostNegRC = 0.0; + m_nodeStats.varsThisCall = generateVars(newVars, mostNegRC); + m_nodeStats.varsThisRound += m_nodeStats.varsThisCall; + m_nodeStats.cutsThisCall = 0; + map::iterator mit; - if ((*li)->getVarType() == DecompVar_Ray) { - denseCol[nRowsCore + blockIndex] = 0.0; + for (mit = m_modelRelax.begin(); mit != m_modelRelax.end(); mit++) { + (*mit).second.setCounter((*mit).second.getCounter() + 1); } + // Store the m_numCols and use it in updateObjBound function + m_numCols = m_masterSI->getNumCols(); + + if (m_isColGenExact && m_rrIterSinceAll == 0 && + m_status == STAT_FEASIBLE) { + isGapTight = updateObjBound(mostNegRC); + } + + if (m_nodeStats.varsThisCall > 0) { + //--- + //--- add the newly generated variables to the var pool + //--- + addVarsToPool(newVars); + //--- + //--- add variables from the variable pool to the master problem + //--- + addVarsFromPool(); + } + + // printf("m_isColGenExact = %d\n", m_isColGenExact); + // printf("m_rrIterSinceAll = %d\n", m_rrIterSinceAll); + // printf("m_status = %d\n", m_status); + // TODO: don't need check m_isColGenExact if we + // use LB's in mostNegRC (rather than varRedCost)... + + /*if(m_isColGenExact && + m_rrIterSinceAll == 0 && + m_status == STAT_FEASIBLE && + m_phase == PHASE_PRICE2) + isGapTight = updateObjBoundLB(mostNegRC);*/ + //--- - //--- create a sparse column from the dense column - //--- - // THINK: do i need a DecompCol? - // THINK: does this allocate memory for coinpackedvec twice? - CoinPackedVector* sparseCol - = UtilPackedVectorFromDense(nRowsCore + m_numConvexCon, - denseCol, m_param.TolZero); - UTIL_DEBUG(m_param.LogDebugLevel, 5, - (*m_osLog) << "\nSparse Col: \n"; - UtilPrintPackedVector(*sparseCol, m_osLog); - ); - //TODO: check for duplicates (against m_vars) - // or force initVars to be sent in with no dups? - //--- - //--- append the sparse column to the matrix - //--- - masterM->appendCol(*sparseCol); - colLB[colIndex] = 0.0; - colUB[colIndex] = m_infinity; - objCoeff[colIndex] = 0.0; //PHASE I - //--- - //--- set master column type - //--- - m_masterColType.push_back(DecompCol_Structural); - //--- - //--- clean-up + //--- update stab parameters delta=duals, epsilon reduced //--- - UTIL_DELPTR(sparseCol); - } //END: for(li = initVars.begin(); li != initVars.end(); li++) - - //--- - //--- insert the initial set of variables into the master variable list - //--- - //THINK: now doing in loop, so can check for dups - appendVars(initVars); - //--- - //--- THINK: do we want to adjust modelCore directly here? - //--- adjust row bounds for convexity constraint - //--- - //TODO: in memory - double* zeroSol = new double[nColsCore]; - assert(zeroSol); - UtilFillN(zeroSol, nColsCore, 0.0); - //TODO - REVISIT - that's not the right check - // needs to be feasible to subproblem? 
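// [Editorial sketch - not part of this patch or of DIP's sources.]
// PHASE_CUT above re-expands the cut pool and ranks pool members by how much
// the current x-hat violates them (calcViolations) before calling the
// separators. Below is a minimal violation measure for a ranged row
// lb <= a'x <= ub; names are hypothetical.
#include <algorithm>
#include <vector>

// Violation of lb <= a'x <= ub at point x; positive means the row is violated.
static double cutViolation(const std::vector<double>& a,
                           const std::vector<double>& x,
                           double lb, double ub)
{
   double ax = 0.0;
   for (size_t j = 0; j < a.size(); ++j) {
      ax += a[j] * x[j];
   }
   return std::max({0.0, lb - ax, ax - ub});   // 0 when the row is satisfied
}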
- //TODO: DETECT THIS - //bool isZeroFeas = isIPFeasible(zeroSol); - int isZeroFeas = m_param.MasterConvexityLessThan; - UTIL_DEBUG(m_param.LogDebugLevel, 5, - - if (isZeroFeas) - (*m_osLog) << "Zero Sol is Feasible - relax convexity con.\n"; - ) { - ; - } - - //--- - //--- row bounds from core including - //--- original rows - //--- branching rows - //--- - vector masterRowLB(modelCore->rowLB); - vector masterRowUB(modelCore->rowUB); - - //--- - //--- row bounds for convexity constraints - //--- - if (isZeroFeas) { - for (r = 0; r < m_numConvexCon; r++) { - masterRowLB.push_back(-m_infinity); - masterRowUB.push_back(1.0); - } - } else { - for (r = 0; r < m_numConvexCon; r++) { - masterRowLB.push_back(1.0); - masterRowUB.push_back(1.0); + /*#ifdef STAB_DUMERLE + m_stabEpsilon *= 0.95; + dualSol = m_masterSI->getRowPrice(); + int i, r; + for(i = 0; i < m_masterSI->getNumCols(); i++){ + DecompColType type = m_masterColType[i]; + if(isMasterColArtificial(i)){ + if(type == DecompCol_ArtForRowL || + type == DecompCol_ArtForBranchL || + type == DecompCol_ArtForCutL){ + r = m_artColIndToRowInd[i]; + printf("Master Col i=%d type=%s r=%d dual=%g\n", + i, DecompColTypeStr[type].c_str(), r, dualSol[r]); + m_masterSI->setObjCoeff(i, -dualSol[r]); + } + else if(type == DecompCol_ArtForRowG || + type == DecompCol_ArtForBranchG || + type == DecompCol_ArtForCutG){ + r = m_artColIndToRowInd[i]; + printf("Master Col i=%d type=%s r=%d dual=%g\n", + i, DecompColTypeStr[type].c_str(), r, dualSol[r]); + m_masterSI->setObjCoeff(i, dualSol[r]); + } + //CAN'T DO THIS IF IN PHASEI! + //m_masterSI->setColBounds(i, 0.0, 0.0);//TODO + m_masterSI->setColBounds(i, 0.0, m_stabEpsilon);//TODO + } + } + #endif*/ + } break; + case PHASE_CUT: + m_nodeStats.cutCallsRound++; + m_nodeStats.cutCallsTotal++; + + //--- + //--- after adding some cols, the rows in the cut pool + //--- might no longer be valid, so we need to re-expand everything + //--- + if (!m_cutpool.rowsAreValid() && (m_cutpool.size() > 0)) { + UTIL_MSG(m_param.LogDebugLevel, 3, + (*m_osLog) << "EXPANDING cutpool.\n";); + m_cutpool.reExpand(m_vars, modelCore->getNumCols(), m_nArtCols); + } + + // THINK: here is where you will do sep of m_xhat vs shat + m_cutpool.calcViolations(m_xhat); + //--- + //--- attempt to generate some new cuts with vio > 0 + //--- + m_nodeStats.cutsThisCall = generateCuts(m_xhat, newCuts); + m_nodeStats.cutsThisRound += m_nodeStats.cutsThisCall; + m_nodeStats.varsThisCall = 0; + + if (m_nodeStats.cutsThisCall > 0) { + // this updates the lb based on last solve, not this solve! 
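// [Editorial sketch - not part of this patch or of DIP's sources.]
// updateObjBound above refreshes the node's dual bound. For price-and-cut the
// textbook Lagrangian bound adds, to the restricted master value, the most
// negative reduced cost found for each block - valid only when every
// subproblem was solved exactly, which is why the earlier code guards the
// update with m_isColGenExact and m_rrIterSinceAll == 0. The exact quantity
// DIP feeds to updateObjBound(mostNegRC) may differ; this is only the idea.
#include <vector>

// Lagrangian bound after an exact pricing round, one convexity row per block:
//   LB = z_RMP + sum_k rc*_k
// where rc*_k is the minimum reduced cost found for block k.
static double dualBound(double masterObj, const std::vector<double>& bestRcPerBlock)
{
   double lb = masterObj;
   for (double rc : bestRcPerBlock) {
      lb += rc;
   }
   return lb;
}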
+ // gen cut doesn't change bound until we resolve + // if(m_algo == CUT) + // updateObjBoundLB(); + //--- + //--- add the newly generated cuts to the cut pool + //--- + addCutsToPool(m_xhat, newCuts, m_nodeStats.cutsThisCall); + //--- + //--- add cuts from the cut pool to the master problem + //--- + addCutsFromPool(); } - } - - //--- - //--- load the problem into master's solver interface - //--- - assert(masterM->getNumRows() == static_cast(masterRowLB.size())); - assert(masterM->getNumRows() == static_cast(masterRowUB.size())); - assert(masterM->getNumRows() == static_cast(m_masterRowType.size())); - assert(masterM->getNumCols() == static_cast(m_masterColType.size())); - m_masterSI->loadProblem(*masterM, - colLB, colUB, objCoeff, - &masterRowLB[0], - &masterRowUB[0]); - //--- - //--- load column and row names to OSI - //--- - int nRowNames = static_cast(modelCore->rowNames.size()); - int nColNames = static_cast(colNames.size()); - - if (nRowNames || nColNames) { - m_masterSI->setIntParam(OsiNameDiscipline, 2); //Full-Names - } - - if (nRowNames > 0) { - assert(nRowNames == modelCore->getNumRows()); - m_masterSI->setRowNames(modelCore->rowNames, 0, nRowNames, 0); - vector conRowNames; - - for (r = 0; r < m_numConvexCon; r++) { - string rowName = "conv(b_" + UtilIntToStr(r) + ")"; - conRowNames.push_back(rowName); + + break; + case PHASE_DONE: { + map::iterator mit; + + for (mit = m_modelRelax.begin(); mit != m_modelRelax.end(); mit++) { + (*mit).second.setCounter(0); } + } break; + default: + assert(0); + } + + //--- + //--- Careful here -- for many apps the user will use heuristics + //--- during column generation phase. If we update LB after each + //--- column added we might stop too early if this LB exceeds the + //--- tree's global upper bound. + //--- + //--- We need the user to tell us if they solved it exactly or not. + //--- + // this should be in phaseUpdate? + // TODO: moved this into phaseUpdate for PC - need to revisit CPM! + // TODO: now moved to phaseUpdate - what about case of no branch object!? + if (m_phase != PHASE_DONE) { + //--- + //--- perform a solution update + //--- PC: take PARM steps of simplex + //--- ?? DC: take PARM steps of simplex (INF case?) + //--- RC: take PARM steps of subgradient + //--- VC: take PARM steps of volume + //--- + if (m_param.LogDumpModel > 1) { + string baseName = "masterProb"; + + if (m_isStrongBranch) { + baseName += "_SB"; + } + + printCurrentProblem(m_masterSI, baseName, m_nodeStats.nodeIndex, + m_nodeStats.cutCallsTotal, + m_nodeStats.priceCallsTotal); + } + + //--- + //--- check to see if something got added + //--- + int nChanges = m_nodeStats.cutsThisCall + m_nodeStats.varsThisCall; + UTIL_DEBUG( + m_param.LogDebugLevel, 3, + (*m_osLog) << "nNewVars = " << m_nodeStats.varsThisCall << endl; + (*m_osLog) << "nNewCuts = " << m_nodeStats.cutsThisCall << endl;); + + if (!isDone()) { + if (nChanges) { + // why is this not in the switch statement? + m_status = solutionUpdate(m_phase); + + // make this some update that can be override for CPM vs PC + // or move this update to phaseUpdate??? + if (m_nodeStats.cutsThisCall > 0) { + updateObjBound(); + } + } + } + + ////////////////THINK + //???? 
shouldn't we have recompose and look for IP feasible + // inside of solutionUpdate - as it should be checked in every + // case + // what happens often in pricing - you find integer point, + // but lb can be improved so you price, but still get integer point + // as min, so you keep adding the same ip point - need cmp to + // check for dups - esp against previous one + //--- + //--- check if IP feasible (are we done?) + //--- TODO: for nonexplicity, also check user app isfeasible + //--- + // TODO: should this whole section be phaseDone? + if (m_status != STAT_INFEASIBLE) { + recomposeSolution(getMasterPrimalSolution(), m_xhat); + UTIL_DEBUG(m_param.LogDebugLevel, 4, + m_app->printOriginalSolution(modelCore->getNumCols(), + modelCore->getColNames(), + m_xhat);); + + // TODO: solution pool? + // this is checked again in phase update... + // first, check to see if LP solution is already ip and user feas + if (isIPFeasible(m_xhat)) { + if (m_app->APPisUserFeasible(m_xhat, modelCore->getNumCols(), + m_param.TolZero)) { + // check for dup sol + bool isDup = m_xhatIPFeas.size() > 0 ? true : false; + vector::iterator vit; + + for (vit = m_xhatIPFeas.begin(); vit != m_xhatIPFeas.end(); vit++) { + const DecompSolution *xhatIPFeas = *vit; + const double *values = xhatIPFeas->getValues(); + + for (int c = 0; c < modelCore->getNumCols(); c++) { + if (!UtilIsZero(values[c] - m_xhat[c])) { + isDup = false; + break; + } + } + } - m_masterSI->setRowNames(conRowNames, 0, m_numConvexCon, nRowNames); - string objName = "objective"; - m_masterSI->setObjName(objName); - } - - if (nColNames > 0) { - m_masterSI->setColNames(colNames, 0, nColNames, 0); - } - - //TODO: make a function - UTIL_DEBUG(m_param.LogDebugLevel, 4, - - for (r = 0; r < m_masterSI->getNumRows(); r++) { - const string rowN = m_masterSI->getRowName(r); - (*m_osLog) << "Row[" << setw(4) << r << "] Name: " - << setw(30) << rowN << " Type: " - << setw(20) << DecompRowTypeStr[m_masterRowType[r]] - << endl; - } - for (int c = 0; c < m_masterSI->getNumCols(); c++) { - const string colN = m_masterSI->getColName(c); - (*m_osLog) << "Col[" << setw(4) << c << "] Name: " - << setw(30) << colN << " Type: " - << setw(20) << DecompColTypeStr[m_masterColType[c]] - << endl; - } - ); - //--- - //--- reset unique col index id - //--- - m_colIndexUnique = masterM->getNumCols(); - //--- - //--- free local memory - //--- - UTIL_DELPTR(masterM); - UTIL_DELARR(denseCol); - UTIL_DELARR(colLB); - UTIL_DELARR(colUB); - UTIL_DELARR(objCoeff); - UTIL_DELARR(zeroSol); - UTIL_DELARR(dblArrNCoreCols); - UtilPrintFuncEnd(m_osLog, m_classTag, - "createMasterProblem()", m_param.LogDebugLevel, 2); + if (isDup) { + // printf("IS DUP, not pushing\n"); + } else { + DecompSolution *decompSol = new DecompSolution( + modelCore->getNumCols(), m_xhat, getOrigObjective()); + // solution pool? 
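// [Editorial sketch - not part of this patch or of DIP's sources.]
// isIPFeasible above accepts the recomposed x-hat when every
// integer-constrained component is within tolerance of an integer (the
// application can still veto it via APPisUserFeasible). A standalone sketch
// of that integrality test, with hypothetical names:
#include <cmath>
#include <vector>

// True if every listed integer variable takes an integral value within 'tol'.
static bool isIntegral(const std::vector<double>& x,
                       const std::vector<int>&    integerVars,
                       double                     tol = 1.0e-6)
{
   for (int j : integerVars) {
      double frac = std::fabs(x[j] - std::floor(x[j] + 0.5));  // distance to nearest integer
      if (frac > tol) {
         return false;
      }
   }
   return true;
}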
+ m_xhatIPFeas.push_back(decompSol); + } + } + + //--- + //--- TODO: + //--- + //--- for multi-block, if integer feasible solution, + //--- break up into block partial columns and add + //--- to masterLP + //--- + } + + //--- + //--- TODO: + //--- Rob Pratt Idea (2/5/10) + //--- for multi-block, if block is integer feasible + //--- add to masterLP directly - then need a resolve + //--- to get back to current status + //--- + if (m_param.BreakOutPartial) { + DecompVarList partialVars; + breakOutPartial(m_xhat, partialVars); + + if (partialVars.size()) { + //--- + //--- add the newly generated variables to the var pool + //--- + addVarsToPool(partialVars); + //--- + //--- add variables from the variable pool to master problem + //--- + addVarsFromPool(); + //--- + //--- update if any changes were made + //--- + nChanges = m_nodeStats.cutsThisCall + m_nodeStats.varsThisCall; + (*m_osLog) << "BreakOutPartial newVars = " << partialVars.size() + << endl; + } + } + + // TODO: + m_app->APPheuristics(m_xhat, getOrigObjective(), m_xhatIPFeas); + // TODO: make this a function! + vector::iterator vi; + DecompSolution *viBest = NULL; + double bestBoundUB = m_nodeStats.objBest.second; + + for (vi = m_xhatIPFeas.begin(); vi != m_xhatIPFeas.end(); vi++) { + const DecompSolution *xhatIPFeas = *vi; + + if (xhatIPFeas->getQuality() <= bestBoundUB) { + bestBoundUB = xhatIPFeas->getQuality(); + viBest = *vi; + } + } + + if (viBest) { + // save the best + setObjBoundIP(bestBoundUB); + m_xhatIPBest = viBest; + } + } + + if (nChanges && m_phase != PHASE_PRICE1) { + //--- + //--- check on tailoff + //--- + if (isTailoffLB(m_param.TailoffLength, m_param.TailoffPercent)) { + UTIL_MSG(m_param.LogLevel, 2, + (*m_osLog) << "Tailing off. Stop processing node." << endl;); + m_stopCriteria = DecompStopTailOff; + m_phaseLast = m_phase; + m_phase = PHASE_DONE; + } + + //--- + //--- did the master objective change? + //--- if not, make sure the columns just added cannot be + //--- deleted + //--- + int i; + UTIL_DEBUG(m_param.LogDebugLevel, 3, + (*m_osLog) << "m_masterObjLast = " << setw(10) + << UtilDblToStr(m_masterObjLast) + << " thisMaster = " << setw(10) + << UtilDblToStr(getMasterObjValue()) + << " varsThisCall = " << setw(5) + << m_nodeStats.varsThisCall;); + + // what if it just changed due to cuts? 
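// [Editorial sketch - not part of this patch or of DIP's sources.]
// isTailoffLB above stops the node when the dual bound has stalled: roughly,
// over the last TailoffLength iterations the bound improved by less than
// TailoffPercent. A sketch of that test on a recorded bound history follows;
// the windowing DIP actually uses may differ, and all names are hypothetical.
#include <algorithm>
#include <cmath>
#include <vector>

// True if the lower bound improved by less than 'pct' (relative) over the
// last 'length' recorded iterations.
static bool isTailingOff(const std::vector<double>& lbHistory, int length, double pct)
{
   const int n = static_cast<int>(lbHistory.size());
   if (n <= length) {
      return false;                          // not enough history yet
   }
   double oldLB = lbHistory[n - 1 - length];
   double newLB = lbHistory[n - 1];
   double denom = std::max(std::fabs(oldLB), 1.0);
   return (newLB - oldLB) / denom < pct;     // little progress => tailing off
}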
+ if (UtilIsZero(m_masterObjLast - getMasterObjValue(), 1.0e-4)) { + m_objNoChange = true; + UTIL_DEBUG(m_param.LogDebugLevel, 3, + (*m_osLog) << "No objective change" << endl;); + + // 0 1 2 3 4 + // w new vars + // 4x 3x 2 1 0 + // "DecompCol_Structural_NoDelete", + if (m_nodeStats.varsThisCall > 0) { + int sz = static_cast(m_masterColType.size()); + + for (i = sz - 1; i >= sz - m_nodeStats.varsThisCall; i--) { + UTIL_DEBUG(m_param.LogDebugLevel, 3, + (*m_osLog) + << "Col " << i << " has type " + << DecompColTypeStr[m_masterColType[i]] << endl;); + assert(m_masterColType[i] == DecompCol_Structural); + m_masterColType[i] = DecompCol_Structural_NoDelete; + } + } + } else { + m_objNoChange = false; + UTIL_DEBUG(m_param.LogDebugLevel, 3, (*m_osLog) << endl;); + } + + m_masterObjLast = getMasterObjValue(); + + if (m_phase != PHASE_DONE && m_param.CompressColumns) { + //--- + //--- adjust columns effectiveness count + //--- + adjustColumnsEffCnt(); + //--- + //--- periodically, get rid of ineffective columns + //--- periodic: + //--- every K iterations OR + //--- numCols has doubled since last compression + //--- + compressColumns(); + } + } + } + } // while(phase != PHASE_DONE) + + phaseDone(); + + // need to check again, if we get ip feasible in first LP + // but this will cause dups... if we also find above? + if (m_xhatIPFeas.size() == 0 && m_status != STAT_INFEASIBLE) { + // this is checked again in phase update... + // first, check to see if LP solution is already ip and user feas + if (isIPFeasible(m_xhat)) { + if (m_app->APPisUserFeasible(m_xhat, modelCore->getNumCols(), + m_param.TolZero)) { + DecompSolution *decompSol = new DecompSolution( + modelCore->getNumCols(), m_xhat, getOrigObjective()); + m_xhatIPFeas.push_back(decompSol); + m_xhatIPBest = decompSol; + } + } + } + + if (m_xhatIPBest) { + UTIL_DEBUG(m_param.LogLevel, 3, + (*m_osLog) << "Best Feasible Solution with Quality = " + << UtilDblToStr(m_xhatIPBest->getQuality(), 6) + << "\n"; + m_app->printOriginalSolution(modelCore->getNumCols(), + modelCore->getColNames(), + m_xhatIPBest->getValues());); + } + + UTIL_DEBUG( + m_param.LogDebugLevel, 3, + (*m_osLog) << "StatOut : " << DecompStatusStr[m_status] << "\n"; + (*m_osLog) << "StopCriteria: " << DecompAlgoStopStr[m_stopCriteria] + << "\n"; + (*m_osLog) << "RelGap : " << UtilDblToStr(m_relGap, 6) << "\n";); + m_stats.thisDecomp.push_back(m_stats.timerDecomp.getRealTime()); + // if i am root and doing price and cut, solve this IP to get ub... + // e.g., cutting stock works well -> better to do at AlpsDecompTreeNode + UTIL_MSG(m_param.LogDebugLevel, 3, m_stats.printOverallStats(m_osLog);); + + if (m_param.LogObjHistory) { + m_nodeStats.printObjHistoryBound(m_osLog); + } + + UtilPrintFuncEnd(m_osLog, m_classTag, "processNode()", m_param.LogDebugLevel, + 1); + return m_status; +} + +//--------------------------------------------------------------------- // +void DecompAlgo::setSubProbBounds(const double *lbs, const double *ubs) { + // NOTE: set them in either case so customized user + // can access the information from branching + // if(!m_param.BranchEnforceInSubProb) + // return; + UtilPrintFuncBegin(m_osLog, m_classTag, "setSubProbBounds()", + m_param.LogDebugLevel, 2); + //--- + //--- make copy so we can enforce in subproblems + //--- THINK: If serial mode, why not just a pointer into node desc? 
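// [Editorial sketch - not part of this patch or of DIP's sources.]
// CompressColumns above periodically removes master columns that keep pricing
// out as unattractive, while columns just protected as
// DecompCol_Structural_NoDelete are exempt. Below is one way to track
// effectiveness counts and pick deletion candidates; the counter update rule
// and threshold are assumptions, not DIP's exact policy.
#include <vector>

// Decrement the effectiveness count of columns whose reduced cost is positive
// and report the indices that fall below the threshold and may be deleted.
static std::vector<int> compressionCandidates(const std::vector<double>& reducedCost,
                                              std::vector<int>&          effCnt,    // one counter per column
                                              const std::vector<bool>&   noDelete,  // protected columns
                                              int                        minEffCnt = -5)
{
   std::vector<int> candidates;
   for (size_t i = 0; i < reducedCost.size(); ++i) {
      if (reducedCost[i] > 0.0) {
         --effCnt[i];                        // column did not help this round
      }
      if (!noDelete[i] && effCnt[i] < minEffCnt) {
         candidates.push_back(static_cast<int>(i));
      }
   }
   return candidates;
}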
+ //--- + DecompConstraintSet *modelCore = m_modelCore.getModel(); + const int nCols = modelCore->getNumCols(); + memcpy(m_colLBNode, lbs, nCols * sizeof(double)); + memcpy(m_colUBNode, ubs, nCols * sizeof(double)); + UtilPrintFuncEnd(m_osLog, m_classTag, "setSubProbBounds()", + m_param.LogDebugLevel, 2); +} + +//--------------------------------------------------------------------- // +void DecompAlgo::setMasterBounds(const double *lbs, const double *ubs) { + UtilPrintFuncBegin(m_osLog, m_classTag, "setMasterBounds()", + m_param.LogDebugLevel, 2); + + // TODO: how to handle case where relax is not defined explicitly + // like in GAP... + // if (!m_param.BranchEnforceInMaster) { + // assert(m_param.BranchEnforceInSubProb); + if (m_branchingImplementation == DecompBranchInSubproblem) { + //--- + //--- Must remove (or fix to 0) any column in master that + //--- does not satisfy the branching bounds. + //--- However -- be careful that these bounds should + //--- only be applied to their relevant blocks. + //--- + //--- For example, if branch is x(abc,2)=1, and 2 is + //--- the block id, we do not want to remove columns + //--- in block 1 where x(abc,1)=0. That is a partial + //--- column which might have x(abc,2)=0 in projected + //--- space, but should remain in that branching node. + //--- Otherwise, it will just be reproduced at the next + //--- phase of generating vars for block 0. + //--- + DecompVarList::iterator li; + int masterColIndex; + DecompConstraintSet *modelCore = m_modelCore.getModel(); + const int nCols = modelCore->getNumCols(); + const double *colUB = m_masterSI->getColUpper(); + double *denseS = new double[nCols]; + map::iterator mit; + + for (li = m_vars.begin(); li != m_vars.end(); li++) { + masterColIndex = (*li)->getColMasterIndex(); + assert(isMasterColStructural(masterColIndex)); + mit = m_modelRelax.find((*li)->getBlockId()); + assert(mit != m_modelRelax.end()); + + if (!(*li)->doesSatisfyBounds(nCols, denseS, mit->second, lbs, ubs)) { + //--- + //--- if needs to be fixed + //--- + if (colUB[masterColIndex] > DecompEpsilon) { + m_masterSI->setColBounds(masterColIndex, 0.0, 0.0); + + if (m_param.LogDebugLevel >= 4) { + (*m_osLog) << "Set masterColIndex=" << masterColIndex << " UB to 0" + << endl; + (*li)->print(m_infinity, m_osLog, modelCore->getColNames()); + } + } + } else { + //--- + //--- if needs to be unfixed (from previous node) + //--- + if (colUB[masterColIndex] <= 0) { + m_masterSI->setColBounds(masterColIndex, 0.0, m_infinity); + + if (m_param.LogDebugLevel >= 4) { + (*m_osLog) << "Set masterColIndex=" << masterColIndex + << " UB to INF" << endl; + (*li)->print(m_infinity, m_osLog, modelCore->getColNames()); + } + } + } + } + + UTIL_DELARR(denseS); + } else if (m_branchingImplementation == DecompBranchInMaster) { + int c, coreColIndex; + DecompConstraintSet *modelCore = m_modelCore.getModel(); + const int nIntVars = modelCore->getNumInts(); + const int *integerVars = modelCore->getIntegerVars(); + + // speical treat master-only variables, add variable bounds + // directly on the master-only variables + if (m_param.BranchEnforceInSubProb == true && + m_branchingImplementation == DecompBranchInMaster) { + for (c = 0; c < nIntVars; c++) { + coreColIndex = integerVars[c]; + + if (std::find(m_masterOnlyCols.begin(), m_masterOnlyCols.end(), + coreColIndex) != m_masterOnlyCols.end()) { + m_masterSI->setColBounds(m_masterOnlyColsMap[coreColIndex], + lbs[coreColIndex], ubs[coreColIndex]); + } + } + } else { + const int beg = modelCore->nBaseRowsOrig; + // TODO: can 
reuse this memory + int nRows = 2 * nIntVars; + int *index = new int[nRows]; + char *sense = new char[nRows]; + double *rhs = new double[nRows]; + double *range = new double[nRows]; + + // lbs,ubs is indexed on core column index + // but c is being looped over integers here... + //--- + //--- the row index for column c's UB (x <= u) is: beg + c + //--- the row index for column c's LB (x >= l) is: beg + nIntVars + c + //--- + + for (c = 0; c < nIntVars; c++) { + // x <= u + coreColIndex = integerVars[c]; + index[c] = beg + c; // row index into master + sense[c] = 'L'; + rhs[c] = ubs[coreColIndex]; + range[c] = 0.0; + + if (m_masterRowType[beg + c] != DecompRow_Branch) { + printf("ERROR: row %d type: %s\n", beg + c, + DecompRowTypeStr[m_masterRowType[beg + c]].c_str()); + } + + assert(m_masterRowType[beg + c] == DecompRow_Branch); + } + + for (c = nIntVars; c < (2 * nIntVars); c++) { + // x >= l + coreColIndex = integerVars[c - nIntVars]; + index[c] = beg + c; + sense[c] = 'G'; + rhs[c] = lbs[coreColIndex]; + range[c] = 0.0; + + if (m_masterRowType[beg + c] != DecompRow_Branch) { + printf("ERROR: row %d type: %s\n", beg + c, + DecompRowTypeStr[m_masterRowType[beg + c]].c_str()); + } + + assert(m_masterRowType[beg + c] == DecompRow_Branch); + } + + m_masterSI->setRowSetTypes(index, index + (2 * nIntVars), sense, rhs, + range); + UTIL_DELARR(index); + UTIL_DELARR(sense); + UTIL_DELARR(rhs); + UTIL_DELARR(range); + } + } + + if (m_param.BranchEnforceInSubProb == true) { + m_branchingImplementation = DecompBranchInSubproblem; + } + + UtilPrintFuncEnd(m_osLog, m_classTag, "setMasterBounds()", + m_param.LogDebugLevel, 2); } //===========================================================================// +DecompStatus DecompAlgo::solutionUpdate(const DecompPhase phase, bool resolve, + // TODO: not currently used? + const int maxInnerIter, + const int maxOuterIter) { + UtilPrintFuncBegin(m_osLog, m_classTag, "solutionUpdate()", + m_param.LogDebugLevel, 2); + m_stats.timerOther1.reset(); + int i; + DecompStatus status = STAT_UNKNOWN; + + //--- + //--- solve the master as an integer program + //--- since the user might have given us a good IP feasible + //--- init solution, let's always solve master as IP as soon + //--- as we get into PHASE 2 + //--- + if (m_param.SolveMasterAsMip && + ((m_phase != PHASE_PRICE1 && m_nodeStats.priceCallsTotal && + m_nodeStats.priceCallsTotal % m_param.SolveMasterAsMipFreqPass == 0) || + m_firstPhase2Call)) { + UTIL_MSG(m_param.LogLevel, 2, + (*m_osLog) << "solveMasterAsMip: PriceCallsTotal=" + << m_nodeStats.priceCallsTotal + << " m_firstPhase2Call = " << m_firstPhase2Call + << endl;); + solveMasterAsMIP(); + + if (m_firstPhase2Call) { + m_firstPhase2Call = false; + } + } + + // if(m_phase == PHASE_PRICE2) + // if(m_firstPhase2Call) + // m_firstPhase2Call = false; + //--- + //--- was missing all along? 9/28/09 + //--- + //#ifdef __DECOMP_LP_CLP__ + // m_masterSI->setHintParam(OsiDoPresolveInResolve, false, OsiHintDo); + //#else + // m_masterSI->setHintParam(OsiDoPresolveInResolve, true, OsiHintDo); + //#endif + // m_masterSI->setIntParam(OsiMaxNumIteration, maxInnerIter); + // THINK: + // if we allow for interior, need crossover too? 
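// [Editorial sketch - not part of this patch or of DIP's sources.]
// When branching is enforced in the master (DecompBranchInMaster), the bounds
// l <= x <= u on the integer variables live as explicit rows appended after
// the original core rows: row beg+c carries x_c <= u_c (sense 'L') and row
// beg+nIntVars+c carries x_c >= l_c (sense 'G'), so a branch only has to
// rewrite right-hand sides, as setRowSetTypes does above. A sketch of
// computing those updates, with hypothetical names:
#include <vector>

struct RowUpdate { int index; char sense; double rhs; };

// Build the rhs updates for the 2*nInt branching rows given new bounds
// lbs/ubs on the original (core) columns.
static std::vector<RowUpdate> branchRowUpdates(const std::vector<int>&    integerVars,
                                               const std::vector<double>& lbs,
                                               const std::vector<double>& ubs,
                                               int                        beg)  // first branch row in master
{
   const int nInt = static_cast<int>(integerVars.size());
   std::vector<RowUpdate> updates;
   for (int c = 0; c < nInt; ++c) {
      int j = integerVars[c];
      updates.push_back({beg + c,        'L', ubs[j]});   // x_j <= u_j
      updates.push_back({beg + nInt + c, 'G', lbs[j]});   // x_j >= l_j
   }
   return updates;
}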
+ + if (m_param.DecompLPSolver == "CPLEX") { +#ifdef DIP_HAS_CPX + OsiCpxSolverInterface *masterCpxSI = + dynamic_cast(m_masterSI); + CPXENVptr env = masterCpxSI->getEnvironmentPtr(); + CPXsetintparam(env, CPX_PARAM_PREIND, CPX_ON); + CPXsetintparam(env, CPX_PARAM_SCRIND, CPX_ON); + CPXsetintparam(env, CPX_PARAM_SIMDISPLAY, 2); + // int preInd = 0; + // CPXgetintparam(env, CPX_PARAM_PREIND, &preInd); + // printf("preind=%d\n",preInd); +#endif + } -void DecompAlgo::masterMatrixAddMOCols(CoinPackedMatrix* masterM, - double* colLB, - double* colUB, - double* objCoeff, - vector& colNames) -{ - int nMOVars = static_cast(m_masterOnlyCols.size()); + switch (phase) { + case PHASE_PRICE1: + case PHASE_PRICE2: + m_masterSI->setDblParam(OsiDualObjectiveLimit, m_infinity); - if (nMOVars <= 0) { - return; - } - - DecompConstraintSet* modelCore = m_modelCore.getModel(); - assert(modelCore); - assert(!modelCore->isSparse()); - const double* colLBCore = modelCore->getColLB(); - const double* colUBCore = modelCore->getColUB(); - const vector& colNamesCore = modelCore->getColNames(); - //--- - //--- add the submatrix for core rows cross master-only columns - //--- to the master formulation (this will be a col-ordered matrix) - //--- - const CoinPackedMatrix* matrixCore = modelCore->getMatrix(); - CoinPackedMatrix matrixCoreTmp(*matrixCore); - - if (!matrixCoreTmp.isColOrdered()) { - matrixCoreTmp.reverseOrdering(); - } - - //////STOP - const CoinPackedVectorBase** colBlock = - new const CoinPackedVectorBase*[nMOVars]; - - for (int i = 0; i < nMOVars; i++) { - CoinShallowPackedVector colS = - matrixCoreTmp.getVector(modelCore->getMasterOnlyCols()[i]); - CoinPackedVector* col = new CoinPackedVector(colS.getNumElements(), - colS.getIndices(), - colS.getElements()); - colBlock[i] = col; - /* - for(int j = 0 ; j < colS.getNumElements(); j++){ - - std::cout << "The column vector of masterOnly " - << j << " contains " << j << " th element is " - << col->getElements()[j] << std::endl; - std::cout << "The index is " << col->getIndices()[j] - << std::endl; - - } - */ - } - - //todo - use ptrs, allocate only if need transpose - //CoinPackedMatrix matrixMO(matrixCoreTmp); - //matrixMO.setDimensions(matrixCore->getNumRows(), 0); - //this won't work - wind up with 3x3 vs 3cols x all rows in core - // need to construct manually - //use appendRows - //matrixMO.submatrixOfWithDuplicates(matrixCoreTmp, - // nMOVars, &m_masterOnlyCols[0]); - //assert(matrixMO.isColOrdered()); - // assert(masterM->isColOrdered()); - //masterM->majorAppendSameOrdered(matrixMO); - masterM->appendCols(nMOVars, colBlock); - //--- - //--- set master-onlys: lb, ub, obj, names - //--- - int j, k; - int nMasterCols = masterM->getNumCols(); - - for (int i = 0; i < nMOVars; i++) { - k = nMasterCols + i - nMOVars ; - j = m_masterOnlyCols[i]; - colLB[k] = colLBCore[j]; - colUB[k] = colUBCore[j]; - objCoeff[k] = 0; - colNames.push_back(colNamesCore[j]); - m_masterColType.push_back(DecompCol_MasterOnly); - //m_masterColType.push_back(DecompCol_Structural_NoDelete); - m_masterOnlyColsMap.insert(make_pair(j, k)); - } - - //free local memory - for (int i = 0; i < nMOVars; i++) { - UTIL_DELPTR(colBlock[i]); - } - - UTIL_DELARR(colBlock); + if (m_param.SolveMasterUpdateAlgo == DecompDualSimplex) { + m_masterSI->setHintParam(OsiDoDualInResolve, true, OsiHintDo); + } else { + m_masterSI->setHintParam(OsiDoDualInResolve, false, OsiHintDo); + } + + // TODO: interior + // if(m_algo == DECOMP)//THINK! 
+ // m_masterSI->setHintParam(OsiDoPresolveInResolve, false, OsiHintDo); + + if (m_param.DecompLPSolver == "CPLEX" && m_param.DoInteriorPoint) { +#ifdef DIP_HAS_CPX + // int cpxStat=0, cpxMethod=0; + OsiCpxSolverInterface *masterCpxSI = + dynamic_cast(m_masterSI); + CPXENVptr env = masterCpxSI->getEnvironmentPtr(); + CPXLPptr lp = + masterCpxSI->getLpPtr(OsiCpxSolverInterface::KEEPCACHED_ALL); + // CPXhybbaropt(env, lp, 0);//if crossover, defeat purpose + CPXbaropt(env, lp); + // cpxMethod = CPXgetmethod(env, lp); + // cpxStat = CPXgetstat(env, lp); + // if(cpxStat) + // printf("cpxMethod=%d, cpxStat = %d\n", cpxMethod, cpxStat); +#endif + } else { + if (resolve) { + // m_masterSI->writeMps("temp"); + m_masterSI->resolve(); + } else { + m_masterSI->initialSolve(); + } + } + break; + case PHASE_CUT: + m_masterSI->setHintParam(OsiDoDualInResolve, true, OsiHintDo); + + if (resolve) { + m_masterSI->resolve(); + } else { + m_masterSI->initialSolve(); + } + + break; + default: + assert(0); + } + + UTIL_MSG(m_param.LogDebugLevel, 3, + (*m_osLog) << "Solution update n_cols:" << setw(10) + << m_masterSI->getNumCols() << " n_rows: " << setw(10) + << m_masterSI->getNumRows() << " n_iter: " << setw(10) + << m_masterSI->getIterationCount() + << " time: " << setw(10) + << m_stats.timerOther1.getRealTime() << endl;); + if (m_param.DecompLPSolver == "Clp") { +#ifdef DIP_HAS_CLP + UTIL_DEBUG(m_param.LogDebugLevel, 4, { + OsiClpSolverInterface *osiClp = + dynamic_cast(m_masterSI); + printf("clp status = %d\n", osiClp->getModelPtr()->status()); + printf("clp prob status = %d\n", + osiClp->getModelPtr()->problemStatus()); + printf("clp second status = %d\n", + osiClp->getModelPtr()->secondaryStatus()); + }); +#endif + } + UTIL_DEBUG(m_param.LogDebugLevel, 3, + (*m_osLog) << "Iteration Count : " + << m_masterSI->getIterationCount() << "\n" + << "isAbandoned() : " + << m_masterSI->isAbandoned() << "\n" + << "isProvenOptimal() : " + << m_masterSI->isProvenOptimal() << "\n" + << "isProvenPrimalInfeasible() : " + << m_masterSI->isProvenPrimalInfeasible() << "\n" + << "isProvenDualInfeasible() : " + << m_masterSI->isProvenDualInfeasible() << "\n" + << "isPrimalObjectiveLimitReached : " + << m_masterSI->isDualObjectiveLimitReached() << "\n" + << "isDualObjectiveLimitReached : " + << m_masterSI->isDualObjectiveLimitReached() << "\n" + << "isIterationLimitReached : " + << m_masterSI->isIterationLimitReached() << "\n";); + + if (m_masterSI->isProvenOptimal()) { + status = STAT_FEASIBLE; + // if we are using cpx, we need to save the + // solution and we cannot use getColSolution() later on + // for example, after addCols is called, cache is lost + const int nCols = m_masterSI->getNumCols(); + const int nRows = m_masterSI->getNumRows(); + const double *primSol = m_masterSI->getColSolution(); + // Need to distinguish the primSol after we added master-only variables + const double *dualSol = m_masterSI->getRowPrice(); + const double *rc = m_masterSI->getReducedCost(); + m_reducedCost.clear(); + m_reducedCost.reserve(nCols); + m_reducedCost.assign(rc, rc + nCols); + assert((int)m_reducedCost.size() == nCols); + m_primSolution.clear(); + m_primSolution.reserve(nCols); + m_dualSolution.clear(); + m_dualSolution.reserve(nRows); + m_primSolution.assign(primSol, primSol + nCols); + m_dualSolution.assign(dualSol, dualSol + nRows); + assert((int)m_primSolution.size() == nCols); + assert((int)m_dualSolution.size() == nRows); + UTIL_DEBUG(m_param.LogDebugLevel, 4, + (*m_osLog) << "MasterObj : " + << UtilDblToStr(getMasterObjValue()) 
<< "\n";); + + // sanity check + if (m_algo != CUT) { + // checkMasterDualObj(); + } + + //--- + //--- adjust dual solution + //--- DecompAlgo call adjusts based on dual stabilization method + //--- + adjustMasterDualSolution(); + + //--- + //--- HACK: there is some bug in CLP where infeasible is declared optimal + //--- but then we get back solution at state when it internally gave up + //--- + //--- Check to see if some lambda < 0 - i.e., junk. If so, assume that + //--- it meant to return infeasible. + //--- + for (i = 0; i < nCols; i++) { + // If there is master only variables, primSol will contain values of those + // master Only variables the the notation of LAMBDA is a little bit + // abused... + if (primSol[i] < m_masterSI->getColLower()[i] - 1) { + std::cout << "The bad upper bound is " << m_masterSI->getColUpper()[i] + << std::endl; + std::cout << "primSol[ " << i << "] is" << primSol[i] << std::endl; + std::cout << "The bad lower bound is " << m_masterSI->getColLower()[i] + << std::endl; + (*m_osLog) << "ERROR: NEGATIVE LAMBDA, but Osi returns as optimal" + << " assume it was meant to be infeasible." << endl; + status = STAT_INFEASIBLE; + } + } + } else if (m_masterSI->isProvenPrimalInfeasible() || + m_masterSI->isProvenDualInfeasible()) { + // for interior, if infeasible, the status is not + // getting picked up properly by OSI + status = STAT_INFEASIBLE; + //--- + //--- it is possible that presolver determined infeasibility + //--- but, we will need a dual ray, so we should resolve with + //--- presolve off + //--- + m_masterSI->setDblParam(OsiDualObjectiveLimit, m_infinity); + m_masterSI->setHintParam(OsiDoPresolveInResolve, false, OsiHintDo); + m_masterSI->resolve(); + m_masterSI->setHintParam(OsiDoPresolveInResolve, true, OsiHintDo); + } else { +#ifdef DO_INTERIOR + + if (m_masterSI->isDualObjectiveLimitReached()) { + status = STAT_INFEASIBLE; + } else +#endif + { + assert(0); + } + } + + //--- + //--- HACK: there is some bug in CLP where infeasible is declared optimal + //--- but then we get back solution at state when it internally gave up + //--- + //--- Check to see if some lambda < 0 - i.e., junk. If so, assume that + //--- it meant to return infeasible. + //--- + m_stats.thisSolUpdate.push_back(m_stats.timerOther1.getRealTime()); + UtilPrintFuncEnd(m_osLog, m_classTag, "solutionUpdate()", + m_param.LogDebugLevel, 2); + return status; } //===========================================================================// -void DecompAlgo::masterMatrixAddArtCol(vector& colBeg, - vector& colInd, - vector& colVal, - char LorG, - int rowIndex, - int colIndex, - DecompColType colType, - double& colLB, - double& colUB, - double& objCoeff) -{ - //CoinPackedVector artCol; - //if(LorG == 'L') - // artCol.insert(rowIndex, -1.0); - //else - // artCol.insert(rowIndex, 1.0); - //masterM->appendCol(artCol); - colInd.push_back(rowIndex); - - if (LorG == 'L') { - colVal.push_back(-1.0); - } else { - colVal.push_back( 1.0); - } - - colBeg.push_back(static_cast(colBeg.size())); - colLB = 0.0; - colUB = m_infinity; - objCoeff = 1.0; - m_masterColType.push_back(colType); - m_masterArtCols.push_back(colIndex); +// NOTE: not ok for CPX... do self? 
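// [Editorial sketch - not part of this patch or of DIP's sources.]
// getDualRays below (and isDualRayInfProof, used repeatedly) hands back rays
// y that certify master infeasibility. For the special case A x >= b, x >= 0,
// Farkas' lemma says the system is infeasible whenever y >= 0, y'A <= 0
// componentwise and y'b > 0. A dense sketch of that check follows; DIP's
// actual routine also handles general row senses and column bounds, so this
// is only the core idea with hypothetical names.
#include <vector>

// Check a Farkas certificate for the system  A x >= b,  x >= 0.
static bool isInfeasibilityProof(const std::vector<std::vector<double>>& A,  // m x n, dense
                                 const std::vector<double>&              b,  // size m
                                 const std::vector<double>&              y,  // candidate ray, size m
                                 double eps = 1.0e-8)
{
   double yb = 0.0;
   for (size_t i = 0; i < b.size(); ++i) {
      if (y[i] < -eps) {
         return false;                       // the ray must be nonnegative here
      }
      yb += y[i] * b[i];
   }
   if (yb <= eps) {
      return false;                          // y'b must be strictly positive
   }
   const size_t n = A.empty() ? 0 : A[0].size();
   for (size_t j = 0; j < n; ++j) {
      double yAj = 0.0;
      for (size_t i = 0; i < A.size(); ++i) {
         yAj += y[i] * A[i][j];
      }
      if (yAj > eps) {
         return false;                       // some component of y'A is positive
      }
   }
   return true;                              // certificate verified
}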
+vector DecompAlgo::getDualRays(int maxNumRays) { + if (m_param.DecompLPSolver == "CPLEX") { + return (getDualRaysCpx(maxNumRays)); + } else if (m_param.DecompLPSolver == "Clp" || + m_param.DecompLPSolver == "Gurobi") { + return (getDualRaysOsi(maxNumRays)); + } else { + throw UtilException("Unknown solver selected.", "getDualRays", + "DecompAlgo"); + } } //===========================================================================// -void DecompAlgo::masterMatrixAddArtCols(CoinPackedMatrix* masterM, - double* colLB, - double* colUB, - double* objCoeff, - vector& colNames, - int startRow, - int endRow, - DecompRowType rowType) -{ - //--- - //--- min sp + sm - //--- - //--- ax = b --> ax + sp - sm = b, sp >= 0, sm >= 0 - //--- ax <= b --> ax - sm <= b, sm >= 0 - //--- ax >= b --> ax + sp >= b, sp >= 0 - //--- - DecompConstraintSet* modelCore = m_modelCore.getModel(); - vector& rowSense = modelCore->rowSense; - vector& rowNames = modelCore->rowNames; - int nCoreRows = modelCore->getNumRows(); - bool hasNames = rowNames.empty() ? false : true; - int r, colIndex; - string colName, strIndex, colNameL, colNameG; - DecompColType colTypeL, colTypeG; - - switch (rowType) { - case DecompRow_Original: - colNameL = "sOL(c_"; - colNameG = "sOG(c_"; - colTypeL = DecompCol_ArtForRowL; - colTypeG = DecompCol_ArtForRowG; - break; - case DecompRow_Branch: - colNameL = "sBL(c_"; - colNameG = "sBG(c_"; - colTypeL = DecompCol_ArtForBranchL; - colTypeG = DecompCol_ArtForBranchG; - break; - case DecompRow_Convex: - colNameL = "sCL(c_"; - colNameG = "sCG(c_"; - colTypeL = DecompCol_ArtForConvexL; - colTypeG = DecompCol_ArtForConvexG; - break; - default: - throw UtilException("Bad row type", - "masterMatrixAddArtCols", "DecompAlgo"); - } - - string rowNameR; - char rowSenseR; - colIndex = masterM->getNumCols(); - vector colBeg; - vector colInd; - vector colVal; - colBeg.push_back(0); - - for (r = startRow; r < endRow; r++) { - if (hasNames) { - strIndex = UtilIntToStr(colIndex); - } +vector DecompAlgo::getDualRaysCpx(int maxNumRays) { +#ifdef DIP_HAS_CPX + bool useMultiRay = true; + if (useMultiRay) { + OsiCpxSolverInterface *siCpx = + dynamic_cast(m_masterSI); + const int m = m_masterSI->getNumRows(); + const int n = m_masterSI->getNumCols(); + const double *rowRhs = m_masterSI->getRightHandSide(); + const char *rowSense = m_masterSI->getRowSense(); + int r, b, c; + vector rays; + // Ax + Is = b + // ax <= b + // ax + s = b, s >= 0 + // ax >= b + // ax + s = b, s <= 0 + UTIL_DEBUG( + m_param.LogDebugLevel, 5, + + for (r = 0; r < m; r++) { + (*m_osLog) << "Row r: " << r << " sense: " << rowSense[r] + << " rhs: " << rowRhs[r] << endl; + }); + m_masterSI->enableSimplexInterface(false); + double *tabRhs = new double[m]; + int *basics = new int[m]; + double *yb = new double[m]; + double *bInvRow = new double[m]; + double *bInvARow = new double[n]; + // STOP ============================================ + // tabRhs and yb do NOT match up.... is this an issue? + // have to hand adjust or use tabRhs since proof is based on B-1 + // which matches up with bhead - what to do in the case of CLP? + // but, we are multiplying this by A'' later on which is based on + // original variable space, not the one adjusted by simplex - so if + // we return the dual ray directly from B-1 then do B-1A by hand - + // do we have a problem? + // need to add a check that B-1A matches my dualray.A calculation + // in generate vars... it might be ok and yb not ok, because the + // adjustments in simplex might only be related to rhs... 
+ // i don't think Osi returns tabRhs... that should be changed + CPXgetbhead(siCpx->getEnvironmentPtr(), + siCpx->getLpPtr(OsiCpxSolverInterface::KEEPCACHED_ALL), basics, + tabRhs); + // as a sanity check print out the basis status next to the yb vs tabRhs + // calculation.... let's see why and where things don't match up... + // yb, where y is a row of B-1 (note, can get from bhead?) + UTIL_DEBUG( + m_param.LogDebugLevel, 6, (*m_osLog) << "\nB-1:"; + + for (r = 0; r < m; r++) { + yb[r] = 0.0; + m_masterSI->getBInvRow(r, bInvRow); + (*m_osLog) << "\nB-1Row r: " << r << ": " << endl; + + for (b = 0; b < m; b++) { + yb[r] += bInvRow[b] * rowRhs[b]; + (*m_osLog) << setw(6) << "bind: " << setw(4) << basics[b] + << setw(12) << bInvRow[b] << " [" << setw(12) + << rowRhs[b] << "] " << setw(8) << " +=: " << setw(12) + << bInvRow[b] * rowRhs[b] << setw(8) + << " yb: " << setw(12) << yb[r] << setw(8) + << " tabRhs: " << setw(12) << tabRhs[r] << endl; + } + + if (!UtilIsZero(yb[r] - tabRhs[r])) { + (*m_osLog) << " DIFF is " << yb[r] - tabRhs[r] << endl; + } + + assert(UtilIsZero(yb[r] - tabRhs[r], 1.0e-4)); + }); + + for (r = 0; r < m; r++) { + yb[r] = 0.0; + m_masterSI->getBInvRow(r, bInvRow); + + for (b = 0; b < m; b++) { + yb[r] += bInvRow[b] * rowRhs[b]; //(B-1)_r.b + } + + if (!UtilIsZero(yb[r] - tabRhs[r])) { + (*m_osLog) << " DIFF is " << yb[r] - tabRhs[r] << endl; + (*m_osLog) << "\nB-1Row r: " << r << ": basics[r]=" << basics[r] + << endl; + yb[r] = 0.0; + + for (b = 0; b < m; b++) { + if (UtilIsZero(bInvRow[b])) { + continue; + } - if (rowType == DecompRow_Convex) { - rowSenseR = 'E';//NOTE: what if <=? - rowNameR = "convex(b_" + UtilIntToStr(r - nCoreRows) + ")"; - } else { - rowSenseR = rowSense[r]; - rowNameR = rowNames[r]; - } + yb[r] += bInvRow[b] * rowRhs[b]; + (*m_osLog) << setw(6) << "bind: " << setw(4) << basics[b] << setw(12) + << bInvRow[b] << " [" << setw(12) << rowRhs[b]; - //printf("rowSense[%d]=%c\n", r, rowSense[r]); - switch (rowSenseR) { - case 'L': - masterMatrixAddArtCol(colBeg, colInd, colVal, - 'L', r, colIndex, colTypeL, - colLB[colIndex], colUB[colIndex], - objCoeff[colIndex]); - - if (hasNames) { - colName = colNameL + strIndex + "_" + rowNameR + ")"; - colNames.push_back(colName); - } + if (basics[b] < 0) { //== -rowIndex-1 + (*m_osLog) << " sense = " << rowSense[-(basics[b] + 1)]; + } - m_artColIndToRowInd.insert(make_pair(colIndex, r)); - colIndex++; - break; - case 'G': - masterMatrixAddArtCol(colBeg, colInd, colVal, - 'G', r, colIndex, colTypeG, - colLB[colIndex], colUB[colIndex], - objCoeff[colIndex]); - - if (hasNames) { - colName = colNameG + strIndex + "_" + rowNameR + ")"; - colNames.push_back(colName); - } + (*m_osLog) << "] " << setw(8) << " +=: " << setw(12) + << bInvRow[b] * rowRhs[b] << setw(8) << " yb: " << setw(12) + << yb[r] << setw(8) << " tabRhs: " << setw(12) << tabRhs[r] + << endl; + } + } - m_artColIndToRowInd.insert(make_pair(colIndex, r)); - colIndex++; - break; - case 'E': - masterMatrixAddArtCol(colBeg, colInd, colVal, - 'L', r, colIndex, colTypeL, - colLB[colIndex], colUB[colIndex], - objCoeff[colIndex]); - - if (hasNames) { - colName = colNameL + strIndex + "_" + rowNameR + ")"; - colNames.push_back(colName); - } + // assert(UtilIsZero(yb[r] - tabRhs[r], 1.0e-4)); + } + + for (r = 0; r < m; r++) { + if (UtilIsZero(tabRhs[r])) { + continue; + } - m_artColIndToRowInd.insert(make_pair(colIndex, r)); - colIndex++; - masterMatrixAddArtCol(colBeg, colInd, colVal, - 'G', r, colIndex, colTypeG, - colLB[colIndex], colUB[colIndex], - 
objCoeff[colIndex]); + // all pos case? if yb < 0 (then we want to minimize B-1Ax, x in P') + // all neg case? if yb > 0 (then we want to maximize B-1Ax, x in P') + UTIL_DEBUG(m_param.LogDebugLevel, 6, (*m_osLog) << "\nB-1A:";); - if (hasNames) { - colName = colNameG + strIndex + "_" + rowNameR + ")"; - colNames.push_back(colName); - } + if (tabRhs[r] > 0) { // instead of yb + // Ted also checks that it is a slack var here - why? + bool allneg = true; + m_masterSI->getBInvARow(r, bInvARow); + UTIL_DEBUG(m_param.LogDebugLevel, 6, + (*m_osLog) << "\nB-1ARow r: " << r << ": ";); + allneg = true; - m_artColIndToRowInd.insert(make_pair(colIndex, r)); - colIndex++; - break; - default: - throw UtilException("Range constraints are not yet supported. Please break up your range constraints into two constraints.", - "masterMatrixAddArtCols", "DecompAlgo"); - } - } + for (c = 0; c < n; c++) { + UTIL_DEBUG(m_param.LogDebugLevel, 6, + (*m_osLog) << bInvARow[c] << " ";); - masterM->appendCols(static_cast(colBeg.size()) - 1, - &colBeg[0], - &colInd[0], - &colVal[0]); + if (bInvARow[c] >= DecompEpsilon) { + allneg = false; + break; + } + } + + if (allneg) { + UTIL_DEBUG(m_param.LogDebugLevel, 6, (*m_osLog) << " ---> allneg";); + double *dualRay = new double[m]; + m_masterSI->getBInvRow(r, dualRay); + transform(dualRay, dualRay + m, dualRay, negate()); + rays.push_back(dualRay); + } + } else { + bool allpos = true; + m_masterSI->getBInvARow(r, bInvARow); + UTIL_DEBUG(m_param.LogDebugLevel, 6, + (*m_osLog) << "\nB-1ARow r: " << r << ": ";); + allpos = true; + + for (c = 0; c < n; c++) { + UTIL_DEBUG(m_param.LogDebugLevel, 6, + (*m_osLog) << bInvARow[c] << " ";); + + if (bInvARow[c] <= -DecompEpsilon) { + allpos = false; + break; + } + } + + if (allpos) { + UTIL_DEBUG(m_param.LogDebugLevel, 6, (*m_osLog) << " ---> allpos";); + double *dualRay = new double[m]; + m_masterSI->getBInvRow(r, dualRay); + rays.push_back(dualRay); + } + } + } + + UTIL_DELARR(tabRhs); + UTIL_DELARR(basics); + UTIL_DELARR(yb); + UTIL_DELARR(bInvRow); + UTIL_DELARR(bInvARow); + m_masterSI->disableSimplexInterface(); + printf("rays.size = %d\n", static_cast(rays.size())); + + if (rays.size() <= 0) { + printf("NO RAYS using standard lookup - try dualfarkas\n"); + double proof_p; + double *dualRay = new double[m]; + CPXdualfarkas(siCpx->getEnvironmentPtr(), + siCpx->getLpPtr(OsiCpxSolverInterface::KEEPCACHED_ALL), + dualRay, &proof_p); + (*m_osLog) << "After dual farkas proof_p = " << proof_p << "\n"; + transform(dualRay, dualRay + m, dualRay, negate()); + + for (int i = 0; i < m; i++) { + printf("dualRay[%d]: %g\n", i, dualRay[i]); + } + + rays.push_back(dualRay); + } + + // NOTE: you will have dup rays here - need to filter out... 
+ printf("rays.size = %d", static_cast(rays.size())); + + for (size_t i = 0; i < rays.size(); i++) { + bool isProof = isDualRayInfProof( + rays[i], m_masterSI->getMatrixByRow(), m_masterSI->getColLower(), + m_masterSI->getColUpper(), m_masterSI->getRightHandSide(), NULL); + + if (!isProof) { + isDualRayInfProof(rays[i], m_masterSI->getMatrixByRow(), + m_masterSI->getColLower(), m_masterSI->getColUpper(), + m_masterSI->getRightHandSide(), m_osLog); + } + + assert(isProof); + } + + assert(rays.size() > 0); + return rays; + } else { // useMultiRay == false + // TEST THIS + OsiCpxSolverInterface *siCpx = + dynamic_cast(m_masterSI); + const int m = m_masterSI->getNumRows(); + const int n = m_masterSI->getNumCols(); + double proof_p; + bool isProof; + vector rays; + double *ray = new double[m]; + int err = + CPXdualfarkas(siCpx->getEnvironmentPtr(), + siCpx->getLpPtr(OsiCpxSolverInterface::KEEPCACHED_ALL), + ray, &proof_p); // proof_p + + if (err) { + cerr << "CPXdualfarkas returns err " << err << endl; + abort(); + } + + cout << "After dual farkas proof_p = " << proof_p << "\n"; + // We have to flip because in this context we want to max B-1Ax, x in P' + double *pneg = new double[m]; + transform(ray, ray + m, pneg, negate()); + rays.push_back(pneg); +#if 1 + UTIL_DEBUG( + m_app->m_param.LogDebugLevel, 5, + bool isProof = isDualRayInfProof( + rays[0], m_masterSI->getMatrixByRow(), m_masterSI->getColLower(), + m_masterSI->getColUpper(), m_masterSI->getRightHandSide(), NULL); + printf("isProof = %d\n", isProof); printBasisInfo(m_masterSI, m_osLog); + fflush(stdout); + + if (!isProof) { + isDualRayInfProof(ray, m_masterSI->getMatrixByRow(), + m_masterSI->getColLower(), + m_masterSI->getColUpper(), + m_masterSI->getRightHandSide(), m_osLog); + printBasisInfo(m_masterSI, m_osLog); + fflush(stdout); + }); + assert(isDualRayInfProof( + ray, m_masterSI->getMatrixByRow(), m_masterSI->getColLower(), + m_masterSI->getColUpper(), m_masterSI->getRightHandSide(), NULL)); +#endif + return rays; + } +#else + throw UtilException("CPLEX function called when CPLEX is not available", + "getDualRaysCpx", "DecompAlgo"); +#endif } //===========================================================================// -void DecompAlgo::coreMatrixAppendColBounds() -{ - //--- - //--- In order to implement simple branching, we are going to - //--- treat all column bounds as explicit constraints. Then branching - //--- for DW can be done in the same way it is done for regular CPM. - //--- - //--- THINK: this needs some investigation. In some cases, this is not a - //--- great idea for performance. But, the advantage is in ease of - //--- implementation. The user does not need to do any sort of specialzed - //--- branching for DW. - //--- - //--- NOTE: this idea won't work for identical subproblem case - //--- - int i, j; - char sense; - double rhs; - bool doNames = true; //TODO: make an option - DecompConstraintSet* modelCore = m_modelCore.getModel(); - const int nIntVars = modelCore->getNumInts(); - const double* colLBCore = modelCore->getColLB(); - const double* colUBCore = modelCore->getColUB(); - const int* integerVars = modelCore->getIntegerVars(); - vector& colNames = modelCore->getColNamesMutable(); - vector& rowNames = modelCore->getRowNamesMutable(); - //TODO: use mem pool? or just create block (identity) if doing PC? 
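// [Editorial sketch - not part of this patch or of DIP's sources.]
// coreMatrixAppendColBounds (whose reformatting continues below) implements
// "simple branching" by appending, for every integer variable, a singleton
// row x_j <= u_j and a singleton row x_j >= l_j to A'', so that DW branching
// can reuse the ordinary row machinery. A sketch of generating those
// singleton rows, with hypothetical names:
#include <vector>

struct SingletonRow { int col; double coef; char sense; double rhs; };

// For each integer variable j build the pair of bound rows
//   x_j <= ub[j]  (sense 'L')   and   x_j >= lb[j]  (sense 'G'),
// upper-bound rows first, mirroring the layout used in the code below.
static std::vector<SingletonRow> boundRows(const std::vector<int>&    integerVars,
                                           const std::vector<double>& lb,
                                           const std::vector<double>& ub)
{
   std::vector<SingletonRow> rows;
   rows.reserve(2 * integerVars.size());
   for (int j : integerVars) {
      rows.push_back({j, 1.0, 'L', ub[j]});   // upper-bound row
   }
   for (int j : integerVars) {
      rows.push_back({j, 1.0, 'G', lb[j]});   // lower-bound row
   }
   return rows;
}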
- const int numRows = 2 * nIntVars; - int* rowStarts = new int[numRows + 1]; - int* rowInd = new int[numRows]; - double* rowEls = new double[numRows]; - assert(rowStarts && rowInd && rowEls); - //--- - //--- first nColsCore rows are x <= u - //--- second nColsCore rows are x >= l - //--- - rowStarts[0] = 0; - - for (i = 0; i < numRows; i++) { - if (i < nIntVars) { - j = integerVars[i]; - //x <= u - rowStarts[i + 1] = rowStarts[i] + 1; - rowInd[i] = j; - rowEls[i] = 1.0; +// STOP - try this... +vector DecompAlgo::getDualRaysOsi(int maxNumRays) { + if (m_param.UseMultiRay) { + const int m = m_masterSI->getNumRows(); + const int n = m_masterSI->getNumCols(); + const double *rowRhs = m_masterSI->getRightHandSide(); + const char *rowSense = m_masterSI->getRowSense(); + int i, r, b, c; + vector rays; + UtilPrintFuncBegin(m_osLog, m_classTag, "getDualRays()", + m_param.LogDebugLevel, 2); + UTIL_DEBUG( + m_param.LogDebugLevel, 5, + + for (r = 0; r < m; r++) { + (*m_osLog) << "Row r: " << r << " sense: " << rowSense[r] + << " rhs: " << rowRhs[r] << endl; + }); + m_masterSI->enableSimplexInterface(false); + // with simplex interface, this is slightly different... + const double *primSolution = m_masterSI->getColSolution(); + const double *rowAct = m_masterSI->getRowActivity(); //==slacks? + double *tabRhs = new double[m]; // osi_clp does not give this? + // B-1b just equals x, but what if art column then is slack var + int *basics = new int[m]; + double *yb = new double[m]; + double *bInvRow = new double[m]; + double *bInvARow = new double[n]; + m_masterSI->getBasics(basics); + + for (r = 0; r < m; r++) { + i = basics[r]; + + if (i < n) { + tabRhs[r] = primSolution[i]; // should == B-1b + // printf("tabRhs[c:%d]: %g\n", i, tabRhs[r]); } else { - //x >= l - j = integerVars[i - nIntVars]; - rowStarts[i + 1] = rowStarts[i] + 1; - rowInd[i] = j; - rowEls[i] = 1.0; - } - } - - //--- - //--- append as actual rows to A'' (duals used in pricing) - //--- - modelCore->M->appendRows(numRows, rowStarts, rowInd, rowEls); - - //--- - //--- now convert to sense for hashing - //--- - for (i = 0; i < numRows; i++) { - if (i < nIntVars) { - //x <= u - j = modelCore->integerVars[i]; - modelCore->rowLB.push_back(-m_infinity); - modelCore->rowUB.push_back(colUBCore[j]); - sense = 'L'; - rhs = colUBCore[j]; - - if (doNames) { - string rowName = "ub(" + colNames[j] + ")"; - rowNames.push_back(rowName); - } + // this really should be slack vars... + // assuming clp does Ax-Is = b, s = ax-b ??? nope... + // tabRhs[r] = rowAct[i - n] - rowRhs[i - n]; + tabRhs[r] = rowRhs[i - n] - rowAct[i - n]; + // printf("tabRhs[r:%d]: %g [act: %g rhs: %g sense: %c]\n", + // i-n, tabRhs[r], rowAct[i-n], rowRhs[i-n], rowSense[i-n]); + } + } + + // as a sanity check print out the basis status next to the yb vs tabRhs + // calculation.... let's see why and where things don't match up... + // yb, where y is a row of B-1 (note, can get from bhead?) + // B-1b is tab rhs, is this equivalent to x for struct columns? 
+ UTIL_DEBUG( + m_param.LogDebugLevel, 6, (*m_osLog) << "\nB-1:"; + + for (r = 0; r < m; r++) { + if (UtilIsZero(tabRhs[r])) { + continue; + } + + yb[r] = 0.0; + m_masterSI->getBInvRow(r, bInvRow); + (*m_osLog) << "\nB-1Row r: " << r << ": " << endl; + + for (b = 0; b < m; b++) { + yb[r] += bInvRow[b] * rowRhs[b]; + (*m_osLog) << setw(6) << "bind: " << setw(4) << basics[b] + << setw(12) << bInvRow[b] << " [" << setw(12) + << rowRhs[b] << "] " << setw(8) << " +=: " << setw(12) + << bInvRow[b] * rowRhs[b] << setw(8) + << " yb: " << setw(12) << yb[r] << setw(8) + << " tabRhs: " << setw(12) << tabRhs[r] << endl; + } + + if (!UtilIsZero(yb[r] - tabRhs[r])) { + (*m_osLog) << " DIFF is " << yb[r] - tabRhs[r] << endl; + } + + assert(UtilIsZero(yb[r] - tabRhs[r], 1.0e-4)); + }); + + for (r = 0; r < m; r++) { + if (UtilIsZero(tabRhs[r])) { + continue; + } + + // all pos case? if yb < 0 (then we want to minimize B-1Ax, x in P') + // all neg case? if yb > 0 (then we want to maximize B-1Ax, x in P') + if (tabRhs[r] > 0) { // instead of yb + // Ted also checks that it is a slack var here - why? + bool allneg = true; + // not getting back slacks part here... need? + m_masterSI->getBInvARow(r, bInvARow); + UTIL_DEBUG(m_param.LogDebugLevel, 6, + (*m_osLog) << "B-1ARow r: " << r << ": ";); + allneg = true; + + for (c = 0; c < n; c++) { + UTIL_DEBUG(m_param.LogDebugLevel, 6, + (*m_osLog) << bInvARow[c] << " ";); + + if (bInvARow[c] >= DecompEpsilon) { + allneg = false; + break; + } + } + + if (allneg) { + UTIL_DEBUG(m_param.LogDebugLevel, 6, (*m_osLog) << " ---> allneg";); + double *dualRay = new double[m]; + m_masterSI->getBInvRow(r, dualRay); + transform(dualRay, dualRay + m, dualRay, negate()); + rays.push_back(dualRay); + } } else { - //x >= l - j = modelCore->integerVars[i - nIntVars]; - modelCore->rowLB.push_back(colLBCore[j]); - modelCore->rowUB.push_back(m_infinity); - sense = 'G'; - rhs = colLBCore[j]; - - if (doNames) { - string rowName = "lb(" + colNames[j] + ")"; - rowNames.push_back(rowName); - } - } + bool allpos = true; + m_masterSI->getBInvARow(r, bInvARow); + UTIL_DEBUG(m_param.LogDebugLevel, 6, + (*m_osLog) << "B-1ARow r: " << r << ": ";); + allpos = true; + + for (c = 0; c < n; c++) { + UTIL_DEBUG(m_param.LogDebugLevel, 6, + (*m_osLog) << bInvARow[c] << " ";); + + if (bInvARow[c] <= -DecompEpsilon) { + allpos = false; + break; + } + } + + if (allpos) { + UTIL_DEBUG(m_param.LogDebugLevel, 6, (*m_osLog) << " ---> allpos";); + double *dualRay = new double[m]; + m_masterSI->getBInvRow(r, dualRay); + rays.push_back(dualRay); + } + } + + UTIL_DEBUG(m_param.LogDebugLevel, 6, (*m_osLog) << endl;); + } + + UTIL_DELARR(basics); + UTIL_DELARR(yb); + UTIL_DELARR(bInvRow); + UTIL_DELARR(bInvARow); + m_masterSI->disableSimplexInterface(); + /* + if(rays.size() <= 0){ + double proof_p; + double * dualRay = new double[m]; + CPXdualfarkas(siCpx->getEnvironmentPtr(), + siCpx->getLpPtr(OsiCpxSolverInterface::KEEPCACHED_ALL), + dualRay, &proof_p); + (*m_osLog) << "After dual farkas proof_p = " << proof_p << "\n"; + transform(dualRay, dualRay + m, dualRay, negate()); + for(int i = 0; i < m; i++){ + printf("dualRay[%d]: %g\n", i, dualRay[i]); + } + rays.push_back(dualRay); + } + */ + // NOTE: you will have dup rays here - need to filter out... 
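The loop above keeps a row of B^-1 as a candidate dual ray whenever the tableau rhs is positive and every entry of the transformed row B^-1 A is nonpositive (or the mirror-image case). A tiny sketch of why that sign pattern certifies infeasibility, using made-up numbers and an illustrative tolerance in place of DecompEpsilon.

#include <cstdio>

// Sketch with made-up numbers: if the transformed row (B^-1 A)_r has only
// nonpositive coefficients while its tableau rhs is positive, then for any
// x >= 0 the row reads  sum_j a_j * x_j <= 0 < tabRhs_r, so no feasible x
// exists and the matching row of B^-1 (possibly negated, as in the code
// above) is a dual ray.
int main() {
  const int n = 3;
  double bInvARow[n] = {-1.0, 0.0, -0.5};
  double tabRhs = 2.0;

  bool allNonPos = true;
  for (int c = 0; c < n; c++) {
    if (bInvARow[c] > 1.0e-6) { // illustrative tolerance
      allNonPos = false;
      break;
    }
  }

  if (allNonPos && tabRhs > 0) {
    printf("row can never reach its rhs %g with x >= 0: "
           "infeasibility certificate found\n", tabRhs);
  }
  return 0;
}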
+ UTIL_DEBUG(m_param.LogDebugLevel, 5, + (*m_osLog) << "Number of Rays = " << rays.size() << endl;); + + for (int i = 0; i < (int)rays.size(); i++) { + bool isProof = isDualRayInfProof( + rays[i], m_masterSI->getMatrixByRow(), m_masterSI->getColLower(), + m_masterSI->getColUpper(), m_masterSI->getRightHandSide(), NULL); + + if (!isProof) { + isDualRayInfProof(rays[i], m_masterSI->getMatrixByRow(), + m_masterSI->getColLower(), m_masterSI->getColUpper(), + m_masterSI->getRightHandSide(), m_osLog); + } + + assert(isProof); + } + + assert(rays.size() > 0); + UTIL_DELARR(tabRhs); + UtilPrintFuncEnd(m_osLog, m_classTag, "getDualRays()", + m_param.LogDebugLevel, 2); + return rays; + } else { // m_param.UseMultiRay == false + + UtilPrintFuncBegin(m_osLog, m_classTag, "getDualRays()", + m_param.LogDebugLevel, 2); + vector raysT = m_masterSI->getDualRays(maxNumRays); + const double *rayT = raysT[0]; + assert(rayT); + // stop + // what is yb, that will tell me if i want to opt over uA or -uA + // y^T b + int i; + const CoinPackedMatrix *rowMatrix = m_masterSI->getMatrixByRow(); + const double *rowRhs = m_masterSI->getRightHandSide(); + const int m = rowMatrix->getNumRows(); + double yb = 0.0; + + for (i = 0; i < m; i++) { + yb += rayT[i] * rowRhs[i]; // safe to use rowRhs? or flips in tab going on + } + + (*m_osLog) << " yb = " << yb << endl; + // need tabRhs if doing this way? + // see Clp/examples/decompose.cpp + // he flips the infeasibility ray (always...) + //--- yA >= 0, yb < 0, or --> find a yAs <= 0 (min) + //--- yA <= 0, yb > 0 ?? --> find a yAs >= 0 (max <--> -min) + vector rays; + + if (yb > 0) { + double *pneg = new double[m]; + transform(rayT, rayT + m, pneg, negate()); + rays.push_back(pneg); + } else { + rays.push_back(raysT[0]); + } - modelCore->rowRhs.push_back(rhs); - modelCore->rowSense.push_back(sense); - assert(sense != 'R'); - assert(sense != 'N'); - string rowHash = UtilCreateStringHash(1, - rowInd + i, - rowEls + i, - sense, rhs, - m_infinity); - modelCore->rowHash.push_back(rowHash); - } - - UTIL_DELARR(rowStarts); - UTIL_DELARR(rowInd); - UTIL_DELARR(rowEls); +#if 1 + UTIL_DEBUG( + m_app->m_param.LogDebugLevel, 5, const double *ray = rays[0]; + assert(ray); + bool isProof = isDualRayInfProof( + ray, m_masterSI->getMatrixByRow(), m_masterSI->getColLower(), + m_masterSI->getColUpper(), m_masterSI->getRightHandSide(), NULL); + printf("isProof = %d\n", isProof); fflush(stdout); + + if (!isProof) { + isDualRayInfProof(ray, m_masterSI->getMatrixByRow(), + m_masterSI->getColLower(), + m_masterSI->getColUpper(), + m_masterSI->getRightHandSide(), m_osLog); + printBasisInfo(m_masterSI, m_osLog); + fflush(stdout); + } assert(isDualRayInfProof(ray, m_masterSI->getMatrixByRow(), + m_masterSI->getColLower(), + m_masterSI->getColUpper(), + m_masterSI->getRightHandSide(), NULL));); + ; +#endif + UtilPrintFuncEnd(m_osLog, m_classTag, "getDualRays()", + m_param.LogDebugLevel, 2); + return rays; + } } //===========================================================================// -void DecompAlgo::breakOutPartial(const double* xHat, - DecompVarList& newVars, - const double intTol) -{ - if (m_numConvexCon <= 1) { - return; - } - - UtilPrintFuncBegin(m_osLog, m_classTag, - "breakOutPartial()", m_param.LogDebugLevel, 1); - //TODO: what if modelRelax is not defined? 
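The rays produced here are checked with isDualRayInfProof (above for the multi-ray case, just below for the single-ray case). A rough standalone sketch of that kind of Farkas check, not DIP's implementation: assuming all rows are >= rows and the candidate ray y is nonnegative, y proves { Ax >= b, l <= x <= u } infeasible when y^T A x cannot reach y^T b over the bounds. The dense matrix layout and the data are made up to keep the sketch short.

#include <cstdio>

// For y >= 0 and rows Ax >= b, any feasible x satisfies y^T A x >= y^T b.
// The combined row w = y^T A can reach at most
//   maxAct = sum_j ( w_j > 0 ? w_j * u_j : w_j * l_j )
// over l <= x <= u, so maxAct < y^T b proves the system infeasible.
bool isInfeasibilityProof(int m, int n, const double *A /* m x n, row-major */,
                          const double *b, const double *l, const double *u,
                          const double *y, double tol = 1.0e-8) {
  double yb = 0.0;
  for (int i = 0; i < m; i++) yb += y[i] * b[i];

  double maxAct = 0.0;
  for (int j = 0; j < n; j++) {
    double wj = 0.0;
    for (int i = 0; i < m; i++) wj += y[i] * A[i * n + j];
    maxAct += (wj > 0.0) ? wj * u[j] : wj * l[j];
  }
  return maxAct < yb - tol;
}

int main() {
  // x1 + x2 >= 4 and -x1 - x2 >= -1 with 0 <= x <= 10 is infeasible;
  // y = (1,1) combines the two rows into 0 >= 3.
  double A[4] = {1, 1, -1, -1};
  double b[2] = {4, -1};
  double l[2] = {0, 0}, u[2] = {10, 10};
  double y[2] = {1, 1};
  printf("proof = %d\n", isInfeasibilityProof(2, 2, A, b, l, u, y)); // 1
  return 0;
}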
- //TODO: if lambda=1, don't bother, it means the partial - // is already there - DecompConstraintSet* modelCore = m_modelCore.getModel(); - const char* integerMark = modelCore->getIntegerMark(); - //--- - //--- for each block, check to see if active integer columns - //--- are integral - if so, use these as candidate columns - //--- - const double* objCoeff = getOrigObjective(); - map::iterator mit; - vector::const_iterator vit; - - for (mit = m_modelRelax.begin(); mit != m_modelRelax.end(); mit++) { - DecompSubModel& subModel = (*mit).second; - DecompConstraintSet* model = subModel.getModel(); - int b = subModel.getBlockId(); - const vector& activeCols = model->getActiveColumns(); - bool blockFeasible = true; +int DecompAlgo::generateInitVars(DecompVarList &initVars) { + int c, attempts; + double aveC; + DecompConstraintSet *modelCore = m_modelCore.getModel(); + const int limit = m_param.InitVarsLimit; + // Need to get the different strategies for generating initial Vars + const int limit2 = 2 * limit; + // const int limit2 = 1; + const int nCoreCols = modelCore->getNumCols(); + const double *objCoeff = getOrigObjective(); + double timeLimit; + + UtilPrintFuncBegin(m_osLog, m_classTag, "generateInitVars()", + m_param.LogDebugLevel, 2); + m_function = DecompFuncGenerateInitVars; + + //--- + //--- APP: create an initial set of points F'[0] subseteq F' + //--- The base implementation of this function does nothing. + //--- This is the user's chance to implement something application + //--- specific. + //--- + + m_app->generateInitVars(initVars); + + // TODO: think - if user gives a partial feasible solution + // and this part is not run then PI master can be infeasible + // which will cause an issue + // TODO: PI master cannot be infeasible if we use artificials on + // convexity constraints - which we already have - so how is + // that possible? + // Should probably have this on irregardless of what we get from user. + // Another reason this has to run is because if user gives a solution + // with some master-only vars set to their LB=0. This will not be + // added as 0-columns. So, will have convexity constraints that are + // 0=1. 
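The loop that follows seeds the restricted master by re-solving each subproblem with a perturbed objective c + eps, where eps is drawn uniformly from [-aveC, aveC]. A minimal self-contained sketch of that perturbation step, with <random> standing in for DIP's srand/UtilURand pair, whose exact semantics are assumed here.

#include <cstdio>
#include <numeric>
#include <random>
#include <vector>

int main() {
  std::vector<double> objCoeff = {3.0, -1.0, 2.0, 0.5};
  double aveC = std::accumulate(objCoeff.begin(), objCoeff.end(), 0.0) /
                objCoeff.size();

  for (int attempt = 0; attempt < 3; attempt++) {
    std::mt19937 gen(attempt); // reseed per attempt, as the loop below reseeds srand
    std::uniform_real_distribution<double> eps(-aveC, aveC);
    std::vector<double> costeps(objCoeff.size());
    for (size_t c = 0; c < objCoeff.size(); c++) {
      // attempt 0 keeps the original costs, later attempts perturb them
      costeps[c] = objCoeff[c] + (attempt == 0 ? 0.0 : eps(gen));
    }
    printf("attempt %d: costeps[0] = %g\n", attempt, costeps[0]);
    // each costeps vector would be handed to solveRelaxed(...) as the
    // (fake) reduced cost to obtain one extreme point of the subproblem
  }
  return 0;
}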
+ + int nInitVars = static_cast(initVars.size()); + UTIL_DEBUG(m_param.LogDebugLevel, 4, + (*m_osLog) << "nInitVars from app = " << nInitVars + << " userLimit = " << limit << endl;); + + // nInitVars = 0;//THINK + if (nInitVars < limit) { + //--- + //--- create an initial set of points F'[0] subseteq F' + //--- randomly by solving zSP(c + eps), eps = U[0,ave(c)] + //--- + //--- + //--- NOTE: in GAP case, subproblem is knapsack, if use orig cost + //--- all cost > 0, so will get NULL column, later on reduced costs + //--- will give negative values, so this is not a problem + //--- + double *costeps = new double[nCoreCols]; + assert(objCoeff); + aveC = UtilAve(objCoeff, nCoreCols); + attempts = 0; + DecompSolverResult subprobResult(m_infinity); // nCoreCols); + + while ((nInitVars < limit) && (attempts < limit2)) { + //--- + //--- perturb the cost vector + //--- + srand(attempts); + + for (c = 0; c < nCoreCols; c++) { + double r = 0.0; + + if (attempts != 0) { + r = UtilURand(-aveC, aveC); + } + + costeps[c] = objCoeff[c] + r; + } + + //--- + //--- APP: solve zSP(c + eps) + //--- + map::iterator mit; + double sumInitLB = 0.0; // like LR with 0 dual (only first pass) + for (mit = m_modelRelax.begin(); mit != m_modelRelax.end(); mit++) { + DecompSubModel &subModel = (*mit).second; + timeLimit = max(m_param.SubProbTimeLimitExact - + m_stats.timerOverall.getRealTime(), + 0.0); + solveRelaxed(costeps, // reduced cost (fake here) + objCoeff, // original cost vector + 9e15, // alpha (fake here) + nCoreCols, // num core columns + false, // isNested + subModel, + &subprobResult, // results + initVars, // var list to populate + timeLimit); + + if (attempts == 0) { + // TODO: have to treat masterOnly differently + // we don't correctly populate LB/UB in + // subprobResult object - so contribution is wrong + sumInitLB += subprobResult.m_objLB; + // printf("ThisLB = %g, sumInitLB = %g\n", + // subprobResult.m_objLB, sumInitLB); + } + } + + map>::iterator mivt; + vector::iterator vit; + + for (mivt = m_modelRelaxNest.begin(); mivt != m_modelRelaxNest.end(); + mivt++) { + for (vit = (*mivt).second.begin(); vit != (*mivt).second.end(); vit++) { + timeLimit = max(m_param.SubProbTimeLimitExact - + m_stats.timerOverall.getRealTime(), + 0.0); + solveRelaxed(costeps, // reduced cost (fake here) + objCoeff, // original cost vector + 9e15, // alpha (fake here) + nCoreCols, // num core columns + true, // isNested + (*vit), + &subprobResult, // results + initVars, // var list to populate + timeLimit); + } + } + + //--- + //--- THINK: check for duplicate variables - done in solveRelaxed + //--- don't assume the user does the duplicate check - should be + //--- done by col pool also + //--- + nInitVars = static_cast(initVars.size()); + attempts++; + } + + UTIL_DEBUG(m_param.LogDebugLevel, 4, + (*m_osLog) << "\nm_varsThisCall = " << initVars.size() << "\n";); + //--- + //--- TODO: solve a few iterations of subgradient to get init vars? + //--- + //--- TODO: put them in the var pool?? 
+ //--- + UTIL_DELARR(costeps); // TODO: use mem-pool + } + + //--- + //--- generate init vars by solving root LP and + //--- running DC at each iteration + //--- + if (m_param.InitVarsWithCutDC) { + printf("======= BEGIN Gen Init Vars - call CPM process root node\n"); + DecompAlgoC cpm(m_app, *m_utilParam); + cpm.m_param.CutDC = 2; + cpm.processNode(0, -m_infinity, m_infinity); + //--- + //--- copy the vars generated in passes of DC into initVars + //--- to warm-start DW master formulation + //--- + // m_vars.insert(m_vars.end(), cpm.m_vars.begin(), cpm.m_vars.end()); + initVars.splice(initVars.end(), cpm.m_vars); + printf("VARS moved into PC object initVars.size=%d\n", + static_cast(initVars.size())); + // printVars(m_osLog);//use this to warm start DW + // a hidden advantage of decomp in DC? + DecompSolution *bestSol = NULL; + vector::iterator it; + // there will be just one, i think, just need to copy it over here + double thisBound; + double bestBoundUB = m_nodeStats.objBest.second; + + for (it = cpm.m_xhatIPFeas.begin(); it != cpm.m_xhatIPFeas.end(); it++) { + thisBound = (*it)->getQuality(); + printf("From init vars, IP Feasible with Quality = %g\n", thisBound); + + if ((*it)->getQuality() <= bestBoundUB) { + bestBoundUB = (*it)->getQuality(); + bestSol = (*it); + } + } + + // need to make copy of solution, since D.m_xhatIpFeas goes out of scope + if (bestSol) { + DecompSolution *bestSolCp = new DecompSolution(*bestSol); + m_xhatIPFeas.push_back(bestSolCp); + setObjBoundIP(bestSolCp->getQuality()); + m_xhatIPBest = bestSolCp; + m_xhatIPBest->print(); + } + + printf("======= END Gen Init Vars - call CPM process root node\n"); + } + + if (m_param.InitVarsWithIP) { + printf("======= BEGIN Gen Init Vars - call Direct IP solver\n"); + DecompAlgoC direct(m_app, *m_utilParam); + DecompSolverResult *result = NULL; + double oldSetting = m_param.TimeLimit; + m_param.TimeLimit = m_param.InitVarsWithIPTimeLimit; + result = direct.solveDirect(); + m_param.TimeLimit = oldSetting; + + if (result->m_nSolutions) { + //--- + //--- if an incumbent was found, create a var(s) from it + //--- + // TODO: safe to assume 0th is the best + const double *solution = result->getSolution(0); + const DecompVarType varType = + result->m_isUnbounded ? 
DecompVar_Ray : DecompVar_Point; - for (vit = activeCols.begin(); vit != activeCols.end(); vit++) { - if (integerMark[*vit] != 'I') { - continue; - } + if (m_numConvexCon == 1) { + DecompVar *directVar = + new DecompVar(nCoreCols, solution, 0.0, result->m_objUB, varType); + initVars.push_back(directVar); + } else { + map::iterator mid; + + for (mid = m_modelRelax.begin(); mid != m_modelRelax.end(); mid++) { + int blockId = (*mid).first; + DecompSubModel &modelRelax = (*mid).second; + vector &activeColumns = modelRelax.getModel()->activeColumns; + vector ind; + vector els; + double origCost = 0.0; + vector::iterator it; + + for (it = activeColumns.begin(); it != activeColumns.end(); it++) { + if (!UtilIsZero(solution[*it])) { + ind.push_back(*it); + els.push_back(solution[*it]); + origCost += objCoeff[*it] * solution[*it]; + } + } - if (!(UtilIsIntegral(xHat[*vit], intTol))) { - blockFeasible = false; - break; - } + DecompVar *directVar = + new DecompVar(ind, els, 0.0, origCost, varType); + directVar->setBlockId(blockId); + initVars.push_back(directVar); + } } - if (blockFeasible) { - vector ind; - vector els; - double origCost = 0.0; - - for (vit = activeCols.begin(); vit != activeCols.end(); vit++) { - if (!UtilIsZero(xHat[*vit])) { - ind.push_back(*vit); - els.push_back(xHat[*vit]); - origCost += objCoeff[*vit]; - } - } + //--- + //--- update the upper bound + //--- + double bestBoundUB = m_nodeStats.objBest.second; - if (ind.size() > 0) { //THINK: allow 0-cols?? - DecompVar* var = new DecompVar(ind, els, -1.0, origCost); - var->setBlockId(b); - newVars.push_back(var); - } + if (result->m_objUB < bestBoundUB) { + DecompSolution *directSol = + new DecompSolution(nCoreCols, solution, result->m_objUB); + m_xhatIPFeas.push_back(directSol); + m_xhatIPBest = directSol; + setObjBoundIP(result->m_objUB); } - } + } + + printf("======= END Gen Init Vars - call Direct IP solver\n"); + } + + //--- + //--- check init vars for incumbent + //--- + if (m_numConvexCon == 1) { + DecompVarList::iterator vli; - //printf("newVars = %d\n", newVars.size()); - UtilPrintFuncEnd(m_osLog, m_classTag, - "breakOutPartial()", m_param.LogDebugLevel, 1); + for (vli = initVars.begin(); vli != initVars.end(); vli++) { + //--- + //--- unlikey to happen - but we should check ALL columns + //--- to see if they are IP feasible + //--- + (*vli)->fillDenseArr(modelCore->getNumCols(), m_memPool.dblArrNCoreCols); + + if (isIPFeasible(m_memPool.dblArrNCoreCols)) { + if (m_app->APPisUserFeasible(m_memPool.dblArrNCoreCols, + modelCore->getNumCols(), + m_param.TolZero)) { + DecompSolution *decompSol = new DecompSolution( + modelCore->getNumCols(), m_memPool.dblArrNCoreCols, + (*vli)->getOriginalCost()); + m_xhatIPBest = decompSol; + m_xhatIPFeas.push_back(decompSol); + // printf("var is ip feas with obj = %g\n", + // (*vli)->getOriginalCost()); + setObjBoundIP((*vli)->getOriginalCost()); + } + } + } + } + + //--- + //--- this will update the global UB before we start processing + //--- if we were lucky enough to find an incumbent in init vars + //--- + // setCutoffUB(getCutoffUB()); + m_function = DecompFuncGeneric; + UtilPrintFuncEnd(m_osLog, m_classTag, "generateInitVars()", + m_param.LogDebugLevel, 2); + nInitVars = static_cast(initVars.size()); + return nInitVars; } //===========================================================================// -DecompStatus DecompAlgo::processNode(const AlpsDecompTreeNode* node, - const double globalLB, - const double globalUB) -{ - if (node == NULL) { - throw UtilException("NULL node being 
processed.", "processNode", - "DecompAlgo"); - } - - m_curNode = node; - int nodeIndex = node->getIndex(); - double mostNegRC = 0.0; - DecompConstraintSet* modelCore = m_modelCore.getModel(); - m_stabEpsilon = 0.0; - UtilPrintFuncBegin(m_osLog, m_classTag, - "processNode()", m_param.LogDebugLevel, 1); - - if (m_algo == RELAX_AND_CUT) { - throw UtilException("In this version of DIP, Relax and Cut is currently disabled.", - "processNode", "DecompAlgo"); - } - - //--- - //--- print the global gap - //--- - UTIL_MSG(m_param.LogLevel, 2, - double gap = UtilCalculateGap(globalLB, globalUB, m_infinity); - (*m_osLog) - << "Process Node " << nodeIndex - << " (algo = " << DecompAlgoStr[m_algo] - << ", phaseLast = " << DecompPhaseStr[m_phaseLast] - << ") gLB = " << UtilDblToStr(globalLB) - << " gUB = " << UtilDblToStr(globalUB) - << " gap = " << UtilDblToStr(gap, 5) - << " time = " << UtilDblToStr(globalTimer.getRealTime(), 3) - << endl; - ); - //--- - //--- init status - //--- - m_useInitLpDuals = true; - m_status = STAT_UNKNOWN; - m_globalLB = globalLB; - m_globalUB = globalUB; - - //--- - //--- check solveMasterAsMip setting - //--- on by default, but if only one block, turn off - //--- - if (m_numConvexCon == 1) { - m_param.SolveMasterAsMip = 0; - } - - //--- - //--- if problem is a pure LP, set MasterGapLimit = 1.e-8 - //--- - if (modelCore->integerVars.size() == 0) { - m_param.MasterGapLimit = 1.0e-8; - UTIL_MSG(m_param.LogLevel, 1, - (*m_osLog) - << "Problem is an LP. Reset param MasterGapLimit = " - << m_param.MasterGapLimit << endl; - ); - } - - //--- - //--- init stats and timer - //--- - m_stats.timerDecomp.reset(); - m_nodeStats.init(); - m_nodeStats.nodeIndex = nodeIndex; - //NOTE: changed on 5/25/2010 - // if we use the parent LB, then stabilized won't - // move until much later - //does this change effect anything else? wrt to short - // cutting and fathoming - check this - //you also have to watch for tailoff - if you set to - // parent obj and it takes a while to get there, then - // it will look like it is tailing off and you might stop - // short - //m_nodeStats.objBest.first = globalLB; - //if(m_param.DualStab) - m_nodeStats.objBest.first = -m_infinity; - //else - //m_nodeStats.objBest.first = globalLB; - m_nodeStats.objBest.second = globalUB; - m_compressColsLastPrice = 0; - m_compressColsLastNumCols = m_masterSI->getNumCols(); - m_phaseIObj.clear(); - //--- - //--- get initial phase - //--- - //--- CPM <-- CUT - //--- PC (node = 0) <-- PRICEI - //--- (node > 0) <-- - //--- - m_firstPhase2Call = false; - phaseInit(m_phaseLast); - m_phase = m_phaseLast; - - //--- - //--- it is possible that phaseInit can find - //--- the node infeasible - //--- - if (m_phase == PHASE_DONE) { - m_status = STAT_INFEASIBLE; - } else { - //TODO: put sb candidate id in name of file - if (m_param.LogDumpModel > 1) { - string baseName = "masterProb"; +// once we do RC, this probably won't be in base anyway +bool DecompAlgo::updateObjBound(const double mostNegRC) { + //--- + //--- C : LB = masterLP obj + //--- PC : LB = zDW_RMP + RC* <= zDW <= zDW_RMP + //--- where RC* is the most negative reduced cost + //--- assuming the relaxation subproblem was solved exactly + //--- + //--- Careful here -- for many apps the user will use heuristics + //--- during column generation phase. If we update LB after each + //--- column added we might stop too early if this LB exceeds the + //--- tree's global upper bound. 
+ //--- + UtilPrintFuncBegin(m_osLog, m_classTag, "updateObjBound()", + m_param.LogDebugLevel, 2); + // for DualStab, this returns smoothed duals + int r; + const double *dualSol = getMasterDualSolution(); + const double *rowRhs = m_masterSI->getRightHandSide(); + double zDW_UBPrimal = getMasterObjValue(); + double zDW_UBDual = 0.0; + double zDW_LB = 0.0; + const double *rc = getMasterColReducedCost(); + const double *colLower = m_masterSI->getColLower(); + const double *colUpper = m_masterSI->getColUpper(); + // rStat might not be needed now, but will be needed + // when we support ranged rows. + int *rStat = new int[m_masterSI->getNumRows()]; + int *cStat = new int[m_masterSI->getNumCols()]; + m_masterSI->getBasisStatus(cStat, rStat); + + for (int c = 0; c < m_numCols; c++) { + if (cStat[c] == 3) { + zDW_UBDual += rc[c] * colLower[c]; + } else if (cStat[c] == 2) { + zDW_UBDual += rc[c] * colUpper[c]; + } + } + + int nRows = m_masterSI->getNumRows(); + + for (r = 0; r < nRows; r++) { + zDW_UBDual += dualSol[r] * rowRhs[r]; + } + + // zDW_LB = zDW_UBDual + mostNegRC; + zDW_LB = zDW_UBPrimal + mostNegRC; + setObjBound(zDW_LB, zDW_UBPrimal); + /* + double actDiff = fabs(zDW_UBDual - zDW_UBPrimal); + double unifDiff = actDiff / (1.0 + fabs(zDW_UBPrimal)); + if (!m_param.DualStab && !UtilIsZero(unifDiff, 1e-04)) { + (*m_osLog) << "MasterObj [primal] = " << UtilDblToStr(zDW_UBPrimal) + << endl; + (*m_osLog) << "MasterObj [dual] = " << UtilDblToStr(zDW_UBDual) + << endl; + throw UtilException("Primal and Dual Master Obj Not Matching.", + "updateObjBoundLB", "DecompAlgo"); + } + */ + // TODO: stats - we want to play zDW_LB vs UB... + UTIL_MSG(m_param.LogDebugLevel, 3, + (*m_osLog) << "MasterObj[primal] = " << UtilDblToStr(zDW_UBPrimal) + << "\t" + << "[dual] = " << UtilDblToStr(zDW_UBDual) << "\t" + << "mostNegRC = " << UtilDblToStr(mostNegRC) << "\n" + << "ThisLB = " << UtilDblToStr(zDW_LB) << "\t" + << "BestLB = " << UtilDblToStr(m_nodeStats.objBest.first) + << "\n";); + UTIL_DEBUG(m_param.LogDebugLevel, 2, + (*m_osLog) << "PriceCallsRound= " << setw(3) + << m_nodeStats.priceCallsRound << setw(13) + << "\tmostNegRC=" << setw(13) + << UtilDblToStr(mostNegRC, 4) << setw(13) << "\tthisLB=" + << setw(13) << UtilDblToStr(zDW_LB, 4) << endl;); + + if ((getNodeIndex() == 0) && + (zDW_LB > (m_app->getBestKnownUB() + DecompEpsilon))) { + (*m_osLog) << "ERROR: in root node, bestKnownUB = " + << UtilDblToStr(m_app->getBestKnownUB()) + << " thisBoundLB = " << UtilDblToStr(zDW_LB) << endl; + // assert(0); + } + + //--- + //--- check if the gap is tight (use the best bound) + //--- + bool isGapTight = false; + double tightGap = m_param.MasterGapLimit; + double relGap = getNodeLPGap(); + + if (relGap <= tightGap) { + isGapTight = true; + } + + if (m_param.LogDebugLevel >= 2) { + (*m_osLog) << "DW relGap = " << UtilDblToStr(relGap) + << " isTight = " << isGapTight << "\n"; + } + + UTIL_DELARR(rStat); + UTIL_DELARR(cStat); + m_relGap = relGap; + UtilPrintFuncEnd(m_osLog, m_classTag, "updateObjBound()", + m_param.LogDebugLevel, 2); + return isGapTight; +} - if (m_isStrongBranch) { - baseName += "_SB"; - } +//===========================================================================// +void DecompAlgo::masterPhaseItoII() { + //--- + //--- switch from Phase I to Phase II + //--- + UTIL_MSG(m_app->m_param.LogDebugLevel, 4, + (*m_osLog) << "Switching from PhaseI to PhaseII\n";); + int i; + int nMasterCols = m_masterSI->getNumCols(); + //--- + //--- set obj for all columns to original cost + //--- set obj for 
artificial columns to 0 + //--- fix column bounds for artificial columns to 0 + //--- +#ifdef STAB_DUMERLE + //--- + //--- set cost on slacks to delta, where delta is init'd to + //--- initial duals (this is to be done in Phase 2 only) + //--- + //--- min deltap sp - deltam sm + //--- + //--- ax = b --> ax + sp - sm = b, sp >= 0 <= epsp, sm >= 0 <= epsm + //--- ax <= b --> ax - sm <= b, sm >= 0 <= epsm + //--- ax >= b --> ax + sp >= b, sp >= 0 <= epsp + //--- + int r; + const double *dualSol = NULL; + + if (m_useInitLpDuals) { + dualSol = m_cutgenSI->getRowPrice(); + m_useInitLpDuals = false; + m_stabEpsilon = 0.0; + } else { + dualSol = m_masterSI->getRowPrice(); + } + + assert(nMasterCols == static_cast(m_masterColType.size())); + + for (i = 0; i < nMasterCols; i++) { + DecompColType type = m_masterColType[i]; + + if (type == DecompCol_ArtForRowL || type == DecompCol_ArtForBranchL || + type == DecompCol_ArtForCutL) { + r = m_artColIndToRowInd[i]; + printf("Master Col i=%d type=%s r=%d dual=%g\n", i, + DecompColTypeStr[type].c_str(), r, dualSol[r]); + m_masterSI->setObjCoeff(i, -dualSol[r]); + } else if (type == DecompCol_ArtForRowG || + type == DecompCol_ArtForBranchG || + type == DecompCol_ArtForCutG) { + r = m_artColIndToRowInd[i]; + printf("Master Col i=%d type=%s r=%d dual=%g\n", i, + DecompColTypeStr[type].c_str(), r, dualSol[r]); + m_masterSI->setObjCoeff(i, dualSol[r]); + } else { + m_masterSI->setObjCoeff(i, 0.0); + } - printCurrentProblem(m_masterSI, - baseName, - m_nodeStats.nodeIndex, - m_nodeStats.cutCallsTotal, - m_nodeStats.priceCallsTotal); - } + if (isMasterColArtificial(i)) { + // m_masterSI->setColBounds(i, 0.0, 0.0);//TODO + m_masterSI->setColBounds(i, 0.0, m_stabEpsilon); // TODO + } + } - //--- - //--- find the initial solution (dual and/or primal) - //--- - m_status = solutionUpdate(m_phase, true); - } + DecompVarList::iterator li; - if (m_status != STAT_INFEASIBLE) { - //for CPM, can't this just access from m_masterSI? - recomposeSolution(getMasterPrimalSolution(), m_xhat); - UTIL_DEBUG(m_param.LogDebugLevel, 4, - m_app->printOriginalSolution(modelCore->getNumCols(), - modelCore->getColNames(), - m_xhat); - ); - - //TODO: solution pool? - //TODO: check if this is IP feasible - // make that a function - if (isIPFeasible(m_xhat)) { - if (m_app->APPisUserFeasible(m_xhat, - modelCore->getNumCols(), - m_param.TolZero)) { - //printf("m_xhat is APP FEASIBLE, m_xhatIPFeas size = %d\n", - // (int)m_xhatIPFeas.size()); - //check for dup sol - bool isDup = m_xhatIPFeas.size() > 0 ? true : false; - vector::iterator vit; - - for (vit = m_xhatIPFeas.begin(); - vit != m_xhatIPFeas.end(); vit++) { - const DecompSolution* xhatIPFeas = *vit; - const double* values - = xhatIPFeas->getValues(); - - for (int c = 0; c < modelCore->getNumCols(); c++) { - if (!UtilIsZero(values[c] - m_xhat[c])) { - isDup = false; - break; - } - } - } + for (li = m_vars.begin(); li != m_vars.end(); li++) { + assert(isMasterColStructural((*li)->getColMasterIndex())); + m_masterSI->setObjCoeff((*li)->getColMasterIndex(), + (*li)->getOriginalCost()); + } - if (isDup) { - //printf("IS DUP, not pushing\n"); - } else { - DecompSolution* decompSol - = new DecompSolution(modelCore->getNumCols(), - m_xhat, - getOrigObjective()); - //getMasterObjValue()); - //solution pool? 
- m_xhatIPFeas.push_back(decompSol); - //printf("m_xhatIPFeas size = %d\n", - // (int)m_xhatIPFeas.size()); - } - } + if (m_param.LogDumpModel > 1) { + string baseName = "masterProb_switchItoII"; - vector::iterator vi; - DecompSolution* viBest = NULL; - double bestBoundUB = m_nodeStats.objBest.second; + if (m_isStrongBranch) { + baseName += "_SB"; + } - for (vi = m_xhatIPFeas.begin(); vi != m_xhatIPFeas.end(); vi++) { - const DecompSolution* xhatIPFeas = *vi; + printCurrentProblem(m_masterSI, baseName, m_nodeStats.nodeIndex, + m_nodeStats.cutCallsTotal, m_nodeStats.priceCallsTotal); + } - if (xhatIPFeas->getQuality() <= bestBoundUB) { - bestBoundUB = xhatIPFeas->getQuality(); - viBest = *vi; - } - } - - if (viBest) { - //save the best - setObjBoundIP(bestBoundUB); - m_xhatIPBest = viBest; - } - } - - //for CPM, dont' we need to update obj lb here? in case that no cuts - // are found, then node is done and we need to update boundx - if (m_algo == CUT) { - updateObjBound(); - } - } - - //--- - //--- main processing loop - //--- - while (m_phase != PHASE_DONE) { - //TODO: LP only? - UTIL_MSG(m_param.LogLevel, 2, - double lpGap = getNodeLPGap(); - double ipGap = getNodeIPGap(); - int nHistorySize - = static_cast(m_nodeStats.objHistoryBound.size()); - - if (nHistorySize > 0) { - DecompObjBound& objBound - = m_nodeStats.objHistoryBound[nHistorySize - 1]; - (*m_osLog) << setiosflags(ios::right); - (*m_osLog) - << "Processing Node " - << setw(3) << nodeIndex - << " algo= " - << setw(13) << DecompAlgoStr[m_algo] - << " phase= " - << setw(12) << DecompPhaseStr[m_phase] - << " c= " << setw(4) - << m_nodeStats.cutCallsTotal - << " p= " << setw(4) - << m_nodeStats.priceCallsTotal - << " LB= " << setw(10) - << UtilDblToStr(objBound.thisBound, 3) - << " UB= " << setw(10) - << UtilDblToStr(objBound.thisBoundUB, 3) - << " nodeLB= " << setw(10) - << UtilDblToStr(m_nodeStats.objBest.first, 3) - << " gLB= " << setw(10) - << UtilDblToStr(m_globalLB, 3) - << " gUB= " << setw(10) - << UtilDblToStr(m_nodeStats.objBest.second, 3) - << " lpGap= " << setw(10) - << UtilDblToStr(lpGap, 3) - << " ipGap= " << setw(10) - << UtilDblToStr(ipGap, 3) - << " time= " << setw(10) - << UtilDblToStr(globalTimer.getCpuTime(), 2) - << endl; - } else { - //TODO - } - ); - //--- - //--- update the phase based on parms, status and current phase - //--- - phaseUpdate(m_phase, m_status); - - //--- - //--- check if we have exceeded time - //--- THINK: should this check be in phaseUpdate? - //--- - if (m_stats.timerOverall.isPast(m_param.TimeLimit)) { - UTIL_MSG(m_param.LogLevel, 2, - (*m_osLog) - << "Node " << nodeIndex << " process stopping on time." - << endl;); - m_stopCriteria = DecompStopTime; - m_phase = PHASE_DONE; - } - - //--- - //--- if the lower bound meets the global ub, we are done - //--- careful here - do NOT do this check in phase1 since - //--- ub is based on original objective while lb is based - //--- on phase 1 objective - //--- - //--- TOOD: seems confusing to store bounds from different objectives - //--- in the same structure - maybe should use m_nodeStats1/2 - //--- - //--- TKR (8/20/19): removed tolerance used on comparison below to be - //--- consistent with seemingly duplicative check in - //--- AlpsDecompTreeNode::process(). TODO: determine - //--- whether we need a tolerance here and whether the - //--- the duplicate checks can/should be eliminated. 
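processNode, shown above, avoids pushing the same incumbent twice by comparing the recomposed x-hat componentwise against every stored solution. A simplified standalone version of that guard; the function name, tolerance, and data are illustrative, and the real loop lives inline inside processNode.

#include <cmath>
#include <cstdio>
#include <vector>

// A candidate xhat is treated as a duplicate if it matches some
// already-stored solution in every component.
bool isDuplicateSolution(const std::vector<std::vector<double>> &pool,
                         const std::vector<double> &xhat,
                         double tol = 1.0e-10) {
  for (const auto &sol : pool) {
    bool same = sol.size() == xhat.size();
    for (size_t c = 0; same && c < xhat.size(); c++) {
      if (std::fabs(sol[c] - xhat[c]) > tol) same = false;
    }
    if (same) return true; // matches a stored solution
  }
  return false;
}

int main() {
  std::vector<std::vector<double>> pool = {{1.0, 0.0, 2.0}};
  std::vector<double> xhat = {1.0, 0.0, 2.0};
  printf("duplicate = %d\n", isDuplicateSolution(pool, xhat)); // 1
  return 0;
}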
- if (m_phase != PHASE_PRICE1 && - (m_nodeStats.objBest.first >= - (m_nodeStats.objBest.second))) { - UTIL_MSG(m_param.LogLevel, 2, - (*m_osLog) - << "Node " << nodeIndex << " process stopping on bound." - << " This LB= " - << UtilDblToStr(m_nodeStats.objBest.first) - << " Global UB= " - << UtilDblToStr(m_nodeStats.objBest.second) << "." << endl;); - m_stopCriteria = DecompStopBound; - m_phase = PHASE_DONE; - } - - if (m_phase == PHASE_DONE) { - break; - } - - bool isGapTight = false; - DecompVarList newVars; - DecompCutList newCuts; - - switch (m_phase) { - case PHASE_PRICE1: - case PHASE_PRICE2: { - m_nodeStats.priceCallsRound++; - m_nodeStats.priceCallsTotal++; - - //--- - //--- after adding some rows, the columns in the var pool - //--- might no longer be valid, so we need to re-expand everything - //--- - if (m_varpool.size() > 0) { - if (!m_varpool.colsAreValid()) { - UTIL_MSG(m_param.LogDebugLevel, 3, - (*m_osLog) << "EXPANDING varpool.\n";); - m_varpool.reExpand(*modelCore, m_param.TolZero); - } - - //--- - //--- THINK.... - //--- - if (m_status == STAT_FEASIBLE) { - m_varpool.setReducedCosts(getMasterDualSolution(), m_status); - } else { - //if doing RC, never called?? - const double* u = getDualRays(1)[0]; - m_varpool.setReducedCosts(u, m_status); - UTIL_DELARR(u); - } - } - - //--- - //--- attempt to generate some new variables with rc < 0 - //--- - mostNegRC = 0.0; - m_nodeStats.varsThisCall = generateVars(newVars, mostNegRC); - m_nodeStats.varsThisRound += m_nodeStats.varsThisCall; - m_nodeStats.cutsThisCall = 0; - map::iterator mit; - - for (mit = m_modelRelax.begin(); mit != m_modelRelax.end(); mit++) { - (*mit).second.setCounter((*mit).second.getCounter() + 1); - } - - // Store the m_numCols and use it in updateObjBound function - m_numCols = m_masterSI->getNumCols(); - - if (m_isColGenExact && - m_rrIterSinceAll == 0 && - m_status == STAT_FEASIBLE) { - isGapTight = updateObjBound(mostNegRC); - } - - if (m_nodeStats.varsThisCall > 0) { - //--- - //--- add the newly generated variables to the var pool - //--- - addVarsToPool(newVars); - //--- - //--- add variables from the variable pool to the master problem - //--- - addVarsFromPool(); - } - - //printf("m_isColGenExact = %d\n", m_isColGenExact); - //printf("m_rrIterSinceAll = %d\n", m_rrIterSinceAll); - //printf("m_status = %d\n", m_status); - //TODO: don't need check m_isColGenExact if we - // use LB's in mostNegRC (rather than varRedCost)... - - /*if(m_isColGenExact && - m_rrIterSinceAll == 0 && - m_status == STAT_FEASIBLE && - m_phase == PHASE_PRICE2) - isGapTight = updateObjBoundLB(mostNegRC);*/ - - //--- - //--- update stab parameters delta=duals, epsilon reduced - //--- - /*#ifdef STAB_DUMERLE - m_stabEpsilon *= 0.95; - dualSol = m_masterSI->getRowPrice(); - int i, r; - for(i = 0; i < m_masterSI->getNumCols(); i++){ - DecompColType type = m_masterColType[i]; - if(isMasterColArtificial(i)){ - if(type == DecompCol_ArtForRowL || - type == DecompCol_ArtForBranchL || - type == DecompCol_ArtForCutL){ - r = m_artColIndToRowInd[i]; - printf("Master Col i=%d type=%s r=%d dual=%g\n", - i, DecompColTypeStr[type].c_str(), r, dualSol[r]); - m_masterSI->setObjCoeff(i, -dualSol[r]); - } - else if(type == DecompCol_ArtForRowG || - type == DecompCol_ArtForBranchG || - type == DecompCol_ArtForCutG){ - r = m_artColIndToRowInd[i]; - printf("Master Col i=%d type=%s r=%d dual=%g\n", - i, DecompColTypeStr[type].c_str(), r, dualSol[r]); - m_masterSI->setObjCoeff(i, dualSol[r]); - } - //CAN'T DO THIS IF IN PHASEI! 
- //m_masterSI->setColBounds(i, 0.0, 0.0);//TODO - m_masterSI->setColBounds(i, 0.0, m_stabEpsilon);//TODO - } - } - #endif*/ - } - break; - case PHASE_CUT: - m_nodeStats.cutCallsRound++; - m_nodeStats.cutCallsTotal++; - - //--- - //--- after adding some cols, the rows in the cut pool - //--- might no longer be valid, so we need to re-expand everything - //--- - if (!m_cutpool.rowsAreValid() && (m_cutpool.size() > 0)) { - UTIL_MSG(m_param.LogDebugLevel, 3, - (*m_osLog) << "EXPANDING cutpool.\n";); - m_cutpool.reExpand(m_vars, - modelCore->getNumCols(), - m_nArtCols); - } - - //THINK: here is where you will do sep of m_xhat vs shat - m_cutpool.calcViolations(m_xhat); - //--- - //--- attempt to generate some new cuts with vio > 0 - //--- - m_nodeStats.cutsThisCall = generateCuts(m_xhat, newCuts); - m_nodeStats.cutsThisRound += m_nodeStats.cutsThisCall; - m_nodeStats.varsThisCall = 0; - - if (m_nodeStats.cutsThisCall > 0) { - //this updates the lb based on last solve, not this solve! - // gen cut doesn't change bound until we resolve - //if(m_algo == CUT) - // updateObjBoundLB(); - //--- - //--- add the newly generated cuts to the cut pool - //--- - addCutsToPool(m_xhat, newCuts, m_nodeStats.cutsThisCall); - //--- - //--- add cuts from the cut pool to the master problem - //--- - addCutsFromPool(); - } - - break; - case PHASE_DONE: { - map::iterator mit; - - for (mit = m_modelRelax.begin(); mit != m_modelRelax.end(); mit++) { - (*mit).second.setCounter(0); - } - } - break; - default: - assert(0); - } - - //--- - //--- Careful here -- for many apps the user will use heuristics - //--- during column generation phase. If we update LB after each - //--- column added we might stop too early if this LB exceeds the - //--- tree's global upper bound. - //--- - //--- We need the user to tell us if they solved it exactly or not. - //--- - //this should be in phaseUpdate? - //TODO: moved this into phaseUpdate for PC - need to revisit CPM! - //TODO: now moved to phaseUpdate - what about case of no branch object!? - if (m_phase != PHASE_DONE) { - //--- - //--- perform a solution update - //--- PC: take PARM steps of simplex - //--- ?? DC: take PARM steps of simplex (INF case?) - //--- RC: take PARM steps of subgradient - //--- VC: take PARM steps of volume - //--- - if (m_param.LogDumpModel > 1) { - string baseName = "masterProb"; - - if (m_isStrongBranch) { - baseName += "_SB"; - } - - printCurrentProblem(m_masterSI, - baseName, - m_nodeStats.nodeIndex, - m_nodeStats.cutCallsTotal, - m_nodeStats.priceCallsTotal); - } - - //--- - //--- check to see if something got added - //--- - int nChanges = m_nodeStats.cutsThisCall + m_nodeStats.varsThisCall; - UTIL_DEBUG(m_param.LogDebugLevel, 3, - (*m_osLog) << "nNewVars = " << m_nodeStats.varsThisCall - << endl; - (*m_osLog) << "nNewCuts = " << m_nodeStats.cutsThisCall - << endl; - ); - - if (!isDone()) { - if (nChanges) { - //why is this not in the switch statement? - m_status = solutionUpdate(m_phase); - - //make this some update that can be override for CPM vs PC - // or move this update to phaseUpdate??? - if (m_nodeStats.cutsThisCall > 0) { - updateObjBound(); - } - } - } - - ////////////////THINK - //???? 
shouldn't we have recompose and look for IP feasible - // inside of solutionUpdate - as it should be checked in every - // case - //what happens often in pricing - you find integer point, - //but lb can be improved so you price, but still get integer point - //as min, so you keep adding the same ip point - need cmp to - //check for dups - esp against previous one - //--- - //--- check if IP feasible (are we done?) - //--- TODO: for nonexplicity, also check user app isfeasible - //--- - //TODO: should this whole section be phaseDone? - if (m_status != STAT_INFEASIBLE) { - recomposeSolution(getMasterPrimalSolution(), m_xhat); - UTIL_DEBUG(m_param.LogDebugLevel, 4, - m_app->printOriginalSolution(modelCore->getNumCols(), - modelCore->getColNames(), - m_xhat); - ); - - //TODO: solution pool? - //this is checked again in phase update... - //first, check to see if LP solution is already ip and user feas - if (isIPFeasible(m_xhat)) { - if (m_app->APPisUserFeasible(m_xhat, - modelCore->getNumCols(), - m_param.TolZero)) { - //check for dup sol - bool isDup = m_xhatIPFeas.size() > 0 ? true : false; - vector::iterator vit; - - for (vit = m_xhatIPFeas.begin(); - vit != m_xhatIPFeas.end(); vit++) { - const DecompSolution* xhatIPFeas = *vit; - const double* values - = xhatIPFeas->getValues(); - - for (int c = 0; c < modelCore->getNumCols(); c++) { - if (!UtilIsZero(values[c] - m_xhat[c])) { - isDup = false; - break; - } - } - } - - if (isDup) { - //printf("IS DUP, not pushing\n"); - } else { - DecompSolution* decompSol - = new DecompSolution(modelCore->getNumCols(), - m_xhat, - getOrigObjective()); - //solution pool? - m_xhatIPFeas.push_back(decompSol); - } - } - - //--- - //--- TODO: - //--- - //--- for multi-block, if integer feasible solution, - //--- break up into block partial columns and add - //--- to masterLP - //--- - } - - //--- - //--- TODO: - //--- Rob Pratt Idea (2/5/10) - //--- for multi-block, if block is integer feasible - //--- add to masterLP directly - then need a resolve - //--- to get back to current status - //--- - if (m_param.BreakOutPartial) { - DecompVarList partialVars; - breakOutPartial(m_xhat, partialVars); - - if (partialVars.size()) { - //--- - //--- add the newly generated variables to the var pool - //--- - addVarsToPool(partialVars); - //--- - //--- add variables from the variable pool to master problem - //--- - addVarsFromPool(); - //--- - //--- update if any changes were made - //--- - nChanges - = m_nodeStats.cutsThisCall + m_nodeStats.varsThisCall; - (*m_osLog) << "BreakOutPartial newVars = " - << partialVars.size() << endl; - } - } - - //TODO: - m_app->APPheuristics(m_xhat, getOrigObjective(), m_xhatIPFeas); - //TODO: make this a function! - vector::iterator vi; - DecompSolution* viBest = NULL; - double bestBoundUB = m_nodeStats.objBest.second; - - for (vi = m_xhatIPFeas.begin(); vi != m_xhatIPFeas.end(); vi++) { - const DecompSolution* xhatIPFeas = *vi; - - if (xhatIPFeas->getQuality() <= bestBoundUB) { - bestBoundUB = xhatIPFeas->getQuality(); - viBest = *vi; - } - } - - if (viBest) { - //save the best - setObjBoundIP(bestBoundUB); - m_xhatIPBest = viBest; - } - } - - if (nChanges && m_phase != PHASE_PRICE1) { - //--- - //--- check on tailoff - //--- - if (isTailoffLB(m_param.TailoffLength, - m_param.TailoffPercent)) { - UTIL_MSG(m_param.LogLevel, 2, - (*m_osLog) << "Tailing off. Stop processing node." - << endl;); - m_stopCriteria = DecompStopTailOff; - m_phaseLast = m_phase; - m_phase = PHASE_DONE; - } - - //--- - //--- did the master objective change? 
- //--- if not, make sure the columns just added cannot be - //--- deleted - //--- - int i; - UTIL_DEBUG(m_param.LogDebugLevel, 3, - (*m_osLog) - << "m_masterObjLast = " << setw(10) - << UtilDblToStr(m_masterObjLast) - << " thisMaster = " << setw(10) - << UtilDblToStr(getMasterObjValue()) - << " varsThisCall = " << setw(5) - << m_nodeStats.varsThisCall;); - - //what if it just changed due to cuts? - if (UtilIsZero(m_masterObjLast - getMasterObjValue(), 1.0e-4)) { - m_objNoChange = true; - UTIL_DEBUG(m_param.LogDebugLevel, 3, - (*m_osLog) << "No objective change" << endl;); - - //0 1 2 3 4 - // w new vars - //4x 3x 2 1 0 - // "DecompCol_Structural_NoDelete", - if (m_nodeStats.varsThisCall > 0) { - int sz = static_cast(m_masterColType.size()); - - for (i = sz - 1; - i >= sz - m_nodeStats.varsThisCall; i--) { - UTIL_DEBUG(m_param.LogDebugLevel, 3, - (*m_osLog) << "Col " << i << " has type " - << DecompColTypeStr[m_masterColType[i]] - << endl;); - assert(m_masterColType[i] == DecompCol_Structural); - m_masterColType[i] = DecompCol_Structural_NoDelete; - } - } - } else { - m_objNoChange = false; - UTIL_DEBUG(m_param.LogDebugLevel, 3, (*m_osLog) << endl;); - } - - m_masterObjLast = getMasterObjValue(); - - if (m_phase != PHASE_DONE && m_param.CompressColumns) { - //--- - //--- adjust columns effectiveness count - //--- - adjustColumnsEffCnt(); - //--- - //--- periodically, get rid of ineffective columns - //--- periodic: - //--- every K iterations OR - //--- numCols has doubled since last compression - //--- - compressColumns(); - } - } - } - } //while(phase != PHASE_DONE) - - phaseDone(); - - //need to check again, if we get ip feasible in first LP - //but this will cause dups... if we also find above? - if (m_xhatIPFeas.size() == 0 && m_status != STAT_INFEASIBLE) { - //this is checked again in phase update... - //first, check to see if LP solution is already ip and user feas - if (isIPFeasible(m_xhat)) { - if (m_app->APPisUserFeasible(m_xhat, - modelCore->getNumCols(), - m_param.TolZero)) { - DecompSolution* decompSol - = new DecompSolution(modelCore->getNumCols(), - m_xhat, - getOrigObjective()); - m_xhatIPFeas.push_back(decompSol); - m_xhatIPBest = decompSol; - } - } - } - - if (m_xhatIPBest) { - UTIL_DEBUG(m_param.LogLevel, 3, - (*m_osLog) << "Best Feasible Solution with Quality = " - << UtilDblToStr(m_xhatIPBest->getQuality(), 6) << "\n"; - m_app->printOriginalSolution(modelCore->getNumCols(), - modelCore->getColNames(), - m_xhatIPBest->getValues()); - ); - } - - UTIL_DEBUG(m_param.LogDebugLevel, 3, - (*m_osLog) << "StatOut : " - << DecompStatusStr[m_status] << "\n"; - (*m_osLog) << "StopCriteria: " - << DecompAlgoStopStr[m_stopCriteria] << "\n"; - (*m_osLog) << "RelGap : " - << UtilDblToStr(m_relGap, 6) << "\n"; - ); - m_stats.thisDecomp.push_back(m_stats.timerDecomp.getRealTime()); - //if i am root and doing price and cut, solve this IP to get ub... 
- // e.g., cutting stock works well -> better to do at AlpsDecompTreeNode - UTIL_MSG(m_param.LogDebugLevel, 3, - m_stats.printOverallStats(m_osLog); - ); - - if (m_param.LogObjHistory) { - m_nodeStats.printObjHistoryBound(m_osLog); - } - - UtilPrintFuncEnd(m_osLog, m_classTag, - "processNode()", m_param.LogDebugLevel, 1); - return m_status; -} - - -//--------------------------------------------------------------------- // -void DecompAlgo::setSubProbBounds(const double* lbs, - const double* ubs) -{ - //NOTE: set them in either case so customized user - // can access the information from branching - //if(!m_param.BranchEnforceInSubProb) - // return; - UtilPrintFuncBegin(m_osLog, m_classTag, - "setSubProbBounds()", m_param.LogDebugLevel, 2); - //--- - //--- make copy so we can enforce in subproblems - //--- THINK: If serial mode, why not just a pointer into node desc? - //--- - DecompConstraintSet* modelCore = m_modelCore.getModel(); - const int nCols = modelCore->getNumCols(); - memcpy(m_colLBNode, lbs, nCols * sizeof(double)); - memcpy(m_colUBNode, ubs, nCols * sizeof(double)); - UtilPrintFuncEnd(m_osLog, m_classTag, - "setSubProbBounds()", m_param.LogDebugLevel, 2); -} - -//--------------------------------------------------------------------- // -void DecompAlgo::setMasterBounds(const double* lbs, - const double* ubs) -{ - UtilPrintFuncBegin(m_osLog, m_classTag, - "setMasterBounds()", m_param.LogDebugLevel, 2); - - //TODO: how to handle case where relax is not defined explicitly - // like in GAP... - // if (!m_param.BranchEnforceInMaster) { - // assert(m_param.BranchEnforceInSubProb); - if (m_branchingImplementation == DecompBranchInSubproblem ) { - //--- - //--- Must remove (or fix to 0) any column in master that - //--- does not satisfy the branching bounds. - //--- However -- be careful that these bounds should - //--- only be applied to their relevant blocks. - //--- - //--- For example, if branch is x(abc,2)=1, and 2 is - //--- the block id, we do not want to remove columns - //--- in block 1 where x(abc,1)=0. That is a partial - //--- column which might have x(abc,2)=0 in projected - //--- space, but should remain in that branching node. - //--- Otherwise, it will just be reproduced at the next - //--- phase of generating vars for block 0. 
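The branch-in-subproblem path described above fixes to zero any master column that violates the node's branching bounds, taking care to apply the bounds only within the column's own block. A small sketch of the per-column test, with a plain struct standing in for DecompVar and made-up data.

#include <cstdio>
#include <vector>

// Illustrative stand-in for a sparse master column in the original space.
struct SparseCol {
  std::vector<int> ind;    // original column indices touched by this column
  std::vector<double> els; // values in the original space
};

// A column violates the node if any of its entries falls outside [lbs, ubs].
bool satisfiesBounds(const SparseCol &col, const double *lbs,
                     const double *ubs, double eps = 1.0e-8) {
  for (size_t k = 0; k < col.ind.size(); k++) {
    int j = col.ind[k];
    if (col.els[k] < lbs[j] - eps || col.els[k] > ubs[j] + eps) return false;
  }
  return true;
}

int main() {
  // branching fixed x2 to 1 at this node
  double lbs[3] = {0, 0, 1};
  double ubs[3] = {1, 1, 1};
  SparseCol col{{0, 2}, {1.0, 0.0}}; // this column has x2 = 0
  if (!satisfiesBounds(col, lbs, ubs)) {
    printf("column violates branching bounds -> fix its master UB to 0\n");
    // in DIP this corresponds to m_masterSI->setColBounds(masterColIndex, 0.0, 0.0)
  }
  return 0;
}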
- //--- - DecompVarList::iterator li; - int masterColIndex; - DecompConstraintSet* modelCore = m_modelCore.getModel(); - const int nCols = modelCore->getNumCols(); - const double* colUB = m_masterSI->getColUpper(); - double* denseS = new double[nCols]; - map::iterator mit; - - for (li = m_vars.begin(); li != m_vars.end(); li++) { - masterColIndex = (*li)->getColMasterIndex(); - assert(isMasterColStructural(masterColIndex)); - mit = m_modelRelax.find((*li)->getBlockId()); - assert(mit != m_modelRelax.end()); - - if (!(*li)->doesSatisfyBounds(nCols, denseS, - mit->second, - lbs, ubs)) { - //--- - //--- if needs to be fixed - //--- - if (colUB[masterColIndex] > DecompEpsilon) { - m_masterSI->setColBounds(masterColIndex, 0.0, 0.0); - - if (m_param.LogDebugLevel >= 4) { - (*m_osLog) << "Set masterColIndex=" << masterColIndex - << " UB to 0" << endl; - (*li)->print(m_infinity, m_osLog, modelCore->getColNames()); - } - } - } else { - //--- - //--- if needs to be unfixed (from previous node) - //--- - if (colUB[masterColIndex] <= 0) { - m_masterSI->setColBounds(masterColIndex, 0.0, m_infinity); - - if (m_param.LogDebugLevel >= 4) { - (*m_osLog) << "Set masterColIndex=" << masterColIndex - << " UB to INF" << endl; - (*li)->print(m_infinity, m_osLog, modelCore->getColNames()); - } - } - } - } - - UTIL_DELARR(denseS); - } else if (m_branchingImplementation == DecompBranchInMaster) { - int c, coreColIndex; - DecompConstraintSet* modelCore = m_modelCore.getModel(); - const int nIntVars = modelCore->getNumInts(); - const int* integerVars = modelCore->getIntegerVars(); - - // speical treat master-only variables, add variable bounds - // directly on the master-only variables - if (m_param.BranchEnforceInSubProb == true && - m_branchingImplementation == DecompBranchInMaster) { - for (c = 0 ; c < nIntVars; c++) { - coreColIndex = integerVars[c]; - - if (std::find (m_masterOnlyCols.begin(), m_masterOnlyCols.end(), coreColIndex) - != m_masterOnlyCols.end()) { - m_masterSI->setColBounds(m_masterOnlyColsMap[coreColIndex], - lbs[coreColIndex], ubs[coreColIndex]); - } - } - } else { - const int beg = modelCore->nBaseRowsOrig; - //TODO: can reuse this memory - int nRows = 2 * nIntVars; - int* index = new int[nRows]; - char* sense = new char[nRows]; - double* rhs = new double[nRows]; - double* range = new double[nRows]; - - //lbs,ubs is indexed on core column index - // but c is being looped over integers here... 
- //--- - //--- the row index for column c's UB (x <= u) is: beg + c - //--- the row index for column c's LB (x >= l) is: beg + nIntVars + c - //--- - - for (c = 0; c < nIntVars; c++) { - //x <= u - coreColIndex = integerVars[c]; - index[c] = beg + c; //row index into master - sense[c] = 'L'; - rhs [c] = ubs[coreColIndex]; - range[c] = 0.0; - - if (m_masterRowType[beg + c] != DecompRow_Branch) { - printf("ERROR: row %d type: %s\n", - beg + c, - DecompRowTypeStr[m_masterRowType[beg + c]].c_str()); - } - - assert(m_masterRowType[beg + c] == DecompRow_Branch); - } - - for (c = nIntVars; c < (2 * nIntVars); c++) { - //x >= l - coreColIndex = integerVars[c - nIntVars]; - index[c] = beg + c; - sense[c] = 'G'; - rhs [c] = lbs[coreColIndex]; - range[c] = 0.0; - - if (m_masterRowType[beg + c] != DecompRow_Branch) { - printf("ERROR: row %d type: %s\n", - beg + c, - DecompRowTypeStr[m_masterRowType[beg + c]].c_str()); - } - - assert(m_masterRowType[beg + c] == DecompRow_Branch); - } - - m_masterSI->setRowSetTypes(index, index + (2 * nIntVars), sense, rhs, range); - UTIL_DELARR(index); - UTIL_DELARR(sense); - UTIL_DELARR(rhs); - UTIL_DELARR(range); - } - } - - if (m_param.BranchEnforceInSubProb == true) { - m_branchingImplementation = DecompBranchInSubproblem; - } - - UtilPrintFuncEnd(m_osLog, m_classTag, - "setMasterBounds()", m_param.LogDebugLevel, 2); -} - -//===========================================================================// -DecompStatus DecompAlgo::solutionUpdate(const DecompPhase phase, - bool resolve, - //TODO: not currently used? - const int maxInnerIter, - const int maxOuterIter) -{ - UtilPrintFuncBegin(m_osLog, m_classTag, - "solutionUpdate()", m_param.LogDebugLevel, 2); - m_stats.timerOther1.reset(); - int i; - DecompStatus status = STAT_UNKNOWN; - - //--- - //--- solve the master as an integer program - //--- since the user might have given us a good IP feasible - //--- init solution, let's always solve master as IP as soon - //--- as we get into PHASE 2 - //--- - if (m_param.SolveMasterAsMip && - ((m_phase != PHASE_PRICE1 && - m_nodeStats.priceCallsTotal && - m_nodeStats.priceCallsTotal % m_param.SolveMasterAsMipFreqPass == 0) - || - m_firstPhase2Call)) { - UTIL_MSG(m_param.LogLevel, 2, - (*m_osLog) << "solveMasterAsMip: PriceCallsTotal=" << - m_nodeStats.priceCallsTotal - << " m_firstPhase2Call = " - << m_firstPhase2Call - << endl;); - solveMasterAsMIP(); - - if (m_firstPhase2Call) { - m_firstPhase2Call = false; - } - } - - //if(m_phase == PHASE_PRICE2) - // if(m_firstPhase2Call) - // m_firstPhase2Call = false; - //--- - //--- was missing all along? 9/28/09 - //--- - //#ifdef __DECOMP_LP_CLP__ - //m_masterSI->setHintParam(OsiDoPresolveInResolve, false, OsiHintDo); - //#else - //m_masterSI->setHintParam(OsiDoPresolveInResolve, true, OsiHintDo); - //#endif - //m_masterSI->setIntParam(OsiMaxNumIteration, maxInnerIter); - //THINK: - //if we allow for interior, need crossover too? 
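solutionUpdate re-solves the master with the pattern sketched below: pick the simplex variant through OSI hints, lift the dual objective limit during pricing, then warm-start with resolve() when a basis exists and fall back to initialSolve() otherwise. The OSI calls are the ones used in the surrounding code; the wrapper itself and its flags are only illustrative.

#include "OsiSolverInterface.hpp"

// Sketch of the re-solve pattern used by solutionUpdate; useDualSimplex and
// haveBasis are illustrative parameters, not DIP options.
void resolveMaster(OsiSolverInterface &master, bool useDualSimplex,
                   bool haveBasis, double infinity) {
  // turn the dual objective limit off so a good dual bound does not cut
  // the solve short (mirrors the pricing-phase code above)
  master.setDblParam(OsiDualObjectiveLimit, infinity);
  master.setHintParam(OsiDoDualInResolve, useDualSimplex, OsiHintDo);

  if (haveBasis) {
    master.resolve();      // warm start from the current basis
  } else {
    master.initialSolve(); // first solve of this master
  }
}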
- - if (m_param.DecompLPSolver == "CPLEX"){ -#ifdef DIP_HAS_CPX - OsiCpxSolverInterface* masterCpxSI - = dynamic_cast(m_masterSI); - CPXENVptr env = masterCpxSI->getEnvironmentPtr(); - CPXsetintparam( env, CPX_PARAM_PREIND, CPX_ON ); - CPXsetintparam( env, CPX_PARAM_SCRIND, CPX_ON ); - CPXsetintparam( env, CPX_PARAM_SIMDISPLAY, 2 ); - //int preInd = 0; - //CPXgetintparam(env, CPX_PARAM_PREIND, &preInd); - //printf("preind=%d\n",preInd); -#endif - } - - switch (phase) { - case PHASE_PRICE1: - case PHASE_PRICE2: - m_masterSI->setDblParam(OsiDualObjectiveLimit, m_infinity); - - if (m_param.SolveMasterUpdateAlgo == DecompDualSimplex) { - m_masterSI->setHintParam(OsiDoDualInResolve, true, OsiHintDo); - } else { - m_masterSI->setHintParam(OsiDoDualInResolve, false, OsiHintDo); - } - - //TODO: interior - //if(m_algo == DECOMP)//THINK! - // m_masterSI->setHintParam(OsiDoPresolveInResolve, false, OsiHintDo); - - if (m_param.DecompLPSolver == "CPLEX" && m_param.DoInteriorPoint){ -#ifdef DIP_HAS_CPX - //int cpxStat=0, cpxMethod=0; - OsiCpxSolverInterface* masterCpxSI - = dynamic_cast(m_masterSI); - CPXENVptr env = masterCpxSI->getEnvironmentPtr(); - CPXLPptr lp = - masterCpxSI->getLpPtr(OsiCpxSolverInterface::KEEPCACHED_ALL); - //CPXhybbaropt(env, lp, 0);//if crossover, defeat purpose - CPXbaropt(env, lp); - //cpxMethod = CPXgetmethod(env, lp); - //cpxStat = CPXgetstat(env, lp); - //if(cpxStat) - // printf("cpxMethod=%d, cpxStat = %d\n", cpxMethod, cpxStat); -#endif - }else{ - if (resolve) { - // m_masterSI->writeMps("temp"); - m_masterSI->resolve(); - } else { - m_masterSI->initialSolve(); - } - } - break; - case PHASE_CUT: - m_masterSI->setHintParam(OsiDoDualInResolve, true, OsiHintDo); - - if (resolve) { - m_masterSI->resolve(); - } else { - m_masterSI->initialSolve(); - } - - break; - default: - assert(0); - } - - UTIL_MSG(m_param.LogDebugLevel, 3, - (*m_osLog) << "Solution update n_cols:" - << setw(10) << m_masterSI->getNumCols() << " n_rows: " - << setw(10) << m_masterSI->getNumRows() << " n_iter: " - << setw(10) << m_masterSI->getIterationCount() << " time: " - << setw(10) << m_stats.timerOther1.getRealTime() - << endl; - ); - if (m_param.DecompLPSolver == "Clp"){ -#ifdef DIP_HAS_CLP - UTIL_DEBUG(m_param.LogDebugLevel, 4, { - OsiClpSolverInterface* osiClp - = dynamic_cast(m_masterSI); - printf("clp status = %d\n", - osiClp->getModelPtr()->status()); - printf("clp prob status = %d\n", - osiClp->getModelPtr()->problemStatus()); - printf("clp second status = %d\n", - osiClp->getModelPtr()->secondaryStatus()); - } - ); -#endif - } - UTIL_DEBUG(m_param.LogDebugLevel, 3, - (*m_osLog) - << "Iteration Count : " - << m_masterSI->getIterationCount() << "\n" - << "isAbandoned() : " - << m_masterSI->isAbandoned() << "\n" - << "isProvenOptimal() : " - << m_masterSI->isProvenOptimal() << "\n" - << "isProvenPrimalInfeasible() : " - << m_masterSI->isProvenPrimalInfeasible() << "\n" - << "isProvenDualInfeasible() : " - << m_masterSI->isProvenDualInfeasible() << "\n" - << "isPrimalObjectiveLimitReached : " - << m_masterSI->isDualObjectiveLimitReached() << "\n" - << "isDualObjectiveLimitReached : " - << m_masterSI->isDualObjectiveLimitReached() << "\n" - << "isIterationLimitReached : " - << m_masterSI->isIterationLimitReached() << "\n"; - ); - - if (m_masterSI->isProvenOptimal()) { - status = STAT_FEASIBLE; - //if we are using cpx, we need to save the - //solution and we cannot use getColSolution() later on - //for example, after addCols is called, cache is lost - const int nCols = m_masterSI->getNumCols(); - 
const int nRows = m_masterSI->getNumRows(); - const double* primSol = m_masterSI->getColSolution(); - // Need to distinguish the primSol after we added master-only variables - const double* dualSol = m_masterSI->getRowPrice(); - const double* rc = m_masterSI->getReducedCost(); - m_reducedCost.clear(); - m_reducedCost.reserve(nCols); - m_reducedCost.assign(rc, rc + nCols); - assert((int)m_reducedCost.size() == nCols); - m_primSolution.clear(); - m_primSolution.reserve(nCols); - m_dualSolution.clear(); - m_dualSolution.reserve(nRows); - m_primSolution.assign(primSol, primSol + nCols); - m_dualSolution.assign(dualSol, dualSol + nRows); - assert((int)m_primSolution.size() == nCols); - assert((int)m_dualSolution.size() == nRows); - UTIL_DEBUG(m_param.LogDebugLevel, 4, - (*m_osLog) - << "MasterObj : " - << UtilDblToStr(getMasterObjValue()) << "\n"; - ); - - //sanity check - if (m_algo != CUT) { - //checkMasterDualObj(); - } - - //--- - //--- adjust dual solution - //--- DecompAlgo call adjusts based on dual stabilization method - //--- - adjustMasterDualSolution(); - - //--- - //--- HACK: there is some bug in CLP where infeasible is declared optimal - //--- but then we get back solution at state when it internally gave up - //--- - //--- Check to see if some lambda < 0 - i.e., junk. If so, assume that - //--- it meant to return infeasible. - //--- - for (i = 0; i < nCols; i++) { - // If there is master only variables, primSol will contain values of those master Only variables - // the the notation of LAMBDA is a little bit abused... - if (primSol[i] < m_masterSI->getColLower()[i] - 1) { - std::cout << "The bad upper bound is " << m_masterSI->getColUpper()[i] << std::endl; - std::cout << "primSol[ " << i << "] is" << primSol[i] << std::endl; - std::cout << "The bad lower bound is " << m_masterSI->getColLower()[i] << std::endl; - (*m_osLog) << "ERROR: NEGATIVE LAMBDA, but Osi returns as optimal" - << " assume it was meant to be infeasible." << endl; - status = STAT_INFEASIBLE; - } - } - } else if (m_masterSI->isProvenPrimalInfeasible() || - m_masterSI->isProvenDualInfeasible()) { - //for interior, if infeasible, the status is not - // getting picked up properly by OSI - status = STAT_INFEASIBLE; - //--- - //--- it is possible that presolver determined infeasibility - //--- but, we will need a dual ray, so we should resolve with - //--- presolve off - //--- - m_masterSI->setDblParam(OsiDualObjectiveLimit, m_infinity); - m_masterSI->setHintParam(OsiDoPresolveInResolve, false, OsiHintDo); - m_masterSI->resolve(); - m_masterSI->setHintParam(OsiDoPresolveInResolve, true, OsiHintDo); - } else { -#ifdef DO_INTERIOR - - if (m_masterSI->isDualObjectiveLimitReached()) { - status = STAT_INFEASIBLE; - } else -#endif - { - assert(0); - } - } - - //--- - //--- HACK: there is some bug in CLP where infeasible is declared optimal - //--- but then we get back solution at state when it internally gave up - //--- - //--- Check to see if some lambda < 0 - i.e., junk. If so, assume that - //--- it meant to return infeasible. - //--- - m_stats.thisSolUpdate.push_back(m_stats.timerOther1.getRealTime()); - UtilPrintFuncEnd(m_osLog, m_classTag, - "solutionUpdate()", m_param.LogDebugLevel, 2); - return status; -} - -//===========================================================================// -//NOTE: not ok for CPX... do self? 
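As the comments above note, the pointers returned by getColSolution()/getRowPrice()/getReducedCost() may be invalidated once columns are added later, so solutionUpdate copies them into owned vectors right after the solve. A minimal sketch of that caching step; MasterSolutionCache is an illustrative name, not a DIP class.

#include <vector>
#include "OsiSolverInterface.hpp"

// Copy the solver's primal, dual, and reduced-cost vectors into owned
// storage so later addCols/addRows cannot invalidate them.
struct MasterSolutionCache {
  std::vector<double> primal, dual, reducedCost;

  void capture(const OsiSolverInterface &master) {
    const int nCols = master.getNumCols();
    const int nRows = master.getNumRows();
    primal.assign(master.getColSolution(), master.getColSolution() + nCols);
    dual.assign(master.getRowPrice(), master.getRowPrice() + nRows);
    reducedCost.assign(master.getReducedCost(),
                       master.getReducedCost() + nCols);
  }
};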
-vector DecompAlgo::getDualRays(int maxNumRays) -{ - if (m_param.DecompLPSolver == "CPLEX"){ - return(getDualRaysCpx(maxNumRays)); - }else if (m_param.DecompLPSolver == "Clp" || - m_param.DecompLPSolver == "Gurobi"){ - return(getDualRaysOsi(maxNumRays)); - }else{ - throw UtilException("Unknown solver selected.", - "getDualRays", "DecompAlgo"); - } -} - -//===========================================================================// -vector DecompAlgo::getDualRaysCpx(int maxNumRays) -{ -#ifdef DIP_HAS_CPX - bool useMultiRay = true; - if (useMultiRay){ - OsiCpxSolverInterface* siCpx - = dynamic_cast(m_masterSI); - const int m = m_masterSI->getNumRows(); - const int n = m_masterSI->getNumCols(); - const double* rowRhs = m_masterSI->getRightHandSide(); - const char* rowSense = m_masterSI->getRowSense(); - int r, b, c; - vector rays; - //Ax + Is = b - // ax <= b - // ax + s = b, s >= 0 - // ax >= b - // ax + s = b, s <= 0 - UTIL_DEBUG(m_param.LogDebugLevel, 5, - - for (r = 0; r < m; r++) { - (*m_osLog) << "Row r: " << r << " sense: " << rowSense[r] - << " rhs: " << rowRhs[r] << endl; - } - ); - m_masterSI->enableSimplexInterface(false); - double* tabRhs = new double[m]; - int* basics = new int[m]; - double* yb = new double[m]; - double* bInvRow = new double[m]; - double* bInvARow = new double[n]; - //STOP ============================================ - //tabRhs and yb do NOT match up.... is this an issue? - //have to hand adjust or use tabRhs since proof is based on B-1 - //which matches up with bhead - what to do in the case of CLP? - //but, we are multiplying this by A'' later on which is based on - //original variable space, not the one adjusted by simplex - so if - //we return the dual ray directly from B-1 then do B-1A by hand - - //do we have a problem? - //need to add a check that B-1A matches my dualray.A calculation - //in generate vars... it might be ok and yb not ok, because the - //adjustments in simplex might only be related to rhs... - //i don't think Osi returns tabRhs... that should be changed - CPXgetbhead(siCpx->getEnvironmentPtr(), - siCpx->getLpPtr(OsiCpxSolverInterface::KEEPCACHED_ALL), - basics, tabRhs); - //as a sanity check print out the basis status next to the yb vs tabRhs - //calculation.... let's see why and where things don't match up... - //yb, where y is a row of B-1 (note, can get from bhead?) 
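
The row loop that follows applies the textbook test sketched in the comments: if (B^-1 b)_r > 0 while every entry of row r of B^-1 A is nonpositive, then the negated row r of B^-1 is a dual ray certifying primal infeasibility, and symmetrically for the opposite signs. A small standalone version of that per-row test, simplified to plain arrays with a single tolerance in place of DecompEpsilon; it ignores the slack handling the real code worries about.

#include <cstddef>

// Illustrative per-row ray test. binvARow = row r of B^-1 A (length n),
// tabRhsR = (B^-1 b)_r. Returns +1 if the negated row of B^-1 is a dual ray,
// -1 if the row can be used as-is, 0 if this row proves nothing.
int dualRaySignFromRow(const double* binvARow, std::size_t n,
                       double tabRhsR, double eps = 1.0e-8) {
  if (tabRhsR > eps) {                     // need every (B^-1 A)_rj <= 0
    for (std::size_t j = 0; j < n; ++j)
      if (binvARow[j] >= eps) return 0;
    return +1;                             // use -(B^-1)_r as the ray
  }
  if (tabRhsR < -eps) {                    // need every (B^-1 A)_rj >= 0
    for (std::size_t j = 0; j < n; ++j)
      if (binvARow[j] <= -eps) return 0;
    return -1;                             // use (B^-1)_r directly
  }
  return 0;                                // (near-)degenerate row, skip
}
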
- UTIL_DEBUG(m_param.LogDebugLevel, 6, - (*m_osLog) << "\nB-1:"; - - for (r = 0; r < m; r++) { - yb[r] = 0.0; - m_masterSI->getBInvRow(r, bInvRow); - (*m_osLog) << "\nB-1Row r: " << r << ": " << endl; - - for (b = 0; b < m; b++) { - yb[r] += bInvRow[b] * rowRhs[b]; - (*m_osLog) << setw(6) << "bind: " - << setw(4) << basics[b] - << setw(12) << bInvRow[b] - << " [" - << setw(12) << rowRhs[b] - << "] " - << setw(8) << " +=: " - << setw(12) << bInvRow[b] * rowRhs[b] - << setw(8) << " yb: " - << setw(12) << yb[r] - << setw(8) << " tabRhs: " - << setw(12) << tabRhs[r] << endl; - } - - if (!UtilIsZero(yb[r] - tabRhs[r])) { - (*m_osLog) << " DIFF is " << yb[r] - tabRhs[r] << endl; - } - - assert(UtilIsZero(yb[r] - tabRhs[r], 1.0e-4)); - } - ); - - for (r = 0; r < m; r++) { - yb[r] = 0.0; - m_masterSI->getBInvRow(r, bInvRow); - - for (b = 0; b < m; b++) { - yb[r] += bInvRow[b] * rowRhs[b];//(B-1)_r.b - } - - if (!UtilIsZero(yb[r] - tabRhs[r])) { - (*m_osLog) << " DIFF is " << yb[r] - tabRhs[r] << endl; - (*m_osLog) << "\nB-1Row r: " << r << ": basics[r]=" << basics[r] - << endl; - yb[r] = 0.0; - - for (b = 0; b < m; b++) { - if (UtilIsZero(bInvRow[b])) { - continue; - } - - yb[r] += bInvRow[b] * rowRhs[b]; - (*m_osLog) << setw(6) << "bind: " - << setw(4) << basics[b] - << setw(12) << bInvRow[b] - << " [" - << setw(12) << rowRhs[b]; - - if (basics[b] < 0) { //== -rowIndex-1 - (*m_osLog) << " sense = " << rowSense[-(basics[b] + 1)]; - } - - (*m_osLog) << "] " - << setw(8) << " +=: " - << setw(12) << bInvRow[b] * rowRhs[b] - << setw(8) << " yb: " - << setw(12) << yb[r] - << setw(8) << " tabRhs: " - << setw(12) << tabRhs[r] << endl; - } - } - - //assert(UtilIsZero(yb[r] - tabRhs[r], 1.0e-4)); - } - - for (r = 0; r < m; r++) { - if (UtilIsZero(tabRhs[r])) { - continue; - } - - //all pos case? if yb < 0 (then we want to minimize B-1Ax, x in P') - //all neg case? if yb > 0 (then we want to maximize B-1Ax, x in P') - UTIL_DEBUG(m_param.LogDebugLevel, 6, - (*m_osLog) << "\nB-1A:"; - ); - - if (tabRhs[r] > 0) { //instead of yb - //Ted also checks that it is a slack var here - why? 
- bool allneg = true; - m_masterSI->getBInvARow(r, bInvARow); - UTIL_DEBUG(m_param.LogDebugLevel, 6, - (*m_osLog) << "\nB-1ARow r: " << r << ": "; - ); - allneg = true; - - for (c = 0; c < n; c++) { - UTIL_DEBUG(m_param.LogDebugLevel, 6, - (*m_osLog) << bInvARow[c] << " "; - ); - - if (bInvARow[c] >= DecompEpsilon) { - allneg = false; - break; - } - } - - if (allneg) { - UTIL_DEBUG(m_param.LogDebugLevel, 6, - (*m_osLog) << " ---> allneg"; - ); - double* dualRay = new double[m]; - m_masterSI->getBInvRow(r, dualRay); - transform(dualRay, dualRay + m, dualRay, negate()); - rays.push_back(dualRay); - } - } else { - bool allpos = true; - m_masterSI->getBInvARow(r, bInvARow); - UTIL_DEBUG(m_param.LogDebugLevel, 6, - (*m_osLog) << "\nB-1ARow r: " << r << ": "; - ); - allpos = true; - - for (c = 0; c < n; c++) { - UTIL_DEBUG(m_param.LogDebugLevel, 6, - (*m_osLog) << bInvARow[c] << " "; - ); - - if (bInvARow[c] <= -DecompEpsilon) { - allpos = false; - break; - } - } - - if (allpos) { - UTIL_DEBUG(m_param.LogDebugLevel, 6, - (*m_osLog) << " ---> allpos"; - ); - double* dualRay = new double[m]; - m_masterSI->getBInvRow(r, dualRay); - rays.push_back(dualRay); - } - } - } - - UTIL_DELARR(tabRhs); - UTIL_DELARR(basics); - UTIL_DELARR(yb); - UTIL_DELARR(bInvRow); - UTIL_DELARR(bInvARow); - m_masterSI->disableSimplexInterface(); - printf("rays.size = %d\n", static_cast(rays.size())); - - if (rays.size() <= 0) { - printf("NO RAYS using standard lookup - try dualfarkas\n"); - double proof_p; - double* dualRay = new double[m]; - CPXdualfarkas(siCpx->getEnvironmentPtr(), - siCpx->getLpPtr(OsiCpxSolverInterface::KEEPCACHED_ALL), - dualRay, &proof_p); - (*m_osLog) << "After dual farkas proof_p = " << proof_p << "\n"; - transform(dualRay, dualRay + m, dualRay, negate()); - - for (int i = 0; i < m; i++) { - printf("dualRay[%d]: %g\n", i, dualRay[i]); - } - - rays.push_back(dualRay); - } - - //NOTE: you will have dup rays here - need to filter out... 
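
The NOTE above flags that the multi-ray lookup can produce duplicate rays that ought to be filtered before use. One possible filter, sketched here with the standard library and not taken from DIP, keeps a ray only if it differs from every previously kept ray in some component by more than a tolerance; since rays are only defined up to positive scaling, a production version would normalize them first.

#include <cmath>
#include <vector>

// Illustrative duplicate filter: component-wise comparison of raw rays of
// length m. Ownership/deletion of the arrays is ignored here.
std::vector<const double*> filterDuplicateRays(
    const std::vector<const double*>& rays, int m, double tol = 1.0e-8) {
  std::vector<const double*> kept;
  for (const double* r : rays) {
    bool dup = false;
    for (const double* k : kept) {
      bool same = true;
      for (int i = 0; i < m; ++i)
        if (std::fabs(r[i] - k[i]) > tol) { same = false; break; }
      if (same) { dup = true; break; }
    }
    if (!dup) kept.push_back(r);
  }
  return kept;
}
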
- printf("rays.size = %d", static_cast(rays.size())); - - for (size_t i = 0; i < rays.size(); i++) { - bool isProof = isDualRayInfProof(rays[i], - m_masterSI->getMatrixByRow(), - m_masterSI->getColLower(), - m_masterSI->getColUpper(), - m_masterSI->getRightHandSide(), - NULL); - - if (!isProof) { - isDualRayInfProof(rays[i], - m_masterSI->getMatrixByRow(), - m_masterSI->getColLower(), - m_masterSI->getColUpper(), - m_masterSI->getRightHandSide(), - m_osLog); - } - - assert(isProof); - } - - assert(rays.size() > 0); - return rays; - }else{//useMultiRay == false -//TEST THIS - OsiCpxSolverInterface* siCpx - = dynamic_cast(m_masterSI); - const int m = m_masterSI->getNumRows(); - const int n = m_masterSI->getNumCols(); - double proof_p; - bool isProof; - vector rays; - double* ray = new double[m]; - int err - = CPXdualfarkas(siCpx->getEnvironmentPtr(), - siCpx->getLpPtr(OsiCpxSolverInterface::KEEPCACHED_ALL), - ray, &proof_p);//proof_p - - if (err) { - cerr << "CPXdualfarkas returns err " << err << endl; - abort(); - } - - cout << "After dual farkas proof_p = " << proof_p << "\n"; - //We have to flip because in this context we want to max B-1Ax, x in P' - double* pneg = new double[m]; - transform(ray, ray + m, pneg, negate()); - rays.push_back(pneg); -#if 1 - UTIL_DEBUG(m_app->m_param.LogDebugLevel, 5, - bool isProof = isDualRayInfProof(rays[0], - m_masterSI->getMatrixByRow(), - m_masterSI->getColLower(), - m_masterSI->getColUpper(), - m_masterSI->getRightHandSide(), - NULL); - printf("isProof = %d\n", isProof); - printBasisInfo(m_masterSI, m_osLog); - fflush(stdout); - - if (!isProof) { - isDualRayInfProof(ray, - m_masterSI->getMatrixByRow(), - m_masterSI->getColLower(), - m_masterSI->getColUpper(), - m_masterSI->getRightHandSide(), - m_osLog); - printBasisInfo(m_masterSI, m_osLog); - fflush(stdout); - } - ); - assert(isDualRayInfProof(ray, - m_masterSI->getMatrixByRow(), - m_masterSI->getColLower(), - m_masterSI->getColUpper(), - m_masterSI->getRightHandSide(), - NULL)); -#endif - return rays; - } -#else - throw UtilException("CPLEX function called when CPLEX is not available", - "getDualRaysCpx", "DecompAlgo"); -#endif -} - -//===========================================================================// -//STOP - try this... -vector DecompAlgo::getDualRaysOsi(int maxNumRays) -{ - if (m_param.UseMultiRay){ - const int m = m_masterSI->getNumRows(); - const int n = m_masterSI->getNumCols(); - const double* rowRhs = m_masterSI->getRightHandSide(); - const char* rowSense = m_masterSI->getRowSense(); - int i, r, b, c; - vector rays; - UtilPrintFuncBegin(m_osLog, m_classTag, - "getDualRays()", m_param.LogDebugLevel, 2); - UTIL_DEBUG(m_param.LogDebugLevel, 5, - - for (r = 0; r < m; r++) { - (*m_osLog) << "Row r: " << r << " sense: " << rowSense[r] - << " rhs: " << rowRhs[r] << endl; - } - ); - m_masterSI->enableSimplexInterface(false); - //with simplex interface, this is slightly different... - const double* primSolution = m_masterSI->getColSolution(); - const double* rowAct = m_masterSI->getRowActivity(); //==slacks? - double* tabRhs = new double[m]; //osi_clp does not give this? 
- //B-1b just equals x, but what if art column then is slack var - int* basics = new int[m]; - double* yb = new double[m]; - double* bInvRow = new double[m]; - double* bInvARow = new double[n]; - m_masterSI->getBasics(basics); - - for (r = 0; r < m; r++) { - i = basics[r]; - - if (i < n) { - tabRhs[r] = primSolution[i]; //should == B-1b - //printf("tabRhs[c:%d]: %g\n", i, tabRhs[r]); - } else { - //this really should be slack vars... - //assuming clp does Ax-Is = b, s = ax-b ??? nope... - //tabRhs[r] = rowAct[i - n] - rowRhs[i - n]; - tabRhs[r] = rowRhs[i - n] - rowAct[i - n]; - //printf("tabRhs[r:%d]: %g [act: %g rhs: %g sense: %c]\n", - // i-n, tabRhs[r], rowAct[i-n], rowRhs[i-n], rowSense[i-n]); - } - } - - //as a sanity check print out the basis status next to the yb vs tabRhs - //calculation.... let's see why and where things don't match up... - //yb, where y is a row of B-1 (note, can get from bhead?) - //B-1b is tab rhs, is this equivalent to x for struct columns? - UTIL_DEBUG(m_param.LogDebugLevel, 6, - (*m_osLog) << "\nB-1:"; - - for (r = 0; r < m; r++) { - if (UtilIsZero(tabRhs[r])) { - continue; - } - - yb[r] = 0.0; - m_masterSI->getBInvRow(r, bInvRow); - (*m_osLog) << "\nB-1Row r: " << r << ": " << endl; - - for (b = 0; b < m; b++) { - yb[r] += bInvRow[b] * rowRhs[b]; - (*m_osLog) << setw(6) << "bind: " - << setw(4) << basics[b] - << setw(12) << bInvRow[b] - << " [" - << setw(12) << rowRhs[b] - << "] " - << setw(8) << " +=: " - << setw(12) << bInvRow[b] * rowRhs[b] - << setw(8) << " yb: " - << setw(12) << yb[r] - << setw(8) << " tabRhs: " - << setw(12) << tabRhs[r] - << endl; - } - - if (!UtilIsZero(yb[r] - tabRhs[r])) { - (*m_osLog) << " DIFF is " << yb[r] - tabRhs[r] << endl; - } - - assert(UtilIsZero(yb[r] - tabRhs[r], 1.0e-4)); - } - ); - - for (r = 0; r < m; r++) { - if (UtilIsZero(tabRhs[r])) { - continue; - } - - //all pos case? if yb < 0 (then we want to minimize B-1Ax, x in P') - //all neg case? if yb > 0 (then we want to maximize B-1Ax, x in P') - if (tabRhs[r] > 0) { //instead of yb - //Ted also checks that it is a slack var here - why? - bool allneg = true; - //not getting back slacks part here... need? 
- m_masterSI->getBInvARow(r, bInvARow); - UTIL_DEBUG(m_param.LogDebugLevel, 6, - (*m_osLog) << "B-1ARow r: " << r << ": "; - ); - allneg = true; - - for (c = 0; c < n; c++) { - UTIL_DEBUG(m_param.LogDebugLevel, 6, - (*m_osLog) << bInvARow[c] << " "; - ); - - if (bInvARow[c] >= DecompEpsilon) { - allneg = false; - break; - } - } - - if (allneg) { - UTIL_DEBUG(m_param.LogDebugLevel, 6, - (*m_osLog) << " ---> allneg"; - ); - double* dualRay = new double[m]; - m_masterSI->getBInvRow(r, dualRay); - transform(dualRay, dualRay + m, dualRay, negate()); - rays.push_back(dualRay); - } - } else { - bool allpos = true; - m_masterSI->getBInvARow(r, bInvARow); - UTIL_DEBUG(m_param.LogDebugLevel, 6, - (*m_osLog) << "B-1ARow r: " << r << ": "; - ); - allpos = true; - - for (c = 0; c < n; c++) { - UTIL_DEBUG(m_param.LogDebugLevel, 6, - (*m_osLog) << bInvARow[c] << " "; - ); - - if (bInvARow[c] <= -DecompEpsilon) { - allpos = false; - break; - } - } - - if (allpos) { - UTIL_DEBUG(m_param.LogDebugLevel, 6, - (*m_osLog) << " ---> allpos"; - ); - double* dualRay = new double[m]; - m_masterSI->getBInvRow(r, dualRay); - rays.push_back(dualRay); - } - } - - UTIL_DEBUG(m_param.LogDebugLevel, 6, - (*m_osLog) << endl; - ); - } - - UTIL_DELARR(basics); - UTIL_DELARR(yb); - UTIL_DELARR(bInvRow); - UTIL_DELARR(bInvARow); - m_masterSI->disableSimplexInterface(); - /* - if(rays.size() <= 0){ - double proof_p; - double * dualRay = new double[m]; - CPXdualfarkas(siCpx->getEnvironmentPtr(), - siCpx->getLpPtr(OsiCpxSolverInterface::KEEPCACHED_ALL), - dualRay, &proof_p); - (*m_osLog) << "After dual farkas proof_p = " << proof_p << "\n"; - transform(dualRay, dualRay + m, dualRay, negate()); - for(int i = 0; i < m; i++){ - printf("dualRay[%d]: %g\n", i, dualRay[i]); - } - rays.push_back(dualRay); - } - */ - //NOTE: you will have dup rays here - need to filter out... - UTIL_DEBUG(m_param.LogDebugLevel, 5, - (*m_osLog) << "Number of Rays = " << rays.size() << endl; - ); - - for (int i = 0; i < (int)rays.size(); i++) { - bool isProof = isDualRayInfProof(rays[i], - m_masterSI->getMatrixByRow(), - m_masterSI->getColLower(), - m_masterSI->getColUpper(), - m_masterSI->getRightHandSide(), - NULL); - - if (!isProof) { - isDualRayInfProof(rays[i], - m_masterSI->getMatrixByRow(), - m_masterSI->getColLower(), - m_masterSI->getColUpper(), - m_masterSI->getRightHandSide(), - m_osLog); - } - - assert(isProof); - } - - assert(rays.size() > 0); - UTIL_DELARR(tabRhs); - UtilPrintFuncEnd(m_osLog, m_classTag, - "getDualRays()", m_param.LogDebugLevel, 2); - return rays; - }else{//m_param.UseMultiRay == false - - UtilPrintFuncBegin(m_osLog, m_classTag, - "getDualRays()", m_param.LogDebugLevel, 2); - vector raysT = m_masterSI->getDualRays(maxNumRays); - const double* rayT = raysT[0]; - assert(rayT); - //stop - //what is yb, that will tell me if i want to opt over uA or -uA - //y^T b - int i; - const CoinPackedMatrix* rowMatrix = m_masterSI->getMatrixByRow(); - const double* rowRhs = m_masterSI->getRightHandSide(); - const int m = rowMatrix->getNumRows(); - double yb = 0.0; - - for (i = 0; i < m; i++) { - yb += rayT[i] * rowRhs[i]; //safe to use rowRhs? or flips in tab going on - } - - (*m_osLog) << " yb = " << yb << endl; - //need tabRhs if doing this way? - //see Clp/examples/decompose.cpp - // he flips the infeasibility ray (always...) - //--- yA >= 0, yb < 0, or --> find a yAs <= 0 (min) - //--- yA <= 0, yb > 0 ?? 
--> find a yAs >= 0 (max <--> -min) - vector rays; - - if (yb > 0) { - double* pneg = new double[m]; - transform(rayT, rayT + m, pneg, negate()); - rays.push_back(pneg); - } else { - rays.push_back(raysT[0]); - } - -#if 1 - UTIL_DEBUG(m_app->m_param.LogDebugLevel, 5, - const double* ray = rays[0]; - assert(ray); - bool isProof = isDualRayInfProof(ray, - m_masterSI->getMatrixByRow(), - m_masterSI->getColLower(), - m_masterSI->getColUpper(), - m_masterSI->getRightHandSide(), - NULL); - printf("isProof = %d\n", isProof); - fflush(stdout); - - if (!isProof) { - isDualRayInfProof(ray, - m_masterSI->getMatrixByRow(), - m_masterSI->getColLower(), - m_masterSI->getColUpper(), - m_masterSI->getRightHandSide(), - m_osLog); - printBasisInfo(m_masterSI, m_osLog); - fflush(stdout); - } - assert(isDualRayInfProof(ray, - m_masterSI->getMatrixByRow(), - m_masterSI->getColLower(), - m_masterSI->getColUpper(), - m_masterSI->getRightHandSide(), - NULL)); - );; -#endif - UtilPrintFuncEnd(m_osLog, m_classTag, - "getDualRays()", m_param.LogDebugLevel, 2); - return rays; - } -} - -//===========================================================================// -int DecompAlgo::generateInitVars(DecompVarList& initVars) -{ - int c, attempts; - double aveC; - DecompConstraintSet* modelCore = m_modelCore.getModel(); - const int limit = m_param.InitVarsLimit; - // Need to get the different strategies for generating initial Vars - const int limit2 = 2 * limit; - //const int limit2 = 1; - const int nCoreCols = modelCore->getNumCols(); - const double* objCoeff = getOrigObjective(); - double timeLimit; - - UtilPrintFuncBegin(m_osLog, m_classTag, - "generateInitVars()", m_param.LogDebugLevel, 2); - m_function = DecompFuncGenerateInitVars; - - //--- - //--- APP: create an initial set of points F'[0] subseteq F' - //--- The base implementation of this function does nothing. - //--- This is the user's chance to implement something application - //--- specific. - //--- - - m_app->generateInitVars(initVars); - - //TODO: think - if user gives a partial feasible solution - // and this part is not run then PI master can be infeasible - // which will cause an issue - //TODO: PI master cannot be infeasible if we use artificials on - // convexity constraints - which we already have - so how is - // that possible? - //Should probably have this on irregardless of what we get from user. - //Another reason this has to run is because if user gives a solution - // with some master-only vars set to their LB=0. This will not be - // added as 0-columns. So, will have convexity constraints that are - // 0=1. 
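
The main loop of generateInitVars (below) seeds the restricted master by repeatedly solving the subproblems with a perturbed cost vector, costeps[c] = objCoeff[c] + U(-aveC, aveC), where aveC is the average original cost and attempt 0 keeps the costs unperturbed. A minimal sketch of that perturbation step using <random> in place of srand/UtilURand; the function name is illustrative only.

#include <cmath>
#include <random>
#include <vector>

// Illustrative sketch of the cost perturbation used to diversify the initial
// columns: objCoeff is the original objective, aveC its average. Attempt 0
// keeps the original costs; later attempts add uniform noise.
std::vector<double> perturbCosts(const std::vector<double>& objCoeff,
                                 double aveC, int attempt) {
  std::mt19937 rng(attempt);                        // reseed per attempt
  double a = std::fabs(aveC);                       // guard against aveC < 0
  std::uniform_real_distribution<double> U(-a, a);
  std::vector<double> costeps(objCoeff.size());
  for (std::size_t c = 0; c < objCoeff.size(); ++c)
    costeps[c] = objCoeff[c] + (attempt == 0 ? 0.0 : U(rng));
  return costeps;
}
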
- - int nInitVars = static_cast(initVars.size()); - UTIL_DEBUG(m_param.LogDebugLevel, 4, - (*m_osLog) - << "nInitVars from app = " << nInitVars - << " userLimit = " << limit << endl; - ); - - //nInitVars = 0;//THINK - if (nInitVars < limit) { - //--- - //--- create an initial set of points F'[0] subseteq F' - //--- randomly by solving zSP(c + eps), eps = U[0,ave(c)] - //--- - //--- - //--- NOTE: in GAP case, subproblem is knapsack, if use orig cost - //--- all cost > 0, so will get NULL column, later on reduced costs - //--- will give negative values, so this is not a problem - //--- - double* costeps = new double[nCoreCols]; - assert(objCoeff); - aveC = UtilAve(objCoeff, nCoreCols); - attempts = 0; - DecompSolverResult subprobResult(m_infinity);//nCoreCols); - - while ((nInitVars < limit) && (attempts < limit2)) { - //--- - //--- perturb the cost vector - //--- - srand(attempts); - - for (c = 0; c < nCoreCols; c++) { - double r = 0.0; - - if (attempts != 0) { - r = UtilURand(-aveC, aveC); - } - - costeps[c] = objCoeff[c] + r; - } - - //--- - //--- APP: solve zSP(c + eps) - //--- - map::iterator mit; - double sumInitLB = 0.0; //like LR with 0 dual (only first pass) - for (mit = m_modelRelax.begin(); mit != m_modelRelax.end(); mit++) { - DecompSubModel& subModel = (*mit).second; - timeLimit = max(m_param.SubProbTimeLimitExact - - m_stats.timerOverall.getRealTime(), 0.0); - solveRelaxed(costeps, //reduced cost (fake here) - objCoeff, //original cost vector - 9e15, //alpha (fake here) - nCoreCols, //num core columns - false, //isNested - subModel, - &subprobResult, //results - initVars, //var list to populate - timeLimit); - - if (attempts == 0) { - //TODO: have to treat masterOnly differently - // we don't correctly populate LB/UB in - // subprobResult object - so contribution is wrong - sumInitLB += subprobResult.m_objLB; - //printf("ThisLB = %g, sumInitLB = %g\n", - // subprobResult.m_objLB, sumInitLB); - } - } - - map >::iterator mivt; - vector ::iterator vit; - - for (mivt = m_modelRelaxNest.begin(); - mivt != m_modelRelaxNest.end(); mivt++) { - for (vit = (*mivt).second.begin(); - vit != (*mivt).second.end(); vit++) { - timeLimit = max(m_param.SubProbTimeLimitExact - - m_stats.timerOverall.getRealTime(), 0.0); - solveRelaxed(costeps, //reduced cost (fake here) - objCoeff, //original cost vector - 9e15, //alpha (fake here) - nCoreCols, //num core columns - true, //isNested - (*vit), - &subprobResult, //results - initVars, //var list to populate - timeLimit); - } - } - - //--- - //--- THINK: check for duplicate variables - done in solveRelaxed - //--- don't assume the user does the duplicate check - should be - //--- done by col pool also - //--- - nInitVars = static_cast(initVars.size()); - attempts++; - } - - UTIL_DEBUG(m_param.LogDebugLevel, 4, - (*m_osLog) << "\nm_varsThisCall = " - << initVars.size() << "\n"; - ); - //--- - //--- TODO: solve a few iterations of subgradient to get init vars? - //--- - //--- TODO: put them in the var pool?? 
- //--- - UTIL_DELARR(costeps);//TODO: use mem-pool - } - - //--- - //--- generate init vars by solving root LP and - //--- running DC at each iteration - //--- - if (m_param.InitVarsWithCutDC) { - printf("======= BEGIN Gen Init Vars - call CPM process root node\n"); - DecompAlgoC cpm(m_app, *m_utilParam); - cpm.m_param.CutDC = 2; - cpm.processNode(0, -m_infinity, m_infinity); - //--- - //--- copy the vars generated in passes of DC into initVars - //--- to warm-start DW master formulation - //--- - //m_vars.insert(m_vars.end(), cpm.m_vars.begin(), cpm.m_vars.end()); - initVars.splice(initVars.end(), cpm.m_vars); - printf("VARS moved into PC object initVars.size=%d\n", - static_cast(initVars.size())); - //printVars(m_osLog);//use this to warm start DW - //a hidden advantage of decomp in DC? - DecompSolution* bestSol = NULL; - vector::iterator it; - //there will be just one, i think, just need to copy it over here - double thisBound; - double bestBoundUB = m_nodeStats.objBest.second; - - for (it = cpm.m_xhatIPFeas.begin(); - it != cpm.m_xhatIPFeas.end(); it++) { - thisBound = (*it)->getQuality(); - printf("From init vars, IP Feasible with Quality = %g\n", thisBound); - - if ((*it)->getQuality() <= bestBoundUB) { - bestBoundUB = (*it)->getQuality(); - bestSol = (*it); - } - } - - //need to make copy of solution, since D.m_xhatIpFeas goes out of scope - if (bestSol) { - DecompSolution* bestSolCp = new DecompSolution(*bestSol); - m_xhatIPFeas.push_back(bestSolCp); - setObjBoundIP(bestSolCp->getQuality()); - m_xhatIPBest = bestSolCp; - m_xhatIPBest->print(); - } - - printf("======= END Gen Init Vars - call CPM process root node\n"); - } - - if (m_param.InitVarsWithIP) { - printf("======= BEGIN Gen Init Vars - call Direct IP solver\n"); - DecompAlgoC direct(m_app, *m_utilParam); - DecompSolverResult* result = NULL; - double oldSetting = m_param.TimeLimit; - m_param.TimeLimit = m_param.InitVarsWithIPTimeLimit; - result = direct.solveDirect(); - m_param.TimeLimit = oldSetting; - - if (result->m_nSolutions) { - //--- - //--- if an incumbent was found, create a var(s) from it - //--- - //TODO: safe to assume 0th is the best - const double* solution = result->getSolution(0); - const DecompVarType varType = result->m_isUnbounded ? 
DecompVar_Ray : DecompVar_Point; - - if (m_numConvexCon == 1) { - DecompVar* directVar = new DecompVar(nCoreCols, - solution, - 0.0, result->m_objUB, varType); - initVars.push_back(directVar); - } else { - map::iterator mid; - - for (mid = m_modelRelax.begin(); mid != m_modelRelax.end(); mid++) { - int blockId = (*mid).first; - DecompSubModel& modelRelax = (*mid).second; - vector& activeColumns - = modelRelax.getModel()->activeColumns; - vector ind; - vector els; - double origCost = 0.0; - vector::iterator it; - - for (it = activeColumns.begin(); - it != activeColumns.end(); it++) { - if (!UtilIsZero(solution[*it])) { - ind.push_back(*it); - els.push_back(solution[*it]); - origCost += objCoeff[*it] * solution[*it]; - } - } - - DecompVar* directVar - = new DecompVar(ind, els, 0.0, origCost, varType); - directVar->setBlockId(blockId); - initVars.push_back(directVar); - } - } - - //--- - //--- update the upper bound - //--- - double bestBoundUB = m_nodeStats.objBest.second; - - if (result->m_objUB < bestBoundUB) { - DecompSolution* directSol = new DecompSolution(nCoreCols, - solution, - result->m_objUB); - m_xhatIPFeas.push_back(directSol); - m_xhatIPBest = directSol; - setObjBoundIP(result->m_objUB); - } - } - - printf("======= END Gen Init Vars - call Direct IP solver\n"); - } - - //--- - //--- check init vars for incumbent - //--- - if (m_numConvexCon == 1) { - DecompVarList::iterator vli; - - for (vli = initVars.begin(); vli != initVars.end(); vli++) { - //--- - //--- unlikey to happen - but we should check ALL columns - //--- to see if they are IP feasible - //--- - (*vli)->fillDenseArr(modelCore->getNumCols(), - m_memPool.dblArrNCoreCols); - - if (isIPFeasible(m_memPool.dblArrNCoreCols)) { - if (m_app->APPisUserFeasible(m_memPool.dblArrNCoreCols, - modelCore->getNumCols(), - m_param.TolZero)) { - DecompSolution* decompSol - = new DecompSolution(modelCore->getNumCols(), - m_memPool.dblArrNCoreCols, - (*vli)->getOriginalCost()); - m_xhatIPBest = decompSol; - m_xhatIPFeas.push_back(decompSol); - //printf("var is ip feas with obj = %g\n", - // (*vli)->getOriginalCost()); - setObjBoundIP((*vli)->getOriginalCost()); - } - } - } - } - - //--- - //--- this will update the global UB before we start processing - //--- if we were lucky enough to find an incumbent in init vars - //--- - //setCutoffUB(getCutoffUB()); - m_function = DecompFuncGeneric; - UtilPrintFuncEnd(m_osLog, m_classTag, - "generateInitVars()", m_param.LogDebugLevel, 2); - nInitVars = static_cast(initVars.size()); - return nInitVars; -} - -//===========================================================================// -//once we do RC, this probably won't be in base anyway -bool DecompAlgo::updateObjBound(const double mostNegRC) -{ - //--- - //--- C : LB = masterLP obj - //--- PC : LB = zDW_RMP + RC* <= zDW <= zDW_RMP - //--- where RC* is the most negative reduced cost - //--- assuming the relaxation subproblem was solved exactly - //--- - //--- Careful here -- for many apps the user will use heuristics - //--- during column generation phase. If we update LB after each - //--- column added we might stop too early if this LB exceeds the - //--- tree's global upper bound. 
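
The comment above states the bound relationship that updateObjBound implements: with restricted-master objective z_RMP and most negative reduced cost RC* from an exactly solved subproblem, z_RMP + RC* <= z_DW <= z_RMP. A tiny sketch of that bookkeeping; the relative-gap normalization shown is an assumption (a common (UB-LB)/(1+|UB|) form) and may differ from what getNodeLPGap() actually computes.

#include <cmath>

// Illustrative sketch of the Dantzig-Wolfe bound update:
//   lower bound = restricted-master objective + most negative reduced cost,
//   upper bound = restricted-master objective itself.
struct DwBounds { double lb; double ub; };

DwBounds updateBounds(double masterObj, double mostNegRC) {
  DwBounds b;
  b.lb = masterObj + mostNegRC;
  b.ub = masterObj;
  return b;
}

bool gapIsTight(const DwBounds& b, double masterGapLimit) {
  double relGap = std::fabs(b.ub - b.lb) / (1.0 + std::fabs(b.ub));
  return relGap <= masterGapLimit;
}
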
- //--- - UtilPrintFuncBegin(m_osLog, m_classTag, - "updateObjBound()", m_param.LogDebugLevel, 2); - //for DualStab, this returns smoothed duals - int r; - const double* dualSol = getMasterDualSolution(); - const double* rowRhs = m_masterSI->getRightHandSide(); - double zDW_UBPrimal = getMasterObjValue(); - double zDW_UBDual = 0.0; - double zDW_LB = 0.0; - const double* rc = getMasterColReducedCost(); - const double* colLower = m_masterSI->getColLower(); - const double* colUpper = m_masterSI->getColUpper(); - //rStat might not be needed now, but will be needed - // when we support ranged rows. - int* rStat = new int[m_masterSI->getNumRows()]; - int* cStat = new int[m_masterSI->getNumCols()]; - m_masterSI->getBasisStatus(cStat, rStat); - - for (int c = 0; c < m_numCols; c++) { - if (cStat[c] == 3) { - zDW_UBDual += rc[c] * colLower[c]; - } else if (cStat[c] == 2 ) { - zDW_UBDual += rc[c] * colUpper[c]; - } - } - - int nRows = m_masterSI->getNumRows(); - - for (r = 0; r < nRows; r++) { - zDW_UBDual += dualSol[r] * rowRhs[r]; - } - - //zDW_LB = zDW_UBDual + mostNegRC; - zDW_LB = zDW_UBPrimal + mostNegRC; - setObjBound(zDW_LB, zDW_UBPrimal); - /* - double actDiff = fabs(zDW_UBDual - zDW_UBPrimal); - double unifDiff = actDiff / (1.0 + fabs(zDW_UBPrimal)); - if (!m_param.DualStab && !UtilIsZero(unifDiff, 1e-04)) { - (*m_osLog) << "MasterObj [primal] = " << UtilDblToStr(zDW_UBPrimal) - << endl; - (*m_osLog) << "MasterObj [dual] = " << UtilDblToStr(zDW_UBDual) - << endl; - throw UtilException("Primal and Dual Master Obj Not Matching.", - "updateObjBoundLB", "DecompAlgo"); - } - */ - //TODO: stats - we want to play zDW_LB vs UB... - UTIL_MSG(m_param.LogDebugLevel, 3, - (*m_osLog) - << "MasterObj[primal] = " << UtilDblToStr(zDW_UBPrimal) << "\t" - << "[dual] = " << UtilDblToStr(zDW_UBDual) << "\t" - << "mostNegRC = " << UtilDblToStr(mostNegRC) << "\n" - << "ThisLB = " << UtilDblToStr(zDW_LB) << "\t" - << "BestLB = " << UtilDblToStr(m_nodeStats.objBest.first) << "\n"; - ); - UTIL_DEBUG(m_param.LogDebugLevel, 2, - (*m_osLog) - << "PriceCallsRound= " - << setw(3) << m_nodeStats.priceCallsRound - << setw(13) << "\tmostNegRC=" - << setw(13) << UtilDblToStr(mostNegRC, 4) - << setw(13) << "\tthisLB=" - << setw(13) << UtilDblToStr(zDW_LB, 4) - << endl; - ); - - if ((getNodeIndex() == 0) && - (zDW_LB > (m_app->getBestKnownUB() + DecompEpsilon))) { - (*m_osLog) << "ERROR: in root node, bestKnownUB = " - << UtilDblToStr(m_app->getBestKnownUB()) - << " thisBoundLB = " - << UtilDblToStr(zDW_LB) << endl; - //assert(0); - } - - //--- - //--- check if the gap is tight (use the best bound) - //--- - bool isGapTight = false; - double tightGap = m_param.MasterGapLimit; - double relGap = getNodeLPGap(); - - if (relGap <= tightGap) { - isGapTight = true; - } - - if (m_param.LogDebugLevel >= 2) { - (*m_osLog) << "DW relGap = " << UtilDblToStr(relGap) - << " isTight = " << isGapTight << "\n"; - } - - UTIL_DELARR(rStat); - UTIL_DELARR(cStat); - m_relGap = relGap; - UtilPrintFuncEnd(m_osLog, m_classTag, - "updateObjBound()", m_param.LogDebugLevel, 2); - return isGapTight; -} - -//===========================================================================// -void DecompAlgo::masterPhaseItoII() -{ - //--- - //--- switch from Phase I to Phase II - //--- - UTIL_MSG(m_app->m_param.LogDebugLevel, 4, - (*m_osLog) << "Switching from PhaseI to PhaseII\n"; - ); - int i; - int nMasterCols = m_masterSI->getNumCols(); - //--- - //--- set obj for all columns to original cost - //--- set obj for artificial columns to 0 - //--- fix column 
bounds for artificial columns to 0 - //--- -#ifdef STAB_DUMERLE - //--- - //--- set cost on slacks to delta, where delta is init'd to - //--- initial duals (this is to be done in Phase 2 only) - //--- - //--- min deltap sp - deltam sm - //--- - //--- ax = b --> ax + sp - sm = b, sp >= 0 <= epsp, sm >= 0 <= epsm - //--- ax <= b --> ax - sm <= b, sm >= 0 <= epsm - //--- ax >= b --> ax + sp >= b, sp >= 0 <= epsp - //--- - int r; - const double* dualSol = NULL; - - if (m_useInitLpDuals) { - dualSol = m_cutgenSI->getRowPrice(); - m_useInitLpDuals = false; - m_stabEpsilon = 0.0; - } else { - dualSol = m_masterSI->getRowPrice(); - } - - assert(nMasterCols == static_cast(m_masterColType.size())); - - for (i = 0; i < nMasterCols; i++) { - DecompColType type = m_masterColType[i]; - - if (type == DecompCol_ArtForRowL || - type == DecompCol_ArtForBranchL || - type == DecompCol_ArtForCutL) { - r = m_artColIndToRowInd[i]; - printf("Master Col i=%d type=%s r=%d dual=%g\n", - i, DecompColTypeStr[type].c_str(), r, dualSol[r]); - m_masterSI->setObjCoeff(i, -dualSol[r]); - } else if (type == DecompCol_ArtForRowG || - type == DecompCol_ArtForBranchG || - type == DecompCol_ArtForCutG) { - r = m_artColIndToRowInd[i]; - printf("Master Col i=%d type=%s r=%d dual=%g\n", - i, DecompColTypeStr[type].c_str(), r, dualSol[r]); - m_masterSI->setObjCoeff(i, dualSol[r]); - } else { - m_masterSI->setObjCoeff(i, 0.0); - } - - if (isMasterColArtificial(i)) { - //m_masterSI->setColBounds(i, 0.0, 0.0);//TODO - m_masterSI->setColBounds(i, 0.0, m_stabEpsilon);//TODO - } - } - - DecompVarList::iterator li; - - for (li = m_vars.begin(); li != m_vars.end(); li++) { - assert(isMasterColStructural((*li)->getColMasterIndex())); - m_masterSI->setObjCoeff((*li)->getColMasterIndex(), - (*li)->getOriginalCost()); - } - - if (m_param.LogDumpModel > 1) { - string baseName = "masterProb_switchItoII"; - - if (m_isStrongBranch) { - baseName += "_SB"; - } - - printCurrentProblem(m_masterSI, - baseName, - m_nodeStats.nodeIndex, - m_nodeStats.cutCallsTotal, - m_nodeStats.priceCallsTotal); - } - -#else - assert(nMasterCols == static_cast(m_masterColType.size())); - - for (i = 0; i < nMasterCols; i++) { - m_masterSI->setObjCoeff(i, 0.0); - - if (isMasterColArtificial(i)) { - m_masterSI->setColBounds(i, 0.0, 0.0); - } - } - - DecompVarList::iterator li; - - for (li = m_vars.begin(); li != m_vars.end(); li++) { - assert(isMasterColStructural((*li)->getColMasterIndex())); - m_masterSI->setObjCoeff((*li)->getColMasterIndex(), - (*li)->getOriginalCost()); - } - - // restore the objective value of the masterOnly variables - int nMOVars = static_cast(m_masterOnlyCols.size()); - map:: iterator mit; - int j ; - int colIndex; - const double* objCoeff = getOrigObjective(); - - for (i = 0; i < nMOVars; i++) { - j = m_masterOnlyCols[i]; - mit = m_masterOnlyColsMap.find(j); - assert(mit != m_masterOnlyColsMap.end()); - colIndex = mit->second; -// assert(isMasterColMasterOnly(colIndex)); - m_masterSI->setObjCoeff(colIndex, objCoeff[j]); - } - - if (m_param.LogDumpModel > 1) { - string baseName = "masterProb_switchItoII"; - - if (m_isStrongBranch) { - baseName += "_SB"; - } - - printCurrentProblem(m_masterSI, - baseName, - m_nodeStats.nodeIndex, - m_nodeStats.cutCallsTotal, - m_nodeStats.priceCallsTotal); - } +#else + assert(nMasterCols == static_cast(m_masterColType.size())); + + for (i = 0; i < nMasterCols; i++) { + m_masterSI->setObjCoeff(i, 0.0); + + if (isMasterColArtificial(i)) { + m_masterSI->setColBounds(i, 0.0, 0.0); + } + } + + DecompVarList::iterator 
li; + + for (li = m_vars.begin(); li != m_vars.end(); li++) { + assert(isMasterColStructural((*li)->getColMasterIndex())); + m_masterSI->setObjCoeff((*li)->getColMasterIndex(), + (*li)->getOriginalCost()); + } + + // restore the objective value of the masterOnly variables + int nMOVars = static_cast(m_masterOnlyCols.size()); + map::iterator mit; + int j; + int colIndex; + const double *objCoeff = getOrigObjective(); + + for (i = 0; i < nMOVars; i++) { + j = m_masterOnlyCols[i]; + mit = m_masterOnlyColsMap.find(j); + assert(mit != m_masterOnlyColsMap.end()); + colIndex = mit->second; + // assert(isMasterColMasterOnly(colIndex)); + m_masterSI->setObjCoeff(colIndex, objCoeff[j]); + } + + if (m_param.LogDumpModel > 1) { + string baseName = "masterProb_switchItoII"; + + if (m_isStrongBranch) { + baseName += "_SB"; + } + + printCurrentProblem(m_masterSI, baseName, m_nodeStats.nodeIndex, + m_nodeStats.cutCallsTotal, m_nodeStats.priceCallsTotal); + } #endif } //===========================================================================// -void DecompAlgo::masterPhaseIItoI() -{ - //--- - //--- switch from Phase II to Phase I - //--- - UTIL_MSG(m_app->m_param.LogDebugLevel, 4, - (*m_osLog) << "Switching from PhaseII to PhaseI\n"; - ); - int i; - int nMasterCols = m_masterSI->getNumCols(); - //--- - //--- set obj for all columns to 0 - //--- set obj for artificial columns to 1 - //--- unfix column bounds for artificial columns - //--- - assert(nMasterCols == static_cast(m_masterColType.size())); - - for (i = 0; i < nMasterCols; i++) { - if (isMasterColStructural(i) || isMasterColMasterOnly(i)) { - m_masterSI->setObjCoeff(i, 0.0); - } else { - m_masterSI->setObjCoeff(i, 1.0); - m_masterSI->setColBounds(i, 0.0, m_infinity); - } - } - - if (m_param.LogDumpModel > 1) { - string baseName = "masterProb_switchIItoI"; - - if (m_isStrongBranch) { - baseName += "_SB"; - } - - printCurrentProblem(m_masterSI, - baseName, - m_nodeStats.nodeIndex, - m_nodeStats.cutCallsTotal, - m_nodeStats.priceCallsTotal); - } +void DecompAlgo::masterPhaseIItoI() { + //--- + //--- switch from Phase II to Phase I + //--- + UTIL_MSG(m_app->m_param.LogDebugLevel, 4, + (*m_osLog) << "Switching from PhaseII to PhaseI\n";); + int i; + int nMasterCols = m_masterSI->getNumCols(); + //--- + //--- set obj for all columns to 0 + //--- set obj for artificial columns to 1 + //--- unfix column bounds for artificial columns + //--- + assert(nMasterCols == static_cast(m_masterColType.size())); + + for (i = 0; i < nMasterCols; i++) { + if (isMasterColStructural(i) || isMasterColMasterOnly(i)) { + m_masterSI->setObjCoeff(i, 0.0); + } else { + m_masterSI->setObjCoeff(i, 1.0); + m_masterSI->setColBounds(i, 0.0, m_infinity); + } + } + + if (m_param.LogDumpModel > 1) { + string baseName = "masterProb_switchIItoI"; + + if (m_isStrongBranch) { + baseName += "_SB"; + } + + printCurrentProblem(m_masterSI, baseName, m_nodeStats.nodeIndex, + m_nodeStats.cutCallsTotal, m_nodeStats.priceCallsTotal); + } } -//how does this logic work when dealing with TSP where +// how does this logic work when dealing with TSP where // cuts define validity? must continue if user supplies // cut generator - think? 
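
Both phase switches above edit the master LP the same way through setObjCoeff() and setColBounds(): phase I zeroes the structural costs, gives the artificial columns cost 1 and relaxed bounds, and minimizes total infeasibility; phase II restores the original costs and fixes the artificials at zero (or at a small stabilization epsilon under STAB_DUMERLE). A compact sketch of that pattern on plain vectors standing in for the master data; the ColKind enum and function names are illustrative only.

#include <vector>

// Illustrative sketch of the phase I <-> phase II objective switch. 'obj',
// 'lb', 'ub' stand in for the master LP data that the real code edits through
// setObjCoeff()/setColBounds().
enum class ColKind { Structural, Artificial };

void switchToPhaseI(const std::vector<ColKind>& kind, double infinity,
                    std::vector<double>& obj,
                    std::vector<double>& lb, std::vector<double>& ub) {
  for (std::size_t i = 0; i < kind.size(); ++i) {
    if (kind[i] == ColKind::Structural) {
      obj[i] = 0.0;                       // ignore true costs in phase I
    } else {
      obj[i] = 1.0;                       // minimize total infeasibility
      lb[i] = 0.0; ub[i] = infinity;      // unfix the artificials
    }
  }
}

void switchToPhaseII(const std::vector<ColKind>& kind,
                     const std::vector<double>& originalCost,
                     std::vector<double>& obj,
                     std::vector<double>& lb, std::vector<double>& ub) {
  for (std::size_t i = 0; i < kind.size(); ++i) {
    if (kind[i] == ColKind::Structural) {
      obj[i] = originalCost[i];           // restore true costs
    } else {
      obj[i] = 0.0;
      lb[i] = 0.0; ub[i] = 0.0;           // fix artificials out of the LP
    }
  }
}
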
//===========================================================================// -void DecompAlgo::phaseUpdate(DecompPhase& phase, - DecompStatus& status) -{ - bool mustSwitch, considerSwitch; - bool isCutPossible, isPricePossible, gapTight; - DecompPhase nextPhase = PHASE_UNKNOWN; - DecompStatus nextStatus = status; - pair& objBest = m_nodeStats.objBest; - int& priceCallsTotal = m_nodeStats.priceCallsTotal; - int& cutCallsTotal = m_nodeStats.cutCallsTotal; - int& priceCallsRound = m_nodeStats.priceCallsRound; - int& cutCallsRound = m_nodeStats.cutCallsRound; - int& varsThisCall = m_nodeStats.varsThisCall; - int& cutsThisCall = m_nodeStats.cutsThisCall; - int& varsThisRound = m_nodeStats.varsThisRound; - int& cutsThisRound = m_nodeStats.cutsThisRound; - UtilPrintFuncBegin(m_osLog, m_classTag, - "phaseUpdate()", m_param.LogDebugLevel, 2); - m_phaseLast = phase; - UTIL_MSG(m_param.LogDebugLevel, 3, - (*m_osLog) << "cutsThisRound : " << cutsThisRound << "\n"; - (*m_osLog) << "varsThisRound : " << varsThisRound << "\n"; - (*m_osLog) << "cutsThisCall : " << cutsThisCall << "\n"; - (*m_osLog) << "varsThisCall : " << varsThisCall << "\n"; - (*m_osLog) << "cutCallsTotal : " << cutCallsTotal << "\n"; - (*m_osLog) << "priceCallsTotal: " << priceCallsTotal << "\n"; - (*m_osLog) << "cutCallsRound : " << cutCallsRound << "\n"; - (*m_osLog) << "priceCallsRound: " << priceCallsRound << "\n"; - (*m_osLog) << "PHASEIN : " - << DecompPhaseStr[phase] << "\n"; - (*m_osLog) << "STATIN : " - << DecompStatusStr[status] << "\n"; - (*m_osLog) << "BestLB : " - << UtilDblToStr(objBest.first) << "\n"; - (*m_osLog) << "BestUB : " - << UtilDblToStr(objBest.second) << "\n"; - ); - - //--- - //--- is there an override? - //--- - if (m_phaseForce != PHASE_UNKNOWN) { - nextPhase = m_phaseForce; - m_phaseForce = PHASE_UNKNOWN; - nextStatus = status; - goto PHASE_UPDATE_FINISH; - } - - //--- - //--- was the current model found to be infeasible? 
- //--- - if (status == STAT_INFEASIBLE) { - //--- - //--- otherwise, switch to PHASEI - //--- NOTE: this can happen when a new cut (or branch cut) is added - //--- - masterPhaseIItoI(); +void DecompAlgo::phaseUpdate(DecompPhase &phase, DecompStatus &status) { + bool mustSwitch, considerSwitch; + bool isCutPossible, isPricePossible, gapTight; + DecompPhase nextPhase = PHASE_UNKNOWN; + DecompStatus nextStatus = status; + pair &objBest = m_nodeStats.objBest; + int &priceCallsTotal = m_nodeStats.priceCallsTotal; + int &cutCallsTotal = m_nodeStats.cutCallsTotal; + int &priceCallsRound = m_nodeStats.priceCallsRound; + int &cutCallsRound = m_nodeStats.cutCallsRound; + int &varsThisCall = m_nodeStats.varsThisCall; + int &cutsThisCall = m_nodeStats.cutsThisCall; + int &varsThisRound = m_nodeStats.varsThisRound; + int &cutsThisRound = m_nodeStats.cutsThisRound; + UtilPrintFuncBegin(m_osLog, m_classTag, "phaseUpdate()", + m_param.LogDebugLevel, 2); + m_phaseLast = phase; + UTIL_MSG(m_param.LogDebugLevel, 3, + (*m_osLog) << "cutsThisRound : " << cutsThisRound << "\n"; + (*m_osLog) << "varsThisRound : " << varsThisRound << "\n"; + (*m_osLog) << "cutsThisCall : " << cutsThisCall << "\n"; + (*m_osLog) << "varsThisCall : " << varsThisCall << "\n"; + (*m_osLog) << "cutCallsTotal : " << cutCallsTotal << "\n"; + (*m_osLog) << "priceCallsTotal: " << priceCallsTotal << "\n"; + (*m_osLog) << "cutCallsRound : " << cutCallsRound << "\n"; + (*m_osLog) << "priceCallsRound: " << priceCallsRound << "\n"; + (*m_osLog) << "PHASEIN : " << DecompPhaseStr[phase] << "\n"; + (*m_osLog) << "STATIN : " << DecompStatusStr[status] << "\n"; + (*m_osLog) << "BestLB : " << UtilDblToStr(objBest.first) + << "\n"; + (*m_osLog) << "BestUB : " << UtilDblToStr(objBest.second) + << "\n";); + + //--- + //--- is there an override? + //--- + if (m_phaseForce != PHASE_UNKNOWN) { + nextPhase = m_phaseForce; + m_phaseForce = PHASE_UNKNOWN; + nextStatus = status; + goto PHASE_UPDATE_FINISH; + } + + //--- + //--- was the current model found to be infeasible? 
+ //--- + if (status == STAT_INFEASIBLE) { + //--- + //--- otherwise, switch to PHASEI + //--- NOTE: this can happen when a new cut (or branch cut) is added + //--- + masterPhaseIItoI(); + m_nodeStats.resetBestLB(); + m_firstPhase2Call = false; + nextPhase = PHASE_PRICE1; + nextStatus = solutionUpdate(nextPhase); + goto PHASE_UPDATE_FINISH; + } + + //--- + //--- check to see if we have exceeded the total iter limits + //--- + isCutPossible = (m_param.RoundCutItersLimit > 0) && + (cutCallsTotal < m_param.TotalCutItersLimit); + isPricePossible = (m_param.RoundPriceItersLimit > 0) && + (priceCallsTotal < m_param.TotalPriceItersLimit); + + switch (phase) { + case PHASE_PRICE1: { + //--- + //--- we are in PHASEI, check to see if solution is feasible + //--- to original by checking to see if all artificials are 0 + //--- + const double phaseIObj = m_masterSI->getObjValue(); + UTIL_MSG(m_app->m_param.LogDebugLevel, 3, + (*m_osLog) << "PhaseIObj= " << UtilDblToStr(phaseIObj) << endl;); + m_phaseIObj.push_back(phaseIObj); + + // if(phaseIObj <= DecompZero){ + // if(phaseIObj <= DecompEpsilon){ //11/09/09 + if (phaseIObj <= m_param.PhaseIObjTol) { // 01/22/10 - forestry + //--- + //--- switch to PHASE II (art=0) + //--- + masterPhaseItoII(); + setObjBound(m_nodeStats.getLastBoundThis(), phaseIObj); + m_firstPhase2Call = true; + m_nodeStats.resetCutRound(); + m_nodeStats.resetPriceRound(); m_nodeStats.resetBestLB(); - m_firstPhase2Call = false; - nextPhase = PHASE_PRICE1; - nextStatus = solutionUpdate(nextPhase); - goto PHASE_UPDATE_FINISH; - } - - //--- - //--- check to see if we have exceeded the total iter limits - //--- - isCutPossible = - (m_param.RoundCutItersLimit > 0) && - (cutCallsTotal < m_param.TotalCutItersLimit); - isPricePossible = - (m_param.RoundPriceItersLimit > 0) && - (priceCallsTotal < m_param.TotalPriceItersLimit); - - switch (phase) { - case PHASE_PRICE1: { - //--- - //--- we are in PHASEI, check to see if solution is feasible - //--- to original by checking to see if all artificials are 0 - //--- - const double phaseIObj = m_masterSI->getObjValue(); - UTIL_MSG(m_app->m_param.LogDebugLevel, 3, - (*m_osLog) - << "PhaseIObj= " << UtilDblToStr(phaseIObj) << endl;); - m_phaseIObj.push_back(phaseIObj); - - //if(phaseIObj <= DecompZero){ - //if(phaseIObj <= DecompEpsilon){ //11/09/09 - if (phaseIObj <= m_param.PhaseIObjTol) { //01/22/10 - forestry - //--- - //--- switch to PHASE II (art=0) - //--- - masterPhaseItoII(); - setObjBound(m_nodeStats.getLastBoundThis(), phaseIObj); - m_firstPhase2Call = true; - m_nodeStats.resetCutRound(); - m_nodeStats.resetPriceRound(); - m_nodeStats.resetBestLB(); - - if (m_algo == DECOMP) { - nextPhase = PHASE_DONE; - nextStatus = STAT_FEASIBLE; - } else { - nextPhase = PHASE_PRICE2; - nextStatus = solutionUpdate(nextPhase); - } - goto PHASE_UPDATE_FINISH; + if (m_algo == DECOMP) { + nextPhase = PHASE_DONE; + nextStatus = STAT_FEASIBLE; } else { - //--- - //--- we still have active artificials - //--- if this is the first call, just continue - //--- otherwise, check to see if any new (redCost<0) - //--- columns were found to help break infeasibility - //--- - //--- if no vars were found, we are really infeasible - //--- else, repeat PhaseI - //--- - if (priceCallsTotal == 0 || varsThisCall > 0) { - nextPhase = PHASE_PRICE1; - goto PHASE_UPDATE_FINISH; - } else { - UTIL_MSG(m_app->m_param.LogDebugLevel, 3, - (*m_osLog) - << "Vars this call is " << varsThisCall << endl;); - UTIL_MSG(m_app->m_param.LogDebugLevel, 3, - (*m_osLog) - << "Price calls total is 
" << priceCallsTotal << endl;); - UTIL_MSG(m_app->m_param.LogDebugLevel, 3, - (*m_osLog) - << "Node " << getNodeIndex() - << " is Infeasible." << endl;); - m_stopCriteria = DecompStopInfeasible; - nextPhase = PHASE_DONE; - // std::cout << "STATUS is INFEASIBLE" << std::endl; - nextStatus = STAT_INFEASIBLE; - goto PHASE_UPDATE_FINISH; - } - } - }//END: case PHASE_PRICE1 - break; - case PHASE_PRICE2: { - assert(status == STAT_FEASIBLE || status == STAT_UNKNOWN); - - //--- - //--- if we want to always favor cutting, then just do it - //--- - if (m_param.PCStrategy == FavorCut && isCutPossible) { - nextPhase = PHASE_CUT; - goto PHASE_UPDATE_FINISH; - } - - //--- - //--- if this is the first call, just continue - //--- - if (priceCallsTotal == 0 && cutCallsTotal == 0) { - nextPhase = PHASE_PRICE2; - goto PHASE_UPDATE_FINISH; + nextPhase = PHASE_PRICE2; + nextStatus = solutionUpdate(nextPhase); } + goto PHASE_UPDATE_FINISH; + } else { //--- - //--- Princess Bride (1987): - //--- "truly, you have a dizzying intellect" - //--- - mustSwitch = false; - considerSwitch = false; - - //--- - //--- if we hit the total limit or - //--- we found no new vars this call - //--- then we must switch (or we are done) - //--- - if (!isPricePossible || (varsThisCall == 0) || (varsThisRound == 0)) { - mustSwitch = true; - } - + //--- we still have active artificials + //--- if this is the first call, just continue + //--- otherwise, check to see if any new (redCost<0) + //--- columns were found to help break infeasibility //--- - //--- if we hit the round limit, we must consider switching + //--- if no vars were found, we are really infeasible + //--- else, repeat PhaseI //--- - if (priceCallsRound >= m_param.RoundPriceItersLimit) { - considerSwitch = true; - } - - //printf("mustSwitch=%d\n", mustSwitch); - //printf("considerSwitch=%d\n", considerSwitch); - //printf("isCutPossible=%d\n", isCutPossible); - //printf("isPricePossible=%d\n", isPricePossible); - if (mustSwitch) { - //--- - //--- we must switch from pricing - //--- - if (!isCutPossible) { - //--- - //--- if we exceed the cut iter limit, we are done - //--- - nextPhase = PHASE_DONE; - m_stopCriteria = DecompStopIterLimit; - } else { - if ((cutCallsTotal > 0) && - (cutsThisRound == 0) && - (varsThisRound == 0)) { - //--- - //--- nothing new happened, so we are done - //--- - nextPhase = PHASE_DONE; - } else { - //--- - //--- something new happened, so try cuts again - //--- - nextPhase = PHASE_CUT; - m_nodeStats.resetCutRound(); - } - } - }//END: if(mustSwitch) - else if (considerSwitch) { - //--- - //--- we consider switching from pricing - //--- - if (!isCutPossible) { - if (!isPricePossible) { - //--- - //--- if we exceed both iter limits, we are done - //--- - nextPhase = PHASE_DONE; - m_stopCriteria = DecompStopIterLimit; - } else { - //--- - //--- if we exceed cut iter limit, but not the price lim - //--- since we are not in mustSwitch, m_varsThisRound > 0, - //--- so we can go back to pricing, even though it violates - //--- the round counter, because we have no other choice - //--- - nextPhase = PHASE_PRICE2; - } - } else { - if ((cutsThisRound == 0) && (varsThisRound == 0)) { - //--- - //--- nothing new happened, so we are done - //--- - nextPhase = PHASE_DONE; - } else { - //--- - //--- something new happened, so try cuts again - //--- - nextPhase = PHASE_CUT; - m_nodeStats.resetCutRound(); - m_nodeStats.objHistoryBound.clear(); - } - } - } //END: else if(considerSwitch) - else { - nextPhase = PHASE_PRICE2; - } - } //END: case PHASE_PRICE2 - 
//--- - //--- are we suggesting another price phase but gap is tight? - //--- - gapTight = isGapTight(); - - if (gapTight && isCutPossible) - if (cutCallsTotal == 0 || //haven't even tried cuts yet - varsThisRound > 0) { //some new vars, try cut again - //--- - //--- we haven't even tried cuts yet, give it a try - //--- - nextPhase = PHASE_CUT; - } + if (priceCallsTotal == 0 || varsThisCall > 0) { + nextPhase = PHASE_PRICE1; + goto PHASE_UPDATE_FINISH; + } else { + UTIL_MSG(m_app->m_param.LogDebugLevel, 3, + (*m_osLog) << "Vars this call is " << varsThisCall << endl;); + UTIL_MSG(m_app->m_param.LogDebugLevel, 3, + (*m_osLog) + << "Price calls total is " << priceCallsTotal << endl;); + UTIL_MSG(m_app->m_param.LogDebugLevel, 3, + (*m_osLog) << "Node " << getNodeIndex() << " is Infeasible." + << endl;); + m_stopCriteria = DecompStopInfeasible; + nextPhase = PHASE_DONE; + // std::cout << "STATUS is INFEASIBLE" << std::endl; + nextStatus = STAT_INFEASIBLE; + goto PHASE_UPDATE_FINISH; + } + } + } // END: case PHASE_PRICE1 + break; + case PHASE_PRICE2: { + assert(status == STAT_FEASIBLE || status == STAT_UNKNOWN); + + //--- + //--- if we want to always favor cutting, then just do it + //--- + if (m_param.PCStrategy == FavorCut && isCutPossible) { + nextPhase = PHASE_CUT; + goto PHASE_UPDATE_FINISH; + } - if (nextPhase == PHASE_PRICE2 && gapTight) { + //--- + //--- if this is the first call, just continue + //--- + if (priceCallsTotal == 0 && cutCallsTotal == 0) { + nextPhase = PHASE_PRICE2; + goto PHASE_UPDATE_FINISH; + } + + //--- + //--- Princess Bride (1987): + //--- "truly, you have a dizzying intellect" + //--- + mustSwitch = false; + considerSwitch = false; + + //--- + //--- if we hit the total limit or + //--- we found no new vars this call + //--- then we must switch (or we are done) + //--- + if (!isPricePossible || (varsThisCall == 0) || (varsThisRound == 0)) { + mustSwitch = true; + } + + //--- + //--- if we hit the round limit, we must consider switching + //--- + if (priceCallsRound >= m_param.RoundPriceItersLimit) { + considerSwitch = true; + } + + // printf("mustSwitch=%d\n", mustSwitch); + // printf("considerSwitch=%d\n", considerSwitch); + // printf("isCutPossible=%d\n", isCutPossible); + // printf("isPricePossible=%d\n", isPricePossible); + if (mustSwitch) { + //--- + //--- we must switch from pricing + //--- + if (!isCutPossible) { + //--- + //--- if we exceed the cut iter limit, we are done + //--- + nextPhase = PHASE_DONE; + m_stopCriteria = DecompStopIterLimit; + } else { + if ((cutCallsTotal > 0) && (cutsThisRound == 0) && + (varsThisRound == 0)) { + //--- + //--- nothing new happened, so we are done + //--- + nextPhase = PHASE_DONE; + } else { + //--- + //--- something new happened, so try cuts again + //--- + nextPhase = PHASE_CUT; + m_nodeStats.resetCutRound(); + } + } + } // END: if(mustSwitch) + else if (considerSwitch) { + //--- + //--- we consider switching from pricing + //--- + if (!isCutPossible) { + if (!isPricePossible) { + //--- + //--- if we exceed both iter limits, we are done + //--- + nextPhase = PHASE_DONE; + m_stopCriteria = DecompStopIterLimit; + } else { + //--- + //--- if we exceed cut iter limit, but not the price lim + //--- since we are not in mustSwitch, m_varsThisRound > 0, + //--- so we can go back to pricing, even though it violates + //--- the round counter, because we have no other choice + //--- + nextPhase = PHASE_PRICE2; + } + } else { + if ((cutsThisRound == 0) && (varsThisRound == 0)) { + //--- + //--- nothing new happened, so we are done 
+ //--- + nextPhase = PHASE_DONE; + } else { + //--- + //--- something new happened, so try cuts again + //--- + nextPhase = PHASE_CUT; + m_nodeStats.resetCutRound(); + m_nodeStats.objHistoryBound.clear(); + } + } + } // END: else if(considerSwitch) + else { + nextPhase = PHASE_PRICE2; + } + } // END: case PHASE_PRICE2 + //--- + //--- are we suggesting another price phase but gap is tight? + //--- + gapTight = isGapTight(); + + if (gapTight && isCutPossible) + if (cutCallsTotal == 0 || // haven't even tried cuts yet + varsThisRound > 0) { // some new vars, try cut again + //--- + //--- we haven't even tried cuts yet, give it a try + //--- + nextPhase = PHASE_CUT; + } + + if (nextPhase == PHASE_PRICE2 && gapTight) { m_stopCriteria = DecompStopGap; //--- //--- if branching candidate does not exist @@ -4297,1591 +4023,1490 @@ void DecompAlgo::phaseUpdate(DecompPhase& phase, //--- UTIL_DEBUG(m_param.LogDebugLevel, 3, (*m_osLog) << "Gap is tight" << endl;); - //int branchedOnIndex = -1; - //double branchedOnValue = 0; - //chooseBranchVar(branchedOnIndex, branchedOnValue); - std::vector< std::pair > downBranchLB, - downBranchUB, upBranchLB, upBranchUB; - bool gotBranch = chooseBranchSet(downBranchLB, - downBranchUB, - upBranchLB, - upBranchUB); + // int branchedOnIndex = -1; + // double branchedOnValue = 0; + // chooseBranchVar(branchedOnIndex, branchedOnValue); + std::vector> downBranchLB, downBranchUB, + upBranchLB, upBranchUB; + bool gotBranch = + chooseBranchSet(downBranchLB, downBranchUB, upBranchLB, upBranchUB); if (m_param.NodeLimit == 0) { - UTIL_DEBUG(m_param.LogDebugLevel, 3, - (*m_osLog) << "Gap is tight and NodeLimit=0." - << endl;); - nextPhase = PHASE_DONE; - break; + UTIL_DEBUG(m_param.LogDebugLevel, 3, + (*m_osLog) << "Gap is tight and NodeLimit=0." << endl;); + nextPhase = PHASE_DONE; + break; } else if (gotBranch) { - UTIL_DEBUG(m_param.LogDebugLevel, 3, - (*m_osLog) << "Gap is tight and we have a " - << "branch candidate" << endl;); - nextPhase = PHASE_DONE; - break; + UTIL_DEBUG(m_param.LogDebugLevel, 3, + (*m_osLog) << "Gap is tight and we have a " + << "branch candidate" << endl;); + nextPhase = PHASE_DONE; + break; } else { - UTIL_DEBUG(m_param.LogDebugLevel, 3, - (*m_osLog) << "Gap is tight and we have NO " - << "branch candidate" << endl;); - } - } - - break; - case PHASE_CUT: { - //--- - //--- if we want to always favor pricing, then just do it - //--- - if (m_param.PCStrategy == FavorPrice && isPricePossible) { - nextPhase = PHASE_PRICE2; - goto PHASE_UPDATE_FINISH; - } - - //--- - //--- if this is the first call, just continue - //--- - if (priceCallsTotal == 0 && cutCallsTotal == 0) { - nextPhase = PHASE_CUT; - goto PHASE_UPDATE_FINISH; - } - - //--- - //--- if tight was gap, the we went to cuts and found none, - //--- then stop on gap - //--- - gapTight = isGapTight(); - - if (priceCallsTotal > 0 && cutsThisCall == 0 && gapTight) { - m_stopCriteria = DecompStopGap; - //--- - //--- Even if we are stop on gap, we need to be careful of - //--- the following: If the last solution was integral (no - //--- branching candidates) but we are not done pricing out - //--- (i.e., a column with negative RC still exist) and we - //--- declare that we are tailing off then the node will get - //--- put back in the node work queue. This can lead to that - //--- node being repeatedly stopped and reseted. It is better - //--- to just price it out since we cannot branch on it in - //--- this state. 
- //--- - UTIL_DEBUG(m_param.LogDebugLevel, 3, - (*m_osLog) << "Gap is tight" << endl;); - //int branchedOnIndex = -1; - //double branchedOnValue = 0; - //chooseBranchVar(branchedOnIndex, branchedOnValue); - std::vector< std::pair > downBranchLB, - downBranchUB, upBranchLB, upBranchUB; - bool gotBranch = chooseBranchSet(downBranchLB, - downBranchUB, - upBranchLB, - upBranchUB); - - if (m_param.NodeLimit == 0) { - UTIL_DEBUG(m_param.LogDebugLevel, 3, - (*m_osLog) << "Gap is tight and NodeLimit=0." - << endl;); - nextPhase = PHASE_DONE; - goto PHASE_UPDATE_FINISH; - } else if (gotBranch) { - //if(branchedOnIndex != -1){ - UTIL_DEBUG(m_param.LogDebugLevel, 3, - (*m_osLog) << "Gap is tight and we have a " - << "branch candidate" << endl;); - nextPhase = PHASE_DONE; - goto PHASE_UPDATE_FINISH; - } else { - UTIL_DEBUG(m_param.LogDebugLevel, 3, - (*m_osLog) << "Gap is tight and we have NO " - << "branch candidate" << endl;); - } - } - - //--- - //--- Princess Bride (1987): - //--- "truly, you have a dizzying intellect" - //--- - mustSwitch = false; - considerSwitch = false; - - //--- - //--- if we hit the total limit or - //--- we found no new cuts this call - //--- then we must switch (or we are done) - //--- - if (!isCutPossible || (cutsThisCall == 0) || (cutsThisRound == 0)) { - mustSwitch = true; - } - - //--- - //--- if we hit the round limit, we must consider switching - //--- - if (cutCallsRound >= m_param.RoundCutItersLimit) { - considerSwitch = true; - } - - if (mustSwitch) { - //--- - //--- we must switch from cutting - //--- - if (!isPricePossible) { - //--- - //--- if we exceed the price iter limit, we are done - //--- - nextPhase = PHASE_DONE; - m_stopCriteria = DecompStopIterLimit; - } else { - if ((priceCallsTotal > 0) && - (cutsThisRound == 0) && - (varsThisRound == 0)) { - //--- - //--- nothing new happened, so we are done - //--- - nextPhase = PHASE_DONE; - } else { - //--- - //--- something new happened, so try price again - //--- - nextPhase = PHASE_PRICE2; - m_nodeStats.resetPriceRound(); - } - } - }//END: if(mustSwitch) - else if (considerSwitch) { - //--- - //--- we consider switching from cutting - //--- - if (!isPricePossible) { - if (!isCutPossible) { - //--- - //--- if we exceed both iter limits, we are done - //--- - nextPhase = PHASE_DONE; - m_stopCriteria = DecompStopIterLimit; - } else { - //--- - //--- if we exceed the price iter limit, but not the cut lim - //--- since we are not in mustSwitch, m_cutsThisRound > 0, - //--- so we can go back to cutting, even though it violates - //--- the round counter, because we have no other choice - //--- - nextPhase = PHASE_CUT; - } - } else { - if ((cutsThisRound == 0) && (varsThisRound == 0)) { - //--- - //--- nothing new happened, so we are done - //--- - nextPhase = PHASE_DONE; - } else { - //--- - //--- something new happened, so try price again - //--- - nextPhase = PHASE_PRICE2; - m_nodeStats.resetPriceRound(); - } - } - } //END: else if(considerSwitch) - else { - nextPhase = PHASE_CUT; - } - } //END: case PHASE_CUT - break; - default: - assert(0); - //UtilAssert(0, "Bad Phase in phaseUpdate!", m_osLog); - } //END: switch(phase) - -PHASE_UPDATE_FINISH: - UTIL_MSG(m_param.LogDebugLevel, 3, - (*m_osLog) << "PhaseOut: " << DecompPhaseStr[nextPhase]; - (*m_osLog) << " StatusOut: " << DecompStatusStr[nextStatus]; - (*m_osLog) << endl; - ); - phase = nextPhase; - status = nextStatus; - UtilPrintFuncEnd(m_osLog, m_classTag, - "phaseUpdate()", m_param.LogDebugLevel, 2); -} - 
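
Stripped of the gap checks, the FavorCut/FavorPrice overrides, and the round-counter resets, the PRICE2 and CUT cases of phaseUpdate apply a symmetric rule: switch away from the current phase when its total iteration limit is hit or it produced nothing new, consider switching when its per-round limit is hit, and stop when neither phase can still make progress. The helper below is a simplified paraphrase of that rule for illustration, not the actual DIP control flow.

// Illustrative, simplified paraphrase of the price <-> cut switching rule.
enum class Phase { Price, Cut, Done };

Phase nextPhase(Phase current, bool currentPossible, bool otherPossible,
                bool newThisCall, bool newThisRound, bool hitRoundLimit) {
  bool mustSwitch     = !currentPossible || !newThisCall || !newThisRound;
  bool considerSwitch = hitRoundLimit;
  if (mustSwitch) {
    if (!otherPossible) return Phase::Done;   // both directions exhausted
    if (!newThisRound)  return Phase::Done;   // nothing new from either phase
    return current == Phase::Price ? Phase::Cut : Phase::Price;
  }
  if (considerSwitch && otherPossible)
    return current == Phase::Price ? Phase::Cut : Phase::Price;
  return current;                             // keep going in this phase
}
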
-//------------------------------------------------------------------------ // -void DecompAlgo::generateVarsCalcRedCost(const double* u, - double* redCostX) -{ - int i; - DecompConstraintSet* modelCore = m_modelCore.getModel(); - int nCoreCols = modelCore->getNumCols(); - const double* origObjective = getOrigObjective(); - //--- - //--- Calculate reduced costs for a given dual vector. - //--- - //--- in DW, we use (c-uA'')x, where A'' is the core matrix - //--- u, in this case has dimension = #core rows - //--- in D , we use (c-u )x, we don't use the core matrix - //--- u, in this case has dimension = #core cols - //--- - UTIL_DEBUG(m_app->m_param.LogDebugLevel, 5, - int nMasterRows = m_masterSI->getNumRows(); - assert((nMasterRows - m_numConvexCon) == - modelCore->M->getNumRows()); - ); - - if (m_algo == DECOMP) { - for (i = 0; i < nCoreCols; i++) { - redCostX[i] = u[i]; - } - } else { - modelCore->M->transposeTimes(u, redCostX); - } - - //--- - //--- if in Phase I, c=0 - //--- - if (m_phase == PHASE_PRICE1) { - for (i = 0; i < nCoreCols; i++) { - redCostX[i] = -redCostX[i]; - } - } else { - for (i = 0; i < nCoreCols; i++) { - redCostX[i] = origObjective[i] - redCostX[i]; - } - } -} - -//------------------------------------------------------------------------ // -void DecompAlgo::generateVarsAdjustDuals(const double* uOld, - double* uNew) -{ - int r; - int nMasterRows = m_masterSI->getNumRows(); - DecompConstraintSet* modelCore = m_modelCore.getModel(); - int nBaseCoreRows = modelCore->nBaseRows; - int nCoreCols = modelCore->getNumCols(); - - if (m_algo == DECOMP) { - nBaseCoreRows = nCoreCols; - } - - //--- - //--- copy the dual vector for original core rows - //--- - CoinDisjointCopyN(uOld, nBaseCoreRows, uNew); - - //--- - //--- sanity check - make sure we are only skipping convexity rows - //--- everything else has a dual we want to use - //--- - if (m_param.DebugLevel >= 1) { - for (r = 0; r < nBaseCoreRows; r++) { - assert(m_masterRowType[r] == DecompRow_Original || - m_masterRowType[r] == DecompRow_Branch); - } - - for (r = nBaseCoreRows; r < nBaseCoreRows + m_numConvexCon; r++) { - assert(m_masterRowType[r] == DecompRow_Convex); - } - - for (r = nBaseCoreRows + m_numConvexCon; r < nMasterRows; r++) { - assert(m_masterRowType[r] == DecompRow_Cut); - } - } - - //NOTE: if no cuts, don't need to do any of this - // if DECOMP, don't need to do any of this? 
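The generateVarsCalcRedCost() body removed above computes the pricing objective in the original x-space: in the Dantzig-Wolfe case redCostX = c - u^T A'' in Phase II and -u^T A'' in Phase I (the DECOMP variant just copies u). Written against plain arrays, the same calculation is sketched below; it uses CoinPackedMatrix::transposeTimes exactly as the source does, but it is a reading aid, not the member function itself.

    // Sketch: reduced costs in x-space for Dantzig-Wolfe pricing.
    //   Phase II: redCost = c - u^T A''      Phase I: redCost = -u^T A''
    #include "CoinPackedMatrix.hpp"

    void calcRedCostSketch(const CoinPackedMatrix &coreM, // A'' (m'' x n)
                           const double *u,               // duals, length m''
                           const double *origObj,         // costs c, length n
                           bool phaseOne,
                           double *redCost)               // output, length n
    {
      const int n = coreM.getNumCols();
      coreM.transposeTimes(u, redCost);      // redCost <- u^T A''
      for (int j = 0; j < n; j++)
        redCost[j] = phaseOne ? -redCost[j] : origObj[j] - redCost[j];
    }
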
- //--- - //--- append dual vector for any added cuts - //--- skip over convexity constraints - //--- - assert((nMasterRows - nBaseCoreRows - m_numConvexCon) == - getNumRowType(DecompRow_Cut)); - CoinDisjointCopyN(uOld + nBaseCoreRows + m_numConvexCon, //from - nMasterRows - nBaseCoreRows - m_numConvexCon, //size - uNew + nBaseCoreRows); //to - UTIL_DEBUG(m_app->m_param.LogDebugLevel, 5, - - for (int i = 0; i < nMasterRows; i++) { - if (!UtilIsZero(uOld[i], DecompEpsilon)) { - (*m_osLog) << "uOld[" << setw(5) << i << " ]: " - << setw(12) << UtilDblToStr(uOld[i], 3) - << " --> " - << DecompRowTypeStr[m_masterRowType[i]] << "\n"; - } - } - ); - UTIL_DEBUG(m_app->m_param.LogDebugLevel, 5, - - for (int i = 0; i < (nMasterRows - m_numConvexCon); i++) { - if (!UtilIsZero(uNew[i], DecompEpsilon)) { - (*m_osLog) << "uNew[" << setw(5) << i << " ]: " - << setw(12) << UtilDblToStr(uNew[i], 3) - << endl; - } - } - ); -} - -//------------------------------------------------------------------------ // -int DecompAlgo::generateVars(DecompVarList& newVars, - double& mostNegReducedCost) -{ - //--- - //--- solve min{s in F' | RC[s]} to generate new variables - //--- - //--- if LP was feasible, - //--- then RC[s] = c.s - (uhat.A''.s + alpha) - //--- = c.s - uhat.A''.s - alpha - //--- = (c - uhat.A'')s - alpha - //--- - //--- The master LP was formed in the following order - //--- (A''s) lam[s] >= b'' - from the original core [A'', b''] - //--- sum{s} lam[s] = 1 - convexity constraint - //--- But, we may have added cuts - which conceptually are added - //--- into [A'', b'']. But, in reality they are simply appended to - //--- the end of the LP matrix. So, when we get back the dual vector, - //--- we have to be aware of where alpha actually is. - //--- - UtilPrintFuncBegin(m_osLog, m_classTag, - "generateVars()", m_param.LogDebugLevel, 2); - m_stats.timerOther1.reset(); - //--- - //--- TODO: - //--- Blocks... - //--- How do we adjust this for different blocks? - //--- Each block has exactly one convexity constraint. Otherwise, - //--- the coefficient in the matrix is 0, so there is no effect. - //--- So, all we need to do is use the approriate block's dual variable - //--- call it alpha. - //--- - //--- At this point, we have many strategies -- we could do round robin - //--- and only generate one var at a time. Or, we can generate vars for - //--- every block every time (default for now). - //--- - int i, b; - DecompVarList potentialVars; - DecompConstraintSet* modelCore = m_modelCore.getModel(); - const int m = m_masterSI->getNumRows(); - int nBaseCoreRows = modelCore->nBaseRows; - const int nCoreCols = modelCore->getNumCols(); - const double* u = NULL; - const double* userU = NULL; - const double* origObjective = getOrigObjective(); - double* redCostX = NULL; - double alpha = 0.0; - int whichBlock; - double varRedCost; - double timeLimit; - DecompVarList::iterator it; - // assert(!m_masterSI->isProvenPrimalInfeasible()); - - if (m_algo == DECOMP) { - nBaseCoreRows = nCoreCols; - } - - //--- - //--- PC: get dual vector - //--- u --> (A''s) lam[s] >= b'' --> from core [A'', b''] - //--- alpha --> sum{s} lam[s] = 1 --> convexity constraint - //--- - //--- NOTE: Sign of alpha is negative which is different than infeas case - //--- the solve relax function adds alpha so no sign switch needed. - //--- - //--- We flip the sign here. 
- //--- - //--- - //--- get the master dual solution - //--- the user can override this - //--- - u = getMasterDualSolution(); - userU = m_app->getDualForGenerateVars(u); - - if (userU) { - u = userU; - } - - //THINK: includes artificials now - redCostX = new double[nCoreCols]; // (c - uhat.A") in x-space - CoinAssertHint(redCostX, "Error: Out of Memory"); - //THINK: we should be checked col-type to make sure we get the - // right rows for the convexity constraints - UTIL_DEBUG(m_app->m_param.LogDebugLevel, 4, - (*m_osLog) << "m =" << m << endl; - (*m_osLog) << "numConvexCon =" << m_numConvexCon << endl; - (*m_osLog) << "nBaseCoreRows=" << nBaseCoreRows << endl; - ); - //--- - //--- remove the convexity constraint(s) from the dual vector - //--- TODO/THINK: this is fugly - //--- this is done because the convexity constraint acts like an - //--- objective offset in subproblem - it might be cleaner to manage - //--- it as an offset and just check for < 0 directly, rather than - //--- less than alpha -- sign switches are a little messy - //--- - double* u_adjusted = new double[m - m_numConvexCon]; - CoinAssertHint(u_adjusted, "Error: Out of Memory"); - //--- - //--- remove the convexity constraints from the dual vector - //--- - generateVarsAdjustDuals(u, u_adjusted); - //--- - //--- calculate reduced costs - //--- - generateVarsCalcRedCost(u_adjusted, redCostX); - - //TODO: move this all to debug utility file - if (m_param.DebugLevel >= 1) { - checkDuals(); - } - - //--- - //--- sanity check - none of the columns currently in master - //--- should have negative reduced cost - //--- m_vars contains the variables (in x-space) that have - //--- been pushed into the master LP (assumes no compression) - //--- - if (m_param.DebugLevel >= 1) { - checkReducedCost(u, u_adjusted); - } - - //--- - //--- if doing round-robin, solve just one block unless - //--- in PhaseI (do all blocks) - //--- in PhaseII every n iterations (so we can get a valid update to LB) - //--- - UTIL_DEBUG(m_app->m_param.LogDebugLevel, 3, - (*m_osLog) - << "RoundRobin iterSinceAll= " << m_rrIterSinceAll - << " lastBlock= " << m_rrLastBlock << endl; - ); - int doAllBlocks = false; - - if (m_phase == PHASE_PRICE1 || m_rrIterSinceAll >= m_param.RoundRobinInterval) { - doAllBlocks = true; - m_rrIterSinceAll = 0; - } - - //vector mostNegRCvec(m_numConvexCon, m_infinity); - vector mostNegRCvec(m_numConvexCon, 0); - DecompSolverResult solveResult(m_infinity); - - //--- - //--- solve min{ (c - u.A'')x - alpha | x in F'} - //--- - if (doAllBlocks) { -#ifdef _OPENMP - UTIL_DEBUG(m_app->m_param.LogDebugLevel, 3, - (*m_osLog) - << "===== START Threaded solve of subproblems. =====\n";); - if (m_param.SubProbParallel){ - omp_set_num_threads(min(m_param.NumConcurrentThreadsSubProb, - m_numConvexCon)); - }else{ - omp_set_num_threads(1); - } -#endif - - DecompVarList* potentialVarsT = new DecompVarList[m_numConvexCon]; - CoinAssertHint(potentialVarsT, "Error: Out of Memory"); - - //--- - //--- For pricing, - //--- redCostX: is the red-cost for each original column (c - uhat A")_e - //--- origCost: is the original cost for each original column c_e - //--- alpha: is the dual for the convexity constraint - //--- - //--- The reduced cost of a new variable (column) is the sum of the - //--- reduced cost on each of the original columns in the new variable - //--- minus alpha (this function is responsible for returning the reduced - //--- cost, which includes alpha). 
- //--- - //--- NOTE, redCost does not include alpha as sent in - //--- - //DecompApp * app = algo->getDecompAppMutable(); - /* - DecompSubProbParallelType ParallelType - = static_cast(m_param.SubProbParallelType); - - if (ParallelType == SubProbScheduleDynamic){ - omp_set_schedule(omp_sched_dynamic, m_param.SubProbParallelChunksize); - } - else if (ParallelType == SubProbScheduleRuntime){ - omp_set_schedule(omp_sched_auto,0); - } - else if(ParallelType == SubProbScheduleGuided){ - omp_set_schedule(omp_sched_guided, m_param.SubProbParallelChunksize); - } - else if(ParallelType == SubProbScheduleStatic){ - omp_set_schedule(omp_sched_static, m_param.SubProbParallelChunksize); - } - */ - -#pragma omp parallel for schedule(dynamic, m_param.SubProbParallelChunksize) - for (int subprobIndex = 0 ; subprobIndex < m_numConvexCon; - subprobIndex++) { - - DecompSubModel& subModel = getModelRelax(subprobIndex); - double alpha = u[nBaseCoreRows + subprobIndex]; - DecompSolverResult solveResult(m_infinity); - -#ifdef _OPENMP - UTIL_DEBUG(m_app->m_param.LogDebugLevel, 4, - (*m_osLog) - << "THREAD " << omp_get_thread_num() << - " solving subproblem " << subprobIndex << "\n";); -#else - UTIL_DEBUG(m_app->m_param.LogDebugLevel, 4, - (*m_osLog) << "solve relaxed model = " - << subModel.getModelName() << endl;); -#endif - - timeLimit = max(m_param.SubProbTimeLimitExact - - m_stats.timerOverall.getRealTime(), 0.0); - solveRelaxed(redCostX, - origObjective, - alpha, - nCoreCols, - false,//isNested - subModel, - &solveResult, - potentialVarsT[subprobIndex], - timeLimit); - if (solveResult.m_isCutoff) { - mostNegRCvec[subprobIndex] = min(mostNegRCvec[subprobIndex], 0.0); - } - } - - for (int subprobIndex = 0; subprobIndex < m_numConvexCon; - subprobIndex++) { - /* printf("arg[%d].vars size=%d\n", - t, static_cast(arg[t].vars->size())); - */ - for (it = potentialVarsT[subprobIndex].begin(); - it != potentialVarsT[subprobIndex].end(); it++) { - varRedCost = (*it)->getReducedCost(); - whichBlock = (*it)->getBlockId(); - - if ((*it)->getVarType() == DecompVar_Point) { - alpha = u[nBaseCoreRows + whichBlock]; - } else if ( (*it)->getVarType() == DecompVar_Ray) { - alpha = 0; - } - - UTIL_DEBUG(m_app->m_param.LogDebugLevel, 3, - (*m_osLog) - << "alpha[block=" << whichBlock << "]:" << alpha - << " varRedCost: " << varRedCost << "\n"; - ); - } - } - -#ifdef _OPENMP - UTIL_DEBUG(m_app->m_param.LogDebugLevel, 3, - (*m_osLog) - << "===== END Threaded solve of subproblems. 
=====\n";); -#endif - - //put the vars from all threads into one vector - for (int subprobIndex = 0; subprobIndex < m_numConvexCon; subprobIndex++) { - for (it = potentialVarsT[subprobIndex].begin(); - it != potentialVarsT[subprobIndex].end(); it++) { - potentialVars.push_back(*it); - } - } - - UTIL_DELARR(potentialVarsT); - - potentialVarsT = new DecompVarList[m_numConvexCon]; - map >::iterator mivt; - vector ::iterator vit; - - for (mivt = m_modelRelaxNest.begin(); mivt != m_modelRelaxNest.end(); mivt++) { - for (vit = (*mivt).second.begin(); vit != (*mivt).second.end(); vit++) { - b = (*vit).getBlockId(); - alpha = u[nBaseCoreRows + b]; - UTIL_DEBUG(m_app->m_param.LogDebugLevel, 4, - (*m_osLog) << "solve relaxed nested model = " - << (*vit).getModelName() << endl;); - timeLimit = max(m_param.SubProbTimeLimitExact - - m_stats.timerOverall.getRealTime(), 0.0); - solveRelaxed(redCostX, - origObjective, //original cost vector - alpha, - nCoreCols, //num core columns - true, //isNested - (*vit), - &solveResult, //results - potentialVarsT[b], //var list to populate - timeLimit); - - if (solveResult.m_isCutoff) { - mostNegRCvec[b] = min(mostNegRCvec[b], 0.0); - } - } - } - //put the vars from all threads into one vector - for (int subprobIndex = 0; subprobIndex < m_numConvexCon; subprobIndex++) { - for (it = potentialVarsT[subprobIndex].begin(); - it != potentialVarsT[subprobIndex].end(); it++) { - potentialVars.push_back(*it); - } - } - - UTIL_DELARR(potentialVarsT); - - } //END: if(doAllBlocks) - else { - //--- - //--- Ask the user which blocks should be solved. - //--- The user might also provide a different set of duals for - //--- each block. If so, use that to calculate reduced cost for - //--- that block. - //--- - vector blocksToSolve; - map > userDualsByBlock; - m_app->solveRelaxedWhich(blocksToSolve, - userDualsByBlock); - UTIL_MSG(m_app->m_param.LogDebugLevel, 3, - (*m_osLog) << "Blocks to solve: "; - UtilPrintVector(blocksToSolve, m_osLog); - ); - int nBlocks = static_cast(blocksToSolve.size()); - //--- - //--- keep trying until find a block with candidate variable - //--- - bool foundNegRC = false; - - for (i = 0; i < nBlocks; i++) { - b = blocksToSolve[i]; - //--- - //--- make sure the model for this block can be found - //--- - map::iterator mit; - mit = m_modelRelax.find(b); - assert(mit != m_modelRelax.end()); - //--- - //--- get the OSI objet - //--- - DecompSubModel& subModel = (*mit).second; - //--- - //--- did the user provide a specific dual for this block - //--- - map >::iterator mitv; - mitv = userDualsByBlock.find(b); - - if (mitv == userDualsByBlock.end()) { - if (m_param.LogDebugLevel >= 3) - (*m_osLog) << "Block b: " << b - << " using standard duals" << endl; - - if (m_param.LogDebugLevel >= 4) { - for (int i = 0; i < m; i++) { - (*m_osLog) << "r:" << i << "dual: " << u[i] << endl; - } - } - - //--- - //--- NOTE: the variables coming back include alpha in - //--- calculation of reduced cost - //--- - alpha = u[nBaseCoreRows + b]; - timeLimit = max(m_param.SubProbTimeLimitExact - - m_stats.timerOverall.getRealTime(), 0.0); - solveRelaxed(redCostX, - origObjective, - alpha, - nCoreCols, - false,//isNested - subModel, - &solveResult, - potentialVars, - timeLimit); - } else { - vector& uBlockV = mitv->second; - double* uBlock = &uBlockV[0]; - double* redCostXb = 0; - double* uBlockAdj = 0; - - if (static_cast(uBlockV.size()) != m) { - throw UtilException("The size of the user dual vector is not the same as the", - "number of master rows generateVars", "DecompAlgo"); - 
} - - if (m_param.LogDebugLevel >= 3) { - (*m_osLog) << "Block b: " << b - << " using user manipulated duals" << endl; - } - - if (m_param.LogDebugLevel >= 4) { - for (int i = 0; i < m; i++) { - (*m_osLog) << "r:" << i << "dual: " << uBlock[i] << endl; - } - } - - redCostXb = new double[nCoreCols]; // (c - uhat.A") in x-space - CoinAssertHint(redCostXb, "Error: Out of Memory"); - uBlockAdj = new double[m - m_numConvexCon]; - CoinAssertHint(uBlockAdj, "Error: Out of Memory"); - //--- - //--- remove the convexity constraints from the dual vector - //--- - generateVarsAdjustDuals(uBlock, uBlockAdj); - //--- - //--- calculate reduced costs - //--- - generateVarsCalcRedCost(uBlockAdj, redCostXb); - //--- - //--- solve relaxed problem - //--- - alpha = uBlockAdj[nBaseCoreRows + b]; - timeLimit = max(m_param.SubProbTimeLimitExact - - m_stats.timerOverall.getRealTime(), 0.0); - solveRelaxed(redCostXb, - origObjective, - alpha, - nCoreCols, - false,//isNested - subModel, - &solveResult, - potentialVars, - timeLimit); - UTIL_DELARR(redCostXb); - UTIL_DELARR(uBlockAdj); - } - - if (solveResult.m_isCutoff) { - mostNegRCvec[b] = min(mostNegRCvec[b], 0.0); - } - - m_rrLastBlock = b; - foundNegRC = false; - - for (it = potentialVars.begin(); it != potentialVars.end(); it++) { - varRedCost = (*it)->getReducedCost(); - - if (varRedCost < - m_param.RedCostEpsilon) { //TODO: strict, -dualTOL? - foundNegRC = true; - } - } - }//END:for(i = 0; i < nBlocks; i++) - - m_rrIterSinceAll++; - - //--- - //--- if we searched through all the blocks but still didn't - //--- find any columns with negative reduced cost, then we CAN - //--- update the LB and should - as we have priced out - //--- - //if(!foundNegRC) - // m_rrIterSinceAll = 0; - //--- - //--- if user provided blocks found no negRC, solve all blocks - //--- - if (!foundNegRC) { - printf("no neg rc from user blocks, solve all blocks\n"); - - //TODO: make this a function (to solve all blocks) - map::iterator mit; - - for (mit = m_modelRelax.begin(); mit != m_modelRelax.end(); mit++) { - DecompSubModel& subModel = (*mit).second; - b = subModel.getBlockId(); - UTIL_DEBUG(m_app->m_param.LogDebugLevel, 4, - (*m_osLog) << "solve relaxed model = " - << subModel.getModelName() << endl;); - //--- - //--- PC: get dual vector - //--- alpha --> sum{s} lam[s] = 1 - convexity constraint - //--- - alpha = u[nBaseCoreRows + b]; - //TODO: stat return, restrict how many? pass that in to user? 
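This stretch of the old code handles the "solve only the blocks the user asked for" path: price a subset of blocks with (possibly user-supplied) duals, then scan the returned columns for one whose reduced cost is below -RedCostEpsilon; when none is found, the code (continued below) falls back to pricing every block. Stripped of the DIP bookkeeping, that control reads roughly as follows (placeholder names, illustrative only):

    // Sketch of the round-robin pricing control; names are placeholders.
    #include <vector>

    // Price all blocks in Phase I or every roundRobinInterval iterations.
    bool priceAllBlocks(bool phaseOne, int itersSinceAllBlocks,
                        int roundRobinInterval) {
      return phaseOne || itersSinceAllBlocks >= roundRobinInterval;
    }

    // True if any candidate column prices out (reduced cost < -eps);
    // otherwise the caller falls back to solving every block.
    bool anyNegativeRedCost(const std::vector<double> &redCosts, double eps) {
      for (double rc : redCosts)
        if (rc < -eps)
          return true;
      return false;
    }
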
- //--- - //--- NOTE: the variables coming back include alpha in - //--- calculation of reduced cost - //--- - timeLimit = max(m_param.SubProbTimeLimitExact - - m_stats.timerOverall.getRealTime(), 0.0); - solveRelaxed(redCostX, - origObjective, - alpha, - nCoreCols, - false, //isNested - subModel, - &solveResult, - potentialVars, - timeLimit); - - //if cutoff delcares infeasible, we know subprob >= 0 - // we can use 0 as valid (but possibly weaker bound) - if (solveResult.m_isCutoff) { - mostNegRCvec[b] = min(mostNegRCvec[b], 0.0); - } - } - - map >::iterator mivt; - vector ::iterator vit; - - for (mivt = m_modelRelaxNest.begin(); - mivt != m_modelRelaxNest.end(); mivt++) { - for (vit = (*mivt).second.begin(); - vit != (*mivt).second.end(); vit++) { - b = (*vit).getBlockId(); - alpha = u[nBaseCoreRows + b]; - UTIL_DEBUG(m_app->m_param.LogDebugLevel, 4, - (*m_osLog) << "solve relaxed nested model = " - << (*vit).getModelName() << endl;); - timeLimit = max(m_param.SubProbTimeLimitExact - - m_stats.timerOverall.getRealTime(), 0.0); - solveRelaxed(redCostX, - origObjective, //original cost vector - alpha, - nCoreCols, //num core columns - true, //isNested - (*vit), - &solveResult, //results - potentialVars, //var list to populate - timeLimit); - - if (solveResult.m_isCutoff) { - mostNegRCvec[b] = min(mostNegRCvec[b], 0.0); - } - } - } - - m_rrIterSinceAll = 0; - } - }//END: else(doAllBlocks) + UTIL_DEBUG(m_param.LogDebugLevel, 3, + (*m_osLog) << "Gap is tight and we have NO " + << "branch candidate" << endl;); + } + } + + break; + case PHASE_CUT: { + //--- + //--- if we want to always favor pricing, then just do it + //--- + if (m_param.PCStrategy == FavorPrice && isPricePossible) { + nextPhase = PHASE_PRICE2; + goto PHASE_UPDATE_FINISH; + } - for (it = potentialVars.begin(); it != potentialVars.end(); it++) { - varRedCost = (*it)->getReducedCost(); - whichBlock = (*it)->getBlockId(); - alpha = u[nBaseCoreRows + whichBlock]; - UTIL_DEBUG(m_app->m_param.LogDebugLevel, 3, - (*m_osLog) - << "alpha[block=" << whichBlock << "]:" << alpha - << " varRedCost: " << varRedCost << "\n"; - ); + //--- + //--- if this is the first call, just continue + //--- + if (priceCallsTotal == 0 && cutCallsTotal == 0) { + nextPhase = PHASE_CUT; + goto PHASE_UPDATE_FINISH; + } - //--- - //--- unlikey to happen - but we should check ALL columns - //--- to see if they are IP feasible - whether or not the - //--- column has negative red-cost - //--- - //THINK: in blocks case, these are partial columns - // and this check can be relatively expensive for - // large number of original columns. - //But, it is impossible for the partial column to - // be feasible to full IP - so this check is useless. - if (m_numConvexCon == 1) { - (*it)->fillDenseArr(modelCore->getNumCols(), - m_memPool.dblArrNCoreCols); - - //--- - //--- STOP: this isIpFeasible uses isLPFeasible - //--- isLPFeasible should have 2 settings - //--- here, it can be true, but might not be - //--- if checking recompsed point, it must be true, else bug - //--- - if (isIPFeasible(m_memPool.dblArrNCoreCols)) { - if (m_app->APPisUserFeasible(m_memPool.dblArrNCoreCols, - modelCore->getNumCols(), - m_param.TolZero)) { - DecompSolution* decompSol - = new DecompSolution(modelCore->getNumCols(), - m_memPool.dblArrNCoreCols, - (*it)->getOriginalCost()); - //TODO: solution pool? 
- m_xhatIPFeas.push_back(decompSol); - setObjBoundIP((*it)->getOriginalCost()); - } - } - } + //--- + //--- if tight was gap, the we went to cuts and found none, + //--- then stop on gap + //--- + gapTight = isGapTight(); + if (priceCallsTotal > 0 && cutsThisCall == 0 && gapTight) { + m_stopCriteria = DecompStopGap; //--- - //--- for multi-blocks the mostNegReducedCost is the - //--- sum of the best reduced costs over all blocks - //--- NOTE: we need all blocks to make it valid - //--- - ////////////STOP: this would explain why the LB seems wrong - //////////// on ATM model, since we were stopping on gap, but - //////////// declaring it optimal. Now, it is fixed and the bound - //////////// should be valid, but stopping on gap won't be valid. - //--- TODO: if a block was NOT solved to optimality, - //--- we can still use the problems LB, but that will NOT - //--- be equivalent to its varRedCost - so we need to return - //--- that value as well, if we want to use it - //--- The red-cost does not have to be used in bound calculation - //--- it is only relevant for deciding on entering columns + //--- Even if we are stop on gap, we need to be careful of + //--- the following: If the last solution was integral (no + //--- branching candidates) but we are not done pricing out + //--- (i.e., a column with negative RC still exist) and we + //--- declare that we are tailing off then the node will get + //--- put back in the node work queue. This can lead to that + //--- node being repeatedly stopped and reseted. It is better + //--- to just price it out since we cannot branch on it in + //--- this state. //--- - if (varRedCost < mostNegRCvec[whichBlock]) { - mostNegRCvec[whichBlock] = varRedCost; - } + UTIL_DEBUG(m_param.LogDebugLevel, 3, + (*m_osLog) << "Gap is tight" << endl;); + // int branchedOnIndex = -1; + // double branchedOnValue = 0; + // chooseBranchVar(branchedOnIndex, branchedOnValue); + std::vector> downBranchLB, downBranchUB, + upBranchLB, upBranchUB; + bool gotBranch = + chooseBranchSet(downBranchLB, downBranchUB, upBranchLB, upBranchUB); - if (varRedCost < - m_param.RedCostEpsilon) { //TODO: strict, -dualTOL? - UTIL_MSG(m_app->m_param.LogDebugLevel, 3, - (*m_osLog) << "PUSHING new var with varRedCost= " - << UtilDblToStr(varRedCost, 5) << endl;); - //--- - //--- the variable has neg reduced cost, push onto list - //--- - newVars.push_back(*it); + if (m_param.NodeLimit == 0) { + UTIL_DEBUG(m_param.LogDebugLevel, 3, + (*m_osLog) << "Gap is tight and NodeLimit=0." 
<< endl;); + nextPhase = PHASE_DONE; + goto PHASE_UPDATE_FINISH; + } else if (gotBranch) { + // if(branchedOnIndex != -1){ + UTIL_DEBUG(m_param.LogDebugLevel, 3, + (*m_osLog) << "Gap is tight and we have a " + << "branch candidate" << endl;); + nextPhase = PHASE_DONE; + goto PHASE_UPDATE_FINISH; } else { - UTIL_DELPTR(*it); - } - } - - mostNegReducedCost = 0.0; + UTIL_DEBUG(m_param.LogDebugLevel, 3, + (*m_osLog) << "Gap is tight and we have NO " + << "branch candidate" << endl;); + } + } + + //--- + //--- Princess Bride (1987): + //--- "truly, you have a dizzying intellect" + //--- + mustSwitch = false; + considerSwitch = false; + + //--- + //--- if we hit the total limit or + //--- we found no new cuts this call + //--- then we must switch (or we are done) + //--- + if (!isCutPossible || (cutsThisCall == 0) || (cutsThisRound == 0)) { + mustSwitch = true; + } + + //--- + //--- if we hit the round limit, we must consider switching + //--- + if (cutCallsRound >= m_param.RoundCutItersLimit) { + considerSwitch = true; + } + + if (mustSwitch) { + //--- + //--- we must switch from cutting + //--- + if (!isPricePossible) { + //--- + //--- if we exceed the price iter limit, we are done + //--- + nextPhase = PHASE_DONE; + m_stopCriteria = DecompStopIterLimit; + } else { + if ((priceCallsTotal > 0) && (cutsThisRound == 0) && + (varsThisRound == 0)) { + //--- + //--- nothing new happened, so we are done + //--- + nextPhase = PHASE_DONE; + } else { + //--- + //--- something new happened, so try price again + //--- + nextPhase = PHASE_PRICE2; + m_nodeStats.resetPriceRound(); + } + } + } // END: if(mustSwitch) + else if (considerSwitch) { + //--- + //--- we consider switching from cutting + //--- + if (!isPricePossible) { + if (!isCutPossible) { + //--- + //--- if we exceed both iter limits, we are done + //--- + nextPhase = PHASE_DONE; + m_stopCriteria = DecompStopIterLimit; + } else { + //--- + //--- if we exceed the price iter limit, but not the cut lim + //--- since we are not in mustSwitch, m_cutsThisRound > 0, + //--- so we can go back to cutting, even though it violates + //--- the round counter, because we have no other choice + //--- + nextPhase = PHASE_CUT; + } + } else { + if ((cutsThisRound == 0) && (varsThisRound == 0)) { + //--- + //--- nothing new happened, so we are done + //--- + nextPhase = PHASE_DONE; + } else { + //--- + //--- something new happened, so try price again + //--- + nextPhase = PHASE_PRICE2; + m_nodeStats.resetPriceRound(); + } + } + } // END: else if(considerSwitch) + else { + nextPhase = PHASE_CUT; + } + } // END: case PHASE_CUT + break; + default: + assert(0); + // UtilAssert(0, "Bad Phase in phaseUpdate!", m_osLog); + } // END: switch(phase) - for (b = 0; b < m_numConvexCon; b++) { - mostNegReducedCost += mostNegRCvec[b]; - UTIL_MSG(m_app->m_param.LogDebugLevel, 3, - (*m_osLog) - << "mostNegR[block=" << b << "]: " << mostNegRCvec[b] - << " mostNegReducedCost: " << mostNegReducedCost << "\n"; - ); - } - - UTIL_DEBUG(m_app->m_param.LogDebugLevel, 4, - (*m_osLog) << "m_rrIterSinceAll = " - << m_rrIterSinceAll << endl; - ); - potentialVars.clear(); //THINK? what does clear do exactly ? 
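Both phases above consult isGapTight() before deciding to stop. Its definition is not part of this hunk; as a reading aid only, a relative-gap test of the usual form is sketched below. The tolerance and the guard for a missing incumbent are assumptions, not DIP's exact formula.

    // Assumed form of a relative-gap check; DIP's isGapTight() may differ.
    #include <algorithm>
    #include <cmath>

    bool gapIsTightSketch(double lowerBound, double upperBound,
                          double relTol = 1e-5) {
      if (upperBound >= 1e20)          // assumed "no incumbent yet" sentinel
        return false;
      const double denom = std::max(std::fabs(upperBound), 1.0);
      return (upperBound - lowerBound) / denom <= relTol;
    }
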
- UTIL_DEBUG(m_app->m_param.LogDebugLevel, 4, - - for (it = newVars.begin(); it != newVars.end(); it++) { - (*it)->print(m_infinity, m_osLog, m_app); - } - ); - //--- - //--- free local memory - //--- - UTIL_DELARR(u_adjusted); - UTIL_DELARR(redCostX); - m_stats.thisGenVars.push_back(m_stats.timerOther1.getRealTime()); - UtilPrintFuncEnd(m_osLog, m_classTag, - "generateVars()", m_param.LogDebugLevel, 2); - return static_cast(newVars.size()); +PHASE_UPDATE_FINISH: + UTIL_MSG(m_param.LogDebugLevel, 3, + (*m_osLog) << "PhaseOut: " << DecompPhaseStr[nextPhase]; + (*m_osLog) << " StatusOut: " << DecompStatusStr[nextStatus]; + (*m_osLog) << endl;); + phase = nextPhase; + status = nextStatus; + UtilPrintFuncEnd(m_osLog, m_classTag, "phaseUpdate()", m_param.LogDebugLevel, + 2); } -//TODO - ugh! only PC again? //------------------------------------------------------------------------ // -//this seems ok for C and PC... but what when we want to do DC within C THINK -int DecompAlgo::generateCuts(double* xhat, - DecompCutList& newCuts) -{ - UtilPrintFuncBegin(m_osLog, m_classTag, - "generateCuts()", m_param.LogDebugLevel, 2); - m_stats.timerOther1.reset(); - DecompConstraintSet* modelCore = m_modelCore.getModel(); - m_app->generateCuts(xhat, - newCuts); - - //--- - //--- attempt to generate CGL cuts on x?? - //--- the only way this is going to work, is if you carry - //--- around another OSI problem instance completely in terms of x - //--- only allow CGL to generate cuts on the original full formulation?? - //--- otherwise, we'd be allowing cuts on cuts - would have to add cuts - //--- to master and to this version... hmmm... ughh - //--- for some problems, you can't have the full original formulation - //--- for PC you probably don't want it anyway... P' comes in from ep's - //--- you can just generate cuts on Q"? that cut off xhat - //--- but for C, you need Q' and Q" - //--- m_masterSI holds the problem in terms of lambda (over Q") - //--- m_subprobSI holds the problem in terms of x (over P') - //--- - if (m_param.CutCGL) { - assert(m_cutgenSI); - - if (m_algo == PRICE_AND_CUT) { - //TODO: this could be tighter in the tree - double gLB = getNodeIndex() ? m_globalLB : - m_nodeStats.objBest.first; - m_cutgenSI->setRowLower(m_cutgenObjCutInd, gLB); - } - - if (m_param.LogDumpModel > 1) { - string baseName = "cutgenProb"; - - if (m_isStrongBranch) { - baseName += "_SB"; - } +void DecompAlgo::generateVarsCalcRedCost(const double *u, double *redCostX) { + int i; + DecompConstraintSet *modelCore = m_modelCore.getModel(); + int nCoreCols = modelCore->getNumCols(); + const double *origObjective = getOrigObjective(); + //--- + //--- Calculate reduced costs for a given dual vector. 
+ //--- + //--- in DW, we use (c-uA'')x, where A'' is the core matrix + //--- u, in this case has dimension = #core rows + //--- in D , we use (c-u )x, we don't use the core matrix + //--- u, in this case has dimension = #core cols + //--- + UTIL_DEBUG( + m_app->m_param.LogDebugLevel, 5, + int nMasterRows = m_masterSI->getNumRows(); + assert((nMasterRows - m_numConvexCon) == modelCore->M->getNumRows());); + + if (m_algo == DECOMP) { + for (i = 0; i < nCoreCols; i++) { + redCostX[i] = u[i]; + } + } else { + modelCore->M->transposeTimes(u, redCostX); + } + + //--- + //--- if in Phase I, c=0 + //--- + if (m_phase == PHASE_PRICE1) { + for (i = 0; i < nCoreCols; i++) { + redCostX[i] = -redCostX[i]; + } + } else { + for (i = 0; i < nCoreCols; i++) { + redCostX[i] = origObjective[i] - redCostX[i]; + } + } +} - printCurrentProblem(m_cutgenSI, - baseName, - m_nodeStats.nodeIndex, - m_nodeStats.cutCallsTotal, - m_nodeStats.priceCallsTotal); - } +//------------------------------------------------------------------------ // +void DecompAlgo::generateVarsAdjustDuals(const double *uOld, double *uNew) { + int r; + int nMasterRows = m_masterSI->getNumRows(); + DecompConstraintSet *modelCore = m_modelCore.getModel(); + int nBaseCoreRows = modelCore->nBaseRows; + int nCoreCols = modelCore->getNumCols(); + + if (m_algo == DECOMP) { + nBaseCoreRows = nCoreCols; + } + + //--- + //--- copy the dual vector for original core rows + //--- + CoinDisjointCopyN(uOld, nBaseCoreRows, uNew); + + //--- + //--- sanity check - make sure we are only skipping convexity rows + //--- everything else has a dual we want to use + //--- + if (m_param.DebugLevel >= 1) { + for (r = 0; r < nBaseCoreRows; r++) { + assert(m_masterRowType[r] == DecompRow_Original || + m_masterRowType[r] == DecompRow_Branch); + } + + for (r = nBaseCoreRows; r < nBaseCoreRows + m_numConvexCon; r++) { + assert(m_masterRowType[r] == DecompRow_Convex); + } + + for (r = nBaseCoreRows + m_numConvexCon; r < nMasterRows; r++) { + assert(m_masterRowType[r] == DecompRow_Cut); + } + } + + // NOTE: if no cuts, don't need to do any of this + // if DECOMP, don't need to do any of this? 
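The reformatted generateVarsAdjustDuals() continues below: it keeps the duals on the original core and branch rows, skips the convexity rows (their duals, the alphas, are treated as per-block objective offsets), and appends the duals on any cut rows. On plain arrays the mapping is just two block copies; the sketch uses std::copy where the source uses CoinDisjointCopyN.

    // Sketch: master rows are ordered [core/branch | convexity | cuts];
    // the pricing duals drop the convexity rows and shift the cut duals down.
    #include <algorithm>

    void adjustDualsSketch(const double *uMaster, // length nMasterRows
                           int nBaseCoreRows,     // core + branch rows
                           int nConvexityRows,    // one per block
                           int nMasterRows,
                           double *uPricing)      // length nMasterRows - nConvexityRows
    {
      // duals on the original core/branch rows
      std::copy(uMaster, uMaster + nBaseCoreRows, uPricing);
      // duals on cut rows, shifted over the skipped convexity rows
      std::copy(uMaster + nBaseCoreRows + nConvexityRows,
                uMaster + nMasterRows,
                uPricing + nBaseCoreRows);
    }

Keeping the alphas out of this vector is what lets the subproblem objective stay a plain reduced-cost vector, with the convexity dual handled as a cutoff.
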
+ //--- + //--- append dual vector for any added cuts + //--- skip over convexity constraints + //--- + assert((nMasterRows - nBaseCoreRows - m_numConvexCon) == + getNumRowType(DecompRow_Cut)); + CoinDisjointCopyN(uOld + nBaseCoreRows + m_numConvexCon, // from + nMasterRows - nBaseCoreRows - m_numConvexCon, // size + uNew + nBaseCoreRows); // to + UTIL_DEBUG( + m_app->m_param.LogDebugLevel, 5, + + for (int i = 0; i < nMasterRows; i++) { + if (!UtilIsZero(uOld[i], DecompEpsilon)) { + (*m_osLog) << "uOld[" << setw(5) << i << " ]: " << setw(12) + << UtilDblToStr(uOld[i], 3) << " --> " + << DecompRowTypeStr[m_masterRowType[i]] << "\n"; + } + }); + UTIL_DEBUG( + m_app->m_param.LogDebugLevel, 5, + + for (int i = 0; i < (nMasterRows - m_numConvexCon); i++) { + if (!UtilIsZero(uNew[i], DecompEpsilon)) { + (*m_osLog) << "uNew[" << setw(5) << i << " ]: " << setw(12) + << UtilDblToStr(uNew[i], 3) << endl; + } + }); +} - m_cgl->generateCuts(m_cutgenSI, - m_masterSI, - xhat, - modelCore->integerVars, - newCuts); - } +//------------------------------------------------------------------------ // +int DecompAlgo::generateVars(DecompVarList &newVars, + double &mostNegReducedCost) { + //--- + //--- solve min{s in F' | RC[s]} to generate new variables + //--- + //--- if LP was feasible, + //--- then RC[s] = c.s - (uhat.A''.s + alpha) + //--- = c.s - uhat.A''.s - alpha + //--- = (c - uhat.A'')s - alpha + //--- + //--- The master LP was formed in the following order + //--- (A''s) lam[s] >= b'' - from the original core [A'', b''] + //--- sum{s} lam[s] = 1 - convexity constraint + //--- But, we may have added cuts - which conceptually are added + //--- into [A'', b'']. But, in reality they are simply appended to + //--- the end of the LP matrix. So, when we get back the dual vector, + //--- we have to be aware of where alpha actually is. + //--- + UtilPrintFuncBegin(m_osLog, m_classTag, "generateVars()", + m_param.LogDebugLevel, 2); + m_stats.timerOther1.reset(); + //--- + //--- TODO: + //--- Blocks... + //--- How do we adjust this for different blocks? + //--- Each block has exactly one convexity constraint. Otherwise, + //--- the coefficient in the matrix is 0, so there is no effect. + //--- So, all we need to do is use the approriate block's dual variable + //--- call it alpha. + //--- + //--- At this point, we have many strategies -- we could do round robin + //--- and only generate one var at a time. Or, we can generate vars for + //--- every block every time (default for now). + //--- + int i, b; + DecompVarList potentialVars; + DecompConstraintSet *modelCore = m_modelCore.getModel(); + const int m = m_masterSI->getNumRows(); + int nBaseCoreRows = modelCore->nBaseRows; + const int nCoreCols = modelCore->getNumCols(); + const double *u = NULL; + const double *userU = NULL; + const double *origObjective = getOrigObjective(); + double *redCostX = NULL; + double alpha = 0.0; + int whichBlock; + double varRedCost; + double timeLimit; + DecompVarList::iterator it; + // assert(!m_masterSI->isProvenPrimalInfeasible()); + + if (m_algo == DECOMP) { + nBaseCoreRows = nCoreCols; + } + + //--- + //--- PC: get dual vector + //--- u --> (A''s) lam[s] >= b'' --> from core [A'', b''] + //--- alpha --> sum{s} lam[s] = 1 --> convexity constraint + //--- + //--- NOTE: Sign of alpha is negative which is different than infeas case + //--- the solve relax function adds alpha so no sign switch needed. + //--- + //--- We flip the sign here. 
+ //--- + //--- + //--- get the master dual solution + //--- the user can override this + //--- + u = getMasterDualSolution(); + userU = m_app->getDualForGenerateVars(u); + + if (userU) { + u = userU; + } + + // THINK: includes artificials now + redCostX = new double[nCoreCols]; // (c - uhat.A") in x-space + CoinAssertHint(redCostX, "Error: Out of Memory"); + // THINK: we should be checked col-type to make sure we get the + // right rows for the convexity constraints + UTIL_DEBUG(m_app->m_param.LogDebugLevel, 4, + (*m_osLog) << "m =" << m << endl; + (*m_osLog) << "numConvexCon =" << m_numConvexCon << endl; + (*m_osLog) << "nBaseCoreRows=" << nBaseCoreRows << endl;); + //--- + //--- remove the convexity constraint(s) from the dual vector + //--- TODO/THINK: this is fugly + //--- this is done because the convexity constraint acts like an + //--- objective offset in subproblem - it might be cleaner to manage + //--- it as an offset and just check for < 0 directly, rather than + //--- less than alpha -- sign switches are a little messy + //--- + double *u_adjusted = new double[m - m_numConvexCon]; + CoinAssertHint(u_adjusted, "Error: Out of Memory"); + //--- + //--- remove the convexity constraints from the dual vector + //--- + generateVarsAdjustDuals(u, u_adjusted); + //--- + //--- calculate reduced costs + //--- + generateVarsCalcRedCost(u_adjusted, redCostX); + + // TODO: move this all to debug utility file + if (m_param.DebugLevel >= 1) { + checkDuals(); + } + + //--- + //--- sanity check - none of the columns currently in master + //--- should have negative reduced cost + //--- m_vars contains the variables (in x-space) that have + //--- been pushed into the master LP (assumes no compression) + //--- + if (m_param.DebugLevel >= 1) { + checkReducedCost(u, u_adjusted); + } + + //--- + //--- if doing round-robin, solve just one block unless + //--- in PhaseI (do all blocks) + //--- in PhaseII every n iterations (so we can get a valid update to LB) + //--- + UTIL_DEBUG(m_app->m_param.LogDebugLevel, 3, + (*m_osLog) << "RoundRobin iterSinceAll= " << m_rrIterSinceAll + << " lastBlock= " << m_rrLastBlock << endl;); + int doAllBlocks = false; + + if (m_phase == PHASE_PRICE1 || + m_rrIterSinceAll >= m_param.RoundRobinInterval) { + doAllBlocks = true; + m_rrIterSinceAll = 0; + } + + // vector mostNegRCvec(m_numConvexCon, m_infinity); + vector mostNegRCvec(m_numConvexCon, 0); + DecompSolverResult solveResult(m_infinity); + + //--- + //--- solve min{ (c - u.A'')x - alpha | x in F'} + //--- + if (doAllBlocks) { +#ifdef _OPENMP + UTIL_DEBUG( + m_app->m_param.LogDebugLevel, 3, + (*m_osLog) << "===== START Threaded solve of subproblems. =====\n";); + if (m_param.SubProbParallel) { + omp_set_num_threads( + min(m_param.NumConcurrentThreadsSubProb, m_numConvexCon)); + } else { + omp_set_num_threads(1); + } +#endif -#if 1 + DecompVarList *potentialVarsT = new DecompVarList[m_numConvexCon]; + CoinAssertHint(potentialVarsT, "Error: Out of Memory"); - //do DC only if no other cuts were found or if CutDC=2 - // this is the case of doing for init vars - if ((m_param.CutDC == 1 && newCuts.size() == 0) || - (m_param.CutDC == 2)) { - //printf("\n\n==================================== IN DECOMP\n\n"); - DecompAlgoD D(m_app, *m_utilParam, - xhat, modelCore->getNumCols()); - //also might want to use the columns you get here for something... - //heur for ubs, etc.. - //either returns a set of cuts or a decomposition, have - //that wrap solve()? 
- D.solveD(&newCuts); - //--- - //--- copy the vars generated in passes of DC into initVars - //--- to warm-start DW master formulation - //--- - //--- NO: this won't work because it just copies the pointers - //--- and these will be deleted when D scopes out - see DecompAlgo - //--- destructor... - //m_vars.insert(m_vars.begin(), D.m_vars.begin(), D.m_vars.end()); - //--- - //--- this moves the elements of D.m_vars to m_vars - //--- this is what we want since D will be deleted after this - //--- - m_vars.splice(m_vars.end(), D.m_vars); - //printf("VARS moved into CPM object\n"); - //printVars(m_osLog);//use this to warm start DW - //a hidden advantage of decomp in BC? - DecompSolution* bestSol = NULL; - vector::iterator it; - double thisBound; - double bestBoundUB = m_nodeStats.objBest.second; + //--- + //--- For pricing, + //--- redCostX: is the red-cost for each original column (c - uhat A")_e + //--- origCost: is the original cost for each original column c_e + //--- alpha: is the dual for the convexity constraint + //--- + //--- The reduced cost of a new variable (column) is the sum of the + //--- reduced cost on each of the original columns in the new variable + //--- minus alpha (this function is responsible for returning the reduced + //--- cost, which includes alpha). + //--- + //--- NOTE, redCost does not include alpha as sent in + //--- + // DecompApp * app = algo->getDecompAppMutable(); + /* + DecompSubProbParallelType ParallelType + = static_cast(m_param.SubProbParallelType); - for (it = D.m_xhatIPFeas.begin(); - it != D.m_xhatIPFeas.end(); it++) { - thisBound = (*it)->getQuality(); - UTIL_DEBUG(m_param.LogDebugLevel, 3, - (*m_osLog) << "From DECOMP, IP Feasible with Quality ="; - (*m_osLog) << thisBound << endl; - ); - - if ((*it)->getQuality() <= bestBoundUB) { - bestBoundUB = (*it)->getQuality(); - bestSol = (*it); - } + if (ParallelType == SubProbScheduleDynamic){ + omp_set_schedule(omp_sched_dynamic, m_param.SubProbParallelChunksize); } - - //need to make copy of solution, since D.m_xhatIpFeas goes out of scope - if (bestSol) { - DecompSolution* bestSolCp = new DecompSolution(*bestSol); - m_xhatIPFeas.push_back(bestSolCp); - setObjBoundIP(bestSolCp->getQuality()); - m_xhatIPBest = bestSolCp; - //m_xhatIPBest->print(); + else if (ParallelType == SubProbScheduleRuntime){ + omp_set_schedule(omp_sched_auto,0); } + else if(ParallelType == SubProbScheduleGuided){ + omp_set_schedule(omp_sched_guided, m_param.SubProbParallelChunksize); + } + else if(ParallelType == SubProbScheduleStatic){ + omp_set_schedule(omp_sched_static, m_param.SubProbParallelChunksize); + } + */ - //this could also very likely return a new gUB - - // for the case when it does find a decomposition - // and luckily it is feasible to original? - //STOP -- 6/6/08 - //if decomp is found, then can't use currently - just looking for - //farkas -- if decomp is found this means that z_LP = z_DW for that - //relaxation?? - // printf("D.m_stopCriteria = %s\n", - // DecompAlgoStopStr[D.m_stopCriteria].c_str()); - //who deletes this memory? better to pass in newCuts.. 
- //printf("\n\n====================================OUT DECOMP\n\n"); - //exit(1); - } - -#endif - m_stats.thisGenCuts.push_back(m_stats.timerOther1.getRealTime()); - UtilPrintFuncEnd(m_osLog, m_classTag, - "generateCuts()", m_param.LogDebugLevel, 2); - return static_cast(newCuts.size()); -} - +#pragma omp parallel for schedule(dynamic, m_param.SubProbParallelChunksize) + for (int subprobIndex = 0; subprobIndex < m_numConvexCon; subprobIndex++) { + DecompSubModel &subModel = getModelRelax(subprobIndex); + double alpha = u[nBaseCoreRows + subprobIndex]; + DecompSolverResult solveResult(m_infinity); +#ifdef _OPENMP + UTIL_DEBUG(m_app->m_param.LogDebugLevel, 4, + (*m_osLog) << "THREAD " << omp_get_thread_num() + << " solving subproblem " << subprobIndex << "\n";); +#else + UTIL_DEBUG(m_app->m_param.LogDebugLevel, 4, + (*m_osLog) << "solve relaxed model = " + << subModel.getModelName() << endl;); +#endif + timeLimit = max(m_param.SubProbTimeLimitExact - + m_stats.timerOverall.getRealTime(), + 0.0); + solveRelaxed(redCostX, origObjective, alpha, nCoreCols, + false, // isNested + subModel, &solveResult, potentialVarsT[subprobIndex], + timeLimit); + if (solveResult.m_isCutoff) { + mostNegRCvec[subprobIndex] = min(mostNegRCvec[subprobIndex], 0.0); + } + } + + for (int subprobIndex = 0; subprobIndex < m_numConvexCon; subprobIndex++) { + /* printf("arg[%d].vars size=%d\n", + t, static_cast(arg[t].vars->size())); + */ + for (it = potentialVarsT[subprobIndex].begin(); + it != potentialVarsT[subprobIndex].end(); it++) { + varRedCost = (*it)->getReducedCost(); + whichBlock = (*it)->getBlockId(); + if ((*it)->getVarType() == DecompVar_Point) { + alpha = u[nBaseCoreRows + whichBlock]; + } else if ((*it)->getVarType() == DecompVar_Ray) { + alpha = 0; + } + UTIL_DEBUG(m_app->m_param.LogDebugLevel, 3, + (*m_osLog) << "alpha[block=" << whichBlock << "]:" << alpha + << " varRedCost: " << varRedCost << "\n";); + } + } +#ifdef _OPENMP + UTIL_DEBUG( + m_app->m_param.LogDebugLevel, 3, + (*m_osLog) << "===== END Threaded solve of subproblems. =====\n";); +#endif -//------------------------------------------------------------------------- // -//member of varpool versus algo class? different for DC?? -void DecompAlgo::addVarsToPool(DecompVarList& newVars) -{ - int blockIndex; - double* denseCol = NULL; - CoinPackedVector* sparseCol = NULL; - DecompConstraintSet* modelCore = m_modelCore.getModel(); - UtilPrintFuncBegin(m_osLog, m_classTag, - "addVarsToPool()", m_param.LogDebugLevel, 2); - //printf("varpool size=%d\n", m_varpool.size()); - //--- - //--- sanity check - make sure the number of rows in core is - //--- num of (orig+branch+cuts) in LP formulation - //--- - UTIL_DEBUG(m_app->m_param.LogDebugLevel, 5, - (*m_osLog) - << "num original= " << getNumRowType(DecompRow_Original) << "\n" - << "num branch = " << getNumRowType(DecompRow_Branch) << "\n" - << "num cut = " << getNumRowType(DecompRow_Cut) << "\n" - << "num convex = " << getNumRowType(DecompRow_Convex) << "\n" - << "num core = " << modelCore->getNumRows() << "\n"; - ); - - if (m_algo != DECOMP) { - assert((getNumRowType(DecompRow_Original) + - getNumRowType(DecompRow_Branch) + - getNumRowType(DecompRow_Cut)) == modelCore->getNumRows()); - denseCol = new double[modelCore->getNumRows() + m_numConvexCon]; - } - - //--- - //--- is it ok to purge vars that are parallel? 
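In the reformatted lines above, the "price all blocks" branch solves each block's pricing subproblem on its own OpenMP thread, capping the thread count at the number of blocks and merging the per-block variable lists afterwards. A compressed sketch of that pattern, with a hypothetical solveBlock() standing in for solveRelaxed():

    // Sketch: parallel per-block pricing with OpenMP (solveBlock is hypothetical).
    #include <algorithm>
    #include <vector>
    #ifdef _OPENMP
    #include <omp.h>
    #endif

    std::vector<double> priceAllBlocksParallel(int numBlocks, int maxThreads) {
      std::vector<double> bestRedCost(numBlocks, 0.0);
    #ifdef _OPENMP
      omp_set_num_threads(std::min(maxThreads, numBlocks));
    #endif
    #pragma omp parallel for schedule(dynamic, 1)
      for (int b = 0; b < numBlocks; b++) {
        // bestRedCost[b] = solveBlock(b);   // hypothetical per-block solve
        bestRedCost[b] = 0.0;                // placeholder so the sketch compiles
      }
      return bestRedCost;
    }
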
- //--- just make sure at least one gets thru so process can continue - //--- NOTE: in case of RoundRobin, this will cause parallel check - //--- to never be activated, if pull in only one col at a time - //--- - //--- - //--- as soon as found one good, can purge rest - //--- problem is, what if cols are par, par, not-par, not-par - //--- then, we accept the first two, even though should not have - //--- - bool foundGoodCol = false; - DecompVarList::iterator li; - - for (li = newVars.begin(); li != newVars.end(); li++) { + // put the vars from all threads into one vector + for (int subprobIndex = 0; subprobIndex < m_numConvexCon; subprobIndex++) { + for (it = potentialVarsT[subprobIndex].begin(); + it != potentialVarsT[subprobIndex].end(); it++) { + potentialVars.push_back(*it); + } + } + + UTIL_DELARR(potentialVarsT); + + potentialVarsT = new DecompVarList[m_numConvexCon]; + map>::iterator mivt; + vector::iterator vit; + + for (mivt = m_modelRelaxNest.begin(); mivt != m_modelRelaxNest.end(); + mivt++) { + for (vit = (*mivt).second.begin(); vit != (*mivt).second.end(); vit++) { + b = (*vit).getBlockId(); + alpha = u[nBaseCoreRows + b]; + UTIL_DEBUG(m_app->m_param.LogDebugLevel, 4, + (*m_osLog) << "solve relaxed nested model = " + << (*vit).getModelName() << endl;); + timeLimit = max(m_param.SubProbTimeLimitExact - + m_stats.timerOverall.getRealTime(), + 0.0); + solveRelaxed(redCostX, + origObjective, // original cost vector + alpha, + nCoreCols, // num core columns + true, // isNested + (*vit), + &solveResult, // results + potentialVarsT[b], // var list to populate + timeLimit); + + if (solveResult.m_isCutoff) { + mostNegRCvec[b] = min(mostNegRCvec[b], 0.0); + } + } + } + // put the vars from all threads into one vector + for (int subprobIndex = 0; subprobIndex < m_numConvexCon; subprobIndex++) { + for (it = potentialVarsT[subprobIndex].begin(); + it != potentialVarsT[subprobIndex].end(); it++) { + potentialVars.push_back(*it); + } + } + + UTIL_DELARR(potentialVarsT); + + } // END: if(doAllBlocks) + else { + //--- + //--- Ask the user which blocks should be solved. + //--- The user might also provide a different set of duals for + //--- each block. If so, use that to calculate reduced cost for + //--- that block. 
+ //--- + vector blocksToSolve; + map> userDualsByBlock; + m_app->solveRelaxedWhich(blocksToSolve, userDualsByBlock); + UTIL_MSG(m_app->m_param.LogDebugLevel, 3, (*m_osLog) << "Blocks to solve: "; + UtilPrintVector(blocksToSolve, m_osLog);); + int nBlocks = static_cast(blocksToSolve.size()); + //--- + //--- keep trying until find a block with candidate variable + //--- + bool foundNegRC = false; + + for (i = 0; i < nBlocks; i++) { + b = blocksToSolve[i]; + //--- + //--- make sure the model for this block can be found //--- - //--- get dense column = A''s, append convexity constraint on end - //--- THINK: PC specific - //--- - //TODO - fix this derive method for decomp - if (m_algo == DECOMP) { - blockIndex = (*li)->getBlockId(); - sparseCol = new CoinPackedVector((*li)->m_s); - UTIL_DEBUG(m_app->m_param.LogDebugLevel, 5, - (*m_osLog) << "\nPRINT m_s\n"; - UtilPrintPackedVector((*li)->m_s); - ); - //add in convexity constraint - sparseCol->insert(modelCore->getNumCols() + blockIndex, 1.0); - UTIL_DEBUG(m_app->m_param.LogDebugLevel, 5, - (*m_osLog) << "\nPRINT sparseCol\n"; - UtilPrintPackedVector(*sparseCol); - ); + map::iterator mit; + mit = m_modelRelax.find(b); + assert(mit != m_modelRelax.end()); + //--- + //--- get the OSI objet + //--- + DecompSubModel &subModel = (*mit).second; + //--- + //--- did the user provide a specific dual for this block + //--- + map>::iterator mitv; + mitv = userDualsByBlock.find(b); + + if (mitv == userDualsByBlock.end()) { + if (m_param.LogDebugLevel >= 3) + (*m_osLog) << "Block b: " << b << " using standard duals" << endl; + + if (m_param.LogDebugLevel >= 4) { + for (int i = 0; i < m; i++) { + (*m_osLog) << "r:" << i << "dual: " << u[i] << endl; + } + } + + //--- + //--- NOTE: the variables coming back include alpha in + //--- calculation of reduced cost + //--- + alpha = u[nBaseCoreRows + b]; + timeLimit = max(m_param.SubProbTimeLimitExact - + m_stats.timerOverall.getRealTime(), + 0.0); + solveRelaxed(redCostX, origObjective, alpha, nCoreCols, + false, // isNested + subModel, &solveResult, potentialVars, timeLimit); } else { - //--- - //--- this creates a dense array of the column (in x-space) - //--- it is stored as a sparse vector, so we have translate - //--- it to do the matrix multiply - //--- - //--- we put the dense array into the mem pool at dblArrNCoreCols - //--- - (*li)->fillDenseArr(modelCore->getNumCols(), - m_memPool.dblArrNCoreCols); - //--- - //--- modelCore->M = A'' + branch-rows + new cuts - //--- here, we caculate M.s (where s is a var in x-space) - //--- - modelCore->M->times(m_memPool.dblArrNCoreCols, denseCol); - //--- - //--- Let A'' = original rows and branching rows. - //--- - //--- a dense column here gives the coefficients in the - //--- reformulation by [A'' + cuts] * column (in x-space) - //--- - //--- dimensions: - //--- (m'' + ncuts x n) * (n x 1) -> (m'' + ncuts x 1) - //--- - //--- but this is missing the convexity constraints, and these - //--- need to be in order as originally setup (after A'', but - //--- before cuts) - //--- - //--- shift all the cuts over to make space for convexity cons - //--- - //--- since the convexity constraints are just sum{} lambda = 1 - //--- we know that there is exactly one entry 1.0 for the approriate - //--- convexity row (depends on block id) - //--- - //--- r[0], r[1], ..., r[m''-1], - //--- cut[0], cut[1], ... cut[ncuts-1] - //--- --> - //--- r[0], r[1], ..., r[m''-1], - //--- conv[0], conv[1], ..., conv[b-1], - //--- cut[0], cut[1], ... 
cut[ncuts-1] - //--- - int r, b; - //number of rows in core (A''+cuts) - int mpp = modelCore->getNumRows(); - //number of rows in original core (before cuts: A'') - int convexity_index = modelCore->nBaseRows; - //in the master, the convexity constraints are put just - // after A'' (before any cuts were added) - assert(m_masterRowType[convexity_index] == DecompRow_Convex); - assert(mpp - convexity_index == getNumRowType(DecompRow_Cut)); - //--- - //--- for each cut row, move it to right/down - //--- o=original, b=branch, x=convex, c=cut - //--- 0123456789012 - //--- LP : oooobbbbxxccc - //--- Core (current denseCol): oooobbbbccc - //--- make room in denseCol, 0 out, then fill in for block - //--- nRows=13, nCuts=3, nConv=2 - //--- r=12..10 <-- r=10..8 - //--- 0123456789012 - //--- : oooobbbb..ccc - //--- : oooobbbb00ccc - //--- : oooobbbb10ccc - //--- - int nCuts = mpp - convexity_index; - int nRows = m_masterSI->getNumRows(); - assert(nRows == mpp + m_numConvexCon); - - for (r = (nRows - 1); r >= (nRows - nCuts); r--) { - denseCol[r] = denseCol[r - m_numConvexCon]; - } - - for (b = 0; b < m_numConvexCon; b++) { - denseCol[convexity_index + b] = 0.0; - } + vector &uBlockV = mitv->second; + double *uBlock = &uBlockV[0]; + double *redCostXb = 0; + double *uBlockAdj = 0; + + if (static_cast(uBlockV.size()) != m) { + throw UtilException( + "The size of the user dual vector is not the same as the", + "number of master rows generateVars", "DecompAlgo"); + } + + if (m_param.LogDebugLevel >= 3) { + (*m_osLog) << "Block b: " << b << " using user manipulated duals" + << endl; + } + + if (m_param.LogDebugLevel >= 4) { + for (int i = 0; i < m; i++) { + (*m_osLog) << "r:" << i << "dual: " << uBlock[i] << endl; + } + } + + redCostXb = new double[nCoreCols]; // (c - uhat.A") in x-space + CoinAssertHint(redCostXb, "Error: Out of Memory"); + uBlockAdj = new double[m - m_numConvexCon]; + CoinAssertHint(uBlockAdj, "Error: Out of Memory"); + //--- + //--- remove the convexity constraints from the dual vector + //--- + generateVarsAdjustDuals(uBlock, uBlockAdj); + //--- + //--- calculate reduced costs + //--- + generateVarsCalcRedCost(uBlockAdj, redCostXb); + //--- + //--- solve relaxed problem + //--- + alpha = uBlockAdj[nBaseCoreRows + b]; + timeLimit = max(m_param.SubProbTimeLimitExact - + m_stats.timerOverall.getRealTime(), + 0.0); + solveRelaxed(redCostXb, origObjective, alpha, nCoreCols, + false, // isNested + subModel, &solveResult, potentialVars, timeLimit); + UTIL_DELARR(redCostXb); + UTIL_DELARR(uBlockAdj); + } + + if (solveResult.m_isCutoff) { + mostNegRCvec[b] = min(mostNegRCvec[b], 0.0); + } + + m_rrLastBlock = b; + foundNegRC = false; + + for (it = potentialVars.begin(); it != potentialVars.end(); it++) { + varRedCost = (*it)->getReducedCost(); + + if (varRedCost < -m_param.RedCostEpsilon) { // TODO: strict, -dualTOL? 
+ foundNegRC = true; + } + } + } // END:for(i = 0; i < nBlocks; i++) + + m_rrIterSinceAll++; + + //--- + //--- if we searched through all the blocks but still didn't + //--- find any columns with negative reduced cost, then we CAN + //--- update the LB and should - as we have priced out + //--- + // if(!foundNegRC) + // m_rrIterSinceAll = 0; + //--- + //--- if user provided blocks found no negRC, solve all blocks + //--- + if (!foundNegRC) { + printf("no neg rc from user blocks, solve all blocks\n"); + + // TODO: make this a function (to solve all blocks) + map::iterator mit; - denseCol[convexity_index + (*li)->getBlockId()] = 1.0; + for (mit = m_modelRelax.begin(); mit != m_modelRelax.end(); mit++) { + DecompSubModel &subModel = (*mit).second; + b = subModel.getBlockId(); + UTIL_DEBUG(m_app->m_param.LogDebugLevel, 4, + (*m_osLog) << "solve relaxed model = " + << subModel.getModelName() << endl;); + //--- + //--- PC: get dual vector + //--- alpha --> sum{s} lam[s] = 1 - convexity constraint + //--- + alpha = u[nBaseCoreRows + b]; + // TODO: stat return, restrict how many? pass that in to user? + //--- + //--- NOTE: the variables coming back include alpha in + //--- calculation of reduced cost + //--- + timeLimit = max(m_param.SubProbTimeLimitExact - + m_stats.timerOverall.getRealTime(), + 0.0); + solveRelaxed(redCostX, origObjective, alpha, nCoreCols, + false, // isNested + subModel, &solveResult, potentialVars, timeLimit); + + // if cutoff delcares infeasible, we know subprob >= 0 + // we can use 0 as valid (but possibly weaker bound) + if (solveResult.m_isCutoff) { + mostNegRCvec[b] = min(mostNegRCvec[b], 0.0); + } + } + + map>::iterator mivt; + vector::iterator vit; + + for (mivt = m_modelRelaxNest.begin(); mivt != m_modelRelaxNest.end(); + mivt++) { + for (vit = (*mivt).second.begin(); vit != (*mivt).second.end(); vit++) { + b = (*vit).getBlockId(); + alpha = u[nBaseCoreRows + b]; + UTIL_DEBUG(m_app->m_param.LogDebugLevel, 4, + (*m_osLog) << "solve relaxed nested model = " + << (*vit).getModelName() << endl;); + timeLimit = max(m_param.SubProbTimeLimitExact - + m_stats.timerOverall.getRealTime(), + 0.0); + solveRelaxed(redCostX, + origObjective, // original cost vector + alpha, + nCoreCols, // num core columns + true, // isNested + (*vit), + &solveResult, // results + potentialVars, // var list to populate + timeLimit); + + if (solveResult.m_isCutoff) { + mostNegRCvec[b] = min(mostNegRCvec[b], 0.0); + } + } + } - if ((*li)->getVarType() == DecompVar_Ray) { - denseCol[convexity_index + (*li)->getBlockId()] = 0.0; - } + m_rrIterSinceAll = 0; + } + } // END: else(doAllBlocks) + + for (it = potentialVars.begin(); it != potentialVars.end(); it++) { + varRedCost = (*it)->getReducedCost(); + whichBlock = (*it)->getBlockId(); + alpha = u[nBaseCoreRows + whichBlock]; + UTIL_DEBUG(m_app->m_param.LogDebugLevel, 3, + (*m_osLog) << "alpha[block=" << whichBlock << "]:" << alpha + << " varRedCost: " << varRedCost << "\n";); + + //--- + //--- unlikey to happen - but we should check ALL columns + //--- to see if they are IP feasible - whether or not the + //--- column has negative red-cost + //--- + // THINK: in blocks case, these are partial columns + // and this check can be relatively expensive for + // large number of original columns. + // But, it is impossible for the partial column to + // be feasible to full IP - so this check is useless. 
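After every block has been priced, the loop that follows tallies the bound contribution: for multiple blocks, mostNegReducedCost is the sum over blocks of each block's most negative reduced cost, with a block that priced out (or was cut off) contributing zero, and only columns below -RedCostEpsilon are pushed into the master. The aggregation itself, in isolation (a sketch, not the member code):

    // Sketch: aggregate per-block most negative reduced costs into one value.
    #include <algorithm>
    #include <vector>

    double sumMostNegativeRedCosts(const std::vector<double> &mostNegPerBlock) {
      double total = 0.0;
      for (double rc : mostNegPerBlock)
        total += std::min(rc, 0.0);    // a block with nothing negative adds 0
      return total;
    }
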
+ if (m_numConvexCon == 1) { + (*it)->fillDenseArr(modelCore->getNumCols(), m_memPool.dblArrNCoreCols); + + //--- + //--- STOP: this isIpFeasible uses isLPFeasible + //--- isLPFeasible should have 2 settings + //--- here, it can be true, but might not be + //--- if checking recompsed point, it must be true, else bug + //--- + if (isIPFeasible(m_memPool.dblArrNCoreCols)) { + if (m_app->APPisUserFeasible(m_memPool.dblArrNCoreCols, + modelCore->getNumCols(), + m_param.TolZero)) { + DecompSolution *decompSol = new DecompSolution( + modelCore->getNumCols(), m_memPool.dblArrNCoreCols, + (*it)->getOriginalCost()); + // TODO: solution pool? + m_xhatIPFeas.push_back(decompSol); + setObjBoundIP((*it)->getOriginalCost()); + } + } + } + + //--- + //--- for multi-blocks the mostNegReducedCost is the + //--- sum of the best reduced costs over all blocks + //--- NOTE: we need all blocks to make it valid + //--- + ////////////STOP: this would explain why the LB seems wrong + //////////// on ATM model, since we were stopping on gap, but + //////////// declaring it optimal. Now, it is fixed and the bound + //////////// should be valid, but stopping on gap won't be valid. + //--- TODO: if a block was NOT solved to optimality, + //--- we can still use the problems LB, but that will NOT + //--- be equivalent to its varRedCost - so we need to return + //--- that value as well, if we want to use it + //--- The red-cost does not have to be used in bound calculation + //--- it is only relevant for deciding on entering columns + //--- + if (varRedCost < mostNegRCvec[whichBlock]) { + mostNegRCvec[whichBlock] = varRedCost; + } + + if (varRedCost < -m_param.RedCostEpsilon) { // TODO: strict, -dualTOL? + UTIL_MSG(m_app->m_param.LogDebugLevel, 3, + (*m_osLog) << "PUSHING new var with varRedCost= " + << UtilDblToStr(varRedCost, 5) << endl;); + //--- + //--- the variable has neg reduced cost, push onto list + //--- + newVars.push_back(*it); + } else { + UTIL_DELPTR(*it); + } + } + + mostNegReducedCost = 0.0; + + for (b = 0; b < m_numConvexCon; b++) { + mostNegReducedCost += mostNegRCvec[b]; + UTIL_MSG(m_app->m_param.LogDebugLevel, 3, + (*m_osLog) << "mostNegR[block=" << b << "]: " << mostNegRCvec[b] + << " mostNegReducedCost: " << mostNegReducedCost + << "\n";); + } + + UTIL_DEBUG(m_app->m_param.LogDebugLevel, 4, + (*m_osLog) << "m_rrIterSinceAll = " << m_rrIterSinceAll << endl;); + potentialVars.clear(); // THINK? what does clear do exactly ? + UTIL_DEBUG( + m_app->m_param.LogDebugLevel, 4, + + for (it = newVars.begin(); it != newVars.end(); + it++) { (*it)->print(m_infinity, m_osLog, m_app); }); + //--- + //--- free local memory + //--- + UTIL_DELARR(u_adjusted); + UTIL_DELARR(redCostX); + m_stats.thisGenVars.push_back(m_stats.timerOther1.getRealTime()); + UtilPrintFuncEnd(m_osLog, m_classTag, "generateVars()", m_param.LogDebugLevel, + 2); + return static_cast(newVars.size()); +} - //--- - //--- creat a sparse column from the dense column - //--- - sparseCol - = UtilPackedVectorFromDense(modelCore->getNumRows() + - m_numConvexCon, - denseCol, m_app->m_param.TolZero); - UTIL_DEBUG(m_app->m_param.LogDebugLevel, 5, - (*m_osLog) << "\nPRINT sparseCol\n"; - UtilPrintPackedVector(*sparseCol); - ); - }//END: else(m_algo == DECOMP) - - DecompWaitingCol waitingCol(*li, sparseCol); - - //TOOD: since DecompVarList does not have its own class... - // this is ugly, fix this later... make a helper funciton of DecompVar? 
- //TODO: this is very expensive - use hash like in cuts - if (m_varpool.isDuplicate(m_vars, waitingCol)) { - UTIL_DEBUG(m_app->m_param.LogDebugLevel, 3, - (*m_osLog) << "Duplicate variable, already in vars!!\n"; - (*li)->print(m_infinity, - m_osLog, - modelCore->getColNames(), - NULL); - ); - UTIL_DEBUG(m_app->m_param.LogDebugLevel, 5, - (*m_osLog) << "\nVAR POOL:\n"; - m_varpool.print(m_infinity, m_osLog); - (*m_osLog) << "\nVARS:\n"; - printVars(m_osLog); - ); - waitingCol.deleteVar(); - waitingCol.deleteCol(); - - if (m_algo != RELAX_AND_CUT) { //?? - m_nodeStats.varsThisCall--; - m_nodeStats.varsThisRound--; - } +// TODO - ugh! only PC again? +//------------------------------------------------------------------------ // +// this seems ok for C and PC... but what when we want to do DC within C THINK +int DecompAlgo::generateCuts(double *xhat, DecompCutList &newCuts) { + UtilPrintFuncBegin(m_osLog, m_classTag, "generateCuts()", + m_param.LogDebugLevel, 2); + m_stats.timerOther1.reset(); + DecompConstraintSet *modelCore = m_modelCore.getModel(); + m_app->generateCuts(xhat, newCuts); + + //--- + //--- attempt to generate CGL cuts on x?? + //--- the only way this is going to work, is if you carry + //--- around another OSI problem instance completely in terms of x + //--- only allow CGL to generate cuts on the original full formulation?? + //--- otherwise, we'd be allowing cuts on cuts - would have to add cuts + //--- to master and to this version... hmmm... ughh + //--- for some problems, you can't have the full original formulation + //--- for PC you probably don't want it anyway... P' comes in from ep's + //--- you can just generate cuts on Q"? that cut off xhat + //--- but for C, you need Q' and Q" + //--- m_masterSI holds the problem in terms of lambda (over Q") + //--- m_subprobSI holds the problem in terms of x (over P') + //--- + if (m_param.CutCGL) { + assert(m_cutgenSI); + + if (m_algo == PRICE_AND_CUT) { + // TODO: this could be tighter in the tree + double gLB = getNodeIndex() ? m_globalLB : m_nodeStats.objBest.first; + m_cutgenSI->setRowLower(m_cutgenObjCutInd, gLB); + } + + if (m_param.LogDumpModel > 1) { + string baseName = "cutgenProb"; - continue; + if (m_isStrongBranch) { + baseName += "_SB"; } - if (m_varpool.isDuplicate(waitingCol)) { - UTIL_DEBUG(m_app->m_param.LogDebugLevel, 3, - (*m_osLog) << "Duplicate variable, already in var pool.\n"; - ); - waitingCol.deleteVar(); - waitingCol.deleteCol(); + printCurrentProblem(m_cutgenSI, baseName, m_nodeStats.nodeIndex, + m_nodeStats.cutCallsTotal, + m_nodeStats.priceCallsTotal); + } - if (m_algo != RELAX_AND_CUT) { //?? - m_nodeStats.varsThisCall--; - m_nodeStats.varsThisRound--; - } + m_cgl->generateCuts(m_cutgenSI, m_masterSI, xhat, modelCore->integerVars, + newCuts); + } - continue; - } +#if 1 - //--- - //--- check to see if this var is parallel to the ones in LP - //--- cosine=1.0 means the vars are exactly parallel - //--- - if (foundGoodCol && - m_varpool.isParallel(m_vars, waitingCol, m_param.ParallelColsLimit)) { - UTIL_DEBUG(m_app->m_param.LogDebugLevel, 3, - (*m_osLog) << "Parallel variable, already in vars.\n"; - ); - waitingCol.deleteVar(); - waitingCol.deleteCol(); - - if (m_algo != RELAX_AND_CUT) { //?? 
- m_nodeStats.varsThisCall--; - m_nodeStats.varsThisRound--; - } + // do DC only if no other cuts were found or if CutDC=2 + // this is the case of doing for init vars + if ((m_param.CutDC == 1 && newCuts.size() == 0) || (m_param.CutDC == 2)) { + // printf("\n\n==================================== IN DECOMP\n\n"); + DecompAlgoD D(m_app, *m_utilParam, xhat, modelCore->getNumCols()); + // also might want to use the columns you get here for something... + // heur for ubs, etc.. + // either returns a set of cuts or a decomposition, have + // that wrap solve()? + D.solveD(&newCuts); + //--- + //--- copy the vars generated in passes of DC into initVars + //--- to warm-start DW master formulation + //--- + //--- NO: this won't work because it just copies the pointers + //--- and these will be deleted when D scopes out - see DecompAlgo + //--- destructor... + // m_vars.insert(m_vars.begin(), D.m_vars.begin(), D.m_vars.end()); + //--- + //--- this moves the elements of D.m_vars to m_vars + //--- this is what we want since D will be deleted after this + //--- + m_vars.splice(m_vars.end(), D.m_vars); + // printf("VARS moved into CPM object\n"); + // printVars(m_osLog);//use this to warm start DW + // a hidden advantage of decomp in BC? + DecompSolution *bestSol = NULL; + vector::iterator it; + double thisBound; + double bestBoundUB = m_nodeStats.objBest.second; + + for (it = D.m_xhatIPFeas.begin(); it != D.m_xhatIPFeas.end(); it++) { + thisBound = (*it)->getQuality(); + UTIL_DEBUG(m_param.LogDebugLevel, 3, + (*m_osLog) << "From DECOMP, IP Feasible with Quality ="; + (*m_osLog) << thisBound << endl;); + + if ((*it)->getQuality() <= bestBoundUB) { + bestBoundUB = (*it)->getQuality(); + bestSol = (*it); + } + } + + // need to make copy of solution, since D.m_xhatIpFeas goes out of scope + if (bestSol) { + DecompSolution *bestSolCp = new DecompSolution(*bestSol); + m_xhatIPFeas.push_back(bestSolCp); + setObjBoundIP(bestSolCp->getQuality()); + m_xhatIPBest = bestSolCp; + // m_xhatIPBest->print(); + } + + // this could also very likely return a new gUB - + // for the case when it does find a decomposition + // and luckily it is feasible to original? + // STOP -- 6/6/08 + // if decomp is found, then can't use currently - just looking for + // farkas -- if decomp is found this means that z_LP = z_DW for that + // relaxation?? + // printf("D.m_stopCriteria = %s\n", + // DecompAlgoStopStr[D.m_stopCriteria].c_str()); + // who deletes this memory? better to pass in newCuts.. + // printf("\n\n====================================OUT DECOMP\n\n"); + // exit(1); + } - continue; - } +#endif + m_stats.thisGenCuts.push_back(m_stats.timerOther1.getRealTime()); + UtilPrintFuncEnd(m_osLog, m_classTag, "generateCuts()", m_param.LogDebugLevel, + 2); + return static_cast(newCuts.size()); +} +//------------------------------------------------------------------------- // +// member of varpool versus algo class? different for DC?? 
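The addVarsToPool() routine below turns a subproblem solution s (in x-space) into a master column: it multiplies s by the core rows [A'' ; cuts], shifts the cut entries down to make room for the convexity rows, and places a 1.0 (or 0.0 for an extreme ray) in the convexity row of the block that produced s. A minimal standalone sketch of that bookkeeping, not part of the patch, using plain C++ containers and a hypothetical buildMasterColumn helper instead of the DIP/COIN-OR types:

// Illustrative sketch only: plain C++ containers, not the DIP/COIN-OR API.
#include <cstddef>
#include <vector>

// Map a subproblem solution s (x-space) from block `blockId` to a dense
// master column. Master row order: A'' rows (original + branch), then one
// convexity row per block, then cut rows.
//   coreRows  : dense rows of [A'' ; cuts], each of length s.size()
//   nBaseRows : number of A'' rows (the cut rows follow them in coreRows)
//   numBlocks : number of convexity constraints in the master
std::vector<double> buildMasterColumn(
    const std::vector<std::vector<double> > &coreRows,
    const std::vector<double> &s, int nBaseRows, int numBlocks, int blockId,
    bool isRay) {
  std::vector<double> col(coreRows.size() + numBlocks, 0.0);
  for (std::size_t r = 0; r < coreRows.size(); ++r) {
    double v = 0.0;
    for (std::size_t j = 0; j < s.size(); ++j) {
      v += coreRows[r][j] * s[j];
    }
    // cut rows are shifted down by numBlocks to leave room for convexity rows
    std::size_t masterRow =
        (static_cast<int>(r) < nBaseRows) ? r : r + numBlocks;
    col[masterRow] = v;
  }
  // convexity entry: 1.0 for an extreme point, 0.0 for an extreme ray
  col[nBaseRows + blockId] = isRay ? 0.0 : 1.0;
  return col;
}

In the actual code this product is computed with modelCore->M->times() into a dense buffer, the convexity entries are spliced in by the shift loop shown in the hunk, and the result is sparsified with UtilPackedVectorFromDense before being wrapped in a DecompWaitingCol.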
+void DecompAlgo::addVarsToPool(DecompVarList &newVars) { + int blockIndex; + double *denseCol = NULL; + CoinPackedVector *sparseCol = NULL; + DecompConstraintSet *modelCore = m_modelCore.getModel(); + UtilPrintFuncBegin(m_osLog, m_classTag, "addVarsToPool()", + m_param.LogDebugLevel, 2); + // printf("varpool size=%d\n", m_varpool.size()); + //--- + //--- sanity check - make sure the number of rows in core is + //--- num of (orig+branch+cuts) in LP formulation + //--- + UTIL_DEBUG( + m_app->m_param.LogDebugLevel, 5, + (*m_osLog) << "num original= " << getNumRowType(DecompRow_Original) + << "\n" + << "num branch = " << getNumRowType(DecompRow_Branch) << "\n" + << "num cut = " << getNumRowType(DecompRow_Cut) << "\n" + << "num convex = " << getNumRowType(DecompRow_Convex) << "\n" + << "num core = " << modelCore->getNumRows() << "\n";); + + if (m_algo != DECOMP) { + assert((getNumRowType(DecompRow_Original) + + getNumRowType(DecompRow_Branch) + getNumRowType(DecompRow_Cut)) == + modelCore->getNumRows()); + denseCol = new double[modelCore->getNumRows() + m_numConvexCon]; + } + + //--- + //--- is it ok to purge vars that are parallel? + //--- just make sure at least one gets thru so process can continue + //--- NOTE: in case of RoundRobin, this will cause parallel check + //--- to never be activated, if pull in only one col at a time + //--- + //--- + //--- as soon as found one good, can purge rest + //--- problem is, what if cols are par, par, not-par, not-par + //--- then, we accept the first two, even though should not have + //--- + bool foundGoodCol = false; + DecompVarList::iterator li; + + for (li = newVars.begin(); li != newVars.end(); li++) { + //--- + //--- get dense column = A''s, append convexity constraint on end + //--- THINK: PC specific + //--- + // TODO - fix this derive method for decomp + if (m_algo == DECOMP) { + blockIndex = (*li)->getBlockId(); + sparseCol = new CoinPackedVector((*li)->m_s); + UTIL_DEBUG(m_app->m_param.LogDebugLevel, 5, (*m_osLog) << "\nPRINT m_s\n"; + UtilPrintPackedVector((*li)->m_s);); + // add in convexity constraint + sparseCol->insert(modelCore->getNumCols() + blockIndex, 1.0); + UTIL_DEBUG(m_app->m_param.LogDebugLevel, 5, + (*m_osLog) << "\nPRINT sparseCol\n"; + UtilPrintPackedVector(*sparseCol);); + } else { //--- - //--- passed all filters, add the column to var pool + //--- this creates a dense array of the column (in x-space) + //--- it is stored as a sparse vector, so we have translate + //--- it to do the matrix multiply //--- - m_varpool.push_back(waitingCol); - foundGoodCol = true; - } //END: for(li = newVars.begin(); li != newVars.end(); li++) - - //--- - //--- in the case of Wengtes, you might get all duplicate - //--- columns - if this is the case, you don't want to stop - //--- searching - rather, reduce alpha and repeat gen vars - //--- - if (m_phase == PHASE_PRICE2 && - newVars.size() > 0 && - !foundGoodCol && m_param.DualStab) { - m_phaseForce = PHASE_PRICE2; - m_param.DualStabAlpha *= 0.90; - - if (m_param.LogDebugLevel >= 2) - (*m_osLog) << "No vars passed doing Wengtes. Reduce alpha to " - << m_param.DualStabAlpha << " and repeat." 
<< endl; - + //--- we put the dense array into the mem pool at dblArrNCoreCols //--- - //--- adjust dual solution with updated stability parameter + (*li)->fillDenseArr(modelCore->getNumCols(), m_memPool.dblArrNCoreCols); //--- - adjustMasterDualSolution(); - } else { - m_phaseForce = PHASE_UNKNOWN; - } - - //--- - //--- if Wengtes parameter has been reduced, set it back to original - //--- - if (foundGoodCol && m_param.DualStabAlpha < m_param.DualStabAlphaOrig) { - m_param.DualStabAlpha = m_param.DualStabAlphaOrig; - - if (m_param.LogDebugLevel >= 2) - (*m_osLog) << "Good column found doing Wengtes. Setting alpha back " - << "to its original setting " - << m_param.DualStabAlpha << "." << endl; - } - - UTIL_DELARR(denseCol); - UtilPrintFuncEnd(m_osLog, m_classTag, - "addVarsToPool()", m_param.LogDebugLevel, 2); -} + //--- modelCore->M = A'' + branch-rows + new cuts + //--- here, we caculate M.s (where s is a var in x-space) + //--- + modelCore->M->times(m_memPool.dblArrNCoreCols, denseCol); + //--- + //--- Let A'' = original rows and branching rows. + //--- + //--- a dense column here gives the coefficients in the + //--- reformulation by [A'' + cuts] * column (in x-space) + //--- + //--- dimensions: + //--- (m'' + ncuts x n) * (n x 1) -> (m'' + ncuts x 1) + //--- + //--- but this is missing the convexity constraints, and these + //--- need to be in order as originally setup (after A'', but + //--- before cuts) + //--- + //--- shift all the cuts over to make space for convexity cons + //--- + //--- since the convexity constraints are just sum{} lambda = 1 + //--- we know that there is exactly one entry 1.0 for the approriate + //--- convexity row (depends on block id) + //--- + //--- r[0], r[1], ..., r[m''-1], + //--- cut[0], cut[1], ... cut[ncuts-1] + //--- --> + //--- r[0], r[1], ..., r[m''-1], + //--- conv[0], conv[1], ..., conv[b-1], + //--- cut[0], cut[1], ... cut[ncuts-1] + //--- + int r, b; + // number of rows in core (A''+cuts) + int mpp = modelCore->getNumRows(); + // number of rows in original core (before cuts: A'') + int convexity_index = modelCore->nBaseRows; + // in the master, the convexity constraints are put just + // after A'' (before any cuts were added) + assert(m_masterRowType[convexity_index] == DecompRow_Convex); + assert(mpp - convexity_index == getNumRowType(DecompRow_Cut)); + //--- + //--- for each cut row, move it to right/down + //--- o=original, b=branch, x=convex, c=cut + //--- 0123456789012 + //--- LP : oooobbbbxxccc + //--- Core (current denseCol): oooobbbbccc + //--- make room in denseCol, 0 out, then fill in for block + //--- nRows=13, nCuts=3, nConv=2 + //--- r=12..10 <-- r=10..8 + //--- 0123456789012 + //--- : oooobbbb..ccc + //--- : oooobbbb00ccc + //--- : oooobbbb10ccc + //--- + int nCuts = mpp - convexity_index; + int nRows = m_masterSI->getNumRows(); + assert(nRows == mpp + m_numConvexCon); -//------------------------------------------------------------------------- // -void DecompAlgo::addVarsFromPool() -{ - //TODO: we have checked to make sure there are no dups added to pool - // do we also need to check that no dup vars are added to LP? 
for that - // we'd have to check across m_vars - UtilPrintFuncBegin(m_osLog, m_classTag, - "addVarsFromPool()", m_param.LogDebugLevel, 2); - //TODO: - // const int maxvars_toadd = m_app->m_param.maxvars_periter; - // int n_newcols = std::min(m_varpool.size(), maxvars_toadd); - DecompVarPool::iterator vi; - DecompVarPool::iterator viLast; - int n_newcols = static_cast(m_varpool.size()); - - if (n_newcols == 0) { - UtilPrintFuncEnd(m_osLog, m_classTag, - "addVarsFromPool()", m_param.LogDebugLevel, 2); - return; - } - - //--- - //--- sort the pool by increasing reduced cost - //--- - partial_sort(m_varpool.begin(), - m_varpool.begin() + n_newcols, - m_varpool.end(), - is_less_thanD()); - UTIL_MSG(m_app->m_param.LogDebugLevel, 3, - (*m_osLog) << "size: var pool = " << m_varpool.size(); - (*m_osLog) << " master cols = " << m_masterSI->getNumCols() - << endl; - ); - UTIL_DEBUG(m_app->m_param.LogDebugLevel, 10, - (*m_osLog) << "\nVAR POOL BEFORE:\n"; - m_varpool.print(m_infinity, m_osLog); - (*m_osLog) << "\nVARS BEFORE:\n"; - printVars(m_osLog); - ); - //--- - //--- never add anything with pos rc - //--- - int index = 0; - - for (vi = m_varpool.begin(); vi != m_varpool.end(); vi++) { - if (m_algo != RELAX_AND_CUT) { //THINK?? - if ((*vi).getReducedCost() > -0.0000001) { //TODO - param - break; - } + for (r = (nRows - 1); r >= (nRows - nCuts); r--) { + denseCol[r] = denseCol[r - m_numConvexCon]; } - index++; - } - - n_newcols = std::min(n_newcols, index); - //TODO - /*if(n_newcols > 0) - m_cutpool.setRowsAreValid(false);*/ - //see CoinBuild or switch to ind,els,beg form - //--- - //--- 1.) build up the block of columns to be added to the master - //--- create a block for speed, rather than one column at a time - //--- 2.) copy the var pointers to the DecompModel var list - //--- - double* clb = new double[n_newcols]; - double* cub = new double[n_newcols]; - double* obj = new double[n_newcols]; - const CoinPackedVectorBase** colBlock = - new const CoinPackedVectorBase*[n_newcols]; - const vector& colNamesM = m_masterSI->getColNames(); - vector colNames; - bool hasNames = colNamesM.size() > 0 ? true : false; - const int colIndex0 = m_masterSI->getNumCols(); - - if (hasNames) { - if (colIndex0 != static_cast(colNamesM.size())) { - printf("master num cols=%d names size=%d", - colIndex0, static_cast(colNamesM.size())); + for (b = 0; b < m_numConvexCon; b++) { + denseCol[convexity_index + b] = 0.0; } - assert(colIndex0 == static_cast(colNamesM.size())); - } + denseCol[convexity_index + (*li)->getBlockId()] = 1.0; + + if ((*li)->getVarType() == DecompVar_Ray) { + denseCol[convexity_index + (*li)->getBlockId()] = 0.0; + } - index = 0; + //--- + //--- creat a sparse column from the dense column + //--- + sparseCol = + UtilPackedVectorFromDense(modelCore->getNumRows() + m_numConvexCon, + denseCol, m_app->m_param.TolZero); + UTIL_DEBUG(m_app->m_param.LogDebugLevel, 5, + (*m_osLog) << "\nPRINT sparseCol\n"; + UtilPrintPackedVector(*sparseCol);); + } // END: else(m_algo == DECOMP) + + DecompWaitingCol waitingCol(*li, sparseCol); + + // TOOD: since DecompVarList does not have its own class... + // this is ugly, fix this later... make a helper funciton of DecompVar? 
+ // TODO: this is very expensive - use hash like in cuts + if (m_varpool.isDuplicate(m_vars, waitingCol)) { + UTIL_DEBUG( + m_app->m_param.LogDebugLevel, 3, + (*m_osLog) << "Duplicate variable, already in vars!!\n"; + (*li)->print(m_infinity, m_osLog, modelCore->getColNames(), NULL);); + UTIL_DEBUG(m_app->m_param.LogDebugLevel, 5, (*m_osLog) << "\nVAR POOL:\n"; + m_varpool.print(m_infinity, m_osLog); + (*m_osLog) << "\nVARS:\n"; printVars(m_osLog);); + waitingCol.deleteVar(); + waitingCol.deleteCol(); + + if (m_algo != RELAX_AND_CUT) { //?? + m_nodeStats.varsThisCall--; + m_nodeStats.varsThisRound--; + } + + continue; + } + + if (m_varpool.isDuplicate(waitingCol)) { + UTIL_DEBUG(m_app->m_param.LogDebugLevel, 3, + (*m_osLog) << "Duplicate variable, already in var pool.\n";); + waitingCol.deleteVar(); + waitingCol.deleteCol(); - for (vi = m_varpool.begin(); vi != m_varpool.end(); vi++) { - if (index >= n_newcols) { - break; + if (m_algo != RELAX_AND_CUT) { //?? + m_nodeStats.varsThisCall--; + m_nodeStats.varsThisRound--; } - const CoinPackedVector* col = (*vi).getColPtr(); + continue; + } - DecompVar* var = (*vi).getVarPtr(); + //--- + //--- check to see if this var is parallel to the ones in LP + //--- cosine=1.0 means the vars are exactly parallel + //--- + if (foundGoodCol && + m_varpool.isParallel(m_vars, waitingCol, m_param.ParallelColsLimit)) { + UTIL_DEBUG(m_app->m_param.LogDebugLevel, 3, + (*m_osLog) << "Parallel variable, already in vars.\n";); + waitingCol.deleteVar(); + waitingCol.deleteCol(); + + if (m_algo != RELAX_AND_CUT) { //?? + m_nodeStats.varsThisCall--; + m_nodeStats.varsThisRound--; + } + + continue; + } + + //--- + //--- passed all filters, add the column to var pool + //--- + m_varpool.push_back(waitingCol); + foundGoodCol = true; + } // END: for(li = newVars.begin(); li != newVars.end(); li++) + + //--- + //--- in the case of Wengtes, you might get all duplicate + //--- columns - if this is the case, you don't want to stop + //--- searching - rather, reduce alpha and repeat gen vars + //--- + if (m_phase == PHASE_PRICE2 && newVars.size() > 0 && !foundGoodCol && + m_param.DualStab) { + m_phaseForce = PHASE_PRICE2; + m_param.DualStabAlpha *= 0.90; + + if (m_param.LogDebugLevel >= 2) + (*m_osLog) << "No vars passed doing Wengtes. Reduce alpha to " + << m_param.DualStabAlpha << " and repeat." << endl; + + //--- + //--- adjust dual solution with updated stability parameter + //--- + adjustMasterDualSolution(); + } else { + m_phaseForce = PHASE_UNKNOWN; + } + + //--- + //--- if Wengtes parameter has been reduced, set it back to original + //--- + if (foundGoodCol && m_param.DualStabAlpha < m_param.DualStabAlphaOrig) { + m_param.DualStabAlpha = m_param.DualStabAlphaOrig; + + if (m_param.LogDebugLevel >= 2) + (*m_osLog) << "Good column found doing Wengtes. Setting alpha back " + << "to its original setting " << m_param.DualStabAlpha << "." + << endl; + } - assert(col); + UTIL_DELARR(denseCol); + UtilPrintFuncEnd(m_osLog, m_classTag, "addVarsToPool()", + m_param.LogDebugLevel, 2); +} - colBlock[index] = col; +//------------------------------------------------------------------------- // +void DecompAlgo::addVarsFromPool() { + // TODO: we have checked to make sure there are no dups added to pool + // do we also need to check that no dup vars are added to LP? 
for that + // we'd have to check across m_vars + UtilPrintFuncBegin(m_osLog, m_classTag, "addVarsFromPool()", + m_param.LogDebugLevel, 2); + // TODO: + // const int maxvars_toadd = m_app->m_param.maxvars_periter; + // int n_newcols = std::min(m_varpool.size(), maxvars_toadd); + DecompVarPool::iterator vi; + DecompVarPool::iterator viLast; + int n_newcols = static_cast(m_varpool.size()); + + if (n_newcols == 0) { + UtilPrintFuncEnd(m_osLog, m_classTag, "addVarsFromPool()", + m_param.LogDebugLevel, 2); + return; + } + + //--- + //--- sort the pool by increasing reduced cost + //--- + partial_sort(m_varpool.begin(), m_varpool.begin() + n_newcols, + m_varpool.end(), is_less_thanD()); + UTIL_MSG(m_app->m_param.LogDebugLevel, 3, + (*m_osLog) << "size: var pool = " << m_varpool.size(); + (*m_osLog) << " master cols = " << m_masterSI->getNumCols() + << endl;); + UTIL_DEBUG(m_app->m_param.LogDebugLevel, 10, + (*m_osLog) << "\nVAR POOL BEFORE:\n"; + m_varpool.print(m_infinity, m_osLog); + (*m_osLog) << "\nVARS BEFORE:\n"; printVars(m_osLog);); + //--- + //--- never add anything with pos rc + //--- + int index = 0; + + for (vi = m_varpool.begin(); vi != m_varpool.end(); vi++) { + if (m_algo != RELAX_AND_CUT) { // THINK?? + if ((*vi).getReducedCost() > -0.0000001) { // TODO - param + break; + } + } + + index++; + } + + n_newcols = std::min(n_newcols, index); + // TODO + /*if(n_newcols > 0) + m_cutpool.setRowsAreValid(false);*/ + // see CoinBuild or switch to ind,els,beg form + //--- + //--- 1.) build up the block of columns to be added to the master + //--- create a block for speed, rather than one column at a time + //--- 2.) copy the var pointers to the DecompModel var list + //--- + double *clb = new double[n_newcols]; + double *cub = new double[n_newcols]; + double *obj = new double[n_newcols]; + const CoinPackedVectorBase **colBlock = + new const CoinPackedVectorBase *[n_newcols]; + const vector &colNamesM = m_masterSI->getColNames(); + vector colNames; + bool hasNames = colNamesM.size() > 0 ? 
true : false; + const int colIndex0 = m_masterSI->getNumCols(); + + if (hasNames) { + if (colIndex0 != static_cast(colNamesM.size())) { + printf("master num cols=%d names size=%d", colIndex0, + static_cast(colNamesM.size())); + } + + assert(colIndex0 == static_cast(colNamesM.size())); + } + + index = 0; + + for (vi = m_varpool.begin(); vi != m_varpool.end(); vi++) { + if (index >= n_newcols) { + break; + } - clb[index] = (*vi).getLowerBound(); + const CoinPackedVector *col = (*vi).getColPtr(); - cub[index] = (*vi).getUpperBound(); + DecompVar *var = (*vi).getVarPtr(); - if (m_phase == PHASE_PRICE1) { - obj[index] = 0.0; - } else { - obj[index] = (*vi).getOrigCost(); - } + assert(col); - int blockIndex = var->getBlockId(); - int colIndex = colIndex0 + index; - var->setColMasterIndex(colIndex); - m_masterColType.push_back(DecompCol_Structural); + colBlock[index] = col; - //--- - //--- give the column a name - //--- - if (hasNames) { - string colName = "lam(c_" + UtilIntToStr(m_colIndexUnique) - + ",b_" + UtilIntToStr(blockIndex) + ")"; - colNames.push_back(colName); - } + clb[index] = (*vi).getLowerBound(); - m_colIndexUnique++; - appendVars(var); - index++; - } - - viLast = vi; - m_masterSI->addCols(n_newcols, colBlock, clb, cub, obj); - - if (hasNames) { - m_masterSI->setColNames(colNames, 0, - static_cast(colNames.size()), - colIndex0); - } - - UTIL_DEBUG(m_app->m_param.LogDebugLevel, 5, - const int n_colsAfter = m_masterSI->getNumCols(); - assert(colIndex0 + n_newcols == n_colsAfter); - ); - - //--- - //--- 3.) delete the col memory and clear the var pointer from varpool - //--- the column memory is no longer needed, it has been copied into - //--- the master object, the variable memory is still needed, its - //--- pointer is now in m_vars, and no longer is needed in varpool - //--- - //THINK is this all neccessary? just to keep memory small? or - //doing this for some reason of efficiency? - for (vi = m_varpool.begin(); vi != viLast; vi++) { - (*vi).deleteCol(); - (*vi).clearVar(); //needed? dangling pointer if not - } - - //TODO: is this slow for vector? 
if so, maybe list is still the way to go - m_varpool.erase(m_varpool.begin(), viLast); - UTIL_DEBUG(m_app->m_param.LogDebugLevel, 10, - (*m_osLog) << "\nVAR POOL AFTER:\n"; - m_varpool.print(m_infinity, m_osLog); - (*m_osLog) << "\nVARS AFTER:\n"; - printVars(m_osLog); - ); - UTIL_MSG(m_app->m_param.LogDebugLevel, 3, - (*m_osLog) << "size: var pool = " << m_varpool.size(); - (*m_osLog) << " master cols = " << m_masterSI->getNumCols() - << endl; - ); - //--- - //--- free local memory - //--- - UTIL_DELARR(colBlock); - UTIL_DELARR(clb); - UTIL_DELARR(cub); - UTIL_DELARR(obj); - UtilPrintFuncEnd(m_osLog, m_classTag, - "addVarsFromPool()", m_param.LogDebugLevel, 2); -} + cub[index] = (*vi).getUpperBound(); + if (m_phase == PHASE_PRICE1) { + obj[index] = 0.0; + } else { + obj[index] = (*vi).getOrigCost(); + } + + int blockIndex = var->getBlockId(); + int colIndex = colIndex0 + index; + var->setColMasterIndex(colIndex); + m_masterColType.push_back(DecompCol_Structural); + + //--- + //--- give the column a name + //--- + if (hasNames) { + string colName = "lam(c_" + UtilIntToStr(m_colIndexUnique) + ",b_" + + UtilIntToStr(blockIndex) + ")"; + colNames.push_back(colName); + } + + m_colIndexUnique++; + appendVars(var); + index++; + } + + viLast = vi; + m_masterSI->addCols(n_newcols, colBlock, clb, cub, obj); + + if (hasNames) { + m_masterSI->setColNames(colNames, 0, static_cast(colNames.size()), + colIndex0); + } + + UTIL_DEBUG(m_app->m_param.LogDebugLevel, 5, + const int n_colsAfter = m_masterSI->getNumCols(); + assert(colIndex0 + n_newcols == n_colsAfter);); + + //--- + //--- 3.) delete the col memory and clear the var pointer from varpool + //--- the column memory is no longer needed, it has been copied into + //--- the master object, the variable memory is still needed, its + //--- pointer is now in m_vars, and no longer is needed in varpool + //--- + // THINK is this all neccessary? just to keep memory small? or + // doing this for some reason of efficiency? + for (vi = m_varpool.begin(); vi != viLast; vi++) { + (*vi).deleteCol(); + (*vi).clearVar(); // needed? dangling pointer if not + } + + // TODO: is this slow for vector? if so, maybe list is still the way to go + m_varpool.erase(m_varpool.begin(), viLast); + UTIL_DEBUG(m_app->m_param.LogDebugLevel, 10, + (*m_osLog) << "\nVAR POOL AFTER:\n"; + m_varpool.print(m_infinity, m_osLog); + (*m_osLog) << "\nVARS AFTER:\n"; printVars(m_osLog);); + UTIL_MSG(m_app->m_param.LogDebugLevel, 3, + (*m_osLog) << "size: var pool = " << m_varpool.size(); + (*m_osLog) << " master cols = " << m_masterSI->getNumCols() + << endl;); + //--- + //--- free local memory + //--- + UTIL_DELARR(colBlock); + UTIL_DELARR(clb); + UTIL_DELARR(cub); + UTIL_DELARR(obj); + UtilPrintFuncEnd(m_osLog, m_classTag, "addVarsFromPool()", + m_param.LogDebugLevel, 2); +} /*-------------------------------------------------------------------------*/ -void DecompAlgo::addCutsToPool(const double* x, - DecompCutList& newCuts, - int& m_cutsThisCall) -{ - UtilPrintFuncBegin(m_osLog, m_classTag, - "addCutsToPool()", m_param.LogDebugLevel, 2); - //for RC, make sure no cuts we are about to add are already in modelCore - //also check that we have no duplicate cuts being put in here - //TODO: do something similiar to check for pos-rc vars - DecompConstraintSet* modelCore = m_modelCore.getModel(); - int r, cutIndex = 0; - bool isViolated = false; - bool isDupCore;//also check relax? 
- bool isDupPool; - bool addCut; - DecompCutPool::iterator ci; - DecompCutList::iterator li = newCuts.begin(); - - while (li != newCuts.end()) { - CoinPackedVector* row = new CoinPackedVector(); - //--- - //--- create a row (in terms of original formulation, x), from a cut - //--- - (*li)->expandCutToRow(row); - //--- - //--- set the hash string (for quick duplicate checks) - //--- - (*li)->setStringHash(row, m_infinity); +void DecompAlgo::addCutsToPool(const double *x, DecompCutList &newCuts, + int &m_cutsThisCall) { + UtilPrintFuncBegin(m_osLog, m_classTag, "addCutsToPool()", + m_param.LogDebugLevel, 2); + // for RC, make sure no cuts we are about to add are already in modelCore + // also check that we have no duplicate cuts being put in here + // TODO: do something similiar to check for pos-rc vars + DecompConstraintSet *modelCore = m_modelCore.getModel(); + int r, cutIndex = 0; + bool isViolated = false; + bool isDupCore; // also check relax? + bool isDupPool; + bool addCut; + DecompCutPool::iterator ci; + DecompCutList::iterator li = newCuts.begin(); + + while (li != newCuts.end()) { + CoinPackedVector *row = new CoinPackedVector(); + //--- + //--- create a row (in terms of original formulation, x), from a cut + //--- + (*li)->expandCutToRow(row); + //--- + //--- set the hash string (for quick duplicate checks) + //--- + (*li)->setStringHash(row, m_infinity); #if 0 bool isOptViolated = false; @@ -5897,1047 +5522,955 @@ void DecompAlgo::addCutsToPool(const double* x, } #endif - //here we will use a hash table - or just STL map - addCut = true; - isDupCore = false; - - for (r = 0; r < modelCore->getNumRows(); r++) { - //override isSame( ) - //in one case you check hash if expanded - //in user case you check isSame directly - //this will become hash lookup code - if (modelCore->rowHash[r] == (*li)->getStrHash()) { - //--- - //--- This should not happen, however, it is possible - //--- due to roundoff error. Since x = sum{}lambda, - //--- the masterLP might be feasible while an a.x might - //--- violate a row bound slightly. This is checked after - //--- the recomposition. But, we don't throw an error unless - //--- the error is significant. The cut generator might - //--- duplicate a cut, because it finds an inequality that - //--- does cut off the current point that matches a row/cut - //--- already in the LP. - //--- - //--- Like the check in checkPointFeasible, we should check - //--- that this duplicated cut violates by only a small - //--- percentage. If not, then it really is an error. 
- //--- - UTIL_MSG(m_app->m_param.LogDebugLevel, 3, - (*m_osLog) << "Cut is Duplicate with Core\n"; - ); - UTIL_MSG(m_app->m_param.LogDebugLevel, 4, - (*li)->print(); - ); - isDupCore = true; - break; - } - } - - if (isDupCore) { - addCut = false; - } else { - //--- - //--- is this cut already in pool - //--- - isDupPool = false; - - for (ci = m_cutpool.begin(); ci != m_cutpool.end(); ci++) { - if ((*li)->getStrHash() == (*ci).getCutPtr()->getStrHash()) { - UTIL_MSG(m_app->m_param.LogDebugLevel, 4, - (*m_osLog) << "Cut is Duplicate with Pool\n"; - (*li)->print(); - ); - isDupPool = true; - break; - } - } - - if (isDupPool) { - addCut = false; - } else { - isViolated = (*li)->calcViolation(row, x);//also sets it - - if (!isViolated) { - addCut = false; - } - } - } - - if (addCut) { - DecompWaitingRow waitingRow(*li, row); - m_cutpool.push_back(waitingRow); - li++; + // here we will use a hash table - or just STL map + addCut = true; + isDupCore = false; + + for (r = 0; r < modelCore->getNumRows(); r++) { + // override isSame( ) + // in one case you check hash if expanded + // in user case you check isSame directly + // this will become hash lookup code + if (modelCore->rowHash[r] == (*li)->getStrHash()) { + //--- + //--- This should not happen, however, it is possible + //--- due to roundoff error. Since x = sum{}lambda, + //--- the masterLP might be feasible while an a.x might + //--- violate a row bound slightly. This is checked after + //--- the recomposition. But, we don't throw an error unless + //--- the error is significant. The cut generator might + //--- duplicate a cut, because it finds an inequality that + //--- does cut off the current point that matches a row/cut + //--- already in the LP. + //--- + //--- Like the check in checkPointFeasible, we should check + //--- that this duplicated cut violates by only a small + //--- percentage. If not, then it really is an error. + //--- + UTIL_MSG(m_app->m_param.LogDebugLevel, 3, + (*m_osLog) << "Cut is Duplicate with Core\n";); + UTIL_MSG(m_app->m_param.LogDebugLevel, 4, (*li)->print();); + isDupCore = true; + break; + } + } + + if (isDupCore) { + addCut = false; + } else { + //--- + //--- is this cut already in pool + //--- + isDupPool = false; + + for (ci = m_cutpool.begin(); ci != m_cutpool.end(); ci++) { + if ((*li)->getStrHash() == (*ci).getCutPtr()->getStrHash()) { + UTIL_MSG(m_app->m_param.LogDebugLevel, 4, + (*m_osLog) << "Cut is Duplicate with Pool\n"; + (*li)->print();); + isDupPool = true; + break; + } + } + + if (isDupPool) { + addCut = false; } else { - //--- - //--- cut is not violated, do not put in cut pool, delete memory - //--- - UTIL_DEBUG(m_app->m_param.LogDebugLevel, 3, - (*m_osLog) << "\nCUT " << cutIndex - << " do not put in pool"; - ); - UTIL_DELPTR(*li); //need to do? - li = newCuts.erase(li); //does this call cut destructor? - m_cutsThisCall--; - //then don't increment li next iter?? 
think - } - - cutIndex++; - } - - assert(m_cutsThisCall >= 0); - UtilPrintFuncEnd(m_osLog, m_classTag, - "addCutsToPool()", m_param.LogDebugLevel, 2); -} - - - -/*--------------------------------------------------------------------------*/ -int DecompAlgo::addCutsFromPool() -{ - //this is exactly the same for RC, except that we also have to add - //multipliers - UtilPrintFuncBegin(m_osLog, m_classTag, - "addCutsFromPool()", m_param.LogDebugLevel, 2); - //TODO: do some work here to check for duplicate cuts (actually do that - //in addCutsToPool) in RC, can add cuts that already did (no "core model" - //) - //TODO: do some work here to check for parallel cuts - DecompConstraintSet* modelCore = m_modelCore.getModel(); - //TODO partial sort! - sort(m_cutpool.begin(), - m_cutpool.end(), - is_greater_thanD()); - UTIL_DEBUG(m_app->m_param.LogDebugLevel, 10, - (*m_osLog) << "\nCUT POOL BEFORE:\n"; - m_cutpool.print(m_osLog); - (*m_osLog) << "\nCUTS BEFORE:\n"; - printCuts(m_osLog); - ); - const int maxcuts_toadd = 10000;//m_app->m_param.cut_maxcuts_periter; - int n_newrows = CoinMin(static_cast(m_cutpool.size()), maxcuts_toadd); - //since we use a list - find_first won't help as it returns an - //iterator not an index in the list... UGH - int index = 0; - DecompCutPool::iterator li; - - for (li = m_cutpool.begin(); li != m_cutpool.end(); li++) { - if ((*li).getViolation() < DecompEpsilon) { //PARM - break; - } - - index++; - } - - //never add anything not violated - n_newrows = std::min(n_newrows, index); - //TODO: look into coin build... - double* rlb = new double[n_newrows]; - double* rub = new double[n_newrows]; - const CoinPackedVectorBase** rowBlock = - new const CoinPackedVectorBase*[n_newrows]; - //vector & coreRowNames = modelCore->getRowNames(); - vector colNames; - vector rowNames; - string colName; - string rowName; - int rowIndex; - //int colIndex; - index = 0; - - for (li = m_cutpool.begin(); li != m_cutpool.end(); li++) { - if (index >= n_newrows) { - break; - } - - CoinPackedVector* row = (*li).getRowPtr(); - DecompCut* cut = (*li).getCutPtr(); - rlb[index] = (*li).getLowerBound(); - rub[index] = (*li).getUpperBound(); - rowBlock[index] = row; - rowIndex = m_masterSI->getNumRows() + index; - //TODO: allow user to give cut names? - rowName = "cut(" + UtilIntToStr(rowIndex) + ")"; - rowNames.push_back(rowName); - //--- - //--- add the cut ptr to the list of cuts in masterLP - //--- - m_cuts.push_back(cut); - //--- - //--- set hash for cut - //--- - modelCore->rowHash.push_back(cut->getStrHash()); - index++; - } - - if ((m_algo == RELAX_AND_CUT)) { - //this is what we want to update in RC, but not in C - modelCore->M->appendRows(n_newrows, rowBlock); - //create a modelCore->appendRows does all of this direct from - //a cut pool - //THINK: francois idea, when add cuts to P'? - char sense; - double rhs, range; - - for (index = 0; index < n_newrows; index++) { - modelCore->rowLB.push_back(rlb[index]); - modelCore->rowUB.push_back(rub[index]); - UtilBoundToSense(rlb[index], rub[index], m_infinity, - sense, rhs, range); - modelCore->rowRhs.push_back(rhs); - modelCore->rowSense.push_back(sense); - } - } else { - //--- - //--- add the new rows to master - //--- add the new rows to core (?) - must in PC, don't really need here... 
- //--- - m_masterSI->addRows(n_newrows, rowBlock, rlb, rub); - int nRowNames = static_cast(rowNames.size()); + isViolated = (*li)->calcViolation(row, x); // also sets it - if (nRowNames > 0) { - m_masterSI->setRowNames(rowNames, 0, nRowNames, 0); + if (!isViolated) { + addCut = false; + } } + } + if (addCut) { + DecompWaitingRow waitingRow(*li, row); + m_cutpool.push_back(waitingRow); + li++; + } else { //--- - //--- add to master row types - //--- add names to modelCore as well (?) + //--- cut is not violated, do not put in cut pool, delete memory //--- - int i; - - for (i = 0; i < n_newrows; i++) { - m_masterRowType.push_back(DecompRow_Cut); - //coreRowNames.push_back(rowNames[i]); - } - } - - //if(m_isTightenAlgo) - // m_masterSI->addRows(n_newrows, rowBlock, rlb, rub); - //any reason to update this copy?? THINK - //cuts on cuts... - //clean up - index = 0; - - for (li = m_cutpool.begin(); li != m_cutpool.end(); li++) { - if (index >= n_newrows) { - break; - } + UTIL_DEBUG(m_app->m_param.LogDebugLevel, 3, + (*m_osLog) << "\nCUT " << cutIndex << " do not put in pool";); + UTIL_DELPTR(*li); // need to do? + li = newCuts.erase(li); // does this call cut destructor? + m_cutsThisCall--; + // then don't increment li next iter?? think + } + + cutIndex++; + } + + assert(m_cutsThisCall >= 0); + UtilPrintFuncEnd(m_osLog, m_classTag, "addCutsToPool()", + m_param.LogDebugLevel, 2); +} - (*li).deleteRow(); - (*li).clearCut();//need to do this? - index++; - } - - m_cutpool.erase(m_cutpool.begin(), li); - //UTIL_DELARR(rowReformBlock); - UTIL_DELARR(rowBlock); - UTIL_DELARR(rlb); - UTIL_DELARR(rub); - UTIL_DEBUG(m_app->m_param.LogDebugLevel, 10, - (*m_osLog) << "\nCUT POOL AFTER:\n"; - m_cutpool.print(m_osLog); - (*m_osLog) << "\nCUTS AFTER:\n"; - printCuts(m_osLog); //??? add to cuts? - (*m_osLog) << "n_newrows = " << n_newrows << "\n"; - ); - UtilPrintFuncEnd(m_osLog, m_classTag, - "addCutsFromPool()", m_param.LogDebugLevel, 2); - return n_newrows; +/*--------------------------------------------------------------------------*/ +int DecompAlgo::addCutsFromPool() { + // this is exactly the same for RC, except that we also have to add + // multipliers + UtilPrintFuncBegin(m_osLog, m_classTag, "addCutsFromPool()", + m_param.LogDebugLevel, 2); + // TODO: do some work here to check for duplicate cuts (actually do that + // in addCutsToPool) in RC, can add cuts that already did (no "core model" + //) + // TODO: do some work here to check for parallel cuts + DecompConstraintSet *modelCore = m_modelCore.getModel(); + // TODO partial sort! + sort(m_cutpool.begin(), m_cutpool.end(), is_greater_thanD()); + UTIL_DEBUG(m_app->m_param.LogDebugLevel, 10, + (*m_osLog) << "\nCUT POOL BEFORE:\n"; + m_cutpool.print(m_osLog); (*m_osLog) << "\nCUTS BEFORE:\n"; + printCuts(m_osLog);); + const int maxcuts_toadd = 10000; // m_app->m_param.cut_maxcuts_periter; + int n_newrows = CoinMin(static_cast(m_cutpool.size()), maxcuts_toadd); + // since we use a list - find_first won't help as it returns an + // iterator not an index in the list... UGH + int index = 0; + DecompCutPool::iterator li; + + for (li = m_cutpool.begin(); li != m_cutpool.end(); li++) { + if ((*li).getViolation() < DecompEpsilon) { // PARM + break; + } + + index++; + } + + // never add anything not violated + n_newrows = std::min(n_newrows, index); + // TODO: look into coin build... 
+ double *rlb = new double[n_newrows]; + double *rub = new double[n_newrows]; + const CoinPackedVectorBase **rowBlock = + new const CoinPackedVectorBase *[n_newrows]; + // vector & coreRowNames = modelCore->getRowNames(); + vector colNames; + vector rowNames; + string colName; + string rowName; + int rowIndex; + // int colIndex; + index = 0; + + for (li = m_cutpool.begin(); li != m_cutpool.end(); li++) { + if (index >= n_newrows) { + break; + } + + CoinPackedVector *row = (*li).getRowPtr(); + DecompCut *cut = (*li).getCutPtr(); + rlb[index] = (*li).getLowerBound(); + rub[index] = (*li).getUpperBound(); + rowBlock[index] = row; + rowIndex = m_masterSI->getNumRows() + index; + // TODO: allow user to give cut names? + rowName = "cut(" + UtilIntToStr(rowIndex) + ")"; + rowNames.push_back(rowName); + //--- + //--- add the cut ptr to the list of cuts in masterLP + //--- + m_cuts.push_back(cut); + //--- + //--- set hash for cut + //--- + modelCore->rowHash.push_back(cut->getStrHash()); + index++; + } + + if ((m_algo == RELAX_AND_CUT)) { + // this is what we want to update in RC, but not in C + modelCore->M->appendRows(n_newrows, rowBlock); + // create a modelCore->appendRows does all of this direct from + // a cut pool + // THINK: francois idea, when add cuts to P'? + char sense; + double rhs, range; + + for (index = 0; index < n_newrows; index++) { + modelCore->rowLB.push_back(rlb[index]); + modelCore->rowUB.push_back(rub[index]); + UtilBoundToSense(rlb[index], rub[index], m_infinity, sense, rhs, range); + modelCore->rowRhs.push_back(rhs); + modelCore->rowSense.push_back(sense); + } + } else { + //--- + //--- add the new rows to master + //--- add the new rows to core (?) - must in PC, don't really need here... + //--- + m_masterSI->addRows(n_newrows, rowBlock, rlb, rub); + int nRowNames = static_cast(rowNames.size()); + + if (nRowNames > 0) { + m_masterSI->setRowNames(rowNames, 0, nRowNames, 0); + } + + //--- + //--- add to master row types + //--- add names to modelCore as well (?) + //--- + int i; + + for (i = 0; i < n_newrows; i++) { + m_masterRowType.push_back(DecompRow_Cut); + // coreRowNames.push_back(rowNames[i]); + } + } + + // if(m_isTightenAlgo) + // m_masterSI->addRows(n_newrows, rowBlock, rlb, rub); + // any reason to update this copy?? THINK + // cuts on cuts... + // clean up + index = 0; + + for (li = m_cutpool.begin(); li != m_cutpool.end(); li++) { + if (index >= n_newrows) { + break; + } + + (*li).deleteRow(); + (*li).clearCut(); // need to do this? + index++; + } + + m_cutpool.erase(m_cutpool.begin(), li); + // UTIL_DELARR(rowReformBlock); + UTIL_DELARR(rowBlock); + UTIL_DELARR(rlb); + UTIL_DELARR(rub); + UTIL_DEBUG(m_app->m_param.LogDebugLevel, 10, + (*m_osLog) << "\nCUT POOL AFTER:\n"; + m_cutpool.print(m_osLog); (*m_osLog) << "\nCUTS AFTER:\n"; + printCuts(m_osLog); //??? add to cuts? + (*m_osLog) << "n_newrows = " << n_newrows << "\n";); + UtilPrintFuncEnd(m_osLog, m_classTag, "addCutsFromPool()", + m_param.LogDebugLevel, 2); + return n_newrows; } //------------------------------------------------------------------------- // -bool DecompAlgo::isIPFeasible(const double* x, - const bool isXSparse, - const double feasVarTol, - const double feasConTol, - const double intTol) -{ - UtilPrintFuncBegin(m_osLog, m_classTag, - "isIPFeasible()", m_param.LogDebugLevel, 2); - DecompConstraintSet* modelCore = m_modelCore.getModel(); - const int nInts = modelCore->getNumInts(); - const int* integerVars = (nInts > 0) ? 
modelCore->getIntegerVars() : NULL; - const double intTol10 = 10 * intTol; - const vector& colNames = modelCore->getColNames(); - bool hasColNames = false; - - if (colNames.size()) { - hasColNames = true; - } - - bool ipFeas = true; - - if (!isLPFeasible(x, isXSparse, feasVarTol, feasConTol)) { - ipFeas = false; - goto FUNC_EXIT; - } - - int i, c; - - for (i = 0; i < nInts; i++) { - c = integerVars[i]; - - if (!UtilIsIntegral(x[c], intTol)) { - //Notify, but don't mark in feasible unless 10x worse. - UTIL_DEBUG(m_param.LogDebugLevel, 4, - (*m_osLog) << "IpFeas Integer Col x[" << c << "] "; - - if (hasColNames) - (*m_osLog) << " -> " << colNames[c]; - (*m_osLog) << " : " << UtilDblToStr(x[c]) << endl; - ) { - ; - } +bool DecompAlgo::isIPFeasible(const double *x, const bool isXSparse, + const double feasVarTol, const double feasConTol, + const double intTol) { + UtilPrintFuncBegin(m_osLog, m_classTag, "isIPFeasible()", + m_param.LogDebugLevel, 2); + DecompConstraintSet *modelCore = m_modelCore.getModel(); + const int nInts = modelCore->getNumInts(); + const int *integerVars = (nInts > 0) ? modelCore->getIntegerVars() : NULL; + const double intTol10 = 10 * intTol; + const vector &colNames = modelCore->getColNames(); + bool hasColNames = false; + + if (colNames.size()) { + hasColNames = true; + } + + bool ipFeas = true; + + if (!isLPFeasible(x, isXSparse, feasVarTol, feasConTol)) { + ipFeas = false; + goto FUNC_EXIT; + } + + int i, c; + + for (i = 0; i < nInts; i++) { + c = integerVars[i]; + + if (!UtilIsIntegral(x[c], intTol)) { + // Notify, but don't mark in feasible unless 10x worse. + UTIL_DEBUG(m_param.LogDebugLevel, 4, + (*m_osLog) << "IpFeas Integer Col x[" << c << "] "; - if (!UtilIsIntegral(x[c], intTol10)) { - ipFeas = false; - goto FUNC_EXIT; - } + if (hasColNames)(*m_osLog) << " -> " << colNames[c]; + (*m_osLog) << " : " << UtilDblToStr(x[c]) << endl;) { + ; + } + + if (!UtilIsIntegral(x[c], intTol10)) { + ipFeas = false; + goto FUNC_EXIT; } - } + } + } - UTIL_MSG(m_app->m_param.LogDebugLevel, 4, - m_app->printOriginalSolution(modelCore->getNumCols(), - modelCore->getColNames(), - x); - ); + UTIL_MSG(m_app->m_param.LogDebugLevel, 4, + m_app->printOriginalSolution(modelCore->getNumCols(), + modelCore->getColNames(), x);); FUNC_EXIT: - UTIL_DEBUG(m_app->m_param.LogDebugLevel, 4, - (*m_osLog) << "isIPFeasible = " << ipFeas << endl; - ); - UtilPrintFuncEnd(m_osLog, m_classTag, - "isIPFeasible()", m_param.LogDebugLevel, 2); - return ipFeas; + UTIL_DEBUG(m_app->m_param.LogDebugLevel, 4, + (*m_osLog) << "isIPFeasible = " << ipFeas << endl;); + UtilPrintFuncEnd(m_osLog, m_classTag, "isIPFeasible()", m_param.LogDebugLevel, + 2); + return ipFeas; } //------------------------------------------------------------------------- // -bool DecompAlgo::isLPFeasible(const double* x, - const bool isXSparse, - const double feasVarTol, - const double feasConTol) -{ - //--- - //--- The base isFeasible assumes a full explicit description - //--- in modelCore and m_modelRelax (plus integrality). The user - //--- app can also define an isFeasible - which will be checked - //--- first. - //--- - //TODO: consider different tolerances? - //--- - //--- There are two cases where this is used which require different - //--- tolerance levels. If this is to sanity check the recomposed - //--- solution x=sum{s}lamabda_s, then there could be alot of round-off - //--- and we can probably allow a higher level of error. 
If this is - //--- used for checking if we found a new incumbent, there is less additive - //--- roundoff since usually working with integer values, so tolerances - //--- can be tighter. For example, in p0033, if we don't enforce a tight - //--- satisfaction of constraint ax <= -1656, then ax=-1655 might sneak - //--- by as it is only a 0.06% violation. - //--- - UtilPrintFuncBegin(m_osLog, m_classTag, - "isLPFeasible()", m_param.LogDebugLevel, 2); - bool lpFeas = m_modelCore.isPointFeasible(x, - isXSparse, - m_param.LogDebugLevel, - feasVarTol, - feasConTol); - - if (!lpFeas) { - goto FUNC_EXIT; - } - - if (m_modelRelax.size()) { - map::iterator mit; - - for (mit = m_modelRelax.begin(); mit != m_modelRelax.end(); mit++) { - lpFeas = (*mit).second.isPointFeasible(x, - isXSparse, - m_param.LogDebugLevel, - feasVarTol, - feasConTol); - - if (!lpFeas) { - UTIL_DEBUG(m_app->m_param.LogDebugLevel, 4, - (*m_osLog) - << "Block " << mit->first << " infeasible." - << endl; - ); - goto FUNC_EXIT; - } - } - } +bool DecompAlgo::isLPFeasible(const double *x, const bool isXSparse, + const double feasVarTol, + const double feasConTol) { + //--- + //--- The base isFeasible assumes a full explicit description + //--- in modelCore and m_modelRelax (plus integrality). The user + //--- app can also define an isFeasible - which will be checked + //--- first. + //--- + // TODO: consider different tolerances? + //--- + //--- There are two cases where this is used which require different + //--- tolerance levels. If this is to sanity check the recomposed + //--- solution x=sum{s}lamabda_s, then there could be alot of round-off + //--- and we can probably allow a higher level of error. If this is + //--- used for checking if we found a new incumbent, there is less additive + //--- roundoff since usually working with integer values, so tolerances + //--- can be tighter. For example, in p0033, if we don't enforce a tight + //--- satisfaction of constraint ax <= -1656, then ax=-1655 might sneak + //--- by as it is only a 0.06% violation. + //--- + UtilPrintFuncBegin(m_osLog, m_classTag, "isLPFeasible()", + m_param.LogDebugLevel, 2); + bool lpFeas = m_modelCore.isPointFeasible(x, isXSparse, m_param.LogDebugLevel, + feasVarTol, feasConTol); + + if (!lpFeas) { + goto FUNC_EXIT; + } + + if (m_modelRelax.size()) { + map::iterator mit; + + for (mit = m_modelRelax.begin(); mit != m_modelRelax.end(); mit++) { + lpFeas = (*mit).second.isPointFeasible( + x, isXSparse, m_param.LogDebugLevel, feasVarTol, feasConTol); + + if (!lpFeas) { + UTIL_DEBUG(m_app->m_param.LogDebugLevel, 4, + (*m_osLog) + << "Block " << mit->first << " infeasible." 
<< endl;); + goto FUNC_EXIT; + } + } + } FUNC_EXIT: - UTIL_DEBUG(m_app->m_param.LogDebugLevel, 4, - (*m_osLog) << "isLPFeasible = " << lpFeas << endl; - ); - UtilPrintFuncEnd(m_osLog, m_classTag, - "isLPFeasible()", m_param.LogDebugLevel, 2); - return lpFeas; + UTIL_DEBUG(m_app->m_param.LogDebugLevel, 4, + (*m_osLog) << "isLPFeasible = " << lpFeas << endl;); + UtilPrintFuncEnd(m_osLog, m_classTag, "isLPFeasible()", m_param.LogDebugLevel, + 2); + return lpFeas; } //--------------------------------------------------------------------- // -DecompStatus DecompAlgo::solveRelaxed(const double* redCostX, - const double* origCost, - const double alpha, - const int n_origCols, - const bool isNested, - DecompSubModel& subModel, - DecompSolverResult* solveResult, - DecompVarList& vars, - double timeLimit - ) -{ - //--- - //--- For pricing, - //--- redCostX: is the red-cost for each original column (c - uhat A")_e - //--- origCost: is the original cost for each original column c_e - //--- alpha: is the dual for the convexity constraint - //--- - //--- The reduced cost of a new variable (column) is the sum of the - //--- reduced cost on each of the original columns in the new variable - //--- minus alpha (this function is responsible for returning the reduced - //--- cost, which includes alpha). - //--- - //--- NOTE, redCost does not include alpha as sent in - //--- - UtilPrintFuncBegin(m_osLog, m_classTag, - "solveRelaxed()", m_param.LogDebugLevel, 2); - OsiSolverInterface* subprobSI = subModel.getOsi(); - int whichBlock = subModel.getBlockId(); - bool isRoot = getNodeIndex() ? false : true; - DecompConstraintSet* model = subModel.getModel(); - UTIL_DEBUG(m_app->m_param.LogDebugLevel, 3, - (*m_osLog) << "solve block b = " << whichBlock << endl; - (*m_osLog) << "alpha = " << alpha << endl; - (*m_osLog) << "isNested = " << isNested << endl; - ); - - if (m_param.SubProbParallel) { - m_stats.timerOther1.reset(); - }else{ - m_stats.timerOther2.reset(); - } - - int nVars = static_cast(vars.size()); - int nNewVars = 0; - //SolveRelaxAsIp - //0 = If a user function is defined, it will use the user function. - // If the user returns an exact solution, it will not run the built-in - // IP solve (default). - // If a user function is not defined, it will use the built-in IP solve. - //1 = Use the built-in IP solve, even if there is a user defines a function. - //2 = Calls the user defined function (if exists) and then calls built-in - // IP solver (use this for debugging). - bool doCutoff = m_param.SubProbUseCutoff; - bool doExact = isNested ? false : true; - doExact = m_function == DecompFuncGenerateInitVars ? 
false : doExact; - DecompSolverStatus solverStatus = DecompSolStatNoSolution; - DecompVarList userVars; - - //#ifndef RELAXED_THREADED - if (m_param.SolveRelaxAsIp != 1) { - +DecompStatus DecompAlgo::solveRelaxed( + const double *redCostX, const double *origCost, const double alpha, + const int n_origCols, const bool isNested, DecompSubModel &subModel, + DecompSolverResult *solveResult, DecompVarList &vars, double timeLimit) { + //--- + //--- For pricing, + //--- redCostX: is the red-cost for each original column (c - uhat A")_e + //--- origCost: is the original cost for each original column c_e + //--- alpha: is the dual for the convexity constraint + //--- + //--- The reduced cost of a new variable (column) is the sum of the + //--- reduced cost on each of the original columns in the new variable + //--- minus alpha (this function is responsible for returning the reduced + //--- cost, which includes alpha). + //--- + //--- NOTE, redCost does not include alpha as sent in + //--- + UtilPrintFuncBegin(m_osLog, m_classTag, "solveRelaxed()", + m_param.LogDebugLevel, 2); + OsiSolverInterface *subprobSI = subModel.getOsi(); + int whichBlock = subModel.getBlockId(); + bool isRoot = getNodeIndex() ? false : true; + DecompConstraintSet *model = subModel.getModel(); + UTIL_DEBUG(m_app->m_param.LogDebugLevel, 3, + (*m_osLog) << "solve block b = " << whichBlock << endl; + (*m_osLog) << "alpha = " << alpha << endl; + (*m_osLog) << "isNested = " << isNested << endl;); + + if (m_param.SubProbParallel) { + m_stats.timerOther1.reset(); + } else { + m_stats.timerOther2.reset(); + } + + int nVars = static_cast(vars.size()); + int nNewVars = 0; + // SolveRelaxAsIp + // 0 = If a user function is defined, it will use the user function. + // If the user returns an exact solution, it will not run the built-in + // IP solve (default). + // If a user function is not defined, it will use the built-in IP solve. + // 1 = Use the built-in IP solve, even if there is a user defines a function. + // 2 = Calls the user defined function (if exists) and then calls built-in + // IP solver (use this for debugging). + bool doCutoff = m_param.SubProbUseCutoff; + bool doExact = isNested ? false : true; + doExact = m_function == DecompFuncGenerateInitVars ? false : doExact; + DecompSolverStatus solverStatus = DecompSolStatNoSolution; + DecompVarList userVars; + + //#ifndef RELAXED_THREADED + if (m_param.SolveRelaxAsIp != 1) { + + if (isNested) { + solverStatus = + m_app->solveRelaxedNest(whichBlock, redCostX, alpha, userVars); + } else { + solverStatus = m_app->solveRelaxed(whichBlock, redCostX, alpha, userVars); + } + + DecompVarList::iterator it; + + for (it = userVars.begin(); it != userVars.end(); it++) { + if ((*it)->getBlockId() == whichBlock) { + if ((*it)->getVarType() == DecompVar_Point) { + (*it)->setReducedCost((*it)->getReducedCost() - alpha); + } + } + } + + if (!m_param.SubProbParallel) { + m_stats.thisSolveRelaxApp.push_back(m_stats.timerOther2.getRealTime()); + } + + nNewVars = static_cast(userVars.size()) - nVars; + } + + m_isColGenExact = (solverStatus == DecompSolStatOptimal); + UTIL_DEBUG(m_param.LogDebugLevel, 4, + (*m_osLog) << "m_isColGenExact = " << m_isColGenExact << endl;); + //#endif + + if ((!m_isColGenExact && nNewVars <= 0) || (m_param.SolveRelaxAsIp == 2)) { + //--- + //--- Here, we are going to use the built-in IP solver + //--- to solve the subproblem. In many cases, the solver + //--- will get a "good column" quickly (within some gap). 
+ //--- That is, it is ok to return suboptimal columns that + //--- have negative reduced cost. + //--- + //--- To possible approaches to speed things up: + //--- (1) return as soon as an UB < 0 is found + //--- (2) return when gap is tight + //--- + //--- However, to prove that the final DW LB is valid, we will need + //--- solve the pricing problem to optimaity at some point. + //--- + assert(subprobSI); + //--- + //--- reset the objective to reduced cost + //--- + subModel.setOsiObjCoeff(redCostX); + + //--- + //--- reset the col lbs/ubs to node bounds + //--- + //--- for block angular case, the user must tell us + //--- the active columns + //--- + //--- CAREFUL: this overrides the user subproblem column + //--- bounds - if for some reason they don't want that + //--- to match up with core, this might cause an issue + //--- + if (m_param.BranchEnforceInSubProb) { + subModel.setActiveColBounds(m_colLBNode, m_colUBNode); + } + + //--- + //--- dump subproblem model .mps/.lp + //--- + if (m_param.LogDumpModel > 1) { if (isNested) { - solverStatus - = m_app->solveRelaxedNest(whichBlock, redCostX, alpha, userVars); - } else { - solverStatus - = m_app->solveRelaxed(whichBlock, redCostX, alpha, userVars); - } - - DecompVarList::iterator it; - - for (it = userVars.begin(); it != userVars.end(); it++) { - if ((*it)->getBlockId() == whichBlock) { - if ((*it)->getVarType() == DecompVar_Point) { - (*it)->setReducedCost((*it)->getReducedCost() - alpha); - } - } - } - - if (!m_param.SubProbParallel) { - m_stats.thisSolveRelaxApp.push_back(m_stats.timerOther2.getRealTime()); - } - - nNewVars = static_cast(userVars.size()) - nVars; - - } - - m_isColGenExact = (solverStatus == DecompSolStatOptimal); - UTIL_DEBUG(m_param.LogDebugLevel, 4, - (*m_osLog) << "m_isColGenExact = " << m_isColGenExact << endl; - ); - //#endif - - if ((!m_isColGenExact && nNewVars <= 0) || (m_param.SolveRelaxAsIp == 2)) { - //--- - //--- Here, we are going to use the built-in IP solver - //--- to solve the subproblem. In many cases, the solver - //--- will get a "good column" quickly (within some gap). - //--- That is, it is ok to return suboptimal columns that - //--- have negative reduced cost. - //--- - //--- To possible approaches to speed things up: - //--- (1) return as soon as an UB < 0 is found - //--- (2) return when gap is tight - //--- - //--- However, to prove that the final DW LB is valid, we will need - //--- solve the pricing problem to optimaity at some point. 
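// --- editorial sketch (not part of this patch) ----------------------------
// The TODO note above (z*_DW + RC* <= z_DW <= z*_DW) can be read as: the
// restricted-master objective plus a lower bound on the most negative
// reduced cost gives a valid Dantzig-Wolfe lower bound, even when the
// pricing problems are only solved approximately.  The function below is
// an illustrative stand-alone sketch, not DIP's API, and it assumes one
// convexity constraint per block so the per-block bounds simply add up.
#include <algorithm>
#include <vector>

double dantzigWolfeLowerBound(double restrictedMasterObj,
                              const std::vector<double> &mostNegRCPerBlock)
{
   // Each entry must be a valid *lower* bound on that block's most negative
   // reduced cost (e.g. the dual/MIP bound of the pricing solve).
   double lb = restrictedMasterObj;
   for (double rcLB : mostNegRCPerBlock) {
      lb += std::min(0.0, rcLB); // only negative reduced costs can pull z_DW down
   }
   return lb;
}
// ---------------------------------------------------------------------------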
- //--- - assert(subprobSI); - //--- - //--- reset the objective to reduced cost - //--- - subModel.setOsiObjCoeff(redCostX); - - //--- - //--- reset the col lbs/ubs to node bounds - //--- - //--- for block angular case, the user must tell us - //--- the active columns - //--- - //--- CAREFUL: this overrides the user subproblem column - //--- bounds - if for some reason they don't want that - //--- to match up with core, this might cause an issue - //--- - if (m_param.BranchEnforceInSubProb) { - subModel.setActiveColBounds(m_colLBNode, m_colUBNode); - } - - //--- - //--- dump subproblem model .mps/.lp - //--- - if (m_param.LogDumpModel > 1) { - if (isNested) { - string baseName = "subProbN_" + subModel.getModelName(); + string baseName = "subProbN_" + subModel.getModelName(); - if (m_isStrongBranch) { - baseName += "_SB"; - } - - printCurrentProblem(subprobSI, - baseName, - m_nodeStats.nodeIndex, - m_nodeStats.cutCallsTotal, - m_nodeStats.priceCallsTotal, - whichBlock); - } else { - string baseName = "subProb_" + subModel.getModelName(); - - if (m_isStrongBranch) { - baseName += "_SB"; - } - - std::cout << "problem name is " - << baseName - << m_nodeStats.nodeIndex - << m_nodeStats.cutCallsTotal - << m_nodeStats.priceCallsTotal - << whichBlock - << std::endl; - printCurrentProblem(subprobSI, - baseName, - m_nodeStats.nodeIndex, - m_nodeStats.cutCallsTotal, - m_nodeStats.priceCallsTotal, - whichBlock); - } - } + if (m_isStrongBranch) { + baseName += "_SB"; + } - //--- - //--- solve: min cx, s.t. A'x >= b', x in Z ([A,b] is in modelRelax.M) - //--- - subModel.solveAsMIP(solveResult, - m_param, - doExact, - doCutoff, - isRoot, - alpha - DecompEpsilon, - timeLimit); - //double * milpSolution = NULL; - //if(solveResult->m_nSolutions) - // milpSolution = solveResult->m_solution; - //TODO: - // z_DW is a LB on z_IP - //During DW we get... z*_DW + RC* <= z_DW <= z*_DW - // but if we have bounds on RC* lbRC <= RC* <= ubRC - //then z*_DW + lbRC <= z*_DW + RC* <= z_DW - // we can get a valid LB update just from LB of oracle.. - //we can choose to stop and branch any time we want - - // we sometimes wait for rc=0 but really we can just look at - // gap between DW's lb and ub... - m_isColGenExact = solveResult->m_isOptimal; - UTIL_DEBUG(m_param.LogDebugLevel, 4, - (*m_osLog) << "m_isColGenExact = " << m_isColGenExact << endl; - ); - - // THINK: we really don't want to force the user to create vars - // and check rc and obj, etc... but they might know how to be smart - // and produce more than one, etc... THINK - if (solveResult->m_nSolutions) { - int k; - int nSol = std::min(solveResult->m_nSolutions, - m_param.SubProbNumSolLimit); - for (k = 0; k < nSol; k++) { - const double* milpSolution = solveResult->getSolution(k); - //--- - //--- create a DecompVar (shat) from the optimal solution - //--- - vector ind; - vector els; - int i, c; - double varRedCost = 0.0; //stupid - == obj ?? - double varOrigCost = 0.0; - // defaut assume it is bounded and generating extreme points - DecompVarType varType = !solveResult->m_isUnbounded ? - DecompVar_Point : DecompVar_Ray; - - //std::cout << "The variable Type is " << varType << std::endl; - if (model->isSparse()) { - //TODO: this can just be a vector? ever need arb access? 
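// --- editorial sketch (not part of this patch) ----------------------------
// The loops that follow turn a dense pricing-problem solution into a sparse
// column: keep only entries above a zero tolerance, and accumulate the
// column's reduced cost (against redCostX) and original cost (against
// origCost) on the fly.  denseSolutionToColumn is an illustrative
// stand-alone version using plain arrays instead of DIP's DecompVar.
#include <cmath>
#include <vector>

void denseSolutionToColumn(const double *sol, int nCols,
                           const double *redCostX, const double *origCost,
                           double tolZero,
                           std::vector<int> &ind, std::vector<double> &els,
                           double &varRedCost, double &varOrigCost)
{
   ind.clear();
   els.clear();
   varRedCost  = 0.0;
   varOrigCost = 0.0;
   for (int c = 0; c < nCols; c++) {
      if (std::fabs(sol[c]) > tolZero) {      // drop numerical noise
         ind.push_back(c);
         els.push_back(sol[c]);
         varRedCost  += redCostX[c] * sol[c]; // (c - uA'') . s
         varOrigCost += origCost[c] * sol[c]; // c . s
      }
   }
}
// ---------------------------------------------------------------------------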
- map::const_iterator mcit; - const map& sparseToOrig = model->getMapSparseToOrig(); - - for (mcit = sparseToOrig.begin(); - mcit != sparseToOrig.end(); mcit++) { - i = mcit->first; //sparse-index - c = mcit->second; //original-index - - if (!UtilIsZero(milpSolution[i], m_app->m_param.TolZero)) { - ind.push_back(c); - els.push_back(milpSolution[i]); - //the reduced cost of shat: (c-uA").s - varRedCost += redCostX[c] * milpSolution[i]; - //the original cost of shat: c.s - varOrigCost += origCost[c] * milpSolution[i]; - UTIL_DEBUG(m_app->m_param.LogDebugLevel, 5, - (*m_osLog) << "c: " << c - << " varOrigCost = " << varOrigCost - << " origCost = " << origCost[c] - << " solution = " << milpSolution[i] << endl; - ); - } - } - } else { - for (c = 0; c < n_origCols; c++) { - if (!UtilIsZero(milpSolution[c], m_app->m_param.TolZero)) { - ind.push_back(c); - els.push_back(milpSolution[c]); - //the reduced cost of shat: (c-uA").s - varRedCost += redCostX[c] * milpSolution[c]; - //the original cost of shat: c.s - varOrigCost += origCost[c] * milpSolution[c]; - UTIL_DEBUG(m_app->m_param.LogDebugLevel, 5, - (*m_osLog) << "c: " << c - << " varOrigCost = " << varOrigCost - << " origCost = " << origCost[c] - << " solution = " << milpSolution[c] << endl; - ); - } - } + printCurrentProblem(subprobSI, baseName, m_nodeStats.nodeIndex, + m_nodeStats.cutCallsTotal, + m_nodeStats.priceCallsTotal, whichBlock); + } else { + string baseName = "subProb_" + subModel.getModelName(); + + if (m_isStrongBranch) { + baseName += "_SB"; + } + + std::cout << "problem name is " << baseName << m_nodeStats.nodeIndex + << m_nodeStats.cutCallsTotal << m_nodeStats.priceCallsTotal + << whichBlock << std::endl; + printCurrentProblem(subprobSI, baseName, m_nodeStats.nodeIndex, + m_nodeStats.cutCallsTotal, + m_nodeStats.priceCallsTotal, whichBlock); + } + } + + //--- + //--- solve: min cx, s.t. A'x >= b', x in Z ([A,b] is in modelRelax.M) + //--- + subModel.solveAsMIP(solveResult, m_param, doExact, doCutoff, isRoot, + alpha - DecompEpsilon, timeLimit); + // double * milpSolution = NULL; + // if(solveResult->m_nSolutions) + // milpSolution = solveResult->m_solution; + // TODO: + // z_DW is a LB on z_IP + // During DW we get... z*_DW + RC* <= z_DW <= z*_DW + // but if we have bounds on RC* lbRC <= RC* <= ubRC + // then z*_DW + lbRC <= z*_DW + RC* <= z_DW + // we can get a valid LB update just from LB of oracle.. + // we can choose to stop and branch any time we want - + // we sometimes wait for rc=0 but really we can just look at + // gap between DW's lb and ub... + m_isColGenExact = solveResult->m_isOptimal; + UTIL_DEBUG(m_param.LogDebugLevel, 4, + (*m_osLog) << "m_isColGenExact = " << m_isColGenExact << endl;); + + // THINK: we really don't want to force the user to create vars + // and check rc and obj, etc... but they might know how to be smart + // and produce more than one, etc... THINK + if (solveResult->m_nSolutions) { + int k; + int nSol = + std::min(solveResult->m_nSolutions, m_param.SubProbNumSolLimit); + for (k = 0; k < nSol; k++) { + const double *milpSolution = solveResult->getSolution(k); + //--- + //--- create a DecompVar (shat) from the optimal solution + //--- + vector ind; + vector els; + int i, c; + double varRedCost = 0.0; // stupid - == obj ?? + double varOrigCost = 0.0; + // defaut assume it is bounded and generating extreme points + DecompVarType varType = + !solveResult->m_isUnbounded ? 
DecompVar_Point : DecompVar_Ray; + + // std::cout << "The variable Type is " << varType << std::endl; + if (model->isSparse()) { + // TODO: this can just be a vector? ever need arb access? + map::const_iterator mcit; + const map &sparseToOrig = model->getMapSparseToOrig(); + + for (mcit = sparseToOrig.begin(); mcit != sparseToOrig.end(); + mcit++) { + i = mcit->first; // sparse-index + c = mcit->second; // original-index + + if (!UtilIsZero(milpSolution[i], m_app->m_param.TolZero)) { + ind.push_back(c); + els.push_back(milpSolution[i]); + // the reduced cost of shat: (c-uA").s + varRedCost += redCostX[c] * milpSolution[i]; + // the original cost of shat: c.s + varOrigCost += origCost[c] * milpSolution[i]; + UTIL_DEBUG(m_app->m_param.LogDebugLevel, 5, + (*m_osLog) + << "c: " << c << " varOrigCost = " << varOrigCost + << " origCost = " << origCost[c] + << " solution = " << milpSolution[i] << endl;); } - - if (varType == DecompVar_Point ) { - varRedCost -= alpha;//RC = c-uA''s - alpha + } + } else { + for (c = 0; c < n_origCols; c++) { + if (!UtilIsZero(milpSolution[c], m_app->m_param.TolZero)) { + ind.push_back(c); + els.push_back(milpSolution[c]); + // the reduced cost of shat: (c-uA").s + varRedCost += redCostX[c] * milpSolution[c]; + // the original cost of shat: c.s + varOrigCost += origCost[c] * milpSolution[c]; + UTIL_DEBUG(m_app->m_param.LogDebugLevel, 5, + (*m_osLog) + << "c: " << c << " varOrigCost = " << varOrigCost + << " origCost = " << origCost[c] + << " solution = " << milpSolution[c] << endl;); } - - UTIL_DEBUG(m_app->m_param.LogDebugLevel, 3, - (*m_osLog) << "alpha = " << alpha << "\n"; - (*m_osLog) << "varRedCost = " << varRedCost << "\n"; - (*m_osLog) << "varOrigCost = " << varOrigCost << "\n"; - ); - DecompVar* var = new DecompVar(ind, els, varRedCost, - varOrigCost, varType); - var->setBlockId(whichBlock); - UTIL_DEBUG(m_app->m_param.LogDebugLevel, 5, - var->print(m_infinity);); - vars.push_back(var); - } - } - }else{ - // We didn't solve the subproblem as a generic MIP, so just take user - // variables. 
- vars = userVars; - }//END: if((rc == STAT_UNKNOWN) || (!isExact && nNewVars <= 0)){ - - //--- - //--- sanity check - if user provides a full description of - //--- relaxed problem, make sure the variables from either app or IP - //--- solver are valid - //--- - //TODO: make this check work for sparse as well - if (model && !model->isSparse() && vars.size() > 0) { - //--- - //--- get a pointer to the relaxed model for this block - //--- even if this check is for a nested model, it should - //--- be feasible to base relaxed model for this block - //--- - double* xTemp = new double[n_origCols]; - assert(xTemp); - DecompVarList::iterator it; - - for (it = vars.begin(); it != vars.end(); it++) { - int whichBlock = (*it)->getBlockId(); - - if (whichBlock != -1) { - UTIL_DEBUG(m_param.LogDebugLevel, 4, - (*m_osLog) << "Check that var satisifes relax matrix " - << whichBlock << endl; - (*it)->print(m_infinity); - ); - (*it)->fillDenseArr(n_origCols, xTemp); - //TODO: get rid of this function, use isPointFeasible - UTIL_DEBUG(m_app->m_param.LogDebugLevel, 5, - DecompSubModel& subModelCheck = - getModelRelax(whichBlock); - bool isRelaxFeas = - checkPointFeasible(subModelCheck.getModel(), - xTemp); - assert(isRelaxFeas); - ); - } - } - - UTIL_DELARR(xTemp); - } - - if (!m_param.SubProbParallel) { - m_stats.thisSolveRelax.push_back(m_stats.timerOther1.getRealTime()); - } - - UtilPrintFuncEnd(m_osLog, m_classTag, - "solveRelaxed()", m_param.LogDebugLevel, 2); - return STAT_UNKNOWN; + } + } + + if (varType == DecompVar_Point) { + varRedCost -= alpha; // RC = c-uA''s - alpha + } + + UTIL_DEBUG(m_app->m_param.LogDebugLevel, 3, + (*m_osLog) << "alpha = " << alpha << "\n"; + (*m_osLog) << "varRedCost = " << varRedCost << "\n"; + (*m_osLog) << "varOrigCost = " << varOrigCost << "\n";); + DecompVar *var = + new DecompVar(ind, els, varRedCost, varOrigCost, varType); + var->setBlockId(whichBlock); + UTIL_DEBUG(m_app->m_param.LogDebugLevel, 5, var->print(m_infinity);); + vars.push_back(var); + } + } + } else { + // We didn't solve the subproblem as a generic MIP, so just take user + // variables. 
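// --- editorial sketch (not part of this patch) ----------------------------
// The sanity check around this point expands each generated column back to
// the original x-space and asserts that it satisfies the block's relaxed
// constraints.  pointSatisfiesRelaxation is an illustrative stand-alone
// version of that test for a row system rowLB <= A x <= rowUB held in a
// CoinPackedMatrix; DIP's fillDenseArr/checkPointFeasible are not used here.
#include "CoinPackedMatrix.hpp"
#include <vector>

bool pointSatisfiesRelaxation(const CoinPackedMatrix &A,
                              const double *x, // dense point, A.getNumCols() long
                              const double *rowLB, const double *rowUB,
                              double feasTol = 1.0e-6)
{
   std::vector<double> act(A.getNumRows(), 0.0);
   A.times(x, act.data()); // act = A x
   for (int r = 0; r < A.getNumRows(); r++) {
      if (act[r] < rowLB[r] - feasTol || act[r] > rowUB[r] + feasTol) {
         return false; // row r violated beyond tolerance
      }
   }
   return true;
}
// ---------------------------------------------------------------------------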
+ vars = userVars; + } // END: if((rc == STAT_UNKNOWN) || (!isExact && nNewVars <= 0)){ + + //--- + //--- sanity check - if user provides a full description of + //--- relaxed problem, make sure the variables from either app or IP + //--- solver are valid + //--- + // TODO: make this check work for sparse as well + if (model && !model->isSparse() && vars.size() > 0) { + //--- + //--- get a pointer to the relaxed model for this block + //--- even if this check is for a nested model, it should + //--- be feasible to base relaxed model for this block + //--- + double *xTemp = new double[n_origCols]; + assert(xTemp); + DecompVarList::iterator it; + + for (it = vars.begin(); it != vars.end(); it++) { + int whichBlock = (*it)->getBlockId(); + + if (whichBlock != -1) { + UTIL_DEBUG(m_param.LogDebugLevel, 4, + (*m_osLog) << "Check that var satisifes relax matrix " + << whichBlock << endl; + (*it)->print(m_infinity);); + (*it)->fillDenseArr(n_origCols, xTemp); + // TODO: get rid of this function, use isPointFeasible + UTIL_DEBUG(m_app->m_param.LogDebugLevel, 5, + DecompSubModel &subModelCheck = getModelRelax(whichBlock); + bool isRelaxFeas = + checkPointFeasible(subModelCheck.getModel(), xTemp); + assert(isRelaxFeas);); + } + } + + UTIL_DELARR(xTemp); + } + + if (!m_param.SubProbParallel) { + m_stats.thisSolveRelax.push_back(m_stats.timerOther1.getRealTime()); + } + + UtilPrintFuncEnd(m_osLog, m_classTag, "solveRelaxed()", m_param.LogDebugLevel, + 2); + return STAT_UNKNOWN; } - //===========================================================================// -void DecompAlgo::recomposeSolution(const double* solution, - double* rsolution) -{ - UtilPrintFuncBegin(m_osLog, m_classTag, - "recomposeSolution()", m_param.LogDebugLevel, 2); - //--- - //--- refresh the output array (x-space) - //--- - DecompConstraintSet* modelCore = m_modelCore.getModel(); - UtilFillN(rsolution, modelCore->getNumCols(), 0.0); - int j; - bool isFeas = true; - - for (j = 0; j < m_masterSI->getNumCols(); j++) { - if ((fabs(solution[j]) > DecompEpsilon) && isMasterColArtificial(j)) { - isFeas = false; - break; - } - } - - if (m_param.LogDebugLevel >= 4) { - int r; - const vector& colNames = m_masterSI->getColNames(); - const vector& rowNames = m_masterSI->getRowNames(); - - for (j = 0; j < m_masterSI->getNumCols(); j++) { - if (fabs(solution[j]) > DecompEpsilon) { - if (j < static_cast(colNames.size())) - printf("MASTER %25s PRIM[%6d->%20s] = %12.10f\n", - DecompColTypeStr[m_masterColType[j]].c_str(), - j, colNames[j].c_str(), solution[j]); - else - printf("MASTER %25s PRIM[%6d] = %12.10f\n", - DecompColTypeStr[m_masterColType[j]].c_str(), - j, solution[j]); - - if (isMasterColArtificial(j)) { - isFeas = false; - } - } - } - - if (m_masterSI->getNumIntegers() == 0) { - //--- - //--- in the case of recompose after solveMasterAsMIP - //--- we cannot access valid duals - //--- - const double* dualSol = m_masterSI->getRowPrice(); - - for (r = 0; r < m_masterSI->getNumRows(); r++) { - if (fabs(dualSol[r]) > DecompEpsilon) { - if (r < static_cast(rowNames.size())) { - printf("MASTER %25s DUAL[%6d->%20s] = %12.10f\n", - DecompRowTypeStr[m_masterRowType[r]].c_str(), - r, rowNames[r].c_str(), dualSol[r]); - } else - printf("MASTER %25s DUAL[%6d] = %12.10f\n", - DecompRowTypeStr[m_masterRowType[r]].c_str(), - r, dualSol[r]); - } - } - } - } - - double lamSol; - int i, colIndex = 0; - const vector& colNames = modelCore->getColNames(); - int nColNames = static_cast(colNames.size()); - DecompVarList::const_iterator li; - - for (li = 
m_vars.begin(); li != m_vars.end(); li++) { - colIndex = (*li)->getColMasterIndex(); - lamSol = solution[colIndex]; - assert(colIndex < m_masterSI->getNumCols()); - assert(isMasterColStructural(colIndex)); - - if (lamSol > m_param.TolZero) { - UTIL_DEBUG(m_param.LogDebugLevel, 4, - (*m_osLog) << "LAMBDA[" << colIndex << "]: " << lamSol; - - if (nColNames){ - (*li)->print(m_infinity, m_osLog, colNames); - } - else { - (*li)->print(m_infinity, m_osLog); - } - ); - CoinPackedVector& v = (*li)->m_s; - const int* inds = v.getIndices(); - const double* els = v.getElements(); - - for (i = 0; i < v.getNumElements(); i++) { - rsolution[inds[i]] += els[i] * lamSol; - // std::cout << "The index of the nonMasterOnly variables is" - // << inds[i] << " " << i - // << std::endl; - } - } - } - - //--- - //--- now set the master-only variable assignments - //--- - map::iterator mit; - int nMOVars = static_cast(m_masterOnlyCols.size()); - - for (i = 0; i < nMOVars; i++) { - j = m_masterOnlyCols[i]; - mit = m_masterOnlyColsMap.find(j); - assert(mit != m_masterOnlyColsMap.end()); - colIndex = mit->second; - // For now , master-only variable is of type DecompCol_Structural_NoDelete - assert(isMasterColMasterOnly(colIndex)); - rsolution[j] = solution[colIndex]; - } - - UTIL_MSG(m_param.LogDebugLevel, 4, - const double* cLB = modelCore->getColLB(); - const double* cUB = modelCore->getColUB(); - - for (i = 0; i < modelCore->getNumCols(); i++) { - if (!UtilIsZero(fabs(rsolution[i]))) { - (*m_osLog) << "x[ " << setw(5) << i << " -> "; - - if (nColNames) { +void DecompAlgo::recomposeSolution(const double *solution, double *rsolution) { + UtilPrintFuncBegin(m_osLog, m_classTag, "recomposeSolution()", + m_param.LogDebugLevel, 2); + //--- + //--- refresh the output array (x-space) + //--- + DecompConstraintSet *modelCore = m_modelCore.getModel(); + UtilFillN(rsolution, modelCore->getNumCols(), 0.0); + int j; + bool isFeas = true; + + for (j = 0; j < m_masterSI->getNumCols(); j++) { + if ((fabs(solution[j]) > DecompEpsilon) && isMasterColArtificial(j)) { + isFeas = false; + break; + } + } + + if (m_param.LogDebugLevel >= 4) { + int r; + const vector &colNames = m_masterSI->getColNames(); + const vector &rowNames = m_masterSI->getRowNames(); + + for (j = 0; j < m_masterSI->getNumCols(); j++) { + if (fabs(solution[j]) > DecompEpsilon) { + if (j < static_cast(colNames.size())) + printf("MASTER %25s PRIM[%6d->%20s] = %12.10f\n", + DecompColTypeStr[m_masterColType[j]].c_str(), j, + colNames[j].c_str(), solution[j]); + else + printf("MASTER %25s PRIM[%6d] = %12.10f\n", + DecompColTypeStr[m_masterColType[j]].c_str(), j, solution[j]); + + if (isMasterColArtificial(j)) { + isFeas = false; + } + } + } + + if (m_masterSI->getNumIntegers() == 0) { + //--- + //--- in the case of recompose after solveMasterAsMIP + //--- we cannot access valid duals + //--- + const double *dualSol = m_masterSI->getRowPrice(); + + for (r = 0; r < m_masterSI->getNumRows(); r++) { + if (fabs(dualSol[r]) > DecompEpsilon) { + if (r < static_cast(rowNames.size())) { + printf("MASTER %25s DUAL[%6d->%20s] = %12.10f\n", + DecompRowTypeStr[m_masterRowType[r]].c_str(), r, + rowNames[r].c_str(), dualSol[r]); + } else + printf("MASTER %25s DUAL[%6d] = %12.10f\n", + DecompRowTypeStr[m_masterRowType[r]].c_str(), r, dualSol[r]); + } + } + } + } + + double lamSol; + int i, colIndex = 0; + const vector &colNames = modelCore->getColNames(); + int nColNames = static_cast(colNames.size()); + DecompVarList::const_iterator li; + + for (li = m_vars.begin(); li != 
m_vars.end(); li++) { + colIndex = (*li)->getColMasterIndex(); + lamSol = solution[colIndex]; + assert(colIndex < m_masterSI->getNumCols()); + assert(isMasterColStructural(colIndex)); + + if (lamSol > m_param.TolZero) { + UTIL_DEBUG( + m_param.LogDebugLevel, 4, + (*m_osLog) << "LAMBDA[" << colIndex << "]: " << lamSol; + + if (nColNames) { (*li)->print(m_infinity, m_osLog, colNames); } else { + (*li)->print(m_infinity, m_osLog); + }); + CoinPackedVector &v = (*li)->m_s; + const int *inds = v.getIndices(); + const double *els = v.getElements(); + + for (i = 0; i < v.getNumElements(); i++) { + rsolution[inds[i]] += els[i] * lamSol; + // std::cout << "The index of the nonMasterOnly variables is" + // << inds[i] << " " << i + // << std::endl; + } + } + } + + //--- + //--- now set the master-only variable assignments + //--- + map::iterator mit; + int nMOVars = static_cast(m_masterOnlyCols.size()); + + for (i = 0; i < nMOVars; i++) { + j = m_masterOnlyCols[i]; + mit = m_masterOnlyColsMap.find(j); + assert(mit != m_masterOnlyColsMap.end()); + colIndex = mit->second; + // For now , master-only variable is of type DecompCol_Structural_NoDelete + assert(isMasterColMasterOnly(colIndex)); + rsolution[j] = solution[colIndex]; + } + + UTIL_MSG( + m_param.LogDebugLevel, 4, const double *cLB = modelCore->getColLB(); + const double *cUB = modelCore->getColUB(); + + for (i = 0; i < modelCore->getNumCols(); i++) { + if (!UtilIsZero(fabs(rsolution[i]))) { + (*m_osLog) << "x[ " << setw(5) << i << " -> "; + + if (nColNames) { (*m_osLog) << setw(25) << colNames[i]; - } - - (*m_osLog) << " ] = " << UtilDblToStr(rsolution[i], 6) - << " LB = " << UtilDblToStr(cLB[i], 6) - << " UB = " << UtilDblToStr(cUB[i], 6) - << endl; - } - } - ); - - //--- - //--- if any artificials are positive, then don't check against core - //--- - if (isFeas) { - //TODO: get rid of this function, use isPointFeasible - isFeas = checkPointFeasible(modelCore, rsolution); - assert(isFeas); - } - - UtilPrintFuncEnd(m_osLog, m_classTag, - "recomposeSolution()", m_param.LogDebugLevel, 2); + } + + (*m_osLog) << " ] = " << UtilDblToStr(rsolution[i], 6) + << " LB = " << UtilDblToStr(cLB[i], 6) + << " UB = " << UtilDblToStr(cUB[i], 6) << endl; + } + }); + + //--- + //--- if any artificials are positive, then don't check against core + //--- + if (isFeas) { + // TODO: get rid of this function, use isPointFeasible + isFeas = checkPointFeasible(modelCore, rsolution); + assert(isFeas); + } + + UtilPrintFuncEnd(m_osLog, m_classTag, "recomposeSolution()", + m_param.LogDebugLevel, 2); } //===========================================================================// -bool DecompAlgo::isTailoffLB(const int changeLen, - const double changePerLimit) -{ - //--- - //--- check the change percentage in the last changeLen updates - //--- - assert(changeLen >= 2); - - //--- - //--- don't check for tailoff until we have enough iterations - //--- - if (static_cast(m_nodeStats.objHistoryBound.size()) <= changeLen) { +bool DecompAlgo::isTailoffLB(const int changeLen, const double changePerLimit) { + //--- + //--- check the change percentage in the last changeLen updates + //--- + assert(changeLen >= 2); + + //--- + //--- don't check for tailoff until we have enough iterations + //--- + if (static_cast(m_nodeStats.objHistoryBound.size()) <= changeLen) { + return false; + } + + //--- + //--- don't check for tailoff until we are at least in the ballpark + //--- with respect to DW gap + //--- TODO: get its own parameter? 
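// --- editorial sketch (not part of this patch) ----------------------------
// isTailoffLB() declares tail-off when the average change of the bound over
// the last changeLen updates, taken as a percentage of the bound at the
// start of that window, drops below changePerLimit (see the loop below).
// isTailingOff is an illustrative stand-alone version of that test on a
// plain history vector with the newest value last.
#include <cmath>
#include <vector>

bool isTailingOff(const std::vector<double> &boundHistory,
                  int changeLen, double changePerLimit)
{
   int n = static_cast<int>(boundHistory.size());
   if (n <= changeLen) {
      return false; // not enough iterations yet
   }
   double sumDiff = 0.0;
   for (int k = 0; k < changeLen; k++) { // last changeLen consecutive changes
      sumDiff += std::fabs(boundHistory[n - 1 - k] - boundHistory[n - 2 - k]);
   }
   double aveDiff  = sumDiff / changeLen;
   double refBound = boundHistory[n - 1 - changeLen];
   double perDiff  = std::fabs(refBound) < 1.0e-12
                        ? aveDiff
                        : 100.0 * aveDiff / std::fabs(refBound);
   return perDiff <= changePerLimit; // small relative movement => tailing off
}
// ---------------------------------------------------------------------------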
+ //--- + int nHistorySize = static_cast(m_nodeStats.objHistoryBound.size()); + + if (nHistorySize > 0) { + DecompObjBound &objBound = m_nodeStats.objHistoryBound[nHistorySize - 1]; + double masterUB = objBound.thisBoundUB; + double masterLB = objBound.thisBound; + // double masterLB = m_nodeStats.objBest.first; + double masterGap = UtilCalculateGap(masterLB, masterUB, m_infinity); + + // printf("Check tailoff, masterLB=%g masterUB=%g masterGap=%g\n", + // masterLB, masterUB, masterGap); + if (masterGap > m_param.CompressColumnsMasterGapStart) { return false; - } - - //--- - //--- don't check for tailoff until we are at least in the ballpark - //--- with respect to DW gap - //--- TODO: get its own parameter? - //--- - int nHistorySize - = static_cast(m_nodeStats.objHistoryBound.size()); - - if (nHistorySize > 0) { - DecompObjBound& objBound - = m_nodeStats.objHistoryBound[nHistorySize - 1]; - double masterUB = objBound.thisBoundUB; - double masterLB = objBound.thisBound; - //double masterLB = m_nodeStats.objBest.first; - double masterGap = UtilCalculateGap(masterLB, masterUB, m_infinity); - - //printf("Check tailoff, masterLB=%g masterUB=%g masterGap=%g\n", - // masterLB, masterUB, masterGap); - if (masterGap > m_param.CompressColumnsMasterGapStart) { - return false; - } - } - - vector< DecompObjBound >::reverse_iterator it - = m_nodeStats.objHistoryBound.rbegin(); - int len = 0; - double prevBound = (*it).bestBound; - double diff = m_infinity; - double sumDiff = 0.0; - double aveDiff = 0.0; - double perDiff = 0.0; - - for (it++; it != m_nodeStats.objHistoryBound.rend(); it++) { - diff = fabs(prevBound - (*it).bestBound); - UTIL_DEBUG(m_param.LogDebugLevel, 3, - (*m_osLog) - << setw(10) << "prevBound=" - << setw(10) << UtilDblToStr(prevBound, 2) - << setw(10) << ", thisBound=" - << setw(10) << UtilDblToStr((*it).bestBound) << endl; - ); - sumDiff += diff; - prevBound = (*it).bestBound; - len++; - - if (len >= changeLen) { - break; - } - } - - aveDiff = sumDiff / len; - - if (UtilIsZero(prevBound)) { - perDiff = aveDiff; - } else { - perDiff = 100 * aveDiff / fabs(prevBound); - } - - UTIL_MSG(m_param.LogDebugLevel, 2, - (*m_osLog) - << setw(10) << "Percentage difference in obj bound=" - << setw(10) << UtilDblToStr(perDiff, 2) << endl; - ); - - //--- - //--- if the average percentage difference is more than some threshold - //--- than we are tailing off - //--- - if (perDiff > changePerLimit) { + } + } + + vector::reverse_iterator it = + m_nodeStats.objHistoryBound.rbegin(); + int len = 0; + double prevBound = (*it).bestBound; + double diff = m_infinity; + double sumDiff = 0.0; + double aveDiff = 0.0; + double perDiff = 0.0; + + for (it++; it != m_nodeStats.objHistoryBound.rend(); it++) { + diff = fabs(prevBound - (*it).bestBound); + UTIL_DEBUG(m_param.LogDebugLevel, 3, + (*m_osLog) << setw(10) << "prevBound=" << setw(10) + << UtilDblToStr(prevBound, 2) << setw(10) + << ", thisBound=" << setw(10) + << UtilDblToStr((*it).bestBound) << endl;); + sumDiff += diff; + prevBound = (*it).bestBound; + len++; + + if (len >= changeLen) { + break; + } + } + + aveDiff = sumDiff / len; + + if (UtilIsZero(prevBound)) { + perDiff = aveDiff; + } else { + perDiff = 100 * aveDiff / fabs(prevBound); + } + + UTIL_MSG(m_param.LogDebugLevel, 2, + (*m_osLog) << setw(10) << "Percentage difference in obj bound=" + << setw(10) << UtilDblToStr(perDiff, 2) << endl;); + + //--- + //--- if the average percentage difference is more than some threshold + //--- than we are tailing off + //--- + if (perDiff > changePerLimit) 
{ + return false; + } else { + //--- + //--- Even if we are tailing off, we need to be careful of the following: + //--- If the last solution was integral (no branching candidates) + //--- but we are not done pricing out (i.e., a column with negative + //--- RC still exist) and we declare that we are tailing off then the + //--- node will get put back in the node work queue. This can lead + //--- to that node being repeatedly stopped and reset. It is + //--- better to just price it out since we cannot branch on it in + //--- this state. + //--- + std::vector> downBranchLB, downBranchUB, upBranchLB, + upBranchUB; + bool gotBranch = + chooseBranchSet(downBranchLB, downBranchUB, upBranchLB, upBranchUB); + + if (gotBranch) { + return true; + } else { return false; - } else { - //--- - //--- Even if we are tailing off, we need to be careful of the following: - //--- If the last solution was integral (no branching candidates) - //--- but we are not done pricing out (i.e., a column with negative - //--- RC still exist) and we declare that we are tailing off then the - //--- node will get put back in the node work queue. This can lead - //--- to that node being repeatedly stopped and reset. It is - //--- better to just price it out since we cannot branch on it in - //--- this state. - //--- - std::vector< std::pair > downBranchLB, - downBranchUB, upBranchLB, upBranchUB; - bool gotBranch = chooseBranchSet(downBranchLB, - downBranchUB, - upBranchLB, - upBranchUB); - - if (gotBranch) { - return true; - } else { - return false; - } - } + } + } } //===========================================================================// -OsiSolverInterface *DecompAlgo::getOsiLpSolverInterface() -{ - if (m_param.DecompLPSolver == "Clp"){ +OsiSolverInterface *DecompAlgo::getOsiLpSolverInterface() { + if (m_param.DecompLPSolver == "Clp") { #ifdef DIP_HAS_CLP - return(new OsiClpSolverInterface()); + return (new OsiClpSolverInterface()); #else - throw UtilException("Clp selected as solver, but it's not available", - "getOsiLpSolverInterface", "DecompAlgo"); + throw UtilException("Clp selected as solver, but it's not available", + "getOsiLpSolverInterface", "DecompAlgo"); #endif - }else if (m_param.DecompLPSolver == "CPLEX"){ + } else if (m_param.DecompLPSolver == "CPLEX") { #ifdef DIP_HAS_CPX - return(new OsiCpxSolverInterface()); + return (new OsiCpxSolverInterface()); #else - throw UtilException("CPLEX selected as solver, but it's not available", - "getOsiLpSolverInterface", "DecompAlgo"); + throw UtilException("CPLEX selected as solver, but it's not available", + "getOsiLpSolverInterface", "DecompAlgo"); #endif - }else if (m_param.DecompLPSolver == "Gurobi"){ + } else if (m_param.DecompLPSolver == "Gurobi") { #ifdef DIP_HAS_GRB - return(new OsiGrbSolverInterface()); + return (new OsiGrbSolverInterface()); #else - throw UtilException("Gurobi selected as solver, but it's not available", - "getOsiLpSolverInterface", "DecompAlgo"); + throw UtilException("Gurobi selected as solver, but it's not available", + "getOsiLpSolverInterface", "DecompAlgo"); #endif - }else{ - throw UtilException("Unknown solver selected", - "getOsiLpSolverInterface", "DecompAlgo"); - } + } else { + throw UtilException("Unknown solver selected", "getOsiLpSolverInterface", + "DecompAlgo"); + } } //===========================================================================// -OsiSolverInterface *DecompAlgo::getOsiIpSolverInterface() -{ - if (m_param.DecompIPSolver == "SYMPHONY"){ +OsiSolverInterface *DecompAlgo::getOsiIpSolverInterface() { + if 
(m_param.DecompIPSolver == "SYMPHONY") { #ifdef DIP_HAS_SYMPHONY - return (new OsiSymSolverInterface()); + return (new OsiSymSolverInterface()); #else - throw UtilException("SYMPHONY selected as solver, but it's not available", - "getOsiIpSolverInterface", "DecompAlgo"); + throw UtilException("SYMPHONY selected as solver, but it's not available", + "getOsiIpSolverInterface", "DecompAlgo"); #endif - }else if (m_param.DecompIPSolver == "Cbc"){ + } else if (m_param.DecompIPSolver == "Cbc") { #if defined(DIP_HAS_CLP) && defined(DIP_HAS_CBC) - //We return a ClpSolverInterface object here, since we'll make a CbcModel - //object from it and Cbc expects a Clp object. Yes, a bit tangled. - return(new OsiClpSolverInterface()); + // We return a ClpSolverInterface object here, since we'll make a CbcModel + // object from it and Cbc expects a Clp object. Yes, a bit tangled. + return (new OsiClpSolverInterface()); #else - throw UtilException("Cbc selected as solver, but it's not available", - "getOsiIpSolverInterface", "DecompAlgo"); + throw UtilException("Cbc selected as solver, but it's not available", + "getOsiIpSolverInterface", "DecompAlgo"); #endif - }else if (m_param.DecompIPSolver == "CPLEX"){ + } else if (m_param.DecompIPSolver == "CPLEX") { #ifdef DIP_HAS_CPX - return(new OsiCpxSolverInterface()); + return (new OsiCpxSolverInterface()); #else - throw UtilException("CPLEX selected as solver, but it's not available", - "getOsiIpSolverInterface", "DecompAlgo"); + throw UtilException("CPLEX selected as solver, but it's not available", + "getOsiIpSolverInterface", "DecompAlgo"); #endif - }else if (m_param.DecompIPSolver == "Gurobi"){ + } else if (m_param.DecompIPSolver == "Gurobi") { #ifdef DIP_HAS_GRB - return(new OsiGrbSolverInterface()); + return (new OsiGrbSolverInterface()); #else - throw UtilException("Gurobi selected as solver, but it's not available", - "getOsiIpSolverInterface", "DecompAlgo"); + throw UtilException("Gurobi selected as solver, but it's not available", + "getOsiIpSolverInterface", "DecompAlgo"); #endif - }else{ - throw UtilException("Unknown solver selected", - "getOsiIpSolverInterface", "DecompAlgo"); - } + } else { + throw UtilException("Unknown solver selected", "getOsiIpSolverInterface", + "DecompAlgo"); + } } diff --git a/Dip/src/DecompAlgoC.cpp b/Dip/src/DecompAlgoC.cpp old mode 100755 new mode 100644 index 948ce1a1..5501d3cb --- a/Dip/src/DecompAlgoC.cpp +++ b/Dip/src/DecompAlgoC.cpp @@ -13,108 +13,99 @@ //===========================================================================// //===========================================================================// -#include "DecompApp.h" #include "DecompAlgoC.h" #include "DecompAlgoD.h" +#include "DecompApp.h" #include "DecompConstraintSet.h" using namespace std; -//TODO: OsiDualObjLimit = gUB? if LB is higher, then can stop early +// TODO: OsiDualObjLimit = gUB? if LB is higher, then can stop early //===========================================================================// -void DecompAlgoC::createMasterProblem(DecompVarList& initVars) -{ - //--- - //--- Initialize the solver interface for the master problem. - //--- C: min c(x) - //--- A' x >= b' [optional?] 
- //--- A''x >= b'' - //--- l <= x <= u - //--- - //--- m_modelRelax contains [A', b' ] in terms of x (if explicit) - //--- m_modelCore contains [A'', b''] in terms of x - //--- - UtilPrintFuncBegin(m_osLog, m_classTag, - "createMasterProblem()", m_param.LogDebugLevel, 2); - loadSIFromModel(m_masterSI); +void DecompAlgoC::createMasterProblem(DecompVarList &initVars) { + //--- + //--- Initialize the solver interface for the master problem. + //--- C: min c(x) + //--- A' x >= b' [optional?] + //--- A''x >= b'' + //--- l <= x <= u + //--- + //--- m_modelRelax contains [A', b' ] in terms of x (if explicit) + //--- m_modelCore contains [A'', b''] in terms of x + //--- + UtilPrintFuncBegin(m_osLog, m_classTag, "createMasterProblem()", + m_param.LogDebugLevel, 2); + loadSIFromModel(m_masterSI); - if (m_param.CutCGL) { - m_cutgenSI = new OsiClpSolverInterface(); - CoinAssertHint(m_cutgenSI, "Error: Out of Memory"); - loadSIFromModel(m_cutgenSI, true); - } + if (m_param.CutCGL) { + m_cutgenSI = new OsiClpSolverInterface(); + CoinAssertHint(m_cutgenSI, "Error: Out of Memory"); + loadSIFromModel(m_cutgenSI, true); + } - UtilPrintFuncEnd(m_osLog, m_classTag, - "createMasterProblem()", m_param.LogDebugLevel, 2); + UtilPrintFuncEnd(m_osLog, m_classTag, "createMasterProblem()", + m_param.LogDebugLevel, 2); } //===========================================================================// -void DecompAlgoC::setMasterBounds(const double* lbs, - const double* ubs) -{ - UtilPrintFuncBegin(m_osLog, m_classTag, - "setMasterBounds()", m_param.LogDebugLevel, 2); - int c; - const int n_cols = m_masterSI->getNumCols(); - //TODO: reuse this memory - col size does not change - int* index = new int[n_cols]; - double* bounds = new double[2 * n_cols]; +void DecompAlgoC::setMasterBounds(const double *lbs, const double *ubs) { + UtilPrintFuncBegin(m_osLog, m_classTag, "setMasterBounds()", + m_param.LogDebugLevel, 2); + int c; + const int n_cols = m_masterSI->getNumCols(); + // TODO: reuse this memory - col size does not change + int *index = new int[n_cols]; + double *bounds = new double[2 * n_cols]; - for (c = 0; c < n_cols; c++) { - index[c] = c; - bounds[2 * c] = lbs[c]; - bounds[2 * c + 1] = ubs[c]; - } + for (c = 0; c < n_cols; c++) { + index[c] = c; + bounds[2 * c] = lbs[c]; + bounds[2 * c + 1] = ubs[c]; + } - m_masterSI->setColSetBounds(index, index + n_cols, bounds); - UTIL_DELARR(index); - UTIL_DELARR(bounds); - UtilPrintFuncEnd(m_osLog, m_classTag, - "setMasterBounds()", m_param.LogDebugLevel, 2); + m_masterSI->setColSetBounds(index, index + n_cols, bounds); + UTIL_DELARR(index); + UTIL_DELARR(bounds); + UtilPrintFuncEnd(m_osLog, m_classTag, "setMasterBounds()", + m_param.LogDebugLevel, 2); } //===========================================================================// -bool DecompAlgoC::updateObjBound(const double mostNegRC) -{ - //--- - //--- C : LB = masterLP obj - //--- PC : LB = zDW_RMP + RC* <= zDW <= zDW_RMP - //--- where RC* is the most negative reduced cost - //--- assuming the relaxation subproblem was solved exactly - //--- - UtilPrintFuncBegin(m_osLog, m_classTag, - "updateObjBoundLB()", m_param.LogDebugLevel, 2); - double thisBoundLB = m_masterSI->getObjValue(); - setObjBound(thisBoundLB, thisBoundLB); - UTIL_DEBUG(m_param.LogDebugLevel, 5, - (*m_osLog) - << "ThisLB = " << UtilDblToStr(thisBoundLB) << "\t" - << "BestLB = " << UtilDblToStr(m_nodeStats.objBest.first) - << "\n"; - ); - UtilPrintFuncEnd(m_osLog, m_classTag, - "updateObjBoundLB()", m_param.LogDebugLevel, 2); - return false; +bool 
DecompAlgoC::updateObjBound(const double mostNegRC) { + //--- + //--- C : LB = masterLP obj + //--- PC : LB = zDW_RMP + RC* <= zDW <= zDW_RMP + //--- where RC* is the most negative reduced cost + //--- assuming the relaxation subproblem was solved exactly + //--- + UtilPrintFuncBegin(m_osLog, m_classTag, "updateObjBoundLB()", + m_param.LogDebugLevel, 2); + double thisBoundLB = m_masterSI->getObjValue(); + setObjBound(thisBoundLB, thisBoundLB); + UTIL_DEBUG(m_param.LogDebugLevel, 5, + (*m_osLog) << "ThisLB = " << UtilDblToStr(thisBoundLB) << "\t" + << "BestLB = " + << UtilDblToStr(m_nodeStats.objBest.first) << "\n";); + UtilPrintFuncEnd(m_osLog, m_classTag, "updateObjBoundLB()", + m_param.LogDebugLevel, 2); + return false; } //===========================================================================// -void DecompAlgoC::recomposeSolution(const double* solution, - double* rsolution) -{ - UtilPrintFuncBegin(m_osLog, m_classTag, - "recomposeSolution()", m_param.LogDebugLevel, 2); - DecompConstraintSet* modelCore = m_modelCore.getModel(); - memcpy(rsolution, solution, modelCore->getNumCols() * sizeof(double)); - UtilPrintFuncEnd(m_osLog, m_classTag, - "recomposeSolution()", m_param.LogDebugLevel, 2); +void DecompAlgoC::recomposeSolution(const double *solution, double *rsolution) { + UtilPrintFuncBegin(m_osLog, m_classTag, "recomposeSolution()", + m_param.LogDebugLevel, 2); + DecompConstraintSet *modelCore = m_modelCore.getModel(); + memcpy(rsolution, solution, modelCore->getNumCols() * sizeof(double)); + UtilPrintFuncEnd(m_osLog, m_classTag, "recomposeSolution()", + m_param.LogDebugLevel, 2); } //===========================================================================// -void DecompAlgoC::phaseDone() -{ - //1 = every iter - //2 = only at end of node +void DecompAlgoC::phaseDone() { + // 1 = every iter + // 2 = only at end of node #if 0 DecompConstraintSet* modelCore = m_modelCore.getModel(); @@ -176,145 +167,139 @@ void DecompAlgoC::phaseDone() } //===========================================================================// -void DecompAlgoC::phaseUpdate(DecompPhase& phase, - DecompStatus& status) -{ - bool isCutPossible, mustSwitch, considerSwitch; - DecompPhase nextPhase = PHASE_UNKNOWN; - DecompStatus nextStatus = status; - pair& objBest = m_nodeStats.objBest; - int cutCallsTotal = m_nodeStats.cutCallsTotal; - int cutCallsRound = m_nodeStats.cutCallsRound; - int cutsThisCall = m_nodeStats.cutsThisCall; - int cutsThisRound = m_nodeStats.cutsThisRound; - UtilPrintFuncBegin(m_osLog, m_classTag, - "phaseUpdate()", m_param.LogDebugLevel, 2); - m_phaseLast = phase; - UTIL_MSG(m_app->m_param.LogDebugLevel, 3, - (*m_osLog) << "cutsThisRound : " << cutsThisRound << "\n"; - (*m_osLog) << "cutsThisCall : " << cutsThisCall << "\n"; - (*m_osLog) << "cutCallsTotal : " << cutCallsTotal << "\n"; - (*m_osLog) << "cutCallsRound : " << cutCallsRound << "\n"; - (*m_osLog) << "TotalCutItersLimit : " << m_param.TotalCutItersLimit << "\n"; - (*m_osLog) << "RoundCutItersLimit : " << m_param.RoundCutItersLimit << "\n"; - (*m_osLog) << "PHASEIN : " - << DecompPhaseStr[phase] << "\n"; - (*m_osLog) << "STATIN : " - << DecompStatusStr[status] << "\n"; - (*m_osLog) << "BestLB : " - << UtilDblToStr(objBest.first) << "\n"; - (*m_osLog) << "BestUB : " - << UtilDblToStr(objBest.second) << "\n"; - ); +void DecompAlgoC::phaseUpdate(DecompPhase &phase, DecompStatus &status) { + bool isCutPossible, mustSwitch, considerSwitch; + DecompPhase nextPhase = PHASE_UNKNOWN; + DecompStatus nextStatus = status; + pair &objBest = 
m_nodeStats.objBest; + int cutCallsTotal = m_nodeStats.cutCallsTotal; + int cutCallsRound = m_nodeStats.cutCallsRound; + int cutsThisCall = m_nodeStats.cutsThisCall; + int cutsThisRound = m_nodeStats.cutsThisRound; + UtilPrintFuncBegin(m_osLog, m_classTag, "phaseUpdate()", + m_param.LogDebugLevel, 2); + m_phaseLast = phase; + UTIL_MSG(m_app->m_param.LogDebugLevel, 3, + (*m_osLog) << "cutsThisRound : " << cutsThisRound << "\n"; + (*m_osLog) << "cutsThisCall : " << cutsThisCall << "\n"; + (*m_osLog) << "cutCallsTotal : " << cutCallsTotal << "\n"; + (*m_osLog) << "cutCallsRound : " << cutCallsRound << "\n"; + (*m_osLog) << "TotalCutItersLimit : " << m_param.TotalCutItersLimit + << "\n"; + (*m_osLog) << "RoundCutItersLimit : " << m_param.RoundCutItersLimit + << "\n"; + (*m_osLog) << "PHASEIN : " << DecompPhaseStr[phase] << "\n"; + (*m_osLog) << "STATIN : " << DecompStatusStr[status] << "\n"; + (*m_osLog) << "BestLB : " << UtilDblToStr(objBest.first) + << "\n"; + (*m_osLog) << "BestUB : " << UtilDblToStr(objBest.second) + << "\n";); - //--- - //--- if the lower bound meets the global ub, we are done - //--- - //if(objBest.first >= (objBest.second - DecompEpsilon)){ - // nextPhase = PHASE_DONE; - // goto PHASE_UPDATE_FINISH; - //} - //TODO: check infeasible case - //--- - //--- if no cuts, then jump to finish - //--- - if ((m_param.TotalCutItersLimit == 0) || - (m_param.RoundCutItersLimit == 0)) { - nextPhase = PHASE_DONE; - UTIL_DEBUG(m_param.LogDebugLevel, 4, - (*m_osLog) << "Done - no cuts allowed." << endl;); - goto PHASE_UPDATE_FINISH; - } + //--- + //--- if the lower bound meets the global ub, we are done + //--- + // if(objBest.first >= (objBest.second - DecompEpsilon)){ + // nextPhase = PHASE_DONE; + // goto PHASE_UPDATE_FINISH; + //} + // TODO: check infeasible case + //--- + //--- if no cuts, then jump to finish + //--- + if ((m_param.TotalCutItersLimit == 0) || (m_param.RoundCutItersLimit == 0)) { + nextPhase = PHASE_DONE; + UTIL_DEBUG(m_param.LogDebugLevel, 4, + (*m_osLog) << "Done - no cuts allowed." << endl;); + goto PHASE_UPDATE_FINISH; + } - //--- - //--- we have exceeded the cut iter limit we are done - //--- - if (cutCallsTotal >= m_param.TotalCutItersLimit) { - nextPhase = PHASE_DONE; - UTIL_DEBUG(m_param.LogDebugLevel, 4, - (*m_osLog) << "Done total cut calls exceeds limit." << endl;); - goto PHASE_UPDATE_FINISH; - } + //--- + //--- we have exceeded the cut iter limit we are done + //--- + if (cutCallsTotal >= m_param.TotalCutItersLimit) { + nextPhase = PHASE_DONE; + UTIL_DEBUG(m_param.LogDebugLevel, 4, + (*m_osLog) << "Done total cut calls exceeds limit." << endl;); + goto PHASE_UPDATE_FINISH; + } - if (status == STAT_INFEASIBLE) { - nextPhase = PHASE_DONE; - UTIL_DEBUG(m_param.LogDebugLevel, 4, - (*m_osLog) << "Done status INFEASIBLE." << endl;); - goto PHASE_UPDATE_FINISH; - } + if (status == STAT_INFEASIBLE) { + nextPhase = PHASE_DONE; + UTIL_DEBUG(m_param.LogDebugLevel, 4, + (*m_osLog) << "Done status INFEASIBLE." 
<< endl;); + goto PHASE_UPDATE_FINISH; + } - isCutPossible = (cutCallsTotal < m_param.TotalCutItersLimit); + isCutPossible = (cutCallsTotal < m_param.TotalCutItersLimit); - switch (phase) { - /*case PHASE_INIT: - { - if(isCutPossible) - nextPhase = PHASE_CUT; - else - nextPhase = PHASE_DONE; - } - break;*/ - case PHASE_CUT: { - mustSwitch = false; - considerSwitch = false; + switch (phase) { + /*case PHASE_INIT: + { + if(isCutPossible) + nextPhase = PHASE_CUT; + else + nextPhase = PHASE_DONE; + } + break;*/ + case PHASE_CUT: { + mustSwitch = false; + considerSwitch = false; - if ((cutCallsTotal > 0) && - (!isCutPossible || (cutsThisCall == 0) || (cutsThisRound == 0))) { - mustSwitch = true; - } + if ((cutCallsTotal > 0) && + (!isCutPossible || (cutsThisCall == 0) || (cutsThisRound == 0))) { + mustSwitch = true; + } - if (cutCallsRound >= m_param.RoundCutItersLimit) { - considerSwitch = true; - } + if (cutCallsRound >= m_param.RoundCutItersLimit) { + considerSwitch = true; + } - //printf("isCutPossible =%d\n", isCutPossible); - //printf("mustSwitch =%d\n", mustSwitch); - //printf("considerSwitch=%d\n", considerSwitch); - if (mustSwitch) { - //--- - //--- we must switch from cutting - //--- - nextPhase = PHASE_DONE; - }//END: if(mustSwitch) - else if (considerSwitch) { - //--- - //--- we consider switching from cutting - //--- - if (!isCutPossible) { - //--- - //--- if we exceed both iter limits, we are done - //--- - nextPhase = PHASE_DONE; - } else { - //--- - //--- if we exceed the price iter limit, but not the cut limit - //--- since we are not in mustSwitch, m_cutsThisRound > 0, so - //--- we can go back to cutting, even though it violates the - //--- round counter, because we have no other choice - //--- - nextPhase = PHASE_CUT; - } - } //END: else if(considerSwitch) - else { - nextPhase = PHASE_CUT; + // printf("isCutPossible =%d\n", isCutPossible); + // printf("mustSwitch =%d\n", mustSwitch); + // printf("considerSwitch=%d\n", considerSwitch); + if (mustSwitch) { + //--- + //--- we must switch from cutting + //--- + nextPhase = PHASE_DONE; + } // END: if(mustSwitch) + else if (considerSwitch) { + //--- + //--- we consider switching from cutting + //--- + if (!isCutPossible) { + //--- + //--- if we exceed both iter limits, we are done + //--- + nextPhase = PHASE_DONE; + } else { + //--- + //--- if we exceed the price iter limit, but not the cut limit + //--- since we are not in mustSwitch, m_cutsThisRound > 0, so + //--- we can go back to cutting, even though it violates the + //--- round counter, because we have no other choice + //--- + nextPhase = PHASE_CUT; } - } - break; - case PHASE_DONE: - break; - case PHASE_UNKNOWN: - default: - assert(0); - } + } // END: else if(considerSwitch) + else { + nextPhase = PHASE_CUT; + } + } break; + case PHASE_DONE: + break; + case PHASE_UNKNOWN: + default: + assert(0); + } PHASE_UPDATE_FINISH: - UTIL_MSG(m_param.LogDebugLevel, 3, - (*m_osLog) << "PhaseOut: " << DecompPhaseStr[nextPhase]; - (*m_osLog) << " StatusOut: " << DecompStatusStr[nextStatus]; - (*m_osLog) << endl; - ); - phase = nextPhase; - status = nextStatus; - UtilPrintFuncEnd(m_osLog, m_classTag, - "phaseUpdate()", m_param.LogDebugLevel, 2); + UTIL_MSG(m_param.LogDebugLevel, 3, + (*m_osLog) << "PhaseOut: " << DecompPhaseStr[nextPhase]; + (*m_osLog) << " StatusOut: " << DecompStatusStr[nextStatus]; + (*m_osLog) << endl;); + phase = nextPhase; + status = nextStatus; + UtilPrintFuncEnd(m_osLog, m_classTag, "phaseUpdate()", m_param.LogDebugLevel, + 2); } diff --git 
a/Dip/src/DecompAlgoCGL.cpp b/Dip/src/DecompAlgoCGL.cpp index 9634cfed..056c0fd9 100644 --- a/Dip/src/DecompAlgoCGL.cpp +++ b/Dip/src/DecompAlgoCGL.cpp @@ -16,293 +16,277 @@ #include "DecompAlgo.h" #include "DecompCutOsi.h" //===========================================================================// -#include "CglGomory.hpp" -#include "CglProbing.hpp" -#include "CglKnapsackCover.hpp" #include "CglClique.hpp" #include "CglFlowCover.hpp" +#include "CglGomory.hpp" +#include "CglKnapsackCover.hpp" #include "CglMixedIntegerRounding2.hpp" +#include "CglProbing.hpp" #include "CoinPackedMatrix.hpp" using namespace std; //===========================================================================// -int DecompAlgoCGL::initGenerators(const int doClique, - const int doOddHole, - const int doFlowCover, - const int doKnapCover, - const int doMixIntRound, - const int doGomory) -{ - int status = DecompStatOk; - - if (doClique) { - m_genClique = new CglClique; - m_genClique->setStarCliqueReport(false); - m_genClique->setRowCliqueReport (false); - - if (!m_genClique) { - return DecompStatOutOfMemory; - } - } - - if (doOddHole) { - m_genOddHole = new CglOddHole; - - if (!m_genClique) { - return DecompStatOutOfMemory; - } - } - - if (doFlowCover) { - m_genFlowCover = new CglFlowCover; - - if (!m_genFlowCover) { - return DecompStatOutOfMemory; - } - } - - if (doKnapCover) { - m_genKnapCover = new CglKnapsackCover; - - if (!m_genKnapCover) { - return DecompStatOutOfMemory; - } - } - - if (doMixIntRound) { - m_genMixIntRound = new CglMixedIntegerRounding2; - - if (!m_genMixIntRound) { - return DecompStatOutOfMemory; - } - } - - if (doGomory) { - m_genGomory = new CglGomory; - - if (!m_genGomory) { - return DecompStatOutOfMemory; - } - } - - return status; +int DecompAlgoCGL::initGenerators(const int doClique, const int doOddHole, + const int doFlowCover, const int doKnapCover, + const int doMixIntRound, const int doGomory) { + int status = DecompStatOk; + + if (doClique) { + m_genClique = new CglClique; + m_genClique->setStarCliqueReport(false); + m_genClique->setRowCliqueReport(false); + + if (!m_genClique) { + return DecompStatOutOfMemory; + } + } + + if (doOddHole) { + m_genOddHole = new CglOddHole; + + if (!m_genClique) { + return DecompStatOutOfMemory; + } + } + + if (doFlowCover) { + m_genFlowCover = new CglFlowCover; + + if (!m_genFlowCover) { + return DecompStatOutOfMemory; + } + } + + if (doKnapCover) { + m_genKnapCover = new CglKnapsackCover; + + if (!m_genKnapCover) { + return DecompStatOutOfMemory; + } + } + + if (doMixIntRound) { + m_genMixIntRound = new CglMixedIntegerRounding2; + + if (!m_genMixIntRound) { + return DecompStatOutOfMemory; + } + } + + if (doGomory) { + m_genGomory = new CglGomory; + + if (!m_genGomory) { + return DecompStatOutOfMemory; + } + } + + return status; } //===========================================================================// -int DecompAlgoCGL::generateCuts(OsiSolverInterface* cutGenSI, - OsiSolverInterface* masterSI, - double* xhat, - vector& integerVars, - DecompCutList& newCuts) -{ - OsiCuts osiCuts; - int status = DecompStatOk; - int nCliqueCuts = 0; - int nOddHoleCuts = 0; - int nFlowCoverCuts = 0; - int nKnapCoverCuts = 0; - int nMixIntRoundCuts = 0; - int nGomoryCuts = 0; - int nTotalCuts = 0; - //--- - //--- this is typically coming from relaxed master problem - //--- which has no defined integers (why not?) you are using - //--- initalSolve, resolve which solves relaxation anyway... - //--- can't the master and m_cutgenSI just stores the integers? 
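// --- editorial sketch (not part of this patch) ----------------------------
// initGenerators() above wires up the CGL separators that generateCuts()
// then runs against the cut-generation LP, collecting everything in a
// shared OsiCuts pool and counting what each generator contributed.
// generateSomeCuts is an illustrative stand-alone version of that pattern
// with just two generators; the solver is assumed to already hold the LP
// and the fractional point to separate.
#include "CglGomory.hpp"
#include "CglKnapsackCover.hpp"
#include "OsiClpSolverInterface.hpp"
#include "OsiCuts.hpp"

int generateSomeCuts(OsiClpSolverInterface &si, OsiCuts &cuts)
{
   CglGomory        gomory;
   CglKnapsackCover knapsack;

   int before = cuts.sizeCuts();
   gomory.generateCuts(si, cuts);   // each generator appends to the same pool
   knapsack.generateCuts(si, cuts);
   return cuts.sizeCuts() - before; // number of cuts found in this call
}
// ---------------------------------------------------------------------------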
- //--- - //--- if you set integer on the fly and then unset - //--- will that break things? - //--- if master, need to set, if cutgenSI, don't... - //--- - //int nInts = static_cast(integerVars.size()); - //if(nInts > 0) - // si->setInteger(&integerVars[0], nInts); - //--- - //--- some CGLs need row activities too - //--- currently, no easy way to set this - //--- - OsiClpSolverInterface* cutGenClpSI = - dynamic_cast(cutGenSI); - assert(cutGenClpSI); - const int nRows = cutGenClpSI->getNumRows(); - //--- - //--- calculate activity - //--- - //TODO: note, this design never does cuts on cuts - const CoinPackedMatrix* M = cutGenClpSI->getMatrixByRow(); - double* act = new double[nRows]; - assert(act); - M->times(xhat, act); - //--- - //--- set primal column solution - //--- - cutGenClpSI->setColSolution(xhat); - //--- - //--- set primal row solution (i.e., activities) - //--- - //write a si->setRowPrice for OsiClp - //si->setRowPrice(act);//BAD NAME! - bool mustDeleteWS = true; - CoinWarmStart* warmStart = NULL; - - //TODO: check on crossover code - some speedups possible - // with a version that accepts memory - so not alloc/free - // too often - switch (m_algo) { - case CUT: +int DecompAlgoCGL::generateCuts(OsiSolverInterface *cutGenSI, + OsiSolverInterface *masterSI, double *xhat, + vector &integerVars, + DecompCutList &newCuts) { + OsiCuts osiCuts; + int status = DecompStatOk; + int nCliqueCuts = 0; + int nOddHoleCuts = 0; + int nFlowCoverCuts = 0; + int nKnapCoverCuts = 0; + int nMixIntRoundCuts = 0; + int nGomoryCuts = 0; + int nTotalCuts = 0; + //--- + //--- this is typically coming from relaxed master problem + //--- which has no defined integers (why not?) you are using + //--- initalSolve, resolve which solves relaxation anyway... + //--- can't the master and m_cutgenSI just stores the integers? + //--- + //--- if you set integer on the fly and then unset + //--- will that break things? + //--- if master, need to set, if cutgenSI, don't... + //--- + // int nInts = static_cast(integerVars.size()); + // if(nInts > 0) + // si->setInteger(&integerVars[0], nInts); + //--- + //--- some CGLs need row activities too + //--- currently, no easy way to set this + //--- + OsiClpSolverInterface *cutGenClpSI = + dynamic_cast(cutGenSI); + assert(cutGenClpSI); + const int nRows = cutGenClpSI->getNumRows(); + //--- + //--- calculate activity + //--- + // TODO: note, this design never does cuts on cuts + const CoinPackedMatrix *M = cutGenClpSI->getMatrixByRow(); + double *act = new double[nRows]; + assert(act); + M->times(xhat, act); + //--- + //--- set primal column solution + //--- + cutGenClpSI->setColSolution(xhat); + //--- + //--- set primal row solution (i.e., activities) + //--- + // write a si->setRowPrice for OsiClp + // si->setRowPrice(act);//BAD NAME! 
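// --- editorial sketch (not part of this patch) ----------------------------
// Just above, the candidate point xhat is pushed into the cut-generation
// LP: row activities are computed as act = M * xhat and the column solution
// is set explicitly, since the CGL generators separate the point stored in
// the solver.  primeCutGenerationLP is an illustrative stand-alone sketch
// of that priming step; as the source notes, OsiClp has no direct setter
// for row activities, so only the column solution is installed.
#include "OsiClpSolverInterface.hpp"
#include <vector>

void primeCutGenerationLP(OsiClpSolverInterface &si, const double *xhat)
{
   const CoinPackedMatrix *M = si.getMatrixByRow(); // row-ordered copy
   std::vector<double> act(si.getNumRows(), 0.0);
   M->times(xhat, act.data()); // act = A * xhat (available for checks/logging)
   si.setColSolution(xhat);    // primal point the generators will separate
}
// ---------------------------------------------------------------------------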
+ bool mustDeleteWS = true; + CoinWarmStart *warmStart = NULL; + + // TODO: check on crossover code - some speedups possible + // with a version that accepts memory - so not alloc/free + // too often + switch (m_algo) { + case CUT: + //--- + //--- set master warm start in cgl SI + //--- + warmStart = masterSI->getPointerToWarmStart(mustDeleteWS); + cutGenClpSI->setWarmStart(warmStart); + break; + case PRICE_AND_CUT: + case RELAX_AND_CUT: + + if (m_genGomory) { //--- - //--- set master warm start in cgl SI + //--- crossover from xhat to basic solution + //--- + //--- + //--- OsiClp::crossover + //--- options - 0 no presolve (use primal and dual) + //--- 1 presolve (just use primal) + //--- 2 no presolve (just use primal) + //--- basis - 0 use all slack basis + //--- 1 try and put some in basis //--- - warmStart = masterSI->getPointerToWarmStart(mustDeleteWS); - cutGenClpSI->setWarmStart(warmStart); - break; - case PRICE_AND_CUT: - case RELAX_AND_CUT: - - if (m_genGomory) { - //--- - //--- crossover from xhat to basic solution - //--- - //--- - //--- OsiClp::crossover - //--- options - 0 no presolve (use primal and dual) - //--- 1 presolve (just use primal) - //--- 2 no presolve (just use primal) - //--- basis - 0 use all slack basis - //--- 1 try and put some in basis - //--- #ifdef TRUNK_BUILD - int crossOptions = 2; - int crossBasis = 1; - //add obj cut - // obj >= dw-master obj - then generate gomory? - // or do that all the time for cuts? - cutGenClpSI->crossover(crossOptions, crossBasis); + int crossOptions = 2; + int crossBasis = 1; + // add obj cut + // obj >= dw-master obj - then generate gomory? + // or do that all the time for cuts? + cutGenClpSI->crossover(crossOptions, crossBasis); #endif - //cutGenClpSI->resolve();//need? - ///////////STOP -> getting all kinds of not violated - ///// maybe try options=2, no presolve as it might be - ///// screwing up the model? - break; - } - - default: + // cutGenClpSI->resolve();//need? + ///////////STOP -> getting all kinds of not violated + ///// maybe try options=2, no presolve as it might be + ///// screwing up the model? 
break; - } - - /*#ifdef __DECOMP_IP_CPX__ - OsiCpxSolverInterface * subprobSI_Cpx - = dynamic_cast(si); - CPXENVptr cpxEnv = subprobSI_Cpx->getEnvironmentPtr(); - CPXLPptr cpxLp = subprobSI_Cpx->getLpPtr(); - - int err = CPXcopystart( cpxEnv, cpxLp, NULL, NULL, - const_cast( xhat ), - const_cast( act ), - NULL, NULL ); - printf("Err=%d\n",err);fflush(stdout); - assert(!err); - #endif*/ - if (m_genClique) { - UTIL_MSG(m_logLevel, 3, - (*m_logStream) << "Calling cut generator: cliques\n"; - ); - m_genClique->generateCuts(*cutGenClpSI, osiCuts); - nCliqueCuts = osiCuts.sizeCuts() - nTotalCuts; - nTotalCuts = osiCuts.sizeCuts(); - } - - if (m_genOddHole) { - UTIL_MSG(m_logLevel, 3, - (*m_logStream) << "Calling cut generator: cliques\n"; - ); - m_genOddHole->generateCuts(*cutGenClpSI, osiCuts); - nOddHoleCuts = osiCuts.sizeCuts() - nTotalCuts; - nTotalCuts = osiCuts.sizeCuts(); - } - - if (m_genFlowCover) { - UTIL_MSG(m_logLevel, 3, - (*m_logStream) << "Calling cut generator: flow-covers\n"; - ); - m_genFlowCover->generateCuts(*cutGenClpSI, osiCuts); - nFlowCoverCuts = osiCuts.sizeCuts() - nTotalCuts; - nTotalCuts = osiCuts.sizeCuts(); - } - - if (m_genKnapCover) { - UTIL_MSG(m_logLevel, 3, - (*m_logStream) << "Calling cut generator: knap-covers\n"; - ); - m_genKnapCover->generateCuts(*cutGenClpSI, osiCuts); - nKnapCoverCuts = osiCuts.sizeCuts() - nTotalCuts; - nTotalCuts = osiCuts.sizeCuts(); - } - - if (m_genMixIntRound) { - UTIL_MSG(m_logLevel, 3, - (*m_logStream) << "Calling cut generator: mixint-round\n"; - ); - m_genMixIntRound->generateCuts(*cutGenClpSI, osiCuts); - nMixIntRoundCuts = osiCuts.sizeCuts() - nTotalCuts; - nTotalCuts = osiCuts.sizeCuts(); - } - - if (m_genGomory) { - UTIL_MSG(m_logLevel, 3, - (*m_logStream) << "Calling cut generator: gomory\n"; - ); - m_genGomory->generateCuts(*cutGenClpSI, osiCuts); - nGomoryCuts = osiCuts.sizeCuts() - nTotalCuts; - nTotalCuts = osiCuts.sizeCuts(); - } - - UTIL_MSG(m_logLevel, 3, - (*m_logStream) - << "Num clique cuts= " << nCliqueCuts << endl - << "Num odd-hole cuts= " << nOddHoleCuts << endl - << "Num flow-cover cuts= " << nFlowCoverCuts << endl - << "Num knap-cover cuts= " << nKnapCoverCuts << endl - << "Num mixed-int cuts= " << nMixIntRoundCuts << endl - << "Num gomory cuts= " << nGomoryCuts << endl; - ); - //osiCuts.printCuts(); - int i; - - for (i = 0; i < osiCuts.sizeRowCuts(); i++) { - CoinAssertDebug(osiCuts.rowCut(i).consistent()); - CoinAssertDebug(osiCuts.rowCut(i).consistent(*cutGenClpSI)); - CoinAssertDebug(!osiCuts.rowCut(i).infeasible(*cutGenClpSI)); - //CoinAssertDebug(osiCuts.rowCut(i).violated(xhat) > 1.e-5); - DecompCutOsi* decompCut = new DecompCutOsi(osiCuts.rowCut(i)); - - if (osiCuts.rowCut(i).violated(xhat) < DecompEpsilon) { - UTIL_DEBUG(m_logLevel, 3, - (*m_logStream) << - "WARNING: CGL cut " << i << " not violated." << endl; - osiCuts.rowCut(i).print(); - ); - } else { - newCuts.push_back(decompCut); - } - } - - UTIL_DEBUG(m_logLevel, 3, - - if (osiCuts.sizeColCuts() > 0) { - (*m_logStream) - << "WARNING: " << osiCuts.sizeColCuts() - << " CGL col cuts found." 
<< endl; - } - ); - - // if(nInts > 0) - // si->setContinuous(&integerVars[0], nInts); - if (mustDeleteWS && warmStart) { - UTIL_DELPTR(warmStart); - } - - UTIL_DELARR(act); - return status; + } + + default: + break; + } + + /*#ifdef __DECOMP_IP_CPX__ + OsiCpxSolverInterface * subprobSI_Cpx + = dynamic_cast(si); + CPXENVptr cpxEnv = subprobSI_Cpx->getEnvironmentPtr(); + CPXLPptr cpxLp = subprobSI_Cpx->getLpPtr(); + + int err = CPXcopystart( cpxEnv, cpxLp, NULL, NULL, + const_cast( xhat ), + const_cast( act ), + NULL, NULL ); + printf("Err=%d\n",err);fflush(stdout); + assert(!err); + #endif*/ + if (m_genClique) { + UTIL_MSG(m_logLevel, 3, + (*m_logStream) << "Calling cut generator: cliques\n";); + m_genClique->generateCuts(*cutGenClpSI, osiCuts); + nCliqueCuts = osiCuts.sizeCuts() - nTotalCuts; + nTotalCuts = osiCuts.sizeCuts(); + } + + if (m_genOddHole) { + UTIL_MSG(m_logLevel, 3, + (*m_logStream) << "Calling cut generator: cliques\n";); + m_genOddHole->generateCuts(*cutGenClpSI, osiCuts); + nOddHoleCuts = osiCuts.sizeCuts() - nTotalCuts; + nTotalCuts = osiCuts.sizeCuts(); + } + + if (m_genFlowCover) { + UTIL_MSG(m_logLevel, 3, + (*m_logStream) << "Calling cut generator: flow-covers\n";); + m_genFlowCover->generateCuts(*cutGenClpSI, osiCuts); + nFlowCoverCuts = osiCuts.sizeCuts() - nTotalCuts; + nTotalCuts = osiCuts.sizeCuts(); + } + + if (m_genKnapCover) { + UTIL_MSG(m_logLevel, 3, + (*m_logStream) << "Calling cut generator: knap-covers\n";); + m_genKnapCover->generateCuts(*cutGenClpSI, osiCuts); + nKnapCoverCuts = osiCuts.sizeCuts() - nTotalCuts; + nTotalCuts = osiCuts.sizeCuts(); + } + + if (m_genMixIntRound) { + UTIL_MSG(m_logLevel, 3, + (*m_logStream) << "Calling cut generator: mixint-round\n";); + m_genMixIntRound->generateCuts(*cutGenClpSI, osiCuts); + nMixIntRoundCuts = osiCuts.sizeCuts() - nTotalCuts; + nTotalCuts = osiCuts.sizeCuts(); + } + + if (m_genGomory) { + UTIL_MSG(m_logLevel, 3, + (*m_logStream) << "Calling cut generator: gomory\n";); + m_genGomory->generateCuts(*cutGenClpSI, osiCuts); + nGomoryCuts = osiCuts.sizeCuts() - nTotalCuts; + nTotalCuts = osiCuts.sizeCuts(); + } + + UTIL_MSG(m_logLevel, 3, + (*m_logStream) << "Num clique cuts= " << nCliqueCuts << endl + << "Num odd-hole cuts= " << nOddHoleCuts << endl + << "Num flow-cover cuts= " << nFlowCoverCuts << endl + << "Num knap-cover cuts= " << nKnapCoverCuts << endl + << "Num mixed-int cuts= " << nMixIntRoundCuts << endl + << "Num gomory cuts= " << nGomoryCuts << endl;); + // osiCuts.printCuts(); + int i; + + for (i = 0; i < osiCuts.sizeRowCuts(); i++) { + CoinAssertDebug(osiCuts.rowCut(i).consistent()); + CoinAssertDebug(osiCuts.rowCut(i).consistent(*cutGenClpSI)); + CoinAssertDebug(!osiCuts.rowCut(i).infeasible(*cutGenClpSI)); + // CoinAssertDebug(osiCuts.rowCut(i).violated(xhat) > 1.e-5); + DecompCutOsi *decompCut = new DecompCutOsi(osiCuts.rowCut(i)); + + if (osiCuts.rowCut(i).violated(xhat) < DecompEpsilon) { + UTIL_DEBUG(m_logLevel, 3, + (*m_logStream) + << "WARNING: CGL cut " << i << " not violated." << endl; + osiCuts.rowCut(i).print();); + } else { + newCuts.push_back(decompCut); + } + } + + UTIL_DEBUG( + m_logLevel, 3, + + if (osiCuts.sizeColCuts() > 0) { + (*m_logStream) << "WARNING: " << osiCuts.sizeColCuts() + << " CGL col cuts found." 
<< endl; + }); + + // if(nInts > 0) + // si->setContinuous(&integerVars[0], nInts); + if (mustDeleteWS && warmStart) { + UTIL_DELPTR(warmStart); + } + + UTIL_DELARR(act); + return status; } diff --git a/Dip/src/DecompAlgoD.cpp b/Dip/src/DecompAlgoD.cpp index 3700ac78..45856e75 100644 --- a/Dip/src/DecompAlgoD.cpp +++ b/Dip/src/DecompAlgoD.cpp @@ -12,660 +12,613 @@ // All Rights Reserved. // //===========================================================================// - -#include "DecompApp.h" -#include "DecompVar.h" #include "DecompAlgoD.h" +#include "DecompApp.h" #include "DecompCutOsi.h" #include "DecompSolverResult.h" +#include "DecompVar.h" using namespace std; -//TODO: generateInitVars should be based on cost = -xhat +// TODO: generateInitVars should be based on cost = -xhat // ------------------------------------------------------------------------- // -void DecompAlgoD::phaseUpdate(DecompPhase& phase, - DecompStatus& status) -{ - UtilPrintFuncBegin(m_osLog, m_classTag, - "phaseUpdate()", m_param.LogDebugLevel, 2); - DecompAlgo::phaseUpdate(phase, status); - - if (phase == PHASE_DONE && status == STAT_FEASIBLE) { - //--- - //--- then a decomposition was found, return it - //--- - } - - //--- - //--- TODO: - //--- are we stuck? - //--- 11/3/09: I13 PhaseIObj not moving - //--- TODO: use tailoff function - //--- - int changeLen = m_param.TailoffLength; - double changePerLimit = m_param.TailoffPercent; - - if (static_cast(m_phaseIObj.size()) > changeLen) { - vector< double >::reverse_iterator it = m_phaseIObj.rbegin(); - int len = 0; - double prevBound = (*it); - double diff = m_infinity; - double sumDiff = 0.0; - double aveDiff = 0.0; - double perDiff = 0.0; - - for ( ; it != m_phaseIObj.rend(); it++) { - diff = fabs(prevBound - (*it)); - UTIL_DEBUG(m_param.LogDebugLevel, 3, - (*m_osLog) - << setw(10) << "prevBound=" - << setw(10) << UtilDblToStr(prevBound, 2) - << setw(10) << ", thisBound=" - << setw(10) << UtilDblToStr((*it)) << endl; - ); - sumDiff += diff; - prevBound = (*it); - len++; - - if (len >= changeLen) { - break; - } - } - - aveDiff = sumDiff / len; - - if (UtilIsZero(prevBound)) { - perDiff = aveDiff; - } else { - perDiff = 100 * aveDiff / fabs(prevBound); - } - - UTIL_MSG(m_param.LogDebugLevel, 2, - (*m_osLog) - << setw(10) << "Percentage difference in obj bound=" - << setw(10) << UtilDblToStr(perDiff, 2) << endl; - ); - - //--- - //--- if the average percentage difference is less than some threshold - //--- than we are tailing off - //--- - if (perDiff <= changePerLimit) { - UTIL_DEBUG(m_param.LogDebugLevel, 3, - (*m_osLog) << "DC is tailing off - STOP PROCESS" << endl; - ); - phase = PHASE_DONE; - m_stopCriteria = DecompStopTailOff; +void DecompAlgoD::phaseUpdate(DecompPhase &phase, DecompStatus &status) { + UtilPrintFuncBegin(m_osLog, m_classTag, "phaseUpdate()", + m_param.LogDebugLevel, 2); + DecompAlgo::phaseUpdate(phase, status); + + if (phase == PHASE_DONE && status == STAT_FEASIBLE) { + //--- + //--- then a decomposition was found, return it + //--- + } + + //--- + //--- TODO: + //--- are we stuck? 
+ //--- 11/3/09: I13 PhaseIObj not moving + //--- TODO: use tailoff function + //--- + int changeLen = m_param.TailoffLength; + double changePerLimit = m_param.TailoffPercent; + + if (static_cast(m_phaseIObj.size()) > changeLen) { + vector::reverse_iterator it = m_phaseIObj.rbegin(); + int len = 0; + double prevBound = (*it); + double diff = m_infinity; + double sumDiff = 0.0; + double aveDiff = 0.0; + double perDiff = 0.0; + + for (; it != m_phaseIObj.rend(); it++) { + diff = fabs(prevBound - (*it)); + UTIL_DEBUG(m_param.LogDebugLevel, 3, + (*m_osLog) << setw(10) << "prevBound=" << setw(10) + << UtilDblToStr(prevBound, 2) << setw(10) + << ", thisBound=" << setw(10) << UtilDblToStr((*it)) + << endl;); + sumDiff += diff; + prevBound = (*it); + len++; + + if (len >= changeLen) { + break; } - } - - UtilPrintFuncEnd(m_osLog, m_classTag, - "phaseUpdate()", m_param.LogDebugLevel, 2); + } + + aveDiff = sumDiff / len; + + if (UtilIsZero(prevBound)) { + perDiff = aveDiff; + } else { + perDiff = 100 * aveDiff / fabs(prevBound); + } + + UTIL_MSG(m_param.LogDebugLevel, 2, + (*m_osLog) << setw(10) << "Percentage difference in obj bound=" + << setw(10) << UtilDblToStr(perDiff, 2) << endl;); + + //--- + //--- if the average percentage difference is less than some threshold + //--- than we are tailing off + //--- + if (perDiff <= changePerLimit) { + UTIL_DEBUG(m_param.LogDebugLevel, 3, + (*m_osLog) << "DC is tailing off - STOP PROCESS" << endl;); + phase = PHASE_DONE; + m_stopCriteria = DecompStopTailOff; + } + } + + UtilPrintFuncEnd(m_osLog, m_classTag, "phaseUpdate()", m_param.LogDebugLevel, + 2); } //===========================================================================// -void DecompAlgoD::phaseDone() -{ - UtilPrintFuncBegin(m_osLog, m_classTag, - "phaseDone()", m_param.LogDebugLevel, 1); - - if (m_stopCriteria != DecompStopInfeasible) { - if (m_param.LogDebugLevel >= 3) { - printVars(m_osLog); //use this to warm start DW - } - - return; - } - - //--- - //--- decomposition could not be found, this means the - //--- point we are decomposing is not inside P' and we can - //--- generate a 'farkas cut' - //--- - //--- since we use a phase I, our 'proof of infeasibility' - //--- does not come from the 'dual ray' but rather the objective - //--- of the oracle - //--- - //--- by getting here, we have shown that (c=0,A=I) - //--- (c-uA'')s - alpha >= 0 for all s in P' - //--- and - //--- (c-uA'')s* - alpha < 0 - //--- - //--- in case of many blocks, take the most violated block - //--- - int i, b; - const double* dualSol = m_masterSI->getRowPrice(); - double lhs = 0.0; - - for (i = 0; i < m_numOrigCols; i++) { - lhs -= dualSol[i] * m_xhatD[i]; - - if (m_param.LogDebugLevel >= 3) { - printf("i:%4d u:%5g x:%5g lhs:%5g\n", - i, dualSol[i], m_xhatD[i], lhs); - } - } - - //--- - //--- pick the alpha that maximizes the violation - //--- - double alpha = -m_infinity; - - for (b = 0; b < m_numConvexCon; b++) { - if (dualSol[m_numOrigCols + b] > alpha) { - alpha = dualSol[m_numOrigCols + b]; - } - } - - lhs -= alpha; - - if (m_param.LogDebugLevel >= 3) { - printf("alpha:%5g lhs:%5g\n", alpha, lhs); - } - - if (lhs < 0) { - printf(" VIOLATED FARKAS CUT lhs = %g\n", lhs); - CoinPackedVector cut; - OsiRowCut rowCut; - - //--- - //--- Farkas Cut: u*x <= -alpha - //--- - for (i = 0; i < m_numOrigCols; i++) { - cut.insert(i, dualSol[i]); - } - - rowCut.setRow(cut); - rowCut.setLb(-m_infinity); - rowCut.setUb(-alpha); - DecompCutOsi* decompCut = new DecompCutOsi(rowCut); - 
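phaseUpdate() above stops the process when the Phase-I objective stalls: it averages the change over the last TailoffLength values and compares the percentage change against TailoffPercent. A condensed, standalone restatement of that test (objHistory holds the Phase-I objective values, newest last; the zero tolerance is illustrative):

#include <cmath>
#include <vector>

bool isTailingOff(const std::vector<double> &objHistory,
                  int changeLen, double changePerLimit) {
  if (static_cast<int>(objHistory.size()) <= changeLen)
    return false;                       // not enough history yet
  std::vector<double>::const_reverse_iterator it = objHistory.rbegin();
  double prev    = *it;
  double sumDiff = 0.0;
  int    len     = 0;
  for (; it != objHistory.rend() && len < changeLen; ++it) {
    sumDiff += std::fabs(prev - *it);   // first term is 0 by construction
    prev = *it;
    len++;
  }
  double aveDiff = sumDiff / len;
  double perDiff = (std::fabs(prev) < 1.0e-12)  // illustrative zero tolerance
                       ? aveDiff
                       : 100.0 * aveDiff / std::fabs(prev);
  return perDiff <= changePerLimit;     // small average movement => tailing off
}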
decompCut->setStringHash(m_infinity);//constructor should do! - //decompCut->print(m_osLog); - (*m_newCuts).push_back(decompCut); - } - - //--- - //--- comparing to Concorde's LOCALCUT implementation, our - //--- version correponds to their version (3) in separate.c - //--- because we have a cost of 1.0 on artificals, the duals (u) - //--- should be bounded between -1 and 1 - //--- - UtilPrintFuncEnd(m_osLog, m_classTag, - "phaseDone()", m_param.LogDebugLevel, 1); +void DecompAlgoD::phaseDone() { + UtilPrintFuncBegin(m_osLog, m_classTag, "phaseDone()", m_param.LogDebugLevel, + 1); + + if (m_stopCriteria != DecompStopInfeasible) { + if (m_param.LogDebugLevel >= 3) { + printVars(m_osLog); // use this to warm start DW + } + + return; + } + + //--- + //--- decomposition could not be found, this means the + //--- point we are decomposing is not inside P' and we can + //--- generate a 'farkas cut' + //--- + //--- since we use a phase I, our 'proof of infeasibility' + //--- does not come from the 'dual ray' but rather the objective + //--- of the oracle + //--- + //--- by getting here, we have shown that (c=0,A=I) + //--- (c-uA'')s - alpha >= 0 for all s in P' + //--- and + //--- (c-uA'')s* - alpha < 0 + //--- + //--- in case of many blocks, take the most violated block + //--- + int i, b; + const double *dualSol = m_masterSI->getRowPrice(); + double lhs = 0.0; + + for (i = 0; i < m_numOrigCols; i++) { + lhs -= dualSol[i] * m_xhatD[i]; + + if (m_param.LogDebugLevel >= 3) { + printf("i:%4d u:%5g x:%5g lhs:%5g\n", i, dualSol[i], m_xhatD[i], lhs); + } + } + + //--- + //--- pick the alpha that maximizes the violation + //--- + double alpha = -m_infinity; + + for (b = 0; b < m_numConvexCon; b++) { + if (dualSol[m_numOrigCols + b] > alpha) { + alpha = dualSol[m_numOrigCols + b]; + } + } + + lhs -= alpha; + + if (m_param.LogDebugLevel >= 3) { + printf("alpha:%5g lhs:%5g\n", alpha, lhs); + } + + if (lhs < 0) { + printf(" VIOLATED FARKAS CUT lhs = %g\n", lhs); + CoinPackedVector cut; + OsiRowCut rowCut; + + //--- + //--- Farkas Cut: u*x <= -alpha + //--- + for (i = 0; i < m_numOrigCols; i++) { + cut.insert(i, dualSol[i]); + } + + rowCut.setRow(cut); + rowCut.setLb(-m_infinity); + rowCut.setUb(-alpha); + DecompCutOsi *decompCut = new DecompCutOsi(rowCut); + decompCut->setStringHash(m_infinity); // constructor should do! + // decompCut->print(m_osLog); + (*m_newCuts).push_back(decompCut); + } + + //--- + //--- comparing to Concorde's LOCALCUT implementation, our + //--- version correponds to their version (3) in separate.c + //--- because we have a cost of 1.0 on artificals, the duals (u) + //--- should be bounded between -1 and 1 + //--- + UtilPrintFuncEnd(m_osLog, m_classTag, "phaseDone()", m_param.LogDebugLevel, + 1); } - //===========================================================================// -void DecompAlgoD::masterMatrixAddArtCols(CoinPackedMatrix* masterM, - double* colLB, - double* colUB, - double* objCoeff, - vector& colNames, - int startRow, - int endRow, - char origOrBranch) -{ - //--- - //--- min sp + sm - //--- - //--- ax = b --> ax + sp - sm = b, sp >= 0, sm >= 0 - //--- ax <= b --> ax - sm <= b, sm >= 0 - //--- ax >= b --> ax + sp >= b, sp >= 0 - //--- - int r, colIndex; - DecompConstraintSet* modelCore = m_modelCore.getModel(); - vector& rowNames = modelCore->colNames; - bool hasNames = rowNames.empty() ? false : true; - string colName; - string strIndex; - string colNameL = origOrBranch == 'O' ? "sOL(c_" : "sBL(c_"; - string colNameG = origOrBranch == 'O' ? 
"sOG(c_" : "sBG(c_"; - DecompColType colTypeL = origOrBranch == 'O' ? - DecompCol_ArtForRowL : DecompCol_ArtForBranchL; - DecompColType colTypeG = origOrBranch == 'O' ? - DecompCol_ArtForRowG : DecompCol_ArtForBranchG; - colIndex = masterM->getNumCols(); - vector colBeg; - vector colInd; - vector colVal; - colBeg.push_back(0); - - for (r = startRow; r < endRow; r++) { - if (hasNames) { - strIndex = UtilIntToStr(colIndex); - } - - masterMatrixAddArtCol(colBeg, colInd, colVal, - 'L', r, colIndex, colTypeL, - colLB[colIndex], colUB[colIndex], - objCoeff[colIndex]); - - if (hasNames) { - colName = colNameL + strIndex + "_" + rowNames[r] + ")"; - colNames.push_back(colName); - } +void DecompAlgoD::masterMatrixAddArtCols(CoinPackedMatrix *masterM, + double *colLB, double *colUB, + double *objCoeff, + vector &colNames, int startRow, + int endRow, char origOrBranch) { + //--- + //--- min sp + sm + //--- + //--- ax = b --> ax + sp - sm = b, sp >= 0, sm >= 0 + //--- ax <= b --> ax - sm <= b, sm >= 0 + //--- ax >= b --> ax + sp >= b, sp >= 0 + //--- + int r, colIndex; + DecompConstraintSet *modelCore = m_modelCore.getModel(); + vector &rowNames = modelCore->colNames; + bool hasNames = rowNames.empty() ? false : true; + string colName; + string strIndex; + string colNameL = origOrBranch == 'O' ? "sOL(c_" : "sBL(c_"; + string colNameG = origOrBranch == 'O' ? "sOG(c_" : "sBG(c_"; + DecompColType colTypeL = + origOrBranch == 'O' ? DecompCol_ArtForRowL : DecompCol_ArtForBranchL; + DecompColType colTypeG = + origOrBranch == 'O' ? DecompCol_ArtForRowG : DecompCol_ArtForBranchG; + colIndex = masterM->getNumCols(); + vector colBeg; + vector colInd; + vector colVal; + colBeg.push_back(0); + + for (r = startRow; r < endRow; r++) { + if (hasNames) { + strIndex = UtilIntToStr(colIndex); + } + + masterMatrixAddArtCol(colBeg, colInd, colVal, 'L', r, colIndex, colTypeL, + colLB[colIndex], colUB[colIndex], objCoeff[colIndex]); + + if (hasNames) { + colName = colNameL + strIndex + "_" + rowNames[r] + ")"; + colNames.push_back(colName); + } - colIndex++; - masterMatrixAddArtCol(colBeg, colInd, colVal, - 'G', r, colIndex, colTypeG, - colLB[colIndex], colUB[colIndex], - objCoeff[colIndex]); + colIndex++; + masterMatrixAddArtCol(colBeg, colInd, colVal, 'G', r, colIndex, colTypeG, + colLB[colIndex], colUB[colIndex], objCoeff[colIndex]); - if (hasNames) { - colName = colNameG + strIndex + "_" + rowNames[r] + ")"; - colNames.push_back(colName); - } + if (hasNames) { + colName = colNameG + strIndex + "_" + rowNames[r] + ")"; + colNames.push_back(colName); + } - colIndex++; - } + colIndex++; + } - masterM->appendCols(static_cast(colBeg.size()) - 1, - &colBeg[0], - &colInd[0], - &colVal[0]); + masterM->appendCols(static_cast(colBeg.size()) - 1, &colBeg[0], + &colInd[0], &colVal[0]); } - - - - //===========================================================================// -void DecompAlgoD::createMasterProblem(DecompVarList& initVars) -{ - //use DecompAlgoPC2? - //--- - //--- Initialize the solver interface for the master problem. - //--- - //--- For the master constraint system: - //--- modelCore contains [A'', b''], in terms of x. - //--- m_modelRelax contains [A', b'], and might contiain multiple blocks. - //--- - //--- For each block we must add a convexity constraint. Let K be the set - //--- of blocks. - //--- - //--- Notation: - //--- n = orig number of vars - //--- m'' = orig number of rows in A'', b'' - //--- |K| = the number of blocks that defines [A' b'] - //--- s = a solution (e.p.) 
to the relaxed problem, size (1xn) - //--- c = original cost, size (1xn) - //--- F'[k] = the current set of relaxed e.p.'s for block k in K - //--- a''[i,j] = entry at row i, column j for A'' matrix - //--- C = original set of columns (n = |C|) - //--- R'' = original set of rows in A'' (m''=|R''|) - //--- - //--- The Dantzig-Wolfe LP: - //--- - //--- min sum{k in K, s in F'[k]} - //--- sum{j in C}(c[j] * s[j]) * lambda[k][s] - //--- s.t. sum{k in K, s in F'[k]} - //--- sum{j in C}(a''[i,j] * s[j])* lambda[k][s] ~ b''[i], i in R'' - //--- sum{s in F'[k]} lambda[k][s] = 1, k in K - //--- lambda[k][s] >= 0, k in K, s in F'[k] - //--- - //--- - //--- The Dantzig-Wolfe (DECOMP) LP: - //--- - //--- min sum{k in K, s in F'[k]} - //--- sum{j in C}(0 * s[j]) * lambda[k][s] - //--- s.t. sum{k in K, s in F'[k]} - //--- sum{j in C}(1 * s[j])* lambda[k][s] = x*[j], j in C - //--- sum{s in F'[k]} lambda[k][s] = 1, k in K - //--- lambda[k][s] >= 0, k in K, s in F'[k] - //--- - //--- - //--- - //--- Change for Phase I model. - //--- Add a slack and/or surplus variable to each master constraint - //--- including the bounds for branching?? THINK... - //--- - //--- THINK: - //--- Do we bother removing these vars once feasible? What about the - //--- fact that adding cuts could once again cause infeasible.... - //--- - //--- What do we do after a branching? jump back to Phase I? - //--- - //--- - //--- Phase I: - //--- min sum{i in R''} (splus[i] + sminus[i]) - //--- - //--- Phase II: - //--- min sum{k in K, s in F'[k]} - //--- sum{j in C}(c[j] * s[j]) * lambda[k][s] - //--- - //--- s.t. sum{k in K, s in F'[k]} - //--- sum{j in C}(a''[i,j] * s[j])* lambda[k][s] + - //--- splus[i] - sminus[i] ~ b''[i], i in R'' - //--- sum{s in F'[k]} lambda[k][s] = 1, k in K - //--- lambda[k][s] >= 0, k in K, s in F'[k] - //--- splus[i] >= 0, i in R'' - //--- sminus[i] >= 0, i in R'' - //--- - UtilPrintFuncBegin(m_osLog, m_classTag, - "createMasterProblem()", m_param.LogDebugLevel, 2); - DecompConstraintSet* modelCore = m_modelCore.getModel(); - assert(initVars.size() > 0); - assert(modelCore); //TODO: what if core is empty - int r, c, startRow, endRow; - int nColsCore = modelCore->getNumCols(); - //int nRowsCore = modelCore->getNumRows(); - //int nIntVars = static_cast(modelCore->integerVars.size()); - double* dblArrNCoreCols = new double[nColsCore]; - assert(dblArrNCoreCols); - //--- - //--- set the row counts - //--- - //m_nRowsOrig = nRowsCore; - m_nRowsOrig = nColsCore; - m_nRowsBranch = 0;//2 * nIntVars; - m_nRowsConvex = m_numConvexCon; - m_nRowsCuts = 0; - - //--- - //--- set the row types of the rows - //--- original rows, branch rows, convexity rows - //--- - for (r = 0; r < m_nRowsOrig; r++) { - m_masterRowType.push_back(DecompRow_Original); - } - - //for(r = 0; r < m_nRowsBranch; r++) - // m_masterRowType.push_back(DecompRow_Branch); - for (r = 0; r < m_nRowsConvex; r++) { - m_masterRowType.push_back(DecompRow_Convex); - } - - //--- - //--- In order to implement simple branching, we are going to - //--- treat all column bounds as explicit constraints. Then branching - //--- for DW can be done in the same way it is done for regular CPM. - //--- NOTE: in D, we don't need to ever branch - //--- - //coreMatrixAppendColBounds(); - //////// - //THINK - what need this for? 
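Referring back to phaseDone() above: when no decomposition exists, the duals u on the x*-rows and the largest convexity-row dual alpha yield the separating cut u*x <= -alpha. A minimal sketch of assembling that cut with OsiRowCut, assuming dualSol is the master row-price vector with the nOrigCols x*-rows first:

#include <algorithm>
#include "CoinPackedVector.hpp"
#include "OsiRowCut.hpp"

OsiRowCut buildFarkasCut(const double *dualSol, int nOrigCols,
                         int nBlocks, double infinity) {
  // alpha = largest dual among the convexity rows (most violated block)
  double alpha = -infinity;
  for (int b = 0; b < nBlocks; b++)
    alpha = std::max(alpha, dualSol[nOrigCols + b]);

  CoinPackedVector row;
  for (int i = 0; i < nOrigCols; i++)
    row.insert(i, dualSol[i]);          // cut coefficients are the duals u

  OsiRowCut cut;                        // u*x <= -alpha; it cuts off xhat
  cut.setRow(row);                      // exactly when sum_i u[i]*xhat[i] > -alpha
  cut.setLb(-infinity);
  cut.setUb(-alpha);
  return cut;
}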
- //number of original core rows - modelCore->nBaseRowsOrig = modelCore->nBaseRows; - //number of original core rows plus branching rows - modelCore->nBaseRows = modelCore->getNumRows(); - assert(modelCore->nBaseRowsOrig == modelCore->nBaseRows); - //--- - //--- create a matrix for the master LP - //--- make room for original rows and convexity rows - //--- - int nRows = m_nRowsOrig + m_nRowsBranch + m_nRowsConvex; - int nColsMax = static_cast(initVars.size()) - + 2 * (m_nRowsOrig + m_nRowsBranch); - double* colLB = new double[nColsMax]; - double* colUB = new double[nColsMax]; - double* objCoeff = new double[nColsMax]; - CoinPackedMatrix* masterM = new CoinPackedMatrix(true, 0, 0); - vector colNames; - UTIL_DEBUG(m_app->m_param.LogDebugLevel, 5, - double* denseCol = new double[nRows]; - CoinAssertHint(colLB && colUB && objCoeff && denseCol && masterM, - "Error: Out of Memory"); - ); - //--- - //--- set the number of rows, we will add columns - //--- - masterM->setDimensions(nRows, 0); - //--- - //--- create artifical columns in master LP for: - //--- original rows - //--- - startRow = 0; - endRow = m_nRowsOrig; - masterMatrixAddArtCols(masterM, - colLB, - colUB, - objCoeff, - colNames, - startRow, endRow, 'O'); - //startRow = m_nRowsOrig; - //endRow = m_nRowsOrig + m_nRowsBranch; - //masterMatrixAddArtCols(masterM, - // colLB, - // colUB, - // objCoeff, - // colNames, - // startRow, endRow, 'B'); - //TODO: should initVars be in pool and do addVarsFromPool here? - /*M = new CoinPackedMatrix(true, 0, 0);//col-ordered - M->setDimensions(nColsCore + m_numConvexCon, 0); - - const int n_colsArt = 2 * nColsCore; //this includes appended... ?? - int n_cols = static_cast(initVars.size()); - n_cols += n_colsArt; - double * colLB = new double[n_cols]; - double * colUB = new double[n_cols]; - double * obj = new double[n_cols]; - //double * denseCol = new double[nColsCore + m_numConvexCon]; - CoinAssertHint(colLB && colUB && obj,// && denseCol, - "Error: Out of Memory"); - - int r, c; - int row_index = 0; - int col_index = 0; - DecompVarList::iterator li; - - - //TODO: for =, why not just use one art that is free? 
- - //--- - //--- min sp + sm - //--- - //--- ax = b --> ax + sp - sm = b, sp >= 0, sm >= 0 - //--- - //--- ax <= b --> ax - sm <= b, sm >= 0 - //--- ax <= b --> ax + sp - sm <= b, sp = 0, sm >= 0 (for now) - //--- - //--- ax >= b --> ax + sp >= b, sp >= 0 - //--- ax >= b --> ax + sp - sm >= b, sp >= 0, sm = 0 (for now) - //--- - //vector & rowSense = modelCore->rowSense; - vector rowNames; - vector colNames; - string colNamePlus, colNameMinus; - for(c = 0; c < nColsCore; c++){ - CoinPackedVector artColPlus; - CoinPackedVector artColMinus; - artColPlus.insert (c, 1.0); - artColMinus.insert(c, -1.0); - - //--- - //--- append the two artificial columns to the matrix - //--- - M->appendCol(artColPlus); - colLB[col_index] = 0.0; - colUB[col_index] = m_infinity; - obj[col_index] = 1.0; - colNamePlus = "sP(c_" + UtilIntToStr(col_index) - + "_" + UtilIntToStr(c) + ")"; - col_index++; - - M->appendCol(artColMinus); - colLB[col_index] = 0.0; - colUB[col_index] = m_infinity; - obj[col_index] = 1.0; - colNameMinus = "sM(c_" + UtilIntToStr(col_index) - + "_" + UtilIntToStr(c) + ")"; - col_index++; - - colNames.push_back(colNamePlus); - colNames.push_back(colNameMinus); - } - */ - int colIndex = 0; - int blockIndex = 0; - DecompVarList::iterator li; - - //TODO: - // this should be calling a function to add var to lp so don't dup code - for (li = initVars.begin(); li != initVars.end(); li++) { - //--- - //--- appending these variables (lambda) to end of matrix - //--- after the artificials - //--- - colIndex = masterM->getNumCols(); - m_colIndexUnique = colIndex; - //--- - //--- store the col index for this var in the master LP - //--- NOTE: if we remove columns, this will be wrong - //--- - (*li)->setColMasterIndex(colIndex); - //--- - //--- we expect the user to define the block id in the var object - //--- - blockIndex = (*li)->getBlockId(); - //--- - //--- give the column a name - //--- - string colName = "lam(c_" + UtilIntToStr(m_colIndexUnique) - + ",b_" + UtilIntToStr(blockIndex) + ")"; - colNames.push_back(colName); - UTIL_DEBUG(m_param.LogDebugLevel, 5, - (*li)->print(m_infinity, m_osLog, m_app); - ); - //--- - //--- the column is just the vector s - //--- - CoinPackedVector* sparseCol = 0; - - if ((*li)->m_s.getNumElements() > 0) { - sparseCol = new CoinPackedVector((*li)->m_s); - } else { - sparseCol = new CoinPackedVector(); - } - - //--- - //--- append the coeff for the approriate convexity constraint - //--- - sparseCol->insert(nColsCore + blockIndex, 1.0); - UTIL_DEBUG(m_param.LogDebugLevel, 5, - (*m_osLog) << "\nSparse Col: \n"; - UtilPrintPackedVector(*sparseCol, m_osLog); - ); - //TODO: check for duplicates (against m_vars) - // or force initVars to be sent in with no dups? 
- //TODO: do in const blocks - //--- - //--- append the sparse column to the matrix - //--- - masterM->appendCol(*sparseCol); - colLB[colIndex] = 0.0; - colUB[colIndex] = m_infinity; - objCoeff[colIndex] = 0.0; //for D, we are in PHASEI the whole time - //--- - //--- set master column type - //--- - m_masterColType.push_back(DecompCol_Structural); - //--- - //--- clean-up - //--- - UTIL_DELPTR(sparseCol); //THINK - } //END: for(li = initVars.begin(); li != initVars.end(); li++) - - //--- - //--- insert the initial set of variables into the master variable list - //--- - //THINK: now doing in loop, so can check for dups - appendVars(initVars); - //--- - //--- row bounds from core inclding - //--- original rows (= x*) - //--- - vector masterRowLB; - vector masterRowUB; - - for (c = 0; c < nColsCore; c++) { - masterRowLB.push_back(m_xhatD[c]); - masterRowUB.push_back(m_xhatD[c]); - } - - //--- - //--- row bounds for convexity constraints - //--- - for (r = 0; r < m_numConvexCon; r++) { - masterRowLB.push_back(1.0); - masterRowUB.push_back(1.0); - } - - //--- - //--- load the problem into master's solver interface - //--- - assert(masterM->getNumRows() == static_cast(masterRowLB.size())); - assert(masterM->getNumRows() == static_cast(masterRowUB.size())); - assert(masterM->getNumRows() == static_cast(m_masterRowType.size())); - assert(masterM->getNumCols() == static_cast(m_masterColType.size())); - m_masterSI->loadProblem(*masterM, - colLB, colUB, objCoeff, - &masterRowLB[0], - &masterRowUB[0]); - - //--- - //--- load column and row names to OSI - //--- - if (modelCore->colNames.size() > 0) { - m_masterSI->setIntParam(OsiNameDiscipline, 2); //Full-Names - } - - if (modelCore->colNames.size() > 0) { - assert(static_cast(modelCore->colNames.size()) == - modelCore->getNumCols()); - m_masterSI->setRowNames(modelCore->colNames, - 0, - static_cast(modelCore->colNames.size()), - 0); - vector conRowNames; - - for (r = 0; r < m_numConvexCon; r++) { - string rowName = "conv(b_" + UtilIntToStr(r) + ")"; - conRowNames.push_back(rowName); - } - - m_masterSI->setRowNames(conRowNames, - 0, - static_cast(conRowNames.size()), - static_cast(modelCore->colNames.size())); - } - - if (colNames.size() > 0) - m_masterSI->setColNames(colNames, - 0, - static_cast(colNames.size()), - 0); - - UTIL_DEBUG(m_param.LogDebugLevel, 4, - - for (r = 0; r < m_masterSI->getNumRows(); r++) { - const string rowN = m_masterSI->getRowName(r); - printf("Row[%4d] Name: %30s Type: %20s\n", - r, - rowN.c_str(), - DecompRowTypeStr[m_masterRowType[r]].c_str()); - } - for (c = 0; c < m_masterSI->getNumCols(); c++) { - const string colN = m_masterSI->getColName(c); - printf("Col[%4d] Name: %30s Type: %20s\n", - c, - colN.c_str(), - DecompColTypeStr[m_masterColType[c]].c_str()); - } - ); - //--- - //--- reset unique col index id - //--- - m_colIndexUnique = masterM->getNumCols(); - //--- - //--- free local memory - //--- - UTIL_DELPTR(masterM); - UTIL_DELARR(colLB); - UTIL_DELARR(colUB); - UTIL_DELARR(objCoeff); - UTIL_DELARR(dblArrNCoreCols); - UtilPrintFuncEnd(m_osLog, m_classTag, - "createMasterProblem()", m_param.LogDebugLevel, 2); +void DecompAlgoD::createMasterProblem(DecompVarList &initVars) { + // use DecompAlgoPC2? + //--- + //--- Initialize the solver interface for the master problem. + //--- + //--- For the master constraint system: + //--- modelCore contains [A'', b''], in terms of x. + //--- m_modelRelax contains [A', b'], and might contiain multiple blocks. + //--- + //--- For each block we must add a convexity constraint. 
Let K be the set + //--- of blocks. + //--- + //--- Notation: + //--- n = orig number of vars + //--- m'' = orig number of rows in A'', b'' + //--- |K| = the number of blocks that defines [A' b'] + //--- s = a solution (e.p.) to the relaxed problem, size (1xn) + //--- c = original cost, size (1xn) + //--- F'[k] = the current set of relaxed e.p.'s for block k in K + //--- a''[i,j] = entry at row i, column j for A'' matrix + //--- C = original set of columns (n = |C|) + //--- R'' = original set of rows in A'' (m''=|R''|) + //--- + //--- The Dantzig-Wolfe LP: + //--- + //--- min sum{k in K, s in F'[k]} + //--- sum{j in C}(c[j] * s[j]) * lambda[k][s] + //--- s.t. sum{k in K, s in F'[k]} + //--- sum{j in C}(a''[i,j] * s[j])* lambda[k][s] ~ b''[i], i in R'' + //--- sum{s in F'[k]} lambda[k][s] = 1, k in K + //--- lambda[k][s] >= 0, k in K, s in F'[k] + //--- + //--- + //--- The Dantzig-Wolfe (DECOMP) LP: + //--- + //--- min sum{k in K, s in F'[k]} + //--- sum{j in C}(0 * s[j]) * lambda[k][s] + //--- s.t. sum{k in K, s in F'[k]} + //--- sum{j in C}(1 * s[j])* lambda[k][s] = x*[j], j in C + //--- sum{s in F'[k]} lambda[k][s] = 1, k in K + //--- lambda[k][s] >= 0, k in K, s in F'[k] + //--- + //--- + //--- + //--- Change for Phase I model. + //--- Add a slack and/or surplus variable to each master constraint + //--- including the bounds for branching?? THINK... + //--- + //--- THINK: + //--- Do we bother removing these vars once feasible? What about the + //--- fact that adding cuts could once again cause infeasible.... + //--- + //--- What do we do after a branching? jump back to Phase I? + //--- + //--- + //--- Phase I: + //--- min sum{i in R''} (splus[i] + sminus[i]) + //--- + //--- Phase II: + //--- min sum{k in K, s in F'[k]} + //--- sum{j in C}(c[j] * s[j]) * lambda[k][s] + //--- + //--- s.t. sum{k in K, s in F'[k]} + //--- sum{j in C}(a''[i,j] * s[j])* lambda[k][s] + + //--- splus[i] - sminus[i] ~ b''[i], i in R'' + //--- sum{s in F'[k]} lambda[k][s] = 1, k in K + //--- lambda[k][s] >= 0, k in K, s in F'[k] + //--- splus[i] >= 0, i in R'' + //--- sminus[i] >= 0, i in R'' + //--- + UtilPrintFuncBegin(m_osLog, m_classTag, "createMasterProblem()", + m_param.LogDebugLevel, 2); + DecompConstraintSet *modelCore = m_modelCore.getModel(); + assert(initVars.size() > 0); + assert(modelCore); // TODO: what if core is empty + int r, c, startRow, endRow; + int nColsCore = modelCore->getNumCols(); + // int nRowsCore = modelCore->getNumRows(); + // int nIntVars = static_cast(modelCore->integerVars.size()); + double *dblArrNCoreCols = new double[nColsCore]; + assert(dblArrNCoreCols); + //--- + //--- set the row counts + //--- + // m_nRowsOrig = nRowsCore; + m_nRowsOrig = nColsCore; + m_nRowsBranch = 0; // 2 * nIntVars; + m_nRowsConvex = m_numConvexCon; + m_nRowsCuts = 0; + + //--- + //--- set the row types of the rows + //--- original rows, branch rows, convexity rows + //--- + for (r = 0; r < m_nRowsOrig; r++) { + m_masterRowType.push_back(DecompRow_Original); + } + + // for(r = 0; r < m_nRowsBranch; r++) + // m_masterRowType.push_back(DecompRow_Branch); + for (r = 0; r < m_nRowsConvex; r++) { + m_masterRowType.push_back(DecompRow_Convex); + } + + //--- + //--- In order to implement simple branching, we are going to + //--- treat all column bounds as explicit constraints. Then branching + //--- for DW can be done in the same way it is done for regular CPM. + //--- NOTE: in D, we don't need to ever branch + //--- + // coreMatrixAppendColBounds(); + //////// + // THINK - what need this for? 
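The DECOMP master sketched in the comments above fixes each original row to the point x* being decomposed and each convexity row to 1. A minimal illustration of loading such a master into an OSI solver (column names and the surrounding DIP bookkeeping are omitted; this mirrors the loadProblem call made later in this function):

#include <vector>
#include "CoinPackedMatrix.hpp"
#include "OsiSolverInterface.hpp"

void loadDecompMaster(OsiSolverInterface &si, const CoinPackedMatrix &M,
                      const double *colLB, const double *colUB,
                      const double *obj, const double *xhat,
                      int nOrigCols, int nBlocks) {
  // rows 0..nOrigCols-1 are fixed to xhat, the point being decomposed;
  // the last nBlocks rows are the convexity rows, fixed to 1
  std::vector<double> rowLB(xhat, xhat + nOrigCols);
  std::vector<double> rowUB(xhat, xhat + nOrigCols);
  rowLB.insert(rowLB.end(), nBlocks, 1.0);
  rowUB.insert(rowUB.end(), nBlocks, 1.0);
  si.loadProblem(M, colLB, colUB, obj, &rowLB[0], &rowUB[0]);
}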
+ // number of original core rows + modelCore->nBaseRowsOrig = modelCore->nBaseRows; + // number of original core rows plus branching rows + modelCore->nBaseRows = modelCore->getNumRows(); + assert(modelCore->nBaseRowsOrig == modelCore->nBaseRows); + //--- + //--- create a matrix for the master LP + //--- make room for original rows and convexity rows + //--- + int nRows = m_nRowsOrig + m_nRowsBranch + m_nRowsConvex; + int nColsMax = + static_cast(initVars.size()) + 2 * (m_nRowsOrig + m_nRowsBranch); + double *colLB = new double[nColsMax]; + double *colUB = new double[nColsMax]; + double *objCoeff = new double[nColsMax]; + CoinPackedMatrix *masterM = new CoinPackedMatrix(true, 0, 0); + vector colNames; + UTIL_DEBUG(m_app->m_param.LogDebugLevel, 5, + double *denseCol = new double[nRows]; + CoinAssertHint(colLB && colUB && objCoeff && denseCol && masterM, + "Error: Out of Memory");); + //--- + //--- set the number of rows, we will add columns + //--- + masterM->setDimensions(nRows, 0); + //--- + //--- create artifical columns in master LP for: + //--- original rows + //--- + startRow = 0; + endRow = m_nRowsOrig; + masterMatrixAddArtCols(masterM, colLB, colUB, objCoeff, colNames, startRow, + endRow, 'O'); + // startRow = m_nRowsOrig; + // endRow = m_nRowsOrig + m_nRowsBranch; + // masterMatrixAddArtCols(masterM, + // colLB, + // colUB, + // objCoeff, + // colNames, + // startRow, endRow, 'B'); + // TODO: should initVars be in pool and do addVarsFromPool here? + /*M = new CoinPackedMatrix(true, 0, 0);//col-ordered + M->setDimensions(nColsCore + m_numConvexCon, 0); + + const int n_colsArt = 2 * nColsCore; //this includes appended... ?? + int n_cols = static_cast(initVars.size()); + n_cols += n_colsArt; + double * colLB = new double[n_cols]; + double * colUB = new double[n_cols]; + double * obj = new double[n_cols]; + //double * denseCol = new double[nColsCore + m_numConvexCon]; + CoinAssertHint(colLB && colUB && obj,// && denseCol, + "Error: Out of Memory"); + + int r, c; + int row_index = 0; + int col_index = 0; + DecompVarList::iterator li; + + + //TODO: for =, why not just use one art that is free? 
+ + //--- + //--- min sp + sm + //--- + //--- ax = b --> ax + sp - sm = b, sp >= 0, sm >= 0 + //--- + //--- ax <= b --> ax - sm <= b, sm >= 0 + //--- ax <= b --> ax + sp - sm <= b, sp = 0, sm >= 0 (for now) + //--- + //--- ax >= b --> ax + sp >= b, sp >= 0 + //--- ax >= b --> ax + sp - sm >= b, sp >= 0, sm = 0 (for now) + //--- + //vector & rowSense = modelCore->rowSense; + vector rowNames; + vector colNames; + string colNamePlus, colNameMinus; + for(c = 0; c < nColsCore; c++){ + CoinPackedVector artColPlus; + CoinPackedVector artColMinus; + artColPlus.insert (c, 1.0); + artColMinus.insert(c, -1.0); + + //--- + //--- append the two artificial columns to the matrix + //--- + M->appendCol(artColPlus); + colLB[col_index] = 0.0; + colUB[col_index] = m_infinity; + obj[col_index] = 1.0; + colNamePlus = "sP(c_" + UtilIntToStr(col_index) + + "_" + UtilIntToStr(c) + ")"; + col_index++; + + M->appendCol(artColMinus); + colLB[col_index] = 0.0; + colUB[col_index] = m_infinity; + obj[col_index] = 1.0; + colNameMinus = "sM(c_" + UtilIntToStr(col_index) + + "_" + UtilIntToStr(c) + ")"; + col_index++; + + colNames.push_back(colNamePlus); + colNames.push_back(colNameMinus); + } + */ + int colIndex = 0; + int blockIndex = 0; + DecompVarList::iterator li; + + // TODO: + // this should be calling a function to add var to lp so don't dup code + for (li = initVars.begin(); li != initVars.end(); li++) { + //--- + //--- appending these variables (lambda) to end of matrix + //--- after the artificials + //--- + colIndex = masterM->getNumCols(); + m_colIndexUnique = colIndex; + //--- + //--- store the col index for this var in the master LP + //--- NOTE: if we remove columns, this will be wrong + //--- + (*li)->setColMasterIndex(colIndex); + //--- + //--- we expect the user to define the block id in the var object + //--- + blockIndex = (*li)->getBlockId(); + //--- + //--- give the column a name + //--- + string colName = "lam(c_" + UtilIntToStr(m_colIndexUnique) + ",b_" + + UtilIntToStr(blockIndex) + ")"; + colNames.push_back(colName); + UTIL_DEBUG(m_param.LogDebugLevel, 5, + (*li)->print(m_infinity, m_osLog, m_app);); + //--- + //--- the column is just the vector s + //--- + CoinPackedVector *sparseCol = 0; + + if ((*li)->m_s.getNumElements() > 0) { + sparseCol = new CoinPackedVector((*li)->m_s); + } else { + sparseCol = new CoinPackedVector(); + } + + //--- + //--- append the coeff for the approriate convexity constraint + //--- + sparseCol->insert(nColsCore + blockIndex, 1.0); + UTIL_DEBUG(m_param.LogDebugLevel, 5, (*m_osLog) << "\nSparse Col: \n"; + UtilPrintPackedVector(*sparseCol, m_osLog);); + // TODO: check for duplicates (against m_vars) + // or force initVars to be sent in with no dups? 
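Each initial variable in the surrounding loop becomes one lambda column: the extreme point s in the x*-rows plus a 1.0 coefficient in the convexity row of its block. A stripped-down sketch of that column construction, assuming the nCoreRows x*-rows precede the convexity rows:

#include "CoinPackedMatrix.hpp"
#include "CoinPackedVector.hpp"

void appendLambdaColumn(CoinPackedMatrix &masterM, const CoinPackedVector &s,
                        int blockId, int nCoreRows) {
  CoinPackedVector col(s);               // the extreme point s itself
  col.insert(nCoreRows + blockId, 1.0);  // coefficient in its convexity row
  masterM.appendCol(col);                // bounds/objective are set separately
}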
+ // TODO: do in const blocks + //--- + //--- append the sparse column to the matrix + //--- + masterM->appendCol(*sparseCol); + colLB[colIndex] = 0.0; + colUB[colIndex] = m_infinity; + objCoeff[colIndex] = 0.0; // for D, we are in PHASEI the whole time + //--- + //--- set master column type + //--- + m_masterColType.push_back(DecompCol_Structural); + //--- + //--- clean-up + //--- + UTIL_DELPTR(sparseCol); // THINK + } // END: for(li = initVars.begin(); li != initVars.end(); li++) + + //--- + //--- insert the initial set of variables into the master variable list + //--- + // THINK: now doing in loop, so can check for dups + appendVars(initVars); + //--- + //--- row bounds from core inclding + //--- original rows (= x*) + //--- + vector masterRowLB; + vector masterRowUB; + + for (c = 0; c < nColsCore; c++) { + masterRowLB.push_back(m_xhatD[c]); + masterRowUB.push_back(m_xhatD[c]); + } + + //--- + //--- row bounds for convexity constraints + //--- + for (r = 0; r < m_numConvexCon; r++) { + masterRowLB.push_back(1.0); + masterRowUB.push_back(1.0); + } + + //--- + //--- load the problem into master's solver interface + //--- + assert(masterM->getNumRows() == static_cast(masterRowLB.size())); + assert(masterM->getNumRows() == static_cast(masterRowUB.size())); + assert(masterM->getNumRows() == static_cast(m_masterRowType.size())); + assert(masterM->getNumCols() == static_cast(m_masterColType.size())); + m_masterSI->loadProblem(*masterM, colLB, colUB, objCoeff, &masterRowLB[0], + &masterRowUB[0]); + + //--- + //--- load column and row names to OSI + //--- + if (modelCore->colNames.size() > 0) { + m_masterSI->setIntParam(OsiNameDiscipline, 2); // Full-Names + } + + if (modelCore->colNames.size() > 0) { + assert(static_cast(modelCore->colNames.size()) == + modelCore->getNumCols()); + m_masterSI->setRowNames(modelCore->colNames, 0, + static_cast(modelCore->colNames.size()), 0); + vector conRowNames; + + for (r = 0; r < m_numConvexCon; r++) { + string rowName = "conv(b_" + UtilIntToStr(r) + ")"; + conRowNames.push_back(rowName); + } + + m_masterSI->setRowNames(conRowNames, 0, + static_cast(conRowNames.size()), + static_cast(modelCore->colNames.size())); + } + + if (colNames.size() > 0) + m_masterSI->setColNames(colNames, 0, static_cast(colNames.size()), 0); + + UTIL_DEBUG( + m_param.LogDebugLevel, 4, + + for (r = 0; r < m_masterSI->getNumRows(); r++) { + const string rowN = m_masterSI->getRowName(r); + printf("Row[%4d] Name: %30s Type: %20s\n", r, rowN.c_str(), + DecompRowTypeStr[m_masterRowType[r]].c_str()); + } for (c = 0; c < m_masterSI->getNumCols(); c++) { + const string colN = m_masterSI->getColName(c); + printf("Col[%4d] Name: %30s Type: %20s\n", c, colN.c_str(), + DecompColTypeStr[m_masterColType[c]].c_str()); + }); + //--- + //--- reset unique col index id + //--- + m_colIndexUnique = masterM->getNumCols(); + //--- + //--- free local memory + //--- + UTIL_DELPTR(masterM); + UTIL_DELARR(colLB); + UTIL_DELARR(colUB); + UTIL_DELARR(objCoeff); + UTIL_DELARR(dblArrNCoreCols); + UtilPrintFuncEnd(m_osLog, m_classTag, "createMasterProblem()", + m_param.LogDebugLevel, 2); } diff --git a/Dip/src/DecompAlgoPC.cpp b/Dip/src/DecompAlgoPC.cpp old mode 100755 new mode 100644 index b60960e9..dcda777a --- a/Dip/src/DecompAlgoPC.cpp +++ b/Dip/src/DecompAlgoPC.cpp @@ -15,10 +15,10 @@ //===========================================================================// //===========================================================================// -#include "DecompApp.h" #include "DecompAlgoPC.h" -#include 
"DecompSolverResult.h" +#include "DecompApp.h" #include "DecompConstraintSet.h" +#include "DecompSolverResult.h" //===========================================================================// #include "CoinWarmStartBasis.hpp" @@ -27,731 +27,688 @@ using namespace std; //#define DO_INTERIOR //also in DecompAlgo //===========================================================================// -void DecompAlgoPC::phaseInit(DecompPhase& phase) -{ - //--- - //--- solve the LP of the compact formulation - //--- and current branching decisions - //--- - //--- the main goal here is determine if a node is LP infeasible - //--- due to the branching decisions - if we can determine this - //--- here, we can just fathom the node and skip the expensive - //--- PhaseI pricing to prove infeasible - //--- - UtilPrintFuncBegin(m_osLog, m_classTag, - "phaseInit()", m_param.LogDebugLevel, 2); - - //--- - //--- set column bounds - //--- - //TODO: reuse this memory - col size does not change - //TODO: not getting cuts since only populate A'' in beginning - // by adding cuts this adds some of the integrality property - // otherwise the only thing causing infeasibility is the branching - //TODO: to get cuts would want to populate m_auxSI with - // the current core model - not too bad since only once - // a node? but even without cuts, see if it helps - if (m_auxSI) { - UTIL_DEBUG(m_param.LogDebugLevel, 4, - (*m_osLog) << "Solve the LP of compact formulation." << endl; - ); - int c; - int numCols = m_modelCore.getModel()->getNumCols(); - int* index = new int[numCols]; - double* bounds = new double[2 * numCols]; - - if (!(index && bounds)) { - throw UtilExceptionMemory("phaseInit", m_classTag); - } - - for (c = 0; c < numCols; c++) { - index[c] = c; - bounds[2 * c] = m_colLBNode[c]; - bounds[2 * c + 1] = m_colUBNode[c]; - } - - m_auxSI->setColSetBounds(index, index + numCols, bounds); - UTIL_DELARR(index); - UTIL_DELARR(bounds); - //--- - //--- solve LP relaxation - //--- - m_auxSI->resolve(); - //--- - //--- two possible results that might help here: - //--- (1) the LP is found infeasible -> fathom - //--- (2) the LP bound is already greater than the global UB -> fathom - //--- - UTIL_DEBUG(m_param.LogDebugLevel, 4, - (*m_osLog) - << "Iteration Count : " - << m_auxSI->getIterationCount() << "\n" - << "isAbandoned() : " - << m_auxSI->isAbandoned() << "\n" - << "isProvenOptimal() : " - << m_auxSI->isProvenOptimal() << "\n" - << "isProvenPrimalInfeasible() : " - << m_auxSI->isProvenPrimalInfeasible() << "\n" - << "isProvenDualInfeasible() : " - << m_auxSI->isProvenDualInfeasible() << "\n" - << "isPrimalObjectiveLimitReached : " - << m_auxSI->isDualObjectiveLimitReached() << "\n" - << "isDualObjectiveLimitReached : " - << m_auxSI->isDualObjectiveLimitReached() << "\n" - << "isIterationLimitReached : " - << m_auxSI->isIterationLimitReached() << "\n"; - ); - - if (m_auxSI->isProvenPrimalInfeasible()) { - UTIL_MSG(m_param.LogLevel, 3, - (*m_osLog) << "LP of Compact found Infeasible." 
<< endl; - ); - phase = PHASE_DONE; - } - } - - if (phase != PHASE_DONE) - if (getNodeIndex() == 0 && !m_isStrongBranch) { - phase = PHASE_PRICE1; - } - - UTIL_MSG(m_param.LogLevel, 2, - (*m_osLog) << "phase = " << DecompPhaseStr[phase] << endl; - ); - UtilPrintFuncEnd(m_osLog, m_classTag, - "phaseInit()", m_param.LogDebugLevel, 2); +void DecompAlgoPC::phaseInit(DecompPhase &phase) { + //--- + //--- solve the LP of the compact formulation + //--- and current branching decisions + //--- + //--- the main goal here is determine if a node is LP infeasible + //--- due to the branching decisions - if we can determine this + //--- here, we can just fathom the node and skip the expensive + //--- PhaseI pricing to prove infeasible + //--- + UtilPrintFuncBegin(m_osLog, m_classTag, "phaseInit()", m_param.LogDebugLevel, + 2); + + //--- + //--- set column bounds + //--- + // TODO: reuse this memory - col size does not change + // TODO: not getting cuts since only populate A'' in beginning + // by adding cuts this adds some of the integrality property + // otherwise the only thing causing infeasibility is the branching + // TODO: to get cuts would want to populate m_auxSI with + // the current core model - not too bad since only once + // a node? but even without cuts, see if it helps + if (m_auxSI) { + UTIL_DEBUG(m_param.LogDebugLevel, 4, + (*m_osLog) << "Solve the LP of compact formulation." << endl;); + int c; + int numCols = m_modelCore.getModel()->getNumCols(); + int *index = new int[numCols]; + double *bounds = new double[2 * numCols]; + + if (!(index && bounds)) { + throw UtilExceptionMemory("phaseInit", m_classTag); + } + + for (c = 0; c < numCols; c++) { + index[c] = c; + bounds[2 * c] = m_colLBNode[c]; + bounds[2 * c + 1] = m_colUBNode[c]; + } + + m_auxSI->setColSetBounds(index, index + numCols, bounds); + UTIL_DELARR(index); + UTIL_DELARR(bounds); + //--- + //--- solve LP relaxation + //--- + m_auxSI->resolve(); + //--- + //--- two possible results that might help here: + //--- (1) the LP is found infeasible -> fathom + //--- (2) the LP bound is already greater than the global UB -> fathom + //--- + UTIL_DEBUG(m_param.LogDebugLevel, 4, + (*m_osLog) << "Iteration Count : " + << m_auxSI->getIterationCount() << "\n" + << "isAbandoned() : " + << m_auxSI->isAbandoned() << "\n" + << "isProvenOptimal() : " + << m_auxSI->isProvenOptimal() << "\n" + << "isProvenPrimalInfeasible() : " + << m_auxSI->isProvenPrimalInfeasible() << "\n" + << "isProvenDualInfeasible() : " + << m_auxSI->isProvenDualInfeasible() << "\n" + << "isPrimalObjectiveLimitReached : " + << m_auxSI->isDualObjectiveLimitReached() << "\n" + << "isDualObjectiveLimitReached : " + << m_auxSI->isDualObjectiveLimitReached() << "\n" + << "isIterationLimitReached : " + << m_auxSI->isIterationLimitReached() << "\n";); + + if (m_auxSI->isProvenPrimalInfeasible()) { + UTIL_MSG(m_param.LogLevel, 3, + (*m_osLog) << "LP of Compact found Infeasible." 
<< endl;); + phase = PHASE_DONE; + } + } + + if (phase != PHASE_DONE) + if (getNodeIndex() == 0 && !m_isStrongBranch) { + phase = PHASE_PRICE1; + } + + UTIL_MSG(m_param.LogLevel, 2, + (*m_osLog) << "phase = " << DecompPhaseStr[phase] << endl;); + UtilPrintFuncEnd(m_osLog, m_classTag, "phaseInit()", m_param.LogDebugLevel, + 2); } //===========================================================================// -void DecompAlgoPC::adjustMasterDualSolution() -{ - if (!m_param.DualStab) { - return; - } - - UtilPrintFuncBegin(m_osLog, m_classTag, - "adjustMasterDualSolution()", m_param.LogDebugLevel, 2); - //--- - //--- resize dual vectors - //--- - int nRows = static_cast(m_masterSI->getNumRows()); - m_dual.resize(nRows); - m_dualRM.resize(nRows); - m_dualST.resize(nRows); - //--- - //--- calculate smoothed dual - //--- pi_ST = alpha * pi_Bar + (1-alpha) * pi_RM - //--- this is dual feasible because it is taking - //--- a convex combination of previously dual feasible vectors - //--- need to be careful here, as the init dual is 0, which might not - //--- be dual feasible, therefore, in the first iteration, we need - //--- to skip the smoothing and enforce that the first dual be set - //--- to dualRM - //--- - int r; - const double* u = &m_dualSolution[0]; - double alpha = m_param.DualStabAlpha; - double alpha1 = 1.0 - alpha; - copy(u, u + nRows, m_dualRM.begin()); //copy for sake of debugging - - //--- - //--- for both the first PhaseI and first PhaseII calls, - //--- be sure to set the dual vector to dualRM as dual=0 - //--- might not be feasible - //--- - if (m_param.LogDebugLevel >= 3) { - (*m_osLog) << "m_firstPhase2Call = " << m_firstPhase2Call << endl; - } - - if (((m_nodeStats.cutCallsTotal + - m_nodeStats.priceCallsTotal) == 0) || m_firstPhase2Call) { - if (m_param.LogDebugLevel >= 2) { - (*m_osLog) << "Init dual to dualRM" << endl; +void DecompAlgoPC::adjustMasterDualSolution() { + if (!m_param.DualStab) { + return; + } + + UtilPrintFuncBegin(m_osLog, m_classTag, "adjustMasterDualSolution()", + m_param.LogDebugLevel, 2); + //--- + //--- resize dual vectors + //--- + int nRows = static_cast(m_masterSI->getNumRows()); + m_dual.resize(nRows); + m_dualRM.resize(nRows); + m_dualST.resize(nRows); + //--- + //--- calculate smoothed dual + //--- pi_ST = alpha * pi_Bar + (1-alpha) * pi_RM + //--- this is dual feasible because it is taking + //--- a convex combination of previously dual feasible vectors + //--- need to be careful here, as the init dual is 0, which might not + //--- be dual feasible, therefore, in the first iteration, we need + //--- to skip the smoothing and enforce that the first dual be set + //--- to dualRM + //--- + int r; + const double *u = &m_dualSolution[0]; + double alpha = m_param.DualStabAlpha; + double alpha1 = 1.0 - alpha; + copy(u, u + nRows, m_dualRM.begin()); // copy for sake of debugging + + //--- + //--- for both the first PhaseI and first PhaseII calls, + //--- be sure to set the dual vector to dualRM as dual=0 + //--- might not be feasible + //--- + if (m_param.LogDebugLevel >= 3) { + (*m_osLog) << "m_firstPhase2Call = " << m_firstPhase2Call << endl; + } + + if (((m_nodeStats.cutCallsTotal + m_nodeStats.priceCallsTotal) == 0) || + m_firstPhase2Call) { + if (m_param.LogDebugLevel >= 2) { + (*m_osLog) << "Init dual to dualRM" << endl; + } + + copy(m_dualRM.begin(), m_dualRM.end(), m_dual.begin()); + } + + if (m_firstPhase2Call) { + m_app->initDualVector(m_dual); + } + + for (r = 0; r < nRows; r++) { + m_dualST[r] = (alpha * m_dual[r]) + (alpha1 * m_dualRM[r]); + 
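The assignment just above is the dual-stabilization step: the duals sent to the pricing problems are a convex combination of the previous stability center (m_dual) and the fresh restricted-master duals (m_dualRM). A standalone restatement, assuming equal-length vectors and 0 <= alpha <= 1:

#include <cstddef>
#include <vector>

std::vector<double> smoothDuals(const std::vector<double> &piBar,  // stability center
                                const std::vector<double> &piRM,   // restricted-master duals
                                double alpha) {
  std::vector<double> piST(piBar.size());
  for (std::size_t r = 0; r < piBar.size(); ++r)
    piST[r] = alpha * piBar[r] + (1.0 - alpha) * piRM[r];  // convex combination
  return piST;                // stays dual feasible if both inputs are
}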
} + + //--- + //--- log for debugging + //--- + if (m_param.LogDebugLevel >= 3) { + const vector &rowNames = m_masterSI->getRowNames(); + + for (r = 0; r < m_masterSI->getNumRows(); r++) { + if (!(UtilIsZero(m_dual[r]) && UtilIsZero(m_dualRM[r]) && + UtilIsZero(m_dualST[r]))) { + if (r < static_cast(rowNames.size())) { + (*m_osLog) << "MASTER " << DecompRowTypeStr[m_masterRowType[r]] + << " DUAL[ " << r << "->" << rowNames[r] + << "] = " << m_dual[r] << " RM = " << m_dualRM[r] + << " ST = " << m_dualST[r] << endl; + } else + (*m_osLog) << "MASTER " << DecompRowTypeStr[m_masterRowType[r]] + << " DUAL[ " << r << "] = " << m_dual[r] + << " RM = " << m_dualRM[r] << " ST = " << m_dualST[r] + << endl; } + } + } - copy(m_dualRM.begin(), m_dualRM.end(), m_dual.begin()); - } - - if (m_firstPhase2Call) { - m_app->initDualVector(m_dual); - } - - for (r = 0; r < nRows; r++) { - m_dualST[r] = (alpha * m_dual[r]) + (alpha1 * m_dualRM[r]); - } - - //--- - //--- log for debugging - //--- - if (m_param.LogDebugLevel >= 3) { - const vector& rowNames = m_masterSI->getRowNames(); - - for (r = 0; r < m_masterSI->getNumRows(); r++) { - if (!(UtilIsZero(m_dual[r]) && - UtilIsZero(m_dualRM[r]) && UtilIsZero(m_dualST[r]))) { - if (r < static_cast(rowNames.size())) { - (*m_osLog) << "MASTER " - << DecompRowTypeStr[m_masterRowType[r]] - << " DUAL[ " << r << "->" << rowNames[r] - << "] = " << m_dual[r] << " RM = " - << m_dualRM[r] << " ST = " << m_dualST[r] - << endl; - } else - (*m_osLog) << "MASTER " - << DecompRowTypeStr[m_masterRowType[r]] - << " DUAL[ " << r - << "] = " << m_dual[r] << " RM = " - << m_dualRM[r] << " ST = " << m_dualST[r] - << endl; - } - } - } - - UtilPrintFuncEnd(m_osLog, m_classTag, - "adjustMasterDualSolution()", m_param.LogDebugLevel, 2); + UtilPrintFuncEnd(m_osLog, m_classTag, "adjustMasterDualSolution()", + m_param.LogDebugLevel, 2); } - //===========================================================================// -int DecompAlgoPC::adjustColumnsEffCnt() -{ - int status = DecompStatOk; - int colMasterIndex = -1; - const double* redCost = m_masterSI->getReducedCost(); - double redCostI = 0.0; - UtilPrintFuncBegin(m_osLog, m_classTag, - "adjustColumnsEffCnt()", m_param.LogDebugLevel, 2); - DecompVarList::iterator li; - - for (li = m_vars.begin(); li != m_vars.end(); li++) { - colMasterIndex = (*li)->getColMasterIndex(); - redCostI = redCost[colMasterIndex]; - assert(isMasterColStructural(colMasterIndex)); - - if (redCostI > DecompEpsilon) { - (*li)->decreaseEffCnt(); - } else { - (*li)->increaseEffCnt(); - } - - UTIL_DEBUG(m_param.LogLevel, 4, - (*m_osLog) << "ColIndex= " << setw(5) << colMasterIndex - << " RedCost= " << UtilDblToStr(redCostI) - << " EffCnt= " << setw(5) << (*li)->getEffectiveness() - << endl; - ); - } - - UtilPrintFuncEnd(m_osLog, m_classTag, - "adjustColumnsEffCnt()", m_param.LogDebugLevel, 2); - return status; +int DecompAlgoPC::adjustColumnsEffCnt() { + int status = DecompStatOk; + int colMasterIndex = -1; + const double *redCost = m_masterSI->getReducedCost(); + double redCostI = 0.0; + UtilPrintFuncBegin(m_osLog, m_classTag, "adjustColumnsEffCnt()", + m_param.LogDebugLevel, 2); + DecompVarList::iterator li; + + for (li = m_vars.begin(); li != m_vars.end(); li++) { + colMasterIndex = (*li)->getColMasterIndex(); + redCostI = redCost[colMasterIndex]; + assert(isMasterColStructural(colMasterIndex)); + + if (redCostI > DecompEpsilon) { + (*li)->decreaseEffCnt(); + } else { + (*li)->increaseEffCnt(); + } + + UTIL_DEBUG(m_param.LogLevel, 4, + (*m_osLog) << "ColIndex= " << 
setw(5) << colMasterIndex + << " RedCost= " << UtilDblToStr(redCostI) + << " EffCnt= " << setw(5) << (*li)->getEffectiveness() + << endl;); + } + + UtilPrintFuncEnd(m_osLog, m_classTag, "adjustColumnsEffCnt()", + m_param.LogDebugLevel, 2); + return status; } //===========================================================================// -int DecompAlgoPC::compressColumns() -{ - //--- - //--- periodically, get rid of ineffective columns - //--- periodic: - //--- every K iterations OR - //--- numCols has inceased by X since last compression - //--- - int status = DecompStatOk; - UtilPrintFuncBegin(m_osLog, m_classTag, - "compressColumns()", m_param.LogDebugLevel, 2); - m_stats.timerOther1.reset(); - int nHistorySize - = static_cast(m_nodeStats.objHistoryBound.size()); - - if (nHistorySize > 0) { - DecompObjBound& objBound - = m_nodeStats.objHistoryBound[nHistorySize - 1]; - double masterUB = objBound.thisBoundUB; - double masterLB = m_nodeStats.objBest.first; - double masterGap = m_infinity; - - if (masterUB > -m_infinity && - masterUB < m_infinity) { - if (masterUB != 0.0) { - masterGap = fabs(masterUB - masterLB) / masterUB; - } else { - masterGap = fabs(masterUB - masterLB); - } +int DecompAlgoPC::compressColumns() { + //--- + //--- periodically, get rid of ineffective columns + //--- periodic: + //--- every K iterations OR + //--- numCols has inceased by X since last compression + //--- + int status = DecompStatOk; + UtilPrintFuncBegin(m_osLog, m_classTag, "compressColumns()", + m_param.LogDebugLevel, 2); + m_stats.timerOther1.reset(); + int nHistorySize = static_cast(m_nodeStats.objHistoryBound.size()); + + if (nHistorySize > 0) { + DecompObjBound &objBound = m_nodeStats.objHistoryBound[nHistorySize - 1]; + double masterUB = objBound.thisBoundUB; + double masterLB = m_nodeStats.objBest.first; + double masterGap = m_infinity; + + if (masterUB > -m_infinity && masterUB < m_infinity) { + if (masterUB != 0.0) { + masterGap = fabs(masterUB - masterLB) / masterUB; + } else { + masterGap = fabs(masterUB - masterLB); } + } - if (masterGap > m_param.CompressColumnsMasterGapStart) { - return status; - } - } else { + if (masterGap > m_param.CompressColumnsMasterGapStart) { return status; - } - - const int CompressColsIterFreq = m_param.CompressColumnsIterFreq; + } + } else { + return status; + } - const double CompressColsSizeMultLimit = m_param.CompressColumnsSizeMultLimit; + const int CompressColsIterFreq = m_param.CompressColumnsIterFreq; - const int nMasterCols = m_masterSI->getNumCols(); + const double CompressColsSizeMultLimit = m_param.CompressColumnsSizeMultLimit; - const int nMasterRows = m_masterSI->getNumRows(); + const int nMasterCols = m_masterSI->getNumCols(); - int nColsSinceLast - = nMasterCols - m_compressColsLastNumCols; + const int nMasterRows = m_masterSI->getNumRows(); - int nIterSinceLast - = m_nodeStats.priceCallsTotal - m_compressColsLastPrice; + int nColsSinceLast = nMasterCols - m_compressColsLastNumCols; - int nColsSinceLastLimit - = static_cast(ceil(m_compressColsLastNumCols * - CompressColsSizeMultLimit)); + int nIterSinceLast = m_nodeStats.priceCallsTotal - m_compressColsLastPrice; - UTIL_MSG(m_param.LogLevel, 4, - (*m_osLog) << "nMasterCols = " - << nMasterCols << endl; - (*m_osLog) << "m_compressColsLastNumCols= " - << m_compressColsLastNumCols << endl; - (*m_osLog) << "nColsSinceLast = " - << nColsSinceLast << endl; - (*m_osLog) << "priceCallsTotal = " - << m_nodeStats.priceCallsTotal << endl; - (*m_osLog) << "m_compressColsLastPrice = " - << 
m_compressColsLastPrice << endl; - (*m_osLog) << "nItersSinceLast = " - << nIterSinceLast << endl; - ); + int nColsSinceLastLimit = static_cast( + ceil(m_compressColsLastNumCols * CompressColsSizeMultLimit)); - if (nColsSinceLast < nColsSinceLastLimit && - nIterSinceLast < CompressColsIterFreq) { - return status; - } - - //TODO: reuse memory - //TODO: using getBasics instead of getBasis since seems cheaper - int c; - int* basics = new int[nMasterRows]; - bool* isBasic = new bool[nMasterCols]; - assert(basics && isBasic); - UtilFillN(isBasic, nMasterCols, false); - //--- - //--- COIN BUG: to use OSI::getBasics with CLP, you need to - //--- enableSimplexInterface() - which has issues, so to get around - //--- this, we will use the warm start object to get basis status - //--- of variables - //--- - // m_masterSI->getBasics(basics); - // for(r = 0; r < nMasterRows; r++){ - // c = basics[r]; - // if(c < nMasterCols) - // isBasic[c] = true; - //} + UTIL_MSG( + m_param.LogLevel, 4, + (*m_osLog) << "nMasterCols = " << nMasterCols << endl; + (*m_osLog) << "m_compressColsLastNumCols= " << m_compressColsLastNumCols + << endl; + (*m_osLog) << "nColsSinceLast = " << nColsSinceLast << endl; + (*m_osLog) << "priceCallsTotal = " << m_nodeStats.priceCallsTotal + << endl; + (*m_osLog) << "m_compressColsLastPrice = " << m_compressColsLastPrice + << endl; + (*m_osLog) << "nItersSinceLast = " << nIterSinceLast << endl;); + + if (nColsSinceLast < nColsSinceLastLimit && + nIterSinceLast < CompressColsIterFreq) { + return status; + } + + // TODO: reuse memory + // TODO: using getBasics instead of getBasis since seems cheaper + int c; + int *basics = new int[nMasterRows]; + bool *isBasic = new bool[nMasterCols]; + assert(basics && isBasic); + UtilFillN(isBasic, nMasterCols, false); + //--- + //--- COIN BUG: to use OSI::getBasics with CLP, you need to + //--- enableSimplexInterface() - which has issues, so to get around + //--- this, we will use the warm start object to get basis status + //--- of variables + //--- + // m_masterSI->getBasics(basics); + // for(r = 0; r < nMasterRows; r++){ + // c = basics[r]; + // if(c < nMasterCols) + // isBasic[c] = true; + //} #ifndef DO_INTERIOR - bool mustDeleteWS = false; - CoinWarmStartBasis* warmStart - = dynamic_cast(m_masterSI->getPointerToWarmStart( - mustDeleteWS)); - - for (c = 0; c < nMasterCols; c++) { - if (warmStart->getStructStatus(c) == CoinWarmStartBasis::basic) { - isBasic[c] = true; - } - } + bool mustDeleteWS = false; + CoinWarmStartBasis *warmStart = dynamic_cast( + m_masterSI->getPointerToWarmStart(mustDeleteWS)); - if (mustDeleteWS) { - UTIL_DELPTR(warmStart); - } + for (c = 0; c < nMasterCols; c++) { + if (warmStart->getStructStatus(c) == CoinWarmStartBasis::basic) { + isBasic[c] = true; + } + } -#endif - //--- - //--- sanity check - //--- m_vars should contain just the structural columns - //--- - int nMasterColsStruct = 0; - assert(nMasterCols == static_cast(m_masterColType.size())); - - for (c = 0; c < nMasterCols; c++) { - if (isMasterColStructural(c)) { - nMasterColsStruct++; - } - } - - //assert(nMasterColsStruct == static_cast(m_vars.size())); - //several strategies here - we can sort by effCnt and - // purge those with the worse (only those negative) - //or we can just purge anything negative - //or we can purge anything less than some threshold - but not sure - // how to set that threshold - //decide what to purge based on effCnt but also be careful - // and DO Not purge anything that is currently in the basis! 
- // since eff count is based on > eps dual, you might have a degenerate - // point that has 0 rc but is in basis - so need to check that - int shift = 0; - int colMasterIndex; - vector lpColsToDelete; - vector indexShift; - UtilFillN(indexShift, nMasterCols, 0); - UTIL_DEBUG(m_param.LogLevel, 5, - (*m_osLog) << "VARS before compress:" << endl; - printVars(m_osLog);); - DecompVarList::iterator li = m_vars.begin(); - int nCols = 0; - int nColsNoDel = 0; - int nColsBasic = 0; - int nColsEffPos = 0; - - while (li != m_vars.end()) { - colMasterIndex = (*li)->getColMasterIndex(); - indexShift[colMasterIndex] = shift; - assert(isMasterColStructural(colMasterIndex)); - nCols++; + if (mustDeleteWS) { + UTIL_DELPTR(warmStart); + } - //--- - //--- do not delete any columns that were marked "NoDelete" - //--- these were degenerate points and deleting them can - //--- cause cycling - //--- - if (m_masterColType[colMasterIndex] == DecompCol_Structural_NoDelete - || m_masterColType[colMasterIndex] == DecompCol_MasterOnly) { - li++; - nColsNoDel++; - continue; - } - - //--- - //--- do not delete any columns that are basic - //--- do not delete any columns with non-negative effectiveness - //--- - if (isBasic[colMasterIndex]) { - nColsBasic++; - } - - if ((*li)->getEffectiveness() >= 0) { - nColsEffPos++; - } - - if (isBasic[colMasterIndex] || ((*li)->getEffectiveness() >= 0)) { - li++; - continue; - } - - UTIL_DEBUG(m_param.LogLevel, 4, - const double* masterSolution = getMasterPrimalSolution(); - (*m_osLog) << "CompressCol" - << " lpIndex= " << setw(5) << colMasterIndex - << " effCnt= " << setw(2) << (*li)->getEffectiveness() - << " currSol= " << setw(10) - << UtilDblToStr(masterSolution[colMasterIndex], 3) << endl; - ); - //add this var to pool - THINK what exactly does that entail - (*li)->resetEffectiveness(); - //this deletes the var object (we won't do this - // once we move to pool) - delete *li; - li = m_vars.erase(li); //removes link in list - lpColsToDelete.push_back(colMasterIndex); - m_masterColType[colMasterIndex] = DecompCol_ToBeDeleted; - shift++; - } - - if (lpColsToDelete.size() > 0) { - /*for(c = 0; c < m_masterSI->getNumCols(); c++){ - const string colN = m_masterSI->getColName(c); - printf("Before Col[%4d] Name: %30s Type: %20s\n", - c, - colN.c_str(), - DecompColTypeStr[m_masterColType[c]].c_str()); - }*/ - m_masterSI->deleteCols(static_cast(lpColsToDelete.size()), - &lpColsToDelete[0]); - m_cutpool.setRowsAreValid(false); - UTIL_MSG(m_param.LogLevel, 3, - (*m_osLog) << "Num Columns Deleted = " - << lpColsToDelete.size() - << " Cols = " << nCols - << " NoDel = " << nColsNoDel - << " Basic = " << nColsBasic - << " EffPos = " << nColsEffPos - << endl; - ); - - //--- - //--- now, we must update the mapping between LP index and - //--- the index in the var list objects - but we might have - //--- artificial columns lurking in between the LP columns - //--- - //--- Example: - //--- a=artificial - //--- s=structural (either from original row, branch row or cut row) - //--- - //--- lpColsToDelete = {6,7,15} - //--- 000000000011111111 - //--- 012345678901234567 - //--- aaaassssaasssaasss - //--- - //--- 000000000011111111 - //--- 012345678901234567 - //--- aaaas..saasssaa.ss - //--- aaaassaasssaass - //--- shift - //--- 00000..22222222.33 - //--- - //--- - //--- - //--- reset the master index in m_vars - //--- - for (li = m_vars.begin(); li != m_vars.end(); li++) { - colMasterIndex = (*li)->getColMasterIndex(); - (*li)->setColMasterIndex(colMasterIndex - indexShift[colMasterIndex]); - } - - 
//--- - //--- delete the entries in vector m_masterColType - //--- NOTE: this would be much faster if used list instead of vector - //--- - vector::iterator vi = m_masterColType.begin(); - - while (vi != m_masterColType.end()) { - if (*vi == DecompCol_ToBeDeleted) { - vi = m_masterColType.erase(vi); - } else { - vi++; - } +#endif + //--- + //--- sanity check + //--- m_vars should contain just the structural columns + //--- + int nMasterColsStruct = 0; + assert(nMasterCols == static_cast(m_masterColType.size())); + + for (c = 0; c < nMasterCols; c++) { + if (isMasterColStructural(c)) { + nMasterColsStruct++; + } + } + + // assert(nMasterColsStruct == static_cast(m_vars.size())); + // several strategies here - we can sort by effCnt and + // purge those with the worse (only those negative) + // or we can just purge anything negative + // or we can purge anything less than some threshold - but not sure + // how to set that threshold + // decide what to purge based on effCnt but also be careful + // and DO Not purge anything that is currently in the basis! + // since eff count is based on > eps dual, you might have a degenerate + // point that has 0 rc but is in basis - so need to check that + int shift = 0; + int colMasterIndex; + vector lpColsToDelete; + vector indexShift; + UtilFillN(indexShift, nMasterCols, 0); + UTIL_DEBUG(m_param.LogLevel, 5, (*m_osLog) << "VARS before compress:" << endl; + printVars(m_osLog);); + DecompVarList::iterator li = m_vars.begin(); + int nCols = 0; + int nColsNoDel = 0; + int nColsBasic = 0; + int nColsEffPos = 0; + + while (li != m_vars.end()) { + colMasterIndex = (*li)->getColMasterIndex(); + indexShift[colMasterIndex] = shift; + assert(isMasterColStructural(colMasterIndex)); + nCols++; + + //--- + //--- do not delete any columns that were marked "NoDelete" + //--- these were degenerate points and deleting them can + //--- cause cycling + //--- + if (m_masterColType[colMasterIndex] == DecompCol_Structural_NoDelete || + m_masterColType[colMasterIndex] == DecompCol_MasterOnly) { + li++; + nColsNoDel++; + continue; + } + + //--- + //--- do not delete any columns that are basic + //--- do not delete any columns with non-negative effectiveness + //--- + if (isBasic[colMasterIndex]) { + nColsBasic++; + } + + if ((*li)->getEffectiveness() >= 0) { + nColsEffPos++; + } + + if (isBasic[colMasterIndex] || ((*li)->getEffectiveness() >= 0)) { + li++; + continue; + } + + UTIL_DEBUG(m_param.LogLevel, 4, + const double *masterSolution = getMasterPrimalSolution(); + (*m_osLog) << "CompressCol" + << " lpIndex= " << setw(5) << colMasterIndex + << " effCnt= " << setw(2) << (*li)->getEffectiveness() + << " currSol= " << setw(10) + << UtilDblToStr(masterSolution[colMasterIndex], 3) + << endl;); + // add this var to pool - THINK what exactly does that entail + (*li)->resetEffectiveness(); + // this deletes the var object (we won't do this + // once we move to pool) + delete *li; + li = m_vars.erase(li); // removes link in list + lpColsToDelete.push_back(colMasterIndex); + m_masterColType[colMasterIndex] = DecompCol_ToBeDeleted; + shift++; + } + + if (lpColsToDelete.size() > 0) { + /*for(c = 0; c < m_masterSI->getNumCols(); c++){ + const string colN = m_masterSI->getColName(c); + printf("Before Col[%4d] Name: %30s Type: %20s\n", + c, + colN.c_str(), + DecompColTypeStr[m_masterColType[c]].c_str()); + }*/ + m_masterSI->deleteCols(static_cast(lpColsToDelete.size()), + &lpColsToDelete[0]); + m_cutpool.setRowsAreValid(false); + UTIL_MSG(m_param.LogLevel, 3, + (*m_osLog) << "Num Columns 
Deleted = " << lpColsToDelete.size() + << " Cols = " << nCols << " NoDel = " << nColsNoDel + << " Basic = " << nColsBasic + << " EffPos = " << nColsEffPos << endl;); + + //--- + //--- now, we must update the mapping between LP index and + //--- the index in the var list objects - but we might have + //--- artificial columns lurking in between the LP columns + //--- + //--- Example: + //--- a=artificial + //--- s=structural (either from original row, branch row or cut row) + //--- + //--- lpColsToDelete = {6,7,15} + //--- 000000000011111111 + //--- 012345678901234567 + //--- aaaassssaasssaasss + //--- + //--- 000000000011111111 + //--- 012345678901234567 + //--- aaaas..saasssaa.ss + //--- aaaassaasssaass + //--- shift + //--- 00000..22222222.33 + //--- + //--- + //--- + //--- reset the master index in m_vars + //--- + for (li = m_vars.begin(); li != m_vars.end(); li++) { + colMasterIndex = (*li)->getColMasterIndex(); + (*li)->setColMasterIndex(colMasterIndex - indexShift[colMasterIndex]); + } + + //--- + //--- delete the entries in vector m_masterColType + //--- NOTE: this would be much faster if used list instead of vector + //--- + vector::iterator vi = m_masterColType.begin(); + + while (vi != m_masterColType.end()) { + if (*vi == DecompCol_ToBeDeleted) { + vi = m_masterColType.erase(vi); + } else { + vi++; } + } - //--- - //--- sanity check - //--- m_vars should contain just the structural columns - //--- - int nMasterColsNew = m_masterSI->getNumCols(); - nMasterColsStruct = 0; - assert(nMasterColsNew == static_cast(m_masterColType.size())); + //--- + //--- sanity check + //--- m_vars should contain just the structural columns + //--- + int nMasterColsNew = m_masterSI->getNumCols(); + nMasterColsStruct = 0; + assert(nMasterColsNew == static_cast(m_masterColType.size())); - for (li = m_vars.begin(); li != m_vars.end(); li++) { - colMasterIndex = (*li)->getColMasterIndex(); - assert(isMasterColStructural(colMasterIndex)); - } + for (li = m_vars.begin(); li != m_vars.end(); li++) { + colMasterIndex = (*li)->getColMasterIndex(); + assert(isMasterColStructural(colMasterIndex)); + } - for (c = 0; c < nMasterColsNew; c++) { - if (isMasterColStructural(c)) { - nMasterColsStruct++; - } + for (c = 0; c < nMasterColsNew; c++) { + if (isMasterColStructural(c)) { + nMasterColsStruct++; } - - //assert(nMasterColsStruct == static_cast(m_vars.size())); - UTIL_DEBUG(m_param.LogLevel, 5, - (*m_osLog) << "VARS after compress:" << endl; - printVars(m_osLog);); - /*for(c = 0; c < m_masterSI->getNumCols(); c++){ - const string colN = m_masterSI->getColName(c); - printf("After Col[%4d] Name: %30s Type: %20s\n", - c, - colN.c_str(), - DecompColTypeStr[m_masterColType[c]].c_str()); - }*/ - //--- - //--- we deleted something, so reset the counters - m_compressColsLastPrice = m_nodeStats.priceCallsTotal; - m_compressColsLastNumCols = m_masterSI->getNumCols(); - //--- - //--- if any vars were deleted, do a solution update to refresh - //--- - status = solutionUpdate(m_phase, 99999, 99999); - } - - m_stats.thisCompressCols.push_back(m_stats.timerOther1.getRealTime()); - UTIL_DELARR(basics); - UTIL_DELARR(isBasic); - UtilPrintFuncEnd(m_osLog, m_classTag, - "compressColumns()", m_param.LogDebugLevel, 2); - return status; + } + + // assert(nMasterColsStruct == static_cast(m_vars.size())); + UTIL_DEBUG(m_param.LogLevel, 5, + (*m_osLog) << "VARS after compress:" << endl; + printVars(m_osLog);); + /*for(c = 0; c < m_masterSI->getNumCols(); c++){ + const string colN = m_masterSI->getColName(c); + printf("After Col[%4d] 
Name: %30s Type: %20s\n", + c, + colN.c_str(), + DecompColTypeStr[m_masterColType[c]].c_str()); + }*/ + //--- + //--- we deleted something, so reset the counters + m_compressColsLastPrice = m_nodeStats.priceCallsTotal; + m_compressColsLastNumCols = m_masterSI->getNumCols(); + //--- + //--- if any vars were deleted, do a solution update to refresh + //--- + status = solutionUpdate(m_phase, 99999, 99999); + } + + m_stats.thisCompressCols.push_back(m_stats.timerOther1.getRealTime()); + UTIL_DELARR(basics); + UTIL_DELARR(isBasic); + UtilPrintFuncEnd(m_osLog, m_classTag, "compressColumns()", + m_param.LogDebugLevel, 2); + return status; } //===========================================================================// -void DecompAlgoPC::phaseDone() -{ - UtilPrintFuncBegin(m_osLog, m_classTag, - "phaseDone()", m_param.LogDebugLevel, 2); - - if (m_param.SolveMasterAsMip && - getNodeIndex() % m_param.SolveMasterAsMipFreqNode == 0 && - m_stopCriteria != DecompStopTime) { - solveMasterAsMIP(); - } - - UtilPrintFuncEnd(m_osLog, m_classTag, - "phaseDone()", m_param.LogDebugLevel, 2); +void DecompAlgoPC::phaseDone() { + UtilPrintFuncBegin(m_osLog, m_classTag, "phaseDone()", m_param.LogDebugLevel, + 2); + + if (m_param.SolveMasterAsMip && + getNodeIndex() % m_param.SolveMasterAsMipFreqNode == 0 && + m_stopCriteria != DecompStopTime) { + solveMasterAsMIP(); + } + + UtilPrintFuncEnd(m_osLog, m_classTag, "phaseDone()", m_param.LogDebugLevel, + 2); } //===========================================================================// -void DecompAlgoPC::solveMasterAsMIP() -{ - //--- - //--- if node was already found infeasible, just return - //--- - if (m_status == STAT_INFEASIBLE) { - return; - } - - UtilPrintFuncBegin(m_osLog, m_classTag, - "solveMasterAsMIP()", m_param.LogDebugLevel, 2); - //--- - //--- no point in doing this if only one block, we check each - //--- new column to see if it is feasible to original already - //--- - assert(m_numConvexCon > 1); - int nMasterCols = m_masterSI->getNumCols();//lambda - DecompConstraintSet* modelCore = m_modelCore.getModel(); - //--- - //--- set the master (generated) columns (lambda) to integer - //--- set the master-onlys (that are integral) to integer - //--- - int colIndex; - const char* intMarkerCore = modelCore->getIntegerMark(); - - for (colIndex = 0; colIndex < nMasterCols; colIndex++) { - if (isMasterColStructural(colIndex)){ - m_masterSI->setInteger(colIndex); - } - } - for (int i = 0; i < m_masterOnlyCols.size(); i++){ - if (intMarkerCore[m_masterOnlyCols[i]] == 'I'){ - m_masterSI->setInteger(m_masterOnlyColsMap[m_masterOnlyCols[i]]); - } - } - if (m_param.LogDumpModel >= 2) - printCurrentProblem(m_masterSI, - "masterProbRootIP", - m_nodeStats.nodeIndex, - m_nodeStats.cutCallsTotal, - m_nodeStats.priceCallsTotal); - - DecompSolverResult result(m_infinity); - - if (m_param.DecompIPSolver == "SYMPHONY"){ - solveMasterAsMIPSym(&result); - }else if (m_param.DecompIPSolver == "Cbc"){ - solveMasterAsMIPCbc(&result); - }else if (m_param.DecompIPSolver == "CPLEX"){ - solveMasterAsMIPCpx(&result); - }else if (m_param.DecompIPSolver == "Gurobi"){ - solveMasterAsMIPGrb(&result); - }else{ - throw UtilException("Unknown solver selected", - "solveMasterAsMIP", "DecompAlgoPC"); - } - - if (result.m_nSolutions) { - double* rsolution = new double[modelCore->getNumCols()]; - - if (!rsolution) { - throw UtilExceptionMemory("solveMasterAsMIP", "DecompAlgoPC"); - } +void DecompAlgoPC::solveMasterAsMIP() { + //--- + //--- if node was already found infeasible, just return + 
//--- + if (m_status == STAT_INFEASIBLE) { + return; + } + + UtilPrintFuncBegin(m_osLog, m_classTag, "solveMasterAsMIP()", + m_param.LogDebugLevel, 2); + //--- + //--- no point in doing this if only one block, we check each + //--- new column to see if it is feasible to original already + //--- + assert(m_numConvexCon > 1); + int nMasterCols = m_masterSI->getNumCols(); // lambda + DecompConstraintSet *modelCore = m_modelCore.getModel(); + //--- + //--- set the master (generated) columns (lambda) to integer + //--- set the master-onlys (that are integral) to integer + //--- + int colIndex; + const char *intMarkerCore = modelCore->getIntegerMark(); + + for (colIndex = 0; colIndex < nMasterCols; colIndex++) { + if (isMasterColStructural(colIndex)) { + m_masterSI->setInteger(colIndex); + } + } + for (int i = 0; i < m_masterOnlyCols.size(); i++) { + if (intMarkerCore[m_masterOnlyCols[i]] == 'I') { + m_masterSI->setInteger(m_masterOnlyColsMap[m_masterOnlyCols[i]]); + } + } + if (m_param.LogDumpModel >= 2) + printCurrentProblem(m_masterSI, "masterProbRootIP", m_nodeStats.nodeIndex, + m_nodeStats.cutCallsTotal, m_nodeStats.priceCallsTotal); + + DecompSolverResult result(m_infinity); + + if (m_param.DecompIPSolver == "SYMPHONY") { + solveMasterAsMIPSym(&result); + } else if (m_param.DecompIPSolver == "Cbc") { + solveMasterAsMIPCbc(&result); + } else if (m_param.DecompIPSolver == "CPLEX") { + solveMasterAsMIPCpx(&result); + } else if (m_param.DecompIPSolver == "Gurobi") { + solveMasterAsMIPGrb(&result); + } else { + throw UtilException("Unknown solver selected", "solveMasterAsMIP", + "DecompAlgoPC"); + } + + if (result.m_nSolutions) { + double *rsolution = new double[modelCore->getNumCols()]; + + if (!rsolution) { + throw UtilExceptionMemory("solveMasterAsMIP", "DecompAlgoPC"); + } + + UTIL_MSG(m_param.LogLevel, 3, + (*m_osLog) << "Solve as IP found a solution." << endl;); + recomposeSolution(result.getSolution(0), rsolution); + + if (!isIPFeasible(rsolution)) + throw UtilException("Recomposed solution is not feasible", + "solveMasterAsMIP", "DecompAlgoPC"); + if (m_app->APPisUserFeasible(rsolution, modelCore->getNumCols(), + m_param.TolZero)) { UTIL_MSG(m_param.LogLevel, 3, - (*m_osLog) << "Solve as IP found a solution." << endl;); - recomposeSolution(result.getSolution(0), rsolution); - - if (!isIPFeasible(rsolution)) - throw UtilException("Recomposed solution is not feasible", - "solveMasterAsMIP", "DecompAlgoPC"); - - if (m_app->APPisUserFeasible(rsolution, - modelCore->getNumCols(), - m_param.TolZero)) { - UTIL_MSG(m_param.LogLevel, 3, - (*m_osLog) << "Solution is app-feasible, nSolutions=" - << (int)m_xhatIPFeas.size() << endl;); - //check for dup sol - TODO: make func - bool isDup = m_xhatIPFeas.size() > 0 ? true : false; - vector::iterator vit; - - for (vit = m_xhatIPFeas.begin(); - vit != m_xhatIPFeas.end(); vit++) { - const DecompSolution* xhatIPFeas = *vit; - const double* values - = xhatIPFeas->getValues(); - - for (int c = 0; c < modelCore->getNumCols(); c++) { - if (!UtilIsZero(values[c] - rsolution[c])) { - isDup = false; - break; - } - } - } - - if (isDup) { - UTIL_MSG(m_param.LogLevel, 3, - (*m_osLog) << "Solution is a duplicate, not pushing." 
- << endl;); - } else { - DecompSolution* decompSol - = new DecompSolution(modelCore->getNumCols(), - rsolution, - getOrigObjective()); - m_xhatIPFeas.push_back(decompSol); - vector::iterator vi; - DecompSolution* viBest = NULL; - double bestBoundUB = m_nodeStats.objBest.second; - - for (vi = m_xhatIPFeas.begin(); vi != m_xhatIPFeas.end(); vi++) { - const DecompSolution* xhatIPFeas = *vi; - - if (xhatIPFeas->getQuality() <= bestBoundUB) { - bestBoundUB = xhatIPFeas->getQuality(); - viBest = *vi; - } - } - - if (viBest) { - //save the best - setObjBoundIP(bestBoundUB); - m_xhatIPBest = viBest; - } - } + (*m_osLog) << "Solution is app-feasible, nSolutions=" + << (int)m_xhatIPFeas.size() << endl;); + // check for dup sol - TODO: make func + bool isDup = m_xhatIPFeas.size() > 0 ? true : false; + vector::iterator vit; + + for (vit = m_xhatIPFeas.begin(); vit != m_xhatIPFeas.end(); vit++) { + const DecompSolution *xhatIPFeas = *vit; + const double *values = xhatIPFeas->getValues(); + + for (int c = 0; c < modelCore->getNumCols(); c++) { + if (!UtilIsZero(values[c] - rsolution[c])) { + isDup = false; + break; + } + } } - if (m_param.LogDebugLevel >= 3) { - int j; - const vector& colNames = modelCore->getColNames(); - - for (j = 0; j < modelCore->getNumCols(); j++) { - if (fabs(rsolution[j]) > DecompEpsilon) { - if (j < static_cast(colNames.size())) - printf("MASTER PRIM[%6d->%20s] = %12.10f\n", - j, colNames[j].c_str(), rsolution[j]); - else - printf("MASTER PRIM[%6d] = %12.10f\n", - j, rsolution[j]); - } - } + if (isDup) { + UTIL_MSG(m_param.LogLevel, 3, + (*m_osLog) + << "Solution is a duplicate, not pushing." << endl;); + } else { + DecompSolution *decompSol = new DecompSolution( + modelCore->getNumCols(), rsolution, getOrigObjective()); + m_xhatIPFeas.push_back(decompSol); + vector::iterator vi; + DecompSolution *viBest = NULL; + double bestBoundUB = m_nodeStats.objBest.second; + + for (vi = m_xhatIPFeas.begin(); vi != m_xhatIPFeas.end(); vi++) { + const DecompSolution *xhatIPFeas = *vi; + + if (xhatIPFeas->getQuality() <= bestBoundUB) { + bestBoundUB = xhatIPFeas->getQuality(); + viBest = *vi; + } + } + + if (viBest) { + // save the best + setObjBoundIP(bestBoundUB); + m_xhatIPBest = viBest; + } } - - UTIL_DELARR(rsolution); - } - - //--- - //--- set the master columns back to continuous - //--- - for (colIndex = 0; colIndex < nMasterCols; colIndex++) { - if (isMasterColStructural(colIndex) || - isMasterColMasterOnly(colIndex)) { - m_masterSI->setContinuous(colIndex); + } + + if (m_param.LogDebugLevel >= 3) { + int j; + const vector &colNames = modelCore->getColNames(); + + for (j = 0; j < modelCore->getNumCols(); j++) { + if (fabs(rsolution[j]) > DecompEpsilon) { + if (j < static_cast(colNames.size())) + printf("MASTER PRIM[%6d->%20s] = %12.10f\n", j, colNames[j].c_str(), + rsolution[j]); + else + printf("MASTER PRIM[%6d] = %12.10f\n", j, rsolution[j]); + } } - } - - UtilPrintFuncEnd(m_osLog, m_classTag, - "solveMasterAsMIP()", m_param.LogDebugLevel, 2); + } + + UTIL_DELARR(rsolution); + } + + //--- + //--- set the master columns back to continuous + //--- + for (colIndex = 0; colIndex < nMasterCols; colIndex++) { + if (isMasterColStructural(colIndex) || isMasterColMasterOnly(colIndex)) { + m_masterSI->setContinuous(colIndex); + } + } + + UtilPrintFuncEnd(m_osLog, m_classTag, "solveMasterAsMIP()", + m_param.LogDebugLevel, 2); } //===========================================================================// -void DecompAlgoPC::solveMasterAsMIPSym(DecompSolverResult* result) -{ +void 
DecompAlgoPC::solveMasterAsMIPSym(DecompSolverResult *result) { #ifdef DIP_HAS_SYMPHONY - int colIndex; - int numCols = m_masterSI->getNumCols(); - int nMasterCols = m_masterSI->getNumCols();//lambda - int logIpLevel = m_param.LogIpLevel; - const char* intMarkerCore = m_modelCore.getModel()->getIntegerMark(); + int colIndex; + int numCols = m_masterSI->getNumCols(); + int nMasterCols = m_masterSI->getNumCols(); // lambda + int logIpLevel = m_param.LogIpLevel; + const char *intMarkerCore = m_modelCore.getModel()->getIntegerMark(); - OsiSymSolverInterface* osiSym = new OsiSymSolverInterface(); + OsiSymSolverInterface *osiSym = new OsiSymSolverInterface(); #if 0 OsiSolverInterface * m_masterClone = m_masterSI->clone(); @@ -778,850 +735,825 @@ void DecompAlgoPC::solveMasterAsMIPSym(DecompSolverResult* result) } #else - osiSym->loadProblem(const_cast(*m_masterSI->getMatrixByRow()), - const_cast(m_masterSI->getColLower()), - const_cast(m_masterSI->getColUpper()), - const_cast(m_masterSI->getObjCoefficients()), - const_cast(m_masterSI->getRowLower()), - const_cast(m_masterSI->getRowUpper())); - - for (colIndex = 0; colIndex < nMasterCols; colIndex++) { - if (isMasterColStructural(colIndex)){ - osiSym->setInteger(colIndex); - } - } - for (int i = 0; i < m_masterOnlyCols.size(); i++){ - if (intMarkerCore[m_masterOnlyCols[i]] == 'I'){ - osiSym->setInteger(m_masterOnlyColsMap[m_masterOnlyCols[i]]); - } - } + osiSym->loadProblem( + const_cast(*m_masterSI->getMatrixByRow()), + const_cast(m_masterSI->getColLower()), + const_cast(m_masterSI->getColUpper()), + const_cast(m_masterSI->getObjCoefficients()), + const_cast(m_masterSI->getRowLower()), + const_cast(m_masterSI->getRowUpper())); + + for (colIndex = 0; colIndex < nMasterCols; colIndex++) { + if (isMasterColStructural(colIndex)) { + osiSym->setInteger(colIndex); + } + } + for (int i = 0; i < m_masterOnlyCols.size(); i++) { + if (intMarkerCore[m_masterOnlyCols[i]] == 'I') { + osiSym->setInteger(m_masterOnlyColsMap[m_masterOnlyCols[i]]); + } + } #endif - assert(osiSym); - sym_environment* env = osiSym->getSymphonyEnvironment(); - if (logIpLevel == 0){ - sym_set_int_param(env, "verbosity", -10); - } - else{ - sym_set_int_param(env, "verbosity", logIpLevel); - } - - assert(env); - osiSym->branchAndBound(); - int status = sym_get_status(env); - - if ((status == PREP_OPTIMAL_SOLUTION_FOUND) || - (status == TM_OPTIMAL_SOLUTION_FOUND) - || (status == TM_TARGET_GAP_ACHIEVED)) { + assert(osiSym); + sym_environment *env = osiSym->getSymphonyEnvironment(); + if (logIpLevel == 0) { + sym_set_int_param(env, "verbosity", -10); + } else { + sym_set_int_param(env, "verbosity", logIpLevel); + } + + assert(env); + osiSym->branchAndBound(); + int status = sym_get_status(env); + + if ((status == PREP_OPTIMAL_SOLUTION_FOUND) || + (status == TM_OPTIMAL_SOLUTION_FOUND) || + (status == TM_TARGET_GAP_ACHIEVED)) { + result->m_isOptimal = true; + double *solution = new double[numCols]; + assert(solution); + status = sym_get_col_solution(env, solution); + result->m_nSolutions = 1; + vector solVec(solution, solution + numCols); + result->m_solution.push_back(solVec); + UTIL_DELARR(solution); + + if (status == FUNCTION_TERMINATED_ABNORMALLY) + throw UtilException("sym_get_col_solution failure", "solveMasterAsMIP", + "DecompSubModel"); + } else { + if (sym_is_proven_primal_infeasible(env)) { + result->m_nSolutions = 0; result->m_isOptimal = true; - double* solution = new double[numCols]; - assert(solution); - status = sym_get_col_solution(env, solution); - result->m_nSolutions = 
1; - vector solVec(solution, solution + numCols); - result->m_solution.push_back(solVec); - UTIL_DELARR(solution); - - if (status == FUNCTION_TERMINATED_ABNORMALLY) - throw UtilException("sym_get_col_solution failure", - "solveMasterAsMIP", "DecompSubModel"); - } else { - if (sym_is_proven_primal_infeasible(env)) { - result->m_nSolutions = 0; - result->m_isOptimal = true; - // result->m_isCutoff = doCutoff; - } else { - // result->m_isCutoff = doCutoff; - result->m_isOptimal = false ; - } - } - - if (status == (TM_ERROR__USER || TM_ERROR__COMM_ERROR - || TM_ERROR__NUMERICAL_INSTABILITY - || TM_ERROR__ILLEGAL_RETURN_CODE - || TM_ERROR__NO_BRANCHING_CANDIDATE)) { - std::cerr << "Error: SYPHONMY IP solver status = " - << status << std::endl; - } - - UTIL_DELPTR(osiSym); + // result->m_isCutoff = doCutoff; + } else { + // result->m_isCutoff = doCutoff; + result->m_isOptimal = false; + } + } + + if (status == (TM_ERROR__USER || TM_ERROR__COMM_ERROR || + TM_ERROR__NUMERICAL_INSTABILITY || + TM_ERROR__ILLEGAL_RETURN_CODE || + TM_ERROR__NO_BRANCHING_CANDIDATE)) { + std::cerr << "Error: SYPHONMY IP solver status = " << status << std::endl; + } + + UTIL_DELPTR(osiSym); #else - throw UtilException("SYMPHONY selected as solver, but it's not available", - "solveMasterAsMIPSym", "DecompAlgoPC"); + throw UtilException("SYMPHONY selected as solver, but it's not available", + "solveMasterAsMIPSym", "DecompAlgoPC"); #endif } //===========================================================================// -void DecompAlgoPC::solveMasterAsMIPCbc(DecompSolverResult* result) -{ +void DecompAlgoPC::solveMasterAsMIPCbc(DecompSolverResult *result) { #ifdef DIP_HAS_CBC - int nMasterCols = m_masterSI->getNumCols();//lambda - int logIpLevel = m_param.LogIpLevel; - //TODO: what exactly does this do? make copy of entire model!? - CbcModel cbc(*m_masterSI); - cbc.setLogLevel(logIpLevel); - cbc.setDblParam(CbcModel::CbcAllowableFractionGap, - m_param.SolveMasterAsMipLimitGap); - cbc.setDblParam(CbcModel::CbcMaximumSeconds, m_param.SolveMasterAsMipTimeLimit); - cbc.setDblParam(CbcModel::CbcCurrentCutoff, m_globalUB); + int nMasterCols = m_masterSI->getNumCols(); // lambda + int logIpLevel = m_param.LogIpLevel; + // TODO: what exactly does this do? make copy of entire model!? 
+ CbcModel cbc(*m_masterSI); + cbc.setLogLevel(logIpLevel); + cbc.setDblParam(CbcModel::CbcAllowableFractionGap, + m_param.SolveMasterAsMipLimitGap); + cbc.setDblParam(CbcModel::CbcMaximumSeconds, + m_param.SolveMasterAsMipTimeLimit); + cbc.setDblParam(CbcModel::CbcCurrentCutoff, m_globalUB); #if 0 cbc.branchAndBound(); #else - CbcMain0(cbc); - //--- - //--- build argument list - //--- - //TODO: time limit, cutoff,gap - const char* argv[20]; - int argc = 0; - string cbcExe = "cbc"; - string cbcSolve = "-solve"; - string cbcQuit = "-quit"; - string cbcLog = "-log"; - string cbcLogSet = UtilIntToStr(logIpLevel); - string cbcGap = "-ratio"; - string cbcGapSet = UtilDblToStr(m_param.SolveMasterAsMipLimitGap); - string cbcTime = "-seconds"; - string cbcTimeSet = UtilDblToStr(m_param.SolveMasterAsMipTimeLimit); - string cbcCutoff = "-cutoff"; - string cbcCutoffSet = UtilDblToStr(m_globalUB, -1, 1.0e100); - argv[argc++] = cbcExe.c_str(); - argv[argc++] = cbcLog.c_str(); - argv[argc++] = cbcLogSet.c_str(); - argv[argc++] = cbcGap.c_str(); - argv[argc++] = cbcGapSet.c_str(); - argv[argc++] = cbcTime.c_str(); - argv[argc++] = cbcTimeSet.c_str(); - argv[argc++] = cbcCutoff.c_str(); - argv[argc++] = cbcCutoffSet.c_str(); - argv[argc++] = cbcSolve.c_str(); - argv[argc++] = cbcQuit.c_str(); - //--- - //--- solve IP using argument list - //--- - CbcMain1(argc, argv, cbc); + CbcMain0(cbc); + //--- + //--- build argument list + //--- + // TODO: time limit, cutoff,gap + const char *argv[20]; + int argc = 0; + string cbcExe = "cbc"; + string cbcSolve = "-solve"; + string cbcQuit = "-quit"; + string cbcLog = "-log"; + string cbcLogSet = UtilIntToStr(logIpLevel); + string cbcGap = "-ratio"; + string cbcGapSet = UtilDblToStr(m_param.SolveMasterAsMipLimitGap); + string cbcTime = "-seconds"; + string cbcTimeSet = UtilDblToStr(m_param.SolveMasterAsMipTimeLimit); + string cbcCutoff = "-cutoff"; + string cbcCutoffSet = UtilDblToStr(m_globalUB, -1, 1.0e100); + argv[argc++] = cbcExe.c_str(); + argv[argc++] = cbcLog.c_str(); + argv[argc++] = cbcLogSet.c_str(); + argv[argc++] = cbcGap.c_str(); + argv[argc++] = cbcGapSet.c_str(); + argv[argc++] = cbcTime.c_str(); + argv[argc++] = cbcTimeSet.c_str(); + argv[argc++] = cbcCutoff.c_str(); + argv[argc++] = cbcCutoffSet.c_str(); + argv[argc++] = cbcSolve.c_str(); + argv[argc++] = cbcQuit.c_str(); + //--- + //--- solve IP using argument list + //--- + CbcMain1(argc, argv, cbc); #endif - //--- - //--- get solver status - //--- comments based on Cbc2.3 - //--- - /** Final status of problem. - * -1 before branchAndBound - * 0 finished - check isProvenOptimal or isProvenInfeasible - * to see if solution found (or check value of best solution) - * 1 stopped - on maxnodes, maxsols, maxtime - * 2 difficulties so run was abandoned - * (5 event user programmed event occurred) + //--- + //--- get solver status + //--- comments based on Cbc2.3 + //--- + /** Final status of problem. 
+ * -1 before branchAndBound + * 0 finished - check isProvenOptimal or isProvenInfeasible + * to see if solution found (or check value of best solution) + * 1 stopped - on maxnodes, maxsols, maxtime + * 2 difficulties so run was abandoned + * (5 event user programmed event occurred) */ - const int statusSet[2] = {0, 1}; - result->m_solStatus = cbc.status(); - - if (!UtilIsInSet(result->m_solStatus, statusSet, 2)) { - cerr << "Error: CBC IP solver status = " << result->m_solStatus << endl; - //This shouldn't really cause an exception - //throw UtilException("CBC solver status", - // "solveMasterAsMIP", "DecompSubModel"); - } - - /** Secondary status of problem - * -1 unset (status_ will also be -1) - * 0 search completed with solution - * 1 linear relaxation not feasible (or worse than cutoff) - * 2 stopped on gap - * 3 stopped on nodes - * 4 stopped on time - * 5 stopped on user event - * 6 stopped on solutions - * 7 linear relaxation unbounded - */ - const int statusSet2[4] = {0, 1, 2, 4}; - result->m_solStatus2 = cbc.secondaryStatus(); - - //--- - //--- In root the subproblem should not be infeasible - //--- unless due to cutoff. But, after branching it - //--- can be infeasible. - //--- - if (!UtilIsInSet(result->m_solStatus2, statusSet2, 4)) { - cerr << "Warning: CBC IP solver 2nd status = " - << result->m_solStatus2 << endl; - //This shouldn't really cause an exception - //throw UtilException("CBC solver 2nd status", - // "solveMasterAsMIP", "DecompAlgoPC"); - } - - //--- - //--- update results object - //--- - result->m_nSolutions = 0; - result->m_isOptimal = false; - //TODO: can get multple solutions! - // how to retrieve? - //TODO: look into setHotstartSolution... done automatically - // look at one call to the next - //TODO: look into setNumberThreads - //TODO: redo cpx in this same way - it could be stopping on time, not gap - int nSolutions = cbc.getSolutionCount(); - result->m_nSolutions = nSolutions ? 1 : 0; - - if (cbc.isProvenOptimal() || - cbc.isProvenInfeasible()) { - result->m_isOptimal = true; - } - - //--- - //--- get copy of solution - //--- - result->m_objLB = cbc.getBestPossibleObjValue(); - - if (nSolutions >= 1) { - result->m_objUB = cbc.getObjValue(); - const double* solDbl = cbc.getColSolution(); - vector solVec(solDbl, solDbl + nMasterCols); - result->m_solution.push_back(solVec); - assert(result->m_nSolutions == - static_cast(result->m_solution.size())); - //memcpy(result->m_solution, - // cbc.getColSolution(), - // nMasterCols * sizeof(double)); - } + const int statusSet[2] = {0, 1}; + result->m_solStatus = cbc.status(); + + if (!UtilIsInSet(result->m_solStatus, statusSet, 2)) { + cerr << "Error: CBC IP solver status = " << result->m_solStatus << endl; + // This shouldn't really cause an exception + // throw UtilException("CBC solver status", + // "solveMasterAsMIP", "DecompSubModel"); + } + + /** Secondary status of problem + * -1 unset (status_ will also be -1) + * 0 search completed with solution + * 1 linear relaxation not feasible (or worse than cutoff) + * 2 stopped on gap + * 3 stopped on nodes + * 4 stopped on time + * 5 stopped on user event + * 6 stopped on solutions + * 7 linear relaxation unbounded + */ + const int statusSet2[4] = {0, 1, 2, 4}; + result->m_solStatus2 = cbc.secondaryStatus(); + + //--- + //--- In root the subproblem should not be infeasible + //--- unless due to cutoff. But, after branching it + //--- can be infeasible. 
+ //--- + if (!UtilIsInSet(result->m_solStatus2, statusSet2, 4)) { + cerr << "Warning: CBC IP solver 2nd status = " << result->m_solStatus2 + << endl; + // This shouldn't really cause an exception + // throw UtilException("CBC solver 2nd status", + // "solveMasterAsMIP", "DecompAlgoPC"); + } + + //--- + //--- update results object + //--- + result->m_nSolutions = 0; + result->m_isOptimal = false; + // TODO: can get multple solutions! + // how to retrieve? + // TODO: look into setHotstartSolution... done automatically + // look at one call to the next + // TODO: look into setNumberThreads + // TODO: redo cpx in this same way - it could be stopping on time, not gap + int nSolutions = cbc.getSolutionCount(); + result->m_nSolutions = nSolutions ? 1 : 0; + + if (cbc.isProvenOptimal() || cbc.isProvenInfeasible()) { + result->m_isOptimal = true; + } + + //--- + //--- get copy of solution + //--- + result->m_objLB = cbc.getBestPossibleObjValue(); + + if (nSolutions >= 1) { + result->m_objUB = cbc.getObjValue(); + const double *solDbl = cbc.getColSolution(); + vector solVec(solDbl, solDbl + nMasterCols); + result->m_solution.push_back(solVec); + assert(result->m_nSolutions == static_cast(result->m_solution.size())); + // memcpy(result->m_solution, + // cbc.getColSolution(), + // nMasterCols * sizeof(double)); + } #else - throw UtilException("Cbc selected as solver, but it's not available", - "solveMasterAsMIPCbc", "DecompAlgoPC"); + throw UtilException("Cbc selected as solver, but it's not available", + "solveMasterAsMIPCbc", "DecompAlgoPC"); #endif } //===========================================================================// -void DecompAlgoPC::solveMasterAsMIPCpx(DecompSolverResult* result) -{ +void DecompAlgoPC::solveMasterAsMIPCpx(DecompSolverResult *result) { #ifdef DIP_HAS_CPX - //--- - //--- get OsiCpx object from Osi object - //--- get CPEXENVptr for use with internal methods - //--- get CPXLPptr for use with internal methods - //--- - int nMasterCols = m_masterSI->getNumCols();//lambda - int logIpLevel = m_param.LogIpLevel; - OsiCpxSolverInterface* osiCpx - = dynamic_cast(m_masterSI); - CPXENVptr cpxEnv = osiCpx->getEnvironmentPtr(); - CPXLPptr cpxLp = osiCpx->getLpPtr(); - assert(cpxEnv && cpxLp); - //--- - //--- set parameters - //--- - int status = 0; - - if (logIpLevel) { - status = CPXsetintparam(cpxEnv, CPX_PARAM_SCRIND, CPX_ON); - - if (status) - throw UtilException("CPXsetintparam failure", - "solveMasterAsMIP", "DecompSubModel"); - - status = CPXsetintparam(cpxEnv, CPX_PARAM_SIMDISPLAY, logIpLevel); - - if (status) - throw UtilException("CPXsetintparam failure", - "solveMasterAsMIP", "DecompSubModel"); - } else { - status = CPXsetintparam(cpxEnv, CPX_PARAM_SCRIND, CPX_OFF); - - if (status) - throw UtilException("CPXsetintparam failure", - "solveMasterAsMIP", "DecompSubModel"); - } - - if (m_firstPhase2Call) { - //--- - //--- if calling with first Phase2 call, it is meant to - //--- "recombine" partial columns - i.e., if the user - //--- produced a fully feasible solution that was then - //--- separated into blocks - we want to be sure it - //--- at least recombines it - //--- so, make the stop on gap very small - //--- - //--- TODO: we should get this incumbent in the system without - //--- forcing the call to IP solver just to recombine - //--- - status = CPXsetdblparam(cpxEnv, CPX_PARAM_EPGAP, 0.005); //0.5% - } else { - status = CPXsetdblparam(cpxEnv, CPX_PARAM_EPGAP, - m_param.SolveMasterAsMipLimitGap); - } - - if (status) - throw UtilException("CPXsetdblparam failure", - 
"solveMasterAsMIP", "DecompAlgoPC"); - - status = CPXsetdblparam(cpxEnv, CPX_PARAM_TILIM, - m_param.SolveMasterAsMipTimeLimit); - - if (status) - throw UtilException("CPXsetdblparam failure", - "solveMasterAsMIP", "DecompAlgoPC"); - - status = CPXsetdblparam(cpxEnv, CPX_PARAM_CUTUP, m_globalUB); - - if (status) - throw UtilException("CPXsetdblparam failure", - "solveMasterAsMIP", "DecompAlgoPC"); + //--- + //--- get OsiCpx object from Osi object + //--- get CPEXENVptr for use with internal methods + //--- get CPXLPptr for use with internal methods + //--- + int nMasterCols = m_masterSI->getNumCols(); // lambda + int logIpLevel = m_param.LogIpLevel; + OsiCpxSolverInterface *osiCpx = + dynamic_cast(m_masterSI); + CPXENVptr cpxEnv = osiCpx->getEnvironmentPtr(); + CPXLPptr cpxLp = osiCpx->getLpPtr(); + assert(cpxEnv && cpxLp); + //--- + //--- set parameters + //--- + int status = 0; + + if (logIpLevel) { + status = CPXsetintparam(cpxEnv, CPX_PARAM_SCRIND, CPX_ON); + + if (status) + throw UtilException("CPXsetintparam failure", "solveMasterAsMIP", + "DecompSubModel"); + + status = CPXsetintparam(cpxEnv, CPX_PARAM_SIMDISPLAY, logIpLevel); + + if (status) + throw UtilException("CPXsetintparam failure", "solveMasterAsMIP", + "DecompSubModel"); + } else { + status = CPXsetintparam(cpxEnv, CPX_PARAM_SCRIND, CPX_OFF); + + if (status) + throw UtilException("CPXsetintparam failure", "solveMasterAsMIP", + "DecompSubModel"); + } + + if (m_firstPhase2Call) { + //--- + //--- if calling with first Phase2 call, it is meant to + //--- "recombine" partial columns - i.e., if the user + //--- produced a fully feasible solution that was then + //--- separated into blocks - we want to be sure it + //--- at least recombines it + //--- so, make the stop on gap very small + //--- + //--- TODO: we should get this incumbent in the system without + //--- forcing the call to IP solver just to recombine + //--- + status = CPXsetdblparam(cpxEnv, CPX_PARAM_EPGAP, 0.005); // 0.5% + } else { + status = CPXsetdblparam(cpxEnv, CPX_PARAM_EPGAP, + m_param.SolveMasterAsMipLimitGap); + } + + if (status) + throw UtilException("CPXsetdblparam failure", "solveMasterAsMIP", + "DecompAlgoPC"); + + status = CPXsetdblparam(cpxEnv, CPX_PARAM_TILIM, + m_param.SolveMasterAsMipTimeLimit); + + if (status) + throw UtilException("CPXsetdblparam failure", "solveMasterAsMIP", + "DecompAlgoPC"); + + status = CPXsetdblparam(cpxEnv, CPX_PARAM_CUTUP, m_globalUB); + + if (status) + throw UtilException("CPXsetdblparam failure", "solveMasterAsMIP", + "DecompAlgoPC"); #if CPX_VERSION >= 1100 - status = CPXsetintparam(cpxEnv, CPX_PARAM_THREADS, 1); + status = CPXsetintparam(cpxEnv, CPX_PARAM_THREADS, 1); - if (status) - throw UtilException("CPXsetintparam failure", - "solveMasterAsMIP", "DecompAlgoPC"); + if (status) + throw UtilException("CPXsetintparam failure", "solveMasterAsMIP", + "DecompAlgoPC"); #endif - //--- - //--- solve the MILP - //--- - osiCpx->branchAndBound(); - //--- - //--- get solver status - //--- - result->m_solStatus = CPXgetstat(cpxEnv, cpxLp); - result->m_solStatus2 = 0; - - //cout << "CPX IP solver status = " << result->m_solStatus << endl; - //TEMP FIX? - //THINK: if CPXMIP_INForUNBD, change to CPXMIP_INFEASIBLE, - // I don't think there is anyway the price+branch heur could - // be unbounded. But, what if the original full problem is unbounded? 
- if (result->m_solStatus == CPXMIP_INForUNBD ) { - result->m_solStatus = CPXMIP_INFEASIBLE; - } - - const int statusSet[5] = {CPXMIP_OPTIMAL, - CPXMIP_OPTIMAL_TOL, - CPXMIP_INFEASIBLE, - CPXMIP_TIME_LIM_FEAS, - CPXMIP_TIME_LIM_INFEAS - }; - - if (!UtilIsInSet(result->m_solStatus, statusSet, 5)) { - cerr << "Error: CPX IP solver status = " << result->m_solStatus << endl; - throw UtilException("CPX solver status", - "solveMasterAsMIP", "DecompAlgoPC"); - } - - //--- - //--- update results object - //--- - result->m_nSolutions = 0; - result->m_isOptimal = false; - - if (result->m_solStatus == CPXMIP_OPTIMAL || - result->m_solStatus == CPXMIP_OPTIMAL_TOL) { + //--- + //--- solve the MILP + //--- + osiCpx->branchAndBound(); + //--- + //--- get solver status + //--- + result->m_solStatus = CPXgetstat(cpxEnv, cpxLp); + result->m_solStatus2 = 0; + + // cout << "CPX IP solver status = " << result->m_solStatus << endl; + // TEMP FIX? + // THINK: if CPXMIP_INForUNBD, change to CPXMIP_INFEASIBLE, + // I don't think there is anyway the price+branch heur could + // be unbounded. But, what if the original full problem is unbounded? + if (result->m_solStatus == CPXMIP_INForUNBD) { + result->m_solStatus = CPXMIP_INFEASIBLE; + } + + const int statusSet[5] = {CPXMIP_OPTIMAL, CPXMIP_OPTIMAL_TOL, + CPXMIP_INFEASIBLE, CPXMIP_TIME_LIM_FEAS, + CPXMIP_TIME_LIM_INFEAS}; + + if (!UtilIsInSet(result->m_solStatus, statusSet, 5)) { + cerr << "Error: CPX IP solver status = " << result->m_solStatus << endl; + throw UtilException("CPX solver status", "solveMasterAsMIP", + "DecompAlgoPC"); + } + + //--- + //--- update results object + //--- + result->m_nSolutions = 0; + result->m_isOptimal = false; + + if (result->m_solStatus == CPXMIP_OPTIMAL || + result->m_solStatus == CPXMIP_OPTIMAL_TOL) { + result->m_nSolutions = 1; + result->m_isOptimal = true; + } else { + if (result->m_solStatus == CPXMIP_INFEASIBLE || + result->m_solStatus == CPXMIP_TIME_LIM_INFEAS) { + result->m_nSolutions = 0; + result->m_isOptimal = true; + } + // STOP - could have stopped on time... not just gap... do + // something like did in CBC + else { + //--- + //--- else it must have stopped on gap + //--- result->m_nSolutions = 1; - result->m_isOptimal = true; - } else { - if (result->m_solStatus == CPXMIP_INFEASIBLE || - result->m_solStatus == CPXMIP_TIME_LIM_INFEAS) { - result->m_nSolutions = 0; - result->m_isOptimal = true; - } - //STOP - could have stopped on time... not just gap... 
do - //something like did in CBC - else { - //--- - //--- else it must have stopped on gap - //--- - result->m_nSolutions = 1; - result->m_isOptimal = false; - } - } - - //--- - //--- get copy of solution - //--- - status = CPXgetbestobjval(cpxEnv, cpxLp, &result->m_objLB); - - if (status) - throw UtilException("CPXgetbestobjval failure", - "solveMasterAsMIP", "DecompAlgoPC"); - - if (result->m_nSolutions >= 1) { - status = CPXgetmipobjval(cpxEnv, cpxLp, &result->m_objUB); - - if (status) - throw UtilException("CPXgetmipobjval failure", - "solveMasterAsMIP", "DecompAlgoPC"); - - const double* solDbl = osiCpx->getColSolution(); - vector solVec(solDbl, solDbl + nMasterCols); - result->m_solution.push_back(solVec); - assert(result->m_nSolutions == - static_cast(result->m_solution.size())); - //memcpy(result->m_solution, - // cbc.getColSolution(), - // nMasterCols * sizeof(double)); - } - //--- - //--- set time back - //--- - status = CPXsetdblparam(cpxEnv, CPX_PARAM_TILIM, m_infinity); - - if (status) - throw UtilException("CPXsetdblparam failure", - "solveMasterAsMIP", "DecompAlgoPC"); + result->m_isOptimal = false; + } + } + + //--- + //--- get copy of solution + //--- + status = CPXgetbestobjval(cpxEnv, cpxLp, &result->m_objLB); + + if (status) + throw UtilException("CPXgetbestobjval failure", "solveMasterAsMIP", + "DecompAlgoPC"); + + if (result->m_nSolutions >= 1) { + status = CPXgetmipobjval(cpxEnv, cpxLp, &result->m_objUB); + + if (status) + throw UtilException("CPXgetmipobjval failure", "solveMasterAsMIP", + "DecompAlgoPC"); + + const double *solDbl = osiCpx->getColSolution(); + vector solVec(solDbl, solDbl + nMasterCols); + result->m_solution.push_back(solVec); + assert(result->m_nSolutions == static_cast(result->m_solution.size())); + // memcpy(result->m_solution, + // cbc.getColSolution(), + // nMasterCols * sizeof(double)); + } + //--- + //--- set time back + //--- + status = CPXsetdblparam(cpxEnv, CPX_PARAM_TILIM, m_infinity); + + if (status) + throw UtilException("CPXsetdblparam failure", "solveMasterAsMIP", + "DecompAlgoPC"); #else - throw UtilException("CPLEX selected as solver, but it's not available", - "solveMasterAsMIPCpx", "DecompAlgoPC"); + throw UtilException("CPLEX selected as solver, but it's not available", + "solveMasterAsMIPCpx", "DecompAlgoPC"); #endif } //===========================================================================// -void DecompAlgoPC::solveMasterAsMIPGrb(DecompSolverResult* result) -{ +void DecompAlgoPC::solveMasterAsMIPGrb(DecompSolverResult *result) { #ifdef DIP_HAS_GRB - int stat; - const int numCols = m_masterSI->getNumCols(); + int stat; + const int numCols = m_masterSI->getNumCols(); - OsiGrbSolverInterface* osiGrb - = dynamic_cast(m_masterSI); + OsiGrbSolverInterface *osiGrb = + dynamic_cast(m_masterSI); - GRBenv* env = osiGrb->getEnvironmentPtr(); + GRBenv *env = osiGrb->getEnvironmentPtr(); - GRBmodel* model = osiGrb->getLpPtr(); + GRBmodel *model = osiGrb->getLpPtr(); - osiGrb->branchAndBound(); + osiGrb->branchAndBound(); - GRBgetintattr(model, GRB_INT_ATTR_STATUS, &stat); + GRBgetintattr(model, GRB_INT_ATTR_STATUS, &stat); - result->m_isUnbounded = false; - result->m_isOptimal = false; - result->m_isCutoff = false; - result->m_nSolutions = 0; - if (stat == GRB_OPTIMAL){ - const double *solution = osiGrb->getColSolution(); - vector solVec(solution, solution + numCols); - result->m_solution.push_back(solVec); - result->m_nSolutions++; - result->m_isOptimal = true; - }else{ - result->m_isOptimal = true; - } + result->m_isUnbounded = 
false; + result->m_isOptimal = false; + result->m_isCutoff = false; + result->m_nSolutions = 0; + if (stat == GRB_OPTIMAL) { + const double *solution = osiGrb->getColSolution(); + vector solVec(solution, solution + numCols); + result->m_solution.push_back(solVec); + result->m_nSolutions++; + result->m_isOptimal = true; + } else { + result->m_isOptimal = true; + } #else - throw UtilException("Gurobi selected as solver, but it's not available", - "solveMasterAsMIPGrb", "DecompAlgoPC"); + throw UtilException("Gurobi selected as solver, but it's not available", + "solveMasterAsMIPGrb", "DecompAlgoPC"); #endif } //===========================================================================// -//because rowReform, this is very specific to PC -void DecompAlgoPC::addCutsToPool(const double* x, - DecompCutList& newCuts, - int& n_newCuts) -{ - UtilPrintFuncBegin(m_osLog, m_classTag, - "addCutsToPool()", m_param.LogDebugLevel, 2); - int r; - int cutIndex = 0; - bool isDupCore;//also check relax? - bool isDupPool; - bool isViolated; //TODO: do something similiar to check for pos-rc vars - bool addCut; - DecompConstraintSet* modelCore = m_modelCore.getModel(); - DecompCutPool::iterator ci; - DecompCutList::iterator li = newCuts.begin(); - - while (li != newCuts.end()) { - CoinPackedVector* row = new CoinPackedVector(); - //--- - //--- create a row (in terms of original formulation, x), from a cut - //--- - (*li)->expandCutToRow(row); - //--- - //--- set the hash string (for quick duplicate checks) - //--- - (*li)->setStringHash(row, m_infinity); - //bool isOptViolated = false; - //for(i = 0; i < m_optPoint.size(); i++){ - //isOptViolated = (*li)->calcViolation(row, &m_optPoint[i][0]); - //if(isOptViolated){ - // (*m_osLog) << "\n\nCUT VIOLATES OPT POINT"; - // (*li)->print(); - // } - // assert(!isOptViolated); - //} - //--- - //--- check the the cut is already in the model core - //--- NOTE: if so this is an error (always?) - //--- - addCut = true; - isDupCore = false; - - for (r = 0; r < modelCore->getNumRows(); r++) { - //TODO: allow user to set hash - // example: GSEC can be represented compactly with just S - // or directly them they override an isSame( ) - if (modelCore->rowHash[r] == (*li)->getStrHash()) { - (*m_osLog) << "CUT IS DUPLICATE with Core\n"; - //--- - //--- This should not happen, however, it is possible - //--- due to roundoff error. Since x = sum{}lambda, - //--- the masterLP might be feasible while an a.x might - //--- violate a row bound slightly. This is checked after - //--- the recomposition. But, we don't throw an error unless - //--- the error is significant. The cut generator might - //--- duplicate a cut, because it finds an inequality that - //--- does cut off the current point that matches a row/cut - //--- already in the LP. - //--- - //--- Like the check in checkPointFeasible, we should check - //--- that this duplicated cut violates by only a small - //--- percentage. If not, then it really is an error. 
- //--- - double actViol; - double relViol; - double cutLB = (*li)->getLowerBound(); - double cutUB = (*li)->getUpperBound(); - double ax = row->dotProduct(x); - actViol = std::max(cutLB - ax, ax - cutUB); - actViol = std::max(actViol, 0.0); - - if (UtilIsZero(ax)) { - relViol = actViol; - } else { - relViol = actViol / std::fabs(ax); - } - - //TODO: need status return not just assert - //--- - //--- since it is already in LP core, the violation - //--- should be very small - //--- - if (relViol > 0.005) { //0.5% violated - (*m_osLog) << "CUT actViol= " << actViol - << " relViol= " << relViol << "\n"; - (*li)->print(m_osLog); - assert(0);//0.1% violated - } - - isDupCore = true; - break; - } +// because rowReform, this is very specific to PC +void DecompAlgoPC::addCutsToPool(const double *x, DecompCutList &newCuts, + int &n_newCuts) { + UtilPrintFuncBegin(m_osLog, m_classTag, "addCutsToPool()", + m_param.LogDebugLevel, 2); + int r; + int cutIndex = 0; + bool isDupCore; // also check relax? + bool isDupPool; + bool isViolated; // TODO: do something similiar to check for pos-rc vars + bool addCut; + DecompConstraintSet *modelCore = m_modelCore.getModel(); + DecompCutPool::iterator ci; + DecompCutList::iterator li = newCuts.begin(); + + while (li != newCuts.end()) { + CoinPackedVector *row = new CoinPackedVector(); + //--- + //--- create a row (in terms of original formulation, x), from a cut + //--- + (*li)->expandCutToRow(row); + //--- + //--- set the hash string (for quick duplicate checks) + //--- + (*li)->setStringHash(row, m_infinity); + // bool isOptViolated = false; + // for(i = 0; i < m_optPoint.size(); i++){ + // isOptViolated = (*li)->calcViolation(row, &m_optPoint[i][0]); + // if(isOptViolated){ + // (*m_osLog) << "\n\nCUT VIOLATES OPT POINT"; + // (*li)->print(); + // } + // assert(!isOptViolated); + //} + //--- + //--- check the the cut is already in the model core + //--- NOTE: if so this is an error (always?) + //--- + addCut = true; + isDupCore = false; + + for (r = 0; r < modelCore->getNumRows(); r++) { + // TODO: allow user to set hash + // example: GSEC can be represented compactly with just S + // or directly them they override an isSame( ) + if (modelCore->rowHash[r] == (*li)->getStrHash()) { + (*m_osLog) << "CUT IS DUPLICATE with Core\n"; + //--- + //--- This should not happen, however, it is possible + //--- due to roundoff error. Since x = sum{}lambda, + //--- the masterLP might be feasible while an a.x might + //--- violate a row bound slightly. This is checked after + //--- the recomposition. But, we don't throw an error unless + //--- the error is significant. The cut generator might + //--- duplicate a cut, because it finds an inequality that + //--- does cut off the current point that matches a row/cut + //--- already in the LP. + //--- + //--- Like the check in checkPointFeasible, we should check + //--- that this duplicated cut violates by only a small + //--- percentage. If not, then it really is an error. 
+ //--- + double actViol; + double relViol; + double cutLB = (*li)->getLowerBound(); + double cutUB = (*li)->getUpperBound(); + double ax = row->dotProduct(x); + actViol = std::max(cutLB - ax, ax - cutUB); + actViol = std::max(actViol, 0.0); + + if (UtilIsZero(ax)) { + relViol = actViol; + } else { + relViol = actViol / std::fabs(ax); + } + + // TODO: need status return not just assert + //--- + //--- since it is already in LP core, the violation + //--- should be very small + //--- + if (relViol > 0.005) { // 0.5% violated + (*m_osLog) << "CUT actViol= " << actViol << " relViol= " << relViol + << "\n"; + (*li)->print(m_osLog); + assert(0); // 0.1% violated + } + + isDupCore = true; + break; } + } - if (isDupCore) { - addCut = false; - } else { - //--- - //--- is this cut already in pool - //--- NOTE: this is not neccessarily an error, since - //--- there could be a cut from a previous iteration - //--- in the cut pool that was not entered because of - //--- the limit on the number of cuts entered per iteration - //--- - int cutIndexPool = 0; - isDupPool = false; - - for (ci = m_cutpool.begin(); ci != m_cutpool.end(); ci++) { - if ((*li)->getStrHash() == (*ci).getCutPtr()->getStrHash()) { - UTIL_MSG(m_param.LogLevel, 3, - (*m_osLog) << "CUT " << cutIndex - << " is Duplicate with Pool Cut " << cutIndexPool - << endl; - (*m_osLog) << "CUT Hash = " - << (*li)->getStrHash() << endl; - (*m_osLog) << "CUT (in Pool) Hash = " - << (*ci).getCutPtr()->getStrHash() << endl; - (*li)->print(); - ); - isDupPool = true; - break; - } - - cutIndexPool++; - } - - if (isDupPool) { - addCut = false; - } else { - isViolated = (*li)->calcViolation(row, x);//also sets it - - if (!isViolated) { - //--- - //--- we are trying to add a cut that is NOT violated - //--- NOTE: this is probably an error in the cut gen - //--- THINK: are there cases where we want to add cuts - //--- to pool even though we know they are not violated - //--- at the current point? i.e., might be violated later? - //--- - addCut = false; - (*m_osLog) << "CUT " << cutIndex - << " is not violated! 
Not adding to pool.\n"; - (*m_osLog) << "CUT Hash = " - << (*li)->getStrHash() << "\n"; - (*li)->print(); - assert(0); - } - } + if (isDupCore) { + addCut = false; + } else { + //--- + //--- is this cut already in pool + //--- NOTE: this is not neccessarily an error, since + //--- there could be a cut from a previous iteration + //--- in the cut pool that was not entered because of + //--- the limit on the number of cuts entered per iteration + //--- + int cutIndexPool = 0; + isDupPool = false; + + for (ci = m_cutpool.begin(); ci != m_cutpool.end(); ci++) { + if ((*li)->getStrHash() == (*ci).getCutPtr()->getStrHash()) { + UTIL_MSG(m_param.LogLevel, 3, + (*m_osLog) + << "CUT " << cutIndex << " is Duplicate with Pool Cut " + << cutIndexPool << endl; + (*m_osLog) + << "CUT Hash = " << (*li)->getStrHash() << endl; + (*m_osLog) << "CUT (in Pool) Hash = " + << (*ci).getCutPtr()->getStrHash() << endl; + (*li)->print();); + isDupPool = true; + break; + } + + cutIndexPool++; } - if (addCut) { - //--- - //--- create a row (in terms of reformulation, lambda), from row - //--- - CoinPackedVector* rowReform - = m_cutpool.createRowReform(modelCore->getNumCols(), - row, - m_vars); - int tempIndex(0); - map:: iterator mit; - for (int i = 0; i < row->getNumElements(); i++){ - tempIndex = row->getIndices()[i]; - mit = m_masterOnlyColsMap.find(tempIndex); - if (mit != m_masterOnlyColsMap.end()){ - rowReform->insert(mit->second, row->getElements()[i]); - } - } - - - if (!rowReform) { - //TODO: need status return code for failure in -O - (*m_osLog) << "ERROR in createRowReform\n"; - assert(0); - } else { - DecompWaitingRow waitingRow(*li, row, rowReform); - //do this in a separate function so addCutsTo is not dependent - //on passing in osolution for DecompVar - //waitingRow.setViolation(x);//always on original solution! - m_cutpool.push_back(waitingRow); - } - - li++; + if (isDupPool) { + addCut = false; } else { - //--- - //--- cut is not being added to pool, delete memory - //--- - UTIL_DELPTR(row); - UTIL_DELPTR(*li); //need to do? - li = newCuts.erase(li); //does this call cut destructor? 
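Note on the duplicate-cut tolerance above: because the point x is recomposed from x = sum_j lambda_j * s_j, a cut that hashes identically to a core row can still look slightly violated due to round-off, so only a relative violation above 0.5% is treated as an error. A minimal standalone sketch of that test, with generic names (lb, ub, ax) rather than DIP's cut API:

    #include <algorithm>
    #include <cmath>

    // Returns true when the violation of lb <= a.x <= ub is large enough,
    // relative to |a.x|, to be a real error rather than round-off noise.
    static bool isSignificantViolation(double ax, double lb, double ub,
                                       double relTol = 0.005) {
      double actViol = std::max(std::max(lb - ax, ax - ub), 0.0);
      double relViol =
          (std::fabs(ax) < 1e-12) ? actViol : actViol / std::fabs(ax);
      return relViol > relTol; // 0.5% by default, mirroring the check above
    }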
- n_newCuts--; - } - - cutIndex++; - } - - CoinAssertDebug(n_newCuts >= 0); - UtilPrintFuncEnd(m_osLog, m_classTag, - "addCutsToPool()", m_param.LogDebugLevel, 2); -} - -//===========================================================================// -int DecompAlgoPC::addCutsFromPool() -{ - UtilPrintFuncBegin(m_osLog, m_classTag, - "addCutsFromPool()", m_param.LogDebugLevel, 2); - //TODO: make this a parameter - const int maxcuts_toadd = 100;//m_app->m_param.cut_maxcuts_periter; - DecompConstraintSet* modelCore = m_modelCore.getModel(); - int n_newrows = CoinMin(static_cast(m_cutpool.size()), maxcuts_toadd); - int index = 0; - //--- - //--- sort the cuts by violation - //--- TODO: partial sort (limit by n_newrows) - //--- - sort(m_cutpool.begin(), m_cutpool.end(), is_greater_thanD()); - //--- - //--- after sorting by violation, find the index that starts - //--- where there are no violations (this can happen if pool - //--- has leftover cuts from previous iterations due to limitation - //--- on number of cuts entered per pass) - //--- - DecompCutPool::iterator li; - - for (li = m_cutpool.begin(); li != m_cutpool.end(); li++) { - if (m_param.LogDebugLevel >= 3) { - (*m_osLog) << "CUT VIOLATION = " << (*li).getViolation() << endl; + isViolated = (*li)->calcViolation(row, x); // also sets it + + if (!isViolated) { + //--- + //--- we are trying to add a cut that is NOT violated + //--- NOTE: this is probably an error in the cut gen + //--- THINK: are there cases where we want to add cuts + //--- to pool even though we know they are not violated + //--- at the current point? i.e., might be violated later? + //--- + addCut = false; + (*m_osLog) << "CUT " << cutIndex + << " is not violated! Not adding to pool.\n"; + (*m_osLog) << "CUT Hash = " << (*li)->getStrHash() << "\n"; + (*li)->print(); + assert(0); + } } + } - if ((*li).getViolation() < DecompEpsilon) { //PARM - break; + if (addCut) { + //--- + //--- create a row (in terms of reformulation, lambda), from row + //--- + CoinPackedVector *rowReform = + m_cutpool.createRowReform(modelCore->getNumCols(), row, m_vars); + int tempIndex(0); + map::iterator mit; + for (int i = 0; i < row->getNumElements(); i++) { + tempIndex = row->getIndices()[i]; + mit = m_masterOnlyColsMap.find(tempIndex); + if (mit != m_masterOnlyColsMap.end()) { + rowReform->insert(mit->second, row->getElements()[i]); + } } - index++; - } - - n_newrows = std::min(n_newrows, index); - - if (n_newrows > 0) { - m_varpool.setColsAreValid(false); - } - - //TODO: look into coin build... 
- double* rlb = new double[n_newrows]; - double* rub = new double[n_newrows]; - const CoinPackedVectorBase** rowReformBlock = - new const CoinPackedVectorBase*[n_newrows]; - const CoinPackedVectorBase** rowBlock = - new const CoinPackedVectorBase*[n_newrows]; - //better design to have a "add row name" - vector& coreRowNames = modelCore->getRowNamesMutable(); - vector colNames, rowNames; - string colName, rowName; - int rowIndex, rowIndex0; - int colIndex, colIndex0; - char sense; - double rhs, range; - index = 0; - rowIndex0 = m_masterSI->getNumRows(); - colIndex0 = m_masterSI->getNumCols(); - - for (li = m_cutpool.begin(); li != m_cutpool.end(); li++) { - if (index >= n_newrows) { - break; + if (!rowReform) { + // TODO: need status return code for failure in -O + (*m_osLog) << "ERROR in createRowReform\n"; + assert(0); + } else { + DecompWaitingRow waitingRow(*li, row, rowReform); + // do this in a separate function so addCutsTo is not dependent + // on passing in osolution for DecompVar + // waitingRow.setViolation(x);//always on original solution! + m_cutpool.push_back(waitingRow); } - CoinPackedVector* rowReform = (*li).getRowReformPtr(); - CoinPackedVector* row = (*li).getRowPtr(); - DecompCut* cut = (*li).getCutPtr(); - rlb[index] = (*li).getLowerBound(); - rub[index] = (*li).getUpperBound(); - rowReformBlock[index] = rowReform; - rowBlock[index] = row; - rowIndex = rowIndex0 + index; - //TODO: allow user to give cut names? - rowName = "cut(" + UtilIntToStr(rowIndex) + ")"; - rowNames.push_back(rowName); - //--- - //--- add the cut ptr to the list of cuts in masterLP - //--- - m_cuts.push_back(cut); + li++; + } else { //--- - //--- set hash for cut + //--- cut is not being added to pool, delete memory //--- - modelCore->rowHash.push_back(cut->getStrHash()); - index++; - } - - //--- - //--- add the new (lambda) rows to master - //--- add the new (x) rows to core - //--- add the master row types - //--- add the row names to core - //--- add the row lb,ub to core - //--- add a new artificial column for this cut (fix to 0) - //--- - m_masterSI->addRows(n_newrows, rowReformBlock, rlb, rub); - modelCore->M->appendRows(n_newrows, rowBlock); - - for (index = 0; index < n_newrows; index++) { - if (rowNames.size()) { - coreRowNames.push_back(rowNames[index]); - } + UTIL_DELPTR(row); + UTIL_DELPTR(*li); // need to do? + li = newCuts.erase(li); // does this call cut destructor? 
+ n_newCuts--; + } + + cutIndex++; + } + + CoinAssertDebug(n_newCuts >= 0); + UtilPrintFuncEnd(m_osLog, m_classTag, "addCutsToPool()", + m_param.LogDebugLevel, 2); +} - m_masterRowType.push_back(DecompRow_Cut); - //TODO: make this a function - UtilBoundToSense(rlb[index], rub[index], - m_infinity, sense, rhs, range); - modelCore->rowLB.push_back(rlb[index]); - modelCore->rowUB.push_back(rub[index]); - modelCore->rowSense.push_back(sense); - modelCore->rowRhs.push_back(rhs); - rowIndex = rowIndex0 + index; - colIndex = colIndex0 + index; - - switch (sense) { - case 'L': { - CoinPackedVector artCol; - artCol.insert(rowIndex, -1.0); - m_masterSI->addCol(artCol, 0.0, 0.0, 0.0); - m_masterColType.push_back(DecompCol_ArtForCutL); - m_masterArtCols.push_back(colIndex); - colName = "sCL(c_" + UtilIntToStr(colIndex) - + "_" + UtilIntToStr(rowIndex) + ")"; - colNames.push_back(colName); - colIndex++; - } +//===========================================================================// +int DecompAlgoPC::addCutsFromPool() { + UtilPrintFuncBegin(m_osLog, m_classTag, "addCutsFromPool()", + m_param.LogDebugLevel, 2); + // TODO: make this a parameter + const int maxcuts_toadd = 100; // m_app->m_param.cut_maxcuts_periter; + DecompConstraintSet *modelCore = m_modelCore.getModel(); + int n_newrows = CoinMin(static_cast(m_cutpool.size()), maxcuts_toadd); + int index = 0; + //--- + //--- sort the cuts by violation + //--- TODO: partial sort (limit by n_newrows) + //--- + sort(m_cutpool.begin(), m_cutpool.end(), is_greater_thanD()); + //--- + //--- after sorting by violation, find the index that starts + //--- where there are no violations (this can happen if pool + //--- has leftover cuts from previous iterations due to limitation + //--- on number of cuts entered per pass) + //--- + DecompCutPool::iterator li; + + for (li = m_cutpool.begin(); li != m_cutpool.end(); li++) { + if (m_param.LogDebugLevel >= 3) { + (*m_osLog) << "CUT VIOLATION = " << (*li).getViolation() << endl; + } + + if ((*li).getViolation() < DecompEpsilon) { // PARM break; - case 'G': { - CoinPackedVector artCol; - artCol.insert(rowIndex, 1.0); - m_masterSI->addCol(artCol, 0.0, 0.0, 0.0); - m_masterColType.push_back(DecompCol_ArtForCutG); - m_masterArtCols.push_back(colIndex); - colName = "sCG(c_" + UtilIntToStr(colIndex) - + "_" + UtilIntToStr(rowIndex) + ")"; - colNames.push_back(colName); - colIndex++; - } + } + + index++; + } + + n_newrows = std::min(n_newrows, index); + + if (n_newrows > 0) { + m_varpool.setColsAreValid(false); + } + + // TODO: look into coin build... 
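The TODO above notes that only the first n_newrows entries of the violation-sorted pool are actually used, so a full sort is more work than necessary. A small sketch of the partial-sort alternative, using a stand-in struct instead of DecompWaitingRow (illustrative names, not part of the patch semantics):

    #include <algorithm>
    #include <vector>

    struct PoolEntry {   // stand-in for a waiting row in the cut pool
      double violation;
    };

    // Move the k most violated entries to the front, in descending order;
    // the remaining entries are left in unspecified order.
    static void selectMostViolated(std::vector<PoolEntry> &pool, int k) {
      k = std::min<int>(k, static_cast<int>(pool.size()));
      std::partial_sort(pool.begin(), pool.begin() + k, pool.end(),
                        [](const PoolEntry &a, const PoolEntry &b) {
                          return a.violation > b.violation;
                        });
    }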
+ double *rlb = new double[n_newrows]; + double *rub = new double[n_newrows]; + const CoinPackedVectorBase **rowReformBlock = + new const CoinPackedVectorBase *[n_newrows]; + const CoinPackedVectorBase **rowBlock = + new const CoinPackedVectorBase *[n_newrows]; + // better design to have a "add row name" + vector &coreRowNames = modelCore->getRowNamesMutable(); + vector colNames, rowNames; + string colName, rowName; + int rowIndex, rowIndex0; + int colIndex, colIndex0; + char sense; + double rhs, range; + index = 0; + rowIndex0 = m_masterSI->getNumRows(); + colIndex0 = m_masterSI->getNumCols(); + + for (li = m_cutpool.begin(); li != m_cutpool.end(); li++) { + if (index >= n_newrows) { break; - case 'E': { - CoinPackedVector artColL; - CoinPackedVector artColG; - artColL.insert(rowIndex, -1.0); - m_masterSI->addCol(artColL, 0.0, 0.0, 0.0); - m_masterColType.push_back(DecompCol_ArtForCutL); - m_masterArtCols.push_back(colIndex); - colName = "sCL(c_" + UtilIntToStr(colIndex) - + "_" + UtilIntToStr(rowIndex) + ")"; - colNames.push_back(colName); - artColG.insert(rowIndex, 1.0); - m_masterSI->addCol(artColG, 0.0, 0.0, 0.0); - m_masterColType.push_back(DecompCol_ArtForCutG); - m_masterArtCols.push_back(colIndex); - colName = "sCG(c_" + UtilIntToStr(colIndex) - + "_" + UtilIntToStr(rowIndex) + ")"; - colNames.push_back(colName); - colIndex += 2; - } + } + + CoinPackedVector *rowReform = (*li).getRowReformPtr(); + CoinPackedVector *row = (*li).getRowPtr(); + DecompCut *cut = (*li).getCutPtr(); + rlb[index] = (*li).getLowerBound(); + rub[index] = (*li).getUpperBound(); + rowReformBlock[index] = rowReform; + rowBlock[index] = row; + rowIndex = rowIndex0 + index; + // TODO: allow user to give cut names? + rowName = "cut(" + UtilIntToStr(rowIndex) + ")"; + rowNames.push_back(rowName); + //--- + //--- add the cut ptr to the list of cuts in masterLP + //--- + m_cuts.push_back(cut); + //--- + //--- set hash for cut + //--- + modelCore->rowHash.push_back(cut->getStrHash()); + index++; + } + + //--- + //--- add the new (lambda) rows to master + //--- add the new (x) rows to core + //--- add the master row types + //--- add the row names to core + //--- add the row lb,ub to core + //--- add a new artificial column for this cut (fix to 0) + //--- + m_masterSI->addRows(n_newrows, rowReformBlock, rlb, rub); + modelCore->M->appendRows(n_newrows, rowBlock); + + for (index = 0; index < n_newrows; index++) { + if (rowNames.size()) { + coreRowNames.push_back(rowNames[index]); + } + + m_masterRowType.push_back(DecompRow_Cut); + // TODO: make this a function + UtilBoundToSense(rlb[index], rub[index], m_infinity, sense, rhs, range); + modelCore->rowLB.push_back(rlb[index]); + modelCore->rowUB.push_back(rub[index]); + modelCore->rowSense.push_back(sense); + modelCore->rowRhs.push_back(rhs); + rowIndex = rowIndex0 + index; + colIndex = colIndex0 + index; + + switch (sense) { + case 'L': { + CoinPackedVector artCol; + artCol.insert(rowIndex, -1.0); + m_masterSI->addCol(artCol, 0.0, 0.0, 0.0); + m_masterColType.push_back(DecompCol_ArtForCutL); + m_masterArtCols.push_back(colIndex); + colName = "sCL(c_" + UtilIntToStr(colIndex) + "_" + + UtilIntToStr(rowIndex) + ")"; + colNames.push_back(colName); + colIndex++; + } break; + case 'G': { + CoinPackedVector artCol; + artCol.insert(rowIndex, 1.0); + m_masterSI->addCol(artCol, 0.0, 0.0, 0.0); + m_masterColType.push_back(DecompCol_ArtForCutG); + m_masterArtCols.push_back(colIndex); + colName = "sCG(c_" + UtilIntToStr(colIndex) + "_" + + UtilIntToStr(rowIndex) + ")"; + 
colNames.push_back(colName); + colIndex++; + } break; + case 'E': { + CoinPackedVector artColL; + CoinPackedVector artColG; + artColL.insert(rowIndex, -1.0); + m_masterSI->addCol(artColL, 0.0, 0.0, 0.0); + m_masterColType.push_back(DecompCol_ArtForCutL); + m_masterArtCols.push_back(colIndex); + colName = "sCL(c_" + UtilIntToStr(colIndex) + "_" + + UtilIntToStr(rowIndex) + ")"; + colNames.push_back(colName); + artColG.insert(rowIndex, 1.0); + m_masterSI->addCol(artColG, 0.0, 0.0, 0.0); + m_masterColType.push_back(DecompCol_ArtForCutG); + m_masterArtCols.push_back(colIndex); + colName = "sCG(c_" + UtilIntToStr(colIndex) + "_" + + UtilIntToStr(rowIndex) + ")"; + colNames.push_back(colName); + colIndex += 2; + } break; + default: + assert(0); + } + + rowIndex++; + } + + //--- + //--- add the row names to master + //--- add the row names to master + //--- + if (rowNames.size() > 0) + m_masterSI->setRowNames(rowNames, 0, static_cast(rowNames.size()), + rowIndex0); + + if (colNames.size() > 0) + m_masterSI->setColNames(colNames, 0, static_cast(colNames.size()), + colIndex0); + + //--- + //--- clean up + //--- + index = 0; + + for (li = m_cutpool.begin(); li != m_cutpool.end(); li++) { + if (index >= n_newrows) { break; - default: - assert(0); - } - - rowIndex++; - } - - //--- - //--- add the row names to master - //--- add the row names to master - //--- - if (rowNames.size() > 0) - m_masterSI->setRowNames(rowNames, 0, - static_cast(rowNames.size()), rowIndex0); - - if (colNames.size() > 0) - m_masterSI->setColNames(colNames, 0, - static_cast(colNames.size()), colIndex0); - - //--- - //--- clean up - //--- - index = 0; - - for (li = m_cutpool.begin(); li != m_cutpool.end(); li++) { - if (index >= n_newrows) { - break; - } - - (*li).deleteRowReform(); - (*li).deleteRow(); - (*li).clearCut();//need to do this? - index++; - } - - m_cutpool.erase(m_cutpool.begin(), li); - UTIL_DELARR(rowReformBlock); - UTIL_DELARR(rowBlock); - UTIL_DELARR(rlb); - UTIL_DELARR(rub); + } + + (*li).deleteRowReform(); + (*li).deleteRow(); + (*li).clearCut(); // need to do this? + index++; + } + + m_cutpool.erase(m_cutpool.begin(), li); + UTIL_DELARR(rowReformBlock); + UTIL_DELARR(rowBlock); + UTIL_DELARR(rlb); + UTIL_DELARR(rub); #if 1 - UTIL_DEBUG(m_app->m_param.LogDebugLevel, 4, - (*m_osLog) << "\nCUT POOL AFTER:\n"; - m_cutpool.print(m_osLog); - (*m_osLog) << "\nCUTS AFTER:\n"; - printCuts(m_osLog); - ); + UTIL_DEBUG(m_app->m_param.LogDebugLevel, 4, + (*m_osLog) << "\nCUT POOL AFTER:\n"; + m_cutpool.print(m_osLog); (*m_osLog) << "\nCUTS AFTER:\n"; + printCuts(m_osLog);); #endif - UtilPrintFuncEnd(m_osLog, m_classTag, - "addCutsFromPool()", m_param.LogDebugLevel, 2); - return n_newrows; + UtilPrintFuncEnd(m_osLog, m_classTag, "addCutsFromPool()", + m_param.LogDebugLevel, 2); + return n_newrows; } - - diff --git a/Dip/src/DecompAlgoRC.cpp b/Dip/src/DecompAlgoRC.cpp index 86a51465..a5c9a73a 100644 --- a/Dip/src/DecompAlgoRC.cpp +++ b/Dip/src/DecompAlgoRC.cpp @@ -13,49 +13,44 @@ //===========================================================================// //===========================================================================// +#include "DecompAlgoRC.h" #include "DecompApp.h" #include "DecompVar.h" -#include "DecompAlgoRC.h" using namespace std; //===========================================================================// -DecompPhase DecompAlgoRC::phaseInit() -{ - //THINK: should base have some container for master and sub - //since in RC, don't need OSI?? 
- //so base should never call m_masterSI, but m_masterContainer - //or something - if (m_param.LogDumpModel > 1) - printCurrentProblem(m_masterSI, - "masterProb", - m_nodeStats.nodeIndex, - m_nodeStats.cutCallsTotal, - m_nodeStats.priceCallsTotal); - - //--- - //--- update primal/dual vectors - //--- - m_status = STAT_FEASIBLE; - //TODO: what about the INF case?? artificial columns? DC-ABCC version - // --- - // --- update the phase - // --- - return PHASE_PRICE2; +DecompPhase DecompAlgoRC::phaseInit() { + // THINK: should base have some container for master and sub + // since in RC, don't need OSI?? + // so base should never call m_masterSI, but m_masterContainer + // or something + if (m_param.LogDumpModel > 1) + printCurrentProblem(m_masterSI, "masterProb", m_nodeStats.nodeIndex, + m_nodeStats.cutCallsTotal, m_nodeStats.priceCallsTotal); + + //--- + //--- update primal/dual vectors + //--- + m_status = STAT_FEASIBLE; + // TODO: what about the INF case?? artificial columns? DC-ABCC version + // --- + // --- update the phase + // --- + return PHASE_PRICE2; } //===========================================================================// -void DecompAlgoRC::phaseDone() -{ - //take the current set of variables and solve DW master to get primal - //TODO: right now, creating from scratch each time -- really need - // to append and warm start - esp if doing alot of branching - //delete m_masterSI; - //m_masterSI = NULL; - //m_masterSI = new OsiLpSolverInterface(); - //CoinAssertHint(m_masterSI, "Error: Out of Memory"); - //m_masterSI->messageHandler()->setLogLevel(m_param.LogLpLevel); - DecompConstraintSet* modelCore = m_modelCore.getModel(); +void DecompAlgoRC::phaseDone() { + // take the current set of variables and solve DW master to get primal + // TODO: right now, creating from scratch each time -- really need + // to append and warm start - esp if doing alot of branching + // delete m_masterSI; + // m_masterSI = NULL; + // m_masterSI = new OsiLpSolverInterface(); + // CoinAssertHint(m_masterSI, "Error: Out of Memory"); + // m_masterSI->messageHandler()->setLogLevel(m_param.LogLpLevel); + DecompConstraintSet *modelCore = m_modelCore.getModel(); #if 0 //--- //--- Initialize the solver interface for the master problem. @@ -168,182 +163,172 @@ void DecompAlgoRC::phaseDone() UTIL_DELARR(obj); UTIL_DELARR(zeroSol); #endif - m_status = DecompAlgo::solutionUpdate(PHASE_UNKNOWN, 99999, 99999); - - //--- - //--- check if IP feasible (are we done?) - //--- TODO: for nonexplicity, also check user app isfeasible - //--- - //TODO: should this whole section be phaseDone? - if (m_status != STAT_INFEASIBLE) { - DecompAlgo::recomposeSolution(m_masterSI->getColSolution(), m_xhat); - UTIL_DEBUG(m_param.LogDebugLevel, 4, - m_app->printOriginalSolution(modelCore->getNumCols(), - modelCore->getColNames(), - m_xhat); - ); - - if (isIPFeasible(m_xhat)) { - if (m_app->APPisUserFeasible(m_xhat, - modelCore->getNumCols(), - m_param.TolZero)) { - DecompSolution* decompSol - = new DecompSolution(modelCore->getNumCols(), - m_xhat, m_masterSI->getObjValue()); - m_xhatIPFeas.push_back(decompSol); - } + m_status = DecompAlgo::solutionUpdate(PHASE_UNKNOWN, 99999, 99999); + + //--- + //--- check if IP feasible (are we done?) + //--- TODO: for nonexplicity, also check user app isfeasible + //--- + // TODO: should this whole section be phaseDone? 
+ if (m_status != STAT_INFEASIBLE) { + DecompAlgo::recomposeSolution(m_masterSI->getColSolution(), m_xhat); + UTIL_DEBUG(m_param.LogDebugLevel, 4, + m_app->printOriginalSolution(modelCore->getNumCols(), + modelCore->getColNames(), m_xhat);); + + if (isIPFeasible(m_xhat)) { + if (m_app->APPisUserFeasible(m_xhat, modelCore->getNumCols(), + m_param.TolZero)) { + DecompSolution *decompSol = new DecompSolution( + modelCore->getNumCols(), m_xhat, m_masterSI->getObjValue()); + m_xhatIPFeas.push_back(decompSol); } + } - vector::iterator vi; - DecompSolution* viBest = NULL; - double bestBoundUB = m_nodeStats.objBest.second; + vector::iterator vi; + DecompSolution *viBest = NULL; + double bestBoundUB = m_nodeStats.objBest.second; - for (vi = m_xhatIPFeas.begin(); vi != m_xhatIPFeas.end(); vi++) { - const DecompSolution* xhatIPFeas = *vi; + for (vi = m_xhatIPFeas.begin(); vi != m_xhatIPFeas.end(); vi++) { + const DecompSolution *xhatIPFeas = *vi; - if (isIPFeasible(xhatIPFeas->getValues())) { - if (xhatIPFeas->getQuality() <= bestBoundUB) { - bestBoundUB = xhatIPFeas->getQuality(); - viBest = *vi; - } - } + if (isIPFeasible(xhatIPFeas->getValues())) { + if (xhatIPFeas->getQuality() <= bestBoundUB) { + bestBoundUB = xhatIPFeas->getQuality(); + viBest = *vi; + } } + } - if (viBest) { - //save the best - setObjBoundIP(bestBoundUB); - m_xhatIPBest = viBest; - } - } + if (viBest) { + // save the best + setObjBoundIP(bestBoundUB); + m_xhatIPBest = viBest; + } + } - UtilPrintFuncEnd(m_osLog, m_classTag, - "phaseDone()", m_param.LogDebugLevel, 2); + UtilPrintFuncEnd(m_osLog, m_classTag, "phaseDone()", m_param.LogDebugLevel, + 2); } //===========================================================================// -void DecompAlgoRC::recomposeSolution(const double* solution, - double* rsolution) -{ - printf("RC recomposeSolution does nothing\n"); +void DecompAlgoRC::recomposeSolution(const double *solution, + double *rsolution) { + printf("RC recomposeSolution does nothing\n"); } //===========================================================================// -void DecompAlgoRC::createMasterProblem(DecompVarList& initVars) -{ - //--- - //--- there is no master LP in RC, just initialize the dual vector - //--- - UtilPrintFuncBegin(m_osLog, m_classTag, - "createMasterProblem()", m_param.LogDebugLevel, 2); - DecompAlgo::createMasterProblem(initVars); - CoinAssert(initVars.size() > 0); - //--- - //--- In order to implement simple branching, we are going to - //--- treat all column bounds as explicit constraints. Then branching - //--- for DW can be done in the same way it is done for regular CPM. - //--- - //TODO: looks like volume doesn't like R rows... change and split up? - //printf("Volume Algorithm can't work if there is a non ELG row\n"); - // coreMatrixAppendColBounds(); - //THINK: is this the right place for this - //TODO: give user option to feed in a good starting dual vector? - //you might want to do that if switch between DW and RC... etc... THINK - DecompConstraintSet* modelCore = m_modelCore.getModel(); - fill_n(back_inserter(m_u), modelCore->getNumRows(), 0.0); - //TODO - m_rc = new double[modelCore->getNumCols()]; //better in constructor? 
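phaseDone above recomposes an original-space point from the master column solution before running the IP and user feasibility checks; conceptually xhat_i = sum_j lambda_j * (s_j)_i over the columns generated so far. A small sketch of that recomposition with dense vectors standing in for DecompVar columns (names are illustrative only):

    #include <cstddef>
    #include <vector>

    // xhat[i] = sum_j lambda[j] * s[j][i], where each s[j] is an extreme
    // point of the relaxation expressed in the original variable space.
    static std::vector<double>
    recompose(const std::vector<double> &lambda,
              const std::vector<std::vector<double>> &s, int nCols) {
      std::vector<double> xhat(nCols, 0.0);
      for (std::size_t j = 0; j < lambda.size(); ++j)
        for (int i = 0; i < nCols; ++i)
          xhat[i] += lambda[j] * s[j][i];
      return xhat;
    }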
- CoinAssertHint(m_rc, "Error: Out of Memory"); - //m_vars here will contain all the shat's used - we can only - //use one of them in subgradient, so just save the last one - //DecompVarList::iterator it = initVars.begin(); - //assert(*it); - //think - //double redCost = (*it)->getOriginalCost() - (*it)->m_s.dotProduct(&m_u[0]); - //(*it)->setReducedCost(redCost); - //for rc we want to calc the reduced cost - //m_vars.push_back(*it); - //UTIL_DEBUG(m_param.LogDebugLevel, 3, - // (*it)->print(m_osLog); - // ); - UtilPrintFuncEnd(m_osLog, m_classTag, - "createMasterProblem()", m_param.LogDebugLevel, 2); +void DecompAlgoRC::createMasterProblem(DecompVarList &initVars) { + //--- + //--- there is no master LP in RC, just initialize the dual vector + //--- + UtilPrintFuncBegin(m_osLog, m_classTag, "createMasterProblem()", + m_param.LogDebugLevel, 2); + DecompAlgo::createMasterProblem(initVars); + CoinAssert(initVars.size() > 0); + //--- + //--- In order to implement simple branching, we are going to + //--- treat all column bounds as explicit constraints. Then branching + //--- for DW can be done in the same way it is done for regular CPM. + //--- + // TODO: looks like volume doesn't like R rows... change and split up? + // printf("Volume Algorithm can't work if there is a non ELG row\n"); + // coreMatrixAppendColBounds(); + // THINK: is this the right place for this + // TODO: give user option to feed in a good starting dual vector? + // you might want to do that if switch between DW and RC... etc... THINK + DecompConstraintSet *modelCore = m_modelCore.getModel(); + fill_n(back_inserter(m_u), modelCore->getNumRows(), 0.0); + // TODO + m_rc = new double[modelCore->getNumCols()]; // better in constructor? + CoinAssertHint(m_rc, "Error: Out of Memory"); + // m_vars here will contain all the shat's used - we can only + // use one of them in subgradient, so just save the last one + // DecompVarList::iterator it = initVars.begin(); + // assert(*it); + // think + // double redCost = (*it)->getOriginalCost() - (*it)->m_s.dotProduct(&m_u[0]); + //(*it)->setReducedCost(redCost); + // for rc we want to calc the reduced cost + // m_vars.push_back(*it); + // UTIL_DEBUG(m_param.LogDebugLevel, 3, + // (*it)->print(m_osLog); + // ); + UtilPrintFuncEnd(m_osLog, m_classTag, "createMasterProblem()", + m_param.LogDebugLevel, 2); } // ------------------------------------------------------------------------- // -bool DecompAlgoRC::isDone() -{ - //iter count is checked by phaseUpdate - //need to check step limit in here - //need to check if ub-lb gap is small? isn't that always checked? - //printf("\nm_UB: %12.10f, m_LB: %12.10f", m_UB, m_LB); - if ((m_step < 1.0e-3) || //step length too small - m_zeroSub || //0 subgradient - UtilIsZero(m_UB - m_LB, 1.0e-3)) { //gap is small - return true; - } - - return false; +bool DecompAlgoRC::isDone() { + // iter count is checked by phaseUpdate + // need to check step limit in here + // need to check if ub-lb gap is small? isn't that always checked? + // printf("\nm_UB: %12.10f, m_LB: %12.10f", m_UB, m_LB); + if ((m_step < 1.0e-3) || // step length too small + m_zeroSub || // 0 subgradient + UtilIsZero(m_UB - m_LB, 1.0e-3)) { // gap is small + return true; + } + + return false; } -//do we need a var pool at all here? +// do we need a var pool at all here? 
// ------------------------------------------------------------------------- // -int DecompAlgoRC::addCutsFromPool() -{ - int nNewRows = DecompAlgo::addCutsFromPool(); - m_u.reserve(m_u.size() + nNewRows); - UtilFillN(m_u, nNewRows, 0.0); - - //is this the right place to do this? - //we were pricing out, then cutting, but when we price out, - //step=0, so we need to start over with step size - //best place for this would be in phaseUpdatE? - if (nNewRows > 0) { - m_step = 2.0; - } - - return nNewRows; +int DecompAlgoRC::addCutsFromPool() { + int nNewRows = DecompAlgo::addCutsFromPool(); + m_u.reserve(m_u.size() + nNewRows); + UtilFillN(m_u, nNewRows, 0.0); + + // is this the right place to do this? + // we were pricing out, then cutting, but when we price out, + // step=0, so we need to start over with step size + // best place for this would be in phaseUpdatE? + if (nNewRows > 0) { + m_step = 2.0; + } + + return nNewRows; } // ------------------------------------------------------------------------- // -int DecompAlgoRC::generateVars(DecompVarList& newVars, - double& mostNegReducedCost) -{ - UtilPrintFuncBegin(m_osLog, m_classTag, - "generateVars()", m_param.LogDebugLevel, 2); - DecompConstraintSet* modelCore = m_modelCore.getModel(); - //really only returning one var here... - //for RC, doesn't have to be negative?? - mostNegReducedCost = m_infinity;//bad name here - //TODO: whenever a cut is added, if doing RC, you need to add an - //element to u.... do we need to override gen cuts just for that? - //gen cuts (x) is wrong here anyway... need gen cuts (s) - assert(static_cast(m_u.size()) == modelCore->getNumRows()); - //THINK: - // if we overload getDual method, then this can be same as PC genvars? - // in PC, we are letting OSI house the duals, in RC we do it ourself - // seems silly to use OSI at all for RC? - // - // almost want Decomp to house dual even for PC... - //TODO: m_app->m_model seems dumb - //reduced cost = c - uA - const double* origObjective = getOrigObjective(); - modelCore->M->transposeTimes(&m_u[0], m_rc); - - for (int c = 0; c < modelCore->getNumCols(); c++) { - printf("RC[%d] -> c: %g - uA: %g = m_rc: %g\n", - c, origObjective[c], m_rc[c], - origObjective[c] - m_rc[c]); - m_rc[c] = origObjective[c] - m_rc[c]; - } - - //double alpha = 0.0; - DecompVarList potentialVars; - //TODO: stat return, restrict how many? pass that in to user? - //only take those with negative reduced cost? - //check for dups here - //TODO: blocks! - //WRONG... +int DecompAlgoRC::generateVars(DecompVarList &newVars, + double &mostNegReducedCost) { + UtilPrintFuncBegin(m_osLog, m_classTag, "generateVars()", + m_param.LogDebugLevel, 2); + DecompConstraintSet *modelCore = m_modelCore.getModel(); + // really only returning one var here... + // for RC, doesn't have to be negative?? + mostNegReducedCost = m_infinity; // bad name here + // TODO: whenever a cut is added, if doing RC, you need to add an + // element to u.... do we need to override gen cuts just for that? + // gen cuts (x) is wrong here anyway... need gen cuts (s) + assert(static_cast(m_u.size()) == modelCore->getNumRows()); + // THINK: + // if we overload getDual method, then this can be same as PC genvars? + // in PC, we are letting OSI house the duals, in RC we do it ourself + // seems silly to use OSI at all for RC? + // + // almost want Decomp to house dual even for PC... 
+ // TODO: m_app->m_model seems dumb + // reduced cost = c - uA + const double *origObjective = getOrigObjective(); + modelCore->M->transposeTimes(&m_u[0], m_rc); + + for (int c = 0; c < modelCore->getNumCols(); c++) { + printf("RC[%d] -> c: %g - uA: %g = m_rc: %g\n", c, origObjective[c], + m_rc[c], origObjective[c] - m_rc[c]); + m_rc[c] = origObjective[c] - m_rc[c]; + } + + // double alpha = 0.0; + DecompVarList potentialVars; + // TODO: stat return, restrict how many? pass that in to user? + // only take those with negative reduced cost? + // check for dups here + // TODO: blocks! + // WRONG... #if 0 solveRelaxed( 0, @@ -353,30 +338,30 @@ int DecompAlgoRC::generateVars(DecompVarList& newVars, NULL,//WILL FAIL potentialVars);//NO CHECK RC?? #endif - //another way to do this is to just collect all m_vars, - //not worrying about duplicates -- and when we get to DW - //strip out the dups before constructing the master formulation - DecompVarList::iterator it; - double varRedCost; - - for (it = potentialVars.begin(); it != potentialVars.end(); it++) { - varRedCost = (*it)->getReducedCost(); - newVars.push_back(*it); - - if (varRedCost < mostNegReducedCost) { - mostNegReducedCost = varRedCost; - //TODO: if this winds up being a dup in addVarsToPool - //this memory will be erased -- which will cause a problem - //because we need this var - //TODO: FUGLY - make a copy so if dup doesn't cause prob - //i don't like this! ugh - //m_shatVar = *it; //THINK - m_shatVar = *(*it); - m_shatVar.fillDenseArr(modelCore->getNumCols(), m_xhat); - } - } - - potentialVars.clear(); //THINK? what does clear do exactly ? + // another way to do this is to just collect all m_vars, + // not worrying about duplicates -- and when we get to DW + // strip out the dups before constructing the master formulation + DecompVarList::iterator it; + double varRedCost; + + for (it = potentialVars.begin(); it != potentialVars.end(); it++) { + varRedCost = (*it)->getReducedCost(); + newVars.push_back(*it); + + if (varRedCost < mostNegReducedCost) { + mostNegReducedCost = varRedCost; + // TODO: if this winds up being a dup in addVarsToPool + // this memory will be erased -- which will cause a problem + // because we need this var + // TODO: FUGLY - make a copy so if dup doesn't cause prob + // i don't like this! ugh + // m_shatVar = *it; //THINK + m_shatVar = *(*it); + m_shatVar.fillDenseArr(modelCore->getNumCols(), m_xhat); + } + } + + potentialVars.clear(); // THINK? what does clear do exactly ? #if 0 for (it = newVars.begin(); it != newVars.end(); it++) @@ -385,241 +370,228 @@ int DecompAlgoRC::generateVars(DecompVarList& newVars, ); #endif - UtilPrintFuncEnd(m_osLog, m_classTag, - "generateVars()", m_param.LogDebugLevel, 2); - return static_cast(newVars.size()); + UtilPrintFuncEnd(m_osLog, m_classTag, "generateVars()", m_param.LogDebugLevel, + 2); + return static_cast(newVars.size()); } // ------------------------------------------------------------------------- // DecompStatus DecompAlgoRC::solutionUpdate(const DecompPhase phase, - const int maxInnerIter, - const int maxOuterIter) -{ - //--- - //--- C, PC: This step solves (or takes a few steps to solve) master LP - //--- which updates both the primal (x,lambda) and dual(u) vectors. - //--- RC : This does one step of subgradient, which updates the dual (u) - //--- vector, given some shat (the last variable in m_var). - //generalize this for the many variants of subgradient? - //TODO: might make life easier to flip all inequalities to <= or >= ! 
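generateVars above prices columns against the current duals: rc = c - u^T A, computed with modelCore->M->transposeTimes(&m_u[0], m_rc) followed by the subtraction loop. The same computation using only CoinUtils types, as a sketch assuming A is the core CoinPackedMatrix:

    #include "CoinPackedMatrix.hpp"
    #include <vector>

    // rc = c - u^T A : transposeTimes fills rc with u^T A, then we flip it.
    static void reducedCosts(const CoinPackedMatrix &A, const double *c,
                             const std::vector<double> &u, double *rc) {
      A.transposeTimes(u.data(), rc); // rc[j] = sum_i u[i] * A(i,j)
      for (int j = 0; j < A.getNumCols(); ++j)
        rc[j] = c[j] - rc[j];
    }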
- UtilPrintFuncBegin(m_osLog, m_classTag, - "solutionUpdate()", m_param.LogDebugLevel, 2); - m_UB = 1.05 * m_nodeStats.objBest.second; //TODO heuristics 1.05?? - //TODO: tols - //how to allow hooks to other stabilization methods, bundle, etc... user - //can simply derive from RC or base and recode this method... - UTIL_DEBUG(m_app->m_param.LogDebugLevel, 3, - (*m_osLog) << "\nVARS m_vars:\n"; - printVars(m_osLog); - ); - DecompVar* shatVar = &m_shatVar; //m_vars.back(); - //DecompVar * shatVar = m_vars.back(); - CoinAssert(shatVar); - UTIL_DEBUG(m_app->m_param.LogDebugLevel, 3, - (*m_osLog) << "\nshat: "; - shatVar->print(m_infinity, m_osLog); - ); - //make this part of class, else realloc every iter... - //use vector, let it grow? - int r; - //char sense; - //double range; - DecompConstraintSet* modelCore = m_modelCore.getModel(); - int n_coreRows = modelCore->getNumRows(); - //const double * rhs = getRightHandSide(); - //const char * sense = getRowSense(); - const double* rhs = &modelCore->rowRhs[0]; - const char* sense = &modelCore->rowSense[0]; - double* violation = new double[n_coreRows]; - double* activity = new double[n_coreRows]; - modelCore->M->times(shatVar->m_s, activity); //As - assert(static_cast(m_u.size()) == n_coreRows); - // =, b - Ax or Ax - b - //>=, b - Ax > 0 is a violation - //<=, b - Ax < 0 is a violation - m_zeroSub = true; - - for (r = 0; r < n_coreRows; r++) { - violation[r] = rhs[r] - activity[r]; - UTIL_DEBUG(m_app->m_param.LogDebugLevel, 3, - (*m_osLog) << setprecision(8); - (*m_osLog) << "r: " << r << " vio: " << violation[r] - << " rhs: " << rhs[r] << " act: " << activity[r] - << " u: " << m_u[r] << " sense: " << sense[r]; - ); - - //beasley suggestion... this is just another basic variant of SG? - // =, b - Ax != 0 is a violation - //>=, b - Ax > 0 is a violation - //<=, Ax - b > 0 is a violation - switch (sense[r]) { - case 'E': - //violation[i] = rhs[i] - activity[i]; - break; - case 'G': - - //violation[i] = rhs[i] - activity[i]; - //TODO: use tol - if (violation[r] < 0.0 && m_u[r] >= -1.0e-4 && m_u[r] <= 1.0e-4) { - violation[r] = 0.0; - } - - break; - case 'L': - - //violation[i] = rhs[i] - activity[i]; - if (violation[r] > 0.0 && m_u[r] >= -1.0e-4 && m_u[r] <= 1.0e-4) { - violation[r] = 0.0; - } - - break; + const int maxInnerIter, + const int maxOuterIter) { + //--- + //--- C, PC: This step solves (or takes a few steps to solve) master LP + //--- which updates both the primal (x,lambda) and dual(u) vectors. + //--- RC : This does one step of subgradient, which updates the dual (u) + //--- vector, given some shat (the last variable in m_var). + // generalize this for the many variants of subgradient? + // TODO: might make life easier to flip all inequalities to <= or >= ! + UtilPrintFuncBegin(m_osLog, m_classTag, "solutionUpdate()", + m_param.LogDebugLevel, 2); + m_UB = 1.05 * m_nodeStats.objBest.second; // TODO heuristics 1.05?? + // TODO: tols + // how to allow hooks to other stabilization methods, bundle, etc... user + // can simply derive from RC or base and recode this method... + UTIL_DEBUG(m_app->m_param.LogDebugLevel, 3, (*m_osLog) << "\nVARS m_vars:\n"; + printVars(m_osLog);); + DecompVar *shatVar = &m_shatVar; // m_vars.back(); + // DecompVar * shatVar = m_vars.back(); + CoinAssert(shatVar); + UTIL_DEBUG(m_app->m_param.LogDebugLevel, 3, (*m_osLog) << "\nshat: "; + shatVar->print(m_infinity, m_osLog);); + // make this part of class, else realloc every iter... + // use vector, let it grow? 
+ int r; + // char sense; + // double range; + DecompConstraintSet *modelCore = m_modelCore.getModel(); + int n_coreRows = modelCore->getNumRows(); + // const double * rhs = getRightHandSide(); + // const char * sense = getRowSense(); + const double *rhs = &modelCore->rowRhs[0]; + const char *sense = &modelCore->rowSense[0]; + double *violation = new double[n_coreRows]; + double *activity = new double[n_coreRows]; + modelCore->M->times(shatVar->m_s, activity); // As + assert(static_cast(m_u.size()) == n_coreRows); + // =, b - Ax or Ax - b + //>=, b - Ax > 0 is a violation + //<=, b - Ax < 0 is a violation + m_zeroSub = true; + + for (r = 0; r < n_coreRows; r++) { + violation[r] = rhs[r] - activity[r]; + UTIL_DEBUG(m_app->m_param.LogDebugLevel, 3, (*m_osLog) << setprecision(8); + (*m_osLog) << "r: " << r << " vio: " << violation[r] + << " rhs: " << rhs[r] << " act: " << activity[r] + << " u: " << m_u[r] << " sense: " << sense[r];); + + // beasley suggestion... this is just another basic variant of SG? + // =, b - Ax != 0 is a violation + //>=, b - Ax > 0 is a violation + //<=, Ax - b > 0 is a violation + switch (sense[r]) { + case 'E': + // violation[i] = rhs[i] - activity[i]; + break; + case 'G': + + // violation[i] = rhs[i] - activity[i]; + // TODO: use tol + if (violation[r] < 0.0 && m_u[r] >= -1.0e-4 && m_u[r] <= 1.0e-4) { + violation[r] = 0.0; } - UTIL_DEBUG(m_app->m_param.LogDebugLevel, 3, - (*m_osLog) << " -> vio: " << violation[r] << "\n"; - ); + break; + case 'L': - //?? shouldn't it be if all are feasible? - if (fabs(violation[r]) > 0.0001) { - m_zeroSub = false; + // violation[i] = rhs[i] - activity[i]; + if (violation[r] > 0.0 && m_u[r] >= -1.0e-4 && m_u[r] <= 1.0e-4) { + violation[r] = 0.0; } - } - //when to half the step size? - //same stuff as m_tlb?? setTrueLowerBound? - double bound = shatVar->getReducedCost();//c - uA (is this set?) - //double constant = calcConstant(m_u.size(), &m_u[0]); - double constant = 0.0; - - for (r = 0; r < n_coreRows; r++) { - constant += m_u[r] * rhs[r]; - } - - //needs rhs from OSI - this is why it was better to fake it - //and have the information in OSI... but then carrying around - //modelCore and OSI for no good reason... but... this is messier - //LR Bound = (c - uA)shat + ub, assumes u >= 0 - //first iter, LR Bound = cshat - is that a valid LB? - //yes, actually is a LB for any u >= 0 - //this assumes u is optimal? or just dual feasible - //is u = 0 dual feasible? - //but only a valid bound if shat has the lowest reduced cost - //for a given u... so, for first iter of smallip, should have given - //(2,1) with LB = 2... RC don't do genInitVars? - //TODO: think initial dual vector - solve an LP to get started? - if (bound + constant > m_LB + m_app->m_param.TolZero) { - m_LB = bound + constant; - m_cntSameLB = 0; - //count_sameLB = 0; - //make param to do or not - //reducedCostFixing(reducedCost); - } else { - m_cntSameLB++; - } - - //if(count_sameLB >= m_app->m_param.RC_sameLBLimit){ - if (m_cntSameLB >= 10) { - m_step /= 2.0; - cout << "LB has not changed in " << m_cntSameLB - << " iterations - halve step: " << m_step << endl; - m_cntSameLB = 0; - } - - printf("m_UB: %12.10f, m_LB: %12.10f\n", m_UB, m_LB); - assert((m_UB - m_LB) > -0.0001); - double theta = 0.0; - double denom = 0.0; - - for (r = 0; r < n_coreRows; r++) { - denom += violation[r] * violation[r]; - } - - if (denom > 0.0) { - theta = m_step * ((1.00 * m_UB) - m_LB) / denom; - } - - //TODO: debug util to print a list of values in a nice format to log? 
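The bound and step-size bookkeeping above follow the usual subgradient recipe noted in the comments: for duals u >= 0 the Lagrangian value (c - uA)shat + u.b is a valid lower bound, and the step is theta = step * (UB - LB) / ||g||^2 with g the vector of constraint violations. A compact sketch of those two formulas with plain arrays and illustrative names:

    #include <cstddef>

    // Lagrangian bound: (c - uA)shat + u.b, where redCostShat already holds
    // (c - uA)shat for the column found at the current duals.
    static double lagrangianBound(double redCostShat, const double *u,
                                  const double *b, std::size_t m) {
      double ubTerm = 0.0;
      for (std::size_t r = 0; r < m; ++r)
        ubTerm += u[r] * b[r];
      return redCostShat + ubTerm;
    }

    // Polyak-style step: theta = step * (UB - LB) / ||g||^2 (zero if g == 0).
    static double stepSize(double step, double UB, double LB, const double *g,
                           std::size_t m) {
      double denom = 0.0;
      for (std::size_t r = 0; r < m; ++r)
        denom += g[r] * g[r];
      return denom > 0.0 ? step * (UB - LB) / denom : 0.0;
    }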
- UTIL_DEBUG(m_app->m_param.LogDebugLevel, 3, - (*m_osLog) << "m_UB: " << m_UB << " m_LB: " << m_LB - << " denom: " << denom << " m_step: " << m_step - << " theta: " << theta << "\n"; - ); - - //STOP 10/6/07 - //How do we deal with range constraints? What does volume do, for example? - for (r = 0; r < n_coreRows; r++) { - switch (sense[r]) { - case 'E': - m_u[r] += theta * violation[r]; - break; - case 'G': - //u > 0, g_i > 0 for violations - m_u[r] = max(0.0, m_u[r] + (theta * violation[r])); - break; - case 'L': - //u < 0, g_i < 0 for violatoins - m_u[r] = max(0.0, m_u[r] - (theta * violation[r])); - break; - default: - assert(0); - } - - UTIL_DEBUG(m_app->m_param.LogDebugLevel, 3, - (*m_osLog) << "r: " << r << " m_u: " << m_u[r] << "\n"; - ); - } - - m_iter++; - //if no var pool buffer is used, make sure you clean up manually - /*it++; - UtilDeleteListPtr(potentialVars, it, potentialVars.end()); - potentialVars.clear();*/ - //TODO - back in algo, reduced cost fixing - //TODO: temp memory don't alloc/free each time! - //UTIL_DELARR(rhs); - UTIL_DELARR(violation); - UTIL_DELARR(activity); - UtilPrintFuncEnd(m_osLog, m_classTag, - "solutionUpdate()", m_param.LogDebugLevel, 2); - return STAT_FEASIBLE; + break; + } + + UTIL_DEBUG(m_app->m_param.LogDebugLevel, 3, + (*m_osLog) << " -> vio: " << violation[r] << "\n";); + + //?? shouldn't it be if all are feasible? + if (fabs(violation[r]) > 0.0001) { + m_zeroSub = false; + } + } + + // when to half the step size? + // same stuff as m_tlb?? setTrueLowerBound? + double bound = shatVar->getReducedCost(); // c - uA (is this set?) + // double constant = calcConstant(m_u.size(), &m_u[0]); + double constant = 0.0; + + for (r = 0; r < n_coreRows; r++) { + constant += m_u[r] * rhs[r]; + } + + // needs rhs from OSI - this is why it was better to fake it + // and have the information in OSI... but then carrying around + // modelCore and OSI for no good reason... but... this is messier + // LR Bound = (c - uA)shat + ub, assumes u >= 0 + // first iter, LR Bound = cshat - is that a valid LB? + // yes, actually is a LB for any u >= 0 + // this assumes u is optimal? or just dual feasible + // is u = 0 dual feasible? + // but only a valid bound if shat has the lowest reduced cost + // for a given u... so, for first iter of smallip, should have given + //(2,1) with LB = 2... RC don't do genInitVars? + // TODO: think initial dual vector - solve an LP to get started? + if (bound + constant > m_LB + m_app->m_param.TolZero) { + m_LB = bound + constant; + m_cntSameLB = 0; + // count_sameLB = 0; + // make param to do or not + // reducedCostFixing(reducedCost); + } else { + m_cntSameLB++; + } + + // if(count_sameLB >= m_app->m_param.RC_sameLBLimit){ + if (m_cntSameLB >= 10) { + m_step /= 2.0; + cout << "LB has not changed in " << m_cntSameLB + << " iterations - halve step: " << m_step << endl; + m_cntSameLB = 0; + } + + printf("m_UB: %12.10f, m_LB: %12.10f\n", m_UB, m_LB); + assert((m_UB - m_LB) > -0.0001); + double theta = 0.0; + double denom = 0.0; + + for (r = 0; r < n_coreRows; r++) { + denom += violation[r] * violation[r]; + } + + if (denom > 0.0) { + theta = m_step * ((1.00 * m_UB) - m_LB) / denom; + } + + // TODO: debug util to print a list of values in a nice format to log? + UTIL_DEBUG(m_app->m_param.LogDebugLevel, 3, + (*m_osLog) << "m_UB: " << m_UB << " m_LB: " << m_LB + << " denom: " << denom << " m_step: " << m_step + << " theta: " << theta << "\n";); + + // STOP 10/6/07 + // How do we deal with range constraints? What does volume do, for example? 
+ for (r = 0; r < n_coreRows; r++) { + switch (sense[r]) { + case 'E': + m_u[r] += theta * violation[r]; + break; + case 'G': + // u > 0, g_i > 0 for violations + m_u[r] = max(0.0, m_u[r] + (theta * violation[r])); + break; + case 'L': + // u < 0, g_i < 0 for violatoins + m_u[r] = max(0.0, m_u[r] - (theta * violation[r])); + break; + default: + assert(0); + } + + UTIL_DEBUG(m_app->m_param.LogDebugLevel, 3, + (*m_osLog) << "r: " << r << " m_u: " << m_u[r] << "\n";); + } + + m_iter++; + // if no var pool buffer is used, make sure you clean up manually + /*it++; + UtilDeleteListPtr(potentialVars, it, potentialVars.end()); + potentialVars.clear();*/ + // TODO - back in algo, reduced cost fixing + // TODO: temp memory don't alloc/free each time! + // UTIL_DELARR(rhs); + UTIL_DELARR(violation); + UTIL_DELARR(activity); + UtilPrintFuncEnd(m_osLog, m_classTag, "solutionUpdate()", + m_param.LogDebugLevel, 2); + return STAT_FEASIBLE; } // ------------------------------------------------------------------------- // -bool DecompAlgoRC::updateObjBound(const double mostNegRC) -{ - //--- - //--- C : LB = masterLP obj - //--- PC : LB = zDW_RMP + RC* <= zDW <= zDW_RMP - //--- where RC* is the most negative reduced cost - //--- assuming the relaxation subproblem was solved exactly - //--- - UtilPrintFuncBegin(m_osLog, m_classTag, - "updateObjBound()", m_param.LogDebugLevel, 2); - DecompConstraintSet* modelCore = m_modelCore.getModel(); - //mostNegRC not used? - //DecompVar * shatVar = m_shatVar;//m_vars.back(); - //DecompVar * shatVar = m_vars.back(); - //CoinAssert(shatVar); - int r; - const int n_coreRows = modelCore->getNumRows(); - //double bound = shatVar->getReducedCost();//c - uA (is this set?) - double constant = 0.0; - const double* rhs = &modelCore->rowRhs[0]; - - for (r = 0; r < n_coreRows; r++) { - constant += m_u[r] * rhs[r]; - } - - //double thisBoundLB = shatVar->getReducedCost() + constant; - double thisBoundLB = mostNegRC + constant; - setObjBound(thisBoundLB, constant); - UTIL_DEBUG(m_param.LogDebugLevel, 5, - (*m_osLog) - << "ThisLB = " << UtilDblToStr(thisBoundLB) << "\t" - << "BestLB = " << UtilDblToStr(m_nodeStats.objBest.first) - << "\n"; - ); - UtilPrintFuncEnd(m_osLog, m_classTag, - "updateObjBound()", m_param.LogDebugLevel, 2); - return false; +bool DecompAlgoRC::updateObjBound(const double mostNegRC) { + //--- + //--- C : LB = masterLP obj + //--- PC : LB = zDW_RMP + RC* <= zDW <= zDW_RMP + //--- where RC* is the most negative reduced cost + //--- assuming the relaxation subproblem was solved exactly + //--- + UtilPrintFuncBegin(m_osLog, m_classTag, "updateObjBound()", + m_param.LogDebugLevel, 2); + DecompConstraintSet *modelCore = m_modelCore.getModel(); + // mostNegRC not used? + // DecompVar * shatVar = m_shatVar;//m_vars.back(); + // DecompVar * shatVar = m_vars.back(); + // CoinAssert(shatVar); + int r; + const int n_coreRows = modelCore->getNumRows(); + // double bound = shatVar->getReducedCost();//c - uA (is this set?) 
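The update loop above moves each multiplier along its violation and then projects it back to the sign allowed by the row sense ('G' and 'L' rows keep u >= 0, 'E' rows are unrestricted). A sketch of one projected update, assuming the violation g = rhs - activity as computed earlier in solutionUpdate:

    #include <algorithm>

    // One projected subgradient update for a single row multiplier.
    // sense: 'E' (equality), 'G' (>=), or 'L' (<=); g = rhs - activity.
    static double updateDual(double u, double theta, double g, char sense) {
      switch (sense) {
      case 'E':
        return u + theta * g;                // free multiplier
      case 'G':
        return std::max(0.0, u + theta * g); // keep u >= 0
      case 'L':
        return std::max(0.0, u - theta * g); // mirrors the code above
      default:
        return u;
      }
    }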
+ double constant = 0.0; + const double *rhs = &modelCore->rowRhs[0]; + + for (r = 0; r < n_coreRows; r++) { + constant += m_u[r] * rhs[r]; + } + + // double thisBoundLB = shatVar->getReducedCost() + constant; + double thisBoundLB = mostNegRC + constant; + setObjBound(thisBoundLB, constant); + UTIL_DEBUG(m_param.LogDebugLevel, 5, + (*m_osLog) << "ThisLB = " << UtilDblToStr(thisBoundLB) << "\t" + << "BestLB = " + << UtilDblToStr(m_nodeStats.objBest.first) << "\n";); + UtilPrintFuncEnd(m_osLog, m_classTag, "updateObjBound()", + m_param.LogDebugLevel, 2); + return false; } diff --git a/Dip/src/DecompApp.cpp b/Dip/src/DecompApp.cpp index 63f64b94..31ac15e9 100644 --- a/Dip/src/DecompApp.cpp +++ b/Dip/src/DecompApp.cpp @@ -14,22 +14,22 @@ #include "DecompApp.h" #include "DecompAlgo.h" -#include "DecompVar.h" #include "DecompConfig.h" -#include -#include +#include "DecompVar.h" +#include "iterator" #include +#include #include -#include "iterator" +#include //#if defined(autoDecomp) && defined(PaToH) -#include -#include +#include "iterator" #include +#include #include -#include "iterator" +#include //#if defined(autoDecomp) && defined(PaToH) -#if defined(PaToH) +#if defined(PaToH) #include "patoh.h" @@ -37,7 +37,7 @@ #else extern "C" { -#if defined (COIN_HAS_HMETIS) +#if defined(COIN_HAS_HMETIS) #include "hmetis.h" #endif } @@ -47,1283 +47,1233 @@ extern "C" { using namespace std; // --------------------------------------------------------------------- // -void DecompApp::startupLog() -{ - if (m_param.LogLevel >= 0) { - (*m_osLog) - << "\n========================================================" - << "\n========================================================" - << "\nWelcome to the DIP Decomposition Framework" - << "\nCopyright 2002-2019 Lehigh University and others" - << "\nAll Rights Reserved" - << "\nDistributed under the Eclipse Public License 1.0" - << "\nVersion: " << DIP_VERSION - << "\nBuild Date: " << __DATE__ +void DecompApp::startupLog() { + if (m_param.LogLevel >= 0) { + (*m_osLog) << "\n========================================================" + << "\n========================================================" + << "\nWelcome to the DIP Decomposition Framework" + << "\nCopyright 2002-2019 Lehigh University and others" + << "\nAll Rights Reserved" + << "\nDistributed under the Eclipse Public License 1.0" + << "\nVersion: " << DIP_VERSION << "\nBuild Date: " << __DATE__ #ifdef DIP_SVN_REV - << "\nRevision Number: " << DIP_SVN_REV + << "\nRevision Number: " << DIP_SVN_REV #endif - << "\n========================================================" - << "\n========================================================" - << "\n"; - } - - if (m_param.LogLevel > 1) { - //m_param.dumpSettings(m_osLog); - } + << "\n========================================================" + << "\n========================================================" + << "\n"; + } + + if (m_param.LogLevel > 1) { + // m_param.dumpSettings(m_osLog); + } } // --------------------------------------------------------------------- // -int DecompApp::generateInitVars(DecompVarList& initVars) -{ - // --- - // --- this function does nothing by default - // --- - UtilPrintFuncBegin(m_osLog, m_classTag, - "generateInitVars()", m_param.LogDebugLevel, 2); - UtilPrintFuncEnd(m_osLog, m_classTag, - "generateInitVars()", m_param.LogDebugLevel, 2); - return 0; +int DecompApp::generateInitVars(DecompVarList &initVars) { + // --- + // --- this function does nothing by default + // --- + UtilPrintFuncBegin(m_osLog, m_classTag, "generateInitVars()", + 
m_param.LogDebugLevel, 2); + UtilPrintFuncEnd(m_osLog, m_classTag, "generateInitVars()", + m_param.LogDebugLevel, 2); + return 0; } // --------------------------------------------------------------------- // -int DecompApp::generateCuts(const double* x, - DecompCutList& newCuts) -{ - // --- - // --- this function does nothing by default - // --- - UtilPrintFuncBegin(m_osLog, m_classTag, - "generateCuts()", m_param.LogDebugLevel, 2); - UtilPrintFuncEnd(m_osLog, m_classTag, - "generateCuts()", m_param.LogDebugLevel, 2); - return 0; +int DecompApp::generateCuts(const double *x, DecompCutList &newCuts) { + // --- + // --- this function does nothing by default + // --- + UtilPrintFuncBegin(m_osLog, m_classTag, "generateCuts()", + m_param.LogDebugLevel, 2); + UtilPrintFuncEnd(m_osLog, m_classTag, "generateCuts()", m_param.LogDebugLevel, + 2); + return 0; } /*-------------------------------------------------------------------------*/ -void DecompApp::printOriginalColumn(const int index, - ostream* os) const -{ - (*os) << index << " "; +void DecompApp::printOriginalColumn(const int index, ostream *os) const { + (*os) << index << " "; } /*-------------------------------------------------------------------------*/ -void DecompApp::printOriginalSolution(const int n_cols, - const vector& colNames, - const double* solution, - ostream* os) const -{ - int i; - bool hasNames = false; - - //--- - //--- do we have column names? - //--- - if (colNames.size() > 0) { - hasNames = true; - } - - (*os) << setiosflags(ios::fixed | ios::showpoint); - - for (i = 0; i < n_cols; i++) { - if (!UtilIsZero(solution[i])) { - printOriginalColumn(i, os); - - if (hasNames) - (*os) << "\t" << colNames[i] - << "\t" << solution[i] << endl; - else { - (*os) << "\t" << solution[i] << endl; - } +void DecompApp::printOriginalSolution(const int n_cols, + const vector &colNames, + const double *solution, + ostream *os) const { + int i; + bool hasNames = false; + + //--- + //--- do we have column names? 
+ //--- + if (colNames.size() > 0) { + hasNames = true; + } + + (*os) << setiosflags(ios::fixed | ios::showpoint); + + for (i = 0; i < n_cols; i++) { + if (!UtilIsZero(solution[i])) { + printOriginalColumn(i, os); + + if (hasNames) + (*os) << "\t" << colNames[i] << "\t" << solution[i] << endl; + else { + (*os) << "\t" << solution[i] << endl; } - } + } + } - (*os) << resetiosflags(ios::fixed | ios::showpoint | ios::scientific); + (*os) << resetiosflags(ios::fixed | ios::showpoint | ios::scientific); } /* * The following methods are from MILPBlock_DecompApp */ -void DecompApp::initializeApp() -{ - UtilPrintFuncBegin(m_osLog, m_classTag, - "initializeApp()", m_param.LogLevel, 2); - - readProblem(); - - if (!m_param.Concurrent && !NumBlocks) { - //--- - //--- read block file - //--- - if (m_param.BlockFile != ""){ - readBlockFile(); - } - } else - // automatic structure detection - { +void DecompApp::initializeApp() { + UtilPrintFuncBegin(m_osLog, m_classTag, "initializeApp()", m_param.LogLevel, + 2); + + readProblem(); + + if (!m_param.Concurrent && !NumBlocks) { + //--- + //--- read block file + //--- + if (m_param.BlockFile != "") { + readBlockFile(); + } + } else + // automatic structure detection + { #pragma omp critical - singlyBorderStructureDetection(); - } - - /* - * After identifying the strucuture either through files or - * automatic structure detection, call the method below to - * create models - * - */ - createModels(); - UtilPrintFuncEnd(m_osLog, m_classTag, - "initializeApp()", m_param.LogLevel, 2); + singlyBorderStructureDetection(); + } + + /* + * After identifying the strucuture either through files or + * automatic structure detection, call the method below to + * create models + * + */ + createModels(); + UtilPrintFuncEnd(m_osLog, m_classTag, "initializeApp()", m_param.LogLevel, 2); } - -void DecompApp::readProblem() -{ - //--- - //--- read MILP instance (mps format) - //--- - string fileName; - - if (m_param.DataDir != "") { - fileName = m_param.DataDir + UtilDirSlash() + m_param.Instance; - } else { - fileName = m_param.Instance; - } - - if (m_param.Instance.empty()) { - cerr << "================================================" << std::endl - << "Usage:" - << "./dip --BlockFileFormat List" << std::endl - << " --Instance /FilePath/ABC.mps" << std::endl - << " --BlockFile /FilePath/ABC.block" << std::endl - << "================================================" << std::endl - << std::endl; - exit(0); - } - - int rstatus = 0; - bool foundFormat = false; - - if (m_param.InstanceFormat == "") { - string::size_type idx = fileName.rfind('.'); - string extension = fileName.substr(idx + 1); - std::size_t found = fileName.substr(0, idx).rfind('.'); - - if (found != std::string::npos && extension == "gz") { - extension = fileName.substr(found + 1); +void DecompApp::readProblem() { + //--- + //--- read MILP instance (mps format) + //--- + string fileName; + + if (m_param.DataDir != "") { + fileName = m_param.DataDir + UtilDirSlash() + m_param.Instance; + } else { + fileName = m_param.Instance; + } + + if (m_param.Instance.empty()) { + cerr << "================================================" << std::endl + << "Usage:" + << "./dip --BlockFileFormat List" << std::endl + << " --Instance /FilePath/ABC.mps" << std::endl + << " --BlockFile /FilePath/ABC.block" << std::endl + << "================================================" << std::endl + << std::endl; + exit(0); + } + + int rstatus = 0; + bool foundFormat = false; + + if (m_param.InstanceFormat == "") { + string::size_type idx = 
fileName.rfind('.'); + string extension = fileName.substr(idx + 1); + std::size_t found = fileName.substr(0, idx).rfind('.'); + + if (found != std::string::npos && extension == "gz") { + extension = fileName.substr(found + 1); + } + + if (idx != string::npos) { + if (extension == "MPS" || extension == "mps" || extension == "mps.gz") { + m_param.InstanceFormat = "MPS"; + } else if (extension == "LP" || extension == "lp" || + extension == "lp.gz") { + m_param.InstanceFormat = "LP"; } - - if (idx != string::npos) { - if (extension == "MPS" || extension == "mps" || extension == "mps.gz") { - m_param.InstanceFormat = "MPS"; - } else if (extension == "LP" || extension == "lp" || extension == "lp.gz") { - m_param.InstanceFormat = "LP"; - } - } else { - cerr << "File format not specified and no file extension" << endl; - throw UtilException("I/O Error.", "initializeApp", "DecompApp"); - } - } - - if (m_param.InstanceFormat == "MPS") { - m_mpsIO.messageHandler()->setLogLevel(m_param.LogLpLevel); - } else if (m_param.InstanceFormat == "LP") { - m_lpIO.messageHandler()->setLogLevel(m_param.LogLpLevel); - } - - if (m_param.InstanceFormat == "MPS") { - rstatus = m_mpsIO.readMps(fileName.c_str()); - foundFormat = true; - } else if (m_param.InstanceFormat == "LP") { - m_lpIO.readLp(fileName.c_str()); - foundFormat = true; - } - - if (!foundFormat) { - cerr << "Error: Format = " << m_param.InstanceFormat << " unknown." - << endl; - throw UtilException("I/O Error.", "initalizeApp", "DecompApp"); - } - - if (rstatus < 0) { - cerr << "Error: Filename = " << fileName << " failed to open." << endl; - throw UtilException("I/O Error.", "initalizeApp", "DecompApp"); - } - - if (m_param.LogLevel >= 2) { - if (m_param.InstanceFormat == "MPS") { - (*m_osLog) << "Objective Offset = " - << UtilDblToStr(m_mpsIO.objectiveOffset()) << endl; - } else if (m_param.InstanceFormat == "LP") { - (*m_osLog) << "Objective Offset = " - << UtilDblToStr(m_lpIO.objectiveOffset()) << endl; - } - } - - //--- - //--- set best known lb/ub - //--- - double offset = 0; - - if (m_param.InstanceFormat == "MPS") { - offset = m_mpsIO.objectiveOffset(); - } else if (m_param.InstanceFormat == "LP") { - offset = m_lpIO.objectiveOffset(); - } - - setBestKnownLB(m_param.BestKnownLB + offset); - setBestKnownUB(m_param.BestKnownUB + offset); - preprocess(); - - if (m_param.InstanceFormat == "MPS") { - m_matrix = m_mpsIO.getMatrixByRow(); - } else if (m_param.InstanceFormat == "LP") { - m_matrix = m_lpIO.getMatrixByRow(); - } + } else { + cerr << "File format not specified and no file extension" << endl; + throw UtilException("I/O Error.", "initializeApp", "DecompApp"); + } + } + + if (m_param.InstanceFormat == "MPS") { + m_mpsIO.messageHandler()->setLogLevel(m_param.LogLpLevel); + } else if (m_param.InstanceFormat == "LP") { + m_lpIO.messageHandler()->setLogLevel(m_param.LogLpLevel); + } + + if (m_param.InstanceFormat == "MPS") { + rstatus = m_mpsIO.readMps(fileName.c_str()); + foundFormat = true; + } else if (m_param.InstanceFormat == "LP") { + m_lpIO.readLp(fileName.c_str()); + foundFormat = true; + } + + if (!foundFormat) { + cerr << "Error: Format = " << m_param.InstanceFormat << " unknown." << endl; + throw UtilException("I/O Error.", "initalizeApp", "DecompApp"); + } + + if (rstatus < 0) { + cerr << "Error: Filename = " << fileName << " failed to open." 
<< endl; + throw UtilException("I/O Error.", "initalizeApp", "DecompApp"); + } + + if (m_param.LogLevel >= 2) { + if (m_param.InstanceFormat == "MPS") { + (*m_osLog) << "Objective Offset = " + << UtilDblToStr(m_mpsIO.objectiveOffset()) << endl; + } else if (m_param.InstanceFormat == "LP") { + (*m_osLog) << "Objective Offset = " + << UtilDblToStr(m_lpIO.objectiveOffset()) << endl; + } + } + + //--- + //--- set best known lb/ub + //--- + double offset = 0; + + if (m_param.InstanceFormat == "MPS") { + offset = m_mpsIO.objectiveOffset(); + } else if (m_param.InstanceFormat == "LP") { + offset = m_lpIO.objectiveOffset(); + } + + setBestKnownLB(m_param.BestKnownLB + offset); + setBestKnownUB(m_param.BestKnownUB + offset); + preprocess(); + + if (m_param.InstanceFormat == "MPS") { + m_matrix = m_mpsIO.getMatrixByRow(); + } else if (m_param.InstanceFormat == "LP") { + m_matrix = m_lpIO.getMatrixByRow(); + } } - - void DecompApp::preprocess() {} -void DecompApp::readBlockFile() -{ - ifstream is; - string fileName; - - if (m_param.DataDir != "") { - fileName = m_param.DataDir + UtilDirSlash() + m_param.BlockFile; - } else { - fileName = m_param.BlockFile; - } - - //--- - //--- is there a permutation file? - //--- this file just remaps the row ids - //--- (for use in submission of atm to MIPLIB2010 and debugging) - //--- - map permute; - map::iterator mit; - - if (m_param.PermuteFile.size() > 0) { - if (m_param.DataDir != "") { - fileName = m_param.DataDir + UtilDirSlash() + m_param.PermuteFile; - } else { - fileName = m_param.PermuteFile; +void DecompApp::readBlockFile() { + ifstream is; + string fileName; + + if (m_param.DataDir != "") { + fileName = m_param.DataDir + UtilDirSlash() + m_param.BlockFile; + } else { + fileName = m_param.BlockFile; + } + + //--- + //--- is there a permutation file? + //--- this file just remaps the row ids + //--- (for use in submission of atm to MIPLIB2010 and debugging) + //--- + map permute; + map::iterator mit; + + if (m_param.PermuteFile.size() > 0) { + if (m_param.DataDir != "") { + fileName = m_param.DataDir + UtilDirSlash() + m_param.PermuteFile; + } else { + fileName = m_param.PermuteFile; + } + + ifstream isP; + int rowIdOld, rowIdNew; + //--- + //--- open file streams + //--- + UtilOpenFile(isP, fileName.c_str()); + + while (!isP.eof()) { + if (isP.eof()) { + break; } - ifstream isP; - int rowIdOld, rowIdNew; - //--- - //--- open file streams - //--- - UtilOpenFile(isP, fileName.c_str()); - - while (!isP.eof()) { - if (isP.eof()) { - break; - } - - isP >> rowIdOld >> rowIdNew; - permute.insert(make_pair(rowIdOld, rowIdNew)); - } + isP >> rowIdOld >> rowIdNew; + permute.insert(make_pair(rowIdOld, rowIdNew)); + } + + isP.close(); + } + + //--- + //--- open file streams + //--- + UtilOpenFile(is, fileName.c_str()); + int i, rowId, rowIdP, numRowsInBlock, blockId; + //--- + //--- first create a map from row name to row id from mps + //--- CHECK: mps to OSI guaranteed to keep order of rows? 
+ //--- + map rowNameToId; + map::iterator rowNameToIdIt; + int numRows = 0; + + if (m_param.InstanceFormat == "MPS") { + numRows = m_mpsIO.getNumRows(); + + for (i = 0; i < numRows; i++) { + rowNameToId.insert(make_pair(m_mpsIO.rowName(i), i)); + } + } else if (m_param.InstanceFormat == "LP") { + numRows = m_lpIO.getNumRows(); + + for (i = 0; i < numRows; i++) { + rowNameToId.insert(make_pair(m_lpIO.rowName(i), i)); + } + } + + if (m_param.LogLevel >= 1) { + (*m_osLog) << "Reading " << fileName << endl; + } + + map> blocks; + map>::iterator blocksIt; + + if (m_param.BlockFileFormat == "") { + string::size_type idx = fileName.rfind('.'); + + if (idx != string::npos) { + string extension = fileName.substr(idx + 1); - isP.close(); - } - - //--- - //--- open file streams - //--- - UtilOpenFile(is, fileName.c_str()); - int i, rowId, rowIdP, numRowsInBlock, blockId; - //--- - //--- first create a map from row name to row id from mps - //--- CHECK: mps to OSI guaranteed to keep order of rows? - //--- - map rowNameToId; - map::iterator rowNameToIdIt; - int numRows = 0; - - if (m_param.InstanceFormat == "MPS") { - numRows = m_mpsIO.getNumRows(); - - for (i = 0; i < numRows; i++) { - rowNameToId.insert(make_pair(m_mpsIO.rowName(i), i)); + if (extension == "DEC" || extension == "dec") { + m_param.BlockFileFormat = "ZIBList"; + } else if (extension == "block" || extension == "blk") { + m_param.BlockFileFormat = "List"; } - } else if (m_param.InstanceFormat == "LP") { - numRows = m_lpIO.getNumRows(); + } else { + cerr << "File format not specified and no file extension" << endl; + throw UtilException("I/O Error.", "initializeApp", "DecompApp"); + } + } + + if (m_param.BlockFileFormat == "List" || m_param.BlockFileFormat == "LIST") { + //--- + //--- The block file defines those rows in each block. + //--- + //--- + //--- + //--- + //--- + string rowName; + + while (!is.eof()) { + is >> blockId; + is >> numRowsInBlock; - for (i = 0; i < numRows; i++) { - rowNameToId.insert(make_pair(m_lpIO.rowName(i), i)); + if (is.eof()) { + break; } - } - if (m_param.LogLevel >= 1) { - (*m_osLog) << "Reading " << fileName << endl; - } + vector rowsInBlock; - map > blocks; - map >::iterator blocksIt; + for (i = 0; i < numRowsInBlock; i++) { + is >> rowId; + mit = permute.find(rowId); - if (m_param.BlockFileFormat == "") { - string::size_type idx = fileName.rfind('.'); - - if (idx != string::npos) { - string extension = fileName.substr(idx + 1); - - if (extension == "DEC" || extension == "dec") { - m_param.BlockFileFormat = "ZIBList"; - } else if (extension == "block" || extension == "blk") { - m_param.BlockFileFormat = "List"; - } - } else { - cerr << "File format not specified and no file extension" << endl; - throw UtilException("I/O Error.", "initializeApp", "DecompApp"); + if (mit != permute.end()) { + rowsInBlock.push_back(mit->second); + } else { + rowsInBlock.push_back(rowId); + } } - } - if (m_param.BlockFileFormat == "List" || - m_param.BlockFileFormat == "LIST") { - //--- - //--- The block file defines those rows in each block. 
- //--- - //--- - //--- - //--- - //--- - string rowName; - - while (!is.eof()) { - is >> blockId; - is >> numRowsInBlock; - - if (is.eof()) { - break; - } - - vector rowsInBlock; - - for (i = 0; i < numRowsInBlock; i++) { - is >> rowId; - mit = permute.find(rowId); - - if (mit != permute.end()) { - rowsInBlock.push_back(mit->second); - } else { - rowsInBlock.push_back(rowId); - } - } + blocks.insert(make_pair(blockId, rowsInBlock)); - blocks.insert(make_pair(blockId, rowsInBlock)); - - if (is.eof()) { - break; - } - } - } else if (m_param.BlockFileFormat == "ZIBList" || - m_param.BlockFileFormat == "ZIBLIST") { - //-- The block file defines those rows in each block. - //-- NBLOCKS - //-- - //-- BLOCK - //-- - //-- BLOCK - //-- - int numBlocks = 0; - string tmp, rowName; - - while (!numBlocks) { - is >> tmp; - - if (tmp == "NBLOCKS") { - is >> numBlocks; - } + if (is.eof()) { + break; } - - while (tmp != "BLOCK") { - is >> tmp; + } + } else if (m_param.BlockFileFormat == "ZIBList" || + m_param.BlockFileFormat == "ZIBLIST") { + //-- The block file defines those rows in each block. + //-- NBLOCKS + //-- + //-- BLOCK + //-- + //-- BLOCK + //-- + int numBlocks = 0; + string tmp, rowName; + + while (!numBlocks) { + is >> tmp; + + if (tmp == "NBLOCKS") { + is >> numBlocks; } + } - while (!is.eof() && rowName != "MASTERCONSS") { - is >> blockId; - vector rowsInBlock; - - while (true) { - is >> rowName; + while (tmp != "BLOCK") { + is >> tmp; + } - if (is.eof()) { - break; - } + while (!is.eof() && rowName != "MASTERCONSS") { + is >> blockId; + vector rowsInBlock; - if (rowName == "BLOCK" || rowName == "MASTERCONSS") { - break; - } + while (true) { + is >> rowName; - rowNameToIdIt = rowNameToId.find(rowName); + if (is.eof()) { + break; + } - if (rowNameToIdIt != rowNameToId.end()) { - rowId = rowNameToIdIt->second; - } else { - std::cout << "Warning: Unrecognized row name" << rowName; - std::cout << "in block file" << std::endl; - } + if (rowName == "BLOCK" || rowName == "MASTERCONSS") { + break; + } - rowsInBlock.push_back(rowId); - } + rowNameToIdIt = rowNameToId.find(rowName); - blocks.insert(make_pair(blockId, rowsInBlock)); + if (rowNameToIdIt != rowNameToId.end()) { + rowId = rowNameToIdIt->second; + } else { + std::cout << "Warning: Unrecognized row name" << rowName; + std::cout << "in block file" << std::endl; + } - if (is.eof()) { - break; - } + rowsInBlock.push_back(rowId); } - } else if (m_param.BlockFileFormat == "Pair" || - m_param.BlockFileFormat == "PAIR") { - //--- - //--- - //--- ... - //--- - is >> blockId; - while (!is.eof()) { - is >> rowId; - mit = permute.find(rowId); + blocks.insert(make_pair(blockId, rowsInBlock)); - if (mit != permute.end()) { - rowIdP = mit->second; - } else { - rowIdP = rowId; - } + if (is.eof()) { + break; + } + } + } else if (m_param.BlockFileFormat == "Pair" || + m_param.BlockFileFormat == "PAIR") { + //--- + //--- + //--- ... 
+ //--- + is >> blockId; + + while (!is.eof()) { + is >> rowId; + mit = permute.find(rowId); + + if (mit != permute.end()) { + rowIdP = mit->second; + } else { + rowIdP = rowId; + } - blocksIt = blocks.find(blockId); + blocksIt = blocks.find(blockId); - if (blocksIt != blocks.end()) { - blocksIt->second.push_back(rowIdP); - } else { - vector rowsInBlocks; - rowsInBlocks.push_back(rowIdP); - blocks.insert(make_pair(blockId, rowsInBlocks)); - } + if (blocksIt != blocks.end()) { + blocksIt->second.push_back(rowIdP); + } else { + vector rowsInBlocks; + rowsInBlocks.push_back(rowIdP); + blocks.insert(make_pair(blockId, rowsInBlocks)); + } - is >> blockId; + is >> blockId; - if (is.eof()) { - break; - } + if (is.eof()) { + break; } - } else if (m_param.BlockFileFormat == "PairName" || - m_param.BlockFileFormat == "PAIRNAME") { - //--- - //--- - //--- ... - //--- - string rowName = ""; - is >> blockId; + } + } else if (m_param.BlockFileFormat == "PairName" || + m_param.BlockFileFormat == "PAIRNAME") { + //--- + //--- + //--- ... + //--- + string rowName = ""; + is >> blockId; + + while (!is.eof()) { + is >> rowName; - while (!is.eof()) { - is >> rowName; - - if (is.eof()) { - break; - } - - rowNameToIdIt = rowNameToId.find(rowName); - - if (rowNameToIdIt != rowNameToId.end()) { - rowId = rowNameToIdIt->second; - //printf("rowName=%s rowId=%d\n", rowName.c_str(), rowId); - } else { - //--- - //--- NOTE: this can happen if we use a presolved mps file - //--- with an original blocks file - //--- - if (m_param.LogLevel >= 3) { - (*m_osLog) << "Warning: Row name (" - << rowName << " in block file " - << "is not found in instance file" << endl; - } - - //throw UtilException("Invalid Input.", - // "readBlockFile", "DecompApp"); - rowId = -1; - } - - if (rowId != -1) { - mit = permute.find(rowId); - - if (mit != permute.end()) { - rowIdP = mit->second; - } else { - rowIdP = rowId; - } - - blocksIt = blocks.find(blockId); - - if (blocksIt != blocks.end()) { - blocksIt->second.push_back(rowIdP); - } else { - vector rowsInBlocks; - rowsInBlocks.push_back(rowIdP); - blocks.insert(make_pair(blockId, rowsInBlocks)); - } - } - - is >> blockId; - - if (is.eof()) { - break; - } + if (is.eof()) { + break; } - } else { - cerr << "Error: BlockFileFormat = " - << m_param.BlockFileFormat - << " is an invalid type. Valid types = (List,ZIBList,Pair,PairName)." 
- << endl; - throw UtilException("Invalid Parameter.", - "readBlockFile", "DecompApp"); - } - - //--- - //--- after presolve, some blocks might have been completely - //--- removed - renumber the block ids - it is arbitrary anyway - //--- and copy into class object m_blocks - //--- - blockId = 0; - - for (blocksIt = blocks.begin(); blocksIt != blocks.end(); blocksIt++) { - m_blocks.insert(make_pair(blockId, blocksIt->second)); - blockId++; - } - - if (m_param.LogLevel >= 3) { - map >::iterator mit; - vector ::iterator vit; - - for (mit = m_blocks.begin(); mit != m_blocks.end(); mit++) { - (*m_osLog) << "Block " << (*mit).first << " : "; - - for (vit = (*mit).second.begin(); vit != (*mit).second.end(); vit++) { - (*m_osLog) << (*vit) << " "; - } - - (*m_osLog) << endl; + + rowNameToIdIt = rowNameToId.find(rowName); + + if (rowNameToIdIt != rowNameToId.end()) { + rowId = rowNameToIdIt->second; + // printf("rowName=%s rowId=%d\n", rowName.c_str(), rowId); + } else { + //--- + //--- NOTE: this can happen if we use a presolved mps file + //--- with an original blocks file + //--- + if (m_param.LogLevel >= 3) { + (*m_osLog) << "Warning: Row name (" << rowName << " in block file " + << "is not found in instance file" << endl; + } + + // throw UtilException("Invalid Input.", + // "readBlockFile", "DecompApp"); + rowId = -1; } - } - //exit(1); - is.close(); -} + if (rowId != -1) { + mit = permute.find(rowId); + if (mit != permute.end()) { + rowIdP = mit->second; + } else { + rowIdP = rowId; + } -void DecompApp::readInitSolutionFile(DecompVarList& initVars) -{ - //TODO: is this ok for sparse? - ifstream is; - string fileName = m_param.DataDir - + UtilDirSlash() + m_param.InitSolutionFile; - - if (m_param.InitSolutionFile == "") { - return; - } - - //--- - //--- create map from col name to col index - //--- - int i; - map colNameToIndex; - const vector& colNames = m_modelC->getColNames(); - - for (i = 0; i < m_modelC->getNumCols(); i++) { - colNameToIndex.insert(make_pair(colNames[i], i)); - } - - //--- - //--- create a map from col index to block index - //--- - map colIndexToBlockIndex; - map::iterator mit; - - for (mit = m_modelR.begin(); mit != m_modelR.end(); mit++) { - int blockIndex = mit->first; - DecompConstraintSet* model = mit->second; - const vector& activeColumns = model->getActiveColumns(); - vector::const_iterator vit; - - for (vit = activeColumns.begin(); vit != activeColumns.end(); vit++) { - colIndexToBlockIndex.insert(make_pair(*vit, blockIndex)); - } - } - - //--- - //--- open file streams - //--- - UtilOpenFile(is, fileName.c_str()); - - if (m_param.LogLevel >= 1) { - (*m_osLog) << "Reading " << fileName << endl; - } - - //--- - //--- create variables for each block of each solution - //--- - int solutionIndex, colIndex, blockIndex; - string colName; - double colValue; - char line[1000]; - map< pair, pair< vector, vector > > varTemp; - map< pair, pair< vector, vector > >::iterator it; - is.getline(line, 1000); - - //TODO? master-only - // 1. if user gives lb, then add lb only - // if 0, add 0-col? or just let it take care of from PI? - // 2. if user gives ub, then add ub only - // 3. 
if user gives betwen bounds, then add lb and ub - // unless it is general integer - while (!is.eof()) { - is >> solutionIndex >> colName >> colValue; + blocksIt = blocks.find(blockId); - if (is.eof()) { - break; + if (blocksIt != blocks.end()) { + blocksIt->second.push_back(rowIdP); + } else { + vector rowsInBlocks; + rowsInBlocks.push_back(rowIdP); + blocks.insert(make_pair(blockId, rowsInBlocks)); + } } - colIndex = colNameToIndex[colName]; - blockIndex = colIndexToBlockIndex[colIndex]; - /* - const double* colLB = m_modelC->getColLB(); - const double* colUB = m_modelC->getColUB(); - DecompConstraintSet* model = m_modelR[blockIndex]; - if (model->m_masterOnly) { - printf("MasterOnly col=%s value=%g lb=%g ub=%g", - colName.c_str(), colValue, colLB[colIndex], colUB[colIndex]); - - if (colValue < (colUB[colIndex] - 1.0e-5) && - colValue > (colLB[colIndex] + 1.0e-5)) { - printf(" --> in between bounds"); - //TODO: if so, should add both lb and ub - } - - printf("\n"); - } - */ - pair p = make_pair(solutionIndex, blockIndex); - it = varTemp.find(p); - - if (it == varTemp.end()) { - vector ind; - vector els; - ind.push_back(colIndex); - els.push_back(colValue); - varTemp.insert(make_pair(p, make_pair(ind, els))); - } else { - vector& ind = it->second.first; - vector& els = it->second.second; - ind.push_back(colIndex); - els.push_back(colValue); + is >> blockId; + + if (is.eof()) { + break; } - } - - //--- - //--- create DecompVar's from varTemp - //--- - for (it = varTemp.begin(); it != varTemp.end(); it++) { - const pair& indexPair = it->first; - pair< vector, vector >& columnPair = it->second; - double origCost = 0.0; - - for (i = 0; i < static_cast(columnPair.first.size()); i++) { - origCost += columnPair.second[i] * - m_objective[columnPair.first[i]]; + } + } else { + cerr << "Error: BlockFileFormat = " << m_param.BlockFileFormat + << " is an invalid type. Valid types = (List,ZIBList,Pair,PairName)." + << endl; + throw UtilException("Invalid Parameter.", "readBlockFile", "DecompApp"); + } + + //--- + //--- after presolve, some blocks might have been completely + //--- removed - renumber the block ids - it is arbitrary anyway + //--- and copy into class object m_blocks + //--- + blockId = 0; + + for (blocksIt = blocks.begin(); blocksIt != blocks.end(); blocksIt++) { + m_blocks.insert(make_pair(blockId, blocksIt->second)); + blockId++; + } + + if (m_param.LogLevel >= 3) { + map>::iterator mit; + vector::iterator vit; + + for (mit = m_blocks.begin(); mit != m_blocks.end(); mit++) { + (*m_osLog) << "Block " << (*mit).first << " : "; + + for (vit = (*mit).second.begin(); vit != (*mit).second.end(); vit++) { + (*m_osLog) << (*vit) << " "; } - DecompVar* var = new DecompVar(columnPair.first, - columnPair.second, - -1.0, - origCost); - var->setBlockId(indexPair.second); - var->print(m_decompAlgo->getInfinity(), m_osLog, colNames); - initVars.push_back(var); - printf("Adding initial variable with origCost = %g\n", origCost); - } - - is.close(); + (*m_osLog) << endl; + } + } + + // exit(1); + is.close(); } +void DecompApp::readInitSolutionFile(DecompVarList &initVars) { + // TODO: is this ok for sparse? 
+ ifstream is; + string fileName = m_param.DataDir + UtilDirSlash() + m_param.InitSolutionFile; + + if (m_param.InitSolutionFile == "") { + return; + } + + //--- + //--- create map from col name to col index + //--- + int i; + map colNameToIndex; + const vector &colNames = m_modelC->getColNames(); + + for (i = 0; i < m_modelC->getNumCols(); i++) { + colNameToIndex.insert(make_pair(colNames[i], i)); + } + + //--- + //--- create a map from col index to block index + //--- + map colIndexToBlockIndex; + map::iterator mit; + + for (mit = m_modelR.begin(); mit != m_modelR.end(); mit++) { + int blockIndex = mit->first; + DecompConstraintSet *model = mit->second; + const vector &activeColumns = model->getActiveColumns(); + vector::const_iterator vit; + + for (vit = activeColumns.begin(); vit != activeColumns.end(); vit++) { + colIndexToBlockIndex.insert(make_pair(*vit, blockIndex)); + } + } + + //--- + //--- open file streams + //--- + UtilOpenFile(is, fileName.c_str()); + + if (m_param.LogLevel >= 1) { + (*m_osLog) << "Reading " << fileName << endl; + } + + //--- + //--- create variables for each block of each solution + //--- + int solutionIndex, colIndex, blockIndex; + string colName; + double colValue; + char line[1000]; + map, pair, vector>> varTemp; + map, pair, vector>>::iterator it; + is.getline(line, 1000); + + // TODO? master-only + // 1. if user gives lb, then add lb only + // if 0, add 0-col? or just let it take care of from PI? + // 2. if user gives ub, then add ub only + // 3. if user gives betwen bounds, then add lb and ub + // unless it is general integer + while (!is.eof()) { + is >> solutionIndex >> colName >> colValue; + + if (is.eof()) { + break; + } + + colIndex = colNameToIndex[colName]; + blockIndex = colIndexToBlockIndex[colIndex]; + /* + const double* colLB = m_modelC->getColLB(); + const double* colUB = m_modelC->getColUB(); + DecompConstraintSet* model = m_modelR[blockIndex]; + if (model->m_masterOnly) { + printf("MasterOnly col=%s value=%g lb=%g ub=%g", + colName.c_str(), colValue, colLB[colIndex], colUB[colIndex]); + + if (colValue < (colUB[colIndex] - 1.0e-5) && + colValue > (colLB[colIndex] + 1.0e-5)) { + printf(" --> in between bounds"); + //TODO: if so, should add both lb and ub + } + + printf("\n"); + } + */ + pair p = make_pair(solutionIndex, blockIndex); + it = varTemp.find(p); + + if (it == varTemp.end()) { + vector ind; + vector els; + ind.push_back(colIndex); + els.push_back(colValue); + varTemp.insert(make_pair(p, make_pair(ind, els))); + } else { + vector &ind = it->second.first; + vector &els = it->second.second; + ind.push_back(colIndex); + els.push_back(colValue); + } + } + + //--- + //--- create DecompVar's from varTemp + //--- + for (it = varTemp.begin(); it != varTemp.end(); it++) { + const pair &indexPair = it->first; + pair, vector> &columnPair = it->second; + double origCost = 0.0; + + for (i = 0; i < static_cast(columnPair.first.size()); i++) { + origCost += columnPair.second[i] * m_objective[columnPair.first[i]]; + } + + DecompVar *var = + new DecompVar(columnPair.first, columnPair.second, -1.0, origCost); + var->setBlockId(indexPair.second); + var->print(m_decompAlgo->getInfinity(), m_osLog, colNames); + initVars.push_back(var); + printf("Adding initial variable with origCost = %g\n", origCost); + } + + is.close(); +} -void DecompApp::findActiveColumns(const vector& rowsPart, - set& activeColsSet) -{ - const CoinPackedMatrix* M = NULL; +void DecompApp::findActiveColumns(const vector &rowsPart, + set &activeColsSet) { + const CoinPackedMatrix *M 
= NULL; - if (m_param.InstanceFormat == "MPS") { - M = m_mpsIO.getMatrixByRow(); - } else if (m_param.InstanceFormat == "LP") { - M = m_lpIO.getMatrixByRow(); - } + if (m_param.InstanceFormat == "MPS") { + M = m_mpsIO.getMatrixByRow(); + } else if (m_param.InstanceFormat == "LP") { + M = m_lpIO.getMatrixByRow(); + } - const int* ind = M->getIndices(); + const int *ind = M->getIndices(); - const int* beg = M->getVectorStarts(); + const int *beg = M->getVectorStarts(); - const int* len = M->getVectorLengths(); + const int *len = M->getVectorLengths(); - const int* indR = NULL; + const int *indR = NULL; - //--- - //--- which columns are present in this part's rows - //--- - int k, r; + //--- + //--- which columns are present in this part's rows + //--- + int k, r; - vector::const_iterator it; + vector::const_iterator it; - for (it = rowsPart.begin(); it != rowsPart.end(); it++) { - r = *it; - indR = ind + beg[r]; + for (it = rowsPart.begin(); it != rowsPart.end(); it++) { + r = *it; + indR = ind + beg[r]; - for (k = 0; k < len[r]; k++) { - activeColsSet.insert(indR[k]); - } - } + for (k = 0; k < len[r]; k++) { + activeColsSet.insert(indR[k]); + } + } } -void DecompApp::createModelPart(DecompConstraintSet* model, - const int nRowsPart, - const int* rowsPart) -{ - int nCols = 0; - double* rowLB = NULL; - double* rowUB = NULL; - double* colLB = NULL; - double* colUB = NULL; - char* integerVars = NULL; - - if (m_param.InstanceFormat == "MPS") { - nCols = m_mpsIO.getNumCols(); - rowLB = const_cast(m_mpsIO.getRowLower()); - rowUB = const_cast(m_mpsIO.getRowUpper()); - colLB = const_cast(m_mpsIO.getColLower()); - colUB = const_cast(m_mpsIO.getColUpper()); - integerVars = const_cast (m_mpsIO.integerColumns()); - } else if (m_param.InstanceFormat == "LP") { - nCols = m_lpIO.getNumCols(); - rowLB = const_cast(m_lpIO.getRowLower()); - rowUB = const_cast(m_lpIO.getRowUpper()); - colLB = const_cast(m_lpIO.getColLower()); - colUB = const_cast(m_lpIO.getColUpper()); - integerVars = const_cast (m_lpIO.integerColumns()); - } - - model->M = new CoinPackedMatrix(false, 0.0, 0.0); - - if (!model->M) { - throw UtilExceptionMemory("createModelPart", "DecompApp"); - } - - model->reserve(nRowsPart, nCols); - - if (m_param.InstanceFormat == "MPS") { - model->M->submatrixOf(*m_mpsIO.getMatrixByRow(), nRowsPart, rowsPart); - } else if (m_param.InstanceFormat == "LP") { - model->M->submatrixOf(*m_lpIO.getMatrixByRow(), nRowsPart, rowsPart); - } - - //--- - //--- set the row upper and lower bounds - //--- set the col upper and lower bounds - //--- - int i, r; - - for (i = 0; i < nRowsPart; i++) { - r = rowsPart[i]; - - if (m_param.UseNames) { - const char* rowName = NULL; - - if (m_param.InstanceFormat == "MPS") { - rowName = m_mpsIO.rowName(r); - } else if (m_param.InstanceFormat == "LP") { - rowName = m_lpIO.rowName(r); - } - - if (rowName) { - model->rowNames.push_back(rowName); - } - } +void DecompApp::createModelPart(DecompConstraintSet *model, const int nRowsPart, + const int *rowsPart) { + int nCols = 0; + double *rowLB = NULL; + double *rowUB = NULL; + double *colLB = NULL; + double *colUB = NULL; + char *integerVars = NULL; + + if (m_param.InstanceFormat == "MPS") { + nCols = m_mpsIO.getNumCols(); + rowLB = const_cast(m_mpsIO.getRowLower()); + rowUB = const_cast(m_mpsIO.getRowUpper()); + colLB = const_cast(m_mpsIO.getColLower()); + colUB = const_cast(m_mpsIO.getColUpper()); + integerVars = const_cast(m_mpsIO.integerColumns()); + } else if (m_param.InstanceFormat == "LP") { + nCols = m_lpIO.getNumCols(); + rowLB 
= const_cast(m_lpIO.getRowLower()); + rowUB = const_cast(m_lpIO.getRowUpper()); + colLB = const_cast(m_lpIO.getColLower()); + colUB = const_cast(m_lpIO.getColUpper()); + integerVars = const_cast(m_lpIO.integerColumns()); + } + + model->M = new CoinPackedMatrix(false, 0.0, 0.0); + + if (!model->M) { + throw UtilExceptionMemory("createModelPart", "DecompApp"); + } + + model->reserve(nRowsPart, nCols); + + if (m_param.InstanceFormat == "MPS") { + model->M->submatrixOf(*m_mpsIO.getMatrixByRow(), nRowsPart, rowsPart); + } else if (m_param.InstanceFormat == "LP") { + model->M->submatrixOf(*m_lpIO.getMatrixByRow(), nRowsPart, rowsPart); + } + + //--- + //--- set the row upper and lower bounds + //--- set the col upper and lower bounds + //--- + int i, r; + + for (i = 0; i < nRowsPart; i++) { + r = rowsPart[i]; + + if (m_param.UseNames) { + const char *rowName = NULL; - model->rowLB.push_back(rowLB[r]); - model->rowUB.push_back(rowUB[r]); - } - - copy(colLB, colLB + nCols, back_inserter( model->colLB) ); - copy(colUB, colUB + nCols, back_inserter( model->colUB) ); - - //--- - //--- big fat hack... we don't deal with dual rays yet, - //--- so, we assume subproblems are bounded - //--- - //--- NOTE: might also need to tighten LBs - //--- - //--- Too small - ATM infeasible! - //--- Too big - round off issues with big coeffs in - //--- master-only vars - //--- - //--- TODO: need extreme rays or bounded subproblems from user - //--- - if (m_param.ColumnUB < 1.0e15) { - for (i = 0; i < nCols; i++) { - if (colUB[i] > 1.0e15) { - model->colUB[i] = m_param.ColumnUB; - } + if (m_param.InstanceFormat == "MPS") { + rowName = m_mpsIO.rowName(r); + } else if (m_param.InstanceFormat == "LP") { + rowName = m_lpIO.rowName(r); } - } - if (m_param.ColumnLB > -1.0e15) { - for (i = 0; i < nCols; i++) { - if (colLB[i] < -1.0e15) { - model->colLB[i] = m_param.ColumnLB; - } + if (rowName) { + model->rowNames.push_back(rowName); } - } - - //--- - //--- set the indices of the integer variables of modelRelax - //--- also set the column names, if they exist - //--- - for (i = 0; i < nCols; i++) { - if (m_param.UseNames) { - const char* colName = NULL; - - if (m_param.InstanceFormat == "MPS") { - colName = m_mpsIO.columnName(i); - } else if (m_param.InstanceFormat == "LP") { - colName = m_lpIO.columnName(i); - } - - if (colName) { - model->colNames.push_back(colName); - } + } + + model->rowLB.push_back(rowLB[r]); + model->rowUB.push_back(rowUB[r]); + } + + copy(colLB, colLB + nCols, back_inserter(model->colLB)); + copy(colUB, colUB + nCols, back_inserter(model->colUB)); + + //--- + //--- big fat hack... we don't deal with dual rays yet, + //--- so, we assume subproblems are bounded + //--- + //--- NOTE: might also need to tighten LBs + //--- + //--- Too small - ATM infeasible! 
+ //--- Too big - round off issues with big coeffs in + //--- master-only vars + //--- + //--- TODO: need extreme rays or bounded subproblems from user + //--- + if (m_param.ColumnUB < 1.0e15) { + for (i = 0; i < nCols; i++) { + if (colUB[i] > 1.0e15) { + model->colUB[i] = m_param.ColumnUB; } + } + } - if (integerVars && integerVars[i]) { - model->integerVars.push_back(i); + if (m_param.ColumnLB > -1.0e15) { + for (i = 0; i < nCols; i++) { + if (colLB[i] < -1.0e15) { + model->colLB[i] = m_param.ColumnLB; } - } -} + } + } + //--- + //--- set the indices of the integer variables of modelRelax + //--- also set the column names, if they exist + //--- + for (i = 0; i < nCols; i++) { + if (m_param.UseNames) { + const char *colName = NULL; + if (m_param.InstanceFormat == "MPS") { + colName = m_mpsIO.columnName(i); + } else if (m_param.InstanceFormat == "LP") { + colName = m_lpIO.columnName(i); + } -void DecompApp::createModelPartSparse(DecompConstraintSet* model, - const int nRowsPart, - const int* rowsPart) -{ - int nColsOrig = 0; - double* rowLB = NULL; - double* rowUB = NULL; - double* colLB = NULL; - double* colUB = NULL; - char* integerVars = NULL; - - if (m_param.InstanceFormat == "MPS") { - nColsOrig = m_mpsIO.getNumCols(); - rowLB = const_cast(m_mpsIO.getRowLower()); - rowUB = const_cast(m_mpsIO.getRowUpper()); - colLB = const_cast(m_mpsIO.getColLower()); - colUB = const_cast(m_mpsIO.getColUpper()); - integerVars = const_cast (m_mpsIO.integerColumns()); - } else if (m_param.InstanceFormat == "LP") { - nColsOrig = m_lpIO.getNumCols(); - rowLB = const_cast(m_lpIO.getRowLower()); - rowUB = const_cast(m_lpIO.getRowUpper()); - colLB = const_cast(m_lpIO.getColLower()); - colUB = const_cast(m_lpIO.getColUpper()); - integerVars = const_cast (m_lpIO.integerColumns()); - } - - //--- - //--- set model as sparse - //--- - model->setSparse(nColsOrig); - bool isInteger; - int nCols, origIndex, newIndex; - vector::iterator vit; - newIndex = 0; - - for (vit = model->activeColumns.begin(); - vit != model->activeColumns.end(); vit++) { - origIndex = *vit; - - if (integerVars && integerVars[origIndex]) { - isInteger = true; - } else { - isInteger = false; + if (colName) { + model->colNames.push_back(colName); } + } - model->pushCol(colLB[origIndex], - colUB[origIndex], - isInteger, - origIndex); + if (integerVars && integerVars[i]) { + model->integerVars.push_back(i); + } + } +} - //--- - //--- big fat hack... 
we don't deal with dual rays yet, - //--- so, we assume subproblems are bounded - //--- - if (m_param.ColumnUB < 1.0e15) { - if (colUB[origIndex] > 1.0e15) { - model->colUB[newIndex] = m_param.ColumnUB; - } +void DecompApp::createModelPartSparse(DecompConstraintSet *model, + const int nRowsPart, + const int *rowsPart) { + int nColsOrig = 0; + double *rowLB = NULL; + double *rowUB = NULL; + double *colLB = NULL; + double *colUB = NULL; + char *integerVars = NULL; + + if (m_param.InstanceFormat == "MPS") { + nColsOrig = m_mpsIO.getNumCols(); + rowLB = const_cast(m_mpsIO.getRowLower()); + rowUB = const_cast(m_mpsIO.getRowUpper()); + colLB = const_cast(m_mpsIO.getColLower()); + colUB = const_cast(m_mpsIO.getColUpper()); + integerVars = const_cast(m_mpsIO.integerColumns()); + } else if (m_param.InstanceFormat == "LP") { + nColsOrig = m_lpIO.getNumCols(); + rowLB = const_cast(m_lpIO.getRowLower()); + rowUB = const_cast(m_lpIO.getRowUpper()); + colLB = const_cast(m_lpIO.getColLower()); + colUB = const_cast(m_lpIO.getColUpper()); + integerVars = const_cast(m_lpIO.integerColumns()); + } + + //--- + //--- set model as sparse + //--- + model->setSparse(nColsOrig); + bool isInteger; + int nCols, origIndex, newIndex; + vector::iterator vit; + newIndex = 0; + + for (vit = model->activeColumns.begin(); vit != model->activeColumns.end(); + vit++) { + origIndex = *vit; + + if (integerVars && integerVars[origIndex]) { + isInteger = true; + } else { + isInteger = false; + } + + model->pushCol(colLB[origIndex], colUB[origIndex], isInteger, origIndex); + + //--- + //--- big fat hack... we don't deal with dual rays yet, + //--- so, we assume subproblems are bounded + //--- + if (m_param.ColumnUB < 1.0e15) { + if (colUB[origIndex] > 1.0e15) { + model->colUB[newIndex] = m_param.ColumnUB; } + } - if (m_param.ColumnLB > -1.0e15) { - if (colLB[origIndex] < -1.0e15) { - model->colLB[newIndex] = m_param.ColumnLB; - } + if (m_param.ColumnLB > -1.0e15) { + if (colLB[origIndex] < -1.0e15) { + model->colLB[newIndex] = m_param.ColumnLB; } + } - if (m_param.UseNames) { - const char* colName = NULL; + if (m_param.UseNames) { + const char *colName = NULL; - if (m_param.InstanceFormat == "MPS") { - colName = m_mpsIO.columnName(origIndex); - } else if (m_param.InstanceFormat == "LP") { - colName = m_lpIO.columnName(origIndex); - } - - if (colName) { - model->colNames.push_back(colName); - } + if (m_param.InstanceFormat == "MPS") { + colName = m_mpsIO.columnName(origIndex); + } else if (m_param.InstanceFormat == "LP") { + colName = m_lpIO.columnName(origIndex); } - newIndex++; - } + if (colName) { + model->colNames.push_back(colName); + } + } - nCols = static_cast(model->activeColumns.size()); - assert(static_cast(model->colLB.size()) == nCols); - assert(static_cast(model->colUB.size()) == nCols); - model->M = new CoinPackedMatrix(false, 0.0, 0.0); + newIndex++; + } - if (!model->M) { - throw UtilExceptionMemory("createModelPartSparse", "DecompApp"); - } + nCols = static_cast(model->activeColumns.size()); + assert(static_cast(model->colLB.size()) == nCols); + assert(static_cast(model->colUB.size()) == nCols); + model->M = new CoinPackedMatrix(false, 0.0, 0.0); - model->M->setDimensions(0, nCols); - model->reserve(nRowsPart, nCols); - //--- - //--- for each row in rowsPart, create the row using sparse mapping - //--- - int i, k, r, begInd; - const map& origToSparse = model->getMapOrigToSparse(); - const CoinPackedMatrix* M = NULL; + if (!model->M) { + throw UtilExceptionMemory("createModelPartSparse", "DecompApp"); + } - if 
(m_param.InstanceFormat == "MPS") { - M = m_mpsIO.getMatrixByRow(); - } else if (m_param.InstanceFormat == "LP") { - M = m_lpIO.getMatrixByRow(); - } + model->M->setDimensions(0, nCols); + model->reserve(nRowsPart, nCols); + //--- + //--- for each row in rowsPart, create the row using sparse mapping + //--- + int i, k, r, begInd; + const map &origToSparse = model->getMapOrigToSparse(); + const CoinPackedMatrix *M = NULL; - const int* matInd = M->getIndices(); + if (m_param.InstanceFormat == "MPS") { + M = m_mpsIO.getMatrixByRow(); + } else if (m_param.InstanceFormat == "LP") { + M = m_lpIO.getMatrixByRow(); + } - const CoinBigIndex* matBeg = M->getVectorStarts(); + const int *matInd = M->getIndices(); - const int* matLen = M->getVectorLengths(); + const CoinBigIndex *matBeg = M->getVectorStarts(); - const double* matVal = M->getElements(); + const int *matLen = M->getVectorLengths(); - const int* matIndI = NULL; + const double *matVal = M->getElements(); - const double* matValI = NULL; + const int *matIndI = NULL; - vector& rowBeg = model->m_rowBeg;//used as temp + const double *matValI = NULL; - vector& rowInd = model->m_rowInd;//used as temp + vector &rowBeg = model->m_rowBeg; // used as temp - vector& rowVal = model->m_rowVal;//used as temp + vector &rowInd = model->m_rowInd; // used as temp - map::const_iterator mit; + vector &rowVal = model->m_rowVal; // used as temp - begInd = 0; + map::const_iterator mit; - rowBeg.push_back(0); + begInd = 0; - for (i = 0; i < nRowsPart; i++) { - r = rowsPart[i]; + rowBeg.push_back(0); - if (m_param.UseNames) { - const char* rowName = NULL; + for (i = 0; i < nRowsPart; i++) { + r = rowsPart[i]; - if (m_param.InstanceFormat == "MPS") { - rowName = m_mpsIO.rowName(r); - } else if (m_param.InstanceFormat == "LP") { - rowName = m_lpIO.rowName(r); - } + if (m_param.UseNames) { + const char *rowName = NULL; - if (rowName) { - model->rowNames.push_back(rowName); - } + if (m_param.InstanceFormat == "MPS") { + rowName = m_mpsIO.rowName(r); + } else if (m_param.InstanceFormat == "LP") { + rowName = m_lpIO.rowName(r); } - model->rowLB.push_back(rowLB[r]); - model->rowUB.push_back(rowUB[r]); - matIndI = matInd + matBeg[r]; - matValI = matVal + matBeg[r]; - - for (k = 0; k < matLen[r]; k++) { - origIndex = matIndI[k]; - mit = origToSparse.find(origIndex); - assert(mit != origToSparse.end()); - rowInd.push_back(mit->second); - rowVal.push_back(matValI[k]); + if (rowName) { + model->rowNames.push_back(rowName); } - - begInd += matLen[r]; - rowBeg.push_back(begInd); - } - - model->M->appendRows(nRowsPart, - &rowBeg[0], - &rowInd[0], - &rowVal[0]); - rowBeg.clear(); - rowInd.clear(); - rowVal.clear(); + } + + model->rowLB.push_back(rowLB[r]); + model->rowUB.push_back(rowUB[r]); + matIndI = matInd + matBeg[r]; + matValI = matVal + matBeg[r]; + + for (k = 0; k < matLen[r]; k++) { + origIndex = matIndI[k]; + mit = origToSparse.find(origIndex); + assert(mit != origToSparse.end()); + rowInd.push_back(mit->second); + rowVal.push_back(matValI[k]); + } + + begInd += matLen[r]; + rowBeg.push_back(begInd); + } + + model->M->appendRows(nRowsPart, &rowBeg[0], &rowInd[0], &rowVal[0]); + rowBeg.clear(); + rowInd.clear(); + rowVal.clear(); } - -void DecompApp::createModels() -{ - UtilPrintFuncBegin(m_osLog, m_classTag, - "createModels()", m_param.LogLevel, 2); - //--- - //--- how many rows to put into relaxation - //--- - int i, nRowsRelax, nRowsCore; - int nRows = 0; - int nCols = 0; - - if (m_param.InstanceFormat == "MPS") { - nRows = m_mpsIO.getNumRows(); - nCols = 
m_mpsIO.getNumCols(); - } else if (m_param.InstanceFormat == "LP") { - nRows = m_lpIO.getNumRows(); - nCols = m_lpIO.getNumCols(); - } - - int nBlocks = static_cast(m_blocks.size()); - map >::iterator mit; - nRowsRelax = 0; - - for (mit = m_blocks.begin(); mit != m_blocks.end(); mit++) { - nRowsRelax += static_cast((*mit).second.size()); - } - - nRowsCore = nRows - nRowsRelax; - UTIL_MSG(m_param.LogLevel, 2, - (*m_osLog) << "Instance = " << m_param.Instance << endl; - (*m_osLog) << " nRows = " << nRows << endl; - (*m_osLog) << " nCols = " << nCols << endl; - (*m_osLog) << " nBlocks = " << nBlocks << endl; - (*m_osLog) << " nRowsCore = " << nRowsCore << endl; - (*m_osLog) << " nRowsRelax = " << nRowsRelax - << " [ " << 100 * nRowsRelax / nRows << " % ]" << endl; - ); - //--- - //--- setup markers for core and relax rows - //--- - int* rowsMarker = new int[nRows]; - int* rowsCore = new int[nRowsCore]; - UtilFillN(rowsMarker, nRows, -1);//-1 will mark core rows - - for (mit = m_blocks.begin(); mit != m_blocks.end(); mit++) { - vector& rowsRelax = (*mit).second; - vector::iterator vit; - - for (vit = rowsRelax.begin(); vit != rowsRelax.end(); vit++) { - rowsMarker[*vit] = (*mit).first; - } - } - - int nRowsCoreTmp = 0; - - for (i = 0; i < nRows; i++) { - if (rowsMarker[i] == -1) { - rowsCore[nRowsCoreTmp++] = i; - } - } - - assert(nRowsCoreTmp == nRowsCore); - UTIL_MSG(m_param.LogLevel, 3, - (*m_osLog) << "Core Rows:"; - - for (i = 0; i < nRowsCore; i++) - (*m_osLog) << rowsCore[i] << " "; - (*m_osLog) << "\n"; - ); - - //--- - //--- Construct the objective function. - //--- - double* objective = new double[nCols]; - - if (!objective) { - throw UtilExceptionMemory("createModels", "DecompApp"); - } - - if (m_param.InstanceFormat == "MPS") { - memcpy(objective, - m_mpsIO.getObjCoefficients(), nCols * sizeof(double)); - } else if (m_param.InstanceFormat == "LP") { - memcpy(objective, - m_lpIO.getObjCoefficients(), nCols * sizeof(double)); - } - - if (m_param.ObjectiveSense == -1) { - for (i = 0; i < nCols; i++) { - objective[i] *= -1; - } - } - - setModelObjective(objective, nCols); - //--- - //--- Construct the core matrix. - //--- - DecompConstraintSet* modelCore = new DecompConstraintSet(); - createModelPart(modelCore, nRowsCore, rowsCore); - //--- - //--- save a pointer so we can delete it later - //--- - m_modelC = modelCore; - - //--- - //--- Construct the relaxation matrices. 
- //--- - for (mit = m_blocks.begin(); mit != m_blocks.end(); mit++) { - vector& rowsRelax = (*mit).second; - int nRowsRelax = static_cast(rowsRelax.size()); - - if (m_param.LogLevel >= 1) - (*m_osLog) << "Create model part nRowsRelax = " - << nRowsRelax << " (Block=" << (*mit).first << ")" << endl; - - DecompConstraintSet* modelRelax = new DecompConstraintSet(); - CoinAssertHint(modelRelax, "Error: Out of Memory"); +void DecompApp::createModels() { + UtilPrintFuncBegin(m_osLog, m_classTag, "createModels()", m_param.LogLevel, + 2); + //--- + //--- how many rows to put into relaxation + //--- + int i, nRowsRelax, nRowsCore; + int nRows = 0; + int nCols = 0; + + if (m_param.InstanceFormat == "MPS") { + nRows = m_mpsIO.getNumRows(); + nCols = m_mpsIO.getNumCols(); + } else if (m_param.InstanceFormat == "LP") { + nRows = m_lpIO.getNumRows(); + nCols = m_lpIO.getNumCols(); + } + + int nBlocks = static_cast(m_blocks.size()); + map>::iterator mit; + nRowsRelax = 0; + + for (mit = m_blocks.begin(); mit != m_blocks.end(); mit++) { + nRowsRelax += static_cast((*mit).second.size()); + } + + nRowsCore = nRows - nRowsRelax; + UTIL_MSG(m_param.LogLevel, 2, + (*m_osLog) << "Instance = " << m_param.Instance << endl; + (*m_osLog) << " nRows = " << nRows << endl; + (*m_osLog) << " nCols = " << nCols << endl; + (*m_osLog) << " nBlocks = " << nBlocks << endl; + (*m_osLog) << " nRowsCore = " << nRowsCore << endl; + (*m_osLog) << " nRowsRelax = " << nRowsRelax << " [ " + << 100 * nRowsRelax / nRows << " % ]" << endl;); + //--- + //--- setup markers for core and relax rows + //--- + int *rowsMarker = new int[nRows]; + int *rowsCore = new int[nRowsCore]; + UtilFillN(rowsMarker, nRows, -1); //-1 will mark core rows + + for (mit = m_blocks.begin(); mit != m_blocks.end(); mit++) { + vector &rowsRelax = (*mit).second; + vector::iterator vit; + + for (vit = rowsRelax.begin(); vit != rowsRelax.end(); vit++) { + rowsMarker[*vit] = (*mit).first; + } + } + + int nRowsCoreTmp = 0; + + for (i = 0; i < nRows; i++) { + if (rowsMarker[i] == -1) { + rowsCore[nRowsCoreTmp++] = i; + } + } + + assert(nRowsCoreTmp == nRowsCore); + UTIL_MSG(m_param.LogLevel, 3, (*m_osLog) << "Core Rows:"; + + for (i = 0; i < nRowsCore; i++)(*m_osLog) << rowsCore[i] << " "; + (*m_osLog) << "\n";); + + //--- + //--- Construct the objective function. + //--- + double *objective = new double[nCols]; + + if (!objective) { + throw UtilExceptionMemory("createModels", "DecompApp"); + } + + if (m_param.InstanceFormat == "MPS") { + memcpy(objective, m_mpsIO.getObjCoefficients(), nCols * sizeof(double)); + } else if (m_param.InstanceFormat == "LP") { + memcpy(objective, m_lpIO.getObjCoefficients(), nCols * sizeof(double)); + } + + if (m_param.ObjectiveSense == -1) { + for (i = 0; i < nCols; i++) { + objective[i] *= -1; + } + } + + setModelObjective(objective, nCols); + //--- + //--- Construct the core matrix. + //--- + DecompConstraintSet *modelCore = new DecompConstraintSet(); + createModelPart(modelCore, nRowsCore, rowsCore); + //--- + //--- save a pointer so we can delete it later + //--- + m_modelC = modelCore; + + //--- + //--- Construct the relaxation matrices. 
+ //--- + for (mit = m_blocks.begin(); mit != m_blocks.end(); mit++) { + vector &rowsRelax = (*mit).second; + int nRowsRelax = static_cast(rowsRelax.size()); + + if (m_param.LogLevel >= 1) + (*m_osLog) << "Create model part nRowsRelax = " << nRowsRelax + << " (Block=" << (*mit).first << ")" << endl; + + DecompConstraintSet *modelRelax = new DecompConstraintSet(); + CoinAssertHint(modelRelax, "Error: Out of Memory"); + //--- + //--- find and set active columns + //--- + set::iterator sit; + set activeColsSet; + findActiveColumns(rowsRelax, activeColsSet); + + for (sit = activeColsSet.begin(); sit != activeColsSet.end(); sit++) { + modelRelax->activeColumns.push_back(*sit); + } + + if (m_param.UseSparse) { //--- - //--- find and set active columns + //--- create model part (using sparse API) //--- - set::iterator sit; - set activeColsSet; - findActiveColumns(rowsRelax, activeColsSet); - - for (sit = activeColsSet.begin(); sit != activeColsSet.end(); sit++) { - modelRelax->activeColumns.push_back(*sit); - } - - if (m_param.UseSparse) { - //--- - //--- create model part (using sparse API) - //--- - createModelPartSparse(modelRelax, nRowsRelax, &rowsRelax[0]); - } else { - //--- - //--- create model part (using dense API) - //--- - createModelPart(modelRelax, nRowsRelax, &rowsRelax[0]); - } - + createModelPartSparse(modelRelax, nRowsRelax, &rowsRelax[0]); + } else { //--- - //--- save a pointer so we can delete it later + //--- create model part (using dense API) //--- - m_modelR.insert(make_pair((*mit).first, modelRelax)); - } - - //--- - //--- figure out which columns are not active in any subprobs - //--- we refer to these as "master-only" variables - //--- - int* colMarker = new int[nCols]; - - if (!colMarker) { - throw UtilExceptionMemory("createModels", "DecompApp"); - } - - UtilFillN(colMarker, nCols, 0); - vector ::iterator vi; - map ::iterator mdi; - - for (mdi = m_modelR.begin(); mdi != m_modelR.end(); mdi++) { - vector& activeColumns = (*mdi).second->activeColumns; - - for (vi = activeColumns.begin(); vi != activeColumns.end(); vi++) { - colMarker[*vi] = 1; - } - } - - // find master Only Cols - for (i = 0; i < nCols; i++) { - if (!colMarker[i]) { - if (m_param.LogLevel >= 3) { - if (modelCore->getColNames().size() > 0) - (*m_osLog) << "Column " << setw(5) << i << " -> " - << setw(25) << modelCore->colNames[i] - << " is not in union of blocks." << endl; - } - - modelCore->masterOnlyCols.push_back(i); + createModelPart(modelRelax, nRowsRelax, &rowsRelax[0]); + } + + //--- + //--- save a pointer so we can delete it later + //--- + m_modelR.insert(make_pair((*mit).first, modelRelax)); + } + + //--- + //--- figure out which columns are not active in any subprobs + //--- we refer to these as "master-only" variables + //--- + int *colMarker = new int[nCols]; + + if (!colMarker) { + throw UtilExceptionMemory("createModels", "DecompApp"); + } + + UtilFillN(colMarker, nCols, 0); + vector::iterator vi; + map::iterator mdi; + + for (mdi = m_modelR.begin(); mdi != m_modelR.end(); mdi++) { + vector &activeColumns = (*mdi).second->activeColumns; + + for (vi = activeColumns.begin(); vi != activeColumns.end(); vi++) { + colMarker[*vi] = 1; + } + } + + // find master Only Cols + for (i = 0; i < nCols; i++) { + if (!colMarker[i]) { + if (m_param.LogLevel >= 3) { + if (modelCore->getColNames().size() > 0) + (*m_osLog) << "Column " << setw(5) << i << " -> " << setw(25) + << modelCore->colNames[i] << " is not in union of blocks." 
+ << endl; } - } - if (m_param.LogLevel >= 3) { - (*m_osLog) << "Master only columns:" << endl; - UtilPrintVector(modelCore->masterOnlyCols, m_osLog); + modelCore->masterOnlyCols.push_back(i); + } + } + + if (m_param.LogLevel >= 3) { + (*m_osLog) << "Master only columns:" << endl; + UtilPrintVector(modelCore->masterOnlyCols, m_osLog); + + if (modelCore->getColNames().size() > 0) + UtilPrintVector(modelCore->masterOnlyCols, modelCore->getColNames(), + m_osLog); + } + + if (m_param.LogLevel >= 3) { + std::cout << "the number of masterOnlyCols is " + << modelCore->masterOnlyCols.size() << std::endl; + } + + //--- + //--- set core and system in framework + //--- + setModelCore(modelCore, "core"); + + for (mdi = m_modelR.begin(); mdi != m_modelR.end(); mdi++) { + DecompConstraintSet *modelRelax = (*mdi).second; + //--- + //--- set system in framework + //--- + setModelRelax((*mdi).second, "relax" + UtilIntToStr((*mdi).first), + (*mdi).first); + + if (m_param.LogLevel >= 3) { + (*m_osLog) << "Active Columns:" << endl; + UtilPrintVector(modelRelax->activeColumns, m_osLog); if (modelCore->getColNames().size() > 0) - UtilPrintVector(modelCore->masterOnlyCols, - modelCore->getColNames(), m_osLog); - } - - if (m_param.LogLevel >= 3) { - std::cout << "the number of masterOnlyCols is " << modelCore->masterOnlyCols.size() - << std::endl ; - } - - //--- - //--- set core and system in framework - //--- - setModelCore(modelCore, "core"); - - for (mdi = m_modelR.begin(); mdi != m_modelR.end(); mdi++) { - DecompConstraintSet* modelRelax = (*mdi).second; - //--- - //--- set system in framework - //--- - setModelRelax((*mdi).second, - "relax" + UtilIntToStr((*mdi).first), - (*mdi).first); - - if (m_param.LogLevel >= 3) { - (*m_osLog) << "Active Columns:" << endl; - UtilPrintVector(modelRelax->activeColumns, m_osLog); - - if (modelCore->getColNames().size() > 0) - UtilPrintVector(modelRelax->activeColumns, - modelCore->getColNames(), m_osLog); - } - } - - //--- - //--- free up local memory - //--- - UTIL_DELARR(objective); - UTIL_DELARR(rowsMarker); - UTIL_DELARR(rowsCore); - UTIL_DELARR(colMarker); - UtilPrintFuncEnd(m_osLog, m_classTag, - "createModels()", m_param.LogLevel, 2); - //exit(1); + UtilPrintVector(modelRelax->activeColumns, modelCore->getColNames(), + m_osLog); + } + } + + //--- + //--- free up local memory + //--- + UTIL_DELARR(objective); + UTIL_DELARR(rowsMarker); + UTIL_DELARR(rowsCore); + UTIL_DELARR(colMarker); + UtilPrintFuncEnd(m_osLog, m_classTag, "createModels()", m_param.LogLevel, 2); + // exit(1); } /* int DecompApp::generateInitVars(DecompVarList & initVars){ UtilPrintFuncBegin(m_osLog, m_classTag, - "generateInitVars()", m_param.LogLevel, 2); + "generateInitVars()", m_param.LogLevel, 2); readInitSolutionFile(initVars); @@ -1334,456 +1284,442 @@ int DecompApp::generateInitVars(DecompVarList & initVars){ */ -void DecompApp::singlyBorderStructureDetection() -{ - std::ofstream blockdata; - std::string BlockFile; - BlockFile = m_param.Instance + '.' 
+ "block"; - - if (m_param.BlockFileOutput) { - blockdata.open (BlockFile.c_str()); - } - - //====================================================================== - // Using Row-net hypergraph model for automatic matrix decomposition - //====================================================================== - int numRows = 0; - int numCols = 0; - int numElements = 0; - - if (m_param.InstanceFormat == "MPS") { - numRows = m_mpsIO.getNumRows(); - numCols = m_mpsIO.getNumCols(); - numElements = m_mpsIO.getNumElements(); - } else if (m_param.InstanceFormat == "LP") { - numRows = m_mpsIO.getNumRows(); - numCols = m_mpsIO.getNumCols(); - numElements = m_mpsIO.getNumElements(); - } - - // get the column/row index for by-row matrix - const int* minorIndex = m_matrix->getIndices(); - const int* majorIndex = m_matrix->getMajorIndices(); - UTIL_MSG(m_param.LogLevel, 2, - (*m_osLog) - << "The number of rows is " << numRows << "\n" - << "The number of columns is " << numCols << "\n" - << "The number of elements is " << numElements - << "\n"; - ); - // Here assume the matrix to be partitioned into singly-bordered - // diagonal matrix. ToDo: complete the Doubly-bordered diagonal - // matrix mapping - // the number of vertices - int numVertices ; - // The number of hyperedges - int numHyperedges; +void DecompApp::singlyBorderStructureDetection() { + std::ofstream blockdata; + std::string BlockFile; + BlockFile = m_param.Instance + '.' + "block"; + + if (m_param.BlockFileOutput) { + blockdata.open(BlockFile.c_str()); + } + + //====================================================================== + // Using Row-net hypergraph model for automatic matrix decomposition + //====================================================================== + int numRows = 0; + int numCols = 0; + int numElements = 0; + + if (m_param.InstanceFormat == "MPS") { + numRows = m_mpsIO.getNumRows(); + numCols = m_mpsIO.getNumCols(); + numElements = m_mpsIO.getNumElements(); + } else if (m_param.InstanceFormat == "LP") { + numRows = m_mpsIO.getNumRows(); + numCols = m_mpsIO.getNumCols(); + numElements = m_mpsIO.getNumElements(); + } + + // get the column/row index for by-row matrix + const int *minorIndex = m_matrix->getIndices(); + const int *majorIndex = m_matrix->getMajorIndices(); + UTIL_MSG(m_param.LogLevel, 2, + (*m_osLog) << "The number of rows is " << numRows << "\n" + << "The number of columns is " << numCols << "\n" + << "The number of elements is " << numElements << "\n";); + // Here assume the matrix to be partitioned into singly-bordered + // diagonal matrix. ToDo: complete the Doubly-bordered diagonal + // matrix mapping + // the number of vertices + int numVertices; + // The number of hyperedges + int numHyperedges; #ifdef doublyBordered - numVertices = numElements; - numHyperedges = numRows + numCols; + numVertices = numElements; + numHyperedges = numRows + numCols; #else - // default singlyBordered - numVertices = numElements ; - numHyperedges = numRows; + // default singlyBordered + numVertices = numElements; + numHyperedges = numRows; #endif - /* - throw UtilException("Please provide a valid partitioning model" , - "initializeApp", "DecompApp"); + /* + throw UtilException("Please provide a valid partitioning model" , + "initializeApp", "DecompApp"); + */ + //====================================================================== + // The code below prepares the input parameters of the hypergraph + // partitioning. 
for details , please refer to + // + // http://glaros.dtc.umn.edu/gkhome/fetch/sw/hmetis/manual.pdf + // + //====================================================================== + // pointer to hyperedge + int *eptr = new int[numHyperedges + 1]; + // the length vector for each row (number of nonzero elements + // in each row) + const int *lengthRows = m_matrix->getVectorLengths(); + /* + assigning the pointer to hyperedges, indicating the number of + vertices in each hyperedge + */ + eptr[0] = 0; + + for (int k = 1; k < numHyperedges + 1; k++) { + eptr[k] = eptr[k - 1] + lengthRows[k - 1]; + } + + assert(eptr[numHyperedges] == numVertices); + // declaring the hyperedges, which correspond to the rows in the matrix + int *eind = new int[numVertices]; + + for (int i = 0; i < numVertices; i++) { + eind[i] = minorIndex[i]; + } + + // declaring the number of partitions + int nparts = NumBlocks; + // weights of vertices and hyperedges + int *vwgts = new int[numVertices]; + int *hewgts = new int[numHyperedges]; + /* + * declare boolean variables indicating whether the nonzero elements + * are integer or not (corresponding to the vertices), the row in + * the matrix has integer columns (corresponding to the hyperedges) + * or not */ - //====================================================================== - // The code below prepares the input parameters of the hypergraph - // partitioning. for details , please refer to - // - // http://glaros.dtc.umn.edu/gkhome/fetch/sw/hmetis/manual.pdf - // - //====================================================================== - // pointer to hyperedge - int* eptr = new int[numHyperedges + 1]; - // the length vector for each row (number of nonzero elements - // in each row) - const int* lengthRows = m_matrix->getVectorLengths(); - /* - assigning the pointer to hyperedges, indicating the number of - vertices in each hyperedge - */ - eptr[0] = 0; - - for (int k = 1 ; k < numHyperedges + 1; k ++ ) { - eptr[k] = eptr[k - 1] + lengthRows[k - 1]; - } - - assert(eptr[numHyperedges] == numVertices); - // declaring the hyperedges, which correspond to the rows in the matrix - int* eind = new int[numVertices]; - - for (int i = 0; i < numVertices; i ++) { - eind[i] = minorIndex[i]; - } - - // declaring the number of partitions - int nparts = NumBlocks; - // weights of vertices and hyperedges - int* vwgts = new int[numVertices] ; - int* hewgts = new int[numHyperedges] ; - /* - * declare boolean variables indicating whether the nonzero elements - * are integer or not (corresponding to the vertices), the row in - * the matrix has integer columns (corresponding to the hyperedges) - * or not - */ - bool* intVertices = new bool[numVertices]; - bool* intHyperedges = new bool[numHyperedges]; - - // initialization - for (int i = 0 ; i < numHyperedges; i ++) { - intHyperedges[i] = false ; - } - - // the index of the original consstraint matrix - int index_base = 0 ; - int index = 0; - // counter of integer variables - int intCounter = 0 ; - // vector containing the number of integer elements in each row - int* intLengthRows = new int[numRows]; - - for (int i = 0 ; i < numRows ; i ++) { - intLengthRows[i] = 0 ; - } - - bool isInteger = false; - - for (int i = 0; i < numRows ; i ++) { - intCounter = 0; - - for ( int j = 0 ; j < lengthRows[i] ; j ++ ) { - index = index_base + j ; - - // determine whether the corresponding column is - // integer or not - if (m_param.InstanceFormat == "MPS") { - isInteger = m_mpsIO.isInteger(minorIndex[index]); - } else if (m_param.InstanceFormat 
== "LP") { - isInteger = m_lpIO.isInteger(minorIndex[index]); - } - - if (isInteger) { - intVertices[index] = true; - intHyperedges[majorIndex[index]] = true; - intCounter ++; - } else { - intVertices[index] = false; - } - } + bool *intVertices = new bool[numVertices]; + bool *intHyperedges = new bool[numHyperedges]; - index_base = index_base + lengthRows[i]; - intLengthRows[i] = intCounter ; - } + // initialization + for (int i = 0; i < numHyperedges; i++) { + intHyperedges[i] = false; + } - /* - * define the weight parameter in the hypergraph - */ - // assign the weights on vertices - for (int i = 0 ; i < numVertices ; i ++) { -#ifdef VARIABLE_WEIGHT + // the index of the original consstraint matrix + int index_base = 0; + int index = 0; + // counter of integer variables + int intCounter = 0; + // vector containing the number of integer elements in each row + int *intLengthRows = new int[numRows]; + + for (int i = 0; i < numRows; i++) { + intLengthRows[i] = 0; + } - if (intVertices[i]) { - vwgts[i] = 2 ; + bool isInteger = false; + + for (int i = 0; i < numRows; i++) { + intCounter = 0; + + for (int j = 0; j < lengthRows[i]; j++) { + index = index_base + j; + + // determine whether the corresponding column is + // integer or not + if (m_param.InstanceFormat == "MPS") { + isInteger = m_mpsIO.isInteger(minorIndex[index]); + } else if (m_param.InstanceFormat == "LP") { + isInteger = m_lpIO.isInteger(minorIndex[index]); + } + + if (isInteger) { + intVertices[index] = true; + intHyperedges[majorIndex[index]] = true; + intCounter++; } else { - vwgts[i] = 1; + intVertices[index] = false; } + } -#else + index_base = index_base + lengthRows[i]; + intLengthRows[i] = intCounter; + } + + /* + * define the weight parameter in the hypergraph + */ + // assign the weights on vertices + for (int i = 0; i < numVertices; i++) { +#ifdef VARIABLE_WEIGHT + + if (intVertices[i]) { + vwgts[i] = 2; + } else { vwgts[i] = 1; + } + +#else + vwgts[i] = 1; #endif - } + } - // assign the weights on hyperedges - for (int i = 0 ; i < numHyperedges; i ++) { + // assign the weights on hyperedges + for (int i = 0; i < numHyperedges; i++) { #ifdef VARIABLE_WEIGHT - if (intHyperedges[i]) { - hewgts[i] = 2 * lengthRows[i]; - } else { - hewgts[i] = 1; - } + if (intHyperedges[i]) { + hewgts[i] = 2 * lengthRows[i]; + } else { + hewgts[i] = 1; + } #else - hewgts[i] = 1; + hewgts[i] = 1; #endif - } - - // part is an array of size nvtxs that returns the computed partition - int* part = new int[numElements]; - // edgecut is the number of hyperedge cut - int* edgecut = new int[1]; - edgecut[0] = 0 ; - int* options = new int[1]; - // 0 indicates the default paraemter value, 1 otherwise; - options[0] = 0 ; - int* partweights = new int[nparts]; - - // initialization - for (int i = 0; i < nparts ; i ++) { - partweights[i] = 1; - } - - // calling HMETIS_PartKway API to perform the hypergraph partitioning + } + + // part is an array of size nvtxs that returns the computed partition + int *part = new int[numElements]; + // edgecut is the number of hyperedge cut + int *edgecut = new int[1]; + edgecut[0] = 0; + int *options = new int[1]; + // 0 indicates the default paraemter value, 1 otherwise; + options[0] = 0; + int *partweights = new int[nparts]; + + // initialization + for (int i = 0; i < nparts; i++) { + partweights[i] = 1; + } + + // calling HMETIS_PartKway API to perform the hypergraph partitioning #ifdef PaToH - PaToH_Parameters args; - args._k = nparts; - // PaToH_Initialize_Parameters(&args, PATOH_CUTPART, PATOH_SUGPARAM_QUALITY); 
- PaToH_Initialize_Parameters(&args, PATOH_CONPART, - PATOH_SUGPARAM_DEFAULT); - // the number of constraint in the multilevel algorithm - int nconst = 1; // single constraint - PaToH_Alloc(&args, numVertices, numHyperedges, nconst, - vwgts, hewgts, eptr, eind); - int cut = 0; - PaToH_Part(&args, numVertices, numHyperedges, nconst , 0 , vwgts, - hewgts, eptr, eind, NULL, part, partweights, &cut); - edgecut[0] = cut ; - int computedCut = PaToH_Compute_Cut(nparts, PATOH_CONPART, numVertices, - numHyperedges, hewgts, eptr, eind, - part); - UTIL_MSG(m_param.LogDebugLevel, 2, - (*m_osLog) - << "The computedCut is " - << computedCut << "\n"; - ); + PaToH_Parameters args; + args._k = nparts; + // PaToH_Initialize_Parameters(&args, PATOH_CUTPART, PATOH_SUGPARAM_QUALITY); + PaToH_Initialize_Parameters(&args, PATOH_CONPART, PATOH_SUGPARAM_DEFAULT); + // the number of constraint in the multilevel algorithm + int nconst = 1; // single constraint + PaToH_Alloc(&args, numVertices, numHyperedges, nconst, vwgts, hewgts, eptr, + eind); + int cut = 0; + PaToH_Part(&args, numVertices, numHyperedges, nconst, 0, vwgts, hewgts, eptr, + eind, NULL, part, partweights, &cut); + edgecut[0] = cut; + int computedCut = PaToH_Compute_Cut(nparts, PATOH_CONPART, numVertices, + numHyperedges, hewgts, eptr, eind, part); + UTIL_MSG(m_param.LogDebugLevel, 2, + (*m_osLog) << "The computedCut is " << computedCut << "\n";); #else - clock_t begin = clock(); + clock_t begin = clock(); #if defined(COIN_HAS_HMETIS) - // maximum load imbalance (%) - int ubfactor = 5; - HMETIS_PartRecursive(numVertices, numHyperedges, vwgts, eptr, - eind, hewgts, nparts, ubfactor, options, part, edgecut); + // maximum load imbalance (%) + int ubfactor = 5; + HMETIS_PartRecursive(numVertices, numHyperedges, vwgts, eptr, eind, hewgts, + nparts, ubfactor, options, part, edgecut); #endif - clock_t end = clock(); - UTIL_MSG(m_param.LogLevel, 2, - (*m_osLog) - << "********************************************" << "\n" - << "The time elapse for hypergraph partitioning is " - << (end - begin) / CLOCKS_PER_SEC << " seconds" << "\n" - << "********************************************" - << "\n"; - ); + clock_t end = clock(); + UTIL_MSG(m_param.LogLevel, 2, + (*m_osLog) << "********************************************" + << "\n" + << "The time elapse for hypergraph partitioning is " + << (end - begin) / CLOCKS_PER_SEC << " seconds" + << "\n" + << "********************************************" + << "\n";); #endif - /* - *The following codes try to find the hyperedges in the - * vertex separator set by traversing the hyperedges. - *If a hyperedge has vertices in more than one part, - *then it is a cut hyperedge then it is in separator. 
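// A minimal standalone sketch of the partitioning call driven above,
// assuming the hMETIS C API (hmetis.h / libhmetis) is available; the tiny
// hypergraph and all names below are made up purely for illustration.
// Hyperedges are stored CSR-style: eptr[e] .. eptr[e+1]-1 indexes the
// vertices of hyperedge e inside eind, which is exactly how the rows of the
// constraint matrix are packed into eptr/eind here.
#include <cstddef>
#include <vector>
extern "C" void HMETIS_PartRecursive(int nvtxs, int nhedges, int *vwgts,
                                     int *eptr, int *eind, int *hewgts,
                                     int nparts, int ubfactor, int *options,
                                     int *part, int *edgecut);

void tinyPartitionSketch() {
   // 4 vertices (columns) and 3 hyperedges (rows):
   //   e0 = {0,1}, e1 = {1,2}, e2 = {2,3}
   int eptr[] = {0, 2, 4, 6};
   int eind[] = {0, 1, 1, 2, 2, 3};
   int nparts = 2;
   int ubfactor = 5;              // max load imbalance (%), as above
   int options[1] = {0};          // options[0] = 0 -> default hMETIS parameters
   std::vector<int> part(4, 0);   // computed part of each vertex
   int edgecut = 0;               // number of hyperedges cut
   HMETIS_PartRecursive(4, 3, NULL, eptr, eind, NULL, nparts, ubfactor,
                        options, part.data(), &edgecut);
   // Any hyperedge whose vertices land in different parts is "cut"; the
   // corresponding row becomes a coupling row of the decomposition.
}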
- */ - /* - * define a set to store the coupling rows (hyperedges in - * the vertex separator set) - */ - std::set netSet; - std::set :: iterator netIter; - // initilizations for global index - index = 0 ; - index_base = 0 ; - int tempBase = 0 ; - - /* - * Identify the coupling row in the matrix by storing - * them in a net set - */ - for ( int i = 0 ; i < numRows ; i ++) { - for ( int j = 0 ; j < lengthRows[i] ; j ++ ) { - index = index_base + j ; - - if ( j == 0) { - tempBase = part[minorIndex[index_base]]; - } else { - if (tempBase != part[minorIndex[index]]) { - netSet.insert(i); - j = lengthRows[i]; - } - } - } - - //update index_base - index_base = index_base + lengthRows[i]; - } - - UTIL_MSG(m_param.LogLevel, 2, - (*m_osLog) - << "The size of the net set after finding the coupling" - << "row is " - << static_cast(netSet.size()) - << "\n"; - ); - // Eliminate the coupling rows from each partition set - std::set numRowIndex; - std::set :: iterator rowIter; - std::vector rowsBlock; - /* - * truePartNum indicates the true partition number after eliminating the partitioned - * blocks where there is no element in the block - */ - int truePartNum = 0 ; - - for (int part_index = 0 ; part_index < nparts; part_index ++) { - // first, store the rows in different nets - for (int j = 0; j < numElements; j ++) { - if (part[minorIndex[j]] == part_index ) { - numRowIndex.insert(majorIndex[j]); - } - } - - //second, temp stores the row index that is duplicating in the - // coupling net set, and then removes it - std::vector temp; + /* + *The following codes try to find the hyperedges in the + * vertex separator set by traversing the hyperedges. + *If a hyperedge has vertices in more than one part, + *then it is a cut hyperedge then it is in separator. + */ + /* + * define a set to store the coupling rows (hyperedges in + * the vertex separator set) + */ + std::set netSet; + std::set::iterator netIter; + // initilizations for global index + index = 0; + index_base = 0; + int tempBase = 0; + + /* + * Identify the coupling row in the matrix by storing + * them in a net set + */ + for (int i = 0; i < numRows; i++) { + for (int j = 0; j < lengthRows[i]; j++) { + index = index_base + j; - for (rowIter = numRowIndex.begin(); rowIter != numRowIndex.end(); rowIter ++) { - for (netIter = netSet.begin(); netIter != netSet.end(); netIter ++) { - if ((*rowIter) == (*netIter)) { - temp.push_back(*rowIter); - } - } + if (j == 0) { + tempBase = part[minorIndex[index_base]]; + } else { + if (tempBase != part[minorIndex[index]]) { + netSet.insert(i); + j = lengthRows[i]; + } } + } + + // update index_base + index_base = index_base + lengthRows[i]; + } + + UTIL_MSG(m_param.LogLevel, 2, + (*m_osLog) << "The size of the net set after finding the coupling" + << "row is " << static_cast(netSet.size()) << "\n";); + // Eliminate the coupling rows from each partition set + std::set numRowIndex; + std::set::iterator rowIter; + std::vector rowsBlock; + /* + * truePartNum indicates the true partition number after eliminating the + * partitioned blocks where there is no element in the block + */ + int truePartNum = 0; - for (int s = 0 ; s < static_cast(temp.size()); s ++) { - numRowIndex.erase(temp.at(s)); + for (int part_index = 0; part_index < nparts; part_index++) { + // first, store the rows in different nets + for (int j = 0; j < numElements; j++) { + if (part[minorIndex[j]] == part_index) { + numRowIndex.insert(majorIndex[j]); } - - if (numRowIndex.size() != 0) { - //GCG defaults 1 as starting block number, DIP had default 
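// A condensed sketch, with hypothetical names, of the two steps carried out
// around here: (1) a row whose columns fall into more than one part of the
// vertex partition is a coupling (cut) row, and (2) coupling rows are
// stripped from every block's row set before the blocks are written out.
#include <set>
#include <vector>

std::set<int> findCouplingRows(const std::vector<std::vector<int> > &rowCols,
                               const std::vector<int> &part) {
   std::set<int> coupling;
   for (size_t r = 0; r < rowCols.size(); r++) {
      for (size_t j = 1; j < rowCols[r].size(); j++) {
         // compare each column's part against the part of the row's first column
         if (part[rowCols[r][j]] != part[rowCols[r][0]]) {
            coupling.insert(static_cast<int>(r));
            break;
         }
      }
   }
   return coupling;
}

void removeCouplingRows(std::set<int> &blockRows, const std::set<int> &coupling) {
   for (std::set<int>::const_iterator it = coupling.begin();
        it != coupling.end(); ++it) {
      blockRows.erase(*it);   // coupling rows stay in the master, not in a block
   }
}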
value 0 but - // 1 should be fine, which is why we add 1 - blockdata << "BLOCK " << (truePartNum + 1) << "\n"; - - for (rowIter = numRowIndex.begin(); rowIter != numRowIndex.end(); - rowIter++) { - if (m_param.InstanceFormat == "MPS") { - blockdata << m_mpsIO.rowName(*rowIter) << "\n"; - } else if (m_param.InstanceFormat == "LP") { - blockdata << m_lpIO.rowName(*rowIter) << "\n"; - } - - rowsBlock.push_back(*rowIter); - } - - m_blocks.insert(make_pair(truePartNum, rowsBlock)); - truePartNum ++; + } + + // second, temp stores the row index that is duplicating in the + // coupling net set, and then removes it + std::vector temp; + + for (rowIter = numRowIndex.begin(); rowIter != numRowIndex.end(); + rowIter++) { + for (netIter = netSet.begin(); netIter != netSet.end(); netIter++) { + if ((*rowIter) == (*netIter)) { + temp.push_back(*rowIter); + } } - - numRowIndex.clear(); - rowsBlock.clear(); - temp.clear(); - } - - blockdata.close(); - - if (m_param.BlockFileOutput) { - fstream input_file; - input_file.open(BlockFile.c_str(), ios::in); - std::ofstream blockdata2; - std::string BlockFile2; - BlockFile2 = m_param.Instance + '.' + "dec"; - blockdata2.open(BlockFile2.c_str()); - blockdata2 << "NBLOCKS " << truePartNum << "\n"; - string line; - - while (!input_file.eof()) { - getline(input_file, line); - blockdata2 << line << "\n"; + } + + for (int s = 0; s < static_cast(temp.size()); s++) { + numRowIndex.erase(temp.at(s)); + } + + if (numRowIndex.size() != 0) { + // GCG defaults 1 as starting block number, DIP had default value 0 but + // 1 should be fine, which is why we add 1 + blockdata << "BLOCK " << (truePartNum + 1) << "\n"; + + for (rowIter = numRowIndex.begin(); rowIter != numRowIndex.end(); + rowIter++) { + if (m_param.InstanceFormat == "MPS") { + blockdata << m_mpsIO.rowName(*rowIter) << "\n"; + } else if (m_param.InstanceFormat == "LP") { + blockdata << m_lpIO.rowName(*rowIter) << "\n"; + } + + rowsBlock.push_back(*rowIter); } - blockdata2.close(); - } - - UTIL_DELARR(eptr); - UTIL_DELARR(eind); - //UTIL_DELARR(minorIndex); - //UTIL_DELARR(lengthRows); - UTIL_DELARR(majorIndex); - UTIL_DELARR(intLengthRows); - UTIL_DELARR(edgecut); - UTIL_DELARR(part); - UTIL_DELARR(vwgts); - UTIL_DELARR(hewgts); - UTIL_DELARR(options); - UTIL_DELARR(intVertices); - UTIL_DELARR(intHyperedges); - UTIL_DELARR(partweights); + m_blocks.insert(make_pair(truePartNum, rowsBlock)); + truePartNum++; + } + + numRowIndex.clear(); + rowsBlock.clear(); + temp.clear(); + } + + blockdata.close(); + + if (m_param.BlockFileOutput) { + fstream input_file; + input_file.open(BlockFile.c_str(), ios::in); + std::ofstream blockdata2; + std::string BlockFile2; + BlockFile2 = m_param.Instance + '.' 
+ "dec"; + blockdata2.open(BlockFile2.c_str()); + blockdata2 << "NBLOCKS " << truePartNum << "\n"; + string line; + + while (!input_file.eof()) { + getline(input_file, line); + blockdata2 << line << "\n"; + } + + blockdata2.close(); + } + + UTIL_DELARR(eptr); + UTIL_DELARR(eind); + // UTIL_DELARR(minorIndex); + // UTIL_DELARR(lengthRows); + UTIL_DELARR(majorIndex); + UTIL_DELARR(intLengthRows); + UTIL_DELARR(edgecut); + UTIL_DELARR(part); + UTIL_DELARR(vwgts); + UTIL_DELARR(hewgts); + UTIL_DELARR(options); + UTIL_DELARR(intVertices); + UTIL_DELARR(intHyperedges); + UTIL_DELARR(partweights); #ifdef PaToH - PaToH_Free(); + PaToH_Free(); #endif - if (m_threadIndex != 0) { - std::cout << "The number of blocks is " << truePartNum << std::endl; - } + if (m_threadIndex != 0) { + std::cout << "The number of blocks is " << truePartNum << std::endl; + } } //===========================================================================// -void DecompApp::setModelRelax(DecompConstraintSet* model, - const std::string modelName, - const int blockId) { - if (model && !model->hasPrepRun()) { - model->prepareModel(m_infinity); - } - - //--- - //--- make sure this block has not been set yet - //--- - std::map::iterator mit = m_modelRelax.find(blockId); - - if (mit != m_modelRelax.end()) { - std::cerr << "Block " << blockId << " relaxation has already been set. " - << "Only one relaxation definition can be used at one time." - << std::endl; - throw UtilException("Multiple relaxation definitions", - "setModelRelax", "DecompApp"); - } - - DecompModel appModel(model, modelName, blockId, *m_utilParam); - m_modelRelax.insert(std::make_pair(blockId, appModel)); +void DecompApp::setModelRelax(DecompConstraintSet *model, + const std::string modelName, const int blockId) { + if (model && !model->hasPrepRun()) { + model->prepareModel(m_infinity); + } + + //--- + //--- make sure this block has not been set yet + //--- + std::map::iterator mit = m_modelRelax.find(blockId); + + if (mit != m_modelRelax.end()) { + std::cerr << "Block " << blockId << " relaxation has already been set. " + << "Only one relaxation definition can be used at one time." 
+ << std::endl; + throw UtilException("Multiple relaxation definitions", "setModelRelax", + "DecompApp"); + } + + DecompModel appModel(model, modelName, blockId, *m_utilParam); + m_modelRelax.insert(std::make_pair(blockId, appModel)); } //===========================================================================// -void DecompApp::setModelRelaxNest(DecompConstraintSet* model, - const std::string modelName, - const int blockId) { - assert(model); - - if (!model->hasPrepRun()) { - model->prepareModel(m_infinity); - } - - DecompModel appModel(model, modelName, blockId, *m_utilParam); - m_modelRelaxNest[blockId].push_back(appModel); +void DecompApp::setModelRelaxNest(DecompConstraintSet *model, + const std::string modelName, + const int blockId) { + assert(model); + + if (!model->hasPrepRun()) { + model->prepareModel(m_infinity); + } + + DecompModel appModel(model, modelName, blockId, *m_utilParam); + m_modelRelaxNest[blockId].push_back(appModel); } // --------------------------------------------------------------------- // -void DecompApp::setInfinity(){ - if (m_param.DecompLPSolver == "Clp"){ +void DecompApp::setInfinity() { + if (m_param.DecompLPSolver == "Clp") { #ifdef DIP_HAS_CLP - m_infinity = OsiClpInfinity; + m_infinity = OsiClpInfinity; #else - throw UtilException("Clp selected as solver, but it's not available", - "setDecompInf", "DecompApp"); + throw UtilException("Clp selected as solver, but it's not available", + "setDecompInf", "DecompApp"); #endif - }else if (m_param.DecompLPSolver == "CPLEX"){ + } else if (m_param.DecompLPSolver == "CPLEX") { #ifdef DIP_HAS_CPX - m_infinity = CPX_INFBOUND; + m_infinity = CPX_INFBOUND; #else - throw UtilException("CPLEX selected as solver, but it's not available", - "setDecompInf", "DecompApp"); + throw UtilException("CPLEX selected as solver, but it's not available", + "setDecompInf", "DecompApp"); #endif - }else if (m_param.DecompLPSolver == "Gurobi"){ + } else if (m_param.DecompLPSolver == "Gurobi") { #ifdef DIP_HAS_GRB - m_infinity = GRB_INFINITY; + m_infinity = GRB_INFINITY; #else - throw UtilException("Gurobi selected as solver, but it's not available", - "setDecompInf", "DecompApp"); + throw UtilException("Gurobi selected as solver, but it's not available", + "setDecompInf", "DecompApp"); #endif - }else{ - throw UtilException("Unknown solver selected", - "setDecompInf", "DecompApp"); - } - return; + } else { + throw UtilException("Unknown solver selected", "setDecompInf", "DecompApp"); + } + return; } #if 0 diff --git a/Dip/src/DecompBranch.cpp b/Dip/src/DecompBranch.cpp index 774e780a..de67c429 100644 --- a/Dip/src/DecompBranch.cpp +++ b/Dip/src/DecompBranch.cpp @@ -12,88 +12,82 @@ // All Rights Reserved. // //===========================================================================// - #include "DecompAlgo.h" #include "DecompApp.h" // --------------------------------------------------------------------- // -bool DecompAlgo:: -chooseBranchSet(std::vector< std::pair >& downBranchLB, - std::vector< std::pair >& downBranchUB, - std::vector< std::pair >& upBranchLB, - std::vector< std::pair >& upBranchUB) -{ - UtilPrintFuncBegin(m_osLog, m_classTag, - "chooseBranchSet()", m_param.LogDebugLevel, 1); - //--- - //--- Default branching in DIP is the most simple approach possible. - //--- Choose variables farthest from integer - based on x formulation. 
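// A minimal standalone sketch of the "most fractional" rule implemented
// below, with hypothetical names: among the integer variables, pick the one
// whose value is farthest from the nearest integer, then branch
// x[j] <= floor(x[j]) on the down branch and x[j] >= ceil(x[j]) on the up
// branch.
#include <cmath>
#include <vector>

int mostFractionalIndex(const std::vector<double> &x,
                        const std::vector<int> &integerVars,
                        double epsilon = 1.0e-6) {
   int bestIndex  = -1;
   double maxDist = epsilon;   // below this, x[j] is treated as integral
   for (size_t k = 0; k < integerVars.size(); k++) {
      int j       = integerVars[k];
      double dist = std::fabs(x[j] - std::floor(x[j] + 0.5));
      if (dist > maxDist) {
         maxDist   = dist;
         bestIndex = j;
      }
   }
   return bestIndex;           // -1 means the point is integer feasible
}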
- //--- - std::vector::iterator intIt; - int branchedOnIndex, j; - double branchedOnValue, x, dist, maxDist; - double obj = 0.0; - const double* objCoeff = getOrigObjective(); - DecompConstraintSet* modelCore = m_modelCore.getModel(); - maxDist = DecompEpsilon;//TODO: parameter - branchedOnIndex = -1; - branchedOnValue = 0; - // const std::vector & colNames = modelCore->getColNames(); +bool DecompAlgo::chooseBranchSet( + std::vector> &downBranchLB, + std::vector> &downBranchUB, + std::vector> &upBranchLB, + std::vector> &upBranchUB) { + UtilPrintFuncBegin(m_osLog, m_classTag, "chooseBranchSet()", + m_param.LogDebugLevel, 1); + //--- + //--- Default branching in DIP is the most simple approach possible. + //--- Choose variables farthest from integer - based on x formulation. + //--- + std::vector::iterator intIt; + int branchedOnIndex, j; + double branchedOnValue, x, dist, maxDist; + double obj = 0.0; + const double *objCoeff = getOrigObjective(); + DecompConstraintSet *modelCore = m_modelCore.getModel(); + maxDist = DecompEpsilon; // TODO: parameter + branchedOnIndex = -1; + branchedOnValue = 0; + // const std::vector & colNames = modelCore->getColNames(); - for (intIt = modelCore->integerVars.begin(); - intIt != modelCore->integerVars.end(); intIt++) { - j = *intIt; - x = m_xhat[j]; - obj += m_xhat[j] * objCoeff[j]; - dist = fabs(x - floor(x + 0.5)); + for (intIt = modelCore->integerVars.begin(); + intIt != modelCore->integerVars.end(); intIt++) { + j = *intIt; + x = m_xhat[j]; + obj += m_xhat[j] * objCoeff[j]; + dist = fabs(x - floor(x + 0.5)); - if (dist > maxDist) { - maxDist = dist; - branchedOnIndex = j; - branchedOnValue = x; - } - } + if (dist > maxDist) { + maxDist = dist; + branchedOnIndex = j; + branchedOnValue = x; + } + } - std::map:: iterator mit; + std::map::iterator mit; - if (branchedOnIndex != -1) { - //--- - //--- Example x[0]=2.5: - //--- x[0] <= 2 (down) - //--- x[0] >= 3 (up ) - //--- - mit = m_masterOnlyColsMap.find(branchedOnIndex); + if (branchedOnIndex != -1) { + //--- + //--- Example x[0]=2.5: + //--- x[0] <= 2 (down) + //--- x[0] >= 3 (up ) + //--- + mit = m_masterOnlyColsMap.find(branchedOnIndex); - if (mit != m_masterOnlyColsMap.end()) { - // it indicates the branched variable is a master-only variable - // we need to set the branching method to branch in the master - m_branchingImplementation = DecompBranchInMaster; - } - - //std::cout << "The branching variable is " << branchedOnIndex - // << " " << colNames[branchedOnIndex] << std::endl; - downBranchUB.push_back(std::pair(branchedOnIndex, - floor(branchedOnValue))); - upBranchLB.push_back(std::pair(branchedOnIndex, - ceil(branchedOnValue))); - UTIL_MSG(m_param.LogDebugLevel, 3, - int nColNames = static_cast(modelCore->colNames.size()); - (*m_osLog) << "branchOnInd = " << branchedOnIndex << " -> "; + if (mit != m_masterOnlyColsMap.end()) { + // it indicates the branched variable is a master-only variable + // we need to set the branching method to branch in the master + m_branchingImplementation = DecompBranchInMaster; + } - if ( branchedOnIndex < nColNames && - branchedOnIndex >= 0) - (*m_osLog) << modelCore->colNames[branchedOnIndex]; - else { - m_app->printOriginalColumn(branchedOnIndex, m_osLog); - } - (*m_osLog) << "\tbranchOnVal = " << branchedOnValue << "\n"; - ); - return true; - } else { - return false; - } + // std::cout << "The branching variable is " << branchedOnIndex + // << " " << colNames[branchedOnIndex] << std::endl; + downBranchUB.push_back( + std::pair(branchedOnIndex, 
floor(branchedOnValue))); + upBranchLB.push_back( + std::pair(branchedOnIndex, ceil(branchedOnValue))); + UTIL_MSG( + m_param.LogDebugLevel, 3, + int nColNames = static_cast(modelCore->colNames.size()); + (*m_osLog) << "branchOnInd = " << branchedOnIndex << " -> "; - UtilPrintFuncBegin(m_osLog, m_classTag, - "chooseBranchSet()", m_param.LogDebugLevel, 1); -} + if (branchedOnIndex < nColNames && branchedOnIndex >= 0)(*m_osLog) + << modelCore->colNames[branchedOnIndex]; + else { m_app->printOriginalColumn(branchedOnIndex, m_osLog); }(*m_osLog) + << "\tbranchOnVal = " << branchedOnValue << "\n";); + return true; + } else { + return false; + } + UtilPrintFuncBegin(m_osLog, m_classTag, "chooseBranchSet()", + m_param.LogDebugLevel, 1); +} diff --git a/Dip/src/DecompConstraintSet.cpp b/Dip/src/DecompConstraintSet.cpp index ca58eee3..04d19e95 100644 --- a/Dip/src/DecompConstraintSet.cpp +++ b/Dip/src/DecompConstraintSet.cpp @@ -13,295 +13,275 @@ //===========================================================================// //===========================================================================// +#include "DecompConstraintSet.h" #include "UtilHash.h" #include "UtilMacrosDecomp.h" -#include "DecompConstraintSet.h" using namespace std; //===========================================================================// -void DecompConstraintSet::prepareModel(double infinity, bool modelIsCore) -{ - //--- - //--- For each model: - //--- 1.) set row senses and/or bounds - //--- 2.) create row hash - //--- 3.) set nBaseRows - //--- 4.) flip to row ordered, if neccessary (for relaxed too) - //--- 5.) mark integers - //--- 6.) if sparse, set active columns - //--- - if (!M) { - return; - } - - UtilPrintMemUsage(&cout, 2, 2); - - //TODO: needed for relax? - if (M->isColOrdered()) { - M->reverseOrdering(); - } - - int numRows = getNumRows(); - int numCols = getNumCols(); - int numColsOrig = getNumColsOrig(); - UtilPrintMemUsage(&cout, 2, 2); - checkSenseAndBound(infinity); - - if (modelIsCore) { - createRowHash(infinity); - } - - nBaseRows = getNumRows(); - //TODO: make this an option - //--- - //--- if row/col names are not given, make up default ones - //--- - int i, j; - - if (rowNames.size() == 0) { - for (i = 0; i < numRows; i++) { - rowNames.push_back("r(" + UtilIntToStr(i) + ")"); - } - } - - if (colNames.size() == 0) { - for (j = 0; j < numCols; j++) { - colNames.push_back("x(" + UtilIntToStr(j) + ")"); - } - } - - prepHasRun = true; - - //--- - //--- if active columns were not set (or sparse), set to all columns - //--- note: this is in terms of the original indices (not sparse) - //--- - if (isSparse()) { - //--- - //--- is this case, the user might have set this - //--- or might not have, so we want to find the - //--- set based on the mapping, but need to check - //--- for duplicates, in case the user already - //--- provided this set - //--- - set activeColumnsSet(activeColumns.begin(), activeColumns.end()); - map::const_iterator mcit; - activeColumns.reserve(m_sparseToOrig.size()); - - for (mcit = m_sparseToOrig.begin(); - mcit != m_sparseToOrig.end(); mcit++) { - activeColumnsSet.insert(mcit->second); - } - - set::iterator sit; - activeColumns.clear(); - - for (sit = activeColumnsSet.begin(); - sit != activeColumnsSet.end(); sit++) { - activeColumns.push_back(*sit); - } - } else { - int nActiveColumns = static_cast(activeColumns.size()); - - if (!nActiveColumns) { - UtilIotaN(activeColumns, numColsOrig, 0); - } - } - - //--- - //--- if dense format, fix non-active columns - //--- - if 
(!isSparse()) { - fixNonActiveColumns(); - } - - //--- - //--- create set from vector - easier to check overlap, etc - //--- - vector::iterator vit; - - for (vit = activeColumns.begin(); vit != activeColumns.end(); vit++) { - activeColumnsS.insert(*vit); - } - - //--- - //--- set column markers (original number of cols) - //--- - //UtilFillN(columnMarker, numColsOrig, (int)DecompColNonActive); - //for(vit = activeColumns.begin(); vit != activeColumns.end(); vit++) - // columnMarker[*vit] = DecompColActive; - //for(vit = masterOnlyCols.begin(); vit != masterOnlyCols.end(); vit++) - // columnMarker[*vit] = DecompColMasterOnly; - //--- - //--- mark integers (original number of cols) - //--- only do this for core - if (modelIsCore) { - UtilFillN(integerMark, numColsOrig, 'C'); - - for (vit = integerVars.begin(); vit != integerVars.end(); vit++) { - integerMark[*vit] = 'I'; - } - } +void DecompConstraintSet::prepareModel(double infinity, bool modelIsCore) { + //--- + //--- For each model: + //--- 1.) set row senses and/or bounds + //--- 2.) create row hash + //--- 3.) set nBaseRows + //--- 4.) flip to row ordered, if neccessary (for relaxed too) + //--- 5.) mark integers + //--- 6.) if sparse, set active columns + //--- + if (!M) { + return; + } + + UtilPrintMemUsage(&cout, 2, 2); + + // TODO: needed for relax? + if (M->isColOrdered()) { + M->reverseOrdering(); + } + + int numRows = getNumRows(); + int numCols = getNumCols(); + int numColsOrig = getNumColsOrig(); + UtilPrintMemUsage(&cout, 2, 2); + checkSenseAndBound(infinity); + + if (modelIsCore) { + createRowHash(infinity); + } + + nBaseRows = getNumRows(); + // TODO: make this an option + //--- + //--- if row/col names are not given, make up default ones + //--- + int i, j; + + if (rowNames.size() == 0) { + for (i = 0; i < numRows; i++) { + rowNames.push_back("r(" + UtilIntToStr(i) + ")"); + } + } + + if (colNames.size() == 0) { + for (j = 0; j < numCols; j++) { + colNames.push_back("x(" + UtilIntToStr(j) + ")"); + } + } + + prepHasRun = true; + + //--- + //--- if active columns were not set (or sparse), set to all columns + //--- note: this is in terms of the original indices (not sparse) + //--- + if (isSparse()) { + //--- + //--- is this case, the user might have set this + //--- or might not have, so we want to find the + //--- set based on the mapping, but need to check + //--- for duplicates, in case the user already + //--- provided this set + //--- + set activeColumnsSet(activeColumns.begin(), activeColumns.end()); + map::const_iterator mcit; + activeColumns.reserve(m_sparseToOrig.size()); + + for (mcit = m_sparseToOrig.begin(); mcit != m_sparseToOrig.end(); mcit++) { + activeColumnsSet.insert(mcit->second); + } + + set::iterator sit; + activeColumns.clear(); + + for (sit = activeColumnsSet.begin(); sit != activeColumnsSet.end(); sit++) { + activeColumns.push_back(*sit); + } + } else { + int nActiveColumns = static_cast(activeColumns.size()); + + if (!nActiveColumns) { + UtilIotaN(activeColumns, numColsOrig, 0); + } + } + + //--- + //--- if dense format, fix non-active columns + //--- + if (!isSparse()) { + fixNonActiveColumns(); + } + + //--- + //--- create set from vector - easier to check overlap, etc + //--- + vector::iterator vit; + + for (vit = activeColumns.begin(); vit != activeColumns.end(); vit++) { + activeColumnsS.insert(*vit); + } + + //--- + //--- set column markers (original number of cols) + //--- + // UtilFillN(columnMarker, numColsOrig, (int)DecompColNonActive); + // for(vit = activeColumns.begin(); vit != 
activeColumns.end(); vit++) + // columnMarker[*vit] = DecompColActive; + // for(vit = masterOnlyCols.begin(); vit != masterOnlyCols.end(); vit++) + // columnMarker[*vit] = DecompColMasterOnly; + //--- + //--- mark integers (original number of cols) + //--- only do this for core + if (modelIsCore) { + UtilFillN(integerMark, numColsOrig, 'C'); + + for (vit = integerVars.begin(); vit != integerVars.end(); vit++) { + integerMark[*vit] = 'I'; + } + } } //===========================================================================// -void DecompConstraintSet::createRowHash(double infinity) -{ - int r; - string strHash; - const int* rmat_ind = M->getIndices(); - const double* rmat_els = M->getElements(); - const int* rmat_beg = M->getVectorStarts(); - const int* rmat_len = M->getVectorLengths(); - - for (r = 0; r < getNumRows(); r++) { - strHash = UtilCreateStringHash(rmat_len[r], - rmat_ind + rmat_beg[r], - rmat_els + rmat_beg[r], - rowSense[r], - rowRhs[r], - infinity); - rowHash.push_back(strHash); - } +void DecompConstraintSet::createRowHash(double infinity) { + int r; + string strHash; + const int *rmat_ind = M->getIndices(); + const double *rmat_els = M->getElements(); + const int *rmat_beg = M->getVectorStarts(); + const int *rmat_len = M->getVectorLengths(); + + for (r = 0; r < getNumRows(); r++) { + strHash = UtilCreateStringHash(rmat_len[r], rmat_ind + rmat_beg[r], + rmat_els + rmat_beg[r], rowSense[r], + rowRhs[r], infinity); + rowHash.push_back(strHash); + } } //===========================================================================// -void DecompConstraintSet::checkSenseAndBound(double infinity) -{ - assert(rowLB.size() + rowRhs.size() > 0); - assert(rowUB.size() + rowRhs.size() > 0); - - if (rowLB.size() > 0 && rowRhs.size() == 0) { - boundsToSenses(infinity); - } else if (rowLB.size() == 0 && rowRhs.size() > 0) { - sensesToBounds(infinity); - } - - assert(rowLB.size() == rowUB.size()); - assert(rowLB.size() == rowRhs.size()); - assert(rowLB.size() == rowSense.size()); +void DecompConstraintSet::checkSenseAndBound(double infinity) { + assert(rowLB.size() + rowRhs.size() > 0); + assert(rowUB.size() + rowRhs.size() > 0); + + if (rowLB.size() > 0 && rowRhs.size() == 0) { + boundsToSenses(infinity); + } else if (rowLB.size() == 0 && rowRhs.size() > 0) { + sensesToBounds(infinity); + } + + assert(rowLB.size() == rowUB.size()); + assert(rowLB.size() == rowRhs.size()); + assert(rowLB.size() == rowSense.size()); } //===========================================================================// -void DecompConstraintSet::sensesToBounds(double infinity) -{ - double rlb, rub; - int n_rows = static_cast(rowSense.size()); - rowLB.reserve(n_rows); - rowUB.reserve(n_rows); - - for (int r = 0; r < n_rows; r++) { - UtilSenseToBound(rowSense[r], rowRhs[r], 0.0,//TODO - infinity, rlb, rub); - rowLB.push_back(rlb); - rowUB.push_back(rub); - } +void DecompConstraintSet::sensesToBounds(double infinity) { + double rlb, rub; + int n_rows = static_cast(rowSense.size()); + rowLB.reserve(n_rows); + rowUB.reserve(n_rows); + + for (int r = 0; r < n_rows; r++) { + UtilSenseToBound(rowSense[r], rowRhs[r], 0.0, // TODO + infinity, rlb, rub); + rowLB.push_back(rlb); + rowUB.push_back(rub); + } } //===========================================================================// -void DecompConstraintSet::boundsToSenses(double infinity) -{ - char sense; - double rhs, range;//not used - int n_rows = static_cast(rowLB.size()); - rowRhs.reserve(n_rows); - rowSense.reserve(n_rows); - - for (int r = 0; r < n_rows; r++) { - 
UtilBoundToSense(rowLB[r], rowUB[r], infinity, - sense, rhs, range); - rowRhs.push_back(rhs); - rowSense.push_back(sense); - } +void DecompConstraintSet::boundsToSenses(double infinity) { + char sense; + double rhs, range; // not used + int n_rows = static_cast(rowLB.size()); + rowRhs.reserve(n_rows); + rowSense.reserve(n_rows); + + for (int r = 0; r < n_rows; r++) { + UtilBoundToSense(rowLB[r], rowUB[r], infinity, sense, rhs, range); + rowRhs.push_back(rhs); + rowSense.push_back(sense); + } } //===========================================================================// -void DecompConstraintSet::fixNonActiveColumns() -{ - const int numCols = getNumCols(); - const int nActiveCols = static_cast(activeColumns.size()); +void DecompConstraintSet::fixNonActiveColumns() { + const int numCols = getNumCols(); + const int nActiveCols = static_cast(activeColumns.size()); - if (nActiveCols == numCols) { - return; - } + if (nActiveCols == numCols) { + return; + } - int* marker = new int[numCols]; + int *marker = new int[numCols]; - if (!marker) { - UtilExceptionMemory("fixNonActiveColumns", "DecompConstraintSet"); - } + if (!marker) { + UtilExceptionMemory("fixNonActiveColumns", "DecompConstraintSet"); + } - UtilFillN(marker, numCols, 0); - vector::iterator vi; + UtilFillN(marker, numCols, 0); + vector::iterator vi; - for (vi = activeColumns.begin(); vi != activeColumns.end(); vi++) { - marker[*vi] = 1; - } + for (vi = activeColumns.begin(); vi != activeColumns.end(); vi++) { + marker[*vi] = 1; + } - int i; + int i; - for (i = 0; i < numCols; i++) { - if (marker[i]) { - continue; - } + for (i = 0; i < numCols; i++) { + if (marker[i]) { + continue; + } - colLB[i] = 0.0; - colUB[i] = 0.0; - } + colLB[i] = 0.0; + colUB[i] = 0.0; + } - UTIL_DELARR(marker); + UTIL_DELARR(marker); } //===========================================================================// -CoinPackedMatrix* DecompConstraintSet::sparseToOrigMatrix() -{ - assert(m_isSparse); - //--- - //--- create a dense row-majored version of the sparse matrix M - //--- - bool colOrdered = M->isColOrdered(); - int nCols = m_numColsOrig; - int nRows = M->getNumRows(); - CoinPackedMatrix* MRow = NULL; - - if (colOrdered) { - //--- - //--- first create a row-ordered version - //--- - MRow = new CoinPackedMatrix(); - CoinAssertHint(MRow, "Error: Out of Memory"); - MRow->reverseOrderedCopyOf(*M); - } else { - MRow = new CoinPackedMatrix(*M); - CoinAssertHint(MRow, "Error: Out of Memory"); - } - - int i; - int nElems = MRow->getNumElements(); - const int* matInd = MRow->getIndices(); - const int* matLen = MRow->getVectorLengths(); - const double* matVal = MRow->getElements(); - const CoinBigIndex* matBeg = MRow->getVectorStarts(); - int* matIndOrig = new int[nElems]; - CoinAssertHint(matIndOrig, "Error: Out of Memory"); - - for (i = 0; i < nElems; i++) { - matIndOrig[i] = m_sparseToOrig[matInd[i]]; - } - - CoinPackedMatrix* MOrig - = new CoinPackedMatrix(false, - nCols, - nRows, - nElems, - matVal, - matIndOrig, - matBeg, - matLen, - 0.0, 0.0); - CoinAssertHint(MOrig, "Error: Out of Memory"); - UTIL_DELPTR(MRow); - UTIL_DELARR(matIndOrig); - return MOrig; +CoinPackedMatrix *DecompConstraintSet::sparseToOrigMatrix() { + assert(m_isSparse); + //--- + //--- create a dense row-majored version of the sparse matrix M + //--- + bool colOrdered = M->isColOrdered(); + int nCols = m_numColsOrig; + int nRows = M->getNumRows(); + CoinPackedMatrix *MRow = NULL; + + if (colOrdered) { + //--- + //--- first create a row-ordered version + //--- + MRow = new 
CoinPackedMatrix(); + CoinAssertHint(MRow, "Error: Out of Memory"); + MRow->reverseOrderedCopyOf(*M); + } else { + MRow = new CoinPackedMatrix(*M); + CoinAssertHint(MRow, "Error: Out of Memory"); + } + + int i; + int nElems = MRow->getNumElements(); + const int *matInd = MRow->getIndices(); + const int *matLen = MRow->getVectorLengths(); + const double *matVal = MRow->getElements(); + const CoinBigIndex *matBeg = MRow->getVectorStarts(); + int *matIndOrig = new int[nElems]; + CoinAssertHint(matIndOrig, "Error: Out of Memory"); + + for (i = 0; i < nElems; i++) { + matIndOrig[i] = m_sparseToOrig[matInd[i]]; + } + + CoinPackedMatrix *MOrig = + new CoinPackedMatrix(false, nCols, nRows, nElems, matVal, matIndOrig, + matBeg, matLen, 0.0, 0.0); + CoinAssertHint(MOrig, "Error: Out of Memory"); + UTIL_DELPTR(MRow); + UTIL_DELARR(matIndOrig); + return MOrig; } diff --git a/Dip/src/DecompCut.cpp b/Dip/src/DecompCut.cpp index 78960123..62d30009 100644 --- a/Dip/src/DecompCut.cpp +++ b/Dip/src/DecompCut.cpp @@ -12,37 +12,29 @@ // All Rights Reserved. // //===========================================================================// - #include "DecompCut.h" using namespace std; // --------------------------------------------------------------------- // -bool DecompCut::calcViolation(const CoinPackedVector* row, - const double* x) -{ - //always calculated wrt to original row! - const double activity = row->dotProduct(x); - //printf("\nact: %g, m_lb: %g, m_ub: %g", - // activity, m_lb, m_ub); - double violation = std::max(m_lb - activity, activity - m_ub); - violation = std::max(0.0, violation); - //printf("\nviolation = %g", violation); - setViolation(violation); //should it set it here? - return violation > 0.0000001;//param? +bool DecompCut::calcViolation(const CoinPackedVector *row, const double *x) { + // always calculated wrt to original row! + const double activity = row->dotProduct(x); + // printf("\nact: %g, m_lb: %g, m_ub: %g", + // activity, m_lb, m_ub); + double violation = std::max(m_lb - activity, activity - m_ub); + violation = std::max(0.0, violation); + // printf("\nviolation = %g", violation); + setViolation(violation); // should it set it here? + return violation > 0.0000001; // param? } // --------------------------------------------------------------------- // -void DecompCut::print(ostream* os) const -{ - (*os) << "\nCUT" - << " vio: " << m_violation - << " eff: " << m_effCnt - << " lb: " << m_lb - << " ub: " << m_ub - << "\n"; - //UtilPrintPackedVector(m_s, os); - //we don't know anything like in var, we know about s... - //think.... +void DecompCut::print(ostream *os) const { + (*os) << "\nCUT" + << " vio: " << m_violation << " eff: " << m_effCnt << " lb: " << m_lb + << " ub: " << m_ub << "\n"; + // UtilPrintPackedVector(m_s, os); + // we don't know anything like in var, we know about s... + // think.... } - diff --git a/Dip/src/DecompCutPool.cpp b/Dip/src/DecompCutPool.cpp index 08e857c2..56289829 100644 --- a/Dip/src/DecompCutPool.cpp +++ b/Dip/src/DecompCutPool.cpp @@ -12,10 +12,9 @@ // All Rights Reserved. 
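// A small worked sketch of the violation measure used in
// DecompCut::calcViolation above: for a range row lb <= a.x <= ub the
// violation is max(lb - a.x, a.x - ub, 0).  For example, with
// 2 <= a.x <= 5 a point giving a.x = 6.5 is violated by 1.5, while
// a.x = 3 has violation 0.
#include <algorithm>

double rangeRowViolation(double activity, double lb, double ub) {
   double violation = std::max(lb - activity, activity - ub);
   return std::max(0.0, violation);
}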
// //===========================================================================// - -#include "DecompVar.h" #include "DecompCutPool.h" #include "DecompConstraintSet.h" +#include "DecompVar.h" using namespace std; @@ -33,7 +32,6 @@ bool DecompWaitingRow::calcViolation(const double* x) }; #endif - #if 0 // --------------------------------------------------------------------- // bool DecompCutPool::isDuplicate(const DecompWaitingRow& wcol) @@ -59,123 +57,119 @@ bool DecompCutPool::isDuplicate(const DecompWaitingRow& wcol) #endif /*-------------------------------------------------------------------------*/ -bool DecompCutPool::calcViolations(const double* x, - DecompCutPool::iterator first, - DecompCutPool::iterator last) -{ - bool found_violated_cut = false; - - for (DecompCutPool::iterator vi = first; vi != last; vi++) { - // --- - // --- calculate and set the violations for the cuts - // --- which are pointed to in this pool, if any have vio > 0, - // --- return true - // --- - found_violated_cut - = (*vi).getCutPtr()->calcViolation((*vi).getRowPtr(), x) ? - true : found_violated_cut; - } - - return found_violated_cut; +bool DecompCutPool::calcViolations(const double *x, + DecompCutPool::iterator first, + DecompCutPool::iterator last) { + bool found_violated_cut = false; + + for (DecompCutPool::iterator vi = first; vi != last; vi++) { + // --- + // --- calculate and set the violations for the cuts + // --- which are pointed to in this pool, if any have vio > 0, + // --- return true + // --- + found_violated_cut = (*vi).getCutPtr()->calcViolation((*vi).getRowPtr(), x) + ? true + : found_violated_cut; + } + + return found_violated_cut; } /*-------------------------------------------------------------------------*/ -void DecompCutPool::reExpand(const DecompVarList& vars, - const int n_coreCols, - const int n_artCols) -{ - //--- - //--- For each waiting row in the cut pool, we need to reset - //--- the row in the current master LP (in terms of reformulation) - //--- to take into account any new columns. - //--- - DecompCutPool::iterator vi; - - for (vi = begin(); vi != end(); vi++) { - //only need to do this reformulation in PC... - //make this re-expansion a function? - also called in addCutsToPool - CoinPackedVector* rowReform = createRowReform(n_coreCols, - //n_artCols, - (*vi).getRowPtr(), - vars); - - if (!rowReform) { - assert(0); - vi = erase(vi);//THINK... - } else { - (*vi).deleteRowReform(); - (*vi).setRowReform(rowReform); - } - - //THINK: once we reexpand, do we need to reset violation? - //no, this is done next.... this section just "re-expands" - } - - setRowsAreValid(true); +void DecompCutPool::reExpand(const DecompVarList &vars, const int n_coreCols, + const int n_artCols) { + //--- + //--- For each waiting row in the cut pool, we need to reset + //--- the row in the current master LP (in terms of reformulation) + //--- to take into account any new columns. + //--- + DecompCutPool::iterator vi; + + for (vi = begin(); vi != end(); vi++) { + // only need to do this reformulation in PC... + // make this re-expansion a function? - also called in addCutsToPool + CoinPackedVector *rowReform = createRowReform(n_coreCols, + // n_artCols, + (*vi).getRowPtr(), vars); + + if (!rowReform) { + assert(0); + vi = erase(vi); // THINK... + } else { + (*vi).deleteRowReform(); + (*vi).setRowReform(rowReform); + } + + // THINK: once we reexpand, do we need to reset violation? + // no, this is done next.... 
this section just "re-expands" + } + + setRowsAreValid(true); } /*------------------------------------------------------------------------*/ -CoinPackedVector* -DecompCutPool::createRowReform(const int n_coreCols, - //const int n_artCols, - const CoinPackedVector* row, //x-space - const DecompVarList& vars) -{ - //--- - //--- Create a dense row from the original sparse row (in terms of x). - //--- - double* rowDense = row->denseVector(n_coreCols); - //--- - //--- In order to expand to the reformulated row (in terms of lambda), - //--- we need to substitute x = sum{s in F'} s lambda[s] - //--- - //--- Example - Given a cut: - //--- a[1]x[1] + a[2]x[2] >= b - //--- a[1]x[1] = a[1] (s1[1] lam[1] + s2[1] lam[2]) - //--- a[2]x[2] = a[2] (s1[2] lam[1] + s2[2] lam[2]) - //--- So, lam[1]'s coeff = a[1] s1[1] + a[2] s1[2] - //--- lam[2]'s coeff = a[1] s2[1] + a[2] s2[2] - //--- - double coeff; - int colIndex; - CoinPackedVector* rowReform = new CoinPackedVector(); - //--- - //--- for each variable (non-artificial), dot product the dense row (in terms of x) - //--- with the incidence vector of the variable (var->m_s) to get the coefficient - //--- in lambda space - //--- - DecompVarList::const_iterator vli; - vector noNames; - - for (vli = vars.begin(); vli != vars.end(); vli++) { - //printf("REFORM ROW for CUT on var master index = %d\n", - // (*vli)->getColMasterIndex()); - //UtilPrintPackedVector((*vli)->m_s, &cout, - // noNames, - // rowDense); - coeff = (*vli)->m_s.dotProduct(rowDense); - - //printf("COEFF using dotProduct = %12.10f\n", coeff); - if (fabs(coeff) > DecompZero) { - colIndex = (*vli)->getColMasterIndex(); - rowReform->insert(colIndex, coeff); - } - } - - //assert(rowReform->getNumElements() > 0); - //--- - //--- delete the temporary memory - //--- - UTIL_DELARR(rowDense); - return rowReform; +CoinPackedVector * +DecompCutPool::createRowReform(const int n_coreCols, + // const int n_artCols, + const CoinPackedVector *row, // x-space + const DecompVarList &vars) { + //--- + //--- Create a dense row from the original sparse row (in terms of x). 
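// A tiny numeric illustration of the substitution performed in this
// routine, using a made-up cut and two made-up extreme points: for the cut
// 3 x[0] + 2 x[1] >= b and columns s1 = (1, 0), s2 = (1, 1), substituting
// x = s1*lam[1] + s2*lam[2] gives lam[1] the coefficient 3*1 + 2*0 = 3 and
// lam[2] the coefficient 3*1 + 2*1 = 5.  Each lambda coefficient is just the
// dot product of the cut row with that column's incidence vector, which is
// what the dotProduct call below computes.
#include <vector>

double lambdaCoeff(const std::vector<double> &cutRowDense,
                   const std::vector<double> &extremePoint) {
   double coeff = 0.0;
   for (size_t j = 0; j < cutRowDense.size(); j++) {
      coeff += cutRowDense[j] * extremePoint[j];
   }
   return coeff;   // kept in the reformulated row only if it is nonzero
}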
+ //--- + double *rowDense = row->denseVector(n_coreCols); + //--- + //--- In order to expand to the reformulated row (in terms of lambda), + //--- we need to substitute x = sum{s in F'} s lambda[s] + //--- + //--- Example - Given a cut: + //--- a[1]x[1] + a[2]x[2] >= b + //--- a[1]x[1] = a[1] (s1[1] lam[1] + s2[1] lam[2]) + //--- a[2]x[2] = a[2] (s1[2] lam[1] + s2[2] lam[2]) + //--- So, lam[1]'s coeff = a[1] s1[1] + a[2] s1[2] + //--- lam[2]'s coeff = a[1] s2[1] + a[2] s2[2] + //--- + double coeff; + int colIndex; + CoinPackedVector *rowReform = new CoinPackedVector(); + //--- + //--- for each variable (non-artificial), dot product the dense row (in terms + // of x) + //--- with the incidence vector of the variable (var->m_s) to get the + // coefficient + //--- in lambda space + //--- + DecompVarList::const_iterator vli; + vector noNames; + + for (vli = vars.begin(); vli != vars.end(); vli++) { + // printf("REFORM ROW for CUT on var master index = %d\n", + // (*vli)->getColMasterIndex()); + // UtilPrintPackedVector((*vli)->m_s, &cout, + // noNames, + // rowDense); + coeff = (*vli)->m_s.dotProduct(rowDense); + + // printf("COEFF using dotProduct = %12.10f\n", coeff); + if (fabs(coeff) > DecompZero) { + colIndex = (*vli)->getColMasterIndex(); + rowReform->insert(colIndex, coeff); + } + } + + // assert(rowReform->getNumElements() > 0); + //--- + //--- delete the temporary memory + //--- + UTIL_DELARR(rowDense); + return rowReform; } // --------------------------------------------------------------------- // -void DecompCutPool::print(ostream* os) const -{ - vector::const_iterator vi; +void DecompCutPool::print(ostream *os) const { + vector::const_iterator vi; - for (vi = begin(); vi != end(); vi++) { - (*vi).getCutPtr()->print(os); - } + for (vi = begin(); vi != end(); vi++) { + (*vi).getCutPtr()->print(os); + } } diff --git a/Dip/src/DecompDebug.cpp b/Dip/src/DecompDebug.cpp index 98d980e8..4dc0e438 100644 --- a/Dip/src/DecompDebug.cpp +++ b/Dip/src/DecompDebug.cpp @@ -12,298 +12,281 @@ // All Rights Reserved. // //===========================================================================// -#include "DecompApp.h" #include "DecompAlgo.h" #include "DecompAlgoC.h" +#include "DecompApp.h" using namespace std; //===========================================================================// -bool DecompAlgo::checkPointFeasible(const DecompConstraintSet* model, - const double* x) -{ - //--- - //--- sanity check - //--- Does the recomposed solution (x*) satisfy the core - //--- constraints. If not, but in master solver OR in the - //--- process of recomposed (the map). - //--- - const CoinPackedMatrix* M = model->getMatrix(); - - if (!M) { - return true; - } - - int i; - double actViol; - double relViol; - int precision = 7; - bool isFeas = true; - bool hasColNames = false; - bool hasRowNames = false; - const int nCols = model->getNumCols(); - const int nRows = model->getNumRows(); - const double* colLB = model->getColLB(); - const double* colUB = model->getColUB(); - const double* rowLB = model->getRowLB(); - const double* rowUB = model->getRowUB(); - const vector& colNames = model->getColNames(); - const vector& rowNames = model->getRowNames(); - double* ax = new double[nRows]; - assert(M); - assert(ax); - - if (colNames.size()) { - hasColNames = true; - } +bool DecompAlgo::checkPointFeasible(const DecompConstraintSet *model, + const double *x) { + //--- + //--- sanity check + //--- Does the recomposed solution (x*) satisfy the core + //--- constraints. 
If not, but in master solver OR in the + //--- process of recomposed (the map). + //--- + const CoinPackedMatrix *M = model->getMatrix(); + + if (!M) { + return true; + } + + int i; + double actViol; + double relViol; + int precision = 7; + bool isFeas = true; + bool hasColNames = false; + bool hasRowNames = false; + const int nCols = model->getNumCols(); + const int nRows = model->getNumRows(); + const double *colLB = model->getColLB(); + const double *colUB = model->getColUB(); + const double *rowLB = model->getRowLB(); + const double *rowUB = model->getRowUB(); + const vector &colNames = model->getColNames(); + const vector &rowNames = model->getRowNames(); + double *ax = new double[nRows]; + assert(M); + assert(ax); + + if (colNames.size()) { + hasColNames = true; + } + + if (rowNames.size()) { + hasRowNames = true; + } + + //--- + //--- check column bounds + //--- + for (i = 0; i < nCols; i++) { + actViol = std::max(colLB[i] - x[i], x[i] - colUB[i]); + actViol = std::max(actViol, 0.0); + + if (UtilIsZero(x[i], 1.0e-3) || (x[i] < 0 && UtilIsZero(colLB[i])) || + (x[i] > 0 && UtilIsZero(colUB[i]))) { + relViol = actViol; + } else { + relViol = actViol / std::fabs(x[i]); + } + + if (relViol > 0.0001) { // 0.01% violated + (*m_osLog) << "Point violates column " << i; + + if (hasColNames) { + (*m_osLog) << " -> " << colNames[i]; + } - if (rowNames.size()) { - hasRowNames = true; - } + (*m_osLog) << " LB= " << UtilDblToStr(colLB[i], precision) + << " x= " << UtilDblToStr(x[i], precision) + << " UB= " << UtilDblToStr(colUB[i], precision) + << " RelViol= " << UtilDblToStr(relViol, precision) << endl; - //--- - //--- check column bounds - //--- - for (i = 0; i < nCols; i++) { - actViol = std::max(colLB[i] - x[i], x[i] - colUB[i]); - actViol = std::max(actViol, 0.0); - - if (UtilIsZero(x[i], 1.0e-3) || - (x[i] < 0 && UtilIsZero(colLB[i])) || - (x[i] > 0 && UtilIsZero(colUB[i]))) { - relViol = actViol; - } else { - relViol = actViol / std::fabs(x[i]); + //>1% violation is probably a bug, but <1% could be just + // round off error??? not sure about that + if (relViol > 0.01) { + isFeas = false; + } + } + } + + //--- + //--- M * x = ax + //--- + M->times(x, ax); + //--- + //--- check row bounds + //--- + //--- Need to deal with masterOnly variable + + for (i = 0; i < nRows; i++) { + actViol = std::max(rowLB[i] - ax[i], ax[i] - rowUB[i]); + // printf("ax=%12.10f, actViol=%12.10f\n", ax[i], actViol); + actViol = std::max(actViol, 0.0); + + // printf(" actViol=%12.10f\n", actViol); + if (m_param.LogDebugLevel >= 4) { + CoinShallowPackedVector row = M->getVector(i); + (*m_osLog) << "Row i: " << i; + + if (hasRowNames) { + (*m_osLog) << " -> " << rowNames[i]; } - if (relViol > 0.0001) { //0.01% violated - (*m_osLog) << "Point violates column " << i; - - if (hasColNames) { - (*m_osLog) << " -> " << colNames[i]; - } + (*m_osLog) << " LB= " << UtilDblToStr(rowLB[i], precision) + << " ax= " << UtilDblToStr(ax[i], precision) + << " UB= " << UtilDblToStr(rowUB[i], precision) << endl; + // UtilPrintPackedVector(row); + } - (*m_osLog) << " LB= " << UtilDblToStr(colLB[i], precision) - << " x= " << UtilDblToStr(x[i], precision) - << " UB= " << UtilDblToStr(colUB[i], precision) - << " RelViol= " << UtilDblToStr(relViol, precision) - << endl; + if (UtilIsZero(ax[i], 1.0e-3) || (ax[i] < 0 && UtilIsZero(rowLB[i])) || + (ax[i] > 0 && UtilIsZero(rowUB[i]))) { + relViol = actViol; + } else { + relViol = actViol / std::fabs(ax[i]); + } - //>1% violation is probably a bug, but <1% could be just - // round off error??? 
not sure about that - if (relViol > 0.01) { - isFeas = false; - } - } - } + if (relViol > 0.005) { // 0.5% violated + (*m_osLog) << "Point violates row " << i; - //--- - //--- M * x = ax - //--- - M->times(x, ax); - //--- - //--- check row bounds - //--- - //--- Need to deal with masterOnly variable - - for (i = 0; i < nRows; i++) { - actViol = std::max(rowLB[i] - ax[i], ax[i] - rowUB[i]); - //printf("ax=%12.10f, actViol=%12.10f\n", ax[i], actViol); - actViol = std::max(actViol, 0.0); - - //printf(" actViol=%12.10f\n", actViol); - if (m_param.LogDebugLevel >= 4) { - CoinShallowPackedVector row = M->getVector(i); - (*m_osLog) << "Row i: " << i; - - if (hasRowNames) { - (*m_osLog) << " -> " << rowNames[i]; - } - - (*m_osLog) << " LB= " << UtilDblToStr(rowLB[i], precision) - << " ax= " << UtilDblToStr(ax[i], precision) - << " UB= " << UtilDblToStr(rowUB[i], precision) << endl; - //UtilPrintPackedVector(row); + if (hasRowNames) { + (*m_osLog) << " -> " << rowNames[i]; } - if (UtilIsZero(ax[i], 1.0e-3) || - (ax[i] < 0 && UtilIsZero(rowLB[i])) || + (*m_osLog) << " LB= " << UtilDblToStr(rowLB[i], precision) + << " ax= " << UtilDblToStr(ax[i], precision) + << " UB= " << UtilDblToStr(rowUB[i], precision) + << " RelViol= " << UtilDblToStr(relViol, precision) << endl; + + //>5% violation is probably a bug, but <5% could be just + // round off error??? not sure about that + if (relViol > 0.05) { + isFeas = false; + + //--- + //--- if special case of relViol=actViol, + //--- then check to see if possible round off issues + //--- e.g., harp2 a[j]=1.0e9, actViol=1.0e3 is OK + //--- + if (UtilIsZero(ax[i], 1.0e-3) || (ax[i] < 0 && UtilIsZero(rowLB[i])) || (ax[i] > 0 && UtilIsZero(rowUB[i]))) { - relViol = actViol; - } else { - relViol = actViol / std::fabs(ax[i]); - } - - if (relViol > 0.005) { //0.5% violated - (*m_osLog) << "Point violates row " << i; - - if (hasRowNames) { - (*m_osLog) << " -> " << rowNames[i]; - } - - (*m_osLog) << " LB= " << UtilDblToStr(rowLB[i], precision) - << " ax= " << UtilDblToStr(ax[i], precision) - << " UB= " << UtilDblToStr(rowUB[i], precision) - << " RelViol= " << UtilDblToStr(relViol, precision) - << endl; - - //>5% violation is probably a bug, but <5% could be just - // round off error??? 
not sure about that - if (relViol > 0.05) { - isFeas = false; - - //--- - //--- if special case of relViol=actViol, - //--- then check to see if possible round off issues - //--- e.g., harp2 a[j]=1.0e9, actViol=1.0e3 is OK - //--- - if (UtilIsZero(ax[i], 1.0e-3) || - (ax[i] < 0 && UtilIsZero(rowLB[i])) || - (ax[i] > 0 && UtilIsZero(rowUB[i]))) { - int k; - CoinShallowPackedVector row = M->getVector(i); - const int numNZ = row.getNumElements(); - const double* els = row.getElements(); - - for (k = 0; k < numNZ; k++) { - if (fabs(els[k]) > 1.0e7) { - (*m_osLog) << " row has a big coefficient " - << els[k] << endl; - isFeas = true; - break; - } - } + int k; + CoinShallowPackedVector row = M->getVector(i); + const int numNZ = row.getNumElements(); + const double *els = row.getElements(); + + for (k = 0; k < numNZ; k++) { + if (fabs(els[k]) > 1.0e7) { + (*m_osLog) << " row has a big coefficient " << els[k] << endl; + isFeas = true; + break; } - } + } + } } - } + } + } - UTIL_DELARR(ax); - return isFeas; + UTIL_DELARR(ax); + return isFeas; } //===========================================================================// -void DecompAlgo::checkMasterDualObj() -{ - const int nRows = m_masterSI->getNumRows(); - const double* rowRhs = m_masterSI->getRightHandSide(); - const double* dual = m_masterSI->getRowPrice(); - const double primalObj = m_masterSI->getObjValue(); - double dualObj = 0.0; - const int nCols = m_masterSI->getNumCols(); - const double* rc = m_masterSI->getReducedCost(); - const double* colLower = m_masterSI->getColLower(); - const double* colUpper = m_masterSI->getColUpper(); - //rStat might not be needed now, but will be needed - // when we support ranged rows. - int* rStat = new int[nRows]; - int* cStat = new int[nCols]; - m_masterSI->getBasisStatus(cStat, rStat); - - for (int c = 0; c < nCols; c++) { - if (cStat[c] == 3) { - dualObj += rc[c] * colLower[c]; - } else if (cStat[c] == 2 ) { - dualObj += rc[c] * colUpper[c]; - } - } - - for (int r = 0; r < nRows; r++) { - dualObj += dual[r] * rowRhs[r]; - } - - UTIL_DEBUG(m_param.LogDebugLevel, 4, - (*m_osLog) - << "checkMasterDualObj" - << setw(10) << "primalObj=" - << setw(10) << UtilDblToStr(primalObj, 3) - << setw(10) << "dualObj=" - << setw(10) << UtilDblToStr(dualObj, 3) << endl; - ); - double actViol = std::fabs(primalObj - dualObj); - double relViol = actViol; - - if (!UtilIsZero(dualObj, 1.0e-3)) { - relViol = actViol / std::fabs(dualObj); - } - - if (relViol > 1.0e-4) { - cerr << "checkMasterDualObj" - << setw(10) << "primalObj=" - << setw(10) << UtilDblToStr(primalObj, 3) - << setw(10) << "dualObj=" - << setw(10) << UtilDblToStr(dualObj, 3) << endl; - throw UtilException("primal and dual obj do not match", - "checkMasterDualObj", "DecompAlgo"); - } - - UTIL_DELARR(rStat); - UTIL_DELARR(cStat); +void DecompAlgo::checkMasterDualObj() { + const int nRows = m_masterSI->getNumRows(); + const double *rowRhs = m_masterSI->getRightHandSide(); + const double *dual = m_masterSI->getRowPrice(); + const double primalObj = m_masterSI->getObjValue(); + double dualObj = 0.0; + const int nCols = m_masterSI->getNumCols(); + const double *rc = m_masterSI->getReducedCost(); + const double *colLower = m_masterSI->getColLower(); + const double *colUpper = m_masterSI->getColUpper(); + // rStat might not be needed now, but will be needed + // when we support ranged rows. 
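// A compact sketch of the LP identity verified by this routine: at an
// optimal basis the primal objective equals y^T b plus, for every non-basic
// column, the reduced cost times the bound that column sits at (basic
// columns have zero reduced cost).  The flat arrays below are hypothetical
// stand-ins for the solver queries used in the real check.
#include <vector>

double dualObjectiveSketch(const std::vector<double> &dual,
                           const std::vector<double> &rowRhs,
                           const std::vector<double> &nonbasicRc,
                           const std::vector<double> &nonbasicBound) {
   double dualObj = 0.0;
   for (size_t r = 0; r < dual.size(); r++) {
      dualObj += dual[r] * rowRhs[r];                 // y^T b
   }
   for (size_t k = 0; k < nonbasicRc.size(); k++) {
      dualObj += nonbasicRc[k] * nonbasicBound[k];    // rc at the active bound
   }
   return dualObj;   // should match the primal objective up to tolerance
}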
+ int *rStat = new int[nRows]; + int *cStat = new int[nCols]; + m_masterSI->getBasisStatus(cStat, rStat); + + for (int c = 0; c < nCols; c++) { + if (cStat[c] == 3) { + dualObj += rc[c] * colLower[c]; + } else if (cStat[c] == 2) { + dualObj += rc[c] * colUpper[c]; + } + } + + for (int r = 0; r < nRows; r++) { + dualObj += dual[r] * rowRhs[r]; + } + + UTIL_DEBUG(m_param.LogDebugLevel, 4, + (*m_osLog) << "checkMasterDualObj" << setw(10) << "primalObj=" + << setw(10) << UtilDblToStr(primalObj, 3) << setw(10) + << "dualObj=" << setw(10) << UtilDblToStr(dualObj, 3) + << endl;); + double actViol = std::fabs(primalObj - dualObj); + double relViol = actViol; + + if (!UtilIsZero(dualObj, 1.0e-3)) { + relViol = actViol / std::fabs(dualObj); + } + + if (relViol > 1.0e-4) { + cerr << "checkMasterDualObj" << setw(10) << "primalObj=" << setw(10) + << UtilDblToStr(primalObj, 3) << setw(10) << "dualObj=" << setw(10) + << UtilDblToStr(dualObj, 3) << endl; + throw UtilException("primal and dual obj do not match", + "checkMasterDualObj", "DecompAlgo"); + } + + UTIL_DELARR(rStat); + UTIL_DELARR(cStat); } //===========================================================================// -bool DecompAlgo::isDualRayInfProof(const double* dualRay, - const CoinPackedMatrix* rowMatrix, - const double* colLB, - const double* colUB, - const double* rowRhs, - ostream* os) -{ - //--- - //--- Does dualRay provide a proof according to Farkas Lemma? - //--- yA >= 0, yb < 0, or - //--- yA <= 0, yb > 0 ?? - //--- - int i; - double yb; - bool isProof = true; - bool ybPos = true; - double* yA = 0; - const int m = rowMatrix->getNumRows(); - const int n = rowMatrix->getNumCols(); - //y^T b - yb = 0.0; - - for (i = 0; i < m; i++) { - yb += dualRay[i] * rowRhs[i]; - +bool DecompAlgo::isDualRayInfProof(const double *dualRay, + const CoinPackedMatrix *rowMatrix, + const double *colLB, const double *colUB, + const double *rowRhs, ostream *os) { + //--- + //--- Does dualRay provide a proof according to Farkas Lemma? + //--- yA >= 0, yb < 0, or + //--- yA <= 0, yb > 0 ?? + //--- + int i; + double yb; + bool isProof = true; + bool ybPos = true; + double *yA = 0; + const int m = rowMatrix->getNumRows(); + const int n = rowMatrix->getNumCols(); + // y^T b + yb = 0.0; + + for (i = 0; i < m; i++) { + yb += dualRay[i] * rowRhs[i]; + + if (os) { + (*os) << "i : " << i << " dualRay = " << dualRay[i] + << " rowRhs = " << rowRhs[i] << " yb = " << yb << endl; + } + } + + // TODO: tol + if (yb > 1.0e-10) { + ybPos = true; + } else if (yb < -1.0e-10) { + ybPos = false; + } else { + return isProof; + } + + yA = new double[n]; + rowMatrix->transposeTimes(dualRay, yA); // y^T A + + for (i = 0; i < n; i++) { + if (os) { + (*os) << "yA[" << i << "]:\t" << yA[i]; + } + + // TODO: tol 1.0e-6 is too tight? + if ((ybPos && (yA[i] > 1.0e-2)) || (!ybPos && (yA[i] < -1.0e-2))) { if (os) { - (*os) << "i : " << i << " dualRay = " << dualRay[i] - << " rowRhs = " << rowRhs[i] << " yb = " << yb << endl; + (*os) << " -->isProof (false)" << endl; } - } - - //TODO: tol - if (yb > 1.0e-10) { - ybPos = true; - } else if (yb < -1.0e-10) { - ybPos = false; - } else { - return isProof; - } - - yA = new double[n]; - rowMatrix->transposeTimes(dualRay, yA); //y^T A - - for (i = 0; i < n; i++) { - if (os) { - (*os) << "yA[" << i << "]:\t" << yA[i]; - } - - //TODO: tol 1.0e-6 is too tight? 
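// A standalone sketch of the Farkas check carried out here, with
// hypothetical names: a dual ray y certifies infeasibility of
// { x : Ax >= b } when y^T A <= 0 and y^T b > 0 (or the mirror case with
// both signs flipped), checked below with the same loose tolerances.
#include <cmath>
#include <vector>

bool looksLikeFarkasProof(const std::vector<double> &yA, double yb,
                          double tolB = 1.0e-10, double tolA = 1.0e-2) {
   if (std::fabs(yb) <= tolB) {
      return true;   // |y^T b| ~ 0: the routine above simply accepts this case
   }
   bool ybPos = (yb > 0.0);
   for (size_t j = 0; j < yA.size(); j++) {
      // a component of y^T A with the "wrong" sign breaks the certificate
      if ((ybPos && yA[j] > tolA) || (!ybPos && yA[j] < -tolA)) {
         return false;
      }
   }
   return true;
}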
- if ((ybPos && (yA[i] > 1.0e-2)) || - (!ybPos && (yA[i] < -1.0e-2))) { - if (os) { - (*os) << " -->isProof (false)" << endl; - } - isProof = false; - } else if (os) { - (*os) << endl; - } - } + isProof = false; + } else if (os) { + (*os) << endl; + } + } - UTIL_DELARR(yA); + UTIL_DELARR(yA); #if 0 //sanity check @@ -312,399 +295,375 @@ bool DecompAlgo::isDualRayInfProof(const double* dualRay, = isDualRayInfProofCpx(dualRay, rowMatrix, colLB, colUB, rowRhs, os); #endif - return isProof; + return isProof; } //===========================================================================// -bool DecompAlgo::isDualRayInfProofCpx(const double* dualRay, - const CoinPackedMatrix* rowMatrix, - const double* colLB, - const double* colUB, - const double* rowRhs, - ostream* os) -{ - //--- - //--- Assume: - //--- Ax >= b - //--- y^T Ax >= y^T b, y >= 0 (for >=) - //--- - //--- Let z[j] = u[j], if y^T A[j] > 0 - //--- = l[j], if y^T A[j] < 0 - //--- = arbitrary, otherwise - //--- - //--- Then, WHY? - //--- y^T b - y^T A z > 0 ==> contradiction - //--- - //--- proof_p = y^T b - y^T A z > 0 - //--- - //--- So, we want to maximize y^T A x to break the proof. - //--- - int i, j; - double yb, yAz; - double* yA = 0; - double* z = 0; - const int m = rowMatrix->getNumRows(); - const int n = rowMatrix->getNumCols(); - //TODO: check for out-of-mem conditions? - yA = new double[n]; - UtilFillN(yA, n, 0.0); - double* yA2 = new double[n]; - rowMatrix->transposeTimes(dualRay, yA2); //y^T A - - for (i = 0; i < m; i++) { - double yA_i = 0; - CoinShallowPackedVector pv = rowMatrix->getVector(i); - const int* indI = pv.getIndices(); - const double* elsI = pv.getElements(); - const int lenI = pv.getNumElements(); - - for (int j = 0; j < lenI; j++) { - yA_i += dualRay[indI[j]] * elsI[j]; - printf("i: %d, j: %d, indIj: %d, elsIj: %g ray: %g yA_i: %g\n", - i, j, indI[j], elsI[j], dualRay[indI[j]], yA_i); - } - - yA[i] = yA_i; - - if (!UtilIsZero(yA[i] - yA2[i])) { - printf(" ---> yA: %g, yA2: %g\n", yA[i], yA2[i]); - } - - fflush(stdout); - CoinAssert(UtilIsZero(yA[i] - yA2[i])); - } - - z = new double[n]; - - for (j = 0; j < n; j++) { - if (yA[j] >= 0) { - z[j] = CoinMin(1.0e20, colUB[j]); - } else { - z[j] = colLB[j]; - } - } - - //y^T b - yb = 0.0; - - for (i = 0; i < m; i++) { - yb += dualRay[i] * rowRhs[i]; - - if (os) - (*os) << "\ni : " << i << " dualRay = " << dualRay[i] - << " rowRhs = " << rowRhs[i] << " yb = " << yb; - } - - //y^T A z - yAz = 0.0; - - for (j = 0; j < n; j++) { - yAz += yA[j] * z[j]; - - if (os) - (*os) << "\nj : " << j << " yA = " << yA[j] - << " z = " << z[j] << " yAz = " << yAz; - } - - if (os) { - (*os) << "\nyb - yAz = " << yb - yAz << endl; - } - - UTIL_DELARR(yA); - UTIL_DELARR(z); - - //TODO: tol - if (yb - yAz > 1.0e-3) { - return true; - } else { - return false; - } +bool DecompAlgo::isDualRayInfProofCpx(const double *dualRay, + const CoinPackedMatrix *rowMatrix, + const double *colLB, const double *colUB, + const double *rowRhs, ostream *os) { + //--- + //--- Assume: + //--- Ax >= b + //--- y^T Ax >= y^T b, y >= 0 (for >=) + //--- + //--- Let z[j] = u[j], if y^T A[j] > 0 + //--- = l[j], if y^T A[j] < 0 + //--- = arbitrary, otherwise + //--- + //--- Then, WHY? + //--- y^T b - y^T A z > 0 ==> contradiction + //--- + //--- proof_p = y^T b - y^T A z > 0 + //--- + //--- So, we want to maximize y^T A x to break the proof. 
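+  //---
+  //--- Example: for Ax >= b with A = [1 1], b = 3 and 0 <= x <= 1,
+  //---          the ray y = 1 gives y^T b = 3, z = (1,1), y^T A z = 2,
+  //---          so y^T b - y^T A z = 1 > 0, certifying infeasibility.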
+ //--- + int i, j; + double yb, yAz; + double *yA = 0; + double *z = 0; + const int m = rowMatrix->getNumRows(); + const int n = rowMatrix->getNumCols(); + // TODO: check for out-of-mem conditions? + yA = new double[n]; + UtilFillN(yA, n, 0.0); + double *yA2 = new double[n]; + rowMatrix->transposeTimes(dualRay, yA2); // y^T A + + for (i = 0; i < m; i++) { + double yA_i = 0; + CoinShallowPackedVector pv = rowMatrix->getVector(i); + const int *indI = pv.getIndices(); + const double *elsI = pv.getElements(); + const int lenI = pv.getNumElements(); + + for (int j = 0; j < lenI; j++) { + yA_i += dualRay[indI[j]] * elsI[j]; + printf("i: %d, j: %d, indIj: %d, elsIj: %g ray: %g yA_i: %g\n", i, j, + indI[j], elsI[j], dualRay[indI[j]], yA_i); + } + + yA[i] = yA_i; + + if (!UtilIsZero(yA[i] - yA2[i])) { + printf(" ---> yA: %g, yA2: %g\n", yA[i], yA2[i]); + } + + fflush(stdout); + CoinAssert(UtilIsZero(yA[i] - yA2[i])); + } + + z = new double[n]; + + for (j = 0; j < n; j++) { + if (yA[j] >= 0) { + z[j] = CoinMin(1.0e20, colUB[j]); + } else { + z[j] = colLB[j]; + } + } + + // y^T b + yb = 0.0; + + for (i = 0; i < m; i++) { + yb += dualRay[i] * rowRhs[i]; + + if (os) + (*os) << "\ni : " << i << " dualRay = " << dualRay[i] + << " rowRhs = " << rowRhs[i] << " yb = " << yb; + } + + // y^T A z + yAz = 0.0; + + for (j = 0; j < n; j++) { + yAz += yA[j] * z[j]; + + if (os) + (*os) << "\nj : " << j << " yA = " << yA[j] << " z = " << z[j] + << " yAz = " << yAz; + } + + if (os) { + (*os) << "\nyb - yAz = " << yb - yAz << endl; + } + + UTIL_DELARR(yA); + UTIL_DELARR(z); + + // TODO: tol + if (yb - yAz > 1.0e-3) { + return true; + } else { + return false; + } } //===========================================================================// -void DecompAlgo::printBasisInfo(OsiSolverInterface* si, - ostream* os) -{ - int b, r, c; - int* basics = 0; - int* rstat = 0; - int* cstat = 0; - double* bInvRow = 0; - double* bInvARow = 0; - const int n = si->getNumCols(); - const int m = si->getNumRows(); - char type[4] = {'F', 'B', 'U', 'L'}; - //TODO: have to check sense? - const double* rowRhs = si->getRightHandSide(); - basics = new int[m]; - bInvRow = new double[m]; - bInvARow = new double[n]; - rstat = new int[m]; - cstat = new int[n]; - si->enableSimplexInterface(false); - si->getBasics(basics); - (*os) << "\n\nBasics: "; - - for (b = 0; b < m; b++) { - (*os) << basics[b] << " "; - } - - si->getBasisStatus(cstat, rstat); - (*os) << "\ncstat: "; - - for (c = 0; c < n; c++) { - (*os) << type[cstat[c]]; - } - - (*os) << "\n"; - (*os) << "rstat: "; - - for (r = 0; r < m; r++) { - (*os) << type[rstat[r]]; - } - - (*os) << "\n"; - //yb, where y is a row of B-1 - double yb = 0.0; - (*os) << "\nB-1:"; - - for (r = 0; r < m; r++) { - yb = 0.0; - si->getBInvRow(r, bInvRow); - (*os) << "\nB-1Row r: " << r << ": "; - - for (b = 0; b < m; b++) { - (*os) << bInvRow[b] << " "; - //rowRhs is just orig row rhs? or change based on who is basic? - yb += bInvRow[b] * rowRhs[b]; - } - - (*os) << " ---> yb: " << yb; - } - - //all pos case? if yb < 0 - //all neg case? if yb > 0 - // what if yb=0? 
- (*os) << "\nB-1A:"; - bool allpos = true; - bool allneg = true; - - for (r = 0; r < m; r++) { - si->getBInvARow(r, bInvARow); - (*os) << "\nB-1ARow r: " << r << ": "; - allpos = true; - allneg = true; - - for (c = 0; c < n; c++) { - (*os) << bInvARow[c] << " "; - - if (bInvARow[c] < 0) { - allpos = false; - } - - if (bInvARow[c] > 0) { - allneg = false; - } +void DecompAlgo::printBasisInfo(OsiSolverInterface *si, ostream *os) { + int b, r, c; + int *basics = 0; + int *rstat = 0; + int *cstat = 0; + double *bInvRow = 0; + double *bInvARow = 0; + const int n = si->getNumCols(); + const int m = si->getNumRows(); + char type[4] = {'F', 'B', 'U', 'L'}; + // TODO: have to check sense? + const double *rowRhs = si->getRightHandSide(); + basics = new int[m]; + bInvRow = new double[m]; + bInvARow = new double[n]; + rstat = new int[m]; + cstat = new int[n]; + si->enableSimplexInterface(false); + si->getBasics(basics); + (*os) << "\n\nBasics: "; + + for (b = 0; b < m; b++) { + (*os) << basics[b] << " "; + } + + si->getBasisStatus(cstat, rstat); + (*os) << "\ncstat: "; + + for (c = 0; c < n; c++) { + (*os) << type[cstat[c]]; + } + + (*os) << "\n"; + (*os) << "rstat: "; + + for (r = 0; r < m; r++) { + (*os) << type[rstat[r]]; + } + + (*os) << "\n"; + // yb, where y is a row of B-1 + double yb = 0.0; + (*os) << "\nB-1:"; + + for (r = 0; r < m; r++) { + yb = 0.0; + si->getBInvRow(r, bInvRow); + (*os) << "\nB-1Row r: " << r << ": "; + + for (b = 0; b < m; b++) { + (*os) << bInvRow[b] << " "; + // rowRhs is just orig row rhs? or change based on who is basic? + yb += bInvRow[b] * rowRhs[b]; + } + + (*os) << " ---> yb: " << yb; + } + + // all pos case? if yb < 0 + // all neg case? if yb > 0 + // what if yb=0? + (*os) << "\nB-1A:"; + bool allpos = true; + bool allneg = true; + + for (r = 0; r < m; r++) { + si->getBInvARow(r, bInvARow); + (*os) << "\nB-1ARow r: " << r << ": "; + allpos = true; + allneg = true; + + for (c = 0; c < n; c++) { + (*os) << bInvARow[c] << " "; + + if (bInvARow[c] < 0) { + allpos = false; } - if (allpos) { - (*os) << " ---> allpos"; + if (bInvARow[c] > 0) { + allneg = false; } - - if (allneg) { - (*os) << " ---> allneg"; - } - } - - UTIL_DELARR(basics); - UTIL_DELARR(bInvRow); - UTIL_DELARR(bInvARow); - UTIL_DELARR(rstat); - UTIL_DELARR(cstat); - si->disableSimplexInterface(); - //if you do this and want dual ray back, you need to resolve - si->setHintParam(OsiDoPresolveInResolve, false, OsiHintDo); - si->resolve(); - si->setHintParam(OsiDoPresolveInResolve, true, OsiHintDo); + } + + if (allpos) { + (*os) << " ---> allpos"; + } + + if (allneg) { + (*os) << " ---> allneg"; + } + } + + UTIL_DELARR(basics); + UTIL_DELARR(bInvRow); + UTIL_DELARR(bInvARow); + UTIL_DELARR(rstat); + UTIL_DELARR(cstat); + si->disableSimplexInterface(); + // if you do this and want dual ray back, you need to resolve + si->setHintParam(OsiDoPresolveInResolve, false, OsiHintDo); + si->resolve(); + si->setHintParam(OsiDoPresolveInResolve, true, OsiHintDo); } //===========================================================================// -void DecompAlgo::printCurrentProblemDual(OsiSolverInterface* si, - const string baseName, - const int nodeIndex, - const int cutPass, - const int pricePass) -{ - if (!si) { - return; - } - - UtilPrintFuncBegin(m_osLog, m_classTag, - "printCurrentProblemDual()", m_param.LogDebugLevel, 2); - if (m_param.DecompIPSolver == "CPLEX"){ +void DecompAlgo::printCurrentProblemDual(OsiSolverInterface *si, + const string baseName, + const int nodeIndex, const int cutPass, + const int 
pricePass) { + if (!si) { + return; + } + + UtilPrintFuncBegin(m_osLog, m_classTag, "printCurrentProblemDual()", + m_param.LogDebugLevel, 2); + if (m_param.DecompIPSolver == "CPLEX") { #ifdef DIP_HAS_CPX - OsiCpxSolverInterface* siCpx - = dynamic_cast(si); - CPXENVptr env = siCpx->getEnvironmentPtr(); - CPXLPptr lp = siCpx->getLpPtr(OsiCpxSolverInterface::KEEPCACHED_ALL); - string filename = DecompAlgoStr[m_algo] + "_" + baseName - + ".n" + UtilIntToStr(nodeIndex) - + ".c" + UtilIntToStr(cutPass) - + ".p" + UtilIntToStr(pricePass) - + ".dual.mps"; - double objShift; - int status = CPXdualwrite(env, lp, filename.c_str(), &objShift); - - if (status) - throw UtilException("CPXdualwrite failure", - "printCurrentProblemDual", "DecompAlgo"); - - printf("objShift in dual = %g\n", objShift); - UTIL_DEBUG(m_param.LogDebugLevel, 3, - (*m_osLog) << "calling CPXdualwrite filename = " - << filename << endl; - ); + OsiCpxSolverInterface *siCpx = dynamic_cast(si); + CPXENVptr env = siCpx->getEnvironmentPtr(); + CPXLPptr lp = siCpx->getLpPtr(OsiCpxSolverInterface::KEEPCACHED_ALL); + string filename = DecompAlgoStr[m_algo] + "_" + baseName + ".n" + + UtilIntToStr(nodeIndex) + ".c" + UtilIntToStr(cutPass) + + ".p" + UtilIntToStr(pricePass) + ".dual.mps"; + double objShift; + int status = CPXdualwrite(env, lp, filename.c_str(), &objShift); + + if (status) + throw UtilException("CPXdualwrite failure", "printCurrentProblemDual", + "DecompAlgo"); + + printf("objShift in dual = %g\n", objShift); + UTIL_DEBUG(m_param.LogDebugLevel, 3, + (*m_osLog) << "calling CPXdualwrite filename = " << filename + << endl;); #endif - } - UtilPrintFuncEnd(m_osLog, m_classTag, - "printCurrentProblemDual()", m_param.LogDebugLevel, 2); + } + UtilPrintFuncEnd(m_osLog, m_classTag, "printCurrentProblemDual()", + m_param.LogDebugLevel, 2); } //===========================================================================// -void DecompAlgo::printCurrentProblem(const OsiSolverInterface* si, - const string baseName, - const int nodeIndex, - const int cutPass, - const int pricePass, - const int blockId, - const bool printMps, - const bool printLp) -{ - if (!si) { - return; - } - - string fileName = DecompAlgoStr[m_algo] + "_" + baseName - + ".n" + UtilIntToStr(nodeIndex) - + ".c" + UtilIntToStr(cutPass) - + ".p" + UtilIntToStr(pricePass); - - if (blockId != -1) { - fileName += ".b" + UtilIntToStr(blockId); - } - - printCurrentProblem(si, fileName, printMps, printLp); +void DecompAlgo::printCurrentProblem(const OsiSolverInterface *si, + const string baseName, const int nodeIndex, + const int cutPass, const int pricePass, + const int blockId, const bool printMps, + const bool printLp) { + if (!si) { + return; + } + + string fileName = DecompAlgoStr[m_algo] + "_" + baseName + ".n" + + UtilIntToStr(nodeIndex) + ".c" + UtilIntToStr(cutPass) + + ".p" + UtilIntToStr(pricePass); + + if (blockId != -1) { + fileName += ".b" + UtilIntToStr(blockId); + } + + printCurrentProblem(si, fileName, printMps, printLp); } //===========================================================================// -void DecompAlgo::printCurrentProblem(const OsiSolverInterface* si, - const string fileName, - const bool printMps, - const bool printLp) -{ - if (!si) { - return; - } - - UtilPrintFuncBegin(m_osLog, m_classTag, - "printCurrentProblem()", m_param.LogDebugLevel, 2); - UTIL_DEBUG(m_param.LogDebugLevel, 3, - - if (printMps) - (*m_osLog) << "calling writeMps fileName = " - << fileName << endl; - if (printLp) - (*m_osLog) << "calling writeLp fileName = " - << fileName << 
endl; - ); - - - if (m_param.DecompIPSolver == "CPLEX"){ +void DecompAlgo::printCurrentProblem(const OsiSolverInterface *si, + const string fileName, const bool printMps, + const bool printLp) { + if (!si) { + return; + } + + UtilPrintFuncBegin(m_osLog, m_classTag, "printCurrentProblem()", + m_param.LogDebugLevel, 2); + UTIL_DEBUG(m_param.LogDebugLevel, 3, + + if (printMps)(*m_osLog) + << "calling writeMps fileName = " << fileName << endl; + if (printLp)(*m_osLog) + << "calling writeLp fileName = " << fileName << endl;); + + if (m_param.DecompIPSolver == "CPLEX") { #ifdef DIP_HAS_CPX - //--- - //--- There is no derived OsiCpx::writeLp and the base writeLp does not - //--- use names - for some reason (even though they are in Osi memory) - //--- - //--- The characters [] are often used in names but not allowed by - //--- CoinLp writer - so replace them here with (). - //--- - int i = 0; - int nCols = si->getNumCols(); - int nRows = si->getNumRows(); - char** colNamesChar = new char*[nCols]; - char** rowNamesChar = new char*[nRows + 1]; - - for (i = 0; i < nCols; i++) { - string colName = si->getColName(i); - replace(colName.begin(), colName.end(), '[', '('); - replace(colName.begin(), colName.end(), ']', ')'); - colNamesChar[i] = new char[colName.size() + 1]; - copy(colName.begin(), colName.end(), colNamesChar[i]); - colNamesChar[i][colName.size()] = '\0'; - } - - for (i = 0; i < nRows; i++) { - string rowName = si->getRowName(i); - replace(rowName.begin(), rowName.end(), '[', '('); - replace(rowName.begin(), rowName.end(), ']', ')'); - rowNamesChar[i] = new char[rowName.size() + 1]; - copy(rowName.begin(), rowName.end(), rowNamesChar[i]); - rowNamesChar[i][rowName.size()] = '\0'; - } - - string objName = si->getObjName(); - //printf("objname=%s\n", objName.c_str()); - replace(objName.begin(), objName.end(), '[', '('); - replace(objName.begin(), objName.end(), ']', ')'); - rowNamesChar[nRows] = new char[objName.size() + 1]; - copy(objName.begin(), objName.end(), rowNamesChar[nRows]); - rowNamesChar[nRows][objName.size()] = '\0'; - //printf("nRows=%d objname=%s\n", nRows, rowNamesChar[nRows]); - if (printMps) { - string fileNameMps = fileName + ".mps"; - si->writeMpsNative(fileNameMps.c_str(), - const_cast(rowNamesChar), - const_cast(colNamesChar), 1); - } - if (printLp) { - double epsilon = 1e-30; - int numberAcross = 5; - int decimals = 10; - string fileNameLp = fileName + ".lp"; - si->writeLpNative(fileNameLp.c_str(), - rowNamesChar, colNamesChar, - epsilon, numberAcross, decimals); - } - for (int i = 0; i < nCols; i++) { - UTIL_DELARR(colNamesChar[i]); - } - - for (i = 0; i < (nRows + 1); i++) { - UTIL_DELARR(rowNamesChar[i]); - } - - UTIL_DELARR(colNamesChar); - UTIL_DELARR(rowNamesChar); + //--- + //--- There is no derived OsiCpx::writeLp and the base writeLp does not + //--- use names - for some reason (even though they are in Osi memory) + //--- + //--- The characters [] are often used in names but not allowed by + //--- CoinLp writer - so replace them here with (). 
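+  //--- (for example, a column named x[3,7] is written out as x(3,7))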
+ //--- + int i = 0; + int nCols = si->getNumCols(); + int nRows = si->getNumRows(); + char **colNamesChar = new char *[nCols]; + char **rowNamesChar = new char *[nRows + 1]; + + for (i = 0; i < nCols; i++) { + string colName = si->getColName(i); + replace(colName.begin(), colName.end(), '[', '('); + replace(colName.begin(), colName.end(), ']', ')'); + colNamesChar[i] = new char[colName.size() + 1]; + copy(colName.begin(), colName.end(), colNamesChar[i]); + colNamesChar[i][colName.size()] = '\0'; + } + + for (i = 0; i < nRows; i++) { + string rowName = si->getRowName(i); + replace(rowName.begin(), rowName.end(), '[', '('); + replace(rowName.begin(), rowName.end(), ']', ')'); + rowNamesChar[i] = new char[rowName.size() + 1]; + copy(rowName.begin(), rowName.end(), rowNamesChar[i]); + rowNamesChar[i][rowName.size()] = '\0'; + } + + string objName = si->getObjName(); + // printf("objname=%s\n", objName.c_str()); + replace(objName.begin(), objName.end(), '[', '('); + replace(objName.begin(), objName.end(), ']', ')'); + rowNamesChar[nRows] = new char[objName.size() + 1]; + copy(objName.begin(), objName.end(), rowNamesChar[nRows]); + rowNamesChar[nRows][objName.size()] = '\0'; + // printf("nRows=%d objname=%s\n", nRows, rowNamesChar[nRows]); + if (printMps) { + string fileNameMps = fileName + ".mps"; + si->writeMpsNative(fileNameMps.c_str(), + const_cast(rowNamesChar), + const_cast(colNamesChar), 1); + } + if (printLp) { + double epsilon = 1e-30; + int numberAcross = 5; + int decimals = 10; + string fileNameLp = fileName + ".lp"; + si->writeLpNative(fileNameLp.c_str(), rowNamesChar, colNamesChar, epsilon, + numberAcross, decimals); + } + for (int i = 0; i < nCols; i++) { + UTIL_DELARR(colNamesChar[i]); + } + + for (i = 0; i < (nRows + 1); i++) { + UTIL_DELARR(rowNamesChar[i]); + } + + UTIL_DELARR(colNamesChar); + UTIL_DELARR(rowNamesChar); #endif - }else{ - if (printMps) { - si->writeMps(fileName.c_str()); - } - - if (printLp) { - double epsilon = 1e-30; - int numberAcross = 5; - int decimals = 10; - string fileNameLp = fileName + ".lp"; - //This works because the Osi object in this case is OsiClp - // and Clp takes care of transferring the names. - si->writeLp(fileName.c_str(), "lp", - epsilon, numberAcross, decimals); - } - } - - UtilPrintFuncEnd(m_osLog, m_classTag, - "printCurrentProblem()", m_param.LogDebugLevel, 2); + } else { + if (printMps) { + si->writeMps(fileName.c_str()); + } + + if (printLp) { + double epsilon = 1e-30; + int numberAcross = 5; + int decimals = 10; + string fileNameLp = fileName + ".lp"; + // This works because the Osi object in this case is OsiClp + // and Clp takes care of transferring the names. 
+ si->writeLp(fileName.c_str(), "lp", epsilon, numberAcross, decimals); + } + } + + UtilPrintFuncEnd(m_osLog, m_classTag, "printCurrentProblem()", + m_param.LogDebugLevel, 2); } /* @@ -729,597 +688,569 @@ void DecompAlgo::printCurrentProblem(const OsiSolverInterface * si, */ //===========================================================================// -void DecompAlgo::printVars(ostream* os) -{ - DecompVarList::iterator it; - int var_index = 0; - - for (it = m_vars.begin(); it != m_vars.end(); it++) { - (*os) << "VAR " << var_index++ << " : "; - (*it)->print(m_infinity, os, m_app); - (*os) << endl; - } +void DecompAlgo::printVars(ostream *os) { + DecompVarList::iterator it; + int var_index = 0; - (*os) << endl; + for (it = m_vars.begin(); it != m_vars.end(); it++) { + (*os) << "VAR " << var_index++ << " : "; + (*it)->print(m_infinity, os, m_app); + (*os) << endl; + } + + (*os) << endl; } //===========================================================================// -void DecompAlgo::createFullMps(const string fileName) -{ - CoinAssert(m_algo == CUT); - DecompConstraintSet* modelCore = m_modelCore.getModel(); - int n_integerVars = static_cast(modelCore->integerVars.size()); - m_masterSI->setInteger(&modelCore->integerVars[0], n_integerVars); - m_masterSI->writeMps(fileName.c_str()); - m_masterSI->setContinuous(&modelCore->integerVars[0], n_integerVars); +void DecompAlgo::createFullMps(const string fileName) { + CoinAssert(m_algo == CUT); + DecompConstraintSet *modelCore = m_modelCore.getModel(); + int n_integerVars = static_cast(modelCore->integerVars.size()); + m_masterSI->setInteger(&modelCore->integerVars[0], n_integerVars); + m_masterSI->writeMps(fileName.c_str()); + m_masterSI->setContinuous(&modelCore->integerVars[0], n_integerVars); } //===========================================================================// -void DecompAlgo::printCuts(ostream* os) -{ - DecompCutList::iterator it; - int cut_index = 0; - - for (it = m_cuts.begin(); it != m_cuts.end(); it++) { - (*os) << "CUT " << cut_index++ << " : "; - (*it)->print(os); - } +void DecompAlgo::printCuts(ostream *os) { + DecompCutList::iterator it; + int cut_index = 0; + + for (it = m_cuts.begin(); it != m_cuts.end(); it++) { + (*os) << "CUT " << cut_index++ << " : "; + (*it)->print(os); + } - (*os) << endl; + (*os) << endl; } //===========================================================================// -void DecompAlgo::checkDuals() -{ - //--- - //--- sanity check on duals returned - //--- complementary slackness (c-uA)x = 0 - //--- also check that the given reduced cost matches the - //--- hand calculation - //--- - const double* x = m_masterSI->getColSolution(); - const double* pi = m_masterSI->getRowPrice(); - const int nCols = m_masterSI->getNumCols(); - const CoinPackedMatrix* M = m_masterSI->getMatrixByRow(); - double* uA = new double[nCols]; - const double* objC = m_masterSI->getObjCoefficients(); - const double* rcLP = m_masterSI->getReducedCost(); - M->transposeTimes(pi, uA); +void DecompAlgo::checkDuals() { + //--- + //--- sanity check on duals returned + //--- complementary slackness (c-uA)x = 0 + //--- also check that the given reduced cost matches the + //--- hand calculation + //--- + const double *x = m_masterSI->getColSolution(); + const double *pi = m_masterSI->getRowPrice(); + const int nCols = m_masterSI->getNumCols(); + const CoinPackedMatrix *M = m_masterSI->getMatrixByRow(); + double *uA = new double[nCols]; + const double *objC = m_masterSI->getObjCoefficients(); + const double *rcLP = 
m_masterSI->getReducedCost(); + M->transposeTimes(pi, uA); #ifndef DO_INTERIOR - for (int i = 0; i < nCols; i++) { - if (!UtilIsZero( x[i], 1.0e-5 ) && - !UtilIsZero( (objC[i] - uA[i]) * x[i], 1.0e-4 ) ) { - printf("ERR in COMPL-SLACK i:%d objC:%15.10f uA:%15.10f x:%15.10f\n", - i, objC[i], uA[i], x[i]); - fflush(stdout); - assert(0); - } + for (int i = 0; i < nCols; i++) { + if (!UtilIsZero(x[i], 1.0e-5) && + !UtilIsZero((objC[i] - uA[i]) * x[i], 1.0e-4)) { + printf("ERR in COMPL-SLACK i:%d objC:%15.10f uA:%15.10f x:%15.10f\n", i, + objC[i], uA[i], x[i]); + fflush(stdout); + assert(0); + } - if (!UtilIsZero( (objC[i] - uA[i]) - rcLP[i], 1.0e-4 ) ) { - printf("ERR in RC i:%d objC:%15.10f uA:%15.10f RCLP:%15.10f\n", - i, objC[i], uA[i], rcLP[i]); - fflush(stdout); - assert(0); - } - } + if (!UtilIsZero((objC[i] - uA[i]) - rcLP[i], 1.0e-4)) { + printf("ERR in RC i:%d objC:%15.10f uA:%15.10f RCLP:%15.10f\n", i, + objC[i], uA[i], rcLP[i]); + fflush(stdout); + assert(0); + } + } #endif - UTIL_DELARR(uA); - + UTIL_DELARR(uA); } //===========================================================================// -void DecompAlgo::checkReducedCost(const double *u, const double *u_adjusted) -{ - //--- - //--- sanity check - none of the columns currently in master - //--- should have negative reduced cost - //--- m_vars contains the variables (in x-space) that have - //--- been pushed into the master LP (assumes no compression) - //--- - - DecompVarList::iterator it; - int b, var_index = 0; - double* redCostX = NULL; - const double* objC = m_masterSI->getObjCoefficients(); - const double* rcLP = m_masterSI->getReducedCost(); - DecompConstraintSet* modelCore = m_modelCore.getModel(); - const int nCoreCols = modelCore->getNumCols(); - double alpha = 0.0; - int nBaseCoreRows = modelCore->nBaseRows; - const double* origObjective = getOrigObjective(); - - for (it = m_vars.begin(); it != m_vars.end(); it++) { - double redCost = 0.0; - //m_s is a sparse vector in x-space (the column) - //redCostX is a dense vector in x-space (the cost in subproblem) - b = (*it)->getBlockId(); - redCost = (*it)->m_s.dotProduct(redCostX);//?? - - if ( (*it)->getVarType() == DecompVar_Point) { - alpha = u[nBaseCoreRows + b]; - } else if ((*it)->getVarType() == DecompVar_Ray) { - alpha = 0; +void DecompAlgo::checkReducedCost(const double *u, const double *u_adjusted) { + //--- + //--- sanity check - none of the columns currently in master + //--- should have negative reduced cost + //--- m_vars contains the variables (in x-space) that have + //--- been pushed into the master LP (assumes no compression) + //--- + + DecompVarList::iterator it; + int b, var_index = 0; + double *redCostX = NULL; + const double *objC = m_masterSI->getObjCoefficients(); + const double *rcLP = m_masterSI->getReducedCost(); + DecompConstraintSet *modelCore = m_modelCore.getModel(); + const int nCoreCols = modelCore->getNumCols(); + double alpha = 0.0; + int nBaseCoreRows = modelCore->nBaseRows; + const double *origObjective = getOrigObjective(); + + for (it = m_vars.begin(); it != m_vars.end(); it++) { + double redCost = 0.0; + // m_s is a sparse vector in x-space (the column) + // redCostX is a dense vector in x-space (the cost in subproblem) + b = (*it)->getBlockId(); + redCost = (*it)->m_s.dotProduct(redCostX); //?? 
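+    // For a column built from point/ray s of block b, the expected value is
+    //    rc(lambda_s) = (c - u^T A'')^T s - alpha_b  (alpha_b = 0 for rays),
+    // where alpha_b is the dual of block b's convexity row; redCostX is
+    // assumed to hold the dense subproblem cost c - u^T A'' in x-space.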
+ + if ((*it)->getVarType() == DecompVar_Point) { + alpha = u[nBaseCoreRows + b]; + } else if ((*it)->getVarType() == DecompVar_Ray) { + alpha = 0; + } + + assert(m_masterRowType[nBaseCoreRows + b] == DecompRow_Convex); + assert(isMasterColStructural((*it)->getColMasterIndex())); + UTIL_DEBUG(m_app->m_param.LogDebugLevel, 5, + (*m_osLog) << "MasterColIndex = " << setw(6) + << (*it)->getColMasterIndex() << "Block = " << setw(3) + << b << "LPRedCost = " << setw(10) + << UtilDblToStr(rcLP[(*it)->getColMasterIndex()], 5) + << "CalcRedCost = " << setw(10) + << UtilDblToStr(redCost - alpha, 5) + << "ObjCost = " << setw(10) + << UtilDblToStr(objC[(*it)->getColMasterIndex()], 5) + << "Alpha = " << setw(10) << UtilDblToStr(alpha, 5) + << endl;); + //--- + //--- sanity check - none of the columns currently in master + //--- should have negative reduced cost + //--- unless they have been fixed to 0 by branching + //--- + // const double * colLB = m_masterSI->getColLower(); + const double *colUB = m_masterSI->getColUpper(); + int index = (*it)->getColMasterIndex(); + double rcLPi = rcLP[index]; + + if (rcLPi < -m_param.RedCostEpsilon && colUB[index] > DecompEpsilon) { + (*m_osLog) << "VAR v-index:" << var_index++ + << " m-index: " << (*it)->getColMasterIndex() + << " b-index: " << b << " rcLP: " << rcLPi << endl; + (*it)->print(m_infinity, m_osLog, modelCore->colNames, + const_cast(redCostX)); + (*m_osLog) << "******** ERROR ********" << endl; + assert(0); + } + + //--- + //--- check that objective in LP and calculated objective match + //--- + if (m_phase == PHASE_PRICE2) { + double objCalc = (*it)->m_s.dotProduct(origObjective); + + if (!UtilIsZero(objCalc - objC[(*it)->getColMasterIndex()], 1.0e-3)) { + (*m_osLog) << "VAR v-index:" << var_index++ + << " m-index: " << (*it)->getColMasterIndex() + << " b-index: " << b + << " objLP: " << objC[(*it)->getColMasterIndex()] + << " objCalc: " << objCalc << endl; + (*it)->print(m_infinity, m_osLog, modelCore->colNames, + const_cast(origObjective)); + (*m_osLog) << "******** ERROR ********" << endl; + assert(0); } - - assert(m_masterRowType[nBaseCoreRows + b] == DecompRow_Convex); - assert(isMasterColStructural((*it)->getColMasterIndex())); - UTIL_DEBUG(m_app->m_param.LogDebugLevel, 5, - (*m_osLog) - << "MasterColIndex = " - << setw(6) << (*it)->getColMasterIndex() - << "Block = " - << setw(3) << b - << "LPRedCost = " << setw(10) - << UtilDblToStr(rcLP[(*it)->getColMasterIndex()], 5) - << "CalcRedCost = " << setw(10) - << UtilDblToStr(redCost - alpha, 5) - << "ObjCost = " << setw(10) - << UtilDblToStr(objC[(*it)->getColMasterIndex()], 5) - << "Alpha = " << setw(10) - << UtilDblToStr(alpha, 5) - << endl; ); + } + + //--- + //--- check that LP reduced cost and calculated reduced cost + //--- match up + //--- in the case of using dual smoothing, we cannot do this check + //--- since the calculated reduced cost is based on the smoothed + //--- duals + //--- + if (!m_param.DualStab && + !UtilIsZero(rcLP[(*it)->getColMasterIndex()] - (redCost - alpha), + 1.0e-3)) { //--- - //--- sanity check - none of the columns currently in master - //--- should have negative reduced cost - //--- unless they have been fixed to 0 by branching + //--- this whole next section is an expansion of log + //--- when there is an issue found that the solver + //--- returns RC that doesn't match the one calculated + //--- based on core matrix and duals //--- - //const double * colLB = m_masterSI->getColLower(); - const double* colUB = m_masterSI->getColUpper(); - int index = 
(*it)->getColMasterIndex(); - double rcLPi = rcLP[index]; - - if (rcLPi < - m_param.RedCostEpsilon && - colUB[index] > DecompEpsilon) { - (*m_osLog) << "VAR v-index:" << var_index++ - << " m-index: " << (*it)->getColMasterIndex() - << " b-index: " << b - << " rcLP: " << rcLPi - << endl; - (*it)->print(m_infinity, m_osLog, modelCore->colNames, - const_cast(redCostX)); - (*m_osLog) << "******** ERROR ********" << endl; - assert(0); - } - + (*m_osLog) << "VAR v-index:" << var_index++ + << " m-index: " << (*it)->getColMasterIndex() + << " b-index: " << b << " rc: " << redCost + << " alpha: " << alpha << " rc-a: " << redCost - alpha + << " RCLP: " << rcLP[(*it)->getColMasterIndex()] << endl; //--- - //--- check that objective in LP and calculated objective match + //--- this, plus alpha shows the calculation of the red-cost in + //--- x-space, next, look at the same calculation in lambda-space //--- - if (m_phase == PHASE_PRICE2) { - double objCalc = (*it)->m_s.dotProduct(origObjective); - - if (!UtilIsZero(objCalc - objC[(*it)->getColMasterIndex()], - 1.0e-3)) { - (*m_osLog) << "VAR v-index:" << var_index++ - << " m-index: " << (*it)->getColMasterIndex() - << " b-index: " << b - << " objLP: " << objC[(*it)->getColMasterIndex()] - << " objCalc: " << objCalc - << endl; - (*it)->print(m_infinity, m_osLog, modelCore->colNames, - const_cast(origObjective)); - (*m_osLog) << "******** ERROR ********" << endl; - assert(0); - } - } + (*it)->print(m_infinity, m_osLog, modelCore->colNames, + const_cast(redCostX)); + (*m_osLog) << "******** ERROR ********" << endl; + //--- + //--- the rows in lambda that show up should be ones where + //--- these components show up in original A'' + //--- + double *uA2 = new double[nCoreCols]; + modelCore->M->transposeTimes(u_adjusted, uA2); + (*it)->print(m_infinity, m_osLog, modelCore->colNames, + const_cast(uA2)); + UTIL_DELARR(uA2); + //--- + //--- RC of a col of lambda-space is u.A[i] + //--- + (*m_osLog) << " objLP: " + << UtilDblToStr(objC[(*it)->getColMasterIndex()], 4) << endl; //--- - //--- check that LP reduced cost and calculated reduced cost - //--- match up - //--- in the case of using dual smoothing, we cannot do this check - //--- since the calculated reduced cost is based on the smoothed - //--- duals + //--- recalc column of master A.s for this s //--- - if (!m_param.DualStab && - !UtilIsZero(rcLP[(*it)->getColMasterIndex()] - - (redCost - alpha), 1.0e-3)) { - //--- - //--- this whole next section is an expansion of log - //--- when there is an issue found that the solver - //--- returns RC that doesn't match the one calculated - //--- based on core matrix and duals - //--- - (*m_osLog) << "VAR v-index:" << var_index++ - << " m-index: " << (*it)->getColMasterIndex() - << " b-index: " << b - << " rc: " << redCost - << " alpha: " << alpha - << " rc-a: " << redCost - alpha - << " RCLP: " << rcLP[(*it)->getColMasterIndex()] - << endl; - //--- - //--- this, plus alpha shows the calculation of the red-cost in - //--- x-space, next, look at the same calculation in lambda-space - //--- - (*it)->print(m_infinity, m_osLog, modelCore->colNames, - const_cast(redCostX)); - (*m_osLog) << "******** ERROR ********" << endl; - //--- - //--- the rows in lambda that show up should be ones where - //--- these components show up in original A'' - //--- - double* uA2 = new double[nCoreCols]; - modelCore->M->transposeTimes(u_adjusted, uA2); - (*it)->print(m_infinity, m_osLog, modelCore->colNames, - const_cast(uA2)); - UTIL_DELARR(uA2); - //--- - //--- RC of a col of 
lambda-space is u.A[i] - //--- - (*m_osLog) << " objLP: " - << UtilDblToStr(objC[(*it)->getColMasterIndex()], 4) - << endl; - - //--- - //--- recalc column of master A.s for this s - //--- - if (m_algo != DECOMP) { - double* denseS = new double[modelCore->getNumCols()]; - (*it)->fillDenseArr(modelCore->getNumCols(), denseS); - int r; - const CoinPackedMatrix* Mr = modelCore->getMatrix(); - - for (r = 0; r < modelCore->getNumRows(); r++) { - printf("\nROW %d\n", r); - CoinShallowPackedVector vec = Mr->getVector(r); - UtilPrintPackedVector(vec, m_osLog, - modelCore->getColNames(), - denseS); - } - - UTIL_DELARR(denseS); - } - - const CoinPackedMatrix* Mc = m_masterSI->getMatrixByCol(); - - CoinShallowPackedVector vec - = Mc->getVector((*it)->getColMasterIndex()); - - UtilPrintPackedVector(vec, m_osLog, m_masterSI->getColNames(), u); - - double uA = vec.dotProduct(u); - - (*m_osLog) << " objLP: " - << UtilDblToStr(objC[(*it)->getColMasterIndex()], 4) - << endl; - - (*m_osLog) << " uA : " << UtilDblToStr(uA, 4) << endl; - - (*m_osLog) << " RC : " - << UtilDblToStr(objC[(*it)->getColMasterIndex()] - uA, - 4) << endl; - - (*m_osLog) << " RCLP : " - << UtilDblToStr(rcLP[(*it)->getColMasterIndex()], 4) - << endl; - - assert(0); - - (*m_osLog) << endl; - } //END: if(!UtilIsZero(rcLP[(*it)->getColMasterIndex()] ... - } //END: for(it = m_vars.begin(); it != m_vars.end(); it++) -} + if (m_algo != DECOMP) { + double *denseS = new double[modelCore->getNumCols()]; + (*it)->fillDenseArr(modelCore->getNumCols(), denseS); + int r; + const CoinPackedMatrix *Mr = modelCore->getMatrix(); + + for (r = 0; r < modelCore->getNumRows(); r++) { + printf("\nROW %d\n", r); + CoinShallowPackedVector vec = Mr->getVector(r); + UtilPrintPackedVector(vec, m_osLog, modelCore->getColNames(), denseS); + } + + UTIL_DELARR(denseS); + } -//===========================================================================// -DecompSolverResult* DecompAlgoC::solveDirect(const DecompSolution* startSol) -{ - //--- - //--- Solve the original IP with a generic IP solver. - //--- - //--- A simple sanity check for the case where it is possible to - //--- represent [A,b] in polynomial size, for example, SmallIP, 3AP. 
- //--- - UtilPrintFuncBegin(m_osLog, m_classTag, - "solveDirect()", m_param.LogDebugLevel, 2); - DecompVarList dummy; - int i, nNodes; - double objLB = -m_infinity; - double objUB = m_infinity; - int logIpLevel = m_param.LogIpLevel; - DecompConstraintSet* modelCore = m_modelCore.getModel(); - int numInts = modelCore->getNumInts(); - int numCols = m_masterSI->getNumCols(); - double timeLimit = m_param.TimeLimit; - //--- - //--- start timer - //--- - UtilTimer timer; - timer.start(); - //--- - //--- create a results object - //--- - DecompSolverResult* result = new DecompSolverResult(m_infinity); - //--- - //--- create the master problem - //--- - createMasterProblem(dummy); - //--- - //--- adjust ip solver log levels - //--- - m_masterSI->messageHandler()->setLogLevel(logIpLevel); - - //--- - //--- set integer vars - //--- - for (i = 0; i < numInts; i++) { - m_masterSI->setInteger(modelCore->integerVars[i]); - } + const CoinPackedMatrix *Mc = m_masterSI->getMatrixByCol(); - //#define PERMUTE_STUFF - - /*#ifdef PERMUTE_STUFF - - //--- - //--- randomly permute rows and cols for MIPLIB2010 - //--- delete random rows, append to end - //--- delete random cols, append to end - //--- - { - int k, r, c, tmp; - int nCols = m_masterSI->getNumCols(); - int nRows = m_masterSI->getNumRows(); - int rowsToPermute = static_cast(nRows / 7); - int colsToPermute = static_cast(nCols / 7); - vector newRowInd; //old row to new row index - int rowToDelete[1]; - int colToDelete[1]; - - srand(1); - for(i = 0; i < nRows; i++){ - newRowInd.push_back(i); - } - for(i = 0; i < rowsToPermute; i++){ - r = UtilURand(0, nRows-1); - //--- - //--- Example: - //--- 0,1,2,3,4,5,6 (r=2) - //--- -> 0,1,3,4,5,6,2 - //--- - tmp = newRowInd[r]; - for(k = r; k < (nRows-1); k++){ - newRowInd[k] = newRowInd[k+1]; - } - newRowInd[nRows-1] = tmp; - - const CoinPackedMatrix * M = m_masterSI->getMatrixByRow(); - const double * rowLB = m_masterSI->getRowLower(); - const double * rowUB = m_masterSI->getRowUpper(); - const double rLB = rowLB[r]; - const double rUB = rowUB[r]; - CoinShallowPackedVector vecS = M->getVector(r); - CoinPackedVector vec(vecS); - //printf("delete and move to end row r=%d %s\n", - // r, m_masterSI->getRowName(r).c_str()); - //vec.print(); - - rowToDelete[0] = r; - m_masterSI->deleteRows(1, rowToDelete); - m_masterSI->addRow(vec, rLB, rUB); - } + CoinShallowPackedVector vec = Mc->getVector((*it)->getColMasterIndex()); - for(i = 0; i < colsToPermute; i++){ - c = UtilURand(0, nCols-1); - //--- - //--- Example: - //--- 0,1,2,3,4,5,6 (r=2) - //--- -> 0,1,3,4,5,6,2 - //--- - const CoinPackedMatrix * M = m_masterSI->getMatrixByCol(); - const double * colLB = m_masterSI->getColLower(); - const double * colUB = m_masterSI->getColUpper(); - const double * objC = m_masterSI->getObjCoefficients(); - const double cLB = colLB[c]; - const double cUB = colUB[c]; - const double obj = objC[c]; - const CoinShallowPackedVector vecS = M->getVector(c); - CoinPackedVector vec(vecS); - - /////////// THIS IS WRONG /////////// - //TODO: copy integer info! 
- - colToDelete[0] = c; - m_masterSI->deleteCols(1, colToDelete); - m_masterSI->addCol(vec, cLB, cUB, obj); - } + UtilPrintPackedVector(vec, m_osLog, m_masterSI->getColNames(), u); - printf("\n\nNew Row Map\n"); - for(i = 0; i < nRows; i++){ - printf("%10d%10d\n", i, newRowInd[i]); - } - } - #endif */ - //--- - //--- dump full milp - //--- - if (m_param.LogDumpModel >= 2) { - string fileName = "directMILP"; - printCurrentProblem(m_masterSI, fileName); - } + double uA = vec.dotProduct(u); + + (*m_osLog) << " objLP: " + << UtilDblToStr(objC[(*it)->getColMasterIndex()], 4) << endl; - if (m_param.DecompIPSolver == "Cbc"){ + (*m_osLog) << " uA : " << UtilDblToStr(uA, 4) << endl; + + (*m_osLog) << " RC : " + << UtilDblToStr(objC[(*it)->getColMasterIndex()] - uA, 4) + << endl; + + (*m_osLog) << " RCLP : " + << UtilDblToStr(rcLP[(*it)->getColMasterIndex()], 4) << endl; + + assert(0); + + (*m_osLog) << endl; + } // END: if(!UtilIsZero(rcLP[(*it)->getColMasterIndex()] ... + } // END: for(it = m_vars.begin(); it != m_vars.end(); it++) +} + +//===========================================================================// +DecompSolverResult *DecompAlgoC::solveDirect(const DecompSolution *startSol) { + //--- + //--- Solve the original IP with a generic IP solver. + //--- + //--- A simple sanity check for the case where it is possible to + //--- represent [A,b] in polynomial size, for example, SmallIP, 3AP. + //--- + UtilPrintFuncBegin(m_osLog, m_classTag, "solveDirect()", + m_param.LogDebugLevel, 2); + DecompVarList dummy; + int i, nNodes; + double objLB = -m_infinity; + double objUB = m_infinity; + int logIpLevel = m_param.LogIpLevel; + DecompConstraintSet *modelCore = m_modelCore.getModel(); + int numInts = modelCore->getNumInts(); + int numCols = m_masterSI->getNumCols(); + double timeLimit = m_param.TimeLimit; + //--- + //--- start timer + //--- + UtilTimer timer; + timer.start(); + //--- + //--- create a results object + //--- + DecompSolverResult *result = new DecompSolverResult(m_infinity); + //--- + //--- create the master problem + //--- + createMasterProblem(dummy); + //--- + //--- adjust ip solver log levels + //--- + m_masterSI->messageHandler()->setLogLevel(logIpLevel); + + //--- + //--- set integer vars + //--- + for (i = 0; i < numInts; i++) { + m_masterSI->setInteger(modelCore->integerVars[i]); + } + + //#define PERMUTE_STUFF + + /*#ifdef PERMUTE_STUFF + + //--- + //--- randomly permute rows and cols for MIPLIB2010 + //--- delete random rows, append to end + //--- delete random cols, append to end + //--- + { + int k, r, c, tmp; + int nCols = m_masterSI->getNumCols(); + int nRows = m_masterSI->getNumRows(); + int rowsToPermute = static_cast(nRows / 7); + int colsToPermute = static_cast(nCols / 7); + vector newRowInd; //old row to new row index + int rowToDelete[1]; + int colToDelete[1]; + + srand(1); + for(i = 0; i < nRows; i++){ + newRowInd.push_back(i); + } + for(i = 0; i < rowsToPermute; i++){ + r = UtilURand(0, nRows-1); + //--- + //--- Example: + //--- 0,1,2,3,4,5,6 (r=2) + //--- -> 0,1,3,4,5,6,2 + //--- + tmp = newRowInd[r]; + for(k = r; k < (nRows-1); k++){ + newRowInd[k] = newRowInd[k+1]; + } + newRowInd[nRows-1] = tmp; + + const CoinPackedMatrix * M = m_masterSI->getMatrixByRow(); + const double * rowLB = m_masterSI->getRowLower(); + const double * rowUB = m_masterSI->getRowUpper(); + const double rLB = rowLB[r]; + const double rUB = rowUB[r]; + CoinShallowPackedVector vecS = M->getVector(r); + CoinPackedVector vec(vecS); + //printf("delete and move to end row r=%d %s\n", + // 
r, m_masterSI->getRowName(r).c_str()); + //vec.print(); + + rowToDelete[0] = r; + m_masterSI->deleteRows(1, rowToDelete); + m_masterSI->addRow(vec, rLB, rUB); + } + + for(i = 0; i < colsToPermute; i++){ + c = UtilURand(0, nCols-1); + //--- + //--- Example: + //--- 0,1,2,3,4,5,6 (r=2) + //--- -> 0,1,3,4,5,6,2 + //--- + const CoinPackedMatrix * M = m_masterSI->getMatrixByCol(); + const double * colLB = m_masterSI->getColLower(); + const double * colUB = m_masterSI->getColUpper(); + const double * objC = m_masterSI->getObjCoefficients(); + const double cLB = colLB[c]; + const double cUB = colUB[c]; + const double obj = objC[c]; + const CoinShallowPackedVector vecS = M->getVector(c); + CoinPackedVector vec(vecS); + + /////////// THIS IS WRONG /////////// + //TODO: copy integer info! + + colToDelete[0] = c; + m_masterSI->deleteCols(1, colToDelete); + m_masterSI->addCol(vec, cLB, cUB, obj); + } + + printf("\n\nNew Row Map\n"); + for(i = 0; i < nRows; i++){ + printf("%10d%10d\n", i, newRowInd[i]); + } + } + #endif */ + //--- + //--- dump full milp + //--- + if (m_param.LogDumpModel >= 2) { + string fileName = "directMILP"; + printCurrentProblem(m_masterSI, fileName); + } + + if (m_param.DecompIPSolver == "Cbc") { #ifdef DIP_HAS_CBC - CbcModel cbc(*m_masterSI); - cbc.setLogLevel(logIpLevel); - cbc.setDblParam(CbcModel::CbcMaximumSeconds, timeLimit); - cbc.branchAndBound(); - const int statusSet[2] = {0, 1}; - int solStatus = cbc.status(); - int solStatus2 = cbc.secondaryStatus(); - - if (!UtilIsInSet(solStatus, statusSet, 2)) { - cerr << "Error: CBC IP solver status = " - << solStatus << endl; - throw UtilException("CBC solver status", "solveDirect", "solveDirect"); - } - - //--- - //--- get number of nodes - //--- - nNodes = cbc.getNodeCount(); - //--- - //--- get objective and solution - //--- - objLB = cbc.getBestPossibleObjValue(); - - if (cbc.isProvenOptimal() || cbc.isSecondsLimitReached()) { - objUB = cbc.getObjValue(); - - if (result && cbc.getSolutionCount()) { - const double* solDbl = cbc.getColSolution(); - vector solVec(solDbl, solDbl + numCols); - result->m_solution.push_back(solVec); - result->m_nSolutions++; - assert(result->m_nSolutions == - static_cast(result->m_solution.size())); - //copy(solution, solution+numCols, result->m_solution); - } - } - - //--- - //--- copy sol status into result - //--- - if (result) { - result->m_solStatus = solStatus; - result->m_solStatus2 = solStatus2; + CbcModel cbc(*m_masterSI); + cbc.setLogLevel(logIpLevel); + cbc.setDblParam(CbcModel::CbcMaximumSeconds, timeLimit); + cbc.branchAndBound(); + const int statusSet[2] = {0, 1}; + int solStatus = cbc.status(); + int solStatus2 = cbc.secondaryStatus(); + + if (!UtilIsInSet(solStatus, statusSet, 2)) { + cerr << "Error: CBC IP solver status = " << solStatus << endl; + throw UtilException("CBC solver status", "solveDirect", "solveDirect"); + } + + //--- + //--- get number of nodes + //--- + nNodes = cbc.getNodeCount(); + //--- + //--- get objective and solution + //--- + objLB = cbc.getBestPossibleObjValue(); + + if (cbc.isProvenOptimal() || cbc.isSecondsLimitReached()) { + objUB = cbc.getObjValue(); + + if (result && cbc.getSolutionCount()) { + const double *solDbl = cbc.getColSolution(); + vector solVec(solDbl, solDbl + numCols); + result->m_solution.push_back(solVec); + result->m_nSolutions++; + assert(result->m_nSolutions == + static_cast(result->m_solution.size())); + // copy(solution, solution+numCols, result->m_solution); } + } + + //--- + //--- copy sol status into result + //--- + if (result) { 
+ result->m_solStatus = solStatus; + result->m_solStatus2 = solStatus2; + } #else - throw UtilException("Cbc selected as solver, but it's not available", - "solveDirect", "DecompDebug"); + throw UtilException("Cbc selected as solver, but it's not available", + "solveDirect", "DecompDebug"); #endif - }else if (m_param.DecompIPSolver == "CPLEX"){ + } else if (m_param.DecompIPSolver == "CPLEX") { #ifdef DIP_HAS_CPX - OsiCpxSolverInterface* masterSICpx = - dynamic_cast(m_masterSI); - CPXLPptr cpxLp = masterSICpx->getLpPtr(); - CPXENVptr cpxEnv = masterSICpx->getEnvironmentPtr(); - int status = 0; - masterSICpx->switchToMIP();//need? - - if (startSol) { - int nCols = masterSICpx->getNumCols(); - int beg[1] = {0}; - int* varInd = new int[nCols]; - const double* solution = startSol->getValues(); - assert(nCols == startSol->getSize()); - UtilIotaN(varInd, nCols, 0); - status = CPXaddmipstarts(cpxEnv, cpxLp, - 1, nCols, beg, varInd, solution, NULL, NULL); - - if (status) - throw UtilException("CPXaddmipstarts failure", - "solveDirect", "DecompAlgoC"); - - UTIL_DELARR(varInd); - } - - //--- - //--- set the time limit - //--- - status = CPXsetdblparam(cpxEnv, CPX_PARAM_TILIM, timeLimit); - //--- - //--- set the thread limit, otherwise CPLEX will use all the resources - //--- - status = CPXsetintparam(cpxEnv, CPX_PARAM_THREADS, m_param.NumThreadsIPSolver); - + OsiCpxSolverInterface *masterSICpx = + dynamic_cast(m_masterSI); + CPXLPptr cpxLp = masterSICpx->getLpPtr(); + CPXENVptr cpxEnv = masterSICpx->getEnvironmentPtr(); + int status = 0; + masterSICpx->switchToMIP(); // need? + + if (startSol) { + int nCols = masterSICpx->getNumCols(); + int beg[1] = {0}; + int *varInd = new int[nCols]; + const double *solution = startSol->getValues(); + assert(nCols == startSol->getSize()); + UtilIotaN(varInd, nCols, 0); + status = CPXaddmipstarts(cpxEnv, cpxLp, 1, nCols, beg, varInd, solution, + NULL, NULL); + if (status) - throw UtilException("CPXsetdblparam failure", - "solveDirect", "DecompAlgoC"); - - //--- - //--- solve the MILP - //--- - UtilTimer timer1; - timer1.start(); - m_masterSI->branchAndBound(); - timer1.stop(); - cout << "just after solving" << endl; - cout << " Real=" << setw(10) << UtilDblToStr(timer1.getRealTime(), 5) - << " Cpu= " << setw(10) << UtilDblToStr(timer1.getCpuTime() , 5); - //--- - //--- get solver status - //--- - //--- - int solStatus = CPXgetstat(cpxEnv, cpxLp); - - if (result) { - result->m_solStatus = solStatus; - result->m_solStatus2 = 0; - } - - //--- - //--- get number of nodes - //--- - nNodes = CPXgetnodecnt(cpxEnv, cpxLp); - //--- - //--- get objective and solution - //--- - status = CPXgetbestobjval(cpxEnv, cpxLp, &objLB); - + throw UtilException("CPXaddmipstarts failure", "solveDirect", + "DecompAlgoC"); + + UTIL_DELARR(varInd); + } + + //--- + //--- set the time limit + //--- + status = CPXsetdblparam(cpxEnv, CPX_PARAM_TILIM, timeLimit); + //--- + //--- set the thread limit, otherwise CPLEX will use all the resources + //--- + status = + CPXsetintparam(cpxEnv, CPX_PARAM_THREADS, m_param.NumThreadsIPSolver); + + if (status) + throw UtilException("CPXsetdblparam failure", "solveDirect", + "DecompAlgoC"); + + //--- + //--- solve the MILP + //--- + UtilTimer timer1; + timer1.start(); + m_masterSI->branchAndBound(); + timer1.stop(); + cout << "just after solving" << endl; + cout << " Real=" << setw(10) << UtilDblToStr(timer1.getRealTime(), 5) + << " Cpu= " << setw(10) << UtilDblToStr(timer1.getCpuTime(), 5); + //--- + //--- get solver status + //--- + //--- + int solStatus = 
CPXgetstat(cpxEnv, cpxLp); + + if (result) { + result->m_solStatus = solStatus; + result->m_solStatus2 = 0; + } + + //--- + //--- get number of nodes + //--- + nNodes = CPXgetnodecnt(cpxEnv, cpxLp); + //--- + //--- get objective and solution + //--- + status = CPXgetbestobjval(cpxEnv, cpxLp, &objLB); + + if (status) + throw UtilException("CPXgetbestobjval failure", "solveDirect", + "DecompAlgoC"); + + //--- + //--- get objective and solution + //--- + if (solStatus == CPXMIP_OPTIMAL || solStatus == CPXMIP_OPTIMAL_TOL || + solStatus == CPXMIP_TIME_LIM_FEAS) { + status = CPXgetmipobjval(cpxEnv, cpxLp, &objUB); + if (status) - throw UtilException("CPXgetbestobjval failure", - "solveDirect", "DecompAlgoC"); - - //--- - //--- get objective and solution - //--- - if (solStatus == CPXMIP_OPTIMAL || - solStatus == CPXMIP_OPTIMAL_TOL || - solStatus == CPXMIP_TIME_LIM_FEAS) { - status = CPXgetmipobjval(cpxEnv, cpxLp, &objUB); - - if (status) - throw UtilException("CPXgetmipobjval failure", - "solveDirect", "DecompAlgoC"); - - if (result) { - const double* solDbl = m_masterSI->getColSolution(); - vector solVec(solDbl, solDbl + numCols); - result->m_solution.push_back(solVec); - result->m_nSolutions++; - assert(result->m_nSolutions == - static_cast(result->m_solution.size())); - //copy(solution, solution+numCols, result->m_solution); - } - } - - //--- - //--- copy sol status into result - //--- + throw UtilException("CPXgetmipobjval failure", "solveDirect", + "DecompAlgoC"); + if (result) { - result->m_solStatus = solStatus; - result->m_solStatus2 = 0; + const double *solDbl = m_masterSI->getColSolution(); + vector solVec(solDbl, solDbl + numCols); + result->m_solution.push_back(solVec); + result->m_nSolutions++; + assert(result->m_nSolutions == + static_cast(result->m_solution.size())); + // copy(solution, solution+numCols, result->m_solution); } + } + + //--- + //--- copy sol status into result + //--- + if (result) { + result->m_solStatus = solStatus; + result->m_solStatus2 = 0; + } #else - throw UtilException("CPLEX selected as solver, but it's not available", - "solveDirect", "DecompDebug"); + throw UtilException("CPLEX selected as solver, but it's not available", + "solveDirect", "DecompDebug"); #endif - }else{ - throw UtilException("solveDirect not implemented for selected solver", - "solveDirect", "DecompDebug"); - } - - //--- - //--- copy bounds into result - //--- - if (result) { - result->m_objUB = objUB; - result->m_objLB = objLB; - } - - //--- - //--- stop the timer, dump time to solve - //--- - timer.stop(); - (*m_osLog) << "DIRECT SOLVE" - << " Real=" << setw(10) << UtilDblToStr(timer.getRealTime(), 5) - << " Cpu= " << setw(10) << UtilDblToStr(timer.getCpuTime() , 5) - << " Nodes= " << setw(8) << nNodes - << " objLB= " << setw(10) << UtilDblToStr(objLB, 3) - << " objUB= " << setw(10) << UtilDblToStr(objUB, 3) - << endl; - UtilPrintFuncEnd(m_osLog, m_classTag, - "solveDirect()", m_param.LogDebugLevel, 2); - return result; + } else { + throw UtilException("solveDirect not implemented for selected solver", + "solveDirect", "DecompDebug"); + } + + //--- + //--- copy bounds into result + //--- + if (result) { + result->m_objUB = objUB; + result->m_objLB = objLB; + } + + //--- + //--- stop the timer, dump time to solve + //--- + timer.stop(); + (*m_osLog) << "DIRECT SOLVE" + << " Real=" << setw(10) << UtilDblToStr(timer.getRealTime(), 5) + << " Cpu= " << setw(10) << UtilDblToStr(timer.getCpuTime(), 5) + << " Nodes= " << setw(8) << nNodes << " objLB= " << setw(10) + << UtilDblToStr(objLB, 3) 
<< " objUB= " << setw(10) + << UtilDblToStr(objUB, 3) << endl; + UtilPrintFuncEnd(m_osLog, m_classTag, "solveDirect()", m_param.LogDebugLevel, + 2); + return result; } - diff --git a/Dip/src/DecompMain.cpp b/Dip/src/DecompMain.cpp index 5a8fd6aa..1fcbf289 100644 --- a/Dip/src/DecompMain.cpp +++ b/Dip/src/DecompMain.cpp @@ -30,782 +30,756 @@ #endif using namespace std; -void blockNumberFinder(DecompParam utilParam, - std::vector& blockNums, - const CoinPackedMatrix* matrix); +void blockNumberFinder(DecompParam utilParam, std::vector &blockNums, + const CoinPackedMatrix *matrix); -void DecompAuto(DecompApp milp, - UtilParameters& utilParam, - UtilTimer& timer, - DecompMainParam& decompMainParam); +void DecompAuto(DecompApp milp, UtilParameters &utilParam, UtilTimer &timer, + DecompMainParam &decompMainParam); -DecompSolverResult* solveDirect(const DecompApp& decompApp); +DecompSolverResult *solveDirect(const DecompApp &decompApp); //===========================================================================// -int main(int argc, char** argv) -{ - try { - - UtilTimer timer; - std::vector blockNumCandidates; - DecompMainParam decompMainParam; - decompMainParam.timeSetupReal = 0.0; - decompMainParam.timeSetupCpu = 0.0; - decompMainParam.timeSolveReal = 0.0; - decompMainParam.timeSolveCpu = 0.0; - - //--- - //--- Parse cpmmand line and store parameters - //--- - UtilParameters utilParam(argc, argv); - - //--- - //--- start overall timer - //--- - timer.start(); - - //--- - //--- construct the instance - //--- - DecompApp milp(utilParam); - - // get the current working Directory. - char the_path[256]; - milp.m_param.CurrentWorkingDir = std::string(getcwd(the_path, 255)); - if (milp.m_param.LogDebugLevel >= 1) { - std::cout << milp.m_param.CurrentWorkingDir << std::endl; - } - - //--- - //--- Analyze the matrix - //--- - const CoinPackedMatrix* m_matrix = milp.getMatrix(); - if (milp.m_param.BlockNumInput > 0) { - milp.NumBlocks = milp.m_param.BlockNumInput; - milp.m_param.Concurrent = false ; - milp.m_param.NumBlocksCand = 0; - } - if (milp.m_param.NumBlocksCand == 0) { - blockNumberFinder(milp.m_param, blockNumCandidates, m_matrix); - } - // obtain the number of CPU (core)s on machines with operating - // system Linux, Solaris, & AIX and Mac OS X - // (for all OS releases >= 10.4, i.e., Tiger onwards) - // other systems has different syntax to obtain the core number +int main(int argc, char **argv) { + try { + + UtilTimer timer; + std::vector blockNumCandidates; + DecompMainParam decompMainParam; + decompMainParam.timeSetupReal = 0.0; + decompMainParam.timeSetupCpu = 0.0; + decompMainParam.timeSolveReal = 0.0; + decompMainParam.timeSolveCpu = 0.0; + + //--- + //--- Parse cpmmand line and store parameters + //--- + UtilParameters utilParam(argc, argv); + + //--- + //--- start overall timer + //--- + timer.start(); + + //--- + //--- construct the instance + //--- + DecompApp milp(utilParam); + + // get the current working Directory. 
+ char the_path[256]; + milp.m_param.CurrentWorkingDir = std::string(getcwd(the_path, 255)); + if (milp.m_param.LogDebugLevel >= 1) { + std::cout << milp.m_param.CurrentWorkingDir << std::endl; + } + + //--- + //--- Analyze the matrix + //--- + const CoinPackedMatrix *m_matrix = milp.getMatrix(); + if (milp.m_param.BlockNumInput > 0) { + milp.NumBlocks = milp.m_param.BlockNumInput; + milp.m_param.Concurrent = false; + milp.m_param.NumBlocksCand = 0; + } + if (milp.m_param.NumBlocksCand == 0) { + blockNumberFinder(milp.m_param, blockNumCandidates, m_matrix); + } + // obtain the number of CPU (core)s on machines with operating + // system Linux, Solaris, & AIX and Mac OS X + // (for all OS releases >= 10.4, i.e., Tiger onwards) + // other systems has different syntax to obtain the core number #ifdef _OPENMP - int numCPU = omp_get_num_procs(); + int numCPU = omp_get_num_procs(); #else - int numCPU = 1; + int numCPU = 1; #endif - if (milp.m_param.LogDebugLevel > 1) { - std::cout << "The number of cores is " - << numCPU << std::endl; - } - - // the actual thread number is the minimum of - // number of cores, total block numbers and the thread number - // used in concurrent computations - int numThreads = min(min(numCPU, - static_cast(blockNumCandidates.size())), - milp.m_param.ConcurrentThreadsNum); - std::vector milpArray(static_cast(numThreads + 1), milp); - std::vector decompMainParamArray(static_cast - (numThreads + 1), - decompMainParam); - std::vector timerArray(static_cast(numThreads + 1), - timer); - std::vector utilParamArray(static_cast - (numThreads + 1), - utilParam); - - if (milp.m_param.Concurrent == true ) { - printf("===== START Concurrent Computations Process. =====\n"); + if (milp.m_param.LogDebugLevel > 1) { + std::cout << "The number of cores is " << numCPU << std::endl; + } + + // the actual thread number is the minimum of + // number of cores, total block numbers and the thread number + // used in concurrent computations + int numThreads = + min(min(numCPU, static_cast(blockNumCandidates.size())), + milp.m_param.ConcurrentThreadsNum); + std::vector milpArray(static_cast(numThreads + 1), milp); + std::vector decompMainParamArray( + static_cast(numThreads + 1), decompMainParam); + std::vector timerArray(static_cast(numThreads + 1), timer); + std::vector utilParamArray(static_cast(numThreads + 1), + utilParam); + + if (milp.m_param.Concurrent == true) { + printf("===== START Concurrent Computations Process. 
=====\n"); #ifdef _OPENMP #pragma omp parallel for #endif - for (int i = 0 ; i < (numThreads + 1); i++) { - if (i == 0) { - decompMainParamArray[i].doCut = true; - decompMainParamArray[i].doPriceCut = false; - decompMainParamArray[i].doDirect = true; - } else { - decompMainParamArray[i].doCut = false; - decompMainParamArray[i].doPriceCut = true; - decompMainParamArray[i].doDirect = false; - milpArray[i].NumBlocks = blockNumCandidates[i - 1]; - } - - milpArray[i].m_threadIndex = i; - DecompAuto(milpArray[i], utilParamArray[i], - timerArray[i], decompMainParamArray[i]); - } - } else { - decompMainParam.doCut = utilParam.GetSetting("doCut", false); - decompMainParam.doPriceCut = utilParam.GetSetting("doPriceCut", true); - decompMainParam.doDirect = utilParam.GetSetting("doDirect", false); - DecompAuto(milp, utilParam, timer, decompMainParam); + for (int i = 0; i < (numThreads + 1); i++) { + if (i == 0) { + decompMainParamArray[i].doCut = true; + decompMainParamArray[i].doPriceCut = false; + decompMainParamArray[i].doDirect = true; + } else { + decompMainParamArray[i].doCut = false; + decompMainParamArray[i].doPriceCut = true; + decompMainParamArray[i].doDirect = false; + milpArray[i].NumBlocks = blockNumCandidates[i - 1]; + } + + milpArray[i].m_threadIndex = i; + DecompAuto(milpArray[i], utilParamArray[i], timerArray[i], + decompMainParamArray[i]); } - - if (milp.m_param.Concurrent == true) { - printf("===== FINISH Concurrent Computations Process. =====\n"); - printf("======== SUMMARY OF CONCURRENT COMPUTATIONS =======\n"); - cout << "Method" << setw(20) << "BlockNumber" << setw(20) - << "WallClockTime" << setw(20) << "CPUTime" << setw(20) - << "BestLB" << setw(25) << "BestUB" << endl; - - for (int i = 0 ; i < (numThreads + 1); i++) { - if (i == 0) { - cout << "B&C "; - } else { - cout << "B&P"; - } - - cout << setw(15); - - if (i == 0) { - cout << "NA"; - } else { - cout << milpArray[i].NumBlocks; - } - - cout << setw(25) << setprecision(7) - << decompMainParamArray[i].timeSetupReal + - decompMainParamArray[i].timeSolveReal - << setw(23) << setprecision(7) - << decompMainParamArray[i].timeSetupCpu + - decompMainParamArray[i].timeSolveCpu - << setw(23) << setprecision(7) - << decompMainParamArray[i].bestLB - << setw(25) << setprecision(7) - << decompMainParamArray[i].bestUB - << endl; - } + } else { + decompMainParam.doCut = utilParam.GetSetting("doCut", false); + decompMainParam.doPriceCut = utilParam.GetSetting("doPriceCut", true); + decompMainParam.doDirect = utilParam.GetSetting("doDirect", false); + DecompAuto(milp, utilParam, timer, decompMainParam); + } + + if (milp.m_param.Concurrent == true) { + printf("===== FINISH Concurrent Computations Process. 
=====\n"); + printf("======== SUMMARY OF CONCURRENT COMPUTATIONS =======\n"); + cout << "Method" << setw(20) << "BlockNumber" << setw(20) + << "WallClockTime" << setw(20) << "CPUTime" << setw(20) << "BestLB" + << setw(25) << "BestUB" << endl; + + for (int i = 0; i < (numThreads + 1); i++) { + if (i == 0) { + cout << "B&C "; + } else { + cout << "B&P"; + } + + cout << setw(15); + + if (i == 0) { + cout << "NA"; + } else { + cout << milpArray[i].NumBlocks; + } + + cout << setw(25) << setprecision(7) + << decompMainParamArray[i].timeSetupReal + + decompMainParamArray[i].timeSolveReal + << setw(23) << setprecision(7) + << decompMainParamArray[i].timeSetupCpu + + decompMainParamArray[i].timeSolveCpu + << setw(23) << setprecision(7) << decompMainParamArray[i].bestLB + << setw(25) << setprecision(7) << decompMainParamArray[i].bestUB + << endl; } - } catch (CoinError& ex) { - cerr << "COIN Exception [ " << ex.message() << " ]" - << " at " << ex.fileName() << ":L" << ex.lineNumber() - << " in " << ex.className() << "::" << ex.methodName() << endl; - return 1; - } - - return 0; + } + } catch (CoinError &ex) { + cerr << "COIN Exception [ " << ex.message() << " ]" + << " at " << ex.fileName() << ":L" << ex.lineNumber() << " in " + << ex.className() << "::" << ex.methodName() << endl; + return 1; + } + + return 0; } /* * Determining the candidate block numbers based on the instance frequency * table */ -void blockNumberFinder(DecompParam decompParam, - std::vector& blockNums, - const CoinPackedMatrix* matrix) -{ - if (decompParam.NumBlocksCand == 0) { - return ; - } - - const int* lengthRows = matrix->getVectorLengths(); - - int numRows = matrix->getNumRows(); - - // The following code creates a histogram table to store the - // nonzero counts and number of rows - std::map histogram; - - for (int i = 0 ; i < numRows; ++i) { - if (histogram.count(lengthRows[i]) > 0) { - histogram[lengthRows[i]] += 1; - } else { - histogram.insert(std::pair(lengthRows[i], 1)); +void blockNumberFinder(DecompParam decompParam, std::vector &blockNums, + const CoinPackedMatrix *matrix) { + if (decompParam.NumBlocksCand == 0) { + return; + } + + const int *lengthRows = matrix->getVectorLengths(); + + int numRows = matrix->getNumRows(); + + // The following code creates a histogram table to store the + // nonzero counts and number of rows + std::map histogram; + + for (int i = 0; i < numRows; ++i) { + if (histogram.count(lengthRows[i]) > 0) { + histogram[lengthRows[i]] += 1; + } else { + histogram.insert(std::pair(lengthRows[i], 1)); + } + } + + std::map::iterator histIter; + + if (decompParam.LogDebugLevel >= 1) { + std::ofstream histogramTable; + std::string path1 = + decompParam.CurrentWorkingDir + UtilDirSlash() + "histogramTable.dat"; + histogramTable.open(path1.c_str()); + + for (histIter = histogram.begin(); histIter != histogram.end(); + histIter++) { + histogramTable << histIter->first << " " << histIter->second << "\n"; + } + + histogramTable.close(); + } + + // Aggregation steps, aggregate the entries in the histogram + // Number of nonzeros Number of rows + // 4 8 + // 9 8 + // After aggregation: + // 9 8 + // Then put the number of rows into the candidates queue + std::map::iterator histIter2; + std::map histogram2; + std::set blocksNumTemp; + + for (histIter = histogram.begin(); histIter != histogram.end(); ++histIter) { + int keyvalue_pre = histIter->second; + int max = 0; + std::map::iterator histIterTemp = histIter; + ++histIterTemp; + + for (histIter2 = histIterTemp; histIter2 != histogram.end(); ++histIter2) 
{ + int keyvalue_curr = histIter2->second; + // std::cout << " The current value of key is " << keyvalue_curr + // <first > histIter2->first) ? histIter->first + : histIter2->first; } - } - std::map::iterator histIter; + max = (max_inter > max) ? max_inter : max; - if (decompParam.LogDebugLevel >= 1) { - std::ofstream histogramTable; - std::string path1 = decompParam.CurrentWorkingDir + UtilDirSlash() + - "histogramTable.dat"; - histogramTable.open(path1.c_str()); - - for (histIter = histogram.begin(); histIter != histogram.end(); - histIter++) { - histogramTable << histIter->first << " " << histIter->second << "\n"; + if (max && max != 1) { + blocksNumTemp.insert(max); + blockNums.push_back(max); + histogram2.insert(std::pair(histIter->second, max)); } + } - histogramTable.close(); - } - - // Aggregation steps, aggregate the entries in the histogram - // Number of nonzeros Number of rows - // 4 8 - // 9 8 - // After aggregation: - // 9 8 - // Then put the number of rows into the candidates queue - std::map::iterator histIter2; - std::map histogram2; - std::set blocksNumTemp; - - for (histIter = histogram.begin(); histIter != histogram.end(); - ++histIter) { - int keyvalue_pre = histIter->second ; - int max = 0; - std::map::iterator histIterTemp = histIter; - ++histIterTemp; - - for (histIter2 = histIterTemp; histIter2 != histogram.end(); - ++histIter2) { - int keyvalue_curr = histIter2->second; - // std::cout << " The current value of key is " << keyvalue_curr - // <first > histIter2->first) - ? histIter->first : histIter2->first; - } - - max = (max_inter > max) ? max_inter : max; - - if (max && max != 1) { - blocksNumTemp.insert(max); - blockNums.push_back(max); - histogram2.insert(std::pair(histIter->second, max)); - } - } + if (histogram2.find(histIter->second) == histogram2.end()) { + histogram2.insert(std::pair(histIter->second, histIter->first)); + } + } - if (histogram2.find(histIter->second) == histogram2.end() ) { - histogram2.insert(std::pair(histIter->second, - histIter->first)); - } - } - - if (decompParam.LogDebugLevel >= 1) { - std::ofstream histogramTable1; - std::string path2 = decompParam.CurrentWorkingDir + UtilDirSlash() + - "histogramTable1.dat"; - histogramTable1.open(path2.c_str()); - std::map::iterator histIter3; - - for (histIter3 = histogram2.begin(); histIter3 != histogram2.end(); - ++histIter3) { - histogramTable1 << histIter3->second << " " << histIter3->first << "\n"; - } + if (decompParam.LogDebugLevel >= 1) { + std::ofstream histogramTable1; + std::string path2 = + decompParam.CurrentWorkingDir + UtilDirSlash() + "histogramTable1.dat"; + histogramTable1.open(path2.c_str()); + std::map::iterator histIter3; - histogramTable1.close(); - } + for (histIter3 = histogram2.begin(); histIter3 != histogram2.end(); + ++histIter3) { + histogramTable1 << histIter3->second << " " << histIter3->first << "\n"; + } - int blockCands = std::min(decompParam.NumBlocksCand - static_cast - (blocksNumTemp.size()), - static_cast(histogram2.size())); + histogramTable1.close(); + } - if (blockCands > 0) { - std::map::iterator histIterLower = histogram2.end(); + int blockCands = std::min(decompParam.NumBlocksCand - + static_cast(blocksNumTemp.size()), + static_cast(histogram2.size())); - while (blockCands ) { - --histIterLower; - blockCands--; + if (blockCands > 0) { + std::map::iterator histIterLower = histogram2.end(); - if (blocksNumTemp.find(histIterLower->second) == blocksNumTemp.end()) { - blockNums.push_back(histIterLower->second); - } - } - } else { - int counter = 
decompParam.NumBlocksCand; - std::set:: iterator setIter = blocksNumTemp.begin(); - - while (counter) { - blockNums.push_back(*setIter); - setIter++ ; - --counter; + while (blockCands) { + --histIterLower; + blockCands--; + + if (blocksNumTemp.find(histIterLower->second) == blocksNumTemp.end()) { + blockNums.push_back(histIterLower->second); } - } + } + } else { + int counter = decompParam.NumBlocksCand; + std::set::iterator setIter = blocksNumTemp.begin(); + + while (counter) { + blockNums.push_back(*setIter); + setIter++; + --counter; + } + } } -void DecompAuto(DecompApp milp, - UtilParameters& utilParam, - UtilTimer& timer, - DecompMainParam& decompMainParam) -{ - if (milp.NumBlocks == 0 && milp.m_param.Concurrent) { - milp.NumBlocks = 3; - } - - if (milp.m_threadIndex == 0 && milp.m_param.Concurrent) { - decompMainParam.timeSetupCpu = timer.getCpuTime(); - decompMainParam.timeSetupReal = timer.getRealTime(); - //--- - //--- solve - //--- - timer.start(); - DecompSolverResult* result = solveDirect(milp); - timer.stop(); - decompMainParam.bestLB = result->m_objLB; - decompMainParam.bestUB = result->m_objUB; - decompMainParam.timeSolveCpu = timer.getCpuTime(); - decompMainParam.timeSolveReal = timer.getRealTime(); - UTIL_DELPTR(result); - return ; - } - - //--- - //--- Initialize - //--- - milp.initializeApp(); - - //--- - //--- create the algorithm (a DecompAlgo) - //--- - - if ((decompMainParam.doCut + decompMainParam.doPriceCut) != 1) - throw UtilException("doCut or doPriceCut must be set", - "main", "main"); - - //--- - //--- create the algorithm object - //--- - DecompAlgo* algo = NULL; - if (decompMainParam.doCut) { - algo = new DecompAlgoC(&milp, utilParam); - }else{ - algo = new DecompAlgoPC(&milp, utilParam); - } - - if (decompMainParam.doCut && decompMainParam.doDirect) { - timer.stop(); - decompMainParam.timeSetupCpu = timer.getCpuTime(); - decompMainParam.timeSetupReal = timer.getRealTime(); - //--- - //--- solve - //--- - timer.start(); - DecompSolverResult* result = algo->solveDirect(); - timer.stop(); - decompMainParam.bestLB = result->m_objLB; - decompMainParam.bestUB = result->m_objUB; - decompMainParam.timeSolveCpu = timer.getCpuTime(); - decompMainParam.timeSolveReal = timer.getRealTime(); - UTIL_DELPTR(result); - } else { - //--- - //--- create the driver AlpsDecomp model - //--- - AlpsDecompModel alpsModel(utilParam, algo); - timer.stop(); - decompMainParam.timeSetupCpu = timer.getCpuTime(); - decompMainParam.timeSetupReal = timer.getRealTime(); - //--- - //--- solve - //--- - timer.start(); - alpsModel.solve(); - timer.stop(); - - if (milp.m_param.Concurrent == 1) { - std::cout << "====== The thread number is " << milp.m_threadIndex - << "====" << std::endl; - std::cout << "====== The block number is " << milp.NumBlocks - << "====" << std::endl; - std::cout << "====== Branch-and-Cut " << decompMainParam.doCut - << "====" << std::endl; - std::cout << "====== Branch-and-Price " << decompMainParam.doPriceCut - << "====" << std::endl; - std::cout << " " << std::endl; - std::cout << " " << std::endl; - std::cout << " " << std::endl; +void DecompAuto(DecompApp milp, UtilParameters &utilParam, UtilTimer &timer, + DecompMainParam &decompMainParam) { + if (milp.NumBlocks == 0 && milp.m_param.Concurrent) { + milp.NumBlocks = 3; + } + + if (milp.m_threadIndex == 0 && milp.m_param.Concurrent) { + decompMainParam.timeSetupCpu = timer.getCpuTime(); + decompMainParam.timeSetupReal = timer.getRealTime(); + //--- + //--- solve + //--- + timer.start(); + DecompSolverResult *result = 
solveDirect(milp); + timer.stop(); + decompMainParam.bestLB = result->m_objLB; + decompMainParam.bestUB = result->m_objUB; + decompMainParam.timeSolveCpu = timer.getCpuTime(); + decompMainParam.timeSolveReal = timer.getRealTime(); + UTIL_DELPTR(result); + return; + } + + //--- + //--- Initialize + //--- + milp.initializeApp(); + + //--- + //--- create the algorithm (a DecompAlgo) + //--- + + if ((decompMainParam.doCut + decompMainParam.doPriceCut) != 1) + throw UtilException("doCut or doPriceCut must be set", "main", "main"); + + //--- + //--- create the algorithm object + //--- + DecompAlgo *algo = NULL; + if (decompMainParam.doCut) { + algo = new DecompAlgoC(&milp, utilParam); + } else { + algo = new DecompAlgoPC(&milp, utilParam); + } + + if (decompMainParam.doCut && decompMainParam.doDirect) { + timer.stop(); + decompMainParam.timeSetupCpu = timer.getCpuTime(); + decompMainParam.timeSetupReal = timer.getRealTime(); + //--- + //--- solve + //--- + timer.start(); + DecompSolverResult *result = algo->solveDirect(); + timer.stop(); + decompMainParam.bestLB = result->m_objLB; + decompMainParam.bestUB = result->m_objUB; + decompMainParam.timeSolveCpu = timer.getCpuTime(); + decompMainParam.timeSolveReal = timer.getRealTime(); + UTIL_DELPTR(result); + } else { + //--- + //--- create the driver AlpsDecomp model + //--- + AlpsDecompModel alpsModel(utilParam, algo); + timer.stop(); + decompMainParam.timeSetupCpu = timer.getCpuTime(); + decompMainParam.timeSetupReal = timer.getRealTime(); + //--- + //--- solve + //--- + timer.start(); + alpsModel.solve(); + timer.stop(); + + if (milp.m_param.Concurrent == 1) { + std::cout << "====== The thread number is " << milp.m_threadIndex + << "====" << std::endl; + std::cout << "====== The block number is " << milp.NumBlocks + << "====" << std::endl; + std::cout << "====== Branch-and-Cut " << decompMainParam.doCut + << "====" << std::endl; + std::cout << "====== Branch-and-Price " << decompMainParam.doPriceCut + << "====" << std::endl; + std::cout << " " << std::endl; + std::cout << " " << std::endl; + std::cout << " " << std::endl; + } + + decompMainParam.timeSolveCpu = timer.getCpuTime(); + decompMainParam.timeSolveReal = timer.getRealTime(); + //--- + //--- sanity check + //--- + cout << setiosflags(ios::fixed | ios::showpoint); + int statusCheck = alpsModel.getSolStatus(); + cout << " " << endl; + cout << "============== DECOMP Solution Info [Begin]: ============== \n"; + cout << "Status = "; + + if (!statusCheck) { + cout << "Optimal" << endl; + } else if (statusCheck == 1) { + cout << "TimeLimit" << endl; + } else if (statusCheck == 2) { + cout << "NodeLimit" << endl; + } else if (statusCheck == 3) { + cout << "SolLimit" << endl; + } else if (statusCheck == 4) { + cout << "Feasible" << endl; + } else if (statusCheck == 5) { + cout << "Infeasible" << endl; + } else if (statusCheck == 6) { + cout << "NoMemory" << endl; + } else if (statusCheck == 7) { + cout << "Failed" << endl; + } else if (statusCheck == 8) { + cout << "Unbounded" << endl; + } else { + cout << "Unknown" << endl; + } + + decompMainParam.bestLB = alpsModel.getGlobalLB(); + decompMainParam.bestUB = alpsModel.getGlobalUB(); + cout << "BestLB = " << setw(10) + << UtilDblToStr(alpsModel.getGlobalLB(), 5) << endl + << "BestUB = " << setw(10) + << UtilDblToStr(alpsModel.getGlobalUB(), 5) << endl + << "OptiGap = " << setw(10) + << UtilDblToStr(UtilCalculateGap(alpsModel.getGlobalLB(), + alpsModel.getGlobalUB(), + milp.getDecompAlgo()->getInfinity()), + 5) + << endl + << "Nodes = " << 
alpsModel.getNumNodesProcessed() << endl + << "SetupCPU = " << decompMainParam.timeSetupCpu << endl + << "SolveCPU = " << decompMainParam.timeSolveCpu << endl + << "TotalCPU = " + << decompMainParam.timeSetupCpu + decompMainParam.timeSolveCpu << endl + << "SetupWallclock= " << decompMainParam.timeSetupReal << endl + << "SolveWallclock= " << decompMainParam.timeSolveReal << endl + << "TotalWallclock= " + << decompMainParam.timeSetupReal + decompMainParam.timeSolveReal + << endl; + cout << "============== DECOMP Solution Info [END ]: ============== \n"; + /* TODO: Add a global parameter to control the subproblem + parallelization + cout << "The parallel efficiency is " + << timeSolveCpu/(milp.m_param.NumThreads*timeSolveReal) + << endl; + */ + //--- + //--- sanity check + //--- if user defines bestLB==bestUB (i.e., known optimal) + //--- and solved claims we have optimal, check that they match + //--- + double epsilon = 0.01; // 1% + double userLB = milp.getBestKnownLB(); + double userUB = milp.getBestKnownUB(); + double userDiff = fabs(userUB - userLB); + + if (alpsModel.getSolStatus() == AlpsExitStatusOptimal && + userDiff < epsilon) { + double diff = fabs(alpsModel.getGlobalUB() - userUB); + double diffPer = userUB == 0 ? diff : diff / userUB; + + if (diffPer > epsilon) { + cerr << setiosflags(ios::fixed | ios::showpoint); + cerr << "ERROR. BestKnownLB/UB= " << UtilDblToStr(userUB, 5) + << " but DIP claims GlobalUB= " + << UtilDblToStr(alpsModel.getGlobalUB(), 5) << endl; + throw UtilException("Invalid claim of optimal.", "main", "MILPBlock"); } + } + + //--- + //--- get optimal solution + //--- + + if (milp.m_param.SolutionOutputToFile && alpsModel.getGlobalUB() < 1.e100) { + const DecompSolution *solution = alpsModel.getBestSolution(); + const vector &colNames = alpsModel.getColNames(); + string solutionFile; - decompMainParam.timeSolveCpu = timer.getCpuTime(); - decompMainParam.timeSolveReal = timer.getRealTime(); - //--- - //--- sanity check - //--- - cout << setiosflags(ios::fixed | ios::showpoint); - int statusCheck = alpsModel.getSolStatus(); - cout << " " << endl; - cout << "============== DECOMP Solution Info [Begin]: ============== \n"; - cout << "Status = "; - - if ( !statusCheck) { - cout << "Optimal" << endl; - } else if (statusCheck == 1) { - cout << "TimeLimit" << endl; - } else if (statusCheck == 2) { - cout << "NodeLimit" << endl; - } else if (statusCheck == 3) { - cout << "SolLimit" << endl; - } else if (statusCheck == 4) { - cout << "Feasible" << endl; - } else if (statusCheck == 5) { - cout << "Infeasible" << endl; - } else if (statusCheck == 6) { - cout << "NoMemory" << endl; - } else if (statusCheck == 7) { - cout << "Failed" << endl; - } else if (statusCheck == 8) { - cout << "Unbounded" << endl; + if (milp.m_param.SolutionOutputFileName == "") { + string::size_type idx = milp.getInstanceName().rfind('/'); + string intanceNameWoDir; + + if (idx != string::npos) { + intanceNameWoDir = milp.getInstanceName().substr(idx + 1); + } else { + intanceNameWoDir = milp.getInstanceName(); + } + + solutionFile = milp.m_param.CurrentWorkingDir + UtilDirSlash() + + intanceNameWoDir + ".sol"; } else { - cout << "Unknown" << endl; + solutionFile = milp.m_param.SolutionOutputFileName; } - decompMainParam.bestLB = alpsModel.getGlobalLB(); - decompMainParam.bestUB = alpsModel.getGlobalUB(); - cout << "BestLB = " << setw(10) - << UtilDblToStr(alpsModel.getGlobalLB(), 5) << endl - << "BestUB = " << setw(10) - << UtilDblToStr(alpsModel.getGlobalUB(), 5) << endl - << "OptiGap = " << 
setw(10) - << UtilDblToStr(UtilCalculateGap(alpsModel.getGlobalLB(), - alpsModel.getGlobalUB(), - milp.getDecompAlgo()->getInfinity()), 5) - << endl - << "Nodes = " - << alpsModel.getNumNodesProcessed() << endl - << "SetupCPU = " << decompMainParam.timeSetupCpu << endl - << "SolveCPU = " << decompMainParam.timeSolveCpu << endl - << "TotalCPU = " << decompMainParam.timeSetupCpu + - decompMainParam.timeSolveCpu << endl - << "SetupWallclock= " << decompMainParam.timeSetupReal << endl - << "SolveWallclock= " << decompMainParam.timeSolveReal << endl - << "TotalWallclock= " << decompMainParam.timeSetupReal - + decompMainParam.timeSolveReal << endl ; - cout << "============== DECOMP Solution Info [END ]: ============== \n"; - /* TODO: Add a global parameter to control the subproblem - parallelization - cout << "The parallel efficiency is " - << timeSolveCpu/(milp.m_param.NumThreads*timeSolveReal) - << endl; - */ - //--- - //--- sanity check - //--- if user defines bestLB==bestUB (i.e., known optimal) - //--- and solved claims we have optimal, check that they match - //--- - double epsilon = 0.01; //1% - double userLB = milp.getBestKnownLB(); - double userUB = milp.getBestKnownUB(); - double userDiff = fabs(userUB - userLB); - - if (alpsModel.getSolStatus() == AlpsExitStatusOptimal && - userDiff < epsilon) { - double diff = fabs(alpsModel.getGlobalUB() - userUB); - double diffPer = userUB == 0 ? diff : diff / userUB; - - if (diffPer > epsilon) { - cerr << setiosflags(ios::fixed | ios::showpoint); - cerr << "ERROR. BestKnownLB/UB= " - << UtilDblToStr(userUB, 5) - << " but DIP claims GlobalUB= " - << UtilDblToStr(alpsModel.getGlobalUB(), 5) - << endl; - throw UtilException("Invalid claim of optimal.", - "main", "MILPBlock"); - } + ofstream osSolution(solutionFile.c_str()); + osSolution.precision(16); + const double *sol = solution->getValues(); + osSolution << "=obj=" << setw(10); + osSolution.precision(8); + osSolution << " " << alpsModel.getGlobalUB() << std::endl; + + for (int i = 0; i < solution->getSize(); i++) { + if (!UtilIsZero(sol[i])) { + osSolution << colNames[i] << setw(10); + osSolution.precision(8); + osSolution << " " << sol[i] << std::endl; + } else { + osSolution << colNames[i] << setw(10); + osSolution.precision(8); + osSolution << " " << 0.0000000 << std::endl; + } } - //--- - //--- get optimal solution - //--- - - if (milp.m_param.SolutionOutputToFile - && alpsModel.getGlobalUB() < 1.e100) { - const DecompSolution* solution = alpsModel.getBestSolution(); - const vector& colNames = alpsModel.getColNames(); - string solutionFile; - - if (milp.m_param.SolutionOutputFileName == "") { - string::size_type idx = milp.getInstanceName().rfind('/'); - string intanceNameWoDir; - - if (idx != string::npos) { - intanceNameWoDir = milp.getInstanceName().substr(idx + 1); - } else { - intanceNameWoDir = milp.getInstanceName(); - } - - solutionFile = milp.m_param.CurrentWorkingDir + UtilDirSlash() - + intanceNameWoDir + ".sol"; - } else { - solutionFile = milp.m_param.SolutionOutputFileName; - } - - ofstream osSolution(solutionFile.c_str()); - osSolution.precision(16); - const double* sol = solution->getValues(); - osSolution << "=obj=" << setw(10); - osSolution.precision(8); - osSolution << " " << alpsModel.getGlobalUB() - << std::endl; - - for (int i = 0; i < solution->getSize(); i++) { - if (!UtilIsZero(sol[i])) { - osSolution << colNames[i] << setw(10); - osSolution.precision(8); - osSolution << " " << sol[i] << std::endl; - } else { - osSolution << colNames[i] << setw(10); - 
osSolution.precision(8); - osSolution << " " << 0.0000000 << std::endl; - } - } - - osSolution.close(); - - if (alpsModel.getSolStatus() == AlpsExitStatusOptimal) { - std::cout << "Optimal Solution is " << std::endl; - solution->print(colNames, 8); - cout << " Optimal Solution can be found in the file " - << solutionFile << endl; - } + osSolution.close(); + + if (alpsModel.getSolStatus() == AlpsExitStatusOptimal) { + std::cout << "Optimal Solution is " << std::endl; + solution->print(colNames, 8); + cout << " Optimal Solution can be found in the file " << solutionFile + << endl; } + } - //--- - //--- free local memory - //--- - delete algo; - } + //--- + //--- free local memory + //--- + delete algo; + } } -DecompSolverResult* solveDirect(const DecompApp& decompApp) -{ - //--- - //--- Solve the original IP with a generic IP solver - //--- without going through the decomposition phase - //--- this function is created such that the DIP can serves - //--- as an interface to call standalone branch-and-cut solver - //--- +DecompSolverResult *solveDirect(const DecompApp &decompApp) { + //--- + //--- Solve the original IP with a generic IP solver + //--- without going through the decomposition phase + //--- this function is created such that the DIP can serves + //--- as an interface to call standalone branch-and-cut solver + //--- - OsiSolverInterface *m_problemSI; + OsiSolverInterface *m_problemSI; - if (decompApp.m_param.DecompIPSolver == "SYMPHONY"){ + if (decompApp.m_param.DecompIPSolver == "SYMPHONY") { #ifdef DIP_HAS_SYMPHONY - m_problemSI = new OsiSymSolverInterface(); + m_problemSI = new OsiSymSolverInterface(); #else - throw UtilException("SYMPHONY selected as solver, but it's not available", - "getOsiIpSolverInterface", "DecompAlgo"); + throw UtilException("SYMPHONY selected as solver, but it's not available", + "getOsiIpSolverInterface", "DecompAlgo"); #endif - }else if (decompApp.m_param.DecompIPSolver == "Cbc"){ + } else if (decompApp.m_param.DecompIPSolver == "Cbc") { #ifdef DIP_HAS_CBC - m_problemSI = new OsiCbcSolverInterface(); + m_problemSI = new OsiCbcSolverInterface(); #else - throw UtilException("Cbc selected as solver, but it's not available", - "getOsiIpSolverInterface", "DecompAlgo"); + throw UtilException("Cbc selected as solver, but it's not available", + "getOsiIpSolverInterface", "DecompAlgo"); #endif - }else if (decompApp.m_param.DecompIPSolver == "CPLEX"){ + } else if (decompApp.m_param.DecompIPSolver == "CPLEX") { #ifdef DIP_HAS_CPX - m_problemSI = new OsiCpxSolverInterface(); + m_problemSI = new OsiCpxSolverInterface(); #else - throw UtilException("CPLEX selected as solver, but it's not available", - "getOsiIpSolverInterface", "DecompAlgo"); + throw UtilException("CPLEX selected as solver, but it's not available", + "getOsiIpSolverInterface", "DecompAlgo"); #endif - }else if (decompApp.m_param.DecompIPSolver == "Gurobi"){ + } else if (decompApp.m_param.DecompIPSolver == "Gurobi") { #ifdef DIP_HAS_GRB - m_problemSI = new OsiGrbSolverInterface(); + m_problemSI = new OsiGrbSolverInterface(); #else - throw UtilException("Gurobi selected as solver, but it's not available", - "getOsiIpSolverInterface", "DecompAlgo"); + throw UtilException("Gurobi selected as solver, but it's not available", + "getOsiIpSolverInterface", "DecompAlgo"); #endif - } - - string fileName; - - if (decompApp.m_param.DataDir != "") { - fileName = decompApp.m_param.DataDir + UtilDirSlash() + - decompApp.m_param.Instance; - } else { - fileName = decompApp.m_param.Instance; - } - - std::cout << "The 
file name is " << fileName << std::endl; - - if (decompApp.m_param.Instance.empty()) { - cerr << "================================================" << std::endl - << "Usage:" - << "./dip --MILP:BlockFileFormat List" << std::endl - << " --MILP:Instance /FilePath/ABC.mps" << std::endl - << " --MILP:BlockFile /FilePath/ABC.block" << std::endl - << "================================================" << std::endl - << std::endl; - exit(0); - } - - m_problemSI->readMps(fileName.c_str()); - int numCols = decompApp.m_mpsIO.getNumCols(); - int nNodes; - double objLB = -m_problemSI->getInfinity(); - double objUB = m_problemSI->getInfinity(); - double timeLimit = decompApp.m_param.TimeLimit; - UtilTimer timer; - timer.start(); - DecompSolverResult* result = new DecompSolverResult(m_problemSI->getInfinity()); - if (decompApp.m_param.DecompIPSolver == "Cbc"){ + } + + string fileName; + + if (decompApp.m_param.DataDir != "") { + fileName = + decompApp.m_param.DataDir + UtilDirSlash() + decompApp.m_param.Instance; + } else { + fileName = decompApp.m_param.Instance; + } + + std::cout << "The file name is " << fileName << std::endl; + + if (decompApp.m_param.Instance.empty()) { + cerr << "================================================" << std::endl + << "Usage:" + << "./dip --MILP:BlockFileFormat List" << std::endl + << " --MILP:Instance /FilePath/ABC.mps" << std::endl + << " --MILP:BlockFile /FilePath/ABC.block" << std::endl + << "================================================" << std::endl + << std::endl; + exit(0); + } + + m_problemSI->readMps(fileName.c_str()); + int numCols = decompApp.m_mpsIO.getNumCols(); + int nNodes; + double objLB = -m_problemSI->getInfinity(); + double objUB = m_problemSI->getInfinity(); + double timeLimit = decompApp.m_param.TimeLimit; + UtilTimer timer; + timer.start(); + DecompSolverResult *result = + new DecompSolverResult(m_problemSI->getInfinity()); + if (decompApp.m_param.DecompIPSolver == "Cbc") { #ifdef DIP_HAS_CBC - CbcModel cbc(*m_problemSI); - int logIpLevel = decompApp.m_param.LogIpLevel; - cbc.setLogLevel(logIpLevel); - cbc.setDblParam(CbcModel::CbcMaximumSeconds, timeLimit); - cbc.branchAndBound(); - const int statusSet[2] = {0, 1}; - int solStatus = cbc.status(); - int solStatus2 = cbc.secondaryStatus(); - - if (!UtilIsInSet(solStatus, statusSet, 2)) { - cerr << "Error: CBC IP solver status = " - << solStatus << endl; - throw UtilException("CBC solver status", "solveDirect", "solveDirect"); - } - - //--- - //--- get number of nodes - //--- - nNodes = cbc.getNodeCount(); - //--- - //--- get objective and solution - //--- - objLB = cbc.getBestPossibleObjValue(); - - if (cbc.isProvenOptimal() || cbc.isSecondsLimitReached()) { - objUB = cbc.getObjValue(); - - if (result && cbc.getSolutionCount()) { - const double* solDbl = cbc.getColSolution(); - vector solVec(solDbl, solDbl + numCols); - result->m_solution.push_back(solVec); - result->m_nSolutions++; - assert(result->m_nSolutions == - static_cast(result->m_solution.size())); - //copy(solution, solution+numCols, result->m_solution); - } - } - - //--- - //--- copy sol status into result - //--- - if (result) { - result->m_solStatus = solStatus; - result->m_solStatus2 = solStatus2; + CbcModel cbc(*m_problemSI); + int logIpLevel = decompApp.m_param.LogIpLevel; + cbc.setLogLevel(logIpLevel); + cbc.setDblParam(CbcModel::CbcMaximumSeconds, timeLimit); + cbc.branchAndBound(); + const int statusSet[2] = {0, 1}; + int solStatus = cbc.status(); + int solStatus2 = cbc.secondaryStatus(); + + if (!UtilIsInSet(solStatus, 
statusSet, 2)) { + cerr << "Error: CBC IP solver status = " << solStatus << endl; + throw UtilException("CBC solver status", "solveDirect", "solveDirect"); + } + + //--- + //--- get number of nodes + //--- + nNodes = cbc.getNodeCount(); + //--- + //--- get objective and solution + //--- + objLB = cbc.getBestPossibleObjValue(); + + if (cbc.isProvenOptimal() || cbc.isSecondsLimitReached()) { + objUB = cbc.getObjValue(); + + if (result && cbc.getSolutionCount()) { + const double *solDbl = cbc.getColSolution(); + vector solVec(solDbl, solDbl + numCols); + result->m_solution.push_back(solVec); + result->m_nSolutions++; + assert(result->m_nSolutions == + static_cast(result->m_solution.size())); + // copy(solution, solution+numCols, result->m_solution); } + } + + //--- + //--- copy sol status into result + //--- + if (result) { + result->m_solStatus = solStatus; + result->m_solStatus2 = solStatus2; + } #else - throw UtilException("Cbc selected as solver, but it's not available", - "solveDirect", "DecompMain"); + throw UtilException("Cbc selected as solver, but it's not available", + "solveDirect", "DecompMain"); #endif - }else if (decompApp.m_param.DecompIPSolver == "CPLEX"){ + } else if (decompApp.m_param.DecompIPSolver == "CPLEX") { #ifdef DIP_HAS_CPX - OsiCpxSolverInterface* masterSICpx - = dynamic_cast(m_problemSI); - CPXLPptr cpxLp = masterSICpx->getLpPtr(); - CPXENVptr cpxEnv = masterSICpx->getEnvironmentPtr(); - int status = 0; - masterSICpx->switchToMIP();//need? - //--- - //--- set the time limit - //--- - status = CPXsetdblparam(cpxEnv, CPX_PARAM_TILIM, timeLimit); - //--- - //--- set the thread limit, otherwise CPLEX will use all the resources - //--- - status = CPXsetintparam(cpxEnv, CPX_PARAM_THREADS, - decompApp.m_param.NumThreadsIPSolver); - - if (status) - throw UtilException("CPXsetdblparam failure", - "solveDirect", "DecompAlgoC"); - - //--- - //--- solve the MILP - //--- - UtilTimer timer1; - timer1.start(); - masterSICpx->branchAndBound(); - timer1.stop(); - cout << "just after solving" << endl; - cout << " Real=" << setw(10) << UtilDblToStr(timer1.getRealTime(), 5) - << " Cpu= " << setw(10) << UtilDblToStr(timer1.getCpuTime() , 5); - //--- - //--- get solver status - //--- - //--- - int solStatus = CPXgetstat(cpxEnv, cpxLp); - - if (result) { - result->m_solStatus = solStatus; - result->m_solStatus2 = 0; - } - - //--- - //--- get number of nodes - //--- - nNodes = CPXgetnodecnt(cpxEnv, cpxLp); - //--- - //--- get objective and solution - //--- - status = CPXgetbestobjval(cpxEnv, cpxLp, &objLB); - + OsiCpxSolverInterface *masterSICpx = + dynamic_cast(m_problemSI); + CPXLPptr cpxLp = masterSICpx->getLpPtr(); + CPXENVptr cpxEnv = masterSICpx->getEnvironmentPtr(); + int status = 0; + masterSICpx->switchToMIP(); // need? 
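Reviewer note on the two CPXset*param calls that follow: they share a single status variable, so an error from the time-limit call is overwritten by the thread-limit call before status is tested (and the exception text names CPXsetdblparam in either case). A hedged sketch that checks each call on its own, using the same parameters as the surrounding code (not a tested change):

    // CPLEX parameter setters return nonzero on error, as the existing check assumes
    if (CPXsetdblparam(cpxEnv, CPX_PARAM_TILIM, timeLimit))
      throw UtilException("CPXsetdblparam failure", "solveDirect", "DecompAlgoC");
    if (CPXsetintparam(cpxEnv, CPX_PARAM_THREADS,
                       decompApp.m_param.NumThreadsIPSolver))
      throw UtilException("CPXsetintparam failure", "solveDirect", "DecompAlgoC");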
+ //--- + //--- set the time limit + //--- + status = CPXsetdblparam(cpxEnv, CPX_PARAM_TILIM, timeLimit); + //--- + //--- set the thread limit, otherwise CPLEX will use all the resources + //--- + status = CPXsetintparam(cpxEnv, CPX_PARAM_THREADS, + decompApp.m_param.NumThreadsIPSolver); + + if (status) + throw UtilException("CPXsetdblparam failure", "solveDirect", + "DecompAlgoC"); + + //--- + //--- solve the MILP + //--- + UtilTimer timer1; + timer1.start(); + masterSICpx->branchAndBound(); + timer1.stop(); + cout << "just after solving" << endl; + cout << " Real=" << setw(10) << UtilDblToStr(timer1.getRealTime(), 5) + << " Cpu= " << setw(10) << UtilDblToStr(timer1.getCpuTime(), 5); + //--- + //--- get solver status + //--- + //--- + int solStatus = CPXgetstat(cpxEnv, cpxLp); + + if (result) { + result->m_solStatus = solStatus; + result->m_solStatus2 = 0; + } + + //--- + //--- get number of nodes + //--- + nNodes = CPXgetnodecnt(cpxEnv, cpxLp); + //--- + //--- get objective and solution + //--- + status = CPXgetbestobjval(cpxEnv, cpxLp, &objLB); + + if (status) + throw UtilException("CPXgetbestobjval failure", "solveDirect", + "DecompAlgoC"); + + //--- + //--- get objective and solution + //--- + if (solStatus == CPXMIP_OPTIMAL || solStatus == CPXMIP_OPTIMAL_TOL || + solStatus == CPXMIP_TIME_LIM_FEAS) { + status = CPXgetmipobjval(cpxEnv, cpxLp, &objUB); + if (status) - throw UtilException("CPXgetbestobjval failure", - "solveDirect", "DecompAlgoC"); - - //--- - //--- get objective and solution - //--- - if (solStatus == CPXMIP_OPTIMAL || - solStatus == CPXMIP_OPTIMAL_TOL || - solStatus == CPXMIP_TIME_LIM_FEAS) { - status = CPXgetmipobjval(cpxEnv, cpxLp, &objUB); - - if (status) - throw UtilException("CPXgetmipobjval failure", - "solveDirect", "DecompAlgoC"); - - if (result) { - const double* solDbl = masterSICpx->getColSolution(); - vector solVec(solDbl, solDbl + numCols); - result->m_solution.push_back(solVec); - result->m_nSolutions++; - assert(result->m_nSolutions == - static_cast(result->m_solution.size())); - //copy(solution, solution+numCols, result->m_solution); - } - } - - //--- - //--- copy sol status into result - //--- + throw UtilException("CPXgetmipobjval failure", "solveDirect", + "DecompAlgoC"); + if (result) { - result->m_solStatus = solStatus; - result->m_solStatus2 = 0; + const double *solDbl = masterSICpx->getColSolution(); + vector solVec(solDbl, solDbl + numCols); + result->m_solution.push_back(solVec); + result->m_nSolutions++; + assert(result->m_nSolutions == + static_cast(result->m_solution.size())); + // copy(solution, solution+numCols, result->m_solution); } + } + + //--- + //--- copy sol status into result + //--- + if (result) { + result->m_solStatus = solStatus; + result->m_solStatus2 = 0; + } #else - throw UtilException("CPLEX selected as solver, but it's not available", - "solveDirect", "DecompMain"); + throw UtilException("CPLEX selected as solver, but it's not available", + "solveDirect", "DecompMain"); #endif - }else{ - throw UtilException("solveDirect not implemented for selected solver", - "solveDirect", "DecompDebug"); - } - - //--- - //--- copy bounds into result - //--- - if (result) { - result->m_objUB = objUB; - result->m_objLB = objLB; - } - - //--- - //--- stop the timer, dump time to solve - //--- - timer.stop(); - cout << "DIRECT SOLVE" - << " Real=" << setw(10) << UtilDblToStr(timer.getRealTime(), 5) - << " Cpu= " << setw(10) << UtilDblToStr(timer.getCpuTime() , 5) - << " Nodes= " << setw(8) << nNodes - << " objLB= " << setw(10) << 
UtilDblToStr(objLB, 3) - << " objUB= " << setw(10) << UtilDblToStr(objUB, 3) - << endl; - UTIL_DELPTR( m_problemSI); - return result; + } else { + throw UtilException("solveDirect not implemented for selected solver", + "solveDirect", "DecompDebug"); + } + + //--- + //--- copy bounds into result + //--- + if (result) { + result->m_objUB = objUB; + result->m_objLB = objLB; + } + + //--- + //--- stop the timer, dump time to solve + //--- + timer.stop(); + cout << "DIRECT SOLVE" + << " Real=" << setw(10) << UtilDblToStr(timer.getRealTime(), 5) + << " Cpu= " << setw(10) << UtilDblToStr(timer.getCpuTime(), 5) + << " Nodes= " << setw(8) << nNodes << " objLB= " << setw(10) + << UtilDblToStr(objLB, 3) << " objUB= " << setw(10) + << UtilDblToStr(objUB, 3) << endl; + UTIL_DELPTR(m_problemSI); + return result; } diff --git a/Dip/src/DecompModel.cpp b/Dip/src/DecompModel.cpp index 301d8838..5d23c14f 100644 --- a/Dip/src/DecompModel.cpp +++ b/Dip/src/DecompModel.cpp @@ -21,992 +21,925 @@ using namespace std; //===========================================================================// -bool DecompSubModel::isPointFeasible(const double* x, - const bool isXSparse, - const int logLevel, - const double feasVarTol, - const double feasConTol) -{ - DecompConstraintSet* model = getModel(); - - if (!model) { - return true; - } - - const CoinPackedMatrix* M = model->getMatrix(); - - if (!M) { - return true; - } - - const vector& colNames = model->getColNames(); - const vector& rowNames = model->getRowNames(); - int c, r, i; - bool isFeas = true; - bool hasColNames = false; - bool hasRowNames = false; - double xj = 0.0; - double ax = 0.0; - double clb = 0.0; - double cub = 0.0; - double rlb = 0.0; - double rub = 0.0; - double actViol = 0.0; - double relViol = 0.0; - - if (colNames.size()) { - hasColNames = true; - } - - if (rowNames.size()) { - hasRowNames = true; - } - - double feasVarTol100 = 100 * feasVarTol; - double feasConTol100 = 100 * feasConTol; - //--- - //--- do we satisfy all (active) column bounds - //--- - vector ::const_iterator it; - map::const_iterator mcit; - const vector& activeColumns = model->getActiveColumns(); - bool isSparse = model->isSparse(); - const map& origToSparse = model->getMapOrigToSparse(); - const map& sparseToOrig = model->getMapSparseToOrig(); - - for (it = activeColumns.begin(); it != activeColumns.end(); it++) { - if (isSparse) { - mcit = origToSparse.find(*it); - c = mcit->second; - xj = isXSparse ? 
x[c] : x[*it]; - } else { - c = *it; - xj = x[c]; - assert(!isXSparse); - } - - clb = model->colLB[c]; - cub = model->colUB[c]; - UTIL_DEBUG(logLevel, 5, - int precision = 7; - - if (!UtilIsZero(xj)) { - cout << "Point " << c; - - if (hasColNames) { +bool DecompSubModel::isPointFeasible(const double *x, const bool isXSparse, + const int logLevel, + const double feasVarTol, + const double feasConTol) { + DecompConstraintSet *model = getModel(); + + if (!model) { + return true; + } + + const CoinPackedMatrix *M = model->getMatrix(); + + if (!M) { + return true; + } + + const vector &colNames = model->getColNames(); + const vector &rowNames = model->getRowNames(); + int c, r, i; + bool isFeas = true; + bool hasColNames = false; + bool hasRowNames = false; + double xj = 0.0; + double ax = 0.0; + double clb = 0.0; + double cub = 0.0; + double rlb = 0.0; + double rub = 0.0; + double actViol = 0.0; + double relViol = 0.0; + + if (colNames.size()) { + hasColNames = true; + } + + if (rowNames.size()) { + hasRowNames = true; + } + + double feasVarTol100 = 100 * feasVarTol; + double feasConTol100 = 100 * feasConTol; + //--- + //--- do we satisfy all (active) column bounds + //--- + vector::const_iterator it; + map::const_iterator mcit; + const vector &activeColumns = model->getActiveColumns(); + bool isSparse = model->isSparse(); + const map &origToSparse = model->getMapOrigToSparse(); + const map &sparseToOrig = model->getMapSparseToOrig(); + + for (it = activeColumns.begin(); it != activeColumns.end(); it++) { + if (isSparse) { + mcit = origToSparse.find(*it); + c = mcit->second; + xj = isXSparse ? x[c] : x[*it]; + } else { + c = *it; + xj = x[c]; + assert(!isXSparse); + } + + clb = model->colLB[c]; + cub = model->colUB[c]; + UTIL_DEBUG( + logLevel, 5, int precision = 7; + + if (!UtilIsZero(xj)) { + cout << "Point " << c; + + if (hasColNames) { cout << " -> " << colNames[c]; - } - - cout << " LB= " << UtilDblToStr(clb, precision) - << " x= " << UtilDblToStr(xj, precision) - << " UB= " << UtilDblToStr(cub, precision) - << endl; + } + + cout << " LB= " << UtilDblToStr(clb, precision) + << " x= " << UtilDblToStr(xj, precision) + << " UB= " << UtilDblToStr(cub, precision) << endl; + }); + actViol = std::max(clb - xj, xj - cub); + actViol = std::max(actViol, 0.0); + + if (UtilIsZero(xj, feasVarTol) || (xj < 0 && UtilIsZero(clb)) || + (xj > 0 && UtilIsZero(cub))) { + relViol = actViol; + } else { + relViol = actViol / std::fabs(xj); + } + + if (relViol > feasVarTol) { + // Notify, but don't mark in feasible unless 10x worse. + UTIL_DEBUG(logLevel, 4, int precision = 7; + cout << "Point violates column " << c; + + if (hasColNames) cout << " -> " << colNames[c]; + cout << " LB= " << UtilDblToStr(clb, precision) + << " x= " << UtilDblToStr(xj, precision) + << " UB= " << UtilDblToStr(cub, precision) << " RelViol= " + << UtilDblToStr(relViol, precision) << endl;); + + if (relViol > feasVarTol100) { + isFeas = false; + goto FUNC_EXIT; } - ); - actViol = std::max(clb - xj, xj - cub); - actViol = std::max(actViol, 0.0); - - if (UtilIsZero(xj, feasVarTol) || - (xj < 0 && UtilIsZero(clb)) || - (xj > 0 && UtilIsZero(cub))) { - relViol = actViol; - } else { - relViol = actViol / std::fabs(xj); - } - - if (relViol > feasVarTol) { - //Notify, but don't mark in feasible unless 10x worse. 
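A note on the tolerance logic in this column-bound check: the relative violation is the bound violation divided by |xj| (falling back to the absolute violation when xj, or the relevant bound, is essentially zero), and a point is only rejected once it exceeds feasVarTol100 = 100 * feasVarTol, even though the comment says "10x worse". A minimal self-contained sketch of the same test (illustrative only, with an assumed tolerance; not DIP's API):

    #include <algorithm>
    #include <cmath>

    // Returns true when x violates [lb, ub] by more than 100x the tolerance,
    // measuring relative to |x| away from zero and absolutely near zero.
    bool violatesColumnBound(double x, double lb, double ub, double tol = 1.e-6) {
      double actViol = std::max({lb - x, x - ub, 0.0});
      double relViol = (std::fabs(x) > tol) ? actViol / std::fabs(x) : actViol;
      return relViol > 100.0 * tol;
    }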
- UTIL_DEBUG(logLevel, 4, - int precision = 7; - cout << "Point violates column " << c; - - if (hasColNames) - cout << " -> " << colNames[c]; - cout << " LB= " << UtilDblToStr(clb, precision) - << " x= " << UtilDblToStr(xj, precision) - << " UB= " << UtilDblToStr(cub, precision) - << " RelViol= " << UtilDblToStr(relViol, precision) - << endl; - ); - - if (relViol > feasVarTol100) { - isFeas = false; - goto FUNC_EXIT; - } - } - } - - //--- - //--- do we satisfy all row bounds - //--- TODO: for core model, this includes branching rows and cuts - //--- we can actually get away with just checking the - //--- original base rows - //--- - //--- TODO: masterOnly variable - - for (r = 0; r < model->getNumRows(); r++) { - if (isSparse) { - if (isXSparse) { - ax = model->M->getVector(r).dotProduct(x); - } else { - //TODO: "sparse" is the wrong word here - should be using - // the word projected for param, etc... - //--- - //--- x is with respect to the original set of columns, - //--- but the matrix is the sparse version - //--- - const CoinShallowPackedVector v = model->M->getVector(r); - const int len = v.getNumElements(); - const int* ind = v.getIndices(); - const double* els = v.getElements(); - ax = 0.0; - - for (i = 0; i < len; i++) { - mcit = sparseToOrig.find(ind[i]); - c = mcit->second; - ax += x[c] * els[i]; - } - } - } else { - ax = model->M->getVector(r).dotProduct(x); - assert(!isXSparse); - } - - rlb = model->rowLB[r]; - rub = model->rowUB[r]; - actViol = std::max(rlb - ax, ax - rub); - actViol = std::max(actViol, 0.0); - - //printf("CORE r:%d rlb:%g ax:%g rub:%g actViol:%g\n", - // r, rlb, ax, rub, actViol); - if (UtilIsZero(ax, feasConTol) || - (ax < 0 && UtilIsZero(rlb)) || - (ax > 0 && UtilIsZero(rub))) { - relViol = actViol; + } + } + + //--- + //--- do we satisfy all row bounds + //--- TODO: for core model, this includes branching rows and cuts + //--- we can actually get away with just checking the + //--- original base rows + //--- + //--- TODO: masterOnly variable + + for (r = 0; r < model->getNumRows(); r++) { + if (isSparse) { + if (isXSparse) { + ax = model->M->getVector(r).dotProduct(x); } else { - relViol = actViol / std::fabs(ax); + // TODO: "sparse" is the wrong word here - should be using + // the word projected for param, etc... + //--- + //--- x is with respect to the original set of columns, + //--- but the matrix is the sparse version + //--- + const CoinShallowPackedVector v = model->M->getVector(r); + const int len = v.getNumElements(); + const int *ind = v.getIndices(); + const double *els = v.getElements(); + ax = 0.0; + + for (i = 0; i < len; i++) { + mcit = sparseToOrig.find(ind[i]); + c = mcit->second; + ax += x[c] * els[i]; + } } - - if (relViol > feasConTol) { - //Notify, but don't mark in feasible unless 10x worse. 
- UTIL_DEBUG(logLevel, 4, - cout - << "Point violates row "; - - if (hasRowNames) - cout << " -> " << rowNames[r]; - cout << " ax[" << r << "]: " - << UtilDblToStr(ax) - << " LB: " << UtilDblToStr(rlb) - << " UB: " << UtilDblToStr(rub) - << " RelViol: " << UtilDblToStr(relViol) - << endl; - ); - - if (relViol > feasConTol100) { - isFeas = false; - goto FUNC_EXIT; - } + } else { + ax = model->M->getVector(r).dotProduct(x); + assert(!isXSparse); + } + + rlb = model->rowLB[r]; + rub = model->rowUB[r]; + actViol = std::max(rlb - ax, ax - rub); + actViol = std::max(actViol, 0.0); + + // printf("CORE r:%d rlb:%g ax:%g rub:%g actViol:%g\n", + // r, rlb, ax, rub, actViol); + if (UtilIsZero(ax, feasConTol) || (ax < 0 && UtilIsZero(rlb)) || + (ax > 0 && UtilIsZero(rub))) { + relViol = actViol; + } else { + relViol = actViol / std::fabs(ax); + } + + if (relViol > feasConTol) { + // Notify, but don't mark in feasible unless 10x worse. + UTIL_DEBUG(logLevel, 4, cout << "Point violates row "; + + if (hasRowNames) cout << " -> " << rowNames[r]; + cout << " ax[" << r << "]: " << UtilDblToStr(ax) << " LB: " + << UtilDblToStr(rlb) << " UB: " << UtilDblToStr(rub) + << " RelViol: " << UtilDblToStr(relViol) << endl;); + + if (relViol > feasConTol100) { + isFeas = false; + goto FUNC_EXIT; } - } + } + } FUNC_EXIT: - UTIL_DEBUG(logLevel, 4, - cout << "isPointFeasible = " << isFeas << endl; - ); - return isFeas; + UTIL_DEBUG(logLevel, 4, cout << "isPointFeasible = " << isFeas << endl;); + return isFeas; } //===========================================================================// -void DecompSubModel::solveAsMIP(DecompSolverResult* result, - DecompParam& param, - bool doExact, - bool doCutoff, - bool isRoot, - double cutoff, - double timeLimit) -{ - //--- - //--- clear out any old solutions - //--- - result->m_solution.clear(); - - if (param.DecompIPSolver == "SYMPHONY"){ - solveAsMIPSym(result, param, doExact, doCutoff, isRoot, cutoff, - timeLimit); - }else if (param.DecompIPSolver == "Cbc"){ - solveAsMIPCbc(result, param, doExact, doCutoff, isRoot, cutoff, - timeLimit); - }else if (param.DecompIPSolver == "CPLEX"){ - solveAsMIPCpx(result, param, doExact, doCutoff, isRoot, cutoff, - timeLimit); - }else if (param.DecompIPSolver == "Gurobi"){ - solveAsMIPGrb(result, param, doExact, doCutoff, isRoot, cutoff, - timeLimit); - }else{ - throw UtilException("Unknown solver selected.", - "solveAsMIP", "DecompSubModel"); - } +void DecompSubModel::solveAsMIP(DecompSolverResult *result, DecompParam ¶m, + bool doExact, bool doCutoff, bool isRoot, + double cutoff, double timeLimit) { + //--- + //--- clear out any old solutions + //--- + result->m_solution.clear(); + + if (param.DecompIPSolver == "SYMPHONY") { + solveAsMIPSym(result, param, doExact, doCutoff, isRoot, cutoff, timeLimit); + } else if (param.DecompIPSolver == "Cbc") { + solveAsMIPCbc(result, param, doExact, doCutoff, isRoot, cutoff, timeLimit); + } else if (param.DecompIPSolver == "CPLEX") { + solveAsMIPCpx(result, param, doExact, doCutoff, isRoot, cutoff, timeLimit); + } else if (param.DecompIPSolver == "Gurobi") { + solveAsMIPGrb(result, param, doExact, doCutoff, isRoot, cutoff, timeLimit); + } else { + throw UtilException("Unknown solver selected.", "solveAsMIP", + "DecompSubModel"); + } } //===========================================================================// -void DecompSubModel::solveAsMIPSym(DecompSolverResult* result, - DecompParam& param, - bool doExact, - bool doCutoff, - bool isRoot, - double cutoff, - double timeLimit) -{ +void 
DecompSubModel::solveAsMIPSym(DecompSolverResult *result, + DecompParam ¶m, bool doExact, + bool doCutoff, bool isRoot, double cutoff, + double timeLimit) { #ifdef COIN_HAS_SYMPHONY - const int numCols = m_osi->getNumCols(); - const int logIpLevel = param.LogIpLevel; - double* solution = new double[numCols]; - assert(solution); - //OsiSymSolverInterface* osiSym - // = dynamic_cast(m_osi->clone()); - OsiSymSolverInterface* osiSym - = dynamic_cast(m_osi); - sym_environment* env = osiSym->getSymphonyEnvironment(); - - if (logIpLevel == 0 ) { - sym_set_int_param(env, "verbosity", -10); - } else { - sym_set_int_param(env, "verbosity", logIpLevel); - } - - sym_set_int_param(env, "max_active_nodes", param.NumThreadsIPSolver); - sym_set_int_param(env, "prep_level", 0); - - if (param.WarmStart) { - sym_set_int_param(env, "do_reduced_cost_fixing", 0); - - osiSym->setSymParam(OsiSymKeepWarmStart, true); - //whether to trim the warm start tree before re-solving. - osiSym->setSymParam(OsiSymTrimWarmTree, true); - - //This call automatically detects whether to warm start or not - osiSym->resolve(); - - } else { - osiSym->initialSolve(); - } - - int status = sym_get_status(env); - - if (param.LogDebugLevel >= 4){ - if (status == TM_OPTIMAL_SOLUTION_FOUND) { - std::cout << "Tree Manager(TM) found the " - << "optimal solution and stopped" - << std::endl; - } else if (status == TM_TIME_LIMIT_EXCEEDED) { - std::cout << "TM stopped after reaching the" - << " predefined time limit " - << std::endl; - } else if (status == TM_NODE_LIMIT_EXCEEDED) { - std::cout << "TM stopped after reading" - << " the predefined node limit " - << std::endl; - } else if (status == TM_TARGET_GAP_ACHIEVED) { - std::cout << "TM stopped after achieving " - << "the predened target gap" - << std::endl; - } else if (status == TM_NO_SOLUTION) { - std::cout << "TM has NO SOLUTION" - << std::endl; - } else { - std::cerr << "Error: SYPHONMY IP solver status = " - << status << std::endl; - } - } + const int numCols = m_osi->getNumCols(); + const int logIpLevel = param.LogIpLevel; + double *solution = new double[numCols]; + assert(solution); + // OsiSymSolverInterface* osiSym + // = dynamic_cast(m_osi->clone()); + OsiSymSolverInterface *osiSym = dynamic_cast(m_osi); + sym_environment *env = osiSym->getSymphonyEnvironment(); + + if (logIpLevel == 0) { + sym_set_int_param(env, "verbosity", -10); + } else { + sym_set_int_param(env, "verbosity", logIpLevel); + } + + sym_set_int_param(env, "max_active_nodes", param.NumThreadsIPSolver); + sym_set_int_param(env, "prep_level", 0); + + if (param.WarmStart) { + sym_set_int_param(env, "do_reduced_cost_fixing", 0); + + osiSym->setSymParam(OsiSymKeepWarmStart, true); + // whether to trim the warm start tree before re-solving. 
+ osiSym->setSymParam(OsiSymTrimWarmTree, true); + + // This call automatically detects whether to warm start or not + osiSym->resolve(); + + } else { + osiSym->initialSolve(); + } + + int status = sym_get_status(env); + + if (param.LogDebugLevel >= 4) { + if (status == TM_OPTIMAL_SOLUTION_FOUND) { + std::cout << "Tree Manager(TM) found the " + << "optimal solution and stopped" << std::endl; + } else if (status == TM_TIME_LIMIT_EXCEEDED) { + std::cout << "TM stopped after reaching the" + << " predefined time limit " << std::endl; + } else if (status == TM_NODE_LIMIT_EXCEEDED) { + std::cout << "TM stopped after reading" + << " the predefined node limit " << std::endl; + } else if (status == TM_TARGET_GAP_ACHIEVED) { + std::cout << "TM stopped after achieving " + << "the predened target gap" << std::endl; + } else if (status == TM_NO_SOLUTION) { + std::cout << "TM has NO SOLUTION" << std::endl; + } else { + std::cerr << "Error: SYPHONMY IP solver status = " << status + << std::endl; + } + } + + if ((status == PREP_OPTIMAL_SOLUTION_FOUND) || + (status == TM_OPTIMAL_SOLUTION_FOUND) || + (status == TM_TARGET_GAP_ACHIEVED)) { + result->m_isOptimal = true; + double objective_value = 0.0; + sym_get_obj_val(env, &objective_value); + if (param.LogDebugLevel >= 4) { + std::cout << "The optimal objective value is " << objective_value + << std::endl; + } - if ( (status == PREP_OPTIMAL_SOLUTION_FOUND ) || - (status == TM_OPTIMAL_SOLUTION_FOUND) - || (status == TM_TARGET_GAP_ACHIEVED)) { - result->m_isOptimal = true; - double objective_value = 0.0; - sym_get_obj_val(env, &objective_value); - if (param.LogDebugLevel >= 4){ - std::cout << "The optimal objective value is " - << objective_value << std::endl; - } + double objval; + double *opt_solution = new double[numCols]; + int nSols = 0; - double objval; - double* opt_solution = new double[numCols]; - int nSols = 0; + status = sym_get_sp_size(env, &nSols); - status = sym_get_sp_size(env, &nSols); + result->m_nSolutions = 1; + status = sym_get_col_solution(env, opt_solution); + vector solVec(opt_solution, opt_solution + numCols); + result->m_solution.push_back(solVec); - result->m_nSolutions = 1; - status = sym_get_col_solution(env, opt_solution); - vector solVec(opt_solution, opt_solution + numCols); - result->m_solution.push_back(solVec); + nSols = std::min(nSols, param.SubProbNumSolLimit); - nSols = std::min(nSols, param.SubProbNumSolLimit); - - for (int i = 0; i < nSols; i++){ - status = sym_get_sp_solution(env, i, solution, &objval); - /* - for (int i = 0 ; i < numCols; ++i) { - std::cout << "the solution is " << solution[i] - << std::endl; - } - */ - //We have to make sure that the solution is not one we already have - if (memcmp(opt_solution, solution, numCols*DSIZE) == 0){ - vector solVec(solution, solution + numCols); - result->m_solution.push_back(solVec); - result->m_nSolutions += 1; - } - } - UTIL_DELARR(opt_solution); - } else { - if (sym_is_proven_primal_infeasible(env)) { - result->m_nSolutions = 0; - result->m_isOptimal = true; - result->m_isCutoff = doCutoff; - } else { - result->m_isCutoff = doCutoff; - result->m_isOptimal = false ; + for (int i = 0; i < nSols; i++) { + status = sym_get_sp_solution(env, i, solution, &objval); + /* + for (int i = 0 ; i < numCols; ++i) { + std::cout << "the solution is " << solution[i] + << std::endl; + } + */ + // We have to make sure that the solution is not one we already have + if (memcmp(opt_solution, solution, numCols * DSIZE) == 0) { + vector solVec(solution, solution + numCols); + 
result->m_solution.push_back(solVec); + result->m_nSolutions += 1; } - } - UTIL_DELARR(solution); + } + UTIL_DELARR(opt_solution); + } else { + if (sym_is_proven_primal_infeasible(env)) { + result->m_nSolutions = 0; + result->m_isOptimal = true; + result->m_isCutoff = doCutoff; + } else { + result->m_isCutoff = doCutoff; + result->m_isOptimal = false; + } + } + UTIL_DELARR(solution); #else - throw UtilException("SYMPHONY selected as solver, but it's not available", - "solveAsMIPSym", "DecompSubModel"); + throw UtilException("SYMPHONY selected as solver, but it's not available", + "solveAsMIPSym", "DecompSubModel"); #endif } //===========================================================================// -void DecompSubModel::solveAsMIPCbc(DecompSolverResult* result, - DecompParam& param, - bool doExact, - bool doCutoff, - bool isRoot, - double cutoff, - double timeLimit) -{ +void DecompSubModel::solveAsMIPCbc(DecompSolverResult *result, + DecompParam ¶m, bool doExact, + bool doCutoff, bool isRoot, double cutoff, + double timeLimit) { #ifdef COIN_HAS_CBC - const int numCols = m_osi->getNumCols(); - const int logIpLevel = param.LogIpLevel; - //TODO: what exactly does this do? make copy of entire model!? - CbcModel cbc(*m_osi); - cbc.setLogLevel(logIpLevel); + const int numCols = m_osi->getNumCols(); + const int logIpLevel = param.LogIpLevel; + // TODO: what exactly does this do? make copy of entire model!? + CbcModel cbc(*m_osi); + cbc.setLogLevel(logIpLevel); #ifdef _OPENMP - cbc.setDblParam(CbcModel::CbcMaximumSeconds, timeLimit); - cbc.branchAndBound(); - const int statusSet[2] = {0, 1}; - result->m_solStatus = cbc.status(); - - if (!UtilIsInSet(result->m_solStatus, statusSet, 2)) { - cerr << "Error: CBC IP solver status = " << result->m_solStatus << endl; - throw UtilException("CBC solver status", - "solveAsMIPCbc", "DecompSubModel"); - } + cbc.setDblParam(CbcModel::CbcMaximumSeconds, timeLimit); + cbc.branchAndBound(); + const int statusSet[2] = {0, 1}; + result->m_solStatus = cbc.status(); + + if (!UtilIsInSet(result->m_solStatus, statusSet, 2)) { + cerr << "Error: CBC IP solver status = " << result->m_solStatus << endl; + throw UtilException("CBC solver status", "solveAsMIPCbc", "DecompSubModel"); + } #else - //int i; - //const double * colUB = cbc.getColUpper(); - //for(i = 0; i < cbc.getNumCols(); i++){ - // printf("col %d -> ub: %g\n", - // i, colUB[i]); - //} - //--- - //--- build argument list - //--- - const char* argv[20]; - int argc = 0; - string cbcExe = "cbc"; - string cbcSolve = "-solve"; - string cbcQuit = "-quit"; - string cbcLog = "-log"; - string cbcLogSet = UtilIntToStr(logIpLevel); - string cbcGap = "-ratio"; - string cbcGapSet = "0"; - string cbcTime = "-seconds"; - string cbcTimeSet = "0"; - string cbcCutoff = "-cutoff"; - string cbcCutoffSet = UtilDblToStr(cutoff, -1, COIN_DBL_MAX); - string cbcSLog = "-slog"; - string cbcSLogSet = "2"; - - if (doExact) { - cbcTimeSet = UtilDblToStr(min(param.SubProbTimeLimitExact, - param.TimeLimit), -1, - COIN_DBL_MAX); - cbcGapSet = UtilDblToStr(param.SubProbGapLimitExact, -1, - COIN_DBL_MAX); - } else { - cbcTimeSet = UtilDblToStr(min(param.SubProbTimeLimitInexact, - param.TimeLimit), -1, - COIN_DBL_MAX); - cbcGapSet = UtilDblToStr(param.SubProbGapLimitInexact, -1, - COIN_DBL_MAX); - } - - bool doTime = false; - double cbcMaxSecUB = 1e100; - - if (doExact) { - if (param.SubProbTimeLimitExact < cbcMaxSecUB) { - doTime = true; - } - } else { - if (param.SubProbTimeLimitInexact < cbcMaxSecUB) { - doTime = true; - } - } - - 
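For orientation, the argument vector assembled just below drives CbcMain exactly as the standalone solver would be invoked from a shell. With, say, logIpLevel = 0, a gap ratio of 0, a 60 second time limit and a cutoff of 12.5 (illustrative values, not taken from the source), it corresponds to:

    cbc -log 0 -ratio 0 -seconds 60 -cutoff 12.5 -solve -quit

The -seconds and -cutoff pairs are only appended when doTime and doCutoff are set, respectively.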
argv[argc++] = cbcExe.c_str(); - argv[argc++] = cbcLog.c_str(); - argv[argc++] = cbcLogSet.c_str(); - //argv[argc++] = cbcSLog.c_str(); //for extra debugging - //argv[argc++] = cbcSLogSet.c_str(); //for extra debugging - argv[argc++] = cbcGap.c_str(); - argv[argc++] = cbcGapSet.c_str(); - - if (doTime) { - argv[argc++] = cbcTime.c_str(); - argv[argc++] = cbcTimeSet.c_str(); - } - - if (doCutoff) { - argv[argc++] = cbcCutoff.c_str(); - argv[argc++] = cbcCutoffSet.c_str(); - } - - argv[argc++] = cbcSolve.c_str(); - argv[argc++] = cbcQuit.c_str(); - //--- - //--- solve IP using argument list - //--- - CbcMain(argc, argv, cbc); - //--- - //--- get solver status - //--- comments based on Cbc2.3 - //--- - /** Final status of problem. - * -1 before branchAndBound - * 0 finished - check isProvenOptimal or isProvenInfeasible - * to see if solution found (or check value of best solution) - * 1 stopped - on maxnodes, maxsols, maxtime - * 2 difficulties so run was abandoned - * (5 event user programmed event occurred) + // int i; + // const double * colUB = cbc.getColUpper(); + // for(i = 0; i < cbc.getNumCols(); i++){ + // printf("col %d -> ub: %g\n", + // i, colUB[i]); + //} + //--- + //--- build argument list + //--- + const char *argv[20]; + int argc = 0; + string cbcExe = "cbc"; + string cbcSolve = "-solve"; + string cbcQuit = "-quit"; + string cbcLog = "-log"; + string cbcLogSet = UtilIntToStr(logIpLevel); + string cbcGap = "-ratio"; + string cbcGapSet = "0"; + string cbcTime = "-seconds"; + string cbcTimeSet = "0"; + string cbcCutoff = "-cutoff"; + string cbcCutoffSet = UtilDblToStr(cutoff, -1, COIN_DBL_MAX); + string cbcSLog = "-slog"; + string cbcSLogSet = "2"; + + if (doExact) { + cbcTimeSet = UtilDblToStr(min(param.SubProbTimeLimitExact, param.TimeLimit), + -1, COIN_DBL_MAX); + cbcGapSet = UtilDblToStr(param.SubProbGapLimitExact, -1, COIN_DBL_MAX); + } else { + cbcTimeSet = UtilDblToStr( + min(param.SubProbTimeLimitInexact, param.TimeLimit), -1, COIN_DBL_MAX); + cbcGapSet = UtilDblToStr(param.SubProbGapLimitInexact, -1, COIN_DBL_MAX); + } + + bool doTime = false; + double cbcMaxSecUB = 1e100; + + if (doExact) { + if (param.SubProbTimeLimitExact < cbcMaxSecUB) { + doTime = true; + } + } else { + if (param.SubProbTimeLimitInexact < cbcMaxSecUB) { + doTime = true; + } + } + + argv[argc++] = cbcExe.c_str(); + argv[argc++] = cbcLog.c_str(); + argv[argc++] = cbcLogSet.c_str(); + // argv[argc++] = cbcSLog.c_str(); //for extra debugging + // argv[argc++] = cbcSLogSet.c_str(); //for extra debugging + argv[argc++] = cbcGap.c_str(); + argv[argc++] = cbcGapSet.c_str(); + + if (doTime) { + argv[argc++] = cbcTime.c_str(); + argv[argc++] = cbcTimeSet.c_str(); + } + + if (doCutoff) { + argv[argc++] = cbcCutoff.c_str(); + argv[argc++] = cbcCutoffSet.c_str(); + } + + argv[argc++] = cbcSolve.c_str(); + argv[argc++] = cbcQuit.c_str(); + //--- + //--- solve IP using argument list + //--- + CbcMain(argc, argv, cbc); + //--- + //--- get solver status + //--- comments based on Cbc2.3 + //--- + /** Final status of problem. 
+ * -1 before branchAndBound + * 0 finished - check isProvenOptimal or isProvenInfeasible + * to see if solution found (or check value of best solution) + * 1 stopped - on maxnodes, maxsols, maxtime + * 2 difficulties so run was abandoned + * (5 event user programmed event occurred) */ #endif - /** Secondary status of problem - * -1 unset (status_ will also be -1) - * 0 search completed with solution - * 1 linear relaxation not feasible (or worse than cutoff) - * 2 stopped on gap - * 3 stopped on nodes - * 4 stopped on time - * 5 stopped on user event - * 6 stopped on solutions - * 7 linear relaxation unbounded + /** Secondary status of problem + * -1 unset (status_ will also be -1) + * 0 search completed with solution + * 1 linear relaxation not feasible (or worse than cutoff) + * 2 stopped on gap + * 3 stopped on nodes + * 4 stopped on time + * 5 stopped on user event + * 6 stopped on solutions + * 7 linear relaxation unbounded */ - int nSeta = 0; - int nSetb = 0; - const int statusSet2a[4] = {0, 2, 3, 4}; - nSeta = 4; - const int statusSet2b[5] = {0, 1, 2, 4, 5}; - nSetb = 5; - result->m_solStatus2 = cbc.secondaryStatus(); - - //--- - //--- In root the subproblem should not be infeasible - //--- unless due to cutoff. But, after branching it - //--- can be infeasible. - //--- - if (!doCutoff && isRoot) { - if (!UtilIsInSet(result->m_solStatus2, statusSet2a, nSeta)) { - cerr << "Error: CBC IP solver 2nd status = " - << result->m_solStatus2 << endl; - throw UtilException("CBC solver 2nd status", - "solveAsMIPCbc", "DecompSubModel"); - } - } else { - if (!UtilIsInSet(result->m_solStatus2, statusSet2b, nSetb)) { - cerr << "Error: CBC IP solver 2nd status = " - << result->m_solStatus2 << endl; - throw UtilException("CBC solver 2nd status", - "solveAsMIPCbc", "DecompSubModel"); - } - } - - //--- - //--- update results object - //--- - result->m_nSolutions = 0; - result->m_isOptimal = false; - result->m_isCutoff = false; - - if (cbc.isContinuousUnbounded()) { - OsiClpSolverInterface* m_relax = dynamic_cast(m_osi); - m_relax->initialSolve(); - std::vector solDbl; - //ToDo: To add parameter of number of rays in the getPrimalRays() - solDbl = m_relax->getPrimalRays(1); - const double* solDbl2 = solDbl.front(); - vector solVec(solDbl2, solDbl2 + numCols); - result->m_solution.push_back(solVec); - result->m_nSolutions++; - result->m_isUnbounded = true; - } - - //printf("cbc.isProvenOptimal() = %d\n", cbc.isProvenOptimal()); - if (cbc.isProvenOptimal()) { - result->m_nSolutions = cbc.numberSavedSolutions(); - result->m_isOptimal = true; - } else { - if (cbc.isProvenInfeasible()) { - result->m_nSolutions = 0; - result->m_isCutoff = doCutoff; - result->m_isOptimal = true; - } else { - //--- - //--- else it must have stopped on gap - //--- - result->m_nSolutions = 1; - result->m_isCutoff = doCutoff; - result->m_isOptimal = false; - } - } - - //--- - //--- get copy of solution(s) - //--- - result->m_objLB = cbc.getBestPossibleObjValue(); - int nSols = std::min(result->m_nSolutions, - param.SubProbNumSolLimit); - for(int i = 0; i < nSols; i++){ - //result->m_objUB = cbc.getObjValue(); - const double* solDbl = cbc.savedSolution(i); - vector solVec(solDbl, solDbl + numCols); - result->m_solution.push_back(solVec); - /* - for(unsigned i=0; i < solVec.size(); i++){ - std::cout << "index " << i <<" "<< solVec[i] << std::endl; - } - */ - //memcpy(result->m_solution, - // cbc.getColSolution(), numCols * sizeof(double)); - assert(result->m_nSolutions == - static_cast(result->m_solution.size())); - } + int nSeta = 
0; + int nSetb = 0; + const int statusSet2a[4] = {0, 2, 3, 4}; + nSeta = 4; + const int statusSet2b[5] = {0, 1, 2, 4, 5}; + nSetb = 5; + result->m_solStatus2 = cbc.secondaryStatus(); + + //--- + //--- In root the subproblem should not be infeasible + //--- unless due to cutoff. But, after branching it + //--- can be infeasible. + //--- + if (!doCutoff && isRoot) { + if (!UtilIsInSet(result->m_solStatus2, statusSet2a, nSeta)) { + cerr << "Error: CBC IP solver 2nd status = " << result->m_solStatus2 + << endl; + throw UtilException("CBC solver 2nd status", "solveAsMIPCbc", + "DecompSubModel"); + } + } else { + if (!UtilIsInSet(result->m_solStatus2, statusSet2b, nSetb)) { + cerr << "Error: CBC IP solver 2nd status = " << result->m_solStatus2 + << endl; + throw UtilException("CBC solver 2nd status", "solveAsMIPCbc", + "DecompSubModel"); + } + } + + //--- + //--- update results object + //--- + result->m_nSolutions = 0; + result->m_isOptimal = false; + result->m_isCutoff = false; + + if (cbc.isContinuousUnbounded()) { + OsiClpSolverInterface *m_relax = + dynamic_cast(m_osi); + m_relax->initialSolve(); + std::vector solDbl; + // ToDo: To add parameter of number of rays in the getPrimalRays() + solDbl = m_relax->getPrimalRays(1); + const double *solDbl2 = solDbl.front(); + vector solVec(solDbl2, solDbl2 + numCols); + result->m_solution.push_back(solVec); + result->m_nSolutions++; + result->m_isUnbounded = true; + } + + // printf("cbc.isProvenOptimal() = %d\n", cbc.isProvenOptimal()); + if (cbc.isProvenOptimal()) { + result->m_nSolutions = cbc.numberSavedSolutions(); + result->m_isOptimal = true; + } else { + if (cbc.isProvenInfeasible()) { + result->m_nSolutions = 0; + result->m_isCutoff = doCutoff; + result->m_isOptimal = true; + } else { + //--- + //--- else it must have stopped on gap + //--- + result->m_nSolutions = 1; + result->m_isCutoff = doCutoff; + result->m_isOptimal = false; + } + } + + //--- + //--- get copy of solution(s) + //--- + result->m_objLB = cbc.getBestPossibleObjValue(); + int nSols = std::min(result->m_nSolutions, param.SubProbNumSolLimit); + for (int i = 0; i < nSols; i++) { + // result->m_objUB = cbc.getObjValue(); + const double *solDbl = cbc.savedSolution(i); + vector solVec(solDbl, solDbl + numCols); + result->m_solution.push_back(solVec); + /* + for(unsigned i=0; i < solVec.size(); i++){ + std::cout << "index " << i <<" "<< solVec[i] << std::endl; + } + */ + // memcpy(result->m_solution, + // cbc.getColSolution(), numCols * sizeof(double)); + assert(result->m_nSolutions == static_cast(result->m_solution.size())); + } #else - throw UtilException("Cbc selected as solver, but it's not available", - "solveAsMIPCbc", "DecompSubModel"); + throw UtilException("Cbc selected as solver, but it's not available", + "solveAsMIPCbc", "DecompSubModel"); #endif } //===========================================================================// -void DecompSubModel::solveAsMIPCpx(DecompSolverResult* result, - DecompParam& param, - bool doExact, - bool doCutoff, - bool isRoot, - double cutoff, - double timeLimit) -{ +void DecompSubModel::solveAsMIPCpx(DecompSolverResult *result, + DecompParam ¶m, bool doExact, + bool doCutoff, bool isRoot, double cutoff, + double timeLimit) { #ifdef COIN_HAS_CPX - const int numCols = m_osi->getNumCols(); - const int logIpLevel = param.LogIpLevel; - double* solution = new double[numCols]; - assert(solution); - //--- - //--- get OsiCpx object from Osi object - //--- get CPEXENVptr for use with internal methods - //--- get CPXLPptr for use with internal 
methods - //--- - OsiCpxSolverInterface* osiCpx - = dynamic_cast(m_osi); - CPXENVptr cpxEnv = osiCpx->getEnvironmentPtr(); - CPXLPptr cpxLp = osiCpx->getLpPtr(); - assert(cpxEnv && cpxLp); - //--- - //--- set parameters - //--- - int status = 0; - - if (logIpLevel) { - status = CPXsetintparam(cpxEnv, CPX_PARAM_SCRIND, CPX_ON); - - if (status) - throw UtilException("CPXsetintparam failure", - "solveAsMIPCpx", "DecompSubModel"); - - status = CPXsetintparam(cpxEnv, CPX_PARAM_SIMDISPLAY, logIpLevel); - - if (status) - throw UtilException("CPXsetintparam failure", - "solveAsMIPCpx", "DecompSubModel"); - } else { - status = CPXsetintparam(cpxEnv, CPX_PARAM_SCRIND, CPX_OFF); - - if (status) - throw UtilException("CPXsetintparam failure", - "solveAsMIPCpx", "DecompSubModel"); - } - - if (doExact) - status = CPXsetdblparam(cpxEnv, CPX_PARAM_EPGAP, - param.SubProbGapLimitExact); - else - status = CPXsetdblparam(cpxEnv, CPX_PARAM_EPGAP, - param.SubProbGapLimitInexact); - - if (status) - throw UtilException("CPXsetdblparam failure", - "solveAsMIPCpx", "DecompSubModel"); - - if (doExact) { - if (param.SubProbTimeLimitExact < COIN_DBL_MAX) { - status = CPXsetdblparam(cpxEnv, CPX_PARAM_TILIM, - param.SubProbTimeLimitExact); - } - } else { - if (param.SubProbTimeLimitInexact < COIN_DBL_MAX) { - status = CPXsetdblparam(cpxEnv, CPX_PARAM_TILIM, - param.SubProbTimeLimitInexact); - } - } - - if (status) - throw UtilException("CPXsetdblparam failure", - "solveAsMIPCpx", "DecompSubModel"); - - if (doCutoff) { - status = CPXsetdblparam(cpxEnv, CPX_PARAM_CUTUP, cutoff); - } else { - status = CPXsetdblparam(cpxEnv, CPX_PARAM_CUTUP, 1.0e+75); - } - - if (status) - throw UtilException("CPXsetdblparam failure", - "solveAsMIPCpx", "DecompSubModel"); - - //--- - //--- starting with CPX12, parallel MIP is on by default - //--- we do not want that (usually) - //--- Provide a user option - //--- -#if CPX_VERSION >=1200 - status = CPXsetintparam(cpxEnv, CPX_PARAM_THREADS, param.NumThreadsIPSolver); - - if (status) - throw UtilException("CPXsetdblparam failure", - "solveAsMIPCpx", "DecompSubModel"); - - int startAlgo = 0; - - switch (param.SubProbSolverStartAlgo) { - case DecompDualSimplex: - startAlgo = CPX_ALG_DUAL; - break; - case DecompPrimSimplex: - startAlgo = CPX_ALG_PRIMAL; - break; - case DecompBarrier: - startAlgo = CPX_ALG_BARRIER; - break; - } - - status = CPXsetintparam(cpxEnv, CPX_PARAM_STARTALG, startAlgo); - - if (status) - throw UtilException("CPXsetdblparam failure", - "solveAsMIPCpx", "DecompSubModel"); - - //--- - //--- check the mip starts solution pool, never let it get too - //--- big, and refresh it periodically - assuming that the last - //--- ones in the list are the last ones used - which would have the - //--- best potential to help warm start - //--- never let it get bigger than 10 solutions, - //--- when refresh - keep only last 2 - //--- - int nMipStarts = CPXgetnummipstarts(cpxEnv, cpxLp); - - if (nMipStarts > 10) { - status = CPXdelmipstarts(cpxEnv, cpxLp, 0, nMipStarts - 3); - - if (status) - throw UtilException("CPXdelmipstarts failure", - "solveAsMIPCpx", "DecompSubModel"); - } + const int numCols = m_osi->getNumCols(); + const int logIpLevel = param.LogIpLevel; + double *solution = new double[numCols]; + assert(solution); + //--- + //--- get OsiCpx object from Osi object + //--- get CPEXENVptr for use with internal methods + //--- get CPXLPptr for use with internal methods + //--- + OsiCpxSolverInterface *osiCpx = dynamic_cast(m_osi); + CPXENVptr cpxEnv = osiCpx->getEnvironmentPtr(); + 
CPXLPptr cpxLp = osiCpx->getLpPtr(); + assert(cpxEnv && cpxLp); + //--- + //--- set parameters + //--- + int status = 0; + + if (logIpLevel) { + status = CPXsetintparam(cpxEnv, CPX_PARAM_SCRIND, CPX_ON); + + if (status) + throw UtilException("CPXsetintparam failure", "solveAsMIPCpx", + "DecompSubModel"); + + status = CPXsetintparam(cpxEnv, CPX_PARAM_SIMDISPLAY, logIpLevel); + + if (status) + throw UtilException("CPXsetintparam failure", "solveAsMIPCpx", + "DecompSubModel"); + } else { + status = CPXsetintparam(cpxEnv, CPX_PARAM_SCRIND, CPX_OFF); + + if (status) + throw UtilException("CPXsetintparam failure", "solveAsMIPCpx", + "DecompSubModel"); + } + + if (doExact) + status = + CPXsetdblparam(cpxEnv, CPX_PARAM_EPGAP, param.SubProbGapLimitExact); + else + status = + CPXsetdblparam(cpxEnv, CPX_PARAM_EPGAP, param.SubProbGapLimitInexact); + + if (status) + throw UtilException("CPXsetdblparam failure", "solveAsMIPCpx", + "DecompSubModel"); + + if (doExact) { + if (param.SubProbTimeLimitExact < COIN_DBL_MAX) { + status = + CPXsetdblparam(cpxEnv, CPX_PARAM_TILIM, param.SubProbTimeLimitExact); + } + } else { + if (param.SubProbTimeLimitInexact < COIN_DBL_MAX) { + status = CPXsetdblparam(cpxEnv, CPX_PARAM_TILIM, + param.SubProbTimeLimitInexact); + } + } + + if (status) + throw UtilException("CPXsetdblparam failure", "solveAsMIPCpx", + "DecompSubModel"); + + if (doCutoff) { + status = CPXsetdblparam(cpxEnv, CPX_PARAM_CUTUP, cutoff); + } else { + status = CPXsetdblparam(cpxEnv, CPX_PARAM_CUTUP, 1.0e+75); + } + + if (status) + throw UtilException("CPXsetdblparam failure", "solveAsMIPCpx", + "DecompSubModel"); + + //--- + //--- starting with CPX12, parallel MIP is on by default + //--- we do not want that (usually) + //--- Provide a user option + //--- +#if CPX_VERSION >= 1200 + status = CPXsetintparam(cpxEnv, CPX_PARAM_THREADS, param.NumThreadsIPSolver); + + if (status) + throw UtilException("CPXsetdblparam failure", "solveAsMIPCpx", + "DecompSubModel"); + + int startAlgo = 0; + + switch (param.SubProbSolverStartAlgo) { + case DecompDualSimplex: + startAlgo = CPX_ALG_DUAL; + break; + case DecompPrimSimplex: + startAlgo = CPX_ALG_PRIMAL; + break; + case DecompBarrier: + startAlgo = CPX_ALG_BARRIER; + break; + } + + status = CPXsetintparam(cpxEnv, CPX_PARAM_STARTALG, startAlgo); + + if (status) + throw UtilException("CPXsetdblparam failure", "solveAsMIPCpx", + "DecompSubModel"); + + //--- + //--- check the mip starts solution pool, never let it get too + //--- big, and refresh it periodically - assuming that the last + //--- ones in the list are the last ones used - which would have the + //--- best potential to help warm start + //--- never let it get bigger than 10 solutions, + //--- when refresh - keep only last 2 + //--- + int nMipStarts = CPXgetnummipstarts(cpxEnv, cpxLp); + + if (nMipStarts > 10) { + status = CPXdelmipstarts(cpxEnv, cpxLp, 0, nMipStarts - 3); + + if (status) + throw UtilException("CPXdelmipstarts failure", "solveAsMIPCpx", + "DecompSubModel"); + } #endif - //--- - //--- solve the MILP - //--- - osiCpx->branchAndBound(); - //--- - //--- get solver status - //--- - result->m_solStatus = CPXgetstat(cpxEnv, cpxLp); - result->m_solStatus2 = 0; - //printf("cplex status = %d\n", result->m_solStatus); - //printf("cplex status2 = %d\n", result->m_solStatus2); - const int statusSet1[6] = {CPXMIP_OPTIMAL, - CPXMIP_OPTIMAL_TOL, //for stopping on gap - CPXMIP_TIME_LIM_FEAS, - CPX_STAT_OPTIMAL, - CPX_STAT_UNBOUNDED, - CPXMIP_UNBOUNDED - }; - const int statusSet2[9] = {CPXMIP_OPTIMAL, - 
CPXMIP_OPTIMAL_TOL, //for stopping on gap - CPXMIP_TIME_LIM_FEAS, - CPXMIP_INFEASIBLE, - CPXMIP_INForUNBD, - CPX_STAT_UNBOUNDED, - CPX_STAT_INForUNBD, - CPX_STAT_OPTIMAL, - CPXMIP_UNBOUNDED//newly added status - }; - // Update result object - result->m_nSolutions = 0; - result->m_isUnbounded = false; - result->m_isOptimal = false; - result->m_isCutoff = false; - - if (result->m_solStatus == CPXMIP_INForUNBD || - result->m_solStatus == CPX_STAT_UNBOUNDED || - result->m_solStatus == CPXMIP_UNBOUNDED || - result->m_solStatus == CPX_STAT_INForUNBD ) { - std::cout << "There might be extreme rays in the subproblems " - << std::endl; + //--- + //--- solve the MILP + //--- + osiCpx->branchAndBound(); + //--- + //--- get solver status + //--- + result->m_solStatus = CPXgetstat(cpxEnv, cpxLp); + result->m_solStatus2 = 0; + // printf("cplex status = %d\n", result->m_solStatus); + // printf("cplex status2 = %d\n", result->m_solStatus2); + const int statusSet1[6] = {CPXMIP_OPTIMAL, + CPXMIP_OPTIMAL_TOL, // for stopping on gap + CPXMIP_TIME_LIM_FEAS, CPX_STAT_OPTIMAL, + CPX_STAT_UNBOUNDED, CPXMIP_UNBOUNDED}; + const int statusSet2[9] = { + CPXMIP_OPTIMAL, + CPXMIP_OPTIMAL_TOL, // for stopping on gap + CPXMIP_TIME_LIM_FEAS, CPXMIP_INFEASIBLE, CPXMIP_INForUNBD, + CPX_STAT_UNBOUNDED, CPX_STAT_INForUNBD, CPX_STAT_OPTIMAL, + CPXMIP_UNBOUNDED // newly added status + }; + // Update result object + result->m_nSolutions = 0; + result->m_isUnbounded = false; + result->m_isOptimal = false; + result->m_isCutoff = false; + + if (result->m_solStatus == CPXMIP_INForUNBD || + result->m_solStatus == CPX_STAT_UNBOUNDED || + result->m_solStatus == CPXMIP_UNBOUNDED || + result->m_solStatus == CPX_STAT_INForUNBD) { + std::cout << "There might be extreme rays in the subproblems " << std::endl; + /* + std::cout << "The solution statu is " + << result->m_solStatus << std::endl; + */ + // turn off the presolve and solve the relaxtion of the subproblem + // at the root + status = CPXsetintparam(cpxEnv, CPX_PARAM_PREIND, CPX_OFF); + + if (status) { + throw UtilException("XPXsetintparam failure", "solveAsMIPCpx", + "DecompSubModel"); + } + + osiCpx->initialSolve(); + result->m_solStatus = CPXgetstat(cpxEnv, cpxLp); + + if (result->m_solStatus == CPXMIP_UNBOUNDED || + result->m_solStatus == CPX_STAT_UNBOUNDED) { /* - std::cout << "The solution statu is " - << result->m_solStatus << std::endl; - */ - // turn off the presolve and solve the relaxtion of the subproblem - // at the root - status = CPXsetintparam(cpxEnv, CPX_PARAM_PREIND, CPX_OFF); - - if (status) { - throw UtilException("XPXsetintparam failure", - "solveAsMIPCpx", "DecompSubModel"); - } - - osiCpx->initialSolve(); - result->m_solStatus = CPXgetstat(cpxEnv, cpxLp); - - if (result->m_solStatus == CPXMIP_UNBOUNDED || - result->m_solStatus == CPX_STAT_UNBOUNDED) { - /* - std::cout << "The status of the problem is " - << result->m_solStatus - << std::endl; - */ - status = CPXgetray (cpxEnv, cpxLp, solution); - } - - osiCpx->switchToMIP(); + std::cout << "The status of the problem is " + << result->m_solStatus + << std::endl; + */ + status = CPXgetray(cpxEnv, cpxLp, solution); + } + + osiCpx->switchToMIP(); + + if (status) { + throw UtilException("CPXgetray failure", "solveAsMIPCpx", + "DecompSubModel"); + } + + vector solVec(solution, solution + numCols); + // std::cout << "The ray of the solution is " << std::endl; + /* + for (int i = 0 ; i < numCols ; i ++) { + std::cout << solution[i] << std::endl; + } + */ + result->m_solution.push_back(solVec); + result->m_nSolutions++; 
+ } else { + if (!UtilIsInSet(result->m_solStatus, statusSet2, 9)) { + cerr << "Error: CPX IP solver status = " << result->m_solStatus << endl; + throw UtilException("CPX solver status", "solveAsMIPCpx", + "DecompSubModel"); + } + } + + //--- + //--- In root the subproblem should not be infeasible + //--- unless due to cutoff. But, after branching it + //--- can be infeasible. + //--- The problem infeasibility can be detected if any + //--- subproblem at the rootnode is infeasible + if (!doCutoff && isRoot) { + if (!UtilIsInSet(result->m_solStatus, statusSet1, 6)) { + cerr << "Error: CPX IP solver 2nd status = " << result->m_solStatus + << endl; + throw UtilException("CPX solver status", "solveAsMIPCpx", + "DecompSubModel"); + } + } else { + if (!UtilIsInSet(result->m_solStatus, statusSet2, 9)) { + cerr << "Error: CPX IP solver 2nd status = " << result->m_solStatus + << endl; + throw UtilException("CPX solver status", "solveAsMIPCpx", + "DecompSubModel"); + } + } + + //--- + //--- update results object + //--- + + if (UtilIsInSet(result->m_solStatus, statusSet1, 4)) { + int i; + int nSols = CPXgetsolnpoolnumsolns(cpxEnv, cpxLp); + double objVal; + // printf("Number of solutions in solution pool = %d\n", + // nSols); + // TODO: currently just take up to the limit, + // but, should sort by objective and take n least? + nSols = std::min(nSols, param.SubProbNumSolLimit); + + for (i = 0; i < nSols; i++) { + status = CPXgetsolnpoolobjval(cpxEnv, cpxLp, i, &objVal); - if (status) { - throw UtilException("CPXgetray failure", - "solveAsMIPCpx", "DecompSubModel"); - } + if (status) + throw UtilException("CPXgetsolnpoolobjval", "solveAsMIPCpx", + "DecompSubModel"); + // printf("Sol %4d: Obj: %10g\n", i, objVal); + status = CPXgetsolnpoolx(cpxEnv, cpxLp, i, solution, 0, numCols - 1); vector solVec(solution, solution + numCols); - // std::cout << "The ray of the solution is " << std::endl; - /* - for (int i = 0 ; i < numCols ; i ++) { - std::cout << solution[i] << std::endl; - } - */ result->m_solution.push_back(solVec); result->m_nSolutions++; - } else { - if (!UtilIsInSet(result->m_solStatus, statusSet2, 9)) { - cerr << "Error: CPX IP solver status = " << result->m_solStatus << endl; - throw UtilException("CPX solver status", - "solveAsMIPCpx", "DecompSubModel"); - } - } - - //--- - //--- In root the subproblem should not be infeasible - //--- unless due to cutoff. But, after branching it - //--- can be infeasible. - //--- The problem infeasibility can be detected if any - //--- subproblem at the rootnode is infeasible - if (!doCutoff && isRoot) { - if (!UtilIsInSet(result->m_solStatus, statusSet1, 6)) { - cerr << "Error: CPX IP solver 2nd status = " - << result->m_solStatus << endl; - throw UtilException("CPX solver status", - "solveAsMIPCpx", "DecompSubModel"); - } - } else { - if (!UtilIsInSet(result->m_solStatus, statusSet2, 9)) { - cerr << "Error: CPX IP solver 2nd status = " - << result->m_solStatus << endl; - throw UtilException("CPX solver status", - "solveAsMIPCpx", "DecompSubModel"); - } - } - - //--- - //--- update results object - //--- - - if (UtilIsInSet(result->m_solStatus, statusSet1, 4)) { - int i; - int nSols = CPXgetsolnpoolnumsolns(cpxEnv, cpxLp); - double objVal; - //printf("Number of solutions in solution pool = %d\n", - //nSols); - //TODO: currently just take up to the limit, - // but, should sort by objective and take n least? 
- nSols = std::min(nSols, param.SubProbNumSolLimit); - - for (i = 0; i < nSols; i++) { - status = CPXgetsolnpoolobjval(cpxEnv, cpxLp, i, &objVal); - - if (status) - throw UtilException("CPXgetsolnpoolobjval", - "solveAsMIPCpx", "DecompSubModel"); - - //printf("Sol %4d: Obj: %10g\n", i, objVal); - status = CPXgetsolnpoolx(cpxEnv, cpxLp, i, - solution, 0, numCols - 1); - vector solVec(solution, solution + numCols); - result->m_solution.push_back(solVec); - result->m_nSolutions++; - assert(result->m_nSolutions == - static_cast(result->m_solution.size())); - //memcpy(result->m_solution, - // osiCpx->getColSolution(), numCols * sizeof(double)); - } - - result->m_nSolutions = nSols; - } - - //printf("solStatus = %d\n", result->m_solStatus); - - if (result->m_solStatus == CPXMIP_OPTIMAL || - result->m_solStatus == CPX_STAT_OPTIMAL || - result->m_solStatus == CPXMIP_OPTIMAL_TOL) { - result->m_isOptimal = true; - } else if (result->m_solStatus == CPXMIP_UNBOUNDED || - result->m_solStatus == CPX_STAT_UNBOUNDED) { - // std::cout << "We are generating extreme rays " << std::endl; - result->m_isUnbounded = true; - result->m_isOptimal = false; - } else { - if (result->m_solStatus == CPXMIP_INFEASIBLE) { - result->m_nSolutions = 0; - result->m_isCutoff = doCutoff; - result->m_isOptimal = true; - } else { - //--- - //--- else it must have stopped on gap or time - //--- - result->m_isCutoff = doCutoff; - result->m_isOptimal = false; - } - + assert(result->m_nSolutions == + static_cast(result->m_solution.size())); + // memcpy(result->m_solution, + // osiCpx->getColSolution(), numCols * sizeof(double)); + } + + result->m_nSolutions = nSols; + } + + // printf("solStatus = %d\n", result->m_solStatus); + + if (result->m_solStatus == CPXMIP_OPTIMAL || + result->m_solStatus == CPX_STAT_OPTIMAL || + result->m_solStatus == CPXMIP_OPTIMAL_TOL) { + result->m_isOptimal = true; + } else if (result->m_solStatus == CPXMIP_UNBOUNDED || + result->m_solStatus == CPX_STAT_UNBOUNDED) { + // std::cout << "We are generating extreme rays " << std::endl; + result->m_isUnbounded = true; + result->m_isOptimal = false; + } else { + if (result->m_solStatus == CPXMIP_INFEASIBLE) { + result->m_nSolutions = 0; + result->m_isCutoff = doCutoff; + result->m_isOptimal = true; + } else { //--- - //--- get copy of solution + //--- else it must have stopped on gap or time //--- - status = CPXgetbestobjval(cpxEnv, cpxLp, &result->m_objLB); + result->m_isCutoff = doCutoff; + result->m_isOptimal = false; + } - if (status) - throw UtilException("CPXgetbestobjval failure", - "solveAsMIPCpx", "DecompSubModel"); + //--- + //--- get copy of solution + //--- + status = CPXgetbestobjval(cpxEnv, cpxLp, &result->m_objLB); - if (result->m_nSolutions >= 1 && !result->m_isUnbounded) { - status = CPXgetmipobjval(cpxEnv, cpxLp, &result->m_objUB); + if (status) + throw UtilException("CPXgetbestobjval failure", "solveAsMIPCpx", + "DecompSubModel"); - if (status) - throw UtilException("CPXgetmipobjval failure", - "solveAsMIPCpx", "DecompSubModel"); - } - } - UTIL_DELARR(solution); + if (result->m_nSolutions >= 1 && !result->m_isUnbounded) { + status = CPXgetmipobjval(cpxEnv, cpxLp, &result->m_objUB); + + if (status) + throw UtilException("CPXgetmipobjval failure", "solveAsMIPCpx", + "DecompSubModel"); + } + } + UTIL_DELARR(solution); #else - throw UtilException("CPLEX selected as solver, but it's not available", - "solveAsMIPCpx", "DecompSubModel"); + throw UtilException("CPLEX selected as solver, but it's not available", + "solveAsMIPCpx", "DecompSubModel"); 
#endif } //===========================================================================// -void DecompSubModel::solveAsMIPGrb(DecompSolverResult* result, - DecompParam& param, - bool doExact, - bool doCutoff, - bool isRoot, - double cutoff, - double timeLimit) -{ +void DecompSubModel::solveAsMIPGrb(DecompSolverResult *result, + DecompParam ¶m, bool doExact, + bool doCutoff, bool isRoot, double cutoff, + double timeLimit) { #ifdef COIN_HAS_GRB - int stat; - const int numCols = m_osi->getNumCols(); - - OsiGrbSolverInterface* osiGrb - = dynamic_cast(m_osi); - - GRBenv* env = osiGrb->getEnvironmentPtr(); - - GRBmodel* model = osiGrb->getLpPtr(); - - const std::map paramMap = m_utilParam->getParamMap(); - map::const_iterator it; - std::istringstream iss; - - for (it = paramMap.begin(); it != paramMap.end(); it++){ - std::vector elems; - m_utilParam->split(it->first, elems); - if (elems[0] == "gurobi"){ - int intParam; - double doubleParam; - iss.str(it->second); - if (iss >> intParam){ - GRBsetintparam(env, elems[1].c_str(), intParam); - } - iss.clear(); - iss.str(it->second); - if (iss >> doubleParam){ - GRBsetdblparam(env, elems[1].c_str(), doubleParam); - }else{ - GRBsetstrparam(env, elems[1].c_str(), it->second.c_str()); - } - iss.clear(); - } - } + int stat; + const int numCols = m_osi->getNumCols(); - osiGrb->branchAndBound(); + OsiGrbSolverInterface *osiGrb = dynamic_cast(m_osi); - GRBgetintattr(model, GRB_INT_ATTR_STATUS, &stat); + GRBenv *env = osiGrb->getEnvironmentPtr(); - result->m_isUnbounded = false; - result->m_isOptimal = false; - result->m_isCutoff = false; - result->m_nSolutions = 0; - if (stat == GRB_OPTIMAL){ - const double *solution = osiGrb->getColSolution(); - vector solVec(solution, solution + numCols); - result->m_solution.push_back(solVec); - result->m_nSolutions++; - result->m_isOptimal = true; - }else if (stat == GRB_UNBOUNDED){ - osiGrb->initialSolve(); - const double *ray = osiGrb->getDualRays(1, true)[0]; - vector solVec(ray, ray + numCols); - result->m_solution.push_back(solVec); - result->m_nSolutions++; - result->m_isUnbounded = true; - }else if (stat == GRB_INFEASIBLE){ - result->m_isOptimal = true; - }else{ - throw UtilException("Solution failure", - "solveAsMIPGrb", "DecompSubModel"); - } + GRBmodel *model = osiGrb->getLpPtr(); + + const std::map paramMap = + m_utilParam->getParamMap(); + map::const_iterator it; + std::istringstream iss; + + for (it = paramMap.begin(); it != paramMap.end(); it++) { + std::vector elems; + m_utilParam->split(it->first, elems); + if (elems[0] == "gurobi") { + int intParam; + double doubleParam; + iss.str(it->second); + if (iss >> intParam) { + GRBsetintparam(env, elems[1].c_str(), intParam); + } + iss.clear(); + iss.str(it->second); + if (iss >> doubleParam) { + GRBsetdblparam(env, elems[1].c_str(), doubleParam); + } else { + GRBsetstrparam(env, elems[1].c_str(), it->second.c_str()); + } + iss.clear(); + } + } + + osiGrb->branchAndBound(); + + GRBgetintattr(model, GRB_INT_ATTR_STATUS, &stat); + + result->m_isUnbounded = false; + result->m_isOptimal = false; + result->m_isCutoff = false; + result->m_nSolutions = 0; + if (stat == GRB_OPTIMAL) { + const double *solution = osiGrb->getColSolution(); + vector solVec(solution, solution + numCols); + result->m_solution.push_back(solVec); + result->m_nSolutions++; + result->m_isOptimal = true; + } else if (stat == GRB_UNBOUNDED) { + osiGrb->initialSolve(); + const double *ray = osiGrb->getDualRays(1, true)[0]; + vector solVec(ray, ray + numCols); + result->m_solution.push_back(solVec); + 
result->m_nSolutions++; + result->m_isUnbounded = true; + } else if (stat == GRB_INFEASIBLE) { + result->m_isOptimal = true; + } else { + throw UtilException("Solution failure", "solveAsMIPGrb", "DecompSubModel"); + } #else - throw UtilException("Gurobi selected as solver, but it's not available", - "solveAsMIPGrb", "DecompSubModel"); + throw UtilException("Gurobi selected as solver, but it's not available", + "solveAsMIPGrb", "DecompSubModel"); #endif } - diff --git a/Dip/src/DecompStats.cpp b/Dip/src/DecompStats.cpp index 1502513a..4458dbb9 100644 --- a/Dip/src/DecompStats.cpp +++ b/Dip/src/DecompStats.cpp @@ -13,168 +13,127 @@ //===========================================================================// // --------------------------------------------------------------------- // -#include "UtilMacros.h" #include "DecompStats.h" - +#include "UtilMacros.h" #include using namespace std; // --------------------------------------------------------------------- // // --------------------------------------------------------------------- // -void DecompStats::calculateStats () -{ - //--- - //--- calculate stats totals - //--- - totalDecomp = accumulate(thisDecomp.begin(), - thisDecomp.end(), 0.0); - totalSolveRelax = accumulate(thisSolveRelax.begin(), - thisSolveRelax.end(), 0.0); - totalSolveRelaxApp = accumulate(thisSolveRelaxApp.begin(), - thisSolveRelaxApp.end(), 0.0); - totalSolUpdate = accumulate(thisSolUpdate.begin(), - thisSolUpdate.end(), 0.0); - totalGenCuts = accumulate(thisGenCuts.begin(), - thisGenCuts.end(), 0.0); - totalGenVars = accumulate(thisGenVars.begin(), - thisGenVars.end(), 0.0); - totalCompressCols = accumulate(thisCompressCols.begin(), - thisCompressCols.end(), 0.0); - //--- - //--- calculate stats max - //--- - vector::const_iterator it; - - if (thisDecomp.size() > 0) { - it = max_element(thisDecomp.begin(), thisDecomp.end()); - maxDecomp = *it; - } - - if (thisSolveRelax.size() > 0) { - it = max_element(thisSolveRelax.begin(), thisSolveRelax.end()); - maxSolveRelax = *it; - } - - if (thisSolveRelaxApp.size() > 0) { - it = max_element(thisSolveRelaxApp.begin(), thisSolveRelaxApp.end()); - maxSolveRelaxApp = *it; - } - - if (thisSolUpdate.size() > 0) { - it = max_element(thisSolUpdate.begin(), thisSolUpdate.end()); - maxSolUpdate = *it; - } - - if (thisGenCuts.size() > 0) { - it = max_element(thisGenCuts.begin(), thisGenCuts.end()); - maxGenCuts = *it; - } - - if (thisGenVars.size() > 0) { - it = max_element(thisGenVars.begin(), thisGenVars.end()); - maxGenVars = *it; - } - - if (thisCompressCols.size() > 0) { - it = max_element(thisCompressCols.begin(), thisCompressCols.end()); - maxCompressCols = *it; - } +void DecompStats::calculateStats() { + //--- + //--- calculate stats totals + //--- + totalDecomp = accumulate(thisDecomp.begin(), thisDecomp.end(), 0.0); + totalSolveRelax = + accumulate(thisSolveRelax.begin(), thisSolveRelax.end(), 0.0); + totalSolveRelaxApp = + accumulate(thisSolveRelaxApp.begin(), thisSolveRelaxApp.end(), 0.0); + totalSolUpdate = accumulate(thisSolUpdate.begin(), thisSolUpdate.end(), 0.0); + totalGenCuts = accumulate(thisGenCuts.begin(), thisGenCuts.end(), 0.0); + totalGenVars = accumulate(thisGenVars.begin(), thisGenVars.end(), 0.0); + totalCompressCols = + accumulate(thisCompressCols.begin(), thisCompressCols.end(), 0.0); + //--- + //--- calculate stats max + //--- + vector::const_iterator it; + + if (thisDecomp.size() > 0) { + it = max_element(thisDecomp.begin(), thisDecomp.end()); + maxDecomp = *it; + } + + if (thisSolveRelax.size() > 0) { 
+ it = max_element(thisSolveRelax.begin(), thisSolveRelax.end()); + maxSolveRelax = *it; + } + + if (thisSolveRelaxApp.size() > 0) { + it = max_element(thisSolveRelaxApp.begin(), thisSolveRelaxApp.end()); + maxSolveRelaxApp = *it; + } + + if (thisSolUpdate.size() > 0) { + it = max_element(thisSolUpdate.begin(), thisSolUpdate.end()); + maxSolUpdate = *it; + } + + if (thisGenCuts.size() > 0) { + it = max_element(thisGenCuts.begin(), thisGenCuts.end()); + maxGenCuts = *it; + } + + if (thisGenVars.size() > 0) { + it = max_element(thisGenVars.begin(), thisGenVars.end()); + maxGenVars = *it; + } + + if (thisCompressCols.size() > 0) { + it = max_element(thisCompressCols.begin(), thisCompressCols.end()); + maxCompressCols = *it; + } } // --------------------------------------------------------------------- // -void DecompNodeStats::printObjHistoryBound(ostream* os) const -{ - (*os) << setiosflags(ios::fixed | ios::showpoint); - (*os).precision(2); - (*os) << "\n========== OBJ History Node " - << nodeIndex << " [BEGIN]: ==================================== " - << endl; - vector< DecompObjBound >::const_iterator it; - (*os) << setw(6) << "Phase" - << setw(6) << "Cut" - << setw(6) << "Price" - << setw(10) << "Time" - << setw(10) << "ThisLB" - << setw(10) << "BestLB" - << setw(10) << "ThisUB" - << setw(10) << "ThisIP" - << setw(10) << "BestIP" - << endl; - - for (it = objHistoryBound.begin(); it != objHistoryBound.end(); it++) { - (*os) << setw(6) << (*it).phase - << setw(6) << (*it).cutPass - << setw(6) << (*it).pricePass - << setw(10) << UtilDblToStr((*it).timeStamp, 3) - << setw(10) << UtilDblToStr((*it).thisBound, 2) - << setw(10) << UtilDblToStr((*it).bestBound, 2) - << setw(10) << UtilDblToStr((*it).thisBoundUB, 2) - << setw(10) << UtilDblToStr((*it).thisBoundIP, 2) - << setw(10) << UtilDblToStr((*it).bestBoundIP, 2) - << endl; - } - - (*os) << "========== OBJ History Node " - << nodeIndex << " [END]: ==================================== " - << endl; +void DecompNodeStats::printObjHistoryBound(ostream *os) const { + (*os) << setiosflags(ios::fixed | ios::showpoint); + (*os).precision(2); + (*os) << "\n========== OBJ History Node " << nodeIndex + << " [BEGIN]: ==================================== " << endl; + vector::const_iterator it; + (*os) << setw(6) << "Phase" << setw(6) << "Cut" << setw(6) << "Price" + << setw(10) << "Time" << setw(10) << "ThisLB" << setw(10) << "BestLB" + << setw(10) << "ThisUB" << setw(10) << "ThisIP" << setw(10) << "BestIP" + << endl; + + for (it = objHistoryBound.begin(); it != objHistoryBound.end(); it++) { + (*os) << setw(6) << (*it).phase << setw(6) << (*it).cutPass << setw(6) + << (*it).pricePass << setw(10) << UtilDblToStr((*it).timeStamp, 3) + << setw(10) << UtilDblToStr((*it).thisBound, 2) << setw(10) + << UtilDblToStr((*it).bestBound, 2) << setw(10) + << UtilDblToStr((*it).thisBoundUB, 2) << setw(10) + << UtilDblToStr((*it).thisBoundIP, 2) << setw(10) + << UtilDblToStr((*it).bestBoundIP, 2) << endl; + } + + (*os) << "========== OBJ History Node " << nodeIndex + << " [END]: ==================================== " << endl; } // --------------------------------------------------------------------- // -void DecompStats::printOverallStats (ostream* os) -{ - calculateStats(); - (*os) << setiosflags(ios::fixed | ios::showpoint); - (*os).precision(2); - (*os) << "\n================ DECOMP Statistics [BEGIN]: =============== "; - totalOverall = totalDecomp; - (*os) << setw(40) << "\nTotal Decomp = " - << setw(10) << totalDecomp - << setw(10) << 100.0 * totalDecomp / 
totalOverall - << setw(6) << thisDecomp.size() - << setw(6) << maxDecomp - ; - (*os) << setw(40) << "\nTotal Solve Relax = " - << setw(10) << totalSolveRelax - << setw(10) << 100.0 * totalSolveRelax / totalOverall - << setw(6) << thisSolveRelax.size() - << setw(6) << maxSolveRelax - ; - (*os) << setw(40) << "\nTotal Solve Relax App = " - << setw(10) << totalSolveRelaxApp - << setw(10) << 100.0 * totalSolveRelaxApp / totalOverall - << setw(6) << thisSolveRelaxApp.size() - << setw(6) << maxSolveRelaxApp - ; - (*os) << setw(40) << "\nTotal Solution Update = " - << setw(10) << totalSolUpdate - << setw(10) << 100.0 * totalSolUpdate / totalOverall - << setw(6) << thisSolUpdate.size() - << setw(6) << maxSolUpdate - ; - (*os) << setw(40) << "\nTotal Generate Cuts = " - << setw(10) << totalGenCuts - << setw(10) << 100.0 * totalGenCuts / totalOverall - << setw(6) << thisGenCuts.size() - << setw(6) << maxGenCuts - ; - (*os) << setw(40) << "\nTotal Generate Vars = " - << setw(10) << totalGenVars - << setw(10) << 100.0 * totalGenVars / totalOverall - << setw(6) << thisGenVars.size() - << setw(6) << maxGenVars - ; - (*os) << setw(40) << "\nTotal Compress Cols = " - << setw(10) << totalCompressCols - << setw(10) << 100.0 * totalCompressCols / totalOverall - << setw(6) << thisCompressCols.size() - << setw(6) << maxCompressCols - ; - (*os) << "\n================ DECOMP Statistics [END ]: =============== \n"; +void DecompStats::printOverallStats(ostream *os) { + calculateStats(); + (*os) << setiosflags(ios::fixed | ios::showpoint); + (*os).precision(2); + (*os) << "\n================ DECOMP Statistics [BEGIN]: =============== "; + totalOverall = totalDecomp; + (*os) << setw(40) << "\nTotal Decomp = " << setw(10) << totalDecomp + << setw(10) << 100.0 * totalDecomp / totalOverall << setw(6) + << thisDecomp.size() << setw(6) << maxDecomp; + (*os) << setw(40) << "\nTotal Solve Relax = " << setw(10) + << totalSolveRelax << setw(10) << 100.0 * totalSolveRelax / totalOverall + << setw(6) << thisSolveRelax.size() << setw(6) << maxSolveRelax; + (*os) << setw(40) << "\nTotal Solve Relax App = " << setw(10) + << totalSolveRelaxApp << setw(10) + << 100.0 * totalSolveRelaxApp / totalOverall << setw(6) + << thisSolveRelaxApp.size() << setw(6) << maxSolveRelaxApp; + (*os) << setw(40) << "\nTotal Solution Update = " << setw(10) + << totalSolUpdate << setw(10) << 100.0 * totalSolUpdate / totalOverall + << setw(6) << thisSolUpdate.size() << setw(6) << maxSolUpdate; + (*os) << setw(40) << "\nTotal Generate Cuts = " << setw(10) << totalGenCuts + << setw(10) << 100.0 * totalGenCuts / totalOverall << setw(6) + << thisGenCuts.size() << setw(6) << maxGenCuts; + (*os) << setw(40) << "\nTotal Generate Vars = " << setw(10) << totalGenVars + << setw(10) << 100.0 * totalGenVars / totalOverall << setw(6) + << thisGenVars.size() << setw(6) << maxGenVars; + (*os) << setw(40) << "\nTotal Compress Cols = " << setw(10) + << totalCompressCols << setw(10) + << 100.0 * totalCompressCols / totalOverall << setw(6) + << thisCompressCols.size() << setw(6) << maxCompressCols; + (*os) << "\n================ DECOMP Statistics [END ]: =============== \n"; } - // --------------------------------------------------------------------- // -void printDetailedStats(ostream* os = &cout) -{ -} +void printDetailedStats(ostream *os = &cout) {} diff --git a/Dip/src/DecompVar.cpp b/Dip/src/DecompVar.cpp index 7b4958c5..25bbde2f 100644 --- a/Dip/src/DecompVar.cpp +++ b/Dip/src/DecompVar.cpp @@ -18,113 +18,96 @@ using namespace std; // 
--------------------------------------------------------------------- // -//Design question - we use a check over the entire space to simplify +// Design question - we use a check over the entire space to simplify // storage of branching bounds. But, in reality, for node 1, for example, // these has only been one branch - so we only need to check one number. -//This could be much faster if we are willing to do more accounting related +// This could be much faster if we are willing to do more accounting related // to branching. -bool DecompVar::doesSatisfyBounds(int denseLen, - double* denseArr, - const DecompSubModel& model, - const double* lbs, - const double* ubs) -{ - int j; - double xj;//, lb, ub; - vector ::const_iterator it; - map::const_iterator mcit; - DecompConstraintSet* modelRelax = model.getModel(); - const vector& activeColumns = modelRelax->getActiveColumns(); - //--- - //--- activeColumns are in original space - //--- denseArr, lbs, ubs are all in original space - //--- - fillDenseArr(denseLen, denseArr);//TODO: expensive... - - for (it = activeColumns.begin(); it != activeColumns.end(); it++) { - j = *it; - xj = denseArr[j]; - - if (xj < (lbs[j] - DecompEpsilon) || - xj > (ubs[j] + DecompEpsilon)) { - return false; - } - } - - return true; +bool DecompVar::doesSatisfyBounds(int denseLen, double *denseArr, + const DecompSubModel &model, + const double *lbs, const double *ubs) { + int j; + double xj; //, lb, ub; + vector::const_iterator it; + map::const_iterator mcit; + DecompConstraintSet *modelRelax = model.getModel(); + const vector &activeColumns = modelRelax->getActiveColumns(); + //--- + //--- activeColumns are in original space + //--- denseArr, lbs, ubs are all in original space + //--- + fillDenseArr(denseLen, denseArr); // TODO: expensive... 
+ + for (it = activeColumns.begin(); it != activeColumns.end(); it++) { + j = *it; + xj = denseArr[j]; + + if (xj < (lbs[j] - DecompEpsilon) || xj > (ubs[j] + DecompEpsilon)) { + return false; + } + } + + return true; } // --------------------------------------------------------------------- // -void DecompVar::fillDenseArr(int len, - double* arr) -{ - CoinFillN(arr, len, 0.0); - const int sz = m_s.getNumElements(); - const int* inds = m_s.getIndices(); - const double* elems = m_s.getElements(); - - for (int i = 0; i < sz; ++i) { - arr[inds[i]] = elems[i]; - } +void DecompVar::fillDenseArr(int len, double *arr) { + CoinFillN(arr, len, 0.0); + const int sz = m_s.getNumElements(); + const int *inds = m_s.getIndices(); + const double *elems = m_s.getElements(); + + for (int i = 0; i < sz; ++i) { + arr[inds[i]] = elems[i]; + } } // --------------------------------------------------------------------- // -void -DecompVar::print(double infinity, - ostream* os, - DecompApp* app) const -{ - double lb = getLowerBound(); - double ub = getUpperBound(); - (*os) << "\nVAR c: " << m_origCost - << " rc: " << m_redCost - << " eff: " << m_effCnt - << " block: " << m_blockId - << " colIndex: " << m_colMasterIndex; - - if (lb > -infinity) { - (*os) << " lb: " << getLowerBound(); - } else { - (*os) << " lb: -INF"; - } - - if (ub < infinity) { - (*os) << " ub: " << getUpperBound(); - } else { - (*os) << " ub: INF"; - } - - (*os) << "\n"; - UtilPrintPackedVector(m_s, os, app); +void DecompVar::print(double infinity, ostream *os, DecompApp *app) const { + double lb = getLowerBound(); + double ub = getUpperBound(); + (*os) << "\nVAR c: " << m_origCost << " rc: " << m_redCost + << " eff: " << m_effCnt << " block: " << m_blockId + << " colIndex: " << m_colMasterIndex; + + if (lb > -infinity) { + (*os) << " lb: " << getLowerBound(); + } else { + (*os) << " lb: -INF"; + } + + if (ub < infinity) { + (*os) << " ub: " << getUpperBound(); + } else { + (*os) << " ub: INF"; + } + + (*os) << "\n"; + UtilPrintPackedVector(m_s, os, app); } // --------------------------------------------------------------------- // -void -DecompVar::print(double infinity, - ostream* os, - const vector& colNames, - const double* value) const -{ - double lb = getLowerBound(); - double ub = getUpperBound(); - (*os) << "\nVAR c: " << m_origCost - << " rc: " << m_redCost - << " eff: " << m_effCnt - << " block: " << m_blockId - << " colIndex: " << m_colMasterIndex; - - if (lb > -infinity) { - (*os) << " lb: " << getLowerBound(); - } else { - (*os) << " lb: -INF"; - } - - if (ub < infinity) { - (*os) << " ub: " << getUpperBound(); - } else { - (*os) << " ub: INF"; - } - - (*os) << "\n"; - UtilPrintPackedVector(m_s, os, colNames, value); +void DecompVar::print(double infinity, ostream *os, + const vector &colNames, + const double *value) const { + double lb = getLowerBound(); + double ub = getUpperBound(); + (*os) << "\nVAR c: " << m_origCost << " rc: " << m_redCost + << " eff: " << m_effCnt << " block: " << m_blockId + << " colIndex: " << m_colMasterIndex; + + if (lb > -infinity) { + (*os) << " lb: " << getLowerBound(); + } else { + (*os) << " lb: -INF"; + } + + if (ub < infinity) { + (*os) << " ub: " << getUpperBound(); + } else { + (*os) << " ub: INF"; + } + + (*os) << "\n"; + UtilPrintPackedVector(m_s, os, colNames, value); } diff --git a/Dip/src/DecompVarPool.cpp b/Dip/src/DecompVarPool.cpp index 506804d0..8286d030 100644 --- a/Dip/src/DecompVarPool.cpp +++ b/Dip/src/DecompVarPool.cpp @@ -12,37 +12,34 @@ // All Rights Reserved. 
// //===========================================================================// - #include "DecompVarPool.h" #include "DecompConstraintSet.h" using namespace std; // --------------------------------------------------------------------- // -bool DecompWaitingCol::setReducedCost(const double* u, - const DecompStatus stat) -{ - double redCost; - - if (stat == STAT_FEASIBLE) { - // --- - // --- RC[s] = c[s] - u (A''s) - alpha - // --- - redCost = m_var->getOriginalCost() - m_col->dotProduct(u); - m_var->setReducedCost(redCost); - return redCost <= -0.0000000001;//m_app->m_param.dualTol; - } else { - // --- - // --- RC[s] = u (A''s) + alpha -> dual ray - // --- - redCost = -m_col->dotProduct(u); - return redCost <= -0.0000000001;//m_app->m_param.dualTol; - } +bool DecompWaitingCol::setReducedCost(const double *u, + const DecompStatus stat) { + double redCost; + + if (stat == STAT_FEASIBLE) { + // --- + // --- RC[s] = c[s] - u (A''s) - alpha + // --- + redCost = m_var->getOriginalCost() - m_col->dotProduct(u); + m_var->setReducedCost(redCost); + return redCost <= -0.0000000001; // m_app->m_param.dualTol; + } else { + // --- + // --- RC[s] = u (A''s) + alpha -> dual ray + // --- + redCost = -m_col->dotProduct(u); + return redCost <= -0.0000000001; // m_app->m_param.dualTol; + } } - // --------------------------------------------------------------------- // -//use hash! +// use hash! /*bool DecompVarPool::isDuplicate(const DecompWaitingCol & wcol){ vector::const_iterator vi; for(vi = begin(); vi != end(); vi++){ @@ -54,194 +51,184 @@ bool DecompWaitingCol::setReducedCost(const double* u, return false; }*/ - // --------------------------------------------------------------------- // -bool DecompVarPool::isParallel(const DecompVarList& vars, - const DecompWaitingCol& wcol, - const double maxCosine) -{ - DecompVarList::const_iterator vi; - int j1, j2, index1, index2; - double cosine; - DecompVar* var = wcol.getVarPtr(); - const int block1 = var->getBlockId(); - const int len1 = var->m_s.getNumElements(); - const int* ind1 = var->m_s.getIndices(); - const double* els1 = var->m_s.getElements(); - const double norm1 = var->getNorm(); - bool isPara = false; - - if (len1 == 0) { - return false; - } - - for (vi = vars.begin(); vi != vars.end(); vi++) { - //--- - //--- if different blocks, it doesn't matter if rest of var - //--- is close to parallel - //--- - const int len2 = (*vi)->m_s.getNumElements(); - - if ((*vi)->getBlockId() != block1 || - len2 == 0) { - continue; +bool DecompVarPool::isParallel(const DecompVarList &vars, + const DecompWaitingCol &wcol, + const double maxCosine) { + DecompVarList::const_iterator vi; + int j1, j2, index1, index2; + double cosine; + DecompVar *var = wcol.getVarPtr(); + const int block1 = var->getBlockId(); + const int len1 = var->m_s.getNumElements(); + const int *ind1 = var->m_s.getIndices(); + const double *els1 = var->m_s.getElements(); + const double norm1 = var->getNorm(); + bool isPara = false; + + if (len1 == 0) { + return false; + } + + for (vi = vars.begin(); vi != vars.end(); vi++) { + //--- + //--- if different blocks, it doesn't matter if rest of var + //--- is close to parallel + //--- + const int len2 = (*vi)->m_s.getNumElements(); + + if ((*vi)->getBlockId() != block1 || len2 == 0) { + continue; + } + + const int *ind2 = (*vi)->m_s.getIndices(); + + const double *els2 = (*vi)->m_s.getElements(); + + const double norm2 = (*vi)->getNorm(); + + index1 = 0; + + index2 = 0; + + cosine = 0.0; + + //--- + //--- calculate var1*var2 (both sparse) + //--- var 
indices are assumed to be sorted increasing + //--- + while (1) { + j1 = ind1[index1]; + j2 = ind2[index2]; + + if (j1 == j2) { + cosine += els1[index1] * els2[index2]; + index1++; + index2++; + + if (index2 >= len2 || index1 >= len1) { + break; + } + } else if (j1 > j2) { + index2++; + + if (index2 >= len2) { + break; + } + } else { + index1++; + + if (index1 >= len1) { + break; + } } + } - const int* ind2 = (*vi)->m_s.getIndices(); - - const double* els2 = (*vi)->m_s.getElements(); - - const double norm2 = (*vi)->getNorm(); - - index1 = 0; + cosine /= norm1; + cosine /= norm2; + cosine = fabs(cosine); - index2 = 0; + if (cosine > maxCosine) { + isPara = true; + printf("parallel: cosine=%g\n", cosine); + break; + } - cosine = 0.0; - - //--- - //--- calculate var1*var2 (both sparse) - //--- var indices are assumed to be sorted increasing - //--- - while (1) { - j1 = ind1[index1]; - j2 = ind2[index2]; - - if (j1 == j2) { - cosine += els1[index1] * els2[index2]; - index1++; - index2++; - - if (index2 >= len2 || index1 >= len1) { - break; - } - } else if (j1 > j2) { - index2++; - - if (index2 >= len2) { - break; - } - } else { - index1++; - - if (index1 >= len1) { - break; - } - } - } - - cosine /= norm1; - cosine /= norm2; - cosine = fabs(cosine); - - if (cosine > maxCosine) { - isPara = true; - printf("parallel: cosine=%g\n", cosine); - break; - } + // printf("not parallel: cosine=%g\n", cosine); + } - //printf("not parallel: cosine=%g\n", cosine); - } - - return isPara; + return isPara; } // --------------------------------------------------------------------- // -bool DecompVarPool::isDuplicate(const DecompVarList& vars, - const DecompWaitingCol& wcol) -{ - DecompVarList::const_iterator vi; - DecompVar* var = wcol.getVarPtr(); - - for (vi = vars.begin(); vi != vars.end(); vi++) { - if (((*vi)->getBlockId() == var->getBlockId()) && - ((*vi)->getStrHash() == var->getStrHash())) { - return true; - } - } - - return false; +bool DecompVarPool::isDuplicate(const DecompVarList &vars, + const DecompWaitingCol &wcol) { + DecompVarList::const_iterator vi; + DecompVar *var = wcol.getVarPtr(); + + for (vi = vars.begin(); vi != vars.end(); vi++) { + if (((*vi)->getBlockId() == var->getBlockId()) && + ((*vi)->getStrHash() == var->getStrHash())) { + return true; + } + } + + return false; } // --------------------------------------------------------------------- // -bool DecompVarPool::isDuplicate(const DecompWaitingCol& wcol) -{ - vector::const_iterator vi; - DecompVar* var1 = wcol.getVarPtr(); +bool DecompVarPool::isDuplicate(const DecompWaitingCol &wcol) { + vector::const_iterator vi; + DecompVar *var1 = wcol.getVarPtr(); - for (vi = begin(); vi != end(); vi++) { - DecompVar* var2 = (*vi).getVarPtr(); + for (vi = begin(); vi != end(); vi++) { + DecompVar *var2 = (*vi).getVarPtr(); - if ((var1->getBlockId() == var2->getBlockId()) && - (var1->getStrHash() == var2->getStrHash())) { - return true; - } - } + if ((var1->getBlockId() == var2->getBlockId()) && + (var1->getStrHash() == var2->getStrHash())) { + return true; + } + } - return false; + return false; } /*-------------------------------------------------------------------------*/ -bool DecompVarPool::setReducedCosts(const double* u, - const DecompStatus stat, - DecompVarPool::iterator first, - DecompVarPool::iterator last) -{ - //printf("\nHERE DecompVarPool::setReducedCosts"); - bool found_negrc_var = false; - - for (DecompVarPool::iterator vi = first; vi != last; vi++) { - // --- - // --- calculate and set the reduced costs for the variables - // 
--- which are pointed to in this pool, if any have rc < 0, - // --- return true - // --- - found_negrc_var = (*vi).setReducedCost(u, stat) ? true : found_negrc_var; - } - - return found_negrc_var; +bool DecompVarPool::setReducedCosts(const double *u, const DecompStatus stat, + DecompVarPool::iterator first, + DecompVarPool::iterator last) { + // printf("\nHERE DecompVarPool::setReducedCosts"); + bool found_negrc_var = false; + + for (DecompVarPool::iterator vi = first; vi != last; vi++) { + // --- + // --- calculate and set the reduced costs for the variables + // --- which are pointed to in this pool, if any have rc < 0, + // --- return true + // --- + found_negrc_var = (*vi).setReducedCost(u, stat) ? true : found_negrc_var; + } + + return found_negrc_var; } // --------------------------------------------------------------------- // -//THINK: this is specific to PC and DC?? -void DecompVarPool::reExpand(const DecompConstraintSet& modelCore, - const double tolZero) -{ - //THIS IS WRONG... - //in masterSI, we have - //A'', convexity, cuts - //in modelCore.M we have A'', cuts - //the sparseCol that you come out with the end here has things in the wrong - //order: //A'', cuts, convexity - double* denseCol = new double[modelCore.getNumRows() + 1]; - vector::iterator vi; - - for (vi = begin(); vi != end(); vi++) { - // --- - // --- get dense column = A''s, append convexity constraint on end - // --- - modelCore.M->times((*vi).getVarPtr()->m_s, denseCol); - denseCol[modelCore.getNumRows()] = 1.0; - // --- - // --- create a sparse column from the dense column - // --- - CoinPackedVector* sparseCol - = UtilPackedVectorFromDense(modelCore.getNumRows() + 1, - denseCol, tolZero); - (*vi).deleteCol(); - (*vi).setCol(sparseCol); - } - - setColsAreValid(true); - UTIL_DELARR(denseCol); +// THINK: this is specific to PC and DC?? +void DecompVarPool::reExpand(const DecompConstraintSet &modelCore, + const double tolZero) { + // THIS IS WRONG... 
+ // in masterSI, we have + // A'', convexity, cuts + // in modelCore.M we have A'', cuts + // the sparseCol that you come out with the end here has things in the wrong + // order: //A'', cuts, convexity + double *denseCol = new double[modelCore.getNumRows() + 1]; + vector::iterator vi; + + for (vi = begin(); vi != end(); vi++) { + // --- + // --- get dense column = A''s, append convexity constraint on end + // --- + modelCore.M->times((*vi).getVarPtr()->m_s, denseCol); + denseCol[modelCore.getNumRows()] = 1.0; + // --- + // --- create a sparse column from the dense column + // --- + CoinPackedVector *sparseCol = UtilPackedVectorFromDense( + modelCore.getNumRows() + 1, denseCol, tolZero); + (*vi).deleteCol(); + (*vi).setCol(sparseCol); + } + + setColsAreValid(true); + UTIL_DELARR(denseCol); } // --------------------------------------------------------------------- // -void DecompVarPool::print(double infinity, ostream* os) const -{ - vector::const_iterator vi; +void DecompVarPool::print(double infinity, ostream *os) const { + vector::const_iterator vi; - for (vi = begin(); vi != end(); vi++) { - (*vi).getVarPtr()->print(infinity, os); - } + for (vi = begin(); vi != end(); vi++) { + (*vi).getVarPtr()->print(infinity, os); + } } diff --git a/Dip/src/UtilGraphLib.cpp b/Dip/src/UtilGraphLib.cpp index 8bfa5416..b3b94aeb 100644 --- a/Dip/src/UtilGraphLib.cpp +++ b/Dip/src/UtilGraphLib.cpp @@ -24,563 +24,595 @@ --- -------------------------------------------------------------------------- */ - #include "UtilGraphLib.h" -#include -#include #include #include #include +#include +#include using namespace std; /* TODO - deal with logging, error handling, etc */ // ========================================================================== -void UtilGraphLib::read_data(const char* datafile) -{ - ifstream is(datafile); /* ??? 
*/ - - if (!is) { - cerr << "UtilGraphLib::read_data failed to open " << datafile << endl; - exit(1); - } - - enum DIST {_EXPLICIT, _EUC_2D, _EUC_3D, _MAX_2D, _MAX_3D, - _MAN_2D, _MAN_3D, _CEIL_2D, _GEO, _ATT - }; - const int LENGTH = 255; - const int KEY_NUM = 41; - const int NCTYPE_NUM = 3; - const int WTYPE_NUM = 10; - const int WFORMAT_NUM = 10; - const int DTYPE_NUM = 3; - const double MY_PI = 3.141592; - //This section lists the names of the possible fields in the data file - static char keywords[KEY_NUM][22] = { - "NAME", "NAME:", "TYPE", "TYPE:", "COMMENT", "COMMENT:", - "DIMENSION", "DIMENSION:", "CAPACITY", "CAPACITY:", - "EDGE_WEIGHT_TYPE", "EDGE_WEIGHT_TYPE:", - "EDGE_WEIGHT_FORMAT", "EDGE_WEIGHT_FORMAT:", - "DISPLAY_DATA_TYPE", "DISPLAY_DATA_TYPE:", - "EDGE_WEIGHT_SECTION", "EDGE_WEIGHT_SECTION:", - "DISPLAY_DATA_SECTION", "DISPLAY_DATA_SECTION:", - "NODE_COORD_SECTION", "NODE_COORD_SECTION:", - "NODE_COORD_TYPE", "NODE_COORD_TYPE:", - "DEPOT_SECTION", "DEPOT_SECTION:", - "CAPACITY_VOL", "CAPACITY_VOL:", - "DEMAND_SECTION", "DEMAND_SECTION:", - "TIME_WINDOW_SECTION", "TIME_WINDOW_SECTION:", - "STANDTIME_SECTION", "STANDTIME_SECTION:", - "PICKUP_SECTION", "PICKUP_SECTION:", - "EOF", "EOF.", "", "", "NO_MORE_TYPE" - }; - //This section lists the possible node coordinate data types - static char nctypes[NCTYPE_NUM][14] = {"TWOD_COORDS", "THREED_COORDS", - "NO_COORDS" - }; - //This is a list of the possible data types for edge weights - static char wtypes[WTYPE_NUM][9] = { - "EXPLICIT", "EUC_2D", "EUC_3D", - "MAX_2D", "MAX_3D", "MAN_2D", "MAN_3D", "CEIL_2D", "GEO", "ATT" - }; - //This is a list of the possible formats that the edge weight matrix - //could be given in if it is given explicitly - static char wformats[WFORMAT_NUM][20] = { - "UPPER_ROW", "LOWER_ROW", "UPPER_DIAG_ROW", "LOWER_DIAG_ROW", - "UPPER_COL", "LOWER_COL", "UPPER_DIAG_COL", "LOWER_DIAG_COL", - "FULL_MATRIX", "FUNCTION" - }; - //This is a list of the various display data types - static char dtypes[DTYPE_NUM][14] = {"COORD_DISPLAY", "TWOD_DISPLAY", - "NO_DISPLAY" - }; - FILE* f; - - //if ((err = fopen(&f, datafile, "r")) != 0){ - if ((f = fopen(datafile, "r")) == 0) { - cerr << "UtilGraphLib::read_data ERROR I/O : reading " << datafile +void UtilGraphLib::read_data(const char *datafile) { + ifstream is(datafile); /* ??? 
*/ + + if (!is) { + cerr << "UtilGraphLib::read_data failed to open " << datafile << endl; + exit(1); + } + + enum DIST { + _EXPLICIT, + _EUC_2D, + _EUC_3D, + _MAX_2D, + _MAX_3D, + _MAN_2D, + _MAN_3D, + _CEIL_2D, + _GEO, + _ATT + }; + const int LENGTH = 255; + const int KEY_NUM = 41; + const int NCTYPE_NUM = 3; + const int WTYPE_NUM = 10; + const int WFORMAT_NUM = 10; + const int DTYPE_NUM = 3; + const double MY_PI = 3.141592; + // This section lists the names of the possible fields in the data file + static char keywords[KEY_NUM][22] = {"NAME", + "NAME:", + "TYPE", + "TYPE:", + "COMMENT", + "COMMENT:", + "DIMENSION", + "DIMENSION:", + "CAPACITY", + "CAPACITY:", + "EDGE_WEIGHT_TYPE", + "EDGE_WEIGHT_TYPE:", + "EDGE_WEIGHT_FORMAT", + "EDGE_WEIGHT_FORMAT:", + "DISPLAY_DATA_TYPE", + "DISPLAY_DATA_TYPE:", + "EDGE_WEIGHT_SECTION", + "EDGE_WEIGHT_SECTION:", + "DISPLAY_DATA_SECTION", + "DISPLAY_DATA_SECTION:", + "NODE_COORD_SECTION", + "NODE_COORD_SECTION:", + "NODE_COORD_TYPE", + "NODE_COORD_TYPE:", + "DEPOT_SECTION", + "DEPOT_SECTION:", + "CAPACITY_VOL", + "CAPACITY_VOL:", + "DEMAND_SECTION", + "DEMAND_SECTION:", + "TIME_WINDOW_SECTION", + "TIME_WINDOW_SECTION:", + "STANDTIME_SECTION", + "STANDTIME_SECTION:", + "PICKUP_SECTION", + "PICKUP_SECTION:", + "EOF", + "EOF.", + "", + "", + "NO_MORE_TYPE"}; + // This section lists the possible node coordinate data types + static char nctypes[NCTYPE_NUM][14] = {"TWOD_COORDS", "THREED_COORDS", + "NO_COORDS"}; + // This is a list of the possible data types for edge weights + static char wtypes[WTYPE_NUM][9] = {"EXPLICIT", "EUC_2D", "EUC_3D", "MAX_2D", + "MAX_3D", "MAN_2D", "MAN_3D", "CEIL_2D", + "GEO", "ATT"}; + // This is a list of the possible formats that the edge weight matrix + // could be given in if it is given explicitly + static char wformats[WFORMAT_NUM][20] = { + "UPPER_ROW", "LOWER_ROW", "UPPER_DIAG_ROW", "LOWER_DIAG_ROW", + "UPPER_COL", "LOWER_COL", "UPPER_DIAG_COL", "LOWER_DIAG_COL", + "FULL_MATRIX", "FUNCTION"}; + // This is a list of the various display data types + static char dtypes[DTYPE_NUM][14] = {"COORD_DISPLAY", "TWOD_DISPLAY", + "NO_DISPLAY"}; + FILE *f; + + // if ((err = fopen(&f, datafile, "r")) != 0){ + if ((f = fopen(datafile, "r")) == 0) { + cerr << "UtilGraphLib::read_data ERROR I/O : reading " << datafile + << ". Aborting." << endl; + abort(); + } + + /* + This loop reads in the next line of the data file and compares it + to the list of possible keywords to determine what data will follow. + It then reads the data into the appropriate field and iterates + */ + char line1[LENGTH], line[LENGTH], key[30], tmp[80]; + int wtype = -1, wformat = -1, dtype = -1, nctype = -1; + int depot, k, l, m, i, j, node, *coef2; + double deg, min, x, y, fdummy; + double coord_x, coord_y, coord_z; + bool capacity_volume = false; + + while (0 != fgets(line1, LENGTH, f)) { + strcpy(key, ""); + sscanf(line1, "%s", key); // read in next keyword + int k; + + for (k = 0; k < KEY_NUM; k++) + if (strcmp(keywords[k], key) == 0) { + break; + } + + if (k == KEY_NUM) { + cerr << "UtilGraphLib::read_data ERROR I/O : unknown keyword " << key << ". Aborting." << endl; abort(); - } - - /* - This loop reads in the next line of the data file and compares it - to the list of possible keywords to determine what data will follow. 
- It then reads the data into the appropriate field and iterates - */ - char line1[LENGTH], line[LENGTH], key[30], tmp[80]; - int wtype = -1, wformat = -1, dtype = -1, nctype = -1; - int depot, k, l, m, i, j, node, *coef2; - double deg, min, x, y, fdummy; - double coord_x, coord_y, coord_z; - bool capacity_volume = false; - - while (0 != fgets( line1, LENGTH, f)) { - strcpy(key, ""); - sscanf(line1, "%s", key); //read in next keyword - int k; - - for (k = 0; k < KEY_NUM; k++) - if (strcmp(keywords[k], key) == 0) { - break; - } - - if (k == KEY_NUM) { - cerr << "UtilGraphLib::read_data ERROR I/O : unknown keyword " - << key << ". Aborting." << endl; - abort(); + } + + // This is a bit shift operation that divides k by 2 since in the list + // of keywords, there are two possible formats for the keyword + k >>= 1; + + if (strchr(line1, ':')) { + strcpy(line, strchr(line1, ':') + 1); + } + + switch (k) { + case 0: { // NAME (set name) + if (!sscanf(line, "%s", tmp)) { + cerr << "UtilGraphLib::read_data ERROR I/O : reading NAME" << tmp + << ". Aborting." << endl; + abort(); } - //This is a bit shift operation that divides k by 2 since in the list - //of keywords, there are two possible formats for the keyword - k >>= 1; - - if (strchr(line1, ':')) { - strcpy(line, strchr(line1, ':') + 1); + // cout << "PROBLEM NAME: \t\t" << tmp << endl; + string tmp_str(tmp); + this->name = tmp_str.substr(0, tmp_str.find_first_of(".")); + break; + } + case 3: // DIMENSION (set n_vertices, n_edges) + + if (!sscanf(line, "%d", &k)) { + cerr << "UtilGraphLib::read_data ERROR I/O : reading DIMENSION " << k + << ". Aborting." << endl; + abort(); } - switch (k) { - case 0: { //NAME (set name) - if (!sscanf(line, "%s", tmp)) { - cerr << "UtilGraphLib::read_data ERROR I/O : reading NAME" - << tmp << ". Aborting." << endl; - abort(); - } + this->n_vertices = k; + this->n_edges = ((n_vertices * n_vertices) - n_vertices) / 2; + break; + case 4: // CAPACITY (set capacity) - //cout << "PROBLEM NAME: \t\t" << tmp << endl; - string tmp_str(tmp); - this->name = tmp_str.substr(0, tmp_str.find_first_of(".")); - break; + if (!sscanf(line, "%d", &k)) { + cerr << "GrabLib:: read_data ERROR I/O : reading CAPACITY " << k + << ". Aborting." << endl; + abort(); } - case 3 : //DIMENSION (set n_vertices, n_edges) - if (!sscanf(line, "%d", &k)) { - cerr << "UtilGraphLib::read_data ERROR I/O : reading DIMENSION " - << k << ". Aborting." << endl; - abort(); - } + this->capacity = k; + break; + case 5: // EDGE_WEIGHT_TYPE + sscanf(line, "%s", tmp); - this->n_vertices = k; - this->n_edges = ((n_vertices * n_vertices) - n_vertices) / 2; - break; - case 4 : //CAPACITY (set capacity) + for (wtype = 0; wtype < WTYPE_NUM; wtype++) + if (strcmp(wtypes[wtype], tmp) == 0) { + break; + } - if (!sscanf(line, "%d", &k)) { - cerr << "GrabLib:: read_data ERROR I/O : reading CAPACITY " - << k << ". Aborting." << endl; - abort(); - } + if (wtype == WTYPE_NUM) { + cerr << "GrabLib::read_data ERROR I/O : unknown weight type " << tmp + << ". Aborting." << endl; + abort(); + } - this->capacity = k; - break; - case 5 : //EDGE_WEIGHT_TYPE - sscanf(line, "%s", tmp); + break; + case 6: // EDGE_WEIGHT_FORMAT + sscanf(line, "%s", tmp); - for (wtype = 0; wtype < WTYPE_NUM; wtype++) - if (strcmp(wtypes[wtype], tmp) == 0) { - break; - } + for (wformat = 0; wformat < WFORMAT_NUM; wformat++) + if (strcmp(wformats[wformat], tmp) == 0) { + break; + } - if (wtype == WTYPE_NUM) { - cerr << "GrabLib::read_data ERROR I/O : unknown weight type " - << tmp << ". Aborting." 
<< endl; - abort(); - } + if (wformat == WFORMAT_NUM) { + cerr << "UtilGraphLib::read_data ERROR I/O : unknown weight format " + << tmp << ". Aborting." << endl; + abort(); + } - break; - case 6 : //EDGE_WEIGHT_FORMAT - sscanf(line, "%s", tmp); + break; + case 7: // DISPLAY_DATA_TYPE + sscanf(line, "%s", tmp); - for (wformat = 0; wformat < WFORMAT_NUM; wformat++) - if (strcmp(wformats[wformat], tmp) == 0) { - break; - } + for (dtype = 0; dtype < DTYPE_NUM; dtype++) + if (strcmp(dtypes[dtype], tmp) == 0) { + break; + } - if (wformat == WFORMAT_NUM) { - cerr << "UtilGraphLib::read_data ERROR I/O : unknown weight format " - << tmp << ". Aborting." << endl; - abort(); - } + if (dtype == DTYPE_NUM) { + cerr << "UtilGraphLib::read_data ERROR I/O : unknown display type " + << tmp << ". Aborting." << endl; + abort(); + } + + break; + case 8: // EDGE_WEIGHT_SECTION (open memory and set edge_wt) + + if (wtype != _EXPLICIT) { + break; + } - break; - case 7 : //DISPLAY_DATA_TYPE - sscanf(line, "%s", tmp); + edge_wt = new int[n_edges]; - for (dtype = 0; dtype < DTYPE_NUM; dtype++) - if (strcmp(dtypes[dtype], tmp) == 0) { - break; + switch (wformat) { + case 1: // LOWER_ROW + case 4: // UPPER_COL + case 3: // LOWER_DIAG_ROW + case 6: // UPPER_DIAG_COL + + for (i = 0, coef2 = edge_wt; i < n_vertices; i++) { + for (j = 0; j < i; j++, coef2++) { + if (!fscanf(f, "%lf", &fdummy)) { + cerr << "UtilGraphLib::read_data ERROR I/O : not enough data " + << "-- DIMENSION or " + << "EDGE_WEIGHT_TYPE declared wrong. Aborting." << endl; + abort(); + } else { + *coef2 = (int)fdummy; } + } - if (dtype == DTYPE_NUM) { - cerr << "UtilGraphLib::read_data ERROR I/O : unknown display type " - << tmp << ". Aborting." << endl; + if ((wformat == 3 || wformat == 6) && !fscanf(f, "%lf", &fdummy)) { + cerr << "UtilGraphLib::read_data ERROR I/O : not enough data " + << "-- DIMENSION or EDGE_WEIGHT_TYPE declared wrong. " + << "Aborting." << endl; abort(); - } - - break; - case 8: //EDGE_WEIGHT_SECTION (open memory and set edge_wt) - - if (wtype != _EXPLICIT) { - break; - } - - edge_wt = new int[n_edges]; - - switch (wformat) { - case 1 : //LOWER_ROW - case 4 : //UPPER_COL - case 3 : //LOWER_DIAG_ROW - case 6 : //UPPER_DIAG_COL - - for (i = 0, coef2 = edge_wt; i < n_vertices; i++) { - for (j = 0; j < i; j++, coef2++) { - if (!fscanf(f, "%lf", &fdummy)) { - cerr << "UtilGraphLib::read_data ERROR I/O : not enough data " - << "-- DIMENSION or " - << "EDGE_WEIGHT_TYPE declared wrong. Aborting." << endl; - abort(); - } else { - *coef2 = (int)fdummy; - } - } - - if ((wformat == 3 || wformat == 6) && !fscanf(f, "%lf", &fdummy)) { - cerr << "UtilGraphLib::read_data ERROR I/O : not enough data " - << "-- DIMENSION or EDGE_WEIGHT_TYPE declared wrong. " - << "Aborting." << endl; - abort(); - } + } + } + + if (fscanf(f, "%lf", &fdummy)) { + cerr << "UtilGraphLib::read_data ERROR I/O : too much data " + << "-- DIMENSION or " + << "EDGE_WEIGHT_TYPE declared wrong. Aborting." << endl; + abort(); + } + + break; + case 0: // UPPER_ROW + case 5: // LOWER_COL + case 2: // UPPER_DIAG_ROW + case 7: // LOWER_DIAG_COL + + for (i = 0, coef2 = edge_wt; i < n_vertices; i++) { + if (wformat == 2 || wformat == 7) + if (!fscanf(f, "%lf", &fdummy)) { + cerr << "UtilGraphLib::read_data ERROR I/O : not enough data " + << "-- DIMENSION or EDGE_WEIGHT_TYPE declared wrong. " + << "Aborting." 
<< endl; + abort(); } - if (fscanf(f, "%lf", &fdummy)) { - cerr << "UtilGraphLib::read_data ERROR I/O : too much data " - << "-- DIMENSION or " - << "EDGE_WEIGHT_TYPE declared wrong. Aborting." << endl; - abort(); + for (j = i + 1; j < n_vertices; j++) { + if (!fscanf(f, "%lf", &fdummy)) { + cerr << "UtilGraphLib::read_data ERROR I/O : not enough data " + << "-- DIMENSION or EDGE_WEIGHT_TYPE declared wrong. " + << "Aborting." << endl; + abort(); + } else { + coef2[j * (j - 1) / 2 + i] = (int)fdummy; } - - break; - case 0 : //UPPER_ROW - case 5 : //LOWER_COL - case 2 : //UPPER_DIAG_ROW - case 7 : //LOWER_DIAG_COL - - for (i = 0, coef2 = edge_wt; i < n_vertices; i++) { - if (wformat == 2 || wformat == 7) - if (!fscanf(f, "%lf", &fdummy)) { - cerr << "UtilGraphLib::read_data ERROR I/O : not enough data " - << "-- DIMENSION or EDGE_WEIGHT_TYPE declared wrong. " - << "Aborting." << endl; - abort(); - } - - for (j = i + 1; j < n_vertices; j++) { - if (!fscanf(f, "%lf", &fdummy)) { - cerr << "UtilGraphLib::read_data ERROR I/O : not enough data " - << "-- DIMENSION or EDGE_WEIGHT_TYPE declared wrong. " - << "Aborting." << endl; - abort(); - } else { - coef2[j * (j - 1) / 2 + i] = (int)fdummy; - } - } + } + } + + if (fscanf(f, "%lf", &fdummy)) { + cerr << "UtilGraphLib::read_data ERROR I/O : too much data " + << "-- DIMENSION or EDGE_WEIGHT_TYPE declared wrong. " + << "Aborting." << endl; + abort(); + } + + break; + case 8: // FULL_MATRIX + + for (i = 0, coef2 = edge_wt; i < n_vertices; i++) { + for (j = 0; j <= i; j++) + if (!fscanf(f, "%lf", &fdummy)) { + cerr << "UtilGraphLib::read_data ERROR I/O : not enough data " + << "-- DIMENSION or EDGE_WEIGHT_TYPE declared wrong. " + << "Aborting." << endl; + abort(); } - if (fscanf(f, "%lf", &fdummy)) { - cerr << "UtilGraphLib::read_data ERROR I/O : too much data " - << "-- DIMENSION or EDGE_WEIGHT_TYPE declared wrong. " - << "Aborting." << endl; - abort(); + for (j = i + 1; j < n_vertices; j++) { + if (!fscanf(f, "%lf", &fdummy)) { + cerr << "UtilGraphLib::read_data ERROR I/O : not enough data " + << "-- DIMENSION or EDGE_WEIGHT_TYPE declared wrong. " + << "Aborting." << endl; + abort(); } - break; - case 8 : //FULL_MATRIX - - for (i = 0, coef2 = edge_wt; i < n_vertices; i++) { - for (j = 0; j <= i; j++) - if (!fscanf(f, "%lf", &fdummy)) { - cerr << "UtilGraphLib::read_data ERROR I/O : not enough data " - << "-- DIMENSION or EDGE_WEIGHT_TYPE declared wrong. " - << "Aborting." << endl; - abort(); - } - - for (j = i + 1; j < n_vertices; j++) { - if (!fscanf(f, "%lf", &fdummy)) { - cerr << "UtilGraphLib::read_data ERROR I/O : not enough data " - << "-- DIMENSION or EDGE_WEIGHT_TYPE declared wrong. " - << "Aborting." << endl; - abort(); - } - - coef2[j * (j - 1) / 2 + i] = (int) fdummy; - } - } + coef2[j * (j - 1) / 2 + i] = (int)fdummy; + } + } - if (fscanf(f, "%lf", &fdummy)) { - cerr << "UtilGraphLib::read_data ERROR I/O : too much data " - << "-- DIMENSION or EDGE_WEIGHT_TYPE declared wrong. " - << "Aborting." << endl; - abort(); - } + if (fscanf(f, "%lf", &fdummy)) { + cerr << "UtilGraphLib::read_data ERROR I/O : too much data " + << "-- DIMENSION or EDGE_WEIGHT_TYPE declared wrong. " + << "Aborting." << endl; + abort(); + } - break; - } + break; + } - break; - case 9 : //DISPLAY_DATA_SECTION (open memory and set posx, posy) + break; + case 9: // DISPLAY_DATA_SECTION (open memory and set posx, posy) - if (dtype != 1) { - cerr << "UtilGraphLib::read_data ERROR I/O : DISPLAY_DATA_SECTION " - << "exists but not TWOD_DISPLAY. Aborting." 
<< endl; - abort(); - } + if (dtype != 1) { + cerr << "UtilGraphLib::read_data ERROR I/O : DISPLAY_DATA_SECTION " + << "exists but not TWOD_DISPLAY. Aborting." << endl; + abort(); + } - posx = new int[n_vertices]; - posy = new int[n_vertices]; + posx = new int[n_vertices]; + posy = new int[n_vertices]; - for (i = 0; i < n_vertices; i++) { - if ((k = fscanf(f, "%d%lf%lf", &node, &x, &y)) != 3) { - cerr << "UtilGraphLib::read_data ERROR I/O : error reading " - << "DISPLAY_DATA" << endl; - break; - } + for (i = 0; i < n_vertices; i++) { + if ((k = fscanf(f, "%d%lf%lf", &node, &x, &y)) != 3) { + cerr << "UtilGraphLib::read_data ERROR I/O : error reading " + << "DISPLAY_DATA" << endl; + break; + } - posx[node - 1] = (int)(x + 0.5); - posy[node - 1] = (int)(y + 0.5); - } - - if (fscanf(f, "%lf", &fdummy)) { - cerr << "UtilGraphLib::read_data ERROR I/O : too much display data" - << endl; - break; - } - - break; - case 10 : //NODE_COORD_SECTION (open memory and set posx, - //posy, coordx, coordy, coordz) - if (nctype == -1) { - nctype = 0; //if not given: TWOD_COORDS - } - - if (dtype == -1 && ((wtype == _EUC_2D) || //display type not defd - (wtype == _MAX_2D) || //yet && can disp - (wtype == _MAN_2D) || - (wtype == _ATT) )) { - dtype = 0; //COORD_DISPLY - } - - if (dtype == 0) { - posx = new int[n_vertices]; - posy = new int[n_vertices]; - } - - coordx = new double[n_vertices]; - coordy = new double[n_vertices]; - - if (nctype == 1) { - coordz = new double[n_vertices]; - } - - for (i = 0; i < n_vertices; i++) { - if (nctype == 0) //TWOD_COORDS - if (fscanf(f, "%d%lf%lf", &node, &coord_x, &coord_y) != 3) { - cerr << "UtilGraphLib::read_data ERROR I/O : error reading " - << "NODE_COORD. Aborting." << endl; - abort(); - } - - if (nctype == 1) //THREED_COORDS - if (fscanf(f, "%d%lf%lf%lf", &node, &coord_x, &coord_y, &coord_z) != 4) { - cerr << "UtilGraphLib::read_data ERROR I/O : error reading " - << "NODE_COORD. Aborting." 
<< endl; - abort(); - } - - coordx[node - 1] = coord_x; - coordy[node - 1] = coord_y; - - //since position is an integer and coord is - //a double, round off here if dtype is EXPLICIT - if (dtype == 0) { - posx[node - 1] = (int)coord_x; - posy[node - 1] = (int)coord_y; - } + posx[node - 1] = (int)(x + 0.5); + posy[node - 1] = (int)(y + 0.5); + } - if (nctype == 1) { - coordz[node - 1] = coord_z; - } + if (fscanf(f, "%lf", &fdummy)) { + cerr << "UtilGraphLib::read_data ERROR I/O : too much display data" + << endl; + break; + } - if (wtype == _GEO) { //GEO - deg = (int)(coordx[node - 1]); - min = coordx[node - 1] - deg; - coordx[node - 1] = MY_PI * (deg + 5.0 * min / 3.0 ) / 180.0; - deg = (int)(coordy[node - 1]); - min = coordy[node - 1] - deg; - coordy[node - 1] = MY_PI * (deg + 5.0 * min / 3.0 ) / 180.0; - } - } + break; + case 10: // NODE_COORD_SECTION (open memory and set posx, + // posy, coordx, coordy, coordz) + if (nctype == -1) { + nctype = 0; // if not given: TWOD_COORDS + } + + if (dtype == -1 && ((wtype == _EUC_2D) || // display type not defd + (wtype == _MAX_2D) || // yet && can disp + (wtype == _MAN_2D) || (wtype == _ATT))) { + dtype = 0; // COORD_DISPLY + } - if (fscanf(f, "%d%lf%lf%lf", &node, &coord_x, &coord_y, &coord_z)) { - cerr << "UtilGraphLib::read_data ERROR I/O: too much data in " + if (dtype == 0) { + posx = new int[n_vertices]; + posy = new int[n_vertices]; + } + + coordx = new double[n_vertices]; + coordy = new double[n_vertices]; + + if (nctype == 1) { + coordz = new double[n_vertices]; + } + + for (i = 0; i < n_vertices; i++) { + if (nctype == 0) // TWOD_COORDS + if (fscanf(f, "%d%lf%lf", &node, &coord_x, &coord_y) != 3) { + cerr << "UtilGraphLib::read_data ERROR I/O : error reading " << "NODE_COORD. Aborting." << endl; abort(); - } + } - break; - case 11: //NODE_COORD_TYPE - sscanf(line, "%s", tmp); + if (nctype == 1) // THREED_COORDS + if (fscanf(f, "%d%lf%lf%lf", &node, &coord_x, &coord_y, &coord_z) != + 4) { + cerr << "UtilGraphLib::read_data ERROR I/O : error reading " + << "NODE_COORD. Aborting." << endl; + abort(); + } + + coordx[node - 1] = coord_x; + coordy[node - 1] = coord_y; + + // since position is an integer and coord is + // a double, round off here if dtype is EXPLICIT + if (dtype == 0) { + posx[node - 1] = (int)coord_x; + posy[node - 1] = (int)coord_y; + } + + if (nctype == 1) { + coordz[node - 1] = coord_z; + } + + if (wtype == _GEO) { // GEO + deg = (int)(coordx[node - 1]); + min = coordx[node - 1] - deg; + coordx[node - 1] = MY_PI * (deg + 5.0 * min / 3.0) / 180.0; + deg = (int)(coordy[node - 1]); + min = coordy[node - 1] - deg; + coordy[node - 1] = MY_PI * (deg + 5.0 * min / 3.0) / 180.0; + } + } - for (nctype = 0; nctype < NCTYPE_NUM; nctype++) - if (strcmp(nctypes[nctype], tmp) == 0) { - break; - } + if (fscanf(f, "%d%lf%lf%lf", &node, &coord_x, &coord_y, &coord_z)) { + cerr << "UtilGraphLib::read_data ERROR I/O: too much data in " + << "NODE_COORD. Aborting." << endl; + abort(); + } - if (nctype == NCTYPE_NUM) { - cerr << "UtilGraphLib::read_data ERROR I/O : unknown node_coord_type" - << tmp << ". Aborting." << endl; - abort(); - } + break; + case 11: // NODE_COORD_TYPE + sscanf(line, "%s", tmp); - break; - case 12: //DEPOT_SECTION - fscanf(f, "%d", &k); + for (nctype = 0; nctype < NCTYPE_NUM; nctype++) + if (strcmp(nctypes[nctype], tmp) == 0) { + break; + } - if (k != 1) { - cerr << "UtilGraphLib::read_data ERROR I/O : depot must be node 1." - << "Aborting." 
<< endl; - abort(); - } - - depot = k - 1; - - while (-1 != k) { - fscanf(f, "%d", &k); - } - - break; - case 13: //CAPACITY_VOL - sscanf(line, "%d", &k); - capacity_volume = true; - break; - case 14: //DEMAND_SECTION - vertex_wt = new int[n_vertices]; - - for (i = 0; i < n_vertices; i++) { - if (capacity_volume) { - if (fscanf(f, "%d%d%d", &k, &l, &m) != 3) { - cerr << "UtilGraphLib::read_data ERROR I/O : error reading " - << "DEMAND_SECTION. Aborting." << endl; - abort(); - } - } else if (fscanf(f, "%d%d", &k, &l) != 2) { - cerr << "UtilGraphLib::read_data ERROR I/O : error reading " - << "DEMAND_SECTION. Aborting." << endl; - abort(); - } + if (nctype == NCTYPE_NUM) { + cerr << "UtilGraphLib::read_data ERROR I/O : unknown node_coord_type" + << tmp << ". Aborting." << endl; + abort(); + } + + break; + case 12: // DEPOT_SECTION + fscanf(f, "%d", &k); - vertex_wt[k - 1] = l; - } + if (k != 1) { + cerr << "UtilGraphLib::read_data ERROR I/O : depot must be node 1." + << "Aborting." << endl; + abort(); + } + + depot = k - 1; + + while (-1 != k) { + fscanf(f, "%d", &k); + } - if (fscanf(f, "%d%d", &k, &l)) { - cerr << "UtilGraphLib::read_data ERROR I/O : too much data in " + break; + case 13: // CAPACITY_VOL + sscanf(line, "%d", &k); + capacity_volume = true; + break; + case 14: // DEMAND_SECTION + vertex_wt = new int[n_vertices]; + + for (i = 0; i < n_vertices; i++) { + if (capacity_volume) { + if (fscanf(f, "%d%d%d", &k, &l, &m) != 3) { + cerr << "UtilGraphLib::read_data ERROR I/O : error reading " << "DEMAND_SECTION. Aborting." << endl; abort(); - } + } + } else if (fscanf(f, "%d%d", &k, &l) != 2) { + cerr << "UtilGraphLib::read_data ERROR I/O : error reading " + << "DEMAND_SECTION. Aborting." << endl; + abort(); + } + + vertex_wt[k - 1] = l; + } - break; - case 18: //EOF - default: - break; + if (fscanf(f, "%d%d", &k, &l)) { + cerr << "UtilGraphLib::read_data ERROR I/O : too much data in " + << "DEMAND_SECTION. Aborting." 
<< endl; + abort(); } - } - if (f != stdin) { - fclose(f); - } + break; + case 18: // EOF + default: + break; + } + } - //calculate all the distances explcitly and then use distance type EXPLICIT - if (wtype != _EXPLICIT) { - edge_wt = new int[n_edges]; + if (f != stdin) { + fclose(f); + } - for (i = 1, k = 0; i < n_vertices; i++) { - for (j = 0; j < i; j++) { - edge_wt[k++] = compute_icost(wtype, i, j); - } + // calculate all the distances explcitly and then use distance type EXPLICIT + if (wtype != _EXPLICIT) { + edge_wt = new int[n_edges]; + + for (i = 1, k = 0; i < n_vertices; i++) { + for (j = 0; j < i; j++) { + edge_wt[k++] = compute_icost(wtype, i, j); } - } + } + } - /*for(int i = 0; i < n_edges; i++){ - printf("\ngraphLib edge_wt[%d]: %d", i, edge_wt[i]); - }*/ + /*for(int i = 0; i < n_edges; i++){ + printf("\ngraphLib edge_wt[%d]: %d", i, edge_wt[i]); + }*/ } /**********************************************************************/ -//This function computes the cost of the edge from va to vb -int UtilGraphLib::compute_icost(const int wtype, const int va, const int vb) -{ - double q1, q2, q3, dx, dy, dz; - int cost = 0; - const double RRR = 6378.388; - enum DIST {_EXPLICIT, _EUC_2D, _EUC_3D, _MAX_2D, _MAX_3D, - _MAN_2D, _MAN_3D, _CEIL_2D, _GEO, _ATT - }; - - if (wtype == _GEO) { - q1 = cos( coordy[va] - coordy[vb] ); - q2 = cos( coordx[va] - coordx[vb] ); - q3 = cos( coordx[va] + coordx[vb] ); - cost = (int) (RRR * acos(0.5 * ((1.0 + q1) * q2 - (1.0 - q1) * q3)) + 1.0); - } else { - dx = coordx[va] - coordx[vb]; - dy = coordy[va] - coordy[vb]; - - switch (wtype) { - case _EUC_2D : - cost = (int) floor( sqrt( dx * dx + dy * dy ) + 0.5); - break; - case _EUC_3D : - dz = coordz[va] - coordz[vb]; - cost = (int) floor( sqrt( dx * dx + dy * dy + dz * dz) + 0.5); - break; - case _MAX_2D : - cost = (int) fabs(dx); - - if (cost < fabs(dy)) { - cost = (int) fabs(dy); - } - - break; - case _MAX_3D : - dz = coordz[va] - coordz[vb]; - cost = (int) fabs(dx); - - if (cost < fabs(dy)) { - cost = (int) fabs(dy); - } - - if (cost < fabs(dz)) { - cost = (int) fabs(dz); - } - - break; - case _MAN_2D : - cost = (int) floor( dx + dy + 0.5 ); - break; - case _MAN_3D : - dz = coordz[va] - coordz[vb]; - cost = (int) floor( dx + dy + dz + 0.5 ); - break; - case _CEIL_2D: - cost = (int)ceil( sqrt( dx * dx + dy * dy ) + 0.5); - break; - case _ATT: - cost = (int)( sqrt( (dx * dx + dy * dy ) / 10 ) + 1); - break; +// This function computes the cost of the edge from va to vb +int UtilGraphLib::compute_icost(const int wtype, const int va, const int vb) { + double q1, q2, q3, dx, dy, dz; + int cost = 0; + const double RRR = 6378.388; + enum DIST { + _EXPLICIT, + _EUC_2D, + _EUC_3D, + _MAX_2D, + _MAX_3D, + _MAN_2D, + _MAN_3D, + _CEIL_2D, + _GEO, + _ATT + }; + + if (wtype == _GEO) { + q1 = cos(coordy[va] - coordy[vb]); + q2 = cos(coordx[va] - coordx[vb]); + q3 = cos(coordx[va] + coordx[vb]); + cost = (int)(RRR * acos(0.5 * ((1.0 + q1) * q2 - (1.0 - q1) * q3)) + 1.0); + } else { + dx = coordx[va] - coordx[vb]; + dy = coordy[va] - coordy[vb]; + + switch (wtype) { + case _EUC_2D: + cost = (int)floor(sqrt(dx * dx + dy * dy) + 0.5); + break; + case _EUC_3D: + dz = coordz[va] - coordz[vb]; + cost = (int)floor(sqrt(dx * dx + dy * dy + dz * dz) + 0.5); + break; + case _MAX_2D: + cost = (int)fabs(dx); + + if (cost < fabs(dy)) { + cost = (int)fabs(dy); } - } - return ( cost ); -} + break; + case _MAX_3D: + dz = coordz[va] - coordz[vb]; + cost = (int)fabs(dx); + if (cost < fabs(dy)) { + cost = (int)fabs(dy); + } + + if (cost < 
fabs(dz)) { + cost = (int)fabs(dz); + } + break; + case _MAN_2D: + cost = (int)floor(dx + dy + 0.5); + break; + case _MAN_3D: + dz = coordz[va] - coordz[vb]; + cost = (int)floor(dx + dy + dz + 0.5); + break; + case _CEIL_2D: + cost = (int)ceil(sqrt(dx * dx + dy * dy) + 0.5); + break; + case _ATT: + cost = (int)(sqrt((dx * dx + dy * dy) / 10) + 1); + break; + } + } + + return (cost); +} diff --git a/Dip/src/UtilHash.cpp b/Dip/src/UtilHash.cpp index cc363716..aea29a9b 100644 --- a/Dip/src/UtilHash.cpp +++ b/Dip/src/UtilHash.cpp @@ -13,17 +13,17 @@ //===========================================================================// // --------------------------------------------------------------------- // +#include #include #include -#include using namespace std; -#include "UtilMacros.h" #include "Decomp.h" +#include "UtilMacros.h" -//http://burtleburtle.net/bob/hash/evahash.html or just map? -//if this is not really doing hashing, then move this function into -//util macros +// http://burtleburtle.net/bob/hash/evahash.html or just map? +// if this is not really doing hashing, then move this function into +// util macros //--- //--- NOTE: @@ -32,81 +32,66 @@ using namespace std; //--- // --------------------------------------------------------------------- // -string UtilCreateStringHash(const int len, - const double* els, - const int precision) -{ - stringstream ss; - ss << setprecision(precision); - - for (int i = 0; i < len; i++) { - if (!UtilIsZero(els[i])) { - ss << i << "_" << els[i] << "_"; - } - } - - return ss.str(); +string UtilCreateStringHash(const int len, const double *els, + const int precision) { + stringstream ss; + ss << setprecision(precision); + + for (int i = 0; i < len; i++) { + if (!UtilIsZero(els[i])) { + ss << i << "_" << els[i] << "_"; + } + } + + return ss.str(); } // --------------------------------------------------------------------- // -string UtilCreateStringHash(const int len, - const int* ind, - const double* els, - const int precision) -{ - stringstream ss; - ss << setprecision(precision); - - for (int i = 0; i < len; i++) { - if (!UtilIsZero(els[i])) { - ss << ind[i] << "_" << els[i] << "_"; - } - } - - return ss.str(); +string UtilCreateStringHash(const int len, const int *ind, const double *els, + const int precision) { + stringstream ss; + ss << setprecision(precision); + + for (int i = 0; i < len; i++) { + if (!UtilIsZero(els[i])) { + ss << ind[i] << "_" << els[i] << "_"; + } + } + + return ss.str(); } // --------------------------------------------------------------------- // -string UtilCreateStringHash(const int len, - const int* ind, - const double els, - const int precision) -{ - stringstream ss; - ss << setprecision(precision); - - for (int i = 0; i < len; i++) { - if (!UtilIsZero(els)) { - ss << ind[i] << "_" << els << "_"; - } - } - - return ss.str(); +string UtilCreateStringHash(const int len, const int *ind, const double els, + const int precision) { + stringstream ss; + ss << setprecision(precision); + + for (int i = 0; i < len; i++) { + if (!UtilIsZero(els)) { + ss << ind[i] << "_" << els << "_"; + } + } + + return ss.str(); } // --------------------------------------------------------------------- // -string UtilCreateStringHash(const int len, - const int* ind, - const double* els, - const char sense, - const double rhs, - double infinity, - const int precision) -{ - stringstream ss; - ss << setprecision(precision); - - if (rhs >= infinity) { - ss << "INF"; - } else if (rhs <= -infinity) { - ss << "-INF"; - } else { - ss << rhs; - } - - ss << 
"_" << sense << "_"; - ss << UtilCreateStringHash(len, ind, els, precision); - return ss.str(); +string UtilCreateStringHash(const int len, const int *ind, const double *els, + const char sense, const double rhs, double infinity, + const int precision) { + stringstream ss; + ss << setprecision(precision); + + if (rhs >= infinity) { + ss << "INF"; + } else if (rhs <= -infinity) { + ss << "-INF"; + } else { + ss << rhs; + } + + ss << "_" << sense << "_"; + ss << UtilCreateStringHash(len, ind, els, precision); + return ss.str(); } - - diff --git a/Dip/src/UtilKnapsack.cpp b/Dip/src/UtilKnapsack.cpp index 85d42b48..270b8fc8 100644 --- a/Dip/src/UtilKnapsack.cpp +++ b/Dip/src/UtilKnapsack.cpp @@ -12,7 +12,6 @@ // All Rights Reserved. // //===========================================================================// - //=========================================================================== //--- //--- Knapsack Problem: @@ -21,483 +20,450 @@ //--- x binary //--- //=========================================================================== -#include "UtilMacros.h" #include "UtilKnapsack.h" +#include "UtilMacros.h" //=========================================================================== -//TODO: change this to a class -#define OK 0 -#define ERR_NO_MEMORY 1 +// TODO: change this to a class +#define OK 0 +#define ERR_NO_MEMORY 1 -#define KP_OPTIMAL 0 -#define KP_INFEASIBLE 1 -#define KP_ERROR 2 -#define KP_ITERLIMIT 3 +#define KP_OPTIMAL 0 +#define KP_INFEASIBLE 1 +#define KP_ERROR 2 +#define KP_ITERLIMIT 3 -#define EPSILON 1.0e-6 -#define HS_MAXITER 1000000 -#define BIGM 1.0e+17 -#define IS_SMALL( x ) ( fabs( x ) < EPSILON ) -#define IS_TINY( x ) ( fabs( x ) < 1.0e-15 ) +#define EPSILON 1.0e-6 +#define HS_MAXITER 1000000 +#define BIGM 1.0e+17 +#define IS_SMALL(x) (fabs(x) < EPSILON) +#define IS_TINY(x) (fabs(x) < 1.0e-15) //=========================================================================== //#define DEBUG_KNAP(x) #define DEBUG_KNAP(x) x - /*==========================================================================*/ /* SOR_IntDblArr */ /*==========================================================================*/ /*==========================================================================*/ -SOR_IntDblArrPtr SOR_IntDblArrNew (int size, - int* pstatus) -{ - SOR_IntDblArrPtr A = NULL; - *pstatus = OK; - A = (SOR_IntDblArrPtr) malloc(sizeof(SOR_IntDblArr)); - - if (A == NULL) { - *pstatus = ERR_NO_MEMORY; - return A; - } - - A->len = 0; - A->size = size; - A->arr = (SOR_IntDbl*) malloc(size * sizeof(SOR_IntDbl)); - - if (A->arr == NULL) { - *pstatus = ERR_NO_MEMORY; - return A; - } - - return A; +SOR_IntDblArrPtr SOR_IntDblArrNew(int size, int *pstatus) { + SOR_IntDblArrPtr A = NULL; + *pstatus = OK; + A = (SOR_IntDblArrPtr)malloc(sizeof(SOR_IntDblArr)); + + if (A == NULL) { + *pstatus = ERR_NO_MEMORY; + return A; + } + + A->len = 0; + A->size = size; + A->arr = (SOR_IntDbl *)malloc(size * sizeof(SOR_IntDbl)); + + if (A->arr == NULL) { + *pstatus = ERR_NO_MEMORY; + return A; + } + + return A; } /*==========================================================================*/ -void SOR_IntDblSwap(SOR_IntDbl* A, - SOR_IntDbl* B) -{ - SOR_IntDbl temp; - temp.i = B->i; - temp.x = B->x; - B->i = A->i; - B->x = A->x; - A->i = temp.i; - A->x = temp.x; +void SOR_IntDblSwap(SOR_IntDbl *A, SOR_IntDbl *B) { + SOR_IntDbl temp; + temp.i = B->i; + temp.x = B->x; + B->i = A->i; + B->x = A->x; + A->i = temp.i; + A->x = temp.x; } 
/*==========================================================================*/ -void SOR_IntDblArrPrint(const SOR_IntDblArr* A) -{ - int i; - - for (i = 0; i < A->len; i++) { - printf("index: %d, i: %d, x: %g\n", - i, A->arr[i].i, A->arr[i].x); - } -} +void SOR_IntDblArrPrint(const SOR_IntDblArr *A) { + int i; -/*==========================================================================*/ -void SOR_IntDblArrFree(SOR_IntDblArrPtr* A) -{ - if (A == NULL) { - return; - } - - if ((*A) == NULL) { - return; - } - - UTIL_DELARR((*A)->arr); - UTIL_DELPTR(*A); + for (i = 0; i < A->len; i++) { + printf("index: %d, i: %d, x: %g\n", i, A->arr[i].i, A->arr[i].x); + } } - /*==========================================================================*/ -static void SOR_QSortIntDblDec(SOR_IntDbl* item, int lft, int rght) -{ - int i = lft; - int j = rght; - int lr = (lft + rght) / 2; - SOR_IntDbl x; - x.i = item[lr].i; - x.x = item[lr].x; - - do { - while (item[i].x > x.x && i < rght) { - i++; - } - - while (item[j].x < x.x && j > lft) { - j--; - } - - if (i <= j) { - SOR_IntDblSwap(&item[i], &item[j]); - i++; - j--; - } - } while (i <= j); +void SOR_IntDblArrFree(SOR_IntDblArrPtr *A) { + if (A == NULL) { + return; + } - if (lft < j) { - SOR_QSortIntDblDec(item, lft, j); - } + if ((*A) == NULL) { + return; + } - if (i < rght) { - SOR_QSortIntDblDec(item, i, rght); - } + UTIL_DELARR((*A)->arr); + UTIL_DELPTR(*A); } - - - +/*==========================================================================*/ +static void SOR_QSortIntDblDec(SOR_IntDbl *item, int lft, int rght) { + int i = lft; + int j = rght; + int lr = (lft + rght) / 2; + SOR_IntDbl x; + x.i = item[lr].i; + x.x = item[lr].x; + + do { + while (item[i].x > x.x && i < rght) { + i++; + } + + while (item[j].x < x.x && j > lft) { + j--; + } + + if (i <= j) { + SOR_IntDblSwap(&item[i], &item[j]); + i++; + j--; + } + } while (i <= j); + + if (lft < j) { + SOR_QSortIntDblDec(item, lft, j); + } + + if (i < rght) { + SOR_QSortIntDblDec(item, i, rght); + } +} //=========================================================================== -int KnapsackSortRatio(const int n, - const double* p, - const double* w, - double* psort, - double* wsort) -{ - //--- - //--- Sort non-increasing order of p[j]/w[j]. Return the new - //--- order in psort, wsort which will be of size n+1 to conform - //--- to the routine SOR_KnapsackOptimizeHS. - //--- - int i; - int status = 0; - SOR_IntDblArrPtr ratio = NULL; - ratio = SOR_IntDblArrNew(n, &status); - - if (status != OK) { - return status; - } - - for (i = 0; i < n; i++) { - assert(w[i] >= 0); - assert(p[i] >= 0); - assert(!IS_TINY(w[i])); - ratio->arr[i].x = p[i] / w[i]; - ratio->arr[i].i = i; - } - - ratio->len = n; - - if (n > 1) { - SOR_QSortIntDblDec(ratio->arr, 0, n - 1); - } - - for (i = 0; i < n; i++) { - psort[i] = p[ratio->arr[i].i]; - wsort[i] = w[ratio->arr[i].i]; - printf("i:%d j:%d p:%g w:%g\n", - i, ratio->arr[i].i, psort[i], wsort[i]); - } - - psort[n] = 0; - wsort[n] = BIGM; - SOR_IntDblArrFree(&ratio); - return status; +int KnapsackSortRatio(const int n, const double *p, const double *w, + double *psort, double *wsort) { + //--- + //--- Sort non-increasing order of p[j]/w[j]. Return the new + //--- order in psort, wsort which will be of size n+1 to conform + //--- to the routine SOR_KnapsackOptimizeHS. 
+ //--- + int i; + int status = 0; + SOR_IntDblArrPtr ratio = NULL; + ratio = SOR_IntDblArrNew(n, &status); + + if (status != OK) { + return status; + } + + for (i = 0; i < n; i++) { + assert(w[i] >= 0); + assert(p[i] >= 0); + assert(!IS_TINY(w[i])); + ratio->arr[i].x = p[i] / w[i]; + ratio->arr[i].i = i; + } + + ratio->len = n; + + if (n > 1) { + SOR_QSortIntDblDec(ratio->arr, 0, n - 1); + } + + for (i = 0; i < n; i++) { + psort[i] = p[ratio->arr[i].i]; + wsort[i] = w[ratio->arr[i].i]; + printf("i:%d j:%d p:%g w:%g\n", i, ratio->arr[i].i, psort[i], wsort[i]); + } + + psort[n] = 0; + wsort[n] = BIGM; + SOR_IntDblArrFree(&ratio); + return status; } //=========================================================================== -void KnapsackSortRatioOut(const int n, - const double* p, - const double* w, - double* psort, - double* wsort, - SOR_IntDbl* ratio) -{ - //--- - //--- Sort non-increasing order of p[j]/w[j]. Return the new - //--- order in psort, wsort which will be of size n+1 to conform - //--- to the routine SOR_KnapsackOptimizeHS. - //--- - //--- This version accepts a pointer to ratio and returns it after - //--- the sort. We need this version if we need to construct the - //--- solution at the end, since the KP solver's return is not in - //--- the original order. - //--- - int i; - - for (i = 0; i < n; i++) { - assert(!IS_SMALL(w[i])); - ratio[i].x = p[i] / w[i]; - ratio[i].i = i; - } - - if (n > 1) { - SOR_QSortIntDblDec(ratio, 0, n - 1); - } - - for (i = 0; i < n; i++) { - psort[i] = p[ratio[i].i]; - wsort[i] = w[ratio[i].i]; - printf("i:%d j:%d p:%g w:%g\n", - i, ratio[i].i, psort[i], wsort[i]); - } - - psort[n] = 0; - wsort[n] = BIGM; +void KnapsackSortRatioOut(const int n, const double *p, const double *w, + double *psort, double *wsort, SOR_IntDbl *ratio) { + //--- + //--- Sort non-increasing order of p[j]/w[j]. Return the new + //--- order in psort, wsort which will be of size n+1 to conform + //--- to the routine SOR_KnapsackOptimizeHS. + //--- + //--- This version accepts a pointer to ratio and returns it after + //--- the sort. We need this version if we need to construct the + //--- solution at the end, since the KP solver's return is not in + //--- the original order. 
+ //--- + int i; + + for (i = 0; i < n; i++) { + assert(!IS_SMALL(w[i])); + ratio[i].x = p[i] / w[i]; + ratio[i].i = i; + } + + if (n > 1) { + SOR_QSortIntDblDec(ratio, 0, n - 1); + } + + for (i = 0; i < n; i++) { + psort[i] = p[ratio[i].i]; + wsort[i] = w[ratio[i].i]; + printf("i:%d j:%d p:%g w:%g\n", i, ratio[i].i, psort[i], wsort[i]); + } + + psort[n] = 0; + wsort[n] = BIGM; } //=========================================================================== -int KnapsackOptimizeHS(const int n, - const double c, - double* p, - double* w, - int* x, - double* z, - int* pstatus) -{ - //--- - //--- Knapsack Problems (Algorithms and Computer Implementations) - //--- Horowitz-Sanhi DFS Branch and Bound (Martello/Toth p30-31) - //--- - //--- INPUT: - //--- p and w should be sent in as size n+1 - //--- the input is ordered nonincreasing in p[j]/w[j] - //--- x should be sent in calloc'd - //--- - //--- OUTPUT: - //--- x is the best solution (**in permuted order, NOT original order**) - //--- z is the best solution value = sum{j = 0..n-1} p[j] x[j] - //--- - //TODO: better return codes - int i, j, r, k, iter; - double zhat; // curr solution value = sum{j = 0..n-1} p[j] xhat[j] - double chat; // curr residual capacity = c - sum{j = 0..n-1} w[j] xhat[j] - double wSum, pSum, u; - int* xhat = NULL; - *pstatus = OK; - DEBUG_KNAP( - printf("\n" "//---in HS"); - - for (i = 0; i < n; i++) { - printf("\n" "p[%d]: %12.10f, w[%d]: %12.10f", - i, p[i], i, w[i]); - } - printf("\n" "CAP: %12.10f", c); - fflush(stdout); - printf("\n" "//---out HS"); - ); - - //--- - //--- we assume a[j] > 0, for all j, and x in {0,1}, so if b < 0, INF - //--- - if (c < 0) { - return KP_INFEASIBLE; - } - - memset(x, 0, n * sizeof(int)); - - if (n == 0) { - return KP_OPTIMAL; - } - - assert(n >= 0); - - //--- - //--- deal with some trivial cases here - //--- - if (n == 1) { - //--- - //--- if w[j] <= b, y = 1, psi = coverEl - //--- else , y = 0, psi = 0 - //--- - if ((w[0] - c ) > EPSILON) { - *z = 0; - x[0] = 0; - } else { - *z = p[0]; - x[1] = 1; - } +int KnapsackOptimizeHS(const int n, const double c, double *p, double *w, + int *x, double *z, int *pstatus) { + //--- + //--- Knapsack Problems (Algorithms and Computer Implementations) + //--- Horowitz-Sanhi DFS Branch and Bound (Martello/Toth p30-31) + //--- + //--- INPUT: + //--- p and w should be sent in as size n+1 + //--- the input is ordered nonincreasing in p[j]/w[j] + //--- x should be sent in calloc'd + //--- + //--- OUTPUT: + //--- x is the best solution (**in permuted order, NOT original order**) + //--- z is the best solution value = sum{j = 0..n-1} p[j] x[j] + //--- + // TODO: better return codes + int i, j, r, k, iter; + double zhat; // curr solution value = sum{j = 0..n-1} p[j] xhat[j] + double chat; // curr residual capacity = c - sum{j = 0..n-1} w[j] xhat[j] + double wSum, pSum, u; + int *xhat = NULL; + *pstatus = OK; + DEBUG_KNAP(printf("\n" + "//---in HS"); + + for (i = 0; i < n; i++) { + printf("\n" + "p[%d]: %12.10f, w[%d]: %12.10f", + i, p[i], i, w[i]); + } printf("\n" + "CAP: %12.10f", + c); + fflush(stdout); printf("\n" + "//---out HS");); + + //--- + //--- we assume a[j] > 0, for all j, and x in {0,1}, so if b < 0, INF + //--- + if (c < 0) { + return KP_INFEASIBLE; + } + + memset(x, 0, n * sizeof(int)); + + if (n == 0) { + return KP_OPTIMAL; + } + + assert(n >= 0); + + //--- + //--- deal with some trivial cases here + //--- + if (n == 1) { + //--- + //--- if w[j] <= b, y = 1, psi = coverEl + //--- else , y = 0, psi = 0 + //--- + if ((w[0] - c) > EPSILON) { + 
*z = 0; + x[0] = 0; + } else { + *z = p[0]; + x[1] = 1; + } + + return KP_OPTIMAL; + } else if (n == 2) { + //--- + //--- Possible solutions: (0,0), (0,1), (1,0), (1,1). + //--- Check each one, keep the best feasible one. + //--- + //--- We have ?? + //--- already checked above that w[i] < cap, for each i. + //--- + *z = 0.0; + x[0] = 0; + x[1] = 0; + + if (((w[1] - c) <= EPSILON) && (p[1] > *z)) { + *z = p[1]; + x[0] = 0; + x[1] = 1; + } - return KP_OPTIMAL; - } else if (n == 2) { - //--- - //--- Possible solutions: (0,0), (0,1), (1,0), (1,1). - //--- Check each one, keep the best feasible one. + if (((w[0] - c) <= EPSILON) && (p[0] > *z)) { + *z = p[0]; + x[0] = 1; + x[1] = 0; + } + + if (((w[0] + w[1] - c) <= EPSILON) && ((p[0] + p[1]) > *z)) { + *z = p[0] + p[1]; + x[0] = 1; + x[1] = 1; + } + + return KP_OPTIMAL; + } + + xhat = (int *)calloc(n, sizeof(int)); + + if (xhat == NULL) { + *pstatus = ERR_NO_MEMORY; + return KP_ERROR; + } + + //--- + //--- (1) initialize + //--- + *z = 0; + zhat = 0; + chat = c; + p[n] = 0; + w[n] = BIGM; + j = 0; + iter = 0; + + while (1) { + iter++; + + if (iter > HS_MAXITER) { + return KP_ITERLIMIT; + } + + //--- + //--- (2) compute the upper bound U1 + //--- + //--- r = arg min{i : sum{k = j..i} w[k] > chat} + //--- u = sum{k = j..r-1} p[k] + //--- + floor((chat - sum{k = j..r-1} w[k]) p[r]/w[r]) + //--- if(z >= zhat + u) then go to 5 + r = j; + wSum = w[r]; + pSum = p[r]; + + // while((wSum < (chat - EPSILON)) && r <= n){ + // if wSum <= chat, continue + // if wSum > chat, stop + while ((wSum <= chat) && r <= n) { + r++; + wSum += w[r]; + pSum += p[r]; + printf("r=%d, wSum=%12.10f, chat=%12.10f\n", r, wSum, chat); + } + + assert(r >= 0 && r <= n); + wSum -= w[r]; + pSum -= p[r]; + u = pSum + floor((chat - wSum) * p[r] / w[r]); + DEBUG_KNAP(printf("\n" + "z, %g, zhat: %g, u: %g r: %d ws: %g, ps: %g", + *z, zhat, u, r, wSum, pSum);); + + // if(*z + EPSILON <= zhat + u){ + if (*z <= zhat + u) { + // if(!(*z >= zhat + u)){ //--- - //--- We have ?? - //--- already checked above that w[i] < cap, for each i. 
+ //--- (3) perform a forward step //--- - *z = 0.0; - x[0] = 0; - x[1] = 0; - - if (((w[1] - c) <= EPSILON) && (p[1] > *z)) { - *z = p[1]; - x[0] = 0; - x[1] = 1; - } - - if (((w[0] - c) <= EPSILON) && (p[0] > *z)) { - *z = p[0]; - x[0] = 1; - x[1] = 0; - } - - if (((w[0] + w[1] - c) <= EPSILON) && ((p[0] + p[1]) > *z)) { - *z = p[0] + p[1]; - x[0] = 1; - x[1] = 1; - } - - return KP_OPTIMAL; - } - - xhat = (int*) calloc(n, sizeof(int)); - - if (xhat == NULL) { - *pstatus = ERR_NO_MEMORY; - return KP_ERROR; - } - - //--- - //--- (1) initialize - //--- - *z = 0; - zhat = 0; - chat = c; - p[n] = 0; - w[n] = BIGM; - j = 0; - iter = 0; - - while (1) { - iter++; - - if (iter > HS_MAXITER) { - return KP_ITERLIMIT; - } + do { + DEBUG_KNAP(printf("\n" + "w[%d]: %9.6f, caphat: %9.6f", + j, w[j], chat);); + + // while(w[j] - chat <= EPSILON){ + while (w[j] <= chat) { + chat -= w[j]; + zhat += p[j]; + xhat[j] = 1; + j++; + } + + if (j < n) { + xhat[j] = 0; + j++; + } + } while (j == n - 1); //--- - //--- (2) compute the upper bound U1 + //--- if j < n-1, then goto 2 //--- - //--- r = arg min{i : sum{k = j..i} w[k] > chat} - //--- u = sum{k = j..r-1} p[k] - //--- + floor((chat - sum{k = j..r-1} w[k]) p[r]/w[r]) - //--- if(z >= zhat + u) then go to 5 - r = j; - wSum = w[r]; - pSum = p[r]; - - //while((wSum < (chat - EPSILON)) && r <= n){ - //if wSum <= chat, continue - //if wSum > chat, stop - while ((wSum <= chat) && r <= n) { - r++; - wSum += w[r]; - pSum += p[r]; - printf("r=%d, wSum=%12.10f, chat=%12.10f\n", - r, wSum, chat); + if (j < n - 1) { + continue; } - assert(r >= 0 && r <= n); - wSum -= w[r]; - pSum -= p[r]; - u = pSum + floor((chat - wSum) * p[r] / w[r]); - DEBUG_KNAP(printf("\n" "z, %g, zhat: %g, u: %g r: %d ws: %g, ps: %g", - *z, zhat, u, r, wSum, pSum); - ); - - //if(*z + EPSILON <= zhat + u){ - if (*z <= zhat + u) { - //if(!(*z >= zhat + u)){ - //--- - //--- (3) perform a forward step - //--- - do { - DEBUG_KNAP(printf("\n" "w[%d]: %9.6f, caphat: %9.6f", - j, w[j], chat); - ); - - //while(w[j] - chat <= EPSILON){ - while (w[j] <= chat) { - chat -= w[j]; - zhat += p[j]; - xhat[j] = 1; - j++; - } - - if (j < n) { - xhat[j] = 0; - j++; - } - } while (j == n - 1); - - //--- - //--- if j < n-1, then goto 2 - //--- - if (j < n - 1) { - continue; - } - - //--- - //--- (4) update the best solution so far - //--- - DEBUG_KNAP(printf("\n" "update best sol zhat %g", zhat); - ); - - //if(zhat > *z){ - if (zhat >= *z) { - *z = zhat; - - for (k = 0; k < n; k++) { - x[k] = xhat[k]; - } - } - - j = n - 1; - - if (xhat[n - 1] == 1) { - chat += w[n - 1]; - zhat -= p[n - 1]; - xhat[n - 1] = 0; - } - } - - DEBUG_KNAP(printf("\n" "backtrack"); - ); //--- - //--- (5) backtrack (find i = max arg{k < j : x[k] = 1} + //--- (4) update the best solution so far //--- - i = std::max(0, j - 1); // need max, in case i = 0 + DEBUG_KNAP(printf("\n" + "update best sol zhat %g", + zhat);); - while ((xhat[i] != 1) && i > 0) { - i--; - DEBUG_KNAP(printf("\n" "i : %d xhat[i]: %d", i, xhat[i]); - ); - } + // if(zhat > *z){ + if (zhat >= *z) { + *z = zhat; - if (i == 0) { - break; + for (k = 0; k < n; k++) { + x[k] = xhat[k]; + } } - chat += w[i]; - zhat -= p[i]; - xhat[i] = 0; - j = i + 1; - } - - DEBUG_KNAP( - wSum = 0.0; - pSum = 0.0; - - for (k = 0; k < n; k++) { - if (x[k] == 1) { - wSum += w[k]; - pSum += p[k]; - printf("\n" "x[%d]=%d, w: %12.4f, p: %12.4f, ", - k, x[k], w[k], p[k]); - printf("wSum: %12.4f, pSum: %12.4f", - wSum, pSum); + j = n - 1; + + if (xhat[n - 1] == 1) { + chat += w[n - 1]; + zhat -= p[n - 1]; + 
xhat[n - 1] = 0; } - } - ); - UTIL_DELPTR(xhat); - return KP_OPTIMAL; + } + + DEBUG_KNAP(printf("\n" + "backtrack");); + //--- + //--- (5) backtrack (find i = max arg{k < j : x[k] = 1} + //--- + i = std::max(0, j - 1); // need max, in case i = 0 + + while ((xhat[i] != 1) && i > 0) { + i--; + DEBUG_KNAP(printf("\n" + "i : %d xhat[i]: %d", + i, xhat[i]);); + } + + if (i == 0) { + break; + } + + chat += w[i]; + zhat -= p[i]; + xhat[i] = 0; + j = i + 1; + } + + DEBUG_KNAP(wSum = 0.0; pSum = 0.0; + + for (k = 0; k < n; k++) { + if (x[k] == 1) { + wSum += w[k]; + pSum += p[k]; + printf("\n" + "x[%d]=%d, w: %12.4f, p: %12.4f, ", + k, x[k], w[k], p[k]); + printf("wSum: %12.4f, pSum: %12.4f", wSum, pSum); + } + }); + UTIL_DELPTR(xhat); + return KP_OPTIMAL; } - diff --git a/Dip/src/UtilMacros.cpp b/Dip/src/UtilMacros.cpp index 82b1fa66..f3a2ea11 100644 --- a/Dip/src/UtilMacros.cpp +++ b/Dip/src/UtilMacros.cpp @@ -21,10 +21,9 @@ // Graph Macros // ========================================================================= // ------------------------------------------------------------------------- // -std::pair UtilBothEndsU(const int index) -{ - int i = (int)(floor(sqrt(1 + 8.0 * index) / 2 + .500000001)); - return std::make_pair( i, index - (i * (i - 1) / 2) ); +std::pair UtilBothEndsU(const int index) { + int i = (int)(floor(sqrt(1 + 8.0 * index) / 2 + .500000001)); + return std::make_pair(i, index - (i * (i - 1) / 2)); } // ========================================================================= @@ -32,154 +31,146 @@ std::pair UtilBothEndsU(const int index) // ========================================================================= // ------------------------------------------------------------------------- // -int UtilScaleDblToIntArr(const int arrLen, - const double* arrDbl, - int* arrInt, - const double oneDbl, - int* oneInt, - const double epstol) -{ - //--- - //--- A very simple function to scale an array of doubles to integers. - //--- Note: epstol denotes the preferred accuracy, - //--- so, we will scale by 1.0/epstol, unless something smaller works. - //--- It constructs the scaled array and returns the scale factor. - //--- - //--- It can also scale oneDbl to oneInt wrt to the array (e.g.., - //--- the rhs of row). If oneInt == NULL, then this part is skipped. - //--- - int i, scaleFactor = 1, n_aFrac = 0, factorTooBig = 0; - double* arrFrac = NULL; - double fractionalPart; - double oneOverEps = 1.0 / epstol; - arrFrac = new double[arrLen + 1]; - assert(arrFrac); - - for (i = 0; i < arrLen; i++) { - fractionalPart = UtilFracPart(arrDbl[i]); - - if (!UtilIsZero(fractionalPart)) { - fractionalPart *= oneOverEps; - arrFrac[n_aFrac++] = static_cast(round(fractionalPart)) - * (double)epstol; +int UtilScaleDblToIntArr(const int arrLen, const double *arrDbl, int *arrInt, + const double oneDbl, int *oneInt, + const double epstol) { + //--- + //--- A very simple function to scale an array of doubles to integers. + //--- Note: epstol denotes the preferred accuracy, + //--- so, we will scale by 1.0/epstol, unless something smaller works. + //--- It constructs the scaled array and returns the scale factor. + //--- + //--- It can also scale oneDbl to oneInt wrt to the array (e.g.., + //--- the rhs of row). If oneInt == NULL, then this part is skipped. 
+ //--- + int i, scaleFactor = 1, n_aFrac = 0, factorTooBig = 0; + double *arrFrac = NULL; + double fractionalPart; + double oneOverEps = 1.0 / epstol; + arrFrac = new double[arrLen + 1]; + assert(arrFrac); + + for (i = 0; i < arrLen; i++) { + fractionalPart = UtilFracPart(arrDbl[i]); + + if (!UtilIsZero(fractionalPart)) { + fractionalPart *= oneOverEps; + arrFrac[n_aFrac++] = + static_cast(round(fractionalPart)) * (double)epstol; + } + } + + if (oneInt) { + fractionalPart = UtilFracPart(oneDbl); + + if (!UtilIsZero(fractionalPart)) { + fractionalPart *= oneOverEps; + arrFrac[n_aFrac++] = + static_cast(round(fractionalPart)) * (double)epstol; + } + } + + for (i = 0; i < n_aFrac; i++) { + assert(arrFrac[i] < (INT_MAX / scaleFactor)); + arrFrac[i] *= scaleFactor; + + while (!UtilIsZero(UtilFracPart(arrFrac[i]))) { + scaleFactor *= 10; + + if (scaleFactor >= oneOverEps) { + factorTooBig = 1; + break; } - } - if (oneInt) { - fractionalPart = UtilFracPart(oneDbl); + assert(arrFrac[i] < (INT_MAX / 10)); + arrFrac[i] *= 10; + assert(arrFrac[i] >= 0); + } - if (!UtilIsZero(fractionalPart)) { - fractionalPart *= oneOverEps; - arrFrac[n_aFrac++] = static_cast(round(fractionalPart)) - * (double)epstol; - } - } + if (factorTooBig) { + break; + } + } - for (i = 0; i < n_aFrac; i++) { - assert(arrFrac[i] < (INT_MAX / scaleFactor)); - arrFrac[i] *= scaleFactor; - - while (!UtilIsZero(UtilFracPart(arrFrac[i]))) { - scaleFactor *= 10; - - if (scaleFactor >= oneOverEps) { - factorTooBig = 1; - break; - } - - assert(arrFrac[i] < (INT_MAX / 10)); - arrFrac[i] *= 10; - assert(arrFrac[i] >= 0); - } - - if (factorTooBig) { - break; - } - } - - for (i = 0; i < arrLen; i++) { - arrInt[i] = static_cast(round(arrDbl[i] * scaleFactor)); - } + for (i = 0; i < arrLen; i++) { + arrInt[i] = static_cast(round(arrDbl[i] * scaleFactor)); + } - if (oneInt) { - *oneInt = static_cast(round(oneDbl * scaleFactor)); - } + if (oneInt) { + *oneInt = static_cast(round(oneDbl * scaleFactor)); + } - UTIL_DELARR(arrFrac); - return scaleFactor; + UTIL_DELARR(arrFrac); + return scaleFactor; } - /*==========================================================================*/ -int UtilScaleDblToIntArr(const int arrLen, - const double* arrDbl, - int* arrInt, - const double epstol) -{ - //--- - //--- A very simple function to scale an array of doubles to integers. - //--- Note: epstol denotes the preferred accuracy, - //--- so, we will scale by 1.0/epstol, unless something smaller works. - //--- It constructs the scaled array and returns the scale factor. - //--- - //--- It can also scale oneDbl to oneInt wrt to the array (e.g.., - //--- the rhs of row). If oneInt == NULL, then this part is skipped. - //--- - int i, scaleFactor = 1, n_aFrac = 0, factorTooBig = 0; - double* arrFrac = NULL; - double fractionalPart; - double oneOverEps = 1.0 / epstol; - //TODO: pass in arrFrac? 
- arrFrac = new double[arrLen]; - assert(arrFrac); - - for (i = 0; i < arrLen; i++) { - fractionalPart = UtilFracPart(arrDbl[i]); - - //printf("arrDbl[%d]=%10.5f fracPart=%6.5f\n", - // i, arrDbl[i], fractionalPart); - if (!UtilIsZero(fractionalPart)) { - fractionalPart *= oneOverEps; - //printf("fracPart is not zero oneOverEps= %10.5f fracPart= %10.5f\n", - //oneOverEps, fractionalPart); - arrFrac[n_aFrac++] = static_cast(round(fractionalPart)) - * (double)epstol; - //printf("arrFrac[%d] = %10.5f\n", (n_aFrac-1), arrFrac[n_aFrac-1]); +int UtilScaleDblToIntArr(const int arrLen, const double *arrDbl, int *arrInt, + const double epstol) { + //--- + //--- A very simple function to scale an array of doubles to integers. + //--- Note: epstol denotes the preferred accuracy, + //--- so, we will scale by 1.0/epstol, unless something smaller works. + //--- It constructs the scaled array and returns the scale factor. + //--- + //--- It can also scale oneDbl to oneInt wrt to the array (e.g.., + //--- the rhs of row). If oneInt == NULL, then this part is skipped. + //--- + int i, scaleFactor = 1, n_aFrac = 0, factorTooBig = 0; + double *arrFrac = NULL; + double fractionalPart; + double oneOverEps = 1.0 / epstol; + // TODO: pass in arrFrac? + arrFrac = new double[arrLen]; + assert(arrFrac); + + for (i = 0; i < arrLen; i++) { + fractionalPart = UtilFracPart(arrDbl[i]); + + // printf("arrDbl[%d]=%10.5f fracPart=%6.5f\n", + // i, arrDbl[i], fractionalPart); + if (!UtilIsZero(fractionalPart)) { + fractionalPart *= oneOverEps; + // printf("fracPart is not zero oneOverEps= %10.5f fracPart= %10.5f\n", + // oneOverEps, fractionalPart); + arrFrac[n_aFrac++] = + static_cast(round(fractionalPart)) * (double)epstol; + // printf("arrFrac[%d] = %10.5f\n", (n_aFrac-1), arrFrac[n_aFrac-1]); + } + } + + for (i = 0; i < n_aFrac; i++) { + assert(arrFrac[i] < (INT_MAX / scaleFactor)); + arrFrac[i] *= scaleFactor; + + while (!UtilIsZero(UtilFracPart(arrFrac[i]))) { + scaleFactor *= 10; + + if (scaleFactor >= oneOverEps) { + factorTooBig = 1; + break; } - } - - for (i = 0; i < n_aFrac; i++) { - assert(arrFrac[i] < (INT_MAX / scaleFactor)); - arrFrac[i] *= scaleFactor; - - while (!UtilIsZero(UtilFracPart(arrFrac[i]))) { - scaleFactor *= 10; - - if (scaleFactor >= oneOverEps) { - factorTooBig = 1; - break; - } - - assert(arrFrac[i] < (INT_MAX / 10)); - arrFrac[i] *= 10; - assert(arrFrac[i] >= 0); - } - - if (factorTooBig) { - break; - } - } - - //--- - //--- must be careful not to trunc here - //--- so, we want to round - //--- - for (i = 0; i < arrLen; i++) { - arrInt[i] = static_cast(round(arrDbl[i] * scaleFactor)); - } - UTIL_DELARR(arrFrac); - return scaleFactor; + assert(arrFrac[i] < (INT_MAX / 10)); + arrFrac[i] *= 10; + assert(arrFrac[i] >= 0); + } + + if (factorTooBig) { + break; + } + } + + //--- + //--- must be careful not to trunc here + //--- so, we want to round + //--- + for (i = 0; i < arrLen; i++) { + arrInt[i] = static_cast(round(arrDbl[i] * scaleFactor)); + } + + UTIL_DELARR(arrFrac); + return scaleFactor; } #if 0 @@ -188,8 +179,8 @@ int UtilScaleDblToIntArr(const int arrLen, //--- taken from Concorde's integerize_vector in LOCALCUT/first.c //--- /*==========================================================================*/ -#define INTEGERIZE_MUL (16*9*5*7*11*13*17) -#define CC_SWAP(a,b,t) (((t)=(a)),((a)=(b)),((b)=(t))) +#define INTEGERIZE_MUL (16 * 9 * 5 * 7 * 11 * 13 * 17) +#define CC_SWAP(a, b, t) (((t) = (a)), ((a) = (b)), ((b) = (t))) 
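As a side note on the scaling routine above: the core idea in UtilScaleDblToIntArr is to grow a power-of-ten scale factor until every fractional part vanishes to within the tolerance, then round rather than truncate. The following is a simplified stand-alone sketch under those assumptions; it omits the epstol cap and overflow guards that the library version carries, and the name scaleToInt is invented for illustration.

    #include <cmath>
    #include <cstdio>

    // Grow a power-of-ten multiplier until x[i] * scale is (nearly) integral
    // for every i, then round each entry.  Returns the scale factor used.
    static int scaleToInt(int n, const double *x, int *out, double tol) {
      int scale = 1;
      for (int i = 0; i < n; i++) {
        double v = x[i] * scale;
        while (std::fabs(v - std::floor(v + 0.5)) > tol) {
          scale *= 10; // once an earlier entry is integral, x10 keeps it integral
          v = x[i] * scale;
        }
      }
      for (int i = 0; i < n; i++)
        out[i] = (int)std::floor(x[i] * scale + 0.5); // round, do not truncate
      return scale;
    }

    int main() {
      double a[3] = {0.5, 1.25, 3.0};
      int b[3];
      int s = scaleToInt(3, a, b, 1.0e-6);
      std::printf("scale = %d -> %d %d %d\n", s, b[0], b[1], b[2]);
      return 0;
    }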
/*==========================================================================*/ static int CCutil_our_gcd (int a, int b) diff --git a/Dip/src/UtilMacrosAlps.cpp b/Dip/src/UtilMacrosAlps.cpp index a168f327..b29af2c1 100644 --- a/Dip/src/UtilMacrosAlps.cpp +++ b/Dip/src/UtilMacrosAlps.cpp @@ -12,7 +12,7 @@ // All Rights Reserved. // //===========================================================================// -//copyright +// copyright //========================================================================== // #include "UtilMacrosAlps.h" @@ -27,51 +27,47 @@ //--- //========================================================================== // -int UtilAlpsEncodeWarmStart(AlpsEncoded* encoded, - const CoinWarmStartBasis* ws) -{ - int status = 0; - int numCols = ws->getNumStructural(); - int numRows = ws->getNumArtificial(); - encoded->writeRep(numCols); - encoded->writeRep(numRows); - // Pack structural. - int nint = (ws->getNumStructural() + 15) >> 4; - encoded->writeRep(ws->getStructuralStatus(), nint * 4); - // Pack artificial. - nint = (ws->getNumArtificial() + 15) >> 4; - encoded->writeRep(ws->getArtificialStatus(), nint * 4); - return status; +int UtilAlpsEncodeWarmStart(AlpsEncoded *encoded, + const CoinWarmStartBasis *ws) { + int status = 0; + int numCols = ws->getNumStructural(); + int numRows = ws->getNumArtificial(); + encoded->writeRep(numCols); + encoded->writeRep(numRows); + // Pack structural. + int nint = (ws->getNumStructural() + 15) >> 4; + encoded->writeRep(ws->getStructuralStatus(), nint * 4); + // Pack artificial. + nint = (ws->getNumArtificial() + 15) >> 4; + encoded->writeRep(ws->getArtificialStatus(), nint * 4); + return status; } //===========================================================================// -CoinWarmStartBasis* UtilAlpsDecodeWarmStart(AlpsEncoded& encoded, - AlpsReturnStatus* rc) -{ - //rc not used? not checked? - int numCols; - int numRows; - encoded.readRep(numCols); - encoded.readRep(numRows); - int tempInt; - // Structural - int nint = (numCols + 15) >> 4; - char* structuralStatus = new char[4 * nint]; - encoded.readRep(structuralStatus, tempInt); - assert(tempInt == nint * 4); - // Artificial - nint = (numRows + 15) >> 4; - char* artificialStatus = new char[4 * nint]; - encoded.readRep(artificialStatus, tempInt); - assert(tempInt == nint * 4); - CoinWarmStartBasis* ws = new CoinWarmStartBasis(); +CoinWarmStartBasis *UtilAlpsDecodeWarmStart(AlpsEncoded &encoded, + AlpsReturnStatus *rc) { + // rc not used? not checked? 
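// (For context: CoinWarmStartBasis keeps each structural/artificial status in
//  2 bits, so a 4-byte int holds 16 of them; the (n + 15) >> 4 computation
//  below rounds up to the number of ints needed and mirrors the nint * 4
//  bytes written by UtilAlpsEncodeWarmStart above.)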
+ int numCols; + int numRows; + encoded.readRep(numCols); + encoded.readRep(numRows); + int tempInt; + // Structural + int nint = (numCols + 15) >> 4; + char *structuralStatus = new char[4 * nint]; + encoded.readRep(structuralStatus, tempInt); + assert(tempInt == nint * 4); + // Artificial + nint = (numRows + 15) >> 4; + char *artificialStatus = new char[4 * nint]; + encoded.readRep(artificialStatus, tempInt); + assert(tempInt == nint * 4); + CoinWarmStartBasis *ws = new CoinWarmStartBasis(); - if (!ws) { - throw CoinError("Out of memory", "UtilAlpsDecodeWarmStart", "HELP"); - } + if (!ws) { + throw CoinError("Out of memory", "UtilAlpsDecodeWarmStart", "HELP"); + } - ws->assignBasisStatus(numCols, numRows, - structuralStatus, artificialStatus); - return ws; + ws->assignBasisStatus(numCols, numRows, structuralStatus, artificialStatus); + return ws; } - diff --git a/Dip/src/UtilMacrosDecomp.cpp b/Dip/src/UtilMacrosDecomp.cpp index 740b7b76..099f4450 100644 --- a/Dip/src/UtilMacrosDecomp.cpp +++ b/Dip/src/UtilMacrosDecomp.cpp @@ -24,104 +24,93 @@ using namespace std; // COIN Macros // ========================================================================= // ------------------------------------------------------------------------- // -CoinPackedVector* UtilPackedVectorFromDense(const int len, - const double* dense, - const double etol) -{ - //TODO: test for dup? - efficiency vs debug - //TODO: insert is slow - better to use setVector? - int i; - CoinPackedVector* v = new CoinPackedVector(); - - for (i = 0; i < len; i++) { - if (fabs(dense[i]) > etol) { - v->insert(i, dense[i]); - } - } - - return v; +CoinPackedVector *UtilPackedVectorFromDense(const int len, const double *dense, + const double etol) { + // TODO: test for dup? - efficiency vs debug + // TODO: insert is slow - better to use setVector? + int i; + CoinPackedVector *v = new CoinPackedVector(); + + for (i = 0; i < len; i++) { + if (fabs(dense[i]) > etol) { + v->insert(i, dense[i]); + } + } + + return v; } // ------------------------------------------------------------------------- // -void UtilPackedVectorFromDense(const int len, - const double* dense, - const double etol, - CoinPackedVector& v) -{ - //TODO: test for dup? - efficiency vs debug - //TODO: insert is slow - better to use setVector? - int i; - - for (i = 0; i < len; i++) { - if (fabs(dense[i]) > etol) { - v.insert(i, dense[i]); - } - } +void UtilPackedVectorFromDense(const int len, const double *dense, + const double etol, CoinPackedVector &v) { + // TODO: test for dup? - efficiency vs debug + // TODO: insert is slow - better to use setVector? 
+ int i; + + for (i = 0; i < len; i++) { + if (fabs(dense[i]) > etol) { + v.insert(i, dense[i]); + } + } } // ------------------------------------------------------------------------- // -void UtilPrintPackedVector(const CoinPackedVector& v, - ostream* os, - DecompApp* app) -{ - (*os).precision(2); - const int* inds = v.getIndices(); - const double* elems = v.getElements(); - const int len = v.getNumElements(); - - for (int i = 0; i < len; i++) { - if (!app) { - (*os) << elems[i] << " x[" << inds[i] << "] "; - } else { - (*os) << elems[i] << " "; - app->printOriginalColumn(inds[i], os); - (*os) << " "; - } - - if ((i + 1) % 5 == 0) { - (*os) << "\n"; - } - } - - (*os) << endl; -} - -// ------------------------------------------------------------------------- // -void UtilPrintPackedVector(const CoinPackedVector& v, - ostream* os, - const vector& colNames, - const double* value) -{ - (*os).precision(2); - const int* inds = v.getIndices(); - const double* elems = v.getElements(); - const int len = v.getNumElements(); - int namesSize = static_cast(colNames.size()); - double sum = 0.0; - - for (int i = 0; i < len; i++) { - if (!namesSize) - (*os) << setw(10) << UtilDblToStr(elems[i], 4) - << " x[" << setw(6) << inds[i] << "] "; - else { - (*os) << setw(10) << UtilDblToStr(elems[i], 4) - << " " << setw(10) << colNames[inds[i]] << " "; - } - - if (value) { - sum += elems[i] * value[inds[i]]; - (*os) << " --> " << setw(10) << UtilDblToStr(value[inds[i]], 4); - (*os) << " --> " << setw(10) << UtilDblToStr(sum, 4); - } - +void UtilPrintPackedVector(const CoinPackedVector &v, ostream *os, + DecompApp *app) { + (*os).precision(2); + const int *inds = v.getIndices(); + const double *elems = v.getElements(); + const int len = v.getNumElements(); + + for (int i = 0; i < len; i++) { + if (!app) { + (*os) << elems[i] << " x[" << inds[i] << "] "; + } else { + (*os) << elems[i] << " "; + app->printOriginalColumn(inds[i], os); + (*os) << " "; + } + + if ((i + 1) % 5 == 0) { (*os) << "\n"; - } + } + } - if (value) { - (*os) << "dot product = " << UtilDblToStr(sum, 4) << endl; - } - - (*os) << endl; + (*os) << endl; } - +// ------------------------------------------------------------------------- // +void UtilPrintPackedVector(const CoinPackedVector &v, ostream *os, + const vector &colNames, + const double *value) { + (*os).precision(2); + const int *inds = v.getIndices(); + const double *elems = v.getElements(); + const int len = v.getNumElements(); + int namesSize = static_cast(colNames.size()); + double sum = 0.0; + + for (int i = 0; i < len; i++) { + if (!namesSize) + (*os) << setw(10) << UtilDblToStr(elems[i], 4) << " x[" << setw(6) + << inds[i] << "] "; + else { + (*os) << setw(10) << UtilDblToStr(elems[i], 4) << " " << setw(10) + << colNames[inds[i]] << " "; + } + + if (value) { + sum += elems[i] * value[inds[i]]; + (*os) << " --> " << setw(10) << UtilDblToStr(value[inds[i]], 4); + (*os) << " --> " << setw(10) << UtilDblToStr(sum, 4); + } + + (*os) << "\n"; + } + + if (value) { + (*os) << "dot product = " << UtilDblToStr(sum, 4) << endl; + } + + (*os) << endl; +} diff --git a/Dip/src/UtilParameters.cpp b/Dip/src/UtilParameters.cpp index 96621c6b..cbe1e21c 100644 --- a/Dip/src/UtilParameters.cpp +++ b/Dip/src/UtilParameters.cpp @@ -13,8 +13,8 @@ //===========================================================================// //===========================================================================// -#include "UtilMacros.h" #include "UtilParameters.h" +#include "UtilMacros.h" #include #include @@ 
-23,390 +23,367 @@ using namespace std; #define MAXLINE 1024 //===========================================================================// -//TODO: need a template for usage, need application to register this, defaults -//TODO: this is an ugly combination of old C code and C++, C++-ify it +// TODO: need a template for usage, need application to register this, defaults +// TODO: this is an ugly combination of old C code and C++, C++-ify it // ------------------------------------------------------------------------- // -void UtilParameters::ScanCmdLineArgs(int& argc, - char* argv[]) -{ - int i, j; - - //--- - //--- if there are no arguments, return - //--- - if (argc == 0 || argv == NULL) { - return; - } - - j = 1; - string paramFileName; - - for (i = 1; i < argc; i++) { +void UtilParameters::ScanCmdLineArgs(int &argc, char *argv[]) { + int i, j; + + //--- + //--- if there are no arguments, return + //--- + if (argc == 0 || argv == NULL) { + return; + } + + j = 1; + string paramFileName; + + for (i = 1; i < argc; i++) { + //--- + //--- if "--param" is specified + //--- + if (strcmp(argv[i], "--param") == 0) { //--- - //--- if "--param" is specified + //--- if a filename follows the flag + //--- grab the filename //--- - if (strcmp(argv[i], "--param") == 0) { - //--- - //--- if a filename follows the flag - //--- grab the filename - //--- - if (((i + 1) < argc) - && (argv[i + 1][0] != '-' || argv[i + 1][1] != '-')) { - paramFileName = argv[++i]; - } - - continue; + if (((i + 1) < argc) && + (argv[i + 1][0] != '-' || argv[i + 1][1] != '-')) { + paramFileName = argv[++i]; } - argv[j++] = argv[i]; - } - - argc = j; - //--- - //--- load the parameter file - //--- - LoadParamFile(paramFileName); - //--- - //--- enter the command line flags into the parameter map - //--- format is --SECTION:PARAMETER value - //--- - char cmdBuf[MAXLINE]; - - for (i = 1; i < argc; i++) { - if (argv[i][0] == '-' && argv[i][1] == '-') { - string name (argv[i] + 2); - string value(""); - strcpy(cmdBuf, argv[i] + 2); - char* ptr1 = strtok(cmdBuf, ":"); - char* ptr2 = strtok(NULL, ":"); - char* section = NULL; - char* parm = NULL; - - if (ptr2 == NULL) { - //--- - //--- section is NULL - //--- - parm = ptr1; - } else { - //--- - //--- section is not NULL - //--- - section = ptr1; - parm = ptr2; - } - - if (((i + 1) < argc) && (argv[i + 1][0] != '-' || argv[i + 1][1] != '-')) { - value = argv[++i]; - } - - Add(section, parm, value.c_str()); - continue; + continue; + } + + argv[j++] = argv[i]; + } + + argc = j; + //--- + //--- load the parameter file + //--- + LoadParamFile(paramFileName); + //--- + //--- enter the command line flags into the parameter map + //--- format is --SECTION:PARAMETER value + //--- + char cmdBuf[MAXLINE]; + + for (i = 1; i < argc; i++) { + if (argv[i][0] == '-' && argv[i][1] == '-') { + string name(argv[i] + 2); + string value(""); + strcpy(cmdBuf, argv[i] + 2); + char *ptr1 = strtok(cmdBuf, ":"); + char *ptr2 = strtok(NULL, ":"); + char *section = NULL; + char *parm = NULL; + + if (ptr2 == NULL) { + //--- + //--- section is NULL + //--- + parm = ptr1; + } else { + //--- + //--- section is not NULL + //--- + section = ptr1; + parm = ptr2; } - } - - //STOP - what is the point of this? 
- // - // ----- remove all the flags from the command line - // - j = 1; - - for (i = 1; i < argc; i++) { - if (argv[i][0] == '-' && argv[i][1] == '-') { - string name (argv[i] + 2); - string value(""); - if (((i + 1) < argc) - && (argv[i + 1][0] != '-' || argv[i + 1][1] != '-')) { - value = argv[++i]; - } - - continue; + if (((i + 1) < argc) && + (argv[i + 1][0] != '-' || argv[i + 1][1] != '-')) { + value = argv[++i]; } - argv[j++] = argv[i]; - } - - argc = j; -} - -// ------------------------------------------------------------------------- // -void UtilParameters::LoadParamFile(string& paramFileName) -{ - char buf[MAXLINE]; - char* ptr = NULL; - string curSection(""); - string bufStr (""); - string name (""); - string value (""); - //--- - //--- open the stream pointer - //--- - ifstream is(paramFileName.c_str()); - - if (!is) { - return; - } - - //--- - //--- foreach line in the file - //--- skip comments (#) and blank lines - //--- - int lineNum = 0; - - while (!is.eof()) { - is.getline(buf, sizeof(buf) - 1); - lineNum++; - ptr = strchr(buf, '#'); - - if (ptr != NULL) { - *ptr = '\0'; + Add(section, parm, value.c_str()); + continue; + } + } + + // STOP - what is the point of this? + // + // ----- remove all the flags from the command line + // + j = 1; + + for (i = 1; i < argc; i++) { + if (argv[i][0] == '-' && argv[i][1] == '-') { + string name(argv[i] + 2); + string value(""); + + if (((i + 1) < argc) && + (argv[i + 1][0] != '-' || argv[i + 1][1] != '-')) { + value = argv[++i]; } - //TODO: move all to use string? do we need buf? - bufStr = buf; - bufStr = UtilStrTrim(bufStr); - strcpy(buf, bufStr.c_str()); - - if (strlen(buf) < 3) { - continue; - } + continue; + } - //--- - //--- if line is '[section]' - //--- create a new section - //--- - if (buf[0] == '[') { - ptr = strchr(buf + 1, ']'); - - if (ptr == NULL) { - cerr << "UtilParameters: syntax error on line " - << lineNum << " '" << buf << "'" << endl; - } - - *ptr = '\0'; - curSection = buf + 1; - continue; - } - - //--- - //--- if line is 'name = value' - //--- create a new name/value pair in the current section - //--- - ptr = strchr(buf, '='); + argv[j++] = argv[i]; + } - if (ptr != NULL) { - *ptr++ = '\0'; - } - - name = buf; - value = "1"; - name = UtilStrTrim(name); - name = UtilStrToLower(name); + argc = j; +} - if (ptr != NULL) { - //TODO: look into old code -> value=Expand(ptr) - value = ptr; - value = UtilStrTrim(value); +// ------------------------------------------------------------------------- // +void UtilParameters::LoadParamFile(string ¶mFileName) { + char buf[MAXLINE]; + char *ptr = NULL; + string curSection(""); + string bufStr(""); + string name(""); + string value(""); + //--- + //--- open the stream pointer + //--- + ifstream is(paramFileName.c_str()); + + if (!is) { + return; + } + + //--- + //--- foreach line in the file + //--- skip comments (#) and blank lines + //--- + int lineNum = 0; + + while (!is.eof()) { + is.getline(buf, sizeof(buf) - 1); + lineNum++; + ptr = strchr(buf, '#'); + + if (ptr != NULL) { + *ptr = '\0'; + } + + // TODO: move all to use string? do we need buf? 
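// (Parsing note, from the code below: '#' starts a comment, lines shorter
//  than three characters are skipped, and a name with no '=' is treated as a
//  flag whose value defaults to "1".)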
+ bufStr = buf; + bufStr = UtilStrTrim(bufStr); + strcpy(buf, bufStr.c_str()); + + if (strlen(buf) < 3) { + continue; + } + + //--- + //--- if line is '[section]' + //--- create a new section + //--- + if (buf[0] == '[') { + ptr = strchr(buf + 1, ']'); + + if (ptr == NULL) { + cerr << "UtilParameters: syntax error on line " << lineNum << " '" + << buf << "'" << endl; } - Add(curSection, name, value); - } - - //--- - //--- close file stream - //--- - is.close(); + *ptr = '\0'; + curSection = buf + 1; + continue; + } + + //--- + //--- if line is 'name = value' + //--- create a new name/value pair in the current section + //--- + ptr = strchr(buf, '='); + + if (ptr != NULL) { + *ptr++ = '\0'; + } + + name = buf; + value = "1"; + name = UtilStrTrim(name); + name = UtilStrToLower(name); + + if (ptr != NULL) { + // TODO: look into old code -> value=Expand(ptr) + value = ptr; + value = UtilStrTrim(value); + } + + Add(curSection, name, value); + } + + //--- + //--- close file stream + //--- + is.close(); } // ------------------------------------------------------------------------- // -void UtilParameters::Add(string& sSection, - string& sName, - string& sValue) -{ - string keyname (""); - if (sSection == ""){ - sSection = "DECOMP"; - } - keyname = UtilStrToLower(UtilStrTrim(sSection)); - keyname += "@"; - keyname += UtilStrToLower(UtilStrTrim(sName)); - //TODO: why doesn't insert override?? - // m_paramMap.insert(make_pair(keyname, UtilStrTrim(sValue))); - m_paramMap[keyname] = UtilStrTrim(sValue); +void UtilParameters::Add(string &sSection, string &sName, string &sValue) { + string keyname(""); + if (sSection == "") { + sSection = "DECOMP"; + } + keyname = UtilStrToLower(UtilStrTrim(sSection)); + keyname += "@"; + keyname += UtilStrToLower(UtilStrTrim(sName)); + // TODO: why doesn't insert override?? + // m_paramMap.insert(make_pair(keyname, UtilStrTrim(sValue))); + m_paramMap[keyname] = UtilStrTrim(sValue); } // ------------------------------------------------------------------------- // -void UtilParameters::Add(const char* section, - const char* name, - const char* value) -{ - string keyname (""); - string sSection("DECOMP"); - string sName (name); - string sValue (value); - - if (section) { - sSection = section; - } - - keyname = UtilStrToLower(UtilStrTrim(sSection)); - - keyname += "@"; - keyname += UtilStrToLower(UtilStrTrim(sName)); - //TODO: why doesn't insert override?? - // m_paramMap.insert(make_pair(keyname, utilParam)); - m_paramMap[keyname] = UtilStrTrim(sValue); +void UtilParameters::Add(const char *section, const char *name, + const char *value) { + string keyname(""); + string sSection("DECOMP"); + string sName(name); + string sValue(value); + + if (section) { + sSection = section; + } + + keyname = UtilStrToLower(UtilStrTrim(sSection)); + + keyname += "@"; + keyname += UtilStrToLower(UtilStrTrim(sName)); + // TODO: why doesn't insert override?? 
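// (On the TODO above: std::map::insert does not replace the value of a key
//  that already exists, which is why operator[] is used below; it overwrites
//  any earlier setting stored under the same keyname.)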
+ // m_paramMap.insert(make_pair(keyname, utilParam)); + m_paramMap[keyname] = UtilStrTrim(sValue); } // ------------------------------------------------------------------------- // -std::string* UtilParameters::Find(const char* section, - const char* name) -{ - string keyname (""); - string sSection("DECOMP"); - string sName (name); - - if (section) { - sSection = section; - } - keyname = UtilStrToLower(UtilStrTrim(sSection)); - - keyname += "@"; - keyname += UtilStrToLower(UtilStrTrim(sName)); - map::iterator it; - it = m_paramMap.find(keyname); - - if (it == m_paramMap.end()) { - Add(section, name, "(undefined)"); - return NULL; - } else if (it->second == "(undefined)"){ - return NULL; - }else{ - return &(it->second); - } +std::string *UtilParameters::Find(const char *section, const char *name) { + string keyname(""); + string sSection("DECOMP"); + string sName(name); + + if (section) { + sSection = section; + } + keyname = UtilStrToLower(UtilStrTrim(sSection)); + + keyname += "@"; + keyname += UtilStrToLower(UtilStrTrim(sName)); + map::iterator it; + it = m_paramMap.find(keyname); + + if (it == m_paramMap.end()) { + Add(section, name, "(undefined)"); + return NULL; + } else if (it->second == "(undefined)") { + return NULL; + } else { + return &(it->second); + } } // ------------------------------------------------------------------------- // -string UtilParameters::GetSetting(const char* name, - const std::string defaultValue, - const char* section) -{ - //--- - //--- build the qualified name using the section - //--- if the parameter is not found, return the default - //--- else convert the string to the appropriate type - //--- - string* pVal = Find(section, name); - - if (pVal == NULL) { - return defaultValue; - } - - return *pVal; +string UtilParameters::GetSetting(const char *name, + const std::string defaultValue, + const char *section) { + //--- + //--- build the qualified name using the section + //--- if the parameter is not found, return the default + //--- else convert the string to the appropriate type + //--- + string *pVal = Find(section, name); + + if (pVal == NULL) { + return defaultValue; + } + + return *pVal; } - - - // ------------------------------------------------------------------------- // -string UtilParameters::GetSetting(const char* name, - const char* defaultValue, - const char* section) -{ - //--- - //--- build the qualified name using the section - //--- if the parameter is not found, return the default - //--- else convert the string to the appropriate type - //--- - string* pVal = Find(section, name); - - if (pVal == NULL) { - return string(defaultValue); - } - - return *pVal; +string UtilParameters::GetSetting(const char *name, const char *defaultValue, + const char *section) { + //--- + //--- build the qualified name using the section + //--- if the parameter is not found, return the default + //--- else convert the string to the appropriate type + //--- + string *pVal = Find(section, name); + + if (pVal == NULL) { + return string(defaultValue); + } + + return *pVal; } // ------------------------------------------------------------------------- // -int UtilParameters::GetSetting(const char* name, - const int defaultValue, - const char* section) -{ - //--- - //--- build the qualified name using the section - //--- if the parameter is not found, return the default - //--- else convert the string to the appropriate type - //--- - string* pVal = Find(section, name); - - if (pVal == NULL) { - return defaultValue; - } - - int value = atoi(pVal->c_str()); - 
return value; +int UtilParameters::GetSetting(const char *name, const int defaultValue, + const char *section) { + //--- + //--- build the qualified name using the section + //--- if the parameter is not found, return the default + //--- else convert the string to the appropriate type + //--- + string *pVal = Find(section, name); + + if (pVal == NULL) { + return defaultValue; + } + + int value = atoi(pVal->c_str()); + return value; } // ------------------------------------------------------------------------- // -bool UtilParameters::GetSetting(const char* name, - const bool defaultValue, - const char* section) -{ - //--- - //--- build the qualified name using the section - //--- if the parameter is not found, return the default - //--- else convert the string to the appropriate type - //--- - string* pVal = Find(section, name); - - if (pVal == NULL) { - return defaultValue; - } - - bool value = atoi(pVal->c_str()) != 0; - return value; +bool UtilParameters::GetSetting(const char *name, const bool defaultValue, + const char *section) { + //--- + //--- build the qualified name using the section + //--- if the parameter is not found, return the default + //--- else convert the string to the appropriate type + //--- + string *pVal = Find(section, name); + + if (pVal == NULL) { + return defaultValue; + } + + bool value = atoi(pVal->c_str()) != 0; + return value; } // ------------------------------------------------------------------------- // -long UtilParameters::GetSetting(const char* name, - const long defaultValue, - const char* section) -{ - //--- - //--- build the qualified name using the section - //--- if the parameter is not found, return the default - //--- else convert the string to the appropriate type - //--- - string* pVal = Find(section, name); - - if (pVal == NULL) { - return defaultValue; - } - - long value = atol(pVal->c_str()); - return value; +long UtilParameters::GetSetting(const char *name, const long defaultValue, + const char *section) { + //--- + //--- build the qualified name using the section + //--- if the parameter is not found, return the default + //--- else convert the string to the appropriate type + //--- + string *pVal = Find(section, name); + + if (pVal == NULL) { + return defaultValue; + } + + long value = atol(pVal->c_str()); + return value; } // ------------------------------------------------------------------------- // -double UtilParameters::GetSetting(const char* name, - const double defaultValue, - const char* section) -{ - //--- - //--- build the qualified name using the section - //--- if the parameter is not found, return the default - //--- else convert the string to the appropriate type - //--- - string* pVal = Find(section, name); - - if (pVal == NULL) { - return defaultValue; - } - - char* pEnd = NULL; - double value = strtod(pVal->c_str(), &pEnd); - return value; +double UtilParameters::GetSetting(const char *name, const double defaultValue, + const char *section) { + //--- + //--- build the qualified name using the section + //--- if the parameter is not found, return the default + //--- else convert the string to the appropriate type + //--- + string *pVal = Find(section, name); + + if (pVal == NULL) { + return defaultValue; + } + + char *pEnd = NULL; + double value = strtod(pVal->c_str(), &pEnd); + return value; } #if 0 diff --git a/Dip/src/__init__.py b/Dip/src/__init__.py index 7d29d19b..5284146e 100644 --- a/Dip/src/__init__.py +++ b/Dip/src/__init__.py @@ -1,2 +1 @@ - -__import__('pkg_resources').declare_namespace(__name__) 
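Before moving on to the dippy sources, a note on the UtilParameters interface reformatted above: options arrive either as --SECTION:PARAMETER value command-line flags or as name = value lines under a [section] header in a file passed via --param, and GetSetting returns a typed value with a default (section "DECOMP" when none is given, keys matched case-insensitively). The sketch below is hypothetical usage under those assumptions; it presumes a default constructor and the ScanCmdLineArgs/GetSetting signatures shown above, and the parameter names LogLevel and TolZero are invented for illustration.

#include "UtilParameters.h"
#include <cstdio>

// Hypothetical driver, e.g. run as:
//   ./app --param settings.txt --DECOMP:LogLevel 3
int main(int argc, char *argv[]) {
  UtilParameters params;              // assumes a default constructor exists
  params.ScanCmdLineArgs(argc, argv); // consumes --param and --SECTION:NAME flags
  int logLevel = params.GetSetting("LogLevel", 0, "DECOMP");
  double tol = params.GetSetting("TolZero", 1.0e-8, "DECOMP");
  std::printf("LogLevel=%d TolZero=%g\n", logLevel, tol);
  return 0;
}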
+__import__("pkg_resources").declare_namespace(__name__) diff --git a/Dip/src/dippy/DippyDecompAlgo.cpp b/Dip/src/dippy/DippyDecompAlgo.cpp index 2353a453..dea05e1e 100644 --- a/Dip/src/dippy/DippyDecompAlgo.cpp +++ b/Dip/src/dippy/DippyDecompAlgo.cpp @@ -7,148 +7,153 @@ /** * Perform branching * - * This function should populate the (down|up)Branch(LB|UB) vectors with (indices, bound-value) pairs - * which define the branch. + * This function should populate the (down|up)Branch(LB|UB) vectors with + * (indices, bound-value) pairs which define the branch. */ -bool DippyAlgoMixin::chooseBranchSet(DecompAlgo* algo, - std::vector< std::pair >& downBranchLB, - std::vector< std::pair >& downBranchUB, - std::vector< std::pair >& upBranchLB, - std::vector< std::pair >& upBranchUB) -{ - bool ret_val; - - if (!m_utilParam->GetSetting("pyBranchMethod", true)) { - return algo->DecompAlgo::chooseBranchSet(downBranchLB, downBranchUB, - upBranchLB, upBranchUB); - } - - DippyDecompApp* app = (DippyDecompApp*)algo->getDecompApp(); - // copy the current solution into a Python list - const double* xhat = algo->getXhat(); - PyObject* pSolutionList = pyTupleList_FromDoubleArray(xhat, app->m_colList); - // try to call chooseBranchSet on the DipProblem python object - char arg1[] = "chooseBranchSet"; - char arg2[] = "O"; - PyObject* pResult = PyObject_CallMethod(m_pProb, arg1, arg2, pSolutionList); - - if (pResult == NULL) { - //something's gone wrong with the function call, a Python exception has - //been set - throw UtilException("Error calling method prob.chooseBranchSet()", - "chooseBranchSet", "DippyDecompAlgo"); - } - - // need more error checking/assertion setting here - if (pResult == Py_None) { - // if chooseBranchSet returns None, do default branching for this algo - ret_val = algo->DecompAlgo::chooseBranchSet(downBranchLB, downBranchUB, upBranchLB, upBranchUB); - - // Original comment: No branching set was returned. This shouldn't happen - // tkr 11/11/15: Actually, it can happen if the solution is integral, but not feasible. - // This happens sometimes when column generation is halted because of tailoff and - // the solution to the relaxation is feasible. I'm leaving the commented code here for - // posterity - //assert(ret_val == true); - - //if (!ret_val){ - // throw UtilException("No branch set found in prob.chooseBranchSet()", - // "chooseBranchSet", "DippyDecompAlgo"); - //} - - if (downBranchUB.size() > 0) { - PyObject* downBranchVar, * upBranchVar; - pDownLB = PyDict_New(); // Down branch LBs is an empty dictionary - pDownUB = PyDict_New(); - downBranchVar = PyList_GetItem(app->m_colList, downBranchUB[0].first); - PyDict_SetItem(pDownUB, downBranchVar, - PyLong_FromLong(static_cast(round(downBranchUB[0].second)))); - pUpLB = PyDict_New(); - upBranchVar = PyList_GetItem(app->m_colList, upBranchLB[0].first); - PyDict_SetItem(pUpLB, upBranchVar, - PyLong_FromLong(static_cast(round(upBranchLB[0].second)))); - pUpUB = PyDict_New(); // Up branch UBs is an empty dictionary - assert(downBranchVar == upBranchVar); - }else{ - //No branching set was returned. 
Zero out pointers to old branching - //sets - assert(ret_val == false); - pDownLB = NULL; - pDownUB = NULL; - pUpLB = NULL; - pUpUB = NULL; - } - return ret_val; - } else { - // or else, the function should have returned 4 lists of (name, value) tuples - pDownLB = PyTuple_GetItem(pResult, 0); - pDownUB = PyTuple_GetItem(pResult, 1); - pUpLB = PyTuple_GetItem(pResult, 2); - pUpUB = PyTuple_GetItem(pResult, 3); - // copy the python dictionaries into the result vectors - pyColDict_AsPairedVector(pDownLB, downBranchLB, app->m_colIndices); - pyColDict_AsPairedVector(pDownUB, downBranchUB, app->m_colIndices); - pyColDict_AsPairedVector(pUpLB, upBranchLB, app->m_colIndices); - pyColDict_AsPairedVector(pUpUB, upBranchUB, app->m_colIndices); - return true; - } - assert(!PyErr_Occurred()); +bool DippyAlgoMixin::chooseBranchSet( + DecompAlgo *algo, std::vector> &downBranchLB, + std::vector> &downBranchUB, + std::vector> &upBranchLB, + std::vector> &upBranchUB) { + bool ret_val; + + if (!m_utilParam->GetSetting("pyBranchMethod", true)) { + return algo->DecompAlgo::chooseBranchSet(downBranchLB, downBranchUB, + upBranchLB, upBranchUB); + } + + DippyDecompApp *app = (DippyDecompApp *)algo->getDecompApp(); + // copy the current solution into a Python list + const double *xhat = algo->getXhat(); + PyObject *pSolutionList = pyTupleList_FromDoubleArray(xhat, app->m_colList); + // try to call chooseBranchSet on the DipProblem python object + char arg1[] = "chooseBranchSet"; + char arg2[] = "O"; + PyObject *pResult = PyObject_CallMethod(m_pProb, arg1, arg2, pSolutionList); + + if (pResult == NULL) { + // something's gone wrong with the function call, a Python exception has + // been set + throw UtilException("Error calling method prob.chooseBranchSet()", + "chooseBranchSet", "DippyDecompAlgo"); + } + + // need more error checking/assertion setting here + if (pResult == Py_None) { + // if chooseBranchSet returns None, do default branching for this algo + ret_val = algo->DecompAlgo::chooseBranchSet(downBranchLB, downBranchUB, + upBranchLB, upBranchUB); + + // Original comment: No branching set was returned. This shouldn't happen + // tkr 11/11/15: Actually, it can happen if the solution is integral, but + // not feasible. This happens sometimes when column generation is halted + // because of tailoff and the solution to the relaxation is feasible. I'm + // leaving the commented code here for posterity + // assert(ret_val == true); + + // if (!ret_val){ + // throw UtilException("No branch set found in prob.chooseBranchSet()", + // "chooseBranchSet", "DippyDecompAlgo"); + //} + + if (downBranchUB.size() > 0) { + PyObject *downBranchVar, *upBranchVar; + pDownLB = PyDict_New(); // Down branch LBs is an empty dictionary + pDownUB = PyDict_New(); + downBranchVar = PyList_GetItem(app->m_colList, downBranchUB[0].first); + PyDict_SetItem( + pDownUB, downBranchVar, + PyLong_FromLong(static_cast(round(downBranchUB[0].second)))); + pUpLB = PyDict_New(); + upBranchVar = PyList_GetItem(app->m_colList, upBranchLB[0].first); + PyDict_SetItem( + pUpLB, upBranchVar, + PyLong_FromLong(static_cast(round(upBranchLB[0].second)))); + pUpUB = PyDict_New(); // Up branch UBs is an empty dictionary + assert(downBranchVar == upBranchVar); + } else { + // No branching set was returned. 
Zero out pointers to old branching + // sets + assert(ret_val == false); + pDownLB = NULL; + pDownUB = NULL; + pUpLB = NULL; + pUpUB = NULL; + } + return ret_val; + } else { + // or else, the function should have returned 4 lists of (name, value) + // tuples + pDownLB = PyTuple_GetItem(pResult, 0); + pDownUB = PyTuple_GetItem(pResult, 1); + pUpLB = PyTuple_GetItem(pResult, 2); + pUpUB = PyTuple_GetItem(pResult, 3); + // copy the python dictionaries into the result vectors + pyColDict_AsPairedVector(pDownLB, downBranchLB, app->m_colIndices); + pyColDict_AsPairedVector(pDownUB, downBranchUB, app->m_colIndices); + pyColDict_AsPairedVector(pUpLB, upBranchLB, app->m_colIndices); + pyColDict_AsPairedVector(pUpUB, upBranchUB, app->m_colIndices); + return true; + } + assert(!PyErr_Occurred()); } -void DippyAlgoMixin::postProcessBranch(DecompAlgo* algo, - DecompStatus decompStatus) -{ - PyObject* pOutput = PyList_New(0); - - if (!m_utilParam->GetSetting("pyPostProcessBranch", true)) { - return; - } - - AlpsDecompTreeNode* node = (AlpsDecompTreeNode*)algo->getCurrentNode(); - double quality = node->getQuality(); - - if (pDownLB != NULL) { - addTupleToPyList(pOutput, PyUnicode_FromString("pDownLB"), pDownLB); - } - - if (pDownUB != NULL) { - addTupleToPyList(pOutput, PyUnicode_FromString("pDownUB"), pDownUB); - } - - if (pUpLB != NULL) { - addTupleToPyList(pOutput, PyUnicode_FromString("pUpLB"), pUpLB); - } - - if (pUpUB != NULL) { - addTupleToPyList(pOutput, PyUnicode_FromString("pUpUB"), pUpUB); - } - - addTupleToPyList(pOutput, PyUnicode_FromString("nodeIndex"), PyLong_FromLong(algo->getNodeIndex())); - addTupleToPyList(pOutput, PyUnicode_FromString("nodeQuality"), PyFloat_FromDouble(quality)); - char arg1[] = "postProcessBranch"; - char arg2[] = "O"; - PyObject* pResult = PyObject_CallMethod(m_pProb, arg1, arg2, pOutput); - if (pResult == NULL){ - throw UtilException("postProcessNode call failed.", "postProcessNode", "DippyAlgoMixin"); - } - - assert(!PyErr_Occurred()); +void DippyAlgoMixin::postProcessBranch(DecompAlgo *algo, + DecompStatus decompStatus) { + PyObject *pOutput = PyList_New(0); + + if (!m_utilParam->GetSetting("pyPostProcessBranch", true)) { + return; + } + + AlpsDecompTreeNode *node = (AlpsDecompTreeNode *)algo->getCurrentNode(); + double quality = node->getQuality(); + + if (pDownLB != NULL) { + addTupleToPyList(pOutput, PyUnicode_FromString("pDownLB"), pDownLB); + } + + if (pDownUB != NULL) { + addTupleToPyList(pOutput, PyUnicode_FromString("pDownUB"), pDownUB); + } + + if (pUpLB != NULL) { + addTupleToPyList(pOutput, PyUnicode_FromString("pUpLB"), pUpLB); + } + + if (pUpUB != NULL) { + addTupleToPyList(pOutput, PyUnicode_FromString("pUpUB"), pUpUB); + } + + addTupleToPyList(pOutput, PyUnicode_FromString("nodeIndex"), + PyLong_FromLong(algo->getNodeIndex())); + addTupleToPyList(pOutput, PyUnicode_FromString("nodeQuality"), + PyFloat_FromDouble(quality)); + char arg1[] = "postProcessBranch"; + char arg2[] = "O"; + PyObject *pResult = PyObject_CallMethod(m_pProb, arg1, arg2, pOutput); + if (pResult == NULL) { + throw UtilException("postProcessNode call failed.", "postProcessNode", + "DippyAlgoMixin"); + } + + assert(!PyErr_Occurred()); } -void DippyAlgoMixin::postProcessNode(DecompAlgo* algo, DecompStatus decompStatus) -{ - if (!m_utilParam->GetSetting("pyPostProcessNode", true)) { - return; - } - - PyObject* pOutput = pyTupleList_FromNode(algo, decompStatus); - char arg1[] = "postProcessNode"; - char arg2[] = "O"; - PyObject* pResult = PyObject_CallMethod(m_pProb, arg1, arg2, 
pOutput); - if (pResult == NULL){ - throw UtilException("postProcessNode call failed.", "postProcessNode", "DippyAlgoMixin"); - } - - assert(!PyErr_Occurred()); +void DippyAlgoMixin::postProcessNode(DecompAlgo *algo, + DecompStatus decompStatus) { + if (!m_utilParam->GetSetting("pyPostProcessNode", true)) { + return; + } + + PyObject *pOutput = pyTupleList_FromNode(algo, decompStatus); + char arg1[] = "postProcessNode"; + char arg2[] = "O"; + PyObject *pResult = PyObject_CallMethod(m_pProb, arg1, arg2, pOutput); + if (pResult == NULL) { + throw UtilException("postProcessNode call failed.", "postProcessNode", + "DippyAlgoMixin"); + } + + assert(!PyErr_Occurred()); } - diff --git a/Dip/src/dippy/DippyDecompApp.cpp b/Dip/src/dippy/DippyDecompApp.cpp index a5bf2e29..71632d34 100644 --- a/Dip/src/dippy/DippyDecompApp.cpp +++ b/Dip/src/dippy/DippyDecompApp.cpp @@ -1,7 +1,7 @@ #include "Python.h" -#include "DecompVar.h" #include "DecompAlgoC.h" +#include "DecompVar.h" #include "DippyDecompApp.h" #include "DippyDecompCut.h" #include "DippyPythonUtils.h" @@ -14,302 +14,303 @@ /** * Called to create the core and relaxation models */ -void DippyDecompApp::createModels() -{ - int i, len; - string name; - // create the core model - DecompConstraintSet* modelCore = new DecompConstraintSet(); - // gets the master problem model - char masterTuple[] = "getMasterAsTuple"; - PyObject* pMasterAsTuple = PyObject_CallMethod(m_pProb, masterTuple, NULL); - - if (pMasterAsTuple == NULL) { - throw UtilException("Error calling method prob.getMasterAsTuple()", - "createModels", "DippyDecompApp"); - } - - PyObject* pObjective = PyTuple_GetItem(pMasterAsTuple, 0); - PyObject* pRowList = PyTuple_GetItem(pMasterAsTuple, 1); - PyObject* pColList = PyTuple_GetItem(pMasterAsTuple, 2); - m_rowList = pRowList; - Py_XINCREF(m_rowList); - m_numCols = PyObject_Length(pColList); - m_colList = pColList; - Py_XINCREF(m_colList); - int numRows = PyObject_Length(pRowList); - PyObject* pRow, *pRowName, *pRowLb, *pRowUb; - double lb, ub; - - for (int i = 0; i < numRows; i++) { +void DippyDecompApp::createModels() { + int i, len; + string name; + // create the core model + DecompConstraintSet *modelCore = new DecompConstraintSet(); + // gets the master problem model + char masterTuple[] = "getMasterAsTuple"; + PyObject *pMasterAsTuple = PyObject_CallMethod(m_pProb, masterTuple, NULL); + + if (pMasterAsTuple == NULL) { + throw UtilException("Error calling method prob.getMasterAsTuple()", + "createModels", "DippyDecompApp"); + } + + PyObject *pObjective = PyTuple_GetItem(pMasterAsTuple, 0); + PyObject *pRowList = PyTuple_GetItem(pMasterAsTuple, 1); + PyObject *pColList = PyTuple_GetItem(pMasterAsTuple, 2); + m_rowList = pRowList; + Py_XINCREF(m_rowList); + m_numCols = PyObject_Length(pColList); + m_colList = pColList; + Py_XINCREF(m_colList); + int numRows = PyObject_Length(pRowList); + PyObject *pRow, *pRowName, *pRowLb, *pRowUb; + double lb, ub; + + for (int i = 0; i < numRows; i++) { + pRow = PyList_GetItem(pRowList, i); + char getName[] = "getName"; + pRowName = PyObject_CallMethod(pRow, getName, NULL); + + if (pRowName == NULL) { + throw UtilException("Error calling method row.getName()", "createModels", + "DippyDecompApp"); + } + char getLb[] = "getLb"; + pRowLb = PyObject_CallMethod(pRow, getLb, NULL); + + if (pRowLb == NULL) { + throw UtilException("Error calling method row.getLb()", "createModels", + "DippyDecompApp"); + } + char getUb[] = "getUb"; + pRowUb = PyObject_CallMethod(pRow, getUb, NULL); + + if (pRowUb == NULL) { + 
throw UtilException("Error calling method row.getUb()", "createModels", + "DippyDecompApp"); + } + + name = PyBytes_AsString( + PyUnicode_AsEncodedString(pRowName, "UTF-8", "strict")); + + if (pRowLb == Py_None) { + lb = -m_infinity; + } else { + lb = PyFloat_AsDouble(pRowLb); + } + + if (pRowUb == Py_None) { + ub = m_infinity; + } else { + ub = PyFloat_AsDouble(pRowUb); + } + + modelCore->rowNames.push_back(name); + modelCore->rowLB.push_back(lb); + modelCore->rowUB.push_back(ub); + m_rowIndices[pRow] = i; // Don't need to increase reference count here as + // m_rowList references pRow + } + + PyObject *pCol, *pColName, *pColLb, *pColUb, *pIsInt; + + for (int j = 0; j < m_numCols; j++) { + pCol = PyList_GetItem(pColList, j); + char getName[] = "getName"; + pColName = PyObject_CallMethod(pCol, getName, NULL); + + if (pColName == NULL) { + throw UtilException("Error calling method col.getName()", "createModels", + "DippyDecompApp"); + } + char getLb[] = "getLb"; + pColLb = PyObject_CallMethod(pCol, getLb, NULL); + + if (pColLb == NULL) { + throw UtilException("Error calling method col.getLb()", "createModels", + "DippyDecompApp"); + } + char getUb[] = "getUb"; + pColUb = PyObject_CallMethod(pCol, getUb, NULL); + + if (pColUb == NULL) { + throw UtilException("Error calling method col.getUb()", "createModels", + "DippyDecompApp"); + } + char isInteger[] = "isInteger"; + pIsInt = PyObject_CallMethod(pCol, isInteger, NULL); + + if (pIsInt == NULL) { + throw UtilException("Error calling method col.isInteger()", + "createModels", "DippyDecompApp"); + } + + name = PyBytes_AsString( + PyUnicode_AsEncodedString(pColName, "UTF-8", "strict")); + + if (pColLb == Py_None) { + lb = -m_infinity; + } else { + lb = PyFloat_AsDouble(pColLb); + } + + if (pColUb == Py_None) { + ub = m_infinity; + } else { + ub = PyFloat_AsDouble(pColUb); + } + + modelCore->colNames.push_back(name); + modelCore->colLB.push_back(lb); + modelCore->colUB.push_back(ub); + + if (PyObject_IsTrue(pIsInt)) { + modelCore->integerVars.push_back(j); + } + + m_colIndices[pCol] = j; // Don't need to increase reference count here + // as m_rowList references pCol + } + + // set objective coefficients + double *obj = new double[m_numCols]; + UtilFillN(obj, m_numCols, 0.0); + PyObject *pObjKeys = PyDict_Keys(pObjective); + PyObject *pCoeff; + + for (int j = 0; j < PyObject_Length(pObjKeys); j++) { + pCol = PyList_GetItem(pObjKeys, j); + pCoeff = PyDict_GetItem(pObjective, pCol); + obj[m_colIndices[pCol]] = PyFloat_AsDouble(pCoeff); + } + + setModelObjective(obj, m_numCols); + // set constraint matrix + modelCore->M = + pyConstraints_AsPackedMatrix(pRowList, m_rowIndices, m_colIndices); + modelCore->M->setDimensions(modelCore->rowLB.size(), modelCore->colLB.size()); + // subproblems + char getRelaxsAsDict[] = "getRelaxsAsDict"; + PyObject *pRelaxedDict = PyObject_CallMethod(m_pProb, getRelaxsAsDict, NULL); + + if (pRelaxedDict == NULL) { + throw UtilException("Error calling method prob.getRelaxsAsDict()", + "createModels", "DippyDecompApp"); + } + + int *masterOnly = new int[m_numCols]; + + if (!masterOnly) { + throw UtilExceptionMemory("createModels", "DecompApp"); + } + + UtilFillN(masterOnly, m_numCols, 1); + + int nRelaxed = 0; + + if (pRelaxedDict != Py_None) { + nRelaxed = PyObject_Length(pRelaxedDict); + } + + // we have a list of relaxations + m_relaxedKeys = PyDict_Keys(pRelaxedDict); + Py_XINCREF(m_relaxedKeys); + PyObject *pKey, *pRelax; + + for (int p = 0; p < nRelaxed; p++) { + DecompConstraintSet *modelRelax = new 
DecompConstraintSet(); + // each relaxation is a LpProblem + pKey = PyList_GetItem(m_relaxedKeys, p); + pRelax = PyDict_GetItem(pRelaxedDict, pKey); + m_relaxIndices[pKey] = p; // Don't need to increase reference count here + // as m_relaxedKey references pKey + char getRelaxAsTuple[] = "getRelaxAsTuple"; + char O[] = "O"; + PyObject *pRelaxAsTuple = + PyObject_CallMethod(m_pProb, getRelaxAsTuple, O, pRelax); + + if (pRelaxAsTuple == NULL) { + throw UtilException("Error calling method prob.getRelaxAsTuple()", + "createModels", "DippyDecompApp"); + } + + // row names + pRowList = PyTuple_GetItem(pRelaxAsTuple, 0); + pColList = PyTuple_GetItem(pRelaxAsTuple, 1); + numRows = PyObject_Length(pRowList); + map relaxRowIndices; + + for (int i = 0; i < numRows; i++) { pRow = PyList_GetItem(pRowList, i); char getName[] = "getName"; pRowName = PyObject_CallMethod(pRow, getName, NULL); if (pRowName == NULL) { - throw UtilException("Error calling method row.getName()", - "createModels", "DippyDecompApp"); + throw UtilException("Error calling method row.getName()", + "createModels", "DippyDecompApp"); } char getLb[] = "getLb"; - pRowLb = PyObject_CallMethod(pRow, getLb, NULL); + pRowLb = PyObject_CallMethod(pRow, getLb, NULL); if (pRowLb == NULL) { - throw UtilException("Error calling method row.getLb()", - "createModels", "DippyDecompApp"); + throw UtilException("Error calling method row.getLb()", "createModels", + "DippyDecompApp"); } char getUb[] = "getUb"; - pRowUb = PyObject_CallMethod(pRow, getUb, NULL); + pRowUb = PyObject_CallMethod(pRow, getUb, NULL); if (pRowUb == NULL) { - throw UtilException("Error calling method row.getUb()", - "createModels", "DippyDecompApp"); + throw UtilException("Error calling method row.getUb()", "createModels", + "DippyDecompApp"); } - name = PyBytes_AsString(PyUnicode_AsEncodedString(pRowName, "UTF-8", "strict")); + name = PyBytes_AsString( + PyUnicode_AsEncodedString(pRowName, "UTF-8", "strict")); if (pRowLb == Py_None) { - lb = -m_infinity; + lb = -m_infinity; } else { - lb = PyFloat_AsDouble(pRowLb); + lb = PyFloat_AsDouble(pRowLb); } if (pRowUb == Py_None) { - ub = m_infinity; + ub = m_infinity; } else { - ub = PyFloat_AsDouble(pRowUb); + ub = PyFloat_AsDouble(pRowUb); } - modelCore->rowNames.push_back(name); - modelCore->rowLB.push_back(lb); - modelCore->rowUB.push_back(ub); - m_rowIndices[pRow] = i; // Don't need to increase reference count here as m_rowList - // references pRow - } - - PyObject* pCol, *pColName, *pColLb, *pColUb, *pIsInt; - - for (int j = 0; j < m_numCols; j++) { - pCol = PyList_GetItem(pColList, j); - char getName[] = "getName"; - pColName = PyObject_CallMethod(pCol, getName, NULL); + modelRelax->rowNames.push_back(name); + modelRelax->rowLB.push_back(lb); + modelRelax->rowUB.push_back(ub); + relaxRowIndices[pRow] = i; + } - if (pColName == NULL) { - throw UtilException("Error calling method col.getName()", - "createModels", "DippyDecompApp"); - } - char getLb[] = "getLb"; - pColLb = PyObject_CallMethod(pCol, getLb, NULL); + // get the constraint matrix for this relaxation + modelRelax->M = + pyConstraints_AsPackedMatrix(pRowList, relaxRowIndices, m_colIndices); - if (pColLb == NULL) { - throw UtilException("Error calling method col.getLb()", - "createModels", "DippyDecompApp"); - } - char getUb[] = "getUb"; - pColUb = PyObject_CallMethod(pCol, getUb, NULL); + // set all cols at their lower bounds + for (int j = 0; j < modelCore->colLB.size(); j++) { + modelRelax->colLB.push_back(modelCore->colLB[j]); + 
modelRelax->colUB.push_back(modelCore->colLB[j]); + } - if (pColUb == NULL) { - throw UtilException("Error calling method col.getUb()", - "createModels", "DippyDecompApp"); - } - char isInteger[] = "isInteger"; - pIsInt = PyObject_CallMethod(pCol, isInteger, NULL); + // get active cols + int cols = PyObject_Length(pColList); + int index; - if (pIsInt == NULL) { - throw UtilException("Error calling method col.isInteger()", - "createModels", "DippyDecompApp"); - } - - name = PyBytes_AsString(PyUnicode_AsEncodedString(pColName, "UTF-8", "strict")); - - if (pColLb == Py_None) { - lb = -m_infinity; - } else { - lb = PyFloat_AsDouble(pColLb); - } - - if (pColUb == Py_None) { - ub = m_infinity; - } else { - ub = PyFloat_AsDouble(pColUb); - } - - modelCore->colNames.push_back(name); - modelCore->colLB.push_back(lb); - modelCore->colUB.push_back(ub); - - if (PyObject_IsTrue(pIsInt)) { - modelCore->integerVars.push_back(j); - } - - m_colIndices[pCol] = j; // Don't need to increase reference count here - // as m_rowList references pCol - } - - // set objective coefficients - double* obj = new double[m_numCols]; - UtilFillN(obj, m_numCols, 0.0); - PyObject* pObjKeys = PyDict_Keys(pObjective); - PyObject* pCoeff; - - for (int j = 0; j < PyObject_Length(pObjKeys); j++) { - pCol = PyList_GetItem(pObjKeys, j); - pCoeff = PyDict_GetItem(pObjective, pCol); - obj[m_colIndices[pCol]] = PyFloat_AsDouble(pCoeff); - } - - setModelObjective(obj, m_numCols); - // set constraint matrix - modelCore->M = pyConstraints_AsPackedMatrix(pRowList, m_rowIndices, - m_colIndices); - modelCore->M->setDimensions(modelCore->rowLB.size(), - modelCore->colLB.size()); - // subproblems - char getRelaxsAsDict[] = "getRelaxsAsDict"; - PyObject* pRelaxedDict = PyObject_CallMethod(m_pProb, getRelaxsAsDict, NULL); - - if (pRelaxedDict == NULL) { - throw UtilException("Error calling method prob.getRelaxsAsDict()", - "createModels", "DippyDecompApp"); - } - - int* masterOnly = new int[m_numCols]; - - if (!masterOnly) { - throw UtilExceptionMemory("createModels", "DecompApp"); - } - - UtilFillN(masterOnly, m_numCols, 1); - - int nRelaxed = 0; - - if (pRelaxedDict != Py_None) { - nRelaxed = PyObject_Length(pRelaxedDict); - } - - // we have a list of relaxations - m_relaxedKeys = PyDict_Keys(pRelaxedDict); - Py_XINCREF(m_relaxedKeys); - PyObject* pKey, *pRelax; - - for (int p = 0; p < nRelaxed; p++) { - DecompConstraintSet* modelRelax = new DecompConstraintSet(); - // each relaxation is a LpProblem - pKey = PyList_GetItem(m_relaxedKeys, p); - pRelax = PyDict_GetItem(pRelaxedDict, pKey); - m_relaxIndices[pKey] = p; // Don't need to increase reference count here - //as m_relaxedKey references pKey - char getRelaxAsTuple[] = "getRelaxAsTuple"; - char O[] = "O"; - PyObject* pRelaxAsTuple = PyObject_CallMethod(m_pProb,getRelaxAsTuple, - O, pRelax); - - if (pRelaxAsTuple == NULL) { - throw UtilException("Error calling method prob.getRelaxAsTuple()", - "createModels", "DippyDecompApp"); - } - - // row names - pRowList = PyTuple_GetItem(pRelaxAsTuple, 0); - pColList = PyTuple_GetItem(pRelaxAsTuple, 1); - numRows = PyObject_Length(pRowList); - map relaxRowIndices; - - for (int i = 0; i < numRows; i++) { - pRow = PyList_GetItem(pRowList, i); - char getName[] = "getName"; - pRowName = PyObject_CallMethod(pRow, getName, NULL); - - if (pRowName == NULL) { - throw UtilException("Error calling method row.getName()", - "createModels", "DippyDecompApp"); - } - char getLb[] = "getLb"; - pRowLb = PyObject_CallMethod(pRow, getLb, NULL); - - if (pRowLb == NULL) 
{ - throw UtilException("Error calling method row.getLb()", - "createModels", "DippyDecompApp"); - } - char getUb[] = "getUb"; - pRowUb = PyObject_CallMethod(pRow, getUb, NULL); - - if (pRowUb == NULL) { - throw UtilException("Error calling method row.getUb()", - "createModels", "DippyDecompApp"); - } - - name = PyBytes_AsString(PyUnicode_AsEncodedString(pRowName, "UTF-8", "strict")); - - if (pRowLb == Py_None) { - lb = -m_infinity; - } else { - lb = PyFloat_AsDouble(pRowLb); - } - - if (pRowUb == Py_None) { - ub = m_infinity; - } else { - ub = PyFloat_AsDouble(pRowUb); - } - - modelRelax->rowNames.push_back(name); - modelRelax->rowLB.push_back(lb); - modelRelax->rowUB.push_back(ub); - relaxRowIndices[pRow] = i; - } - - // get the constraint matrix for this relaxation - modelRelax->M = pyConstraints_AsPackedMatrix(pRowList, relaxRowIndices, - m_colIndices); + for (int j = 0; j < cols; j++) { + pCol = PyList_GetItem(pColList, j); + index = m_colIndices[pCol]; - // set all cols at their lower bounds - for (int j = 0; j < modelCore->colLB.size(); j++) { - modelRelax->colLB.push_back(modelCore->colLB[j]); - modelRelax->colUB.push_back(modelCore->colLB[j]); + if ((index < 0) || (index >= m_colIndices.size())) { + throw UtilException("Bad index for " + name, "createModels", + "DippyDecompApp"); } - // get active cols - int cols = PyObject_Length(pColList); - int index; + modelRelax->colUB[index] = modelCore->colUB[index]; + modelRelax->activeColumns.push_back(index); + masterOnly[index] = 0; + } - for (int j = 0; j < cols; j++) { - pCol = PyList_GetItem(pColList, j); - index = m_colIndices[pCol]; + modelRelax->M->setDimensions(modelRelax->rowLB.size(), + modelRelax->colLB.size()); - if ( (index < 0) || (index >= m_colIndices.size()) ) { - throw UtilException("Bad index for " + name, "createModels", - "DippyDecompApp"); - } + // copy integer vars (from master prob) + for (int j = 0; j < modelCore->integerVars.size(); j++) { + modelRelax->integerVars.push_back(modelCore->integerVars[j]); + } - modelRelax->colUB[index] = modelCore->colUB[index]; - modelRelax->activeColumns.push_back(index); - masterOnly[index] = 0; - } + setModelRelax(modelRelax, "BLOCK", p); + } - modelRelax->M->setDimensions(modelRelax->rowLB.size(), - modelRelax->colLB.size()); + for (i = 0; i < m_numCols; i++) { + if (masterOnly[i]) { + modelCore->masterOnlyCols.push_back(i); + } + } - // copy integer vars (from master prob) - for (int j = 0; j < modelCore->integerVars.size(); j++) { - modelRelax->integerVars.push_back(modelCore->integerVars[j]); - } + printf("Num master-only cols: %d\n", modelCore->masterOnlyCols.size()); - setModelRelax(modelRelax, "BLOCK", p); - } + // set the core problem + setModelCore(modelCore, "CORE"); + UTIL_DELARR(masterOnly); - for (i = 0; i < m_numCols; i++) { - if (masterOnly[i]){ - modelCore->masterOnlyCols.push_back(i); - } - } - - printf("Num master-only cols: %d\n", modelCore->masterOnlyCols.size()); - - // set the core problem - setModelCore(modelCore, "CORE"); - UTIL_DELARR(masterOnly); - - assert(!PyErr_Occurred()); + assert(!PyErr_Occurred()); } /** @@ -322,90 +323,88 @@ void DippyDecompApp::createModels() * return the status of the subproblem solver. 
*/ DecompSolverStatus DippyDecompApp::solveRelaxed(const int whichBlock, - const double* redCostX, - const double convexDual, - DecompVarList& varList) -{ - if (!m_pySolveRelaxed) { - return DecompSolStatNoSolution; - } - - PyObject* pRelaxKey = PyList_GetItem(m_relaxedKeys, whichBlock); - PyObject* pRedCostList = pyTupleList_FromDoubleArray(redCostX, m_colList); - PyObject* pConvexDual = PyFloat_FromDouble(convexDual); - // call solveRelaxed on DipProblem - - char solveRelaxed[] = "solveRelaxed"; - char OOd[] = "OOd"; - PyObject* pStatandVarList = PyObject_CallMethod(m_pProb, solveRelaxed, OOd, - pRelaxKey, - pRedCostList, - pConvexDual); - - Py_DECREF(pRedCostList); - Py_DECREF(pConvexDual); - - if ( (pStatandVarList == NULL) || (pStatandVarList == Py_None) ){ - throw UtilException("Error calling method prob.solveRelaxed()", "solveRelaxed", - "DippyDecompApp"); - } - - // [status, varList] = relaxed_solver(...) - PyObject * pStatus = PyTuple_GetItem(pStatandVarList, 0); - - int cStatus = PyLong_AsLong(pStatus); - - DecompSolverStatus status = (DecompSolverStatus)cStatus; - - PyObject * pVarList = PyTuple_GetItem(pStatandVarList, 1); - - int nVars = PyObject_Length(pVarList); - - // In the new design, we need to allow the possibility that the user will solve - // the problem exactly, but not find any solutions with reduced costs zero - // The below is is commented out and left in the source for posterity - // tkr 11/11/15 - //if (nVars == 0) { - // throw UtilException("Empty variable list", "solveRelaxed", "DippyDecompApp"); - //} - - // solveRelaxed returns 3-tuples (cost, reduced cost, dictionary of (variable, value) pairs) - // We can use these to construct a C++ DecompVar objects - double cost, rc; - PyObject* pTuple, *pDict, *pKeys, *pCol; - string name; - double value; - - for (int j = 0; j < nVars; j++) { - pTuple = PySequence_GetItem(pVarList, j); - cost = PyFloat_AsDouble(PyTuple_GetItem(pTuple, 0)); - rc = PyFloat_AsDouble(PyTuple_GetItem(pTuple, 1)); - - pDict = PyTuple_GetItem(pTuple, 2); - pKeys = PyDict_Keys(pDict); - vector varInds; - vector varVals; - - for (int n = 0; n < PyObject_Length(pDict); n++) { - pCol = PyList_GetItem(pKeys, n); - value = PyFloat_AsDouble(PyDict_GetItem(pDict, pCol)); - varInds.push_back(m_colIndices[pCol]); - varVals.push_back(value); - } - - Py_DECREF(pKeys); - Py_DECREF(pTuple); - - DecompVar* var = new DecompVar(varInds, varVals, rc, cost); - var->setBlockId(whichBlock); - varList.push_back(var); - } - - Py_DECREF(pStatandVarList); - - assert(!PyErr_Occurred()); - - return status; + const double *redCostX, + const double convexDual, + DecompVarList &varList) { + if (!m_pySolveRelaxed) { + return DecompSolStatNoSolution; + } + + PyObject *pRelaxKey = PyList_GetItem(m_relaxedKeys, whichBlock); + PyObject *pRedCostList = pyTupleList_FromDoubleArray(redCostX, m_colList); + PyObject *pConvexDual = PyFloat_FromDouble(convexDual); + // call solveRelaxed on DipProblem + + char solveRelaxed[] = "solveRelaxed"; + char OOd[] = "OOd"; + PyObject *pStatandVarList = PyObject_CallMethod( + m_pProb, solveRelaxed, OOd, pRelaxKey, pRedCostList, pConvexDual); + + Py_DECREF(pRedCostList); + Py_DECREF(pConvexDual); + + if ((pStatandVarList == NULL) || (pStatandVarList == Py_None)) { + throw UtilException("Error calling method prob.solveRelaxed()", + "solveRelaxed", "DippyDecompApp"); + } + + // [status, varList] = relaxed_solver(...) 
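// (Shape of the expected return value, as unpacked below: a 2-tuple whose
//  first element is an integer DecompSolverStatus and whose second is a list
//  of (cost, reducedCost, {variable: value}) tuples, one per generated column.)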
+ PyObject *pStatus = PyTuple_GetItem(pStatandVarList, 0); + + int cStatus = PyLong_AsLong(pStatus); + + DecompSolverStatus status = (DecompSolverStatus)cStatus; + + PyObject *pVarList = PyTuple_GetItem(pStatandVarList, 1); + + int nVars = PyObject_Length(pVarList); + + // In the new design, we need to allow the possibility that the user will + // solve the problem exactly, but not find any solutions with reduced costs + // zero The below is is commented out and left in the source for posterity tkr + // 11/11/15 + // if (nVars == 0) { + // throw UtilException("Empty variable list", "solveRelaxed", + // "DippyDecompApp"); + //} + + // solveRelaxed returns 3-tuples (cost, reduced cost, dictionary of (variable, + // value) pairs) We can use these to construct a C++ DecompVar objects + double cost, rc; + PyObject *pTuple, *pDict, *pKeys, *pCol; + string name; + double value; + + for (int j = 0; j < nVars; j++) { + pTuple = PySequence_GetItem(pVarList, j); + cost = PyFloat_AsDouble(PyTuple_GetItem(pTuple, 0)); + rc = PyFloat_AsDouble(PyTuple_GetItem(pTuple, 1)); + + pDict = PyTuple_GetItem(pTuple, 2); + pKeys = PyDict_Keys(pDict); + vector varInds; + vector varVals; + + for (int n = 0; n < PyObject_Length(pDict); n++) { + pCol = PyList_GetItem(pKeys, n); + value = PyFloat_AsDouble(PyDict_GetItem(pDict, pCol)); + varInds.push_back(m_colIndices[pCol]); + varVals.push_back(value); + } + + Py_DECREF(pKeys); + Py_DECREF(pTuple); + + DecompVar *var = new DecompVar(varInds, varVals, rc, cost); + var->setBlockId(whichBlock); + varList.push_back(var); + } + + Py_DECREF(pStatandVarList); + + assert(!PyErr_Occurred()); + + return status; } /** @@ -413,34 +412,35 @@ DecompSolverStatus DippyDecompApp::solveRelaxed(const int whichBlock, * * Called by DIP, we interface with Python */ -bool DippyDecompApp::APPisUserFeasible(const double* x, const int n_cols, const double tolZero) -{ - assert(n_cols == m_modelCore.getModel()->getColNames().size()); - PyObject* pSolutionList = pyTupleList_FromDoubleArray(x, m_colList); - PyObject* pTolZero = PyFloat_FromDouble(tolZero); - - if (!m_pyIsUserFeasible) { - return true; - } - char isUserFeasible[] = "isUserFeasible"; - char Od[] = "Od"; - PyObject* pResult = PyObject_CallMethod(m_pProb, isUserFeasible, Od, pSolutionList, pTolZero); - - if (pResult == NULL) { - throw UtilException("Error calling method prob.isUserFeasible()", "APPisUserFeasible", - "DippyDecompApp"); - } - - // This should not happen as having isUserFeasible present but not returning a boolean is - // not good - if (pResult == Py_None) { - // method exists, but not implemented, return true - return true; - } - - assert(!PyErr_Occurred()); - - return (bool)PyObject_IsTrue(pResult); +bool DippyDecompApp::APPisUserFeasible(const double *x, const int n_cols, + const double tolZero) { + assert(n_cols == m_modelCore.getModel()->getColNames().size()); + PyObject *pSolutionList = pyTupleList_FromDoubleArray(x, m_colList); + PyObject *pTolZero = PyFloat_FromDouble(tolZero); + + if (!m_pyIsUserFeasible) { + return true; + } + char isUserFeasible[] = "isUserFeasible"; + char Od[] = "Od"; + PyObject *pResult = + PyObject_CallMethod(m_pProb, isUserFeasible, Od, pSolutionList, pTolZero); + + if (pResult == NULL) { + throw UtilException("Error calling method prob.isUserFeasible()", + "APPisUserFeasible", "DippyDecompApp"); + } + + // This should not happen as having isUserFeasible present but not returning a + // boolean is not good + if (pResult == Py_None) { + // method exists, but not implemented, return true + 
return true; + } + + assert(!PyErr_Occurred()); + + return (bool)PyObject_IsTrue(pResult); } /** @@ -449,69 +449,76 @@ bool DippyDecompApp::APPisUserFeasible(const double* x, const int n_cols, const * Called by DIP, we interface with Python */ -int DippyDecompApp::generateCuts(const double* x, DecompCutList& cutList) -{ - if (!m_pyGenerateCuts) { - return 0; - } - - // PyObject *pSolutionList = pyTupleList_FromDoubleArray(x, m_colList); - // MO (28/2/2012) - Don't need this anymore as solution is contained within node - PyObject* pPackagedNode = pyTupleList_FromNode(getDecompAlgo(), STAT_FEASIBLE); - char generateCuts[] = "generateCuts"; - char O[] = "O"; - PyObject* pCutList = PyObject_CallMethod(m_pProb, generateCuts, O, pPackagedNode); - - if (pCutList == NULL) { - throw UtilException("Error calling method prob.generateCuts()", "generateCuts", +int DippyDecompApp::generateCuts(const double *x, DecompCutList &cutList) { + if (!m_pyGenerateCuts) { + return 0; + } + + // PyObject *pSolutionList = pyTupleList_FromDoubleArray(x, m_colList); + // MO (28/2/2012) - Don't need this anymore as solution is contained within + // node + PyObject *pPackagedNode = + pyTupleList_FromNode(getDecompAlgo(), STAT_FEASIBLE); + char generateCuts[] = "generateCuts"; + char O[] = "O"; + PyObject *pCutList = + PyObject_CallMethod(m_pProb, generateCuts, O, pPackagedNode); + + if (pCutList == NULL) { + throw UtilException("Error calling method prob.generateCuts()", + "generateCuts", "DippyDecompApp"); + } + + // This should never happen, pyGenerateCuts should be set to false in dippy.py + if (pCutList == Py_None) + // method exists, but is not implemented, return 0 + { + return 0; + } + + // generateCuts returns constraints, i.e., dictionary of (variable, value) + // pairs also with name, lb, ub + const int len = PyObject_Length(pCutList); + // loop through each cut + // We can use these to construct a C++ DecompVar objects + double lb, ub; + PyObject *pRow, *pLb, *pUb; + string name; + double value; + + for (int i = 0; i < len; i++) { + pRow = PySequence_GetItem(pCutList, i); + char getLb[] = "getLb"; + pLb = PyObject_CallMethod(pRow, getLb, NULL); + + if (pLb == NULL) { + throw UtilException("Error calling method row.getLb()", "generateCuts", "DippyDecompApp"); - } - - // This should never happen, pyGenerateCuts should be set to false in dippy.py - if (pCutList == Py_None) - // method exists, but is not implemented, return 0 - { - return 0; - } - - // generateCuts returns constraints, i.e., dictionary of (variable, value) pairs also with name, lb, ub - const int len = PyObject_Length(pCutList); - // loop through each cut - // We can use these to construct a C++ DecompVar objects - double lb, ub; - PyObject* pRow, *pLb, *pUb; - string name; - double value; - - for (int i = 0; i < len; i++) { - pRow = PySequence_GetItem(pCutList, i); - char getLb[] = "getLb"; - pLb = PyObject_CallMethod(pRow, getLb, NULL); - - if (pLb == NULL) { - throw UtilException("Error calling method row.getLb()", "generateCuts", "DippyDecompApp"); - } - char getUb[] = "getUb"; - pUb = PyObject_CallMethod(pRow, getUb, NULL); - - if (pUb == NULL) { - throw UtilException("Error calling method row.getUb()", "generateCuts", "DippyDecompApp"); - } - - lb = (pLb == Py_None) ? -m_infinity : PyFloat_AsDouble(pLb); - ub = (pUb == Py_None) ? 
m_infinity : PyFloat_AsDouble(pUb); - int* varInds = NULL; - double* varVals = NULL; - int numPairs = pyColDict_AsPackedArrays(pRow, m_colIndices, &varInds, &varVals); - assert(numPairs == PyObject_Length(pRow)); - // arrays are now owned by the Cut object - DippyDecompCut* cut = new DippyDecompCut(lb, ub, numPairs, varInds, varVals); - cutList.push_back(cut); - } + } + char getUb[] = "getUb"; + pUb = PyObject_CallMethod(pRow, getUb, NULL); - assert(!PyErr_Occurred()); - - return len; + if (pUb == NULL) { + throw UtilException("Error calling method row.getUb()", "generateCuts", + "DippyDecompApp"); + } + + lb = (pLb == Py_None) ? -m_infinity : PyFloat_AsDouble(pLb); + ub = (pUb == Py_None) ? m_infinity : PyFloat_AsDouble(pUb); + int *varInds = NULL; + double *varVals = NULL; + int numPairs = + pyColDict_AsPackedArrays(pRow, m_colIndices, &varInds, &varVals); + assert(numPairs == PyObject_Length(pRow)); + // arrays are now owned by the Cut object + DippyDecompCut *cut = + new DippyDecompCut(lb, ub, numPairs, varInds, varVals); + cutList.push_back(cut); + } + + assert(!PyErr_Occurred()); + + return len; } /** @@ -519,56 +526,58 @@ int DippyDecompApp::generateCuts(const double* x, DecompCutList& cutList) * * Called by DIP, we interface with Python */ -int DippyDecompApp::APPheuristics(const double* xhat, const double* origCost, vector& xhatIPFeas) -{ - if (!m_pyHeuristics) { - return 0; - } - - PyObject* pSolution = pyTupleList_FromDoubleArray(xhat, m_colList); - PyObject* pObjective = pyTupleList_FromDoubleArray(origCost, m_colList); - char solveHeuristics[] = "solveHeuristics"; - char OO[] = "OO"; - PyObject* pSolList = PyObject_CallMethod(m_pProb, solveHeuristics, OO, pSolution, pObjective); - - if (pSolList == NULL) { - throw UtilException("Error calling method prob.solveHeuristics()", "APPheuristics", - "DippyDecompApp"); - } - - // This should never happen, pyHeuristics should be set to false in dippy.py - if (pSolList == Py_None) - // method exists, but is not implemented, return 0 - { - return 0; - } - - // APPheuristics returns dictionary of (variable, value) pairs - const int len = PyObject_Length(pSolList); - - // loop through each solution - for (int i = 0; i < len; i++) { - pSolution = PyList_GetItem(pSolList, i); - int* varInds = NULL; - double* varVals = NULL; - int numPairs = pyColDict_AsPackedArrays(pSolution, m_colIndices, &varInds, &varVals); - assert(numPairs == PyObject_Length(pSolution)); - double* sol = new double[m_numCols]; - UtilFillN(sol, m_numCols, 0.0); - - for (int j = 0; j < numPairs; j++) { - sol[varInds[j]] = varVals[j]; - } - - xhatIPFeas.push_back(new DecompSolution(m_numCols, sol, origCost)); - delete [] sol; - delete [] varInds; - delete [] varVals; - } - - assert(!PyErr_Occurred()); - - return len; +int DippyDecompApp::APPheuristics(const double *xhat, const double *origCost, + vector &xhatIPFeas) { + if (!m_pyHeuristics) { + return 0; + } + + PyObject *pSolution = pyTupleList_FromDoubleArray(xhat, m_colList); + PyObject *pObjective = pyTupleList_FromDoubleArray(origCost, m_colList); + char solveHeuristics[] = "solveHeuristics"; + char OO[] = "OO"; + PyObject *pSolList = + PyObject_CallMethod(m_pProb, solveHeuristics, OO, pSolution, pObjective); + + if (pSolList == NULL) { + throw UtilException("Error calling method prob.solveHeuristics()", + "APPheuristics", "DippyDecompApp"); + } + + // This should never happen, pyHeuristics should be set to false in dippy.py + if (pSolList == Py_None) + // method exists, but is not implemented, return 0 + { + return 0; 
+ } + + // APPheuristics returns dictionary of (variable, value) pairs + const int len = PyObject_Length(pSolList); + + // loop through each solution + for (int i = 0; i < len; i++) { + pSolution = PyList_GetItem(pSolList, i); + int *varInds = NULL; + double *varVals = NULL; + int numPairs = + pyColDict_AsPackedArrays(pSolution, m_colIndices, &varInds, &varVals); + assert(numPairs == PyObject_Length(pSolution)); + double *sol = new double[m_numCols]; + UtilFillN(sol, m_numCols, 0.0); + + for (int j = 0; j < numPairs; j++) { + sol[varInds[j]] = varVals[j]; + } + + xhatIPFeas.push_back(new DecompSolution(m_numCols, sol, origCost)); + delete[] sol; + delete[] varInds; + delete[] varVals; + } + + assert(!PyErr_Occurred()); + + return len; } /** @@ -576,50 +585,50 @@ int DippyDecompApp::APPheuristics(const double* xhat, const double* origCost, ve * * Called by DIP, we interface with Python */ -int DippyDecompApp::generateInitVars(DecompVarList& initVars) -{ - if (!m_pyInitVars) { - return 0; - } - - char generateInitVars[] = "generateInitVars"; - PyObject* pVarList = PyObject_CallMethod(m_pProb, generateInitVars, NULL); - - if (pVarList == NULL) { - throw UtilException("Error calling method prob.generateInitVars()", "generateInitVars", - "DippyDecompApp"); - } - - if (pVarList == Py_None) - // method exists, but is not implemented, return 0 - { - return 0; - } - - int nVars = PyObject_Length(pVarList); - // generateInitVars returns 2-tuples (index, (cost, dictionary of (variable, value) pairs)) - // We can use these to construct a C++ DecompVar objects - double cost; - PyObject* pColDict; - - for (int i = 0; i < nVars; i++) { - PyObject* pTuple = PyList_GetItem(pVarList, i); - int whichBlock = m_relaxIndices[PyTuple_GetItem(pTuple, 0)]; - PyObject* pVarTuple = PyTuple_GetItem(pTuple, 1); - cost = PyFloat_AsDouble(PyTuple_GetItem(pVarTuple, 0)); - pColDict = PyTuple_GetItem(pVarTuple, 1); - int* varInds = NULL; - double* varVals = NULL; - DecompVarType varType; - - int numPairs = pyColDict_AsPackedArrays(pColDict, m_colIndices, &varInds, &varVals, varType); - assert(numPairs == PyObject_Length(pColDict)); - DecompVar* var = new DecompVar(numPairs, varInds, varVals, cost, varType); - var->setBlockId(whichBlock); - initVars.push_back(var); - } - - assert(!PyErr_Occurred()); - - return nVars; +int DippyDecompApp::generateInitVars(DecompVarList &initVars) { + if (!m_pyInitVars) { + return 0; + } + + char generateInitVars[] = "generateInitVars"; + PyObject *pVarList = PyObject_CallMethod(m_pProb, generateInitVars, NULL); + + if (pVarList == NULL) { + throw UtilException("Error calling method prob.generateInitVars()", + "generateInitVars", "DippyDecompApp"); + } + + if (pVarList == Py_None) + // method exists, but is not implemented, return 0 + { + return 0; + } + + int nVars = PyObject_Length(pVarList); + // generateInitVars returns 2-tuples (index, (cost, dictionary of (variable, + // value) pairs)) We can use these to construct a C++ DecompVar objects + double cost; + PyObject *pColDict; + + for (int i = 0; i < nVars; i++) { + PyObject *pTuple = PyList_GetItem(pVarList, i); + int whichBlock = m_relaxIndices[PyTuple_GetItem(pTuple, 0)]; + PyObject *pVarTuple = PyTuple_GetItem(pTuple, 1); + cost = PyFloat_AsDouble(PyTuple_GetItem(pVarTuple, 0)); + pColDict = PyTuple_GetItem(pVarTuple, 1); + int *varInds = NULL; + double *varVals = NULL; + DecompVarType varType; + + int numPairs = pyColDict_AsPackedArrays(pColDict, m_colIndices, &varInds, + &varVals, varType); + assert(numPairs == 
PyObject_Length(pColDict)); + DecompVar *var = new DecompVar(numPairs, varInds, varVals, cost, varType); + var->setBlockId(whichBlock); + initVars.push_back(var); + } + + assert(!PyErr_Occurred()); + + return nVars; } diff --git a/Dip/src/dippy/DippyModule.cpp b/Dip/src/dippy/DippyModule.cpp index ac42adf3..c37e4e8e 100644 --- a/Dip/src/dippy/DippyModule.cpp +++ b/Dip/src/dippy/DippyModule.cpp @@ -1,5 +1,5 @@ -#include "Python.h" #include "Decomp.h" +#include "Python.h" #ifdef _WIN32 #define DLLEXPORT extern "C" __declspec(dllexport) @@ -8,87 +8,79 @@ #endif // prototype -DLLEXPORT PyObject* Solve(PyObject* self, PyObject* args); +DLLEXPORT PyObject *Solve(PyObject *self, PyObject *args); struct module_state { - PyObject *error; + PyObject *error; }; #if PY_MAJOR_VERSION >= 3 -#define GETSTATE(m) ((struct module_state*)PyModule_GetState(m)) +#define GETSTATE(m) ((struct module_state *)PyModule_GetState(m)) #else #define GETSTATE(m) (&_state) static struct module_state _state; #endif -static PyObject * -error_out(PyObject *m) { - struct module_state *st = GETSTATE(m); - PyErr_SetString(st->error, "something bad happened"); - return NULL; +static PyObject *error_out(PyObject *m) { + struct module_state *st = GETSTATE(m); + PyErr_SetString(st->error, "something bad happened"); + return NULL; } // methods exposed by this module static PyMethodDef dippy_module_methods[] = { - {"Solve", Solve, METH_VARARGS, "Solve"}, + {"Solve", Solve, METH_VARARGS, "Solve"}, - {NULL, NULL, 0, NULL } -}; + {NULL, NULL, 0, NULL}}; #if PY_MAJOR_VERSION >= 3 static int dippy_module_traverse(PyObject *m, visitproc visit, void *arg) { - Py_VISIT(GETSTATE(m)->error); - return 0; + Py_VISIT(GETSTATE(m)->error); + return 0; } static int dippy_module_clear(PyObject *m) { - Py_CLEAR(GETSTATE(m)->error); - return 0; + Py_CLEAR(GETSTATE(m)->error); + return 0; } -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "_dippy", - NULL, - sizeof(struct module_state), - dippy_module_methods, - NULL, - dippy_module_traverse, - dippy_module_clear, - NULL -}; +static struct PyModuleDef moduledef = {PyModuleDef_HEAD_INIT, + "_dippy", + NULL, + sizeof(struct module_state), + dippy_module_methods, + NULL, + dippy_module_traverse, + dippy_module_clear, + NULL}; #define INITERROR return NULL -PyMODINIT_FUNC -PyInit__dippy(void) +PyMODINIT_FUNC PyInit__dippy(void) #else #define INITERROR return -void -init_dippy(void) +void init_dippy(void) #endif { PyObject *module, *d, *s; #if PY_MAJOR_VERSION >= 3 - module=PyModule_Create(&moduledef); + module = PyModule_Create(&moduledef); #else - module=Py_InitModule("_dippy", dippy_module_methods); + module = Py_InitModule("_dippy", dippy_module_methods); #endif d = PyModule_GetDict(module); s = PyUnicode_FromString("0.2"); PyDict_SetItemString(d, "__version__", s); s = PyUnicode_FromString("See polyhedron.py"); PyDict_SetItemString(d, "__doc__", s); - //dippy_error = PyUnicode_FromString("_dippy.error"); - //PyDict_SetItemString(d, "error", cdd_error); + // dippy_error = PyUnicode_FromString("_dippy.error"); + // PyDict_SetItemString(d, "error", cdd_error); if (PyErr_Occurred()) Py_FatalError("can't initialize module _cdd"); #if PY_MAJOR_VERSION >= 3 return module; #endif } - - diff --git a/Dip/src/dippy/DippyPythonUtils.cpp b/Dip/src/dippy/DippyPythonUtils.cpp index e7a4e873..c43327dc 100644 --- a/Dip/src/dippy/DippyPythonUtils.cpp +++ b/Dip/src/dippy/DippyPythonUtils.cpp @@ -1,8 +1,8 @@ #include "DippyPythonUtils.h" +#include "AlpsDecompNodeDesc.h" #include "DippyDecompApp.h" 
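The generateInitVars bridge in DippyDecompApp.cpp above expects the Python side to return a list of 2-tuples (block key, (cost, {variable: value})), which is what pyColDict_AsPackedArrays and the loop there unpack. A minimal sketch of such an override (self.vars and self.cost_of are hypothetical placeholders; self.relaxation.dict is the relaxation dictionary kept by DipProblem):

    # One trivial starting column per block, purely for illustration.
    def generateInitVars(self):
        init = []
        for key in self.relaxation.dict:
            col = {self.vars[key][0]: 1.0}        # hypothetical one-variable column
            init.append((key, (self.cost_of(col), col)))
        return init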
#include "UtilMacros.h" #include "UtilMacrosDecomp.h" -#include "AlpsDecompNodeDesc.h" #include @@ -18,20 +18,19 @@ * * Returns Python tuple list with length = pList length */ -PyObject* pyTupleList_FromDoubleArray(const double* values, PyObject* pList) -{ - int len = PyObject_Length(pList); - PyObject* pTupleList = PyList_New(len), *pObj; +PyObject *pyTupleList_FromDoubleArray(const double *values, PyObject *pList) { + int len = PyObject_Length(pList); + PyObject *pTupleList = PyList_New(len), *pObj; - for (int i = 0; i < len; i++) { - pObj = PyList_GetItem(pList, i); - Py_XINCREF(pObj); - insertTupleToPyList(pTupleList, i, pObj, PyFloat_FromDouble(values[i])); - } + for (int i = 0; i < len; i++) { + pObj = PyList_GetItem(pList, i); + Py_XINCREF(pObj); + insertTupleToPyList(pTupleList, i, pObj, PyFloat_FromDouble(values[i])); + } - assert(!PyErr_Occurred()); + assert(!PyErr_Occurred()); - return pTupleList; + return pTupleList; } /** @@ -42,134 +41,131 @@ PyObject* pyTupleList_FromDoubleArray(const double* values, PyObject* pList) * * Returns Python tuple list */ -PyObject* pyTupleList_FromNode(DecompAlgo* algo, DecompStatus decompStatus) -{ - PyObject* pOutput = PyList_New(0); - AlpsDecompTreeNode* node = (AlpsDecompTreeNode*) algo->getCurrentNode(); - double lb = algo->getObjBestBoundLB(), ub = algo->getObjBestBoundUB(); - double quality = node->getQuality(); - string status; - - switch (decompStatus) { - case STAT_IP_FEASIBLE: - status = "Solution"; - break; - - case STAT_FEASIBLE: - if (lb > quality) { - quality = lb; - } - - if (quality >= ub) { - status = "Pruned"; - } else { - status = "Candidate"; - } - - break; - - case STAT_INFEASIBLE: - status = "Infeasible"; - break; - - default: - status = "Unknown"; - } - - // Add into the list the output needed - addTupleToPyList(pOutput, PyUnicode_FromString("nodeIndex"), - PyLong_FromLong(node->getIndex())); - addTupleToPyList(pOutput, PyUnicode_FromString("parentIndex"), - PyLong_FromLong(node->getParentIndex())); - addTupleToPyList(pOutput, PyUnicode_FromString("nodeDepth"), - PyLong_FromLong(node->getDepth())); - addTupleToPyList(pOutput, PyUnicode_FromString("nodeQuality"), - PyFloat_FromDouble(quality)); - addTupleToPyList(pOutput, PyUnicode_FromString("globalLB"), - PyFloat_FromDouble(lb)); - addTupleToPyList(pOutput, PyUnicode_FromString("globalUB"), - PyFloat_FromDouble(ub)); - addTupleToPyList(pOutput, PyUnicode_FromString("nodeStatus"), - PyUnicode_FromString(status.c_str())); - addTupleToPyList(pOutput, PyUnicode_FromString("branchedDir"), - PyLong_FromLong(dynamic_cast - (algo->getCurrentNode()->getDesc())->getBranchedDir())); - // Copy the current solution into a Python list - const double* xhat = algo->getXhat(); - DippyDecompApp* app = (DippyDecompApp*)algo->getDecompApp(); - PyObject* pSolutionList = pyTupleList_FromDoubleArray(xhat, - app->m_colList); - addTupleToPyList(pOutput, PyUnicode_FromString("xhat"), pSolutionList); - /** MO. 29/2/2012 - This section was originally an attempt to add "simple" cuts. i.e., - that the sum of non-basic variables >= 1 (or at least a variant for lb and ub), so I - passed the variable bounds at the node and lists of basic, lower bound and upper bound - variables from the original problem. However, the cuts added "clashed" with the CGL cuts, - probably because the variables introduced by these cuts, e.g., slacks, were not considered. 
- I have abandoned this direction for now, but I like the idea of getting the full node problem - so more complex cuts that use, e.g., the basis, can be implemented in Python. - - int numOrig = algo->getModelCore().getModel()->getNumCols(); - - const double * lb = algo->getColLBNode(); - const double * ub = algo->getColUBNode(); - PyObject * pBoundList = PyList_New(0), - * pExtraBoundList = PyList_New(0); - PyObject * pBoundPair; - - for (int j=0; j= DecompInf) - PyTuple_SetItem(pBoundPair, 1, Py_None); - else - PyTuple_SetItem(pBoundPair, 1, PyFloat_FromDouble(ub[j])); - addTupleToPyList(pBoundList, PyList_GetItem(app->m_colList, j), pBoundPair); - } - addTupleToPyList(pOutput, PyUnicode_FromString("bounds"), pBoundList); - - // Copy the original variables into "status" lists - PyObject * pBasisList = PyList_New(0), - * pLBList = PyList_New(0), - * pUBList = PyList_New(0); - - if (algo->getMasterOSI()->basisIsAvailable()) { - int numRows = algo->getMasterOSI()->getNumRows(), - numCols = algo->getMasterOSI()->getNumCols(); - int * rstat = new int[numRows], - * cstat = new int [numCols]; - - algo->getMasterOSI()->getBasisStatus(cstat, rstat); - // MO (28/2/2012) - Assuming that any extra variables are added at the end, is this true? - for (int j=0; jm_colList, j)); - break; - case 2: // upper - PyList_Append(pUBList, PyList_GetItem(app->m_colList, j)); - break; - case 3: // lower - PyList_Append(pLBList, PyList_GetItem(app->m_colList, j)); - break; - default: - throw UtilException("Error calling method pyTupleList_FromNode()", "pyTupleList_FromNode", "DippyPythonUtils"); - } - - delete [] rstat; - delete [] cstat; - } - - addTupleToPyList(pOutput, PyUnicode_FromString("basic"), pBasisList); - addTupleToPyList(pOutput, PyUnicode_FromString("atLB"), pLBList); - addTupleToPyList(pOutput, PyUnicode_FromString("atUB"), pUBList); - */ - - assert(!PyErr_Occurred()); - - return pOutput; +PyObject *pyTupleList_FromNode(DecompAlgo *algo, DecompStatus decompStatus) { + PyObject *pOutput = PyList_New(0); + AlpsDecompTreeNode *node = (AlpsDecompTreeNode *)algo->getCurrentNode(); + double lb = algo->getObjBestBoundLB(), ub = algo->getObjBestBoundUB(); + double quality = node->getQuality(); + string status; + + switch (decompStatus) { + case STAT_IP_FEASIBLE: + status = "Solution"; + break; + + case STAT_FEASIBLE: + if (lb > quality) { + quality = lb; + } + + if (quality >= ub) { + status = "Pruned"; + } else { + status = "Candidate"; + } + + break; + + case STAT_INFEASIBLE: + status = "Infeasible"; + break; + + default: + status = "Unknown"; + } + + // Add into the list the output needed + addTupleToPyList(pOutput, PyUnicode_FromString("nodeIndex"), + PyLong_FromLong(node->getIndex())); + addTupleToPyList(pOutput, PyUnicode_FromString("parentIndex"), + PyLong_FromLong(node->getParentIndex())); + addTupleToPyList(pOutput, PyUnicode_FromString("nodeDepth"), + PyLong_FromLong(node->getDepth())); + addTupleToPyList(pOutput, PyUnicode_FromString("nodeQuality"), + PyFloat_FromDouble(quality)); + addTupleToPyList(pOutput, PyUnicode_FromString("globalLB"), + PyFloat_FromDouble(lb)); + addTupleToPyList(pOutput, PyUnicode_FromString("globalUB"), + PyFloat_FromDouble(ub)); + addTupleToPyList(pOutput, PyUnicode_FromString("nodeStatus"), + PyUnicode_FromString(status.c_str())); + addTupleToPyList(pOutput, PyUnicode_FromString("branchedDir"), + PyLong_FromLong(dynamic_cast( + algo->getCurrentNode()->getDesc()) + ->getBranchedDir())); + // Copy the current solution into a Python list + const double *xhat = 
algo->getXhat(); + DippyDecompApp *app = (DippyDecompApp *)algo->getDecompApp(); + PyObject *pSolutionList = pyTupleList_FromDoubleArray(xhat, app->m_colList); + addTupleToPyList(pOutput, PyUnicode_FromString("xhat"), pSolutionList); + /** MO. 29/2/2012 - This section was originally an attempt to add "simple" + cuts. i.e., that the sum of non-basic variables >= 1 (or at least a variant + for lb and ub), so I passed the variable bounds at the node and lists of + basic, lower bound and upper bound variables from the original problem. + However, the cuts added "clashed" with the CGL cuts, probably because the + variables introduced by these cuts, e.g., slacks, were not considered. I have + abandoned this direction for now, but I like the idea of getting the full node + problem so more complex cuts that use, e.g., the basis, can be implemented in + Python. + + int numOrig = algo->getModelCore().getModel()->getNumCols(); + + const double * lb = algo->getColLBNode(); + const double * ub = algo->getColUBNode(); + PyObject * pBoundList = PyList_New(0), + * pExtraBoundList = PyList_New(0); + PyObject * pBoundPair; + + for (int j=0; j= DecompInf) + PyTuple_SetItem(pBoundPair, 1, Py_None); + else + PyTuple_SetItem(pBoundPair, 1, PyFloat_FromDouble(ub[j])); + addTupleToPyList(pBoundList, PyList_GetItem(app->m_colList, j), + pBoundPair); + } + addTupleToPyList(pOutput, PyUnicode_FromString("bounds"), pBoundList); + + // Copy the original variables into "status" lists + PyObject * pBasisList = PyList_New(0), + * pLBList = PyList_New(0), + * pUBList = PyList_New(0); + + if (algo->getMasterOSI()->basisIsAvailable()) { + int numRows = algo->getMasterOSI()->getNumRows(), + numCols = algo->getMasterOSI()->getNumCols(); + int * rstat = new int[numRows], + * cstat = new int [numCols]; + + algo->getMasterOSI()->getBasisStatus(cstat, rstat); + // MO (28/2/2012) - Assuming that any extra variables are added at the + end, is this true? 
for (int j=0; jm_colList, j)); break; + case 2: // upper + PyList_Append(pUBList, PyList_GetItem(app->m_colList, + j)); break; case 3: // lower PyList_Append(pLBList, + PyList_GetItem(app->m_colList, j)); break; default: throw UtilException("Error + calling method pyTupleList_FromNode()", "pyTupleList_FromNode", + "DippyPythonUtils"); + } + + delete [] rstat; + delete [] cstat; + } + + addTupleToPyList(pOutput, PyUnicode_FromString("basic"), pBasisList); + addTupleToPyList(pOutput, PyUnicode_FromString("atLB"), pLBList); + addTupleToPyList(pOutput, PyUnicode_FromString("atUB"), pUBList); + */ + + assert(!PyErr_Occurred()); + + return pOutput; } /** @@ -179,38 +175,40 @@ PyObject* pyTupleList_FromNode(DecompAlgo* algo, DecompStatus decompStatus) * and coefficients as values * */ -void pyColDict_AsPairedVector(PyObject* pColDict, vector >& vec, map indices) -{ - int len = PyObject_Length(pColDict); - vec.clear(); - PyObject* pKeys = PyDict_Keys(pColDict), *pCol; - double value; - int index; - - for (int i = 0; i < len; i++) { - pCol = PyList_GetItem(pKeys, i); - value = PyFloat_AsDouble(PyDict_GetItem(pColDict, pCol)); - index = indices[pCol]; - - if ( (index < 0) || (index >= indices.size()) ) { - char str[] = "__str__"; - PyObject* pColName = PyObject_CallMethod(pCol, str, NULL); - - if (pColName == NULL) { - throw UtilException("Error calling method col.__str__()", "pyColDict_AsPairedVector", - "DippyPythonUtils"); - } - - string name = PyBytes_AsString(PyUnicode_AsEncodedString(pColName, "UTF-8", "strict")); - - throw UtilException("Bad index for " + name, "pyTupleList_AsPairedVector", - "DippyPythonUtils"); +void pyColDict_AsPairedVector(PyObject *pColDict, + vector> &vec, + map indices) { + int len = PyObject_Length(pColDict); + vec.clear(); + PyObject *pKeys = PyDict_Keys(pColDict), *pCol; + double value; + int index; + + for (int i = 0; i < len; i++) { + pCol = PyList_GetItem(pKeys, i); + value = PyFloat_AsDouble(PyDict_GetItem(pColDict, pCol)); + index = indices[pCol]; + + if ((index < 0) || (index >= indices.size())) { + char str[] = "__str__"; + PyObject *pColName = PyObject_CallMethod(pCol, str, NULL); + + if (pColName == NULL) { + throw UtilException("Error calling method col.__str__()", + "pyColDict_AsPairedVector", "DippyPythonUtils"); } - vec.push_back(pair(index, value)); - } + string name = PyBytes_AsString( + PyUnicode_AsEncodedString(pColName, "UTF-8", "strict")); + + throw UtilException("Bad index for " + name, "pyTupleList_AsPairedVector", + "DippyPythonUtils"); + } + + vec.push_back(pair(index, value)); + } - assert(!PyErr_Occurred()); + assert(!PyErr_Occurred()); } /** @@ -221,87 +219,91 @@ void pyColDict_AsPairedVector(PyObject* pColDict, vector >& ve * * Returns length of index and value arrays */ -int pyColDict_AsPackedArrays(PyObject* pColDict, map indices, int** inds, double** vals) -{ - int len = PyObject_Length(pColDict); - *inds = new int[len]; - *vals = new double[len]; - PyObject* pKeys = PyDict_Keys(pColDict); - PyObject* pCol; - double value; - int index; - - for (int i = 0; i < len; i++) { - pCol = PyList_GetItem(pKeys, i); - value = PyFloat_AsDouble(PyDict_GetItem(pColDict, pCol)); - index = indices[pCol]; - - if ( (index < 0) || (index >= indices.size()) ) { - char getName[] = "getName"; - PyObject* pColName = PyObject_CallMethod(pCol, getName, NULL); - - if (pColName == NULL) { - throw UtilException("Error calling method col.getName()", "pyColDict_AsPackedArrays", - "DippyPythonUtils"); - } - - string name = 
PyBytes_AsString(PyUnicode_AsEncodedString(pColName, "UTF-8", "strict")); - - throw UtilException("Bad index for " + name, "pyColDict_AsPackedArrays", "DippyPythonUtils"); +int pyColDict_AsPackedArrays(PyObject *pColDict, map indices, + int **inds, double **vals) { + int len = PyObject_Length(pColDict); + *inds = new int[len]; + *vals = new double[len]; + PyObject *pKeys = PyDict_Keys(pColDict); + PyObject *pCol; + double value; + int index; + + for (int i = 0; i < len; i++) { + pCol = PyList_GetItem(pKeys, i); + value = PyFloat_AsDouble(PyDict_GetItem(pColDict, pCol)); + index = indices[pCol]; + + if ((index < 0) || (index >= indices.size())) { + char getName[] = "getName"; + PyObject *pColName = PyObject_CallMethod(pCol, getName, NULL); + + if (pColName == NULL) { + throw UtilException("Error calling method col.getName()", + "pyColDict_AsPackedArrays", "DippyPythonUtils"); } - (*inds)[i] = index; - (*vals)[i] = value; - } + string name = PyBytes_AsString( + PyUnicode_AsEncodedString(pColName, "UTF-8", "strict")); + + throw UtilException("Bad index for " + name, "pyColDict_AsPackedArrays", + "DippyPythonUtils"); + } + + (*inds)[i] = index; + (*vals)[i] = value; + } - assert(!PyErr_Occurred()); + assert(!PyErr_Occurred()); - return len; + return len; } -int pyColDict_AsPackedArrays(PyObject* pColDict, map indices, int** inds, double** vals, DecompVarType& varType) -{ - int len = PyObject_Length(pColDict); - *inds = new int[len]; - *vals = new double[len]; - PyObject* pKeys = PyDict_Keys(pColDict); - PyObject* pCol; - double value; - int index; - - for (int i = 0; i < len; i++) { - pCol = PyList_GetItem(pKeys, i); - value = PyFloat_AsDouble(PyDict_GetItem(pColDict, pCol)); - index = indices[pCol]; - - if ( (index < 0) || (index >= indices.size()) ) { - char getName[] = "getName"; - PyObject* pColName = PyObject_CallMethod(pCol, getName, NULL); - - if (pColName == NULL) { - throw UtilException("Error calling method col.getName()", "pyColDict_AsPackedArrays", - "DippyPythonUtils"); - } - - string name = PyBytes_AsString(PyUnicode_AsEncodedString(pColName, "UTF-8", "strict")); - - throw UtilException("Bad index for " + name, "pyColDict_AsPackedArrays", - "DippyPythonUtils"); +int pyColDict_AsPackedArrays(PyObject *pColDict, map indices, + int **inds, double **vals, + DecompVarType &varType) { + int len = PyObject_Length(pColDict); + *inds = new int[len]; + *vals = new double[len]; + PyObject *pKeys = PyDict_Keys(pColDict); + PyObject *pCol; + double value; + int index; + + for (int i = 0; i < len; i++) { + pCol = PyList_GetItem(pKeys, i); + value = PyFloat_AsDouble(PyDict_GetItem(pColDict, pCol)); + index = indices[pCol]; + + if ((index < 0) || (index >= indices.size())) { + char getName[] = "getName"; + PyObject *pColName = PyObject_CallMethod(pCol, getName, NULL); + + if (pColName == NULL) { + throw UtilException("Error calling method col.getName()", + "pyColDict_AsPackedArrays", "DippyPythonUtils"); } - char getVarType[] = "getVarType"; - PyObject* pColType = PyObject_CallMethod(pCol, getVarType, NULL); - if (pColType == NULL){ - throw UtilException("getVarType call failed.", "pyColDict_AsPackedArrays", - "DippyPythonUtils"); - } - - (*inds)[i] = index; - (*vals)[i] = value; - } - assert(!PyErr_Occurred()); + string name = PyBytes_AsString( + PyUnicode_AsEncodedString(pColName, "UTF-8", "strict")); + + throw UtilException("Bad index for " + name, "pyColDict_AsPackedArrays", + "DippyPythonUtils"); + } + char getVarType[] = "getVarType"; + PyObject *pColType = PyObject_CallMethod(pCol, 
getVarType, NULL); + if (pColType == NULL) { + throw UtilException("getVarType call failed.", "pyColDict_AsPackedArrays", + "DippyPythonUtils"); + } + + (*inds)[i] = index; + (*vals)[i] = value; + } - return len; + assert(!PyErr_Occurred()); + + return len; } /** @@ -311,76 +313,75 @@ int pyColDict_AsPackedArrays(PyObject* pColDict, map indices, in * and coefficients as values */ -CoinPackedMatrix* pyConstraints_AsPackedMatrix(PyObject* pRowList, - map rowIndices, map colIndices) -{ - int len = PyObject_Length(pRowList); - int rowInd, colInd, num; - string rowName, colName; - double val, lb, ub; - PyObject* pRow, *pKeys, *pCol; - // First get the total number of non-zeros from all the tuples - int numNZs = 0; - - for (int i = 0; i < len; i++) { - pRow = PyList_GetItem(pRowList, i); - num = PyObject_Length(pRow); - numNZs += num; - } - - // Now read and process the tuples - int start = 0; - int* rowInds = new int[numNZs]; - UtilFillN(rowInds, numNZs, -1); - int* colInds = new int[numNZs]; - UtilFillN(colInds, numNZs, -1); - double* values = new double[numNZs]; - UtilFillN(values, numNZs, 0.0); - - for (int i = 0; i < len; i++) { - pRow = PyList_GetItem(pRowList, i); - rowInd = rowIndices[pRow]; - pKeys = PyDict_Keys(pRow); - num = PyObject_Length(pKeys); - - for (int n = 0; n < num; n++) { - pCol = PyList_GetItem(pKeys, n); - colInd = colIndices[pCol]; - val = PyFloat_AsDouble(PyDict_GetItem(pRow, pCol)); - rowInds[start + n] = rowInd; - colInds[start + n] = colInd; - values[start + n] = val; - } - - start += num; - } - - assert(!PyErr_Occurred()); - - return new CoinPackedMatrix(false, rowInds, colInds, values, numNZs); +CoinPackedMatrix * +pyConstraints_AsPackedMatrix(PyObject *pRowList, + map rowIndices, + map colIndices) { + int len = PyObject_Length(pRowList); + int rowInd, colInd, num; + string rowName, colName; + double val, lb, ub; + PyObject *pRow, *pKeys, *pCol; + // First get the total number of non-zeros from all the tuples + int numNZs = 0; + + for (int i = 0; i < len; i++) { + pRow = PyList_GetItem(pRowList, i); + num = PyObject_Length(pRow); + numNZs += num; + } + + // Now read and process the tuples + int start = 0; + int *rowInds = new int[numNZs]; + UtilFillN(rowInds, numNZs, -1); + int *colInds = new int[numNZs]; + UtilFillN(colInds, numNZs, -1); + double *values = new double[numNZs]; + UtilFillN(values, numNZs, 0.0); + + for (int i = 0; i < len; i++) { + pRow = PyList_GetItem(pRowList, i); + rowInd = rowIndices[pRow]; + pKeys = PyDict_Keys(pRow); + num = PyObject_Length(pKeys); + + for (int n = 0; n < num; n++) { + pCol = PyList_GetItem(pKeys, n); + colInd = colIndices[pCol]; + val = PyFloat_AsDouble(PyDict_GetItem(pRow, pCol)); + rowInds[start + n] = rowInd; + colInds[start + n] = colInd; + values[start + n] = val; + } + + start += num; + } + + assert(!PyErr_Occurred()); + + return new CoinPackedMatrix(false, rowInds, colInds, values, numNZs); } /** * Creates a (key,value) tuple and appends to a Python list of tuples * */ -void addTupleToPyList(PyObject* pList, PyObject* key, PyObject* value) -{ - PyObject* pTuple = PyTuple_New(2); - PyTuple_SetItem(pTuple, 0, key); - PyTuple_SetItem(pTuple, 1, value); - PyList_Append(pList, pTuple); - assert(!PyErr_Occurred()); +void addTupleToPyList(PyObject *pList, PyObject *key, PyObject *value) { + PyObject *pTuple = PyTuple_New(2); + PyTuple_SetItem(pTuple, 0, key); + PyTuple_SetItem(pTuple, 1, value); + PyList_Append(pList, pTuple); + assert(!PyErr_Occurred()); } /** * Creates a (key,value) tuple and inserts in a Python list of 
tuples * */ -void insertTupleToPyList(PyObject* pList, unsigned position, PyObject* key, PyObject* value) -{ - PyObject* pTuple = PyTuple_New(2); - PyTuple_SetItem(pTuple, 0, key); - PyTuple_SetItem(pTuple, 1, value); - PyList_SetItem(pList, position, pTuple); - assert(!PyErr_Occurred()); +void insertTupleToPyList(PyObject *pList, unsigned position, PyObject *key, + PyObject *value) { + PyObject *pTuple = PyTuple_New(2); + PyTuple_SetItem(pTuple, 0, key); + PyTuple_SetItem(pTuple, 1, value); + PyList_SetItem(pList, position, pTuple); + assert(!PyErr_Occurred()); } - diff --git a/Dip/src/dippy/DippySolve.cpp b/Dip/src/dippy/DippySolve.cpp index 59ab66b7..9607e930 100644 --- a/Dip/src/dippy/DippySolve.cpp +++ b/Dip/src/dippy/DippySolve.cpp @@ -2,8 +2,8 @@ #include "UtilParameters.h" //===========================================================================// -#include "DippyDecompApp.h" #include "DippyDecompAlgo.h" +#include "DippyDecompApp.h" #include "DippyPythonUtils.h" //===========================================================================// #include "AlpsDecompModel.h" @@ -35,159 +35,157 @@ double DecompInf = COIN_DBL_MAX; extern double DecompInf; #endif -DLLEXPORT PyObject* Solve(PyObject* self, PyObject* args) -{ - PyObject* pProb; - PyObject* pParamDict; - - if (!PyArg_ParseTuple(args, "OO", &pProb, &pParamDict)) { - return NULL; - } - - try { - // create the utility class for parsing parameters - UtilParameters utilParam; - // By default Dippy enforces branching in the master problem - utilParam.Add("DECOMP", "BranchEnforceInMaster", "1"); - utilParam.Add("DECOMP", "BranchEnforceInSubProb", "0"); - // loop through paramDict, add to utilParam - PyObject* pKey, *pValue; - Py_ssize_t pos = 0; - - while (PyDict_Next(pParamDict, &pos, &pKey, &pValue)) { - // key is a 2-tuple (section, name), both strings - // value is a string - // TODO: better error reporting - const char* section = NULL; - PyObject* pSection = PyTuple_GetItem(pKey, 0); - - if (pSection != Py_None) { - section = PyBytes_AsString(PyUnicode_AsEncodedString(PyTuple_GetItem(pKey, 0), - "UTF-8", "strict")); - } - - const char* name = - PyBytes_AsString(PyUnicode_AsEncodedString(PyTuple_GetItem(pKey, 1), - "UTF-8", "strict")); - const char* value = - PyBytes_AsString(PyUnicode_AsEncodedString(pValue, "UTF-8", "strict")); - utilParam.Add(section, name, value); - } - - bool doCut = utilParam.GetSetting("doCut", false); - bool doPriceCut = utilParam.GetSetting("doPriceCut", false); - bool doRelaxCut = utilParam.GetSetting("doRelaxCut", false); - // create the user application (a DecompApp) - DippyDecompApp sip(utilParam, pProb); - // create the decomp algo - DecompAlgo* algo = NULL; - - if (doPriceCut) { - algo = new DippyAlgoPC(&sip, utilParam, pProb); - } else if (doCut) { - algo = new DippyAlgoC(&sip, utilParam, pProb); - } else if (doRelaxCut) { - algo = new DippyAlgoRC(&sip, utilParam, pProb); - } - - // default - if (algo == NULL) { - algo = new DippyAlgoC(&sip, utilParam, pProb); - } - - AlpsDecompModel alpsModel(utilParam, algo); - alpsModel.solve(); - // TODO: Python exception needs to be set here or higher - int status = alpsModel.getSolStatus(); - PyObject* pStatus; - PyObject* pMessage = Py_None; - /** - LpStatusOptimal “Optimal” 1 - LpStatusNotSolved “Not Solved” 0 - LpStatusInfeasible “Infeasible” -1 - LpStatusUnbounded “Unbounded” -2 - LpStatusUndefined “Undefined” -3 - */ - - switch (status) { - case AlpsExitStatusOptimal: - pStatus = PyLong_FromLong(1); - break; - - case AlpsExitStatusTimeLimit: - 
pStatus = PyLong_FromLong(0); - pMessage = PyUnicode_FromString("Reached time limit"); - break; - - case AlpsExitStatusNodeLimit: - pStatus = PyLong_FromLong(0); - pMessage = PyUnicode_FromString("Reached node limit"); - break; - - case AlpsExitStatusSolLimit: - pStatus = PyLong_FromLong(0); - pMessage = PyUnicode_FromString("Reached sol limit"); - break; - - case AlpsExitStatusInfeasible: - pStatus = PyLong_FromLong(-1); - break; - - case AlpsExitStatusNoMemory: - throw UtilException("Out of memory", "Solve", "DippySolve"); - - case AlpsExitStatusFailed: - throw UtilException("Solve failed", "Solve", "DippySolve"); - - case AlpsExitStatusUnbounded: - pStatus = PyLong_FromLong(-2); - break; - - case AlpsExitStatusFeasible: - throw UtilException("Feasible but not optimal", "Solve", "DippySolve"); - - default: - throw UtilException("Unknown solution status", "Solve", "DippySolve"); +DLLEXPORT PyObject *Solve(PyObject *self, PyObject *args) { + PyObject *pProb; + PyObject *pParamDict; + + if (!PyArg_ParseTuple(args, "OO", &pProb, &pParamDict)) { + return NULL; + } + + try { + // create the utility class for parsing parameters + UtilParameters utilParam; + // By default Dippy enforces branching in the master problem + utilParam.Add("DECOMP", "BranchEnforceInMaster", "1"); + utilParam.Add("DECOMP", "BranchEnforceInSubProb", "0"); + // loop through paramDict, add to utilParam + PyObject *pKey, *pValue; + Py_ssize_t pos = 0; + + while (PyDict_Next(pParamDict, &pos, &pKey, &pValue)) { + // key is a 2-tuple (section, name), both strings + // value is a string + // TODO: better error reporting + const char *section = NULL; + PyObject *pSection = PyTuple_GetItem(pKey, 0); + + if (pSection != Py_None) { + section = PyBytes_AsString(PyUnicode_AsEncodedString( + PyTuple_GetItem(pKey, 0), "UTF-8", "strict")); } - const DecompSolution* solution = alpsModel.getBestSolution(); - // cout << "Optimal Solution" << endl; - // solution->print(); - PyObject* pSolution = Py_None; - - if (solution != NULL) { - const double* values = solution->getValues(); - pSolution = pyTupleList_FromDoubleArray(values, sip.m_colList); + const char *name = PyBytes_AsString(PyUnicode_AsEncodedString( + PyTuple_GetItem(pKey, 1), "UTF-8", "strict")); + const char *value = PyBytes_AsString( + PyUnicode_AsEncodedString(pValue, "UTF-8", "strict")); + utilParam.Add(section, name, value); + } + + bool doCut = utilParam.GetSetting("doCut", false); + bool doPriceCut = utilParam.GetSetting("doPriceCut", false); + bool doRelaxCut = utilParam.GetSetting("doRelaxCut", false); + // create the user application (a DecompApp) + DippyDecompApp sip(utilParam, pProb); + // create the decomp algo + DecompAlgo *algo = NULL; + + if (doPriceCut) { + algo = new DippyAlgoPC(&sip, utilParam, pProb); + } else if (doCut) { + algo = new DippyAlgoC(&sip, utilParam, pProb); + } else if (doRelaxCut) { + algo = new DippyAlgoRC(&sip, utilParam, pProb); + } + + // default + if (algo == NULL) { + algo = new DippyAlgoC(&sip, utilParam, pProb); + } + + AlpsDecompModel alpsModel(utilParam, algo); + alpsModel.solve(); + // TODO: Python exception needs to be set here or higher + int status = alpsModel.getSolStatus(); + PyObject *pStatus; + PyObject *pMessage = Py_None; + /** + LpStatusOptimal “Optimal” 1 + LpStatusNotSolved “Not Solved” 0 + LpStatusInfeasible “Infeasible” -1 + LpStatusUnbounded “Unbounded” -2 + LpStatusUndefined “Undefined” -3 + */ + + switch (status) { + case AlpsExitStatusOptimal: + pStatus = PyLong_FromLong(1); + break; + + case AlpsExitStatusTimeLimit: 
+ pStatus = PyLong_FromLong(0); + pMessage = PyUnicode_FromString("Reached time limit"); + break; + + case AlpsExitStatusNodeLimit: + pStatus = PyLong_FromLong(0); + pMessage = PyUnicode_FromString("Reached node limit"); + break; + + case AlpsExitStatusSolLimit: + pStatus = PyLong_FromLong(0); + pMessage = PyUnicode_FromString("Reached sol limit"); + break; + + case AlpsExitStatusInfeasible: + pStatus = PyLong_FromLong(-1); + break; + + case AlpsExitStatusNoMemory: + throw UtilException("Out of memory", "Solve", "DippySolve"); + + case AlpsExitStatusFailed: + throw UtilException("Solve failed", "Solve", "DippySolve"); + + case AlpsExitStatusUnbounded: + pStatus = PyLong_FromLong(-2); + break; + + case AlpsExitStatusFeasible: + throw UtilException("Feasible but not optimal", "Solve", "DippySolve"); + + default: + throw UtilException("Unknown solution status", "Solve", "DippySolve"); + } + + const DecompSolution *solution = alpsModel.getBestSolution(); + // cout << "Optimal Solution" << endl; + // solution->print(); + PyObject *pSolution = Py_None; + + if (solution != NULL) { + const double *values = solution->getValues(); + pSolution = pyTupleList_FromDoubleArray(values, sip.m_colList); + } + + PyObject *pDuals = Py_None; + + if (doCut) { + DecompAlgoC *algoC = dynamic_cast(algo); + OsiSolverInterface *masterOSI = algoC->getMasterOSI(); + const double *duals = masterOSI->getRowPrice(); + + if (duals != NULL) { + pDuals = pyTupleList_FromDoubleArray(duals, sip.m_rowList); } - - PyObject* pDuals = Py_None; - - if (doCut) { - DecompAlgoC* algoC = dynamic_cast(algo); - OsiSolverInterface* masterOSI = algoC->getMasterOSI(); - const double* duals = masterOSI->getRowPrice(); - - if (duals != NULL) { - pDuals = pyTupleList_FromDoubleArray(duals, sip.m_rowList); - } - } - - delete algo; - // return solution as list - PyObject* pOutput = PyTuple_New(4); - PyTuple_SetItem(pOutput, 0, pStatus); - PyTuple_SetItem(pOutput, 1, pMessage); - PyTuple_SetItem(pOutput, 2, pSolution); - PyTuple_SetItem(pOutput, 3, pDuals); - Py_INCREF(pOutput); - return pOutput; - } catch (CoinError& ex) { - cerr << "COIN Exception [ " << ex.message() << " ]" - << " at " << ex.fileName() << ":L" << ex.lineNumber() - << " in " << ex.className() << "::" << ex.methodName() << endl; - return NULL; - } - - Py_INCREF(Py_None); - return Py_None; + } + + delete algo; + // return solution as list + PyObject *pOutput = PyTuple_New(4); + PyTuple_SetItem(pOutput, 0, pStatus); + PyTuple_SetItem(pOutput, 1, pMessage); + PyTuple_SetItem(pOutput, 2, pSolution); + PyTuple_SetItem(pOutput, 3, pDuals); + Py_INCREF(pOutput); + return pOutput; + } catch (CoinError &ex) { + cerr << "COIN Exception [ " << ex.message() << " ]" + << " at " << ex.fileName() << ":L" << ex.lineNumber() << " in " + << ex.className() << "::" << ex.methodName() << endl; + return NULL; + } + + Py_INCREF(Py_None); + return Py_None; } diff --git a/Dip/src/dippy/__init__.py b/Dip/src/dippy/__init__.py index 998a144e..004043ea 100644 --- a/Dip/src/dippy/__init__.py +++ b/Dip/src/dippy/__init__.py @@ -7,4 +7,3 @@ pass from .dippy import * - diff --git a/Dip/src/dippy/dipapi.py b/Dip/src/dippy/dipapi.py index 652ac84d..bf63b2d5 100644 --- a/Dip/src/dippy/dipapi.py +++ b/Dip/src/dippy/dipapi.py @@ -18,23 +18,27 @@ def Solve(prob, params=None): """ from builtins import object + class DipAPIError(Exception): - """ + """ DipAPI Exception """ - pass -class DipAPI(object): + pass - def getObjective(self): - """ + +class DipAPI(object): + def getObjective(self): + """ Return objective as 
a dictionary with variables as keys and (non-zero) coefficients as values """ - raise DipAPIError("Bad function definition, DipAPI.getObjective must be overwritten") + raise DipAPIError( + "Bad function definition, DipAPI.getObjective must be overwritten" + ) - def getRows(self, problem=None): - """ + def getRows(self, problem=None): + """ Return constraints as a list of dictionaries with variables as keys and (non-zero) coefficients as values. Constraints also have getName, getLb and getUb methods @@ -42,43 +46,42 @@ def getRows(self, problem=None): problem = None implies the master problem, otherwise problem is a subproblem """ - raise DipAPIError("Bad function definition, DipAPI.getRows must be overwritten") + raise DipAPIError("Bad function definition, DipAPI.getRows must be overwritten") - def getCols(self, problem=None): - """ + def getCols(self, problem=None): + """ Returns a list of variables. Variables have getName, getLb, getUb and isInteger methods problem = None implies the master problem, otherwise problem is a subproblem """ - raise DipAPIError("Bad function definition, DipAPI.getCols must be overwritten") + raise DipAPIError("Bad function definition, DipAPI.getCols must be overwritten") - def getMasterAsTuple(self): - """ + def getMasterAsTuple(self): + """ Returns all the master problem data as a tuple of other "data gathering" functions """ - return (self.getObjective(), - self.getRows(), - self.getCols()) + return (self.getObjective(), self.getRows(), self.getCols()) - def getRelaxAsTuple(self, problem): - """ + def getRelaxAsTuple(self, problem): + """ Returns all the subproblem constraints and variables """ - return (self.getRows(problem), - self.getCols(problem)) + return (self.getRows(problem), self.getCols(problem)) - def getRelaxsAsDict(self): - """ + def getRelaxsAsDict(self): + """ Returns the relaxation subproblems as a dictionary with keys as defined by the user and values as subproblems """ - raise DipAPIError("Bad function definition, DipAPI.getRelaxsAsDict must be overwritten") + raise DipAPIError( + "Bad function definition, DipAPI.getRelaxsAsDict must be overwritten" + ) - def chooseBranchSet(self, xhat): - """ + def chooseBranchSet(self, xhat): + """ Finds the best branch for a fractional solution Inputs: @@ -88,19 +91,23 @@ def chooseBranchSet(self, xhat): down_lb, down_ub, up_lb, up_ub (tuple of (variable, value) dictionaries) = lower and upper bounds for down branch, lower and upper bounds for up branch """ - raise DipAPIError("Bad function definition, DipAPI.chooseBranchSet must be overwritten") + raise DipAPIError( + "Bad function definition, DipAPI.chooseBranchSet must be overwritten" + ) - def postProcessNode(self, output): - """ + def postProcessNode(self, output): + """ Returns information from the node that has just been processed. 
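The chooseBranchSet contract above can be satisfied by, for example, a most-fractional branching rule. A minimal sketch (xhat is taken to be a list of (variable, value) pairs as built by pyTupleList_FromDoubleArray; returning None is assumed to leave branching to DIP's default rule):

    import math

    def chooseBranchSet(self, xhat):
        frac = [(v, x) for v, x in xhat if abs(x - round(x)) > 1e-6]
        if not frac:
            return None                            # assumed: fall back to default
        v, x = max(frac, key=lambda p: abs(p[1] - round(p[1])))
        down_ub = {v: math.floor(x)}               # down branch: v <= floor(x)
        up_lb = {v: math.ceil(x)}                  # up branch:   v >= ceil(x)
        return {}, down_ub, up_lb, {}              # down_lb, down_ub, up_lb, up_ub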
Inputs: output (list of (parameter, value) tuples) = list of output values from the node """ - raise DipAPIError("Bad function definition, DipAPI.postProcess must be overwritten") + raise DipAPIError( + "Bad function definition, DipAPI.postProcess must be overwritten" + ) - def solveRelaxed(self, key, redCostX, target): - """ + def solveRelaxed(self, key, redCostX, target): + """ Returns solutions to the whichBlock relaxed subproblem Inputs: @@ -117,10 +124,12 @@ def solveRelaxed(self, key, redCostX, target): solution for this relaxed subproblem expressed as a cost, reduced cost and dictionary of non-zero values for variables """ - raise DipAPIError("Bad function definition, DipAPI.solveRelaxed must be overwritten") + raise DipAPIError( + "Bad function definition, DipAPI.solveRelaxed must be overwritten" + ) - def isUserFeasible(self, sol, tol): - """ + def isUserFeasible(self, sol, tol): + """ Lets the user decide if an integer solution is really feasible Inputs: @@ -130,10 +139,12 @@ def isUserFeasible(self, sol, tol): Outputs: (boolean) = false if not feasible (generate cuts) or true if feasible """ - raise DipAPIError("Bad function definition, DipAPI.isUserFeasible must be overwritten") + raise DipAPIError( + "Bad function definition, DipAPI.isUserFeasible must be overwritten" + ) - def generateCuts(self, node): - """ + def generateCuts(self, node): + """ Lets the user generate cuts to remove fractional "pieces" of the node solution Inputs: @@ -145,10 +156,12 @@ def generateCuts(self, node): i.e., a dictionary with LpVariables as keys and (non-zero) coefficients as values with getName, getLb and getUb bound methods """ - raise DipAPIError("Bad function definition, DipAPI.generateCuts must be overwritten") + raise DipAPIError( + "Bad function definition, DipAPI.generateCuts must be overwritten" + ) - def solveHeuristics(self, xhat, costX): - """ + def solveHeuristics(self, xhat, costX): + """ Lets the user generate (heuristic) solutions from a fractional solution Inputs: @@ -160,10 +173,12 @@ def solveHeuristics(self, xhat, costX): solutions found from this fractional solution expressed as a dictionary of non-zero values for variables """ - raise DipAPIError("Bad function definition, DipAPI.solveHeuristics must be overwritten") + raise DipAPIError( + "Bad function definition, DipAPI.solveHeuristics must be overwritten" + ) - def generateInitVars(self): - """ + def generateInitVars(self): + """ Returns initial solutions to relaxed subproblems Inputs: @@ -174,4 +189,6 @@ def generateInitVars(self): initial solutions for the relaxed subproblems expressed as a cost and dictionary of non-zero values for variables """ - raise DipAPIError("Bad function definition, DipAPI.generateInitVars must be overwritten") + raise DipAPIError( + "Bad function definition, DipAPI.generateInitVars must be overwritten" + ) diff --git a/Dip/src/dippy/dippy.py b/Dip/src/dippy/dippy.py index a6263da6..579f75bb 100644 --- a/Dip/src/dippy/dippy.py +++ b/Dip/src/dippy/dippy.py @@ -35,33 +35,37 @@ else: grumpy_installed = False + class DipError(Exception): """ Dip Exception """ - + + # DIP solver status -#enum DecompSolverStatus { +# enum DecompSolverStatus { # DecompSolStatError, # DecompSolStatOptimal, # DecompSolStatFeasible, # DecompSolStatInfeasible, # DecompSolStatNoSolution -#}; -DipSolStatError = 0 -DipSolStatOptimal = 1 -DipSolStatFeasible = 2 +# }; +DipSolStatError = 0 +DipSolStatOptimal = 1 +DipSolStatFeasible = 2 DipSolStatInfeasible = 3 DipSolStatNoSolution = 4 DipStatus = { - DipSolStatError: "Error", - 
DipSolStatOptimal: "Optimal", - DipSolStatFeasible: "Feasible", - DipSolStatInfeasible: "Infeasible", - DipSolStatNoSolution: "No solution" - } + DipSolStatError: "Error", + DipSolStatOptimal: "Optimal", + DipSolStatFeasible: "Feasible", + DipSolStatInfeasible: "Infeasible", + DipSolStatNoSolution: "No solution", +} _Solve = Solve + + def Solve(prob, params=None): """ Solve a DipProblem instance, returning a solution object @@ -87,19 +91,19 @@ def Solve(prob, params=None): if params is None: params = {} - if (prob.branch_method == None) and (prob.display_mode == 'off'): - params['pyBranchMethod'] = '0' - if (prob.post_process_node == None) and (prob.display_mode == 'off'): - params['pyPostProcessNode'] = '0' - if (prob.post_process_branch == None) and (prob.display_mode == 'off'): - params['pyPostProcessBranch'] = '0' + if (prob.branch_method == None) and (prob.display_mode == "off"): + params["pyBranchMethod"] = "0" + if (prob.post_process_node == None) and (prob.display_mode == "off"): + params["pyPostProcessNode"] = "0" + if (prob.post_process_branch == None) and (prob.display_mode == "off"): + params["pyPostProcessBranch"] = "0" if prob.relaxed_solver == None: - params['pyRelaxedSolver'] = '0' + params["pyRelaxedSolver"] = "0" if prob.is_solution_feasible == None: - params['pyIsSolutionFeasible'] = '0' - + params["pyIsSolutionFeasible"] = "0" + if (prob.generate_cuts == None) and (prob.generate_cuts_from_node == None): - params['pyGenerateCuts'] = '0' + params["pyGenerateCuts"] = "0" if prob.generate_cuts != None: prob.gen_cuts = True @@ -109,13 +113,13 @@ def Solve(prob, params=None): prob.gen_cuts_node = True else: prob.gen_cuts_node = False - + if prob.heuristics == None: - params['pyHeuristics'] = '0' + params["pyHeuristics"] = "0" if prob.init_vars == None: - params['pyInitVars'] = '0' + params["pyInitVars"] = "0" if prob.is_solution_feasible == None: - params['pyIsSolutionFeasible'] = '0' + params["pyIsSolutionFeasible"] = "0" for key, value in list(params.items()): valid_types = (basestring, int, float) @@ -125,44 +129,52 @@ def Solve(prob, params=None): section = key for name, param_val in list(value.items()): if not isinstance(param_val, valid_types): - raise DipError("Bad value '%s' in parameter dictionary, expecting string or number" % param_val) + raise DipError( + "Bad value '%s' in parameter dictionary, expecting string or number" + % param_val + ) processed[(section, name)] = str(param_val) elif isinstance(value, valid_types): # add this parameter to both the 'None' section and the 'DECOMP' section processed[(None, key)] = str(value) - processed[('DECOMP', key)] = str(value) + processed[("DECOMP", key)] = str(value) else: - raise DipError("Bad value '%s' in parameter dictionary, expecting string" % value) + raise DipError( + "Bad value '%s' in parameter dictionary, expecting string" % value + ) # DIP only solves minimisation problems if prob.sense == pulp.LpMaximize: - raise DipError("DIP assumes a minimize objective, but DipProblem has "+ - "maximize objective.\n" + - "Use prob.sense = pulp.LpMinimize and prob.objective " + - "*= -1 to remedy") - + raise DipError( + "DIP assumes a minimize objective, but DipProblem has " + + "maximize objective.\n" + + "Use prob.sense = pulp.LpMinimize and prob.objective " + + "*= -1 to remedy" + ) + # DIP only allows non-negative variables. 
This is difficult # to transform automatically, so request re-formulation for v in prob.variables(): if v.lowBound < 0: - raise DipError("Variable %s has negative lower bound, please " + - "re-formulate using sum of non-negative variables" - % v.name) - + raise DipError( + "Variable %s has negative lower bound, please " + + "re-formulate using sum of non-negative variables" % v.name + ) + # call the Solve method from _dippy try: status, message, solList, dualList = _Solve(prob, processed) # solList is a list of (col_name, value) pairs # dualList is a list of (row_name, value) pairs - if prob.display_mode == 'svg' and gimpy_installed: + if prob.display_mode == "svg" and gimpy_installed: if prob.display_interval is not None: - prob.Tree.write_as_svg(filename = "%s_%d" % (prob.svg_prefix, - prob.last_svg + 1), - prevfile = "%s_%d" % (prob.svg_prefix, - prob.last_svg)) + prob.Tree.write_as_svg( + filename="%s_%d" % (prob.svg_prefix, prob.last_svg + 1), + prevfile="%s_%d" % (prob.svg_prefix, prob.last_svg), + ) prob.last_svg += 1 - + except Exception as ex: print("Error returned from _dippy") print(ex) @@ -176,13 +188,12 @@ def Solve(prob, params=None): setSolVars = set(solDict.keys()) diff = setVars.symmetric_difference(setSolVars) if len(diff) > 0: - raise DipError("Solution and variable list don't match in " + - "dippy.Solve") + raise DipError("Solution and variable list don't match in " + "dippy.Solve") solution = solDict for v in prob.variables(): v.varValue = solution[v] - + if dualList is None: duals = None else: @@ -198,21 +209,25 @@ def Solve(prob, params=None): # return status, message, solution and duals return status, message, solution, duals + def createBranchLabel(lbs, ubs): maxLabelWidth = 20 - + both = set(lbs.keys()) & set(ubs.keys()) lbOnly = set(lbs.keys()) - both ubOnly = set(ubs.keys()) - both - - bothStr = '' + + bothStr = "" first = True currWidth = len(bothStr) for l in both: - addStr = str(int(lbs[l])) + '<=' + \ - ''.join(x for x in str(l) - if x not in '()_') \ - + '<=' + str(int(ubs[l])) + addStr = ( + str(int(lbs[l])) + + "<=" + + "".join(x for x in str(l) if x not in "()_") + + "<=" + + str(int(ubs[l])) + ) if first: newWidth = len(addStr) + currWidth else: @@ -221,23 +236,21 @@ def createBranchLabel(lbs, ubs): if first: first = False else: - bothStr += ', ' + bothStr += ", " currWidth += 2 bothStr += addStr currWidth += len(addStr) else: - bothStr += '\n' + addStr + bothStr += "\n" + addStr currWidth = len(addStr) -# bothStr += ' ...' -# break + # bothStr += ' ...' + # break - lbOnlyStr = '' + lbOnlyStr = "" first = True currWidth = len(lbOnlyStr) for l in lbOnly: - addStr = str(int(lbs[l])) + '<=' + \ - ''.join(x for x in str(l) - if x not in '()_') + addStr = str(int(lbs[l])) + "<=" + "".join(x for x in str(l) if x not in "()_") if first: newWidth = len(addStr) + currWidth else: @@ -246,23 +259,21 @@ def createBranchLabel(lbs, ubs): if first: first = False else: - lbOnlyStr += ', ' + lbOnlyStr += ", " currWidth += 2 lbOnlyStr += addStr currWidth += len(addStr) else: - lbOnlyStr += '\n' + addStr + lbOnlyStr += "\n" + addStr currWidth = len(addStr) -# lbOnlyStr += ' ...' -# break + # lbOnlyStr += ' ...' 
+ # break - ubOnlyStr = '' + ubOnlyStr = "" first = True currWidth = len(ubOnlyStr) for l in ubOnly: - addStr = ''.join(x for x in str(l) - if x not in '()_') \ - + '<=' + str(int(ubs[l])) + addStr = "".join(x for x in str(l) if x not in "()_") + "<=" + str(int(ubs[l])) if first: newWidth = len(addStr) + currWidth else: @@ -271,17 +282,17 @@ def createBranchLabel(lbs, ubs): if first: first = False else: - ubOnlyStr += ', ' + ubOnlyStr += ", " currWidth += 2 ubOnlyStr += addStr currWidth += len(addStr) else: - ubOnlyStr += '\n' + addStr + ubOnlyStr += "\n" + addStr currWidth = len(addStr) -# ubOnlyStr += ' ...' -# break + # ubOnlyStr += ' ...' + # break - labelStr = '' + labelStr = "" first = True if len(bothStr) > 0: labelStr += bothStr @@ -290,58 +301,60 @@ def createBranchLabel(lbs, ubs): if first: first = False else: - labelStr += '\n' + labelStr += "\n" labelStr += lbOnlyStr if len(ubOnlyStr) > 0: if first: first = False else: - labelStr += '\n' + labelStr += "\n" labelStr += ubOnlyStr - + return labelStr + import string + + def asCplexName(name): - #to remove illegal characters from the names - trans = str.maketrans("-+[] ->/","________") - + # to remove illegal characters from the names + trans = str.maketrans("-+[] ->/", "________") + return str(name).translate(trans) -class DipProblem(pulp.LpProblem, DipAPI): +class DipProblem(pulp.LpProblem, DipAPI): def __init__(self, *args, **kwargs): - # callback functions can be passed to class constructor as keyword + # callback functions can be passed to class constructor as keyword # arguments - self.branch_method = kwargs.pop('branch_method', None) - self.post_process_branch = kwargs.pop('post_process_branch', None) - self.post_process_node = kwargs.pop('post_process_node', None) - self.relaxed_solver = kwargs.pop('relaxed_solver', None) - self.is_solution_feasible = kwargs.pop('is_solution_feasible', None) - self.generate_cuts = kwargs.pop('generate_cuts', None) - self.generate_cuts_from_node = kwargs.pop('generate_cuts_from_node', - None) - self.heuristics = kwargs.pop('heuristics', None) - self.init_vars = kwargs.pop('init_vars', None) - self.display_mode = kwargs.pop('display_mode', 'off') - self.display_interval = kwargs.pop('display_interval', 1) - self.layout = kwargs.pop('layout', 'dot') - self.svg_prefix = kwargs.pop('svg_prefix', 'tree') - - if self.display_mode != 'off': + self.branch_method = kwargs.pop("branch_method", None) + self.post_process_branch = kwargs.pop("post_process_branch", None) + self.post_process_node = kwargs.pop("post_process_node", None) + self.relaxed_solver = kwargs.pop("relaxed_solver", None) + self.is_solution_feasible = kwargs.pop("is_solution_feasible", None) + self.generate_cuts = kwargs.pop("generate_cuts", None) + self.generate_cuts_from_node = kwargs.pop("generate_cuts_from_node", None) + self.heuristics = kwargs.pop("heuristics", None) + self.init_vars = kwargs.pop("init_vars", None) + self.display_mode = kwargs.pop("display_mode", "off") + self.display_interval = kwargs.pop("display_interval", 1) + self.layout = kwargs.pop("layout", "dot") + self.svg_prefix = kwargs.pop("svg_prefix", "tree") + + if self.display_mode != "off": if not gimpy_installed: print("GiMPy not installed. Display mode set to 'off'") - self.display_mode = 'off' + self.display_mode = "off" else: if grumpy_installed: self.Tree = BBTree() else: - if self.layout == 'bak': + if self.layout == "bak": print("GrUMPy not installed. 
Display mode set to 'off'") - self.display_mode = 'off' + self.display_mode = "off" else: self.Tree = BinaryTree() - if self.display_mode != 'off': + if self.display_mode != "off": self.Tree.set_display_mode(self.display_mode) self.Tree.set_layout(self.layout) @@ -350,9 +363,9 @@ def __init__(self, *args, **kwargs): self.relaxation = RelaxationCollection(self) def deepcopy(self): - # callback functions can be passed to class constructor as keyword + # callback functions can be passed to class constructor as keyword # arguments - dipcopy = DipProblem(name = self.name, sense = self.sense) + dipcopy = DipProblem(name=self.name, sense=self.sense) dipcopy.branch_method = self.branch_method dipcopy.is_solution_feasible = self.is_solution_feasible dipcopy.generate_cuts = self.generate_cuts @@ -364,7 +377,7 @@ def deepcopy(self): if dipcopy.objective != None: dipcopy.objective = self.objective.copy() dipcopy.constraints = {} - for k,v in self.constraints.items(): + for k, v in self.constraints.items(): dipcopy.constraints[k] = v.copy() dipcopy.sos1 = self.sos1.copy() dipcopy.sos2 = self.sos2.copy() @@ -374,7 +387,7 @@ def deepcopy(self): dipcopy.relaxation[k] = self.relaxation[k].copy() return dipcopy - + def variables(self): """ Returns a list of the problem variables @@ -396,7 +409,7 @@ def variables(self): variables.update(c) variables = list(variables) variables = sorted(variables, key=lambda variable: variable.name) - + return variables def getObjective(self): @@ -453,18 +466,19 @@ def getRelaxsAsDict(self): """ return self.relaxation.dict - def writeFull(self, instancefile, blockfile, mip = True): + def writeFull(self, instancefile, blockfile, mip=True): f = open(instancefile, "w") b = open(blockfile, "w") - f.write("\\* "+self.name+" *\\\n") + f.write("\\* " + self.name + " *\\\n") if self.sense == 1: f.write("Minimize\n") else: f.write("Maximize\n") wasNone, dummyVar = self.fixObjective() objName = self.objective.name - if not objName: objName = "OBJ" - f.write(self.objective.asCplexLpAffineExpression(objName, constant = 0)) + if not objName: + objName = "OBJ" + f.write(self.objective.asCplexLpAffineExpression(objName, constant=0)) f.write("Subject To\n") b.write("NBLOCKS\n") b.write("%i\n" % len(self.relaxation.dict)) @@ -473,32 +487,41 @@ def writeFull(self, instancefile, blockfile, mip = True): blockId = 0 for r in self.relaxation.dict: rname = asCplexName(str(r)) - b.write("BLOCK %d\n" % blockId) + b.write("BLOCK %d\n" % blockId) for k in self.relaxation.dict[r].constraints: - f.write(self.relaxation.dict[r].constraints[k].asCplexLpConstraint(str(k)+'_'+rname)) - b.write(str(k)+'_'+rname+'\n') + f.write( + self.relaxation.dict[r] + .constraints[k] + .asCplexLpConstraint(str(k) + "_" + rname) + ) + b.write(str(k) + "_" + rname + "\n") blockId += 1 vs = list(self.variables()) # check if any names are longer than 100 characters long_names = [v.name for v in vs if len(v.name) > 100] if long_names: - raise PulpError('Variable names too long for Lp format\n' - + str(long_names)) + raise PulpError("Variable names too long for Lp format\n" + str(long_names)) # check for repeated names repeated_names = {} for v in vs: repeated_names[v.name] = repeated_names.get(v.name, 0) + 1 - repeated_names = [(key, value) for key, value in list(repeated_names.items()) - if value >= 2] + repeated_names = [ + (key, value) for key, value in list(repeated_names.items()) if value >= 2 + ] if repeated_names: - raise PulpError('Repeated variable names in Lp format\n' - + str(repeated_names)) + raise PulpError( + 
"Repeated variable names in Lp format\n" + str(repeated_names) + ) # Bounds on non-"positive" variables - # Note: XPRESS and CPLEX do not interpret integer variables without + # Note: XPRESS and CPLEX do not interpret integer variables without # explicit bounds if mip: - vg = [v for v in vs if not (v.isPositive() and v.cat == pulp.LpContinuous) \ - and not v.isBinary()] + vg = [ + v + for v in vs + if not (v.isPositive() and v.cat == pulp.LpContinuous) + and not v.isBinary() + ] else: vg = [v for v in vs if not v.isPositive()] if vg: @@ -510,17 +533,19 @@ def writeFull(self, instancefile, blockfile, mip = True): vg = [v for v in vs if v.cat == pulp.LpInteger and not v.isBinary()] if vg: f.write("Generals\n") - for v in vg: f.write("%s\n" % v.name) + for v in vg: + f.write("%s\n" % v.name) # Binary variables vg = [v for v in vs if v.isBinary()] if vg: f.write("Binaries\n") - for v in vg: f.write("%s\n" % v.name) + for v in vg: + f.write("%s\n" % v.name) f.write("End\n") f.close() self.restoreObjective(wasNone, dummyVar) - def writeRelaxed(self, block, filename, mip = True): + def writeRelaxed(self, block, filename, mip=True): """ Write the given block into a .lp file. @@ -536,7 +561,7 @@ def writeRelaxed(self, block, filename, mip = True): """ relaxation = self.relaxation[block] f = open(filename, "w") - f.write("\\* "+relaxation.name+" *\\\n") + f.write("\\* " + relaxation.name + " *\\\n") f.write("Subject To\n") ks = list(relaxation.constraints.keys()) ks.sort() @@ -545,12 +570,15 @@ def writeRelaxed(self, block, filename, mip = True): vs = relaxation.variables() vs.sort() # Bounds on non-"positive" variables - # Note: XPRESS and CPLEX do not interpret integer variables without + # Note: XPRESS and CPLEX do not interpret integer variables without # explicit bounds if mip: - vg = [v for v in vs if not (v.isPositive() and \ - v.cat == pulp.LpContinuous) \ - and not v.isBinary()] + vg = [ + v + for v in vs + if not (v.isPositive() and v.cat == pulp.LpContinuous) + and not v.isBinary() + ] else: vg = [v for v in vs if not v.isPositive()] if vg: @@ -559,19 +587,20 @@ def writeRelaxed(self, block, filename, mip = True): f.write("%s\n" % v.asCplexLpVariable()) # Integer non-binary variables if mip: - vg = [v for v in vs if v.cat == pulp.LpInteger and \ - not v.isBinary()] + vg = [v for v in vs if v.cat == pulp.LpInteger and not v.isBinary()] if vg: f.write("Generals\n") - for v in vg: f.write("%s\n" % v.name) + for v in vg: + f.write("%s\n" % v.name) # Binary variables vg = [v for v in vs if v.isBinary()] if vg: f.write("Binaries\n") - for v in vg: f.write("%s\n" % v.name) + for v in vg: + f.write("%s\n" % v.name) f.write("End\n") f.close() - + def chooseBranchSet(self, xhat): """ Finds the best branch for a fractional solution @@ -586,27 +615,33 @@ def chooseBranchSet(self, xhat): try: if self.branch_method is None: return None - + xhatDict = dict(xhat) setVars = set(self.variables()) setXhatVars = set(xhatDict.keys()) diff = setVars.symmetric_difference(setXhatVars) if len(diff) > 0: - raise DipError("Solution and variable list don't match in chooseBranchSet") - + raise DipError( + "Solution and variable list don't match in chooseBranchSet" + ) + branch_sets = self.branch_method(self, xhatDict) if branch_sets is None: return None - - if (branch_sets[0] or branch_sets[1]) and (branch_sets[2] or branch_sets[3]): + + if (branch_sets[0] or branch_sets[1]) and ( + branch_sets[2] or branch_sets[3] + ): return branch_sets else: - raise DipError("Invalid bounds returned from user-specified 
branch_method") + raise DipError( + "Invalid bounds returned from user-specified branch_method" + ) except Exception as ex: errorStr = "Error in chooseBranchSet\n%s" % ex raise DipError(errorStr) - + def decipherNode(self, output): outputDict = dict(output) if "xhat" in list(outputDict.keys()): @@ -615,7 +650,7 @@ def decipherNode(self, output): if "bounds" in list(outputDict.keys()): bounds = outputDict["bounds"] outputDict["bounds"] = dict(bounds) - + return outputDict def postProcessNode(self, node): @@ -626,9 +661,9 @@ def postProcessNode(self, node): output (list of (parameter, value) tuples) = list of output values from the node """ - try: + try: nodeDict = self.decipherNode(node) - + if gimpy_installed: nodeInd = nodeDict["nodeIndex"] parentInd = nodeDict["parentIndex"] @@ -636,96 +671,116 @@ def postProcessNode(self, node): branchedDir = nodeDict["branchedDir"] nodeStatus = nodeDict["nodeStatus"] if branchedDir == -1: - branch_direction = 'L' + branch_direction = "L" else: - branch_direction = 'R' - - if nodeStatus == 'Infeasible': - status = 'I' - BAKstatus = 'infeasible' - color = 'orange' - elif nodeStatus == 'Candidate': - status = 'C' - BAKstatus = 'candidate' - color = 'yellow' - elif nodeStatus == 'Solution': - status = 'S' - BAKstatus = 'integer' - color = 'lightblue' + branch_direction = "R" + + if nodeStatus == "Infeasible": + status = "I" + BAKstatus = "infeasible" + color = "orange" + elif nodeStatus == "Candidate": + status = "C" + BAKstatus = "candidate" + color = "yellow" + elif nodeStatus == "Solution": + status = "S" + BAKstatus = "integer" + color = "lightblue" else: - status = 'P' - BAKstatus = 'fathomed' - color = 'red' - - if nodeStatus != 'Infeasible': - label = status + ": " + "%.1f"%nodeQuality + status = "P" + BAKstatus = "fathomed" + color = "red" + + if nodeStatus != "Infeasible": + label = status + ": " + "%.1f" % nodeQuality else: - label = 'I' + label = "I" numNodes = len(self.Tree.get_node_list()) if parentInd == -1: - if self.layout == 'bak': - self.Tree.AddOrUpdateNode(nodeInd, parentInd, - branch_direction, BAKstatus, - nodeQuality, None, None) + if self.layout == "bak": + self.Tree.AddOrUpdateNode( + nodeInd, + parentInd, + branch_direction, + BAKstatus, + nodeQuality, + None, + None, + ) else: - self.Tree.add_root(nodeInd, label = label, - status = 'C', obj = nodeQuality, - color = color, style = 'filled', - fillcolor = color) - if self.Tree.attr['display'] == 'svg': + self.Tree.add_root( + nodeInd, + label=label, + status="C", + obj=nodeQuality, + color=color, + style="filled", + fillcolor=color, + ) + if self.Tree.attr["display"] == "svg": if self.display_interval is not None: if numNodes % self.display_interval in [0, 1]: - self.Tree.write_as_svg(filename = "%s_0" - % self.svg_prefix, - nextfile = "%s_1" - % self.svg_prefix, - highlight = nodeInd) + self.Tree.write_as_svg( + filename="%s_0" % self.svg_prefix, + nextfile="%s_1" % self.svg_prefix, + highlight=nodeInd, + ) self.last_svg = 0 numNodes += 1 else: - if branch_direction == 'L': + if branch_direction == "L": n = self.Tree.get_left_child(parentInd) else: n = self.Tree.get_right_child(parentInd) - edge_label = self.Tree.get_edge_attr(parentInd, n, 'label') + edge_label = self.Tree.get_edge_attr(parentInd, n, "label") self.Tree.del_node(n) - if self.layout == 'bak': - self.Tree.AddOrUpdateNode(nodeInd, parentInd, - branch_direction, 'branched', - nodeQuality, None, None) - elif branch_direction == 'L': - self.Tree.add_left_child(nodeInd, parentInd, - label = label, - status = status, - 
obj = nodeQuality, - color = color, - style = 'filled', - fillcolor = color) + if self.layout == "bak": + self.Tree.AddOrUpdateNode( + nodeInd, + parentInd, + branch_direction, + "branched", + nodeQuality, + None, + None, + ) + elif branch_direction == "L": + self.Tree.add_left_child( + nodeInd, + parentInd, + label=label, + status=status, + obj=nodeQuality, + color=color, + style="filled", + fillcolor=color, + ) else: - self.Tree.add_right_child(nodeInd, parentInd, - label = label, - status = status, - obj = nodeQuality, - color = color, - style = 'filled', - fillcolor = color) + self.Tree.add_right_child( + nodeInd, + parentInd, + label=label, + status=status, + obj=nodeQuality, + color=color, + style="filled", + fillcolor=color, + ) if edge_label is not None: - self.Tree.set_edge_attr(parentInd, nodeInd, - 'label', edge_label) - if self.Tree.attr['display'] == 'svg': + self.Tree.set_edge_attr(parentInd, nodeInd, "label", edge_label) + if self.Tree.attr["display"] == "svg": if self.display_interval is not None: if numNodes % self.display_interval in [0, 1]: - self.Tree.write_as_svg(filename = "%s_%d" - % (self.svg_prefix, - self.last_svg + 1), - prevfile = "%s_%d" - % (self.svg_prefix, - self.last_svg), - nextfile = "%s_%d" - % (self.svg_prefix, - self.last_svg + 2), - highlight = nodeInd) + self.Tree.write_as_svg( + filename="%s_%d" + % (self.svg_prefix, self.last_svg + 1), + prevfile="%s_%d" % (self.svg_prefix, self.last_svg), + nextfile="%s_%d" + % (self.svg_prefix, self.last_svg + 2), + highlight=nodeInd, + ) self.last_svg += 1 if self.display_interval is not None: if numNodes % self.display_interval in [0, 1]: @@ -733,7 +788,7 @@ def postProcessNode(self, node): if self.post_process_node is not None: self.post_process_node(self, nodeDict) - + except Exception as ex: errorStr = "Error in postProcessNode\n%s" % ex raise DipError(errorStr) @@ -746,73 +801,83 @@ def postProcessBranch(self, branchInfo): output (list of (parameter, value) tuples) describing branching decision """ try: - + outputDict = dict(branchInfo) if gimpy_installed: - nodeInd = outputDict['nodeIndex'] - nodeQuality = outputDict['nodeQuality'] + nodeInd = outputDict["nodeIndex"] + nodeQuality = outputDict["nodeQuality"] numNodes = len(self.Tree.get_node_list()) for n in outputDict: - if n == 'pDownUB': - if self.layout == 'bak': - self.Tree.AddOrUpdateNode(-numNodes, - nodeInd, 'L', - 'candidate', - nodeQuality, None, None) + if n == "pDownUB": + if self.layout == "bak": + self.Tree.AddOrUpdateNode( + -numNodes, + nodeInd, + "L", + "candidate", + nodeQuality, + None, + None, + ) else: - self.Tree.add_left_child(-numNodes, - nodeInd, - label = 'C', - status = 'C', - obj = nodeQuality, - color = 'yellow', - style = 'filled', - fillcolor = 'yellow') - if 'pDownLB' in outputDict: - lbs = outputDict['pDownLB'] + self.Tree.add_left_child( + -numNodes, + nodeInd, + label="C", + status="C", + obj=nodeQuality, + color="yellow", + style="filled", + fillcolor="yellow", + ) + if "pDownLB" in outputDict: + lbs = outputDict["pDownLB"] else: lbs = {} - ubs = outputDict['pDownUB'] + ubs = outputDict["pDownUB"] labelStr = createBranchLabel(lbs, ubs) - self.Tree.set_edge_attr(nodeInd, - -numNodes, - 'label', labelStr) + self.Tree.set_edge_attr(nodeInd, -numNodes, "label", labelStr) numNodes += 1 - elif n == 'pUpLB': - if self.layout == 'bak': - self.Tree.AddOrUpdateNode(-numNodes, - nodeInd, 'R', - 'candidate', - nodeQuality, None, None) + elif n == "pUpLB": + if self.layout == "bak": + self.Tree.AddOrUpdateNode( + -numNodes, + 
nodeInd, + "R", + "candidate", + nodeQuality, + None, + None, + ) else: - self.Tree.add_right_child(-numNodes, - nodeInd, - label = 'C', - status = 'C', - obj = nodeQuality, - color = 'yellow', - style = 'filled', - fillcolor = 'yellow') - if 'pUpUB' in outputDict: - ubs = outputDict['pUpUB'] + self.Tree.add_right_child( + -numNodes, + nodeInd, + label="C", + status="C", + obj=nodeQuality, + color="yellow", + style="filled", + fillcolor="yellow", + ) + if "pUpUB" in outputDict: + ubs = outputDict["pUpUB"] else: ubs = {} - lbs = outputDict['pUpLB'] + lbs = outputDict["pUpLB"] labelStr = createBranchLabel(lbs, ubs) - self.Tree.set_edge_attr(nodeInd, - -numNodes, - 'label', labelStr) + self.Tree.set_edge_attr(nodeInd, -numNodes, "label", labelStr) numNodes += 1 - if self.Tree.get_node_attr(nodeInd, 'color') == 'yellow': - self.Tree.set_node_attr(nodeInd, 'color', 'green') - self.Tree.set_node_attr(nodeInd, 'fillcolor', 'green') - + if self.Tree.get_node_attr(nodeInd, "color") == "yellow": + self.Tree.set_node_attr(nodeInd, "color", "green") + self.Tree.set_node_attr(nodeInd, "fillcolor", "green") + if self.post_process_branch is not None: self.post_process_branch(self, outputDict) - + except Exception as ex: errorStr = "Error in postProcessBranch\n%s" % ex raise DipError(errorStr) @@ -836,7 +901,7 @@ def solveRelaxed(self, key, redCostX, target): dictionary of non-zero values for variables """ try: - + # transform redCostX into a dictionary redCostDict = dict(redCostX) setVars = set(self.variables()) @@ -844,26 +909,31 @@ def solveRelaxed(self, key, redCostX, target): diff = setVars.symmetric_difference(setRedCostVars) if len(diff) > 0: print(diff) - raise DipError("Reduced cost and variable list don't match in", - "solveRelaxed") - + raise DipError( + "Reduced cost and variable list don't match in", "solveRelaxed" + ) + status, dvs = self.relaxed_solver(self, key, redCostDict, target) - + if len(dvs) > 0: dvs_with_costs = [] for var in dvs: if isinstance(var, dict): - cost = sum(self.objective[i]*var[i] for i in var - if i in self.objective) - red_cost = sum(redCostDict[i]*var[i] for i in var - if i in redCostDict) + cost = sum( + self.objective[i] * var[i] + for i in var + if i in self.objective + ) + red_cost = sum( + redCostDict[i] * var[i] for i in var if i in redCostDict + ) dvs_with_costs.append((cost, red_cost, var)) else: return status, dvs return status, dvs_with_costs else: return status, dvs - + except Exception as ex: errorStr = "Error in solveRelaxed\n%s" % ex raise DipError(errorStr) @@ -880,16 +950,18 @@ def isUserFeasible(self, sol, tol): (boolean) = false if not feasible (generate cuts) or true if feasible """ try: - + solDict = dict(sol) setVars = set(self.variables()) setSolVars = set(solDict.keys()) diff = setVars.symmetric_difference(setSolVars) if len(diff) > 0: - raise DipError("Solution and variable list don't match in isUserFeasible") - + raise DipError( + "Solution and variable list don't match in isUserFeasible" + ) + return self.is_solution_feasible(self, solDict, tol) - + except Exception as ex: errorStr = "Error in isUserFeasible\n%s" % ex raise DipError(errorStr) @@ -908,7 +980,7 @@ def generateCuts(self, node): as values with getName, getLb and getUb bound methods """ try: - + nodeDict = self.decipherNode(node) xhatDict = nodeDict["xhat"] setVars = set(self.variables()) @@ -916,13 +988,13 @@ def generateCuts(self, node): diff = setVars.symmetric_difference(setXhatVars) if len(diff) > 0: raise DipError("Solution and variable list don't match in generateCuts") - 
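For reference, the generate_cuts callback that this hook forwards to receives the DipProblem plus a {LpVariable: value} dictionary and must return a list of PuLP constraints, or None when it has nothing to add. A minimal sketch under those assumptions (the callback name and the separation rule are illustrative, not taken from this code base):

    def my_generate_cuts(prob, sol):
        tol = 1e-6
        cuts = []
        for v in prob.variables():
            # hypothetical separation rule: push back values that drift below
            # a declared lower bound of zero in the relaxation solution
            if v.lowBound == 0 and sol[v] < -tol:
                cuts.append(v >= 0)  # comparing an LpVariable yields an LpConstraint
        return cuts if cuts else None

    # attached before dippy.Solve(prob), as the examples later in this diff do:
    # prob.generate_cuts = my_generate_cuts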
+ # Generate a list of cuts as LpConstraints if self.gen_cuts: cuts = self.generate_cuts(self, xhatDict) else: cuts = None - + if self.gen_cuts_node: moreCuts = self.generate_cuts_from_node(self, nodeDict) if moreCuts is not None: @@ -930,7 +1002,7 @@ def generateCuts(self, node): cuts = moreCuts else: cuts.extend(moreCuts) - + if cuts is not None: if len(cuts) > 0: return cuts @@ -955,22 +1027,24 @@ def solveHeuristics(self, xhat, costX): dictionary of non-zero values for variables """ try: - + # transform xhat into a dictionary xhatDict = dict(xhat) setVars = set(self.variables()) setXhatVars = set(xhatDict.keys()) diff = setVars.symmetric_difference(setXhatVars) if len(diff) > 0: - raise DipError("Solution and variable list don't match in solveHeuristics") - + raise DipError( + "Solution and variable list don't match in solveHeuristics" + ) + # transform costs into a dictionary costDict = dict(costX) setCostVars = set(costDict.keys()) diff = setVars.symmetric_difference(setCostVars) if len(diff) > 0: raise DipError("Cost and variable list don't match in solveHeuristics") - + sols = self.heuristics(self, xhatDict, costDict) if sols is not None: if len(sols) > 0: @@ -995,7 +1069,7 @@ def generateInitVars(self): dictionary of non-zero values for variables """ try: - + bvs = self.init_vars(self) if bvs is not None: if len(bvs) > 0: @@ -1006,14 +1080,14 @@ def generateInitVars(self): except Exception as ex: errorStr = "Error in generateInitVars\n%s" % ex raise DipError(errorStr) - + class RelaxationCollection(object): """ A simple defaultdict for holding relaxation problems """ - PROBLEM_CLASS = pulp.LpProblem + PROBLEM_CLASS = pulp.LpProblem def __init__(self, parent): self.parent = parent diff --git a/Dip/src/dippy/examples/bpp/bin_pack_decomp_func.py b/Dip/src/dippy/examples/bpp/bin_pack_decomp_func.py index 8afc250b..6d76dd9f 100644 --- a/Dip/src/dippy/examples/bpp/bin_pack_decomp_func.py +++ b/Dip/src/dippy/examples/bpp/bin_pack_decomp_func.py @@ -2,7 +2,15 @@ from __future__ import print_function from builtins import range from past.utils import old_div -from pulp import LpVariable, LpBinary, lpSum, value, LpProblem, LpMaximize, LpAffineExpression +from pulp import ( + LpVariable, + LpBinary, + lpSum, + value, + LpProblem, + LpMaximize, + LpAffineExpression, +) try: import src.dippy as dippy @@ -27,14 +35,15 @@ prob = dippy.DipProblem("Bin Packing") -assign_vars = LpVariable.dicts("AtLocation", - [(i, j) for i in data.LOCATIONS - for j in data.PRODUCTS], - 0, 1, LpBinary) -use_vars = LpVariable.dicts("UseLocation", - data.LOCATIONS, 0, 1, LpBinary) -waste_vars = LpVariable.dicts("Waste", - data.LOCATIONS, 0, data.CAPACITY) +assign_vars = LpVariable.dicts( + "AtLocation", + [(i, j) for i in data.LOCATIONS for j in data.PRODUCTS], + 0, + 1, + LpBinary, +) +use_vars = LpVariable.dicts("UseLocation", data.LOCATIONS, 0, 1, LpBinary) +waste_vars = LpVariable.dicts("Waste", data.LOCATIONS, 0, data.CAPACITY) # objective: minimise waste prob += lpSum(waste_vars[i] for i in LOCATIONS), "min" @@ -45,37 +54,39 @@ # Aggregate capacity constraints for i in LOCATIONS: - prob.relaxation[i] += lpSum(assign_vars[(i, j)] * REQUIREMENT[j] - for j in PRODUCTS) + waste_vars[i] \ - == CAPACITY * use_vars[i] + prob.relaxation[i] += ( + lpSum(assign_vars[(i, j)] * REQUIREMENT[j] for j in PRODUCTS) + waste_vars[i] + == CAPACITY * use_vars[i] + ) # Disaggregate capacity constraints for i in LOCATIONS: for j in PRODUCTS: prob.relaxation[i] += assign_vars[(i, j)] <= use_vars[i] - + # Ordering constraints for 
index, location in enumerate(LOCATIONS): if index > 0: - prob += use_vars[LOCATIONS[index-1]] >= use_vars[location] - + prob += use_vars[LOCATIONS[index - 1]] >= use_vars[location] + # Anti-symmetry branches def choose_antisymmetry_branch(prob, sol): num_locations = sum(sol[use_vars[i]] for i in LOCATIONS) - up = ceil(num_locations) # Round up to next nearest integer - down = floor(num_locations) # Round down - if (up - num_locations > tol) \ - and (num_locations - down > tol): # Is fractional? + up = ceil(num_locations) # Round up to next nearest integer + down = floor(num_locations) # Round down + if (up - num_locations > tol) and (num_locations - down > tol): # Is fractional? # Down branch: provide upper bounds, lower bounds are default - down_branch_ub = dict([(use_vars[LOCATIONS[n]], 0) for - n in range(int(down), len(LOCATIONS))]) + down_branch_ub = dict( + [(use_vars[LOCATIONS[n]], 0) for n in range(int(down), len(LOCATIONS))] + ) # Up branch: provide lower bounds, upper bounds are default - up_branch_lb = dict([(use_vars[LOCATIONS[n]], 1) for - n in range(0, int(up))]) + up_branch_lb = dict([(use_vars[LOCATIONS[n]], 1) for n in range(0, int(up))]) # Return the advanced branch to DIP return ({}, down_branch_ub, up_branch_lb, {}) -#prob.branch_method = choose_antisymmetry_branch + +# prob.branch_method = choose_antisymmetry_branch + def solve_subproblem(prob, index, redCosts, target): loc = index @@ -83,23 +94,23 @@ def solve_subproblem(prob, index, redCosts, target): # Calculate effective objective coefficient of products effs = {} for j in PRODUCTS: - effs[j] = (redCosts[assign_vars[(loc, j)]] - - redCosts[waste_vars[loc]] * REQUIREMENT[j]) + effs[j] = ( + redCosts[assign_vars[(loc, j)]] - redCosts[waste_vars[loc]] * REQUIREMENT[j] + ) obj = [-effs[j] for j in PRODUCTS] weights = [REQUIREMENT[j] for j in PRODUCTS] - + # Use 0-1 KP to max. total effective value of products at location z, solution = knapsack01(obj, weights, CAPACITY) - + # Get the reduced cost of the knapsack solution and waste if debug_print: - rc = (redCosts[use_vars[loc]] -z + - redCosts[waste_vars[loc]] * CAPACITY) + rc = redCosts[use_vars[loc]] - z + redCosts[waste_vars[loc]] * CAPACITY waste = CAPACITY - sum(weights[i] for i in solution) rc += redCosts[waste_vars[loc]] * waste - if redCosts[use_vars[loc]] > z + tol: # ... or an empty location is "useful" + if redCosts[use_vars[loc]] > z + tol: # ... or an empty location is "useful" if debug_print: print("Zero solution is optimal") return DipSolStatOptimal, [{}] @@ -108,10 +119,12 @@ def solve_subproblem(prob, index, redCosts, target): var_values = [(assign_vars[(loc, i)], 1) for i in solution] var_values.append((use_vars[loc], 1)) var_values.append((waste_vars[loc], waste)) - + return DipSolStatOptimal, [var_values] -#prob.relaxed_solver = solve_subproblem + +# prob.relaxed_solver = solve_subproblem + def knapsack01(obj, weights, capacity): """ 0/1 knapsack solver, maximizes profit. 
weights and capacity integer """ @@ -121,24 +134,24 @@ def knapsack01(obj, weights, capacity): if n == 0: return 0, [] - c = [[0]*(capacity+1) for i in range(n)] - added = [[False]*(capacity+1) for i in range(n)] + c = [[0] * (capacity + 1) for i in range(n)] + added = [[False] * (capacity + 1) for i in range(n)] # c [items, remaining capacity] # important: this code assumes strictly positive objective values for i in range(n): - for j in range(capacity+1): - if (weights[i] > j): - c[i][j] = c[i-1][j] + for j in range(capacity + 1): + if weights[i] > j: + c[i][j] = c[i - 1][j] else: - c_add = obj[i] + c[i-1][j-weights[i]] - if c_add > c[i-1][j]: + c_add = obj[i] + c[i - 1][j - weights[i]] + if c_add > c[i - 1][j]: c[i][j] = c_add added[i][j] = True else: - c[i][j] = c[i-1][j] + c[i][j] = c[i - 1][j] # backtrack to find solution - i = n-1 + i = n - 1 j = capacity solution = [] @@ -148,11 +161,12 @@ def knapsack01(obj, weights, capacity): j -= weights[i] i -= 1 - return c[n-1][capacity], solution + return c[n - 1][capacity], solution + def first_fit_heuristic(): # Sort the items in descending weight order - productReqs = [(REQUIREMENT[j],j) for j in PRODUCTS] + productReqs = [(REQUIREMENT[j], j) for j in PRODUCTS] productReqs.sort(reverse=True) # Add items to locations, fitting in as much @@ -165,8 +179,8 @@ def first_fit_heuristic(): while j < len(productReqs): # Can we fit this product? if productReqs[j][0] <= waste: - currentLocation.append(productReqs[j][1]) # index - waste -= productReqs[j][0] # requirement + currentLocation.append(productReqs[j][1]) # index + waste -= productReqs[j][0] # requirement productReqs.pop(j) else: # Try to fit next item @@ -175,6 +189,7 @@ def first_fit_heuristic(): # Return a list of tuples: ([products],waste) return allLocations + def first_fit(prob): locations = first_fit_heuristic() bvs = [] @@ -189,6 +204,7 @@ def first_fit(prob): index += 1 return bvs + def one_each(prob): bvs = [] for index, loc in enumerate(LOCATIONS): @@ -197,28 +213,34 @@ def one_each(prob): var_values = [(assign_vars[(loc, j)], 1) for j in lc] var_values.append((use_vars[loc], 1)) var_values.append((waste_vars[loc], waste)) - + dv = dippy.DecompVar(var_values, None, waste) bvs.append((loc, dv)) return bvs -#prob.init_vars = first_fit + +# prob.init_vars = first_fit ##prob.init_vars = one_each -prob.writeLP('facility_main.lp') +prob.writeLP("facility_main.lp") for n, i in enumerate(LOCATIONS): - prob.writeRelaxed(n, 'facility_relax%s.lp' % i); + prob.writeRelaxed(n, "facility_relax%s.lp" % i) -dippy.Solve(prob, { - 'TolZero': '%s' % tol, - 'doPriceCut': '1', -# 'generateInitVars': '1', -}) +dippy.Solve( + prob, + { + "TolZero": "%s" % tol, + "doPriceCut": "1", + # 'generateInitVars': '1', + }, +) # print solution for i in LOCATIONS: if use_vars[i].varValue > tol: - print("Location ", i, \ - " produces ", \ - [j for j in PRODUCTS - if assign_vars[(i, j)].varValue > tol]) + print( + "Location ", + i, + " produces ", + [j for j in PRODUCTS if assign_vars[(i, j)].varValue > tol], + ) diff --git a/Dip/src/dippy/examples/bpp/bin_pack_func.py b/Dip/src/dippy/examples/bpp/bin_pack_func.py index 840442ed..6b02f063 100755 --- a/Dip/src/dippy/examples/bpp/bin_pack_func.py +++ b/Dip/src/dippy/examples/bpp/bin_pack_func.py @@ -1,5 +1,6 @@ from builtins import range from builtins import object + CGL_cuts = False Bin_antisymmetry = False @@ -28,27 +29,29 @@ from math import floor, ceil + class BinPackProb(object): def __init__(self, ITEMS, volume, capacity): self.ITEMS = ITEMS self.volume = volume 
- self.BINS = list(range(len(ITEMS))) # Create 1 bin for each - # item, indices start at 0 + self.BINS = list(range(len(ITEMS))) # Create 1 bin for each + # item, indices start at 0 self.capacity = capacity - + + def formulate(bpp): - prob = dippy.DipProblem("Bin Packing", - display_mode = 'off', -# layout = 'bak', - display_interval = None, - ) - - assign_vars = LpVariable.dicts("x", - [(i, j) for i in bpp.BINS - for j in bpp.ITEMS], - cat=LpBinary) - use_vars = LpVariable.dicts("y", bpp.BINS, cat=LpBinary) - waste_vars = LpVariable.dicts("w", bpp.BINS, 0, None) + prob = dippy.DipProblem( + "Bin Packing", + display_mode="off", + # layout = 'bak', + display_interval=None, + ) + + assign_vars = LpVariable.dicts( + "x", [(i, j) for i in bpp.BINS for j in bpp.ITEMS], cat=LpBinary + ) + use_vars = LpVariable.dicts("y", bpp.BINS, cat=LpBinary) + waste_vars = LpVariable.dicts("w", bpp.BINS, 0, None) prob += lpSum(waste_vars[i] for i in bpp.BINS), "min_waste" @@ -56,9 +59,10 @@ def formulate(bpp): prob += lpSum(assign_vars[i, j] for i in bpp.BINS) == 1 for i in bpp.BINS: - prob.relaxation[i] += (lpSum(bpp.volume[j] * assign_vars[i, j] - for j in bpp.ITEMS) + waste_vars[i] - == bpp.capacity * use_vars[i]) + prob.relaxation[i] += ( + lpSum(bpp.volume[j] * assign_vars[i, j] for j in bpp.ITEMS) + waste_vars[i] + == bpp.capacity * use_vars[i] + ) for i in bpp.BINS: for j in bpp.ITEMS: @@ -77,35 +81,37 @@ def formulate(bpp): prob += assign_vars[i, j] == 0 # Attach the problem data and variable dictionaries - # to the DipProblem - prob.bpp = bpp + # to the DipProblem + prob.bpp = bpp prob.assign_vars = assign_vars - prob.use_vars = use_vars - prob.waste_vars = waste_vars + prob.use_vars = use_vars + prob.waste_vars = waste_vars return prob + def my_branch(prob, sol): bounds = None - + if Symmetry_branch: bounds = symmetry(prob, sol) - + if Most_use_branch: if bounds is None: bounds = most_frac_use(prob, sol) - + if Most_assign_branch: if bounds is None: bounds = most_frac_assign(prob, sol) - + return bounds + def my_heuristics(prob, xhat, cost): -# print "Heuristics..." + # print "Heuristics..." 
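For reference, every branch callback in these examples returns the shape that chooseBranchSet validates earlier in this diff: a 4-tuple (down_lbs, down_ubs, up_lbs, up_ubs) of {LpVariable: bound} dictionaries, or None to leave the branching decision to DIP. A minimal sketch, with a hypothetical most-fractional rule standing in for the problem-specific rules used here:

    from math import ceil, floor

    def branch_on_most_fractional(prob, sol, tol=1e-6):
        # pick the variable whose value sits farthest from an integer
        var, best = None, tol
        for v in prob.variables():
            frac = min(ceil(sol[v]) - sol[v], sol[v] - floor(sol[v]))
            if frac > best:
                var, best = v, frac
        if var is None:
            return None  # nothing fractional: fall back to default branching
        # down branch caps the variable from above, up branch forces it up
        return {}, {var: float(floor(sol[var]))}, {var: float(ceil(sol[var]))}, {}

    # prob.branch_method = branch_on_most_fractional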
sol = None - + if prob.is_root_node: prob.is_root_node = False if prob.root_heuristic: @@ -113,139 +119,144 @@ def my_heuristics(prob, xhat, cost): else: if prob.node_heuristic: sol = frac_fit(prob, xhat) - + if sol is not None: return [sol] -def solve(prob, algo = 'PriceCut'): + +def solve(prob, algo="PriceCut"): if Symmetry_branch or Most_use_branch or Most_assign_branch: prob.branch_method = my_branch -# prob.heuristics = my_heuristics -# prob.is_root_node = True -# prob.root_heuristic = True -# prob.node_heuristic = True - + # prob.heuristics = my_heuristics + # prob.is_root_node = True + # prob.root_heuristic = True + # prob.node_heuristic = True + dippyOpts = {} if CGL_cuts: - dippyOpts['CutCGL'] = '1' + dippyOpts["CutCGL"] = "1" else: - dippyOpts['CutCGL'] = '0' - - if algo == 'PriceCut': - dippyOpts['doPriceCut'] = '1' - dippyOpts['CutCGL'] = '1' - elif algo == 'Price': - dippyOpts['doPriceCut'] = '1' - dippyOpts['CutCGL'] = '0' + dippyOpts["CutCGL"] = "0" + + if algo == "PriceCut": + dippyOpts["doPriceCut"] = "1" + dippyOpts["CutCGL"] = "1" + elif algo == "Price": + dippyOpts["doPriceCut"] = "1" + dippyOpts["CutCGL"] = "0" else: - dippyOpts['doCut'] = '1' + dippyOpts["doCut"] = "1" -# 'SolveMasterAsIp': '0' -# 'generateInitVars': '1', -# 'LogDebugLevel': 5, -# 'LogDumpModel': 5, - dippyOpts['Gurobi'] = {'MipGap':'.05'} + # 'SolveMasterAsIp': '0' + # 'generateInitVars': '1', + # 'LogDebugLevel': 5, + # 'LogDumpModel': 5, + dippyOpts["Gurobi"] = {"MipGap": ".05"} status, message, primals, duals = dippy.Solve(prob, dippyOpts) - + if status == LpStatusOptimal: return dict((var, var.value()) for var in prob.variables()) else: return None + def most_frac_use(prob, sol): # Get the attached data and variable dicts - bpp = prob.bpp - use_vars = prob.use_vars - tol = prob.tol - - most = float('-inf') + bpp = prob.bpp + use_vars = prob.use_vars + tol = prob.tol + + most = float("-inf") bin = None for j in bpp.BINS: alpha = sol[use_vars[j]] - up = ceil(alpha) # Round up to next nearest integer - down = floor(alpha) # Round down + up = ceil(alpha) # Round up to next nearest integer + down = floor(alpha) # Round down frac = min(up - alpha, alpha - down) - if frac > tol: # Is fractional? + if frac > tol: # Is fractional? if frac > most: most = frac bin = j - + down_lbs = {} down_ubs = {} up_lbs = {} up_ubs = {} if bin is not None: -# print bin, sol[use_vars[bin]] + # print bin, sol[use_vars[bin]] down_ubs[use_vars[bin]] = 0.0 up_lbs[use_vars[bin]] = 1.0 - + return down_lbs, down_ubs, up_lbs, up_ubs + def most_frac_assign(prob, sol): # Get the attached data and variable dicts - bpp = prob.bpp + bpp = prob.bpp assign_vars = prob.assign_vars - tol = prob.tol - - most = float('-inf') + tol = prob.tol + + most = float("-inf") assign = None for i in bpp.ITEMS: for j in bpp.BINS: - up = ceil(sol[assign_vars[i, j]]) # Round up to next nearest integer - down = floor(sol[assign_vars[i, j]]) # Round down + up = ceil(sol[assign_vars[i, j]]) # Round up to next nearest integer + down = floor(sol[assign_vars[i, j]]) # Round down frac = min(up - sol[assign_vars[i, j]], sol[assign_vars[i, j]] - down) - if frac > tol: # Is fractional? + if frac > tol: # Is fractional? 
if frac > most: most = frac assign = (i, j) - + down_lbs = {} down_ubs = {} up_lbs = {} up_ubs = {} if assign is not None: -# print assign, sol[assign_vars[assign]] + # print assign, sol[assign_vars[assign]] down_ubs[assign_vars[assign]] = 0.0 up_lbs[assign_vars[assign]] = 1.0 - + return down_lbs, down_ubs, up_lbs, up_ubs + def symmetry(prob, sol): # Get the attached data and variable dicts - bpp = prob.bpp + bpp = prob.bpp use_vars = prob.use_vars - tol = prob.tol - + tol = prob.tol + alpha = sum(sol[use_vars[j]] for j in bpp.BINS) -# print "# bins =", alpha - up = int(ceil(alpha)) # Round up to next nearest integer - down = int(floor(alpha)) # Round down - frac = min(up - alpha, alpha - down) - if frac > tol: # Is fractional? -# print "Symmetry branch" - + # print "# bins =", alpha + up = int(ceil(alpha)) # Round up to next nearest integer + down = int(floor(alpha)) # Round down + frac = min(up - alpha, alpha - down) + if frac > tol: # Is fractional? + # print "Symmetry branch" + down_lbs = {} down_ubs = {} up_lbs = {} up_ubs = {} for n in range(up - 1, len(bpp.BINS)): down_ubs[use_vars[bpp.BINS[n]]] = 0.0 -# print down_ubs - for n in range(up): # Same as range(0, up) + # print down_ubs + for n in range(up): # Same as range(0, up) up_lbs[use_vars[bpp.BINS[n]]] = 1.0 -# print up_lbs + # print up_lbs return down_lbs, down_ubs, up_lbs, up_ubs - + + def fit(prob, order): - bpp = prob.bpp - use_vars = prob.use_vars + bpp = prob.bpp + use_vars = prob.use_vars assign_vars = prob.assign_vars - waste_vars = prob.waste_vars - tol = prob.tol + waste_vars = prob.waste_vars + tol = prob.tol sol = {} @@ -264,50 +275,58 @@ def fit(prob, order): assigned[i] = True sol[assign_vars[i, j]] = 1.0 sol[waste_vars[j]] -= bpp.volume[i] - + for j in bpp.BINS: if sol[waste_vars[j]] > bpp.capacity - tol: sol[use_vars[j]] = 0.0 sol[waste_vars[j]] = 0.0 - -# print sol - + + # print sol + return sol + import operator + def first_fit(prob): -# print "first fit..." - + # print "first fit..." + bpp = prob.bpp - sorted_volume = sorted(iter(bpp.volume.items()), key=operator.itemgetter(1), reverse=True) + sorted_volume = sorted( + iter(bpp.volume.items()), key=operator.itemgetter(1), reverse=True + ) sorted_ITEMS = [i for (i, v) in sorted_volume] - -# print sorted_ITEMS + + # print sorted_ITEMS order = [(i, j) for i in sorted_ITEMS for j in bpp.BINS] - -# print order - + + # print order + sol = fit(prob, order) -# print sol + # print sol return sol + def frac_fit(prob, xhat): -# print "frac fit..." - + # print "frac fit..." 
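For context, fit and first_fit above, and frac_fit below, all build the same object: a {LpVariable: value} dictionary describing one candidate solution, which the heuristics hook returns wrapped in a list (or None for no suggestion). A hedged sketch of that plumbing, with a naive rounding rule standing in for the real fitting logic:

    def my_node_heuristic(prob, xhat, cost):
        # round the fractional relaxation xhat into a single candidate;
        # DIP expects a list of {LpVariable: value} dictionaries, or None
        sol = dict((v, float(round(xhat[v]))) for v in prob.variables())
        return [sol]

    # registered the same way as my_heuristics above:
    # prob.heuristics = my_node_heuristic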
+ bpp = prob.bpp assign_vars = prob.assign_vars - - assign = dict(((i, j), xhat[assign_vars[i, j]]) for i in bpp.ITEMS for j in bpp.BINS) -# print assign - - sorted_assign = sorted(iter(assign.items()), key=operator.itemgetter(1), reverse=True) + + assign = dict( + ((i, j), xhat[assign_vars[i, j]]) for i in bpp.ITEMS for j in bpp.BINS + ) + # print assign + + sorted_assign = sorted( + iter(assign.items()), key=operator.itemgetter(1), reverse=True + ) order = [(i, j) for ((i, j), x) in sorted_assign] - -# print order - + + # print order + sol = fit(prob, order) -# print sol + # print sol return sol - diff --git a/Dip/src/dippy/examples/bpp/bin_pack_instance.py b/Dip/src/dippy/examples/bpp/bin_pack_instance.py index 894f8e68..b4725a85 100755 --- a/Dip/src/dippy/examples/bpp/bin_pack_instance.py +++ b/Dip/src/dippy/examples/bpp/bin_pack_instance.py @@ -9,29 +9,30 @@ import sys -if __name__ == '__main__': +if __name__ == "__main__": # Python starts here - bpp = BinPackProb(ITEMS = [1, 2, 3, 4, 5], - volume = {1: 2, 2: 5, 3: 3, 4: 7, 5: 2}, - capacity = 8) - + bpp = BinPackProb( + ITEMS=[1, 2, 3, 4, 5], volume={1: 2, 2: 5, 3: 3, 4: 7, 5: 2}, capacity=8 + ) + prob = formulate(bpp) - # Set a zero tolerance (Mike Saunders' "magic number") + # Set a zero tolerance (Mike Saunders' "magic number") prob.tol = pow(pow(2, -24), old_div(2.0, 3.0)) if len(sys.argv) > 1: xopt = solve(prob, sys.argv[1]) else: xopt = solve(prob) - + if xopt is not None: for var in prob.variables(): print(var.name, "=", xopt[var]) else: - print("Dippy could not find and optimal solution") - - if prob.display_mode != 'off': + print("Dippy could not find and optimal solution") + + if prob.display_mode != "off": numNodes = len(prob.Tree.get_node_list()) - if ((prob.Tree.attr['display'] == 'pygame') or - (prob.Tree.attr['display'] == 'xdot')): + if (prob.Tree.attr["display"] == "pygame") or ( + prob.Tree.attr["display"] == "xdot" + ): prob.Tree.display() diff --git a/Dip/src/dippy/examples/bpp/bin_pack_instance2.py b/Dip/src/dippy/examples/bpp/bin_pack_instance2.py index f45c0a15..7981fdfa 100755 --- a/Dip/src/dippy/examples/bpp/bin_pack_instance2.py +++ b/Dip/src/dippy/examples/bpp/bin_pack_instance2.py @@ -8,27 +8,31 @@ import sys -if __name__ == '__main__': +if __name__ == "__main__": # Python starts here - bpp = BinPackProb(ITEMS = [1, 2, 3, 4, 5, 6], - volume = {1: 2, 2: 5, 3: 3, 4: 3, 5: 3, 6: 2}, - capacity = 9) - + bpp = BinPackProb( + ITEMS=[1, 2, 3, 4, 5, 6], + volume={1: 2, 2: 5, 3: 3, 4: 3, 5: 3, 6: 2}, + capacity=9, + ) + prob = formulate(bpp) - + prob.tol = pow(pow(2, -24), old_div(2.0, 3.0)) if len(sys.argv) > 1: xopt = solve(prob, sys.argv[1]) else: xopt = solve(prob) - + if xopt is not None: for var in prob.variables(): print(var.name, "=", xopt[var]) else: print("Dippy could not find and optimal solution") - - if prob.display_mode != 'off': + + if prob.display_mode != "off": numNodes = len(prob.Tree.get_node_list()) - if (prob.Tree.attr['display'] == 'pygame') or (prob.Tree.attr['display'] == 'xdot'): + if (prob.Tree.attr["display"] == "pygame") or ( + prob.Tree.attr["display"] == "xdot" + ): prob.Tree.display() diff --git a/Dip/src/dippy/examples/bpp/mdbin_pack_func.py b/Dip/src/dippy/examples/bpp/mdbin_pack_func.py index f05a2fd5..18b1bedb 100644 --- a/Dip/src/dippy/examples/bpp/mdbin_pack_func.py +++ b/Dip/src/dippy/examples/bpp/mdbin_pack_func.py @@ -8,7 +8,7 @@ import path except ImportError: pass - + try: import dippy except ImportError: @@ -19,6 +19,7 @@ from math import floor, ceil + class 
MDBinPackProb(object): def __init__(self, ITEMS, LIMITS, volume, capacity): self.ITEMS = ITEMS @@ -26,30 +27,35 @@ def __init__(self, ITEMS, LIMITS, volume, capacity): self.volume = volume self.capacity = capacity - self.BINS = list(range(len(ITEMS))) # Create 1 bin for each item, indices - # start at 0 - + self.BINS = list(range(len(ITEMS))) # Create 1 bin for each item, indices + # start at 0 + + def formulate(bpp): - prob = dippy.DipProblem("Bin Packing", - display_mode = 'xdot', -# layout = 'bak', - display_interval = None, - ) - - assign_vars = LpVariable.dicts("x", - [(i, j) for i in bpp.ITEMS - for j in bpp.BINS], - cat=LpBinary) - use_vars = LpVariable.dicts("y", bpp.BINS, cat=LpBinary) - waste_vars = LpVariable.dicts("w", [(j, k) for j in bpp.BINS - for k in bpp.LIMITS], 0, None) + prob = dippy.DipProblem( + "Bin Packing", + display_mode="xdot", + # layout = 'bak', + display_interval=None, + ) + + assign_vars = LpVariable.dicts( + "x", [(i, j) for i in bpp.ITEMS for j in bpp.BINS], cat=LpBinary + ) + use_vars = LpVariable.dicts("y", bpp.BINS, cat=LpBinary) + waste_vars = LpVariable.dicts( + "w", [(j, k) for j in bpp.BINS for k in bpp.LIMITS], 0, None + ) prob += lpSum(use_vars[j] for j in bpp.BINS), "min_bins" for j in bpp.BINS: - for k in bpp.LIMITS: - prob += lpSum(bpp.volume[i, k] * assign_vars[i, j] for i in bpp.ITEMS) \ - + waste_vars[j, k] == bpp.capacity[k] * use_vars[j] + for k in bpp.LIMITS: + prob += ( + lpSum(bpp.volume[i, k] * assign_vars[i, j] for i in bpp.ITEMS) + + waste_vars[j, k] + == bpp.capacity[k] * use_vars[j] + ) for i in bpp.ITEMS: prob += lpSum(assign_vars[i, j] for j in bpp.BINS) == 1 @@ -61,29 +67,29 @@ def formulate(bpp): for n in range(0, len(bpp.BINS) - 1): prob += use_vars[bpp.BINS[n]] >= use_vars[bpp.BINS[n + 1]] - # Attach the problem data and variable dictionaries to the DipProblem - prob.bpp = bpp + # Attach the problem data and variable dictionaries to the DipProblem + prob.bpp = bpp prob.assign_vars = assign_vars - prob.use_vars = use_vars - prob.waste_vars = waste_vars + prob.use_vars = use_vars + prob.waste_vars = waste_vars return prob + def solve(prob): - + dippyOpts = { -# 'doPriceCut' : '1', - 'CutCGL': '1', -# 'SolveMasterAsIp': '0' -# 'generateInitVars': '1', -# 'LogDebugLevel': 5, -# 'LogDumpModel': 5, - } + # 'doPriceCut' : '1', + "CutCGL": "1", + # 'SolveMasterAsIp': '0' + # 'generateInitVars': '1', + # 'LogDebugLevel': 5, + # 'LogDumpModel': 5, + } status, message, primals, duals = dippy.Solve(prob, dippyOpts) - + if status == LpStatusOptimal: return dict((var, var.value()) for var in prob.variables()) else: return None - diff --git a/Dip/src/dippy/examples/bpp/mdbin_pack_instance.py b/Dip/src/dippy/examples/bpp/mdbin_pack_instance.py index 8a68da72..89da45f4 100644 --- a/Dip/src/dippy/examples/bpp/mdbin_pack_instance.py +++ b/Dip/src/dippy/examples/bpp/mdbin_pack_instance.py @@ -6,40 +6,42 @@ from past.utils import old_div from .mdbin_pack_func import MDBinPackProb, formulate, solve -if __name__ == '__main__': +if __name__ == "__main__": # Python starts here - bpp = MDBinPackProb(ITEMS = [11, 12, 13, 14, 21, 22], - LIMITS = ['CPU', 'RAM'], - volume = {(11, 'CPU'): 1, - (12, 'CPU'): 1, - (13, 'CPU'): 1, - (14, 'CPU'): 1, - (21, 'CPU'): 1, - (22, 'CPU'): 1, - (11, 'RAM'): 512, - (12, 'RAM'): 512, - (13, 'RAM'): 512, - (14, 'RAM'): 512, - (21, 'RAM'): 3072, - (22, 'RAM'): 3072, - }, - capacity = {'CPU': 4, 'RAM': 4096}) - + bpp = MDBinPackProb( + ITEMS=[11, 12, 13, 14, 21, 22], + LIMITS=["CPU", "RAM"], + volume={ + (11, "CPU"): 1, + 
(12, "CPU"): 1, + (13, "CPU"): 1, + (14, "CPU"): 1, + (21, "CPU"): 1, + (22, "CPU"): 1, + (11, "RAM"): 512, + (12, "RAM"): 512, + (13, "RAM"): 512, + (14, "RAM"): 512, + (21, "RAM"): 3072, + (22, "RAM"): 3072, + }, + capacity={"CPU": 4, "RAM": 4096}, + ) + prob = formulate(bpp) - + prob.tol = pow(pow(2, -24), old_div(2.0, 3.0)) xopt = solve(prob) - + if xopt is not None: for var in prob.variables(): print(var.name, "=", xopt[var]) else: - print("Dippy could not find and optimal solution") - - if prob.display_mode != 'off': + print("Dippy could not find and optimal solution") + + if prob.display_mode != "off": numNodes = len(prob.Tree.get_node_list()) - if ((prob.Tree.attr['display'] == 'pygame') or - (prob.Tree.attr['display'] == 'xdot')): + if (prob.Tree.attr["display"] == "pygame") or ( + prob.Tree.attr["display"] == "xdot" + ): prob.Tree.display() - - diff --git a/Dip/src/dippy/examples/cflp/facility_ex1.py b/Dip/src/dippy/examples/cflp/facility_ex1.py index 840a1c3d..37b0c792 100644 --- a/Dip/src/dippy/examples/cflp/facility_ex1.py +++ b/Dip/src/dippy/examples/cflp/facility_ex1.py @@ -1,26 +1,14 @@ from random import randint # The requirements for the products -REQUIREMENT = { - 1 : 7, - 2 : 5, - 3 : 3, - 4 : 2, - 5 : 2 -} +REQUIREMENT = {1: 7, 2: 5, 3: 3, 4: 2, 5: 2} # Set of all products PRODUCTS = list(REQUIREMENT.keys()) PRODUCTS.sort() # Costs of the facilities -FIXED_COST = { - 1 : 10, - 2 : 20, - 3 : 16, - 4 : 1, - 5 : 2 -} +FIXED_COST = {1: 10, 2: 20, 3: 16, 4: 1, 5: 2} # Set of facilities LOCATIONS = list(FIXED_COST.keys()) diff --git a/Dip/src/dippy/examples/cflp/facility_ex2.py b/Dip/src/dippy/examples/cflp/facility_ex2.py index 4a597a95..f216860e 100644 --- a/Dip/src/dippy/examples/cflp/facility_ex2.py +++ b/Dip/src/dippy/examples/cflp/facility_ex2.py @@ -1,26 +1,14 @@ from random import randint, seed # The requirements for the products -REQUIREMENT = { - 1 : 7, - 2 : 5, - 3 : 3, - 4 : 2, - 5 : 2 -} +REQUIREMENT = {1: 7, 2: 5, 3: 3, 4: 2, 5: 2} # Set of all products PRODUCTS = list(REQUIREMENT.keys()) PRODUCTS.sort() # Costs of the facilities -FIXED_COST = { - 1 : 1, - 2 : 1, - 3 : 1, - 4 : 1, - 5 : 1 -} +FIXED_COST = {1: 1, 2: 1, 3: 1, 4: 1, 5: 1} # Set of facilities LOCATIONS = list(FIXED_COST.keys()) diff --git a/Dip/src/dippy/examples/cflp/facility_location.py b/Dip/src/dippy/examples/cflp/facility_location.py index cab917c5..9583ac39 100755 --- a/Dip/src/dippy/examples/cflp/facility_location.py +++ b/Dip/src/dippy/examples/cflp/facility_location.py @@ -8,13 +8,21 @@ from past.utils import old_div import sys -from pulp import LpVariable, LpBinary, lpSum, value, LpProblem, LpMaximize, LpAffineExpression +from pulp import ( + LpVariable, + LpBinary, + lpSum, + value, + LpProblem, + LpMaximize, + LpAffineExpression, +) try: import path except ImportError: pass - + try: import src.dippy as dippy from src.dippy import DipSolStatOptimal @@ -28,6 +36,7 @@ from facility_ex2 import REQUIREMENT, PRODUCTS from facility_ex2 import LOCATIONS, CAPACITY + try: from facility_ex2 import FIXED_COST except ImportError: @@ -43,21 +52,23 @@ except ImportError: ASSIGNMENT_COSTS = dict((i, 0) for i in ASSIGNMENTS) -#display_mode = 'xdot' -#layout = 'dot' +# display_mode = 'xdot' +# layout = 'dot' prob = dippy.DipProblem("Facility Location") assign_vars = LpVariable.dicts("x", ASSIGNMENTS, 0, 1, LpBinary) -use_vars = LpVariable.dicts("y", LOCATIONS, 0, 1, LpBinary) +use_vars = LpVariable.dicts("y", LOCATIONS, 0, 1, LpBinary) debug_print = False debug_print_lp = False -prob += 
(lpSum(use_vars[i] * FIXED_COST[i] for i in LOCATIONS) + - lpSum(assign_vars[j] * ASSIGNMENT_COSTS[j] for j in ASSIGNMENTS), - "min") +prob += ( + lpSum(use_vars[i] * FIXED_COST[i] for i in LOCATIONS) + + lpSum(assign_vars[j] * ASSIGNMENT_COSTS[j] for j in ASSIGNMENTS), + "min", +) # assignment constraints for j in PRODUCTS: @@ -65,20 +76,23 @@ # Aggregate capacity constraints for i in LOCATIONS: - prob.relaxation[i] += lpSum(assign_vars[(i, j)] * REQUIREMENT[j] - for j in PRODUCTS) <= CAPACITY * use_vars[i] + prob.relaxation[i] += ( + lpSum(assign_vars[(i, j)] * REQUIREMENT[j] for j in PRODUCTS) + <= CAPACITY * use_vars[i] + ) # Disaggregate capacity constraints for i, j in ASSIGNMENTS: prob.relaxation[i] += assign_vars[(i, j)] <= use_vars[i] + def solve_subproblem(prob, key, redCosts, target): if debug_print: print("solve_subproblem...") print("reduced costs:") print(redCosts) print("target value:", target) - + loc = key # Calculate effective objective coefficient of products @@ -86,10 +100,10 @@ def solve_subproblem(prob, key, redCosts, target): avars = [assign_vars[(loc, j)] for j in PRODUCTS] obj = [max(-redCosts[assign_vars[(loc, j)]], 0) for j in PRODUCTS] weights = [REQUIREMENT[j] for j in PRODUCTS] - + # Use 0-1 KP to max. total effective value of products at location z, solution = knapsack01(obj, weights, CAPACITY) - + # Get the reduced cost of the knapsack solution if debug_print: print([(v, redCosts[v]) for v in avars]) @@ -98,7 +112,7 @@ def solve_subproblem(prob, key, redCosts, target): print("redCosts[use_vars[loc]] =", redCosts[use_vars[loc]]) print("Fixed cost, rc", FIXED_COST[loc], redCosts[use_vars[loc]] - z) - if redCosts[use_vars[loc]] > z + tol: # ... or an empty location is "useful" + if redCosts[use_vars[loc]] > z + tol: # ... or an empty location is "useful" if debug_print: print("Zero solution is optimal") return DipSolStatOptimal, [{}] @@ -110,54 +124,53 @@ def solve_subproblem(prob, key, redCosts, target): rcCheck = 0.0 for v in list(var_values.keys()): rcCheck += redCosts[v] * var_values[v] - print("Checking rc calc", redCosts[use_vars[loc]] - z, rcCheck) + print("Checking rc calc", redCosts[use_vars[loc]] - z, rcCheck) print(var_values) return DipSolStatOptimal, [var_values] + def knapsack01(obj, weights, capacity): """ 0/1 knapsack solver, maximizes profit. 
weights and capacity integer """ - + debug_subproblem = False - + assert len(obj) == len(weights) n = len(obj) if n == 0: return 0, [] if debug_subproblem: - relaxation = LpProblem('relaxation', LpMaximize) + relaxation = LpProblem("relaxation", LpMaximize) relax_vars = [str(i) for i in range(n)] - var_dict = LpVariable.dicts("", relax_vars, 0, 1, LpBinary) - relaxation += (lpSum(var_dict[str(i)] * weights[i] for i in range(n)) - <= capacity) + var_dict = LpVariable.dicts("", relax_vars, 0, 1, LpBinary) + relaxation += lpSum(var_dict[str(i)] * weights[i] for i in range(n)) <= capacity relaxation += lpSum(var_dict[str(i)] * obj[i] for i in range(n)) relaxation.solve() relax_obj = value(relaxation.objective) - solution = [i for i in range(n) if var_dict[str(i)].varValue > tol ] + solution = [i for i in range(n) if var_dict[str(i)].varValue > tol] print(relax_obj, solution) - - c = [[0]*(capacity+1) for i in range(n)] - added = [[False]*(capacity+1) for i in range(n)] + c = [[0] * (capacity + 1) for i in range(n)] + added = [[False] * (capacity + 1) for i in range(n)] # c [items, remaining capacity] # important: this code assumes strictly positive objective values for i in range(n): - for j in range(capacity+1): - if (weights[i] > j): - c[i][j] = c[i-1][j] + for j in range(capacity + 1): + if weights[i] > j: + c[i][j] = c[i - 1][j] else: - c_add = obj[i] + c[i-1][j-weights[i]] - if c_add > c[i-1][j]: + c_add = obj[i] + c[i - 1][j - weights[i]] + if c_add > c[i - 1][j]: c[i][j] = c_add added[i][j] = True else: - c[i][j] = c[i-1][j] + c[i][j] = c[i - 1][j] # backtrack to find solution - i = n-1 + i = n - 1 j = capacity solution = [] @@ -166,8 +179,9 @@ def knapsack01(obj, weights, capacity): solution.append(i) j -= weights[i] i -= 1 - - return c[n-1][capacity], solution + + return c[n - 1][capacity], solution + def generate_weight_cuts(prob, sol): # Define mu and T for each knapsack @@ -176,17 +190,18 @@ def generate_weight_cuts(prob, sol): for i in LOCATIONS: mu[i] = CAPACITY S[i] = [] - + # Use current assign_var values to assign items to locations - assigning = True + assigning = True while assigning: bestValue = 0 bestAssign = None for i in LOCATIONS: for j in PRODUCTS: - if j not in S[i]: # If this product is not in the subset - if (sol[assign_vars[(i, j)]] > bestValue) \ - and (REQUIREMENT[j] <= mu[i]): + if j not in S[i]: # If this product is not in the subset + if (sol[assign_vars[(i, j)]] > bestValue) and ( + REQUIREMENT[j] <= mu[i] + ): # The assignment variable for this product is closer # to 1 than any other product checked, and "fits" in # this location's remaining space @@ -194,31 +209,33 @@ def generate_weight_cuts(prob, sol): bestAssign = (i, j) # Make the best assignment found across all products and locactions if bestAssign: - (i,j) = bestAssign - mu[i] -= REQUIREMENT[j] # Decrease spare CAPACITY at this location - S[i].append(j) # Assign this product to this location's set + (i, j) = bestAssign + mu[i] -= REQUIREMENT[j] # Decrease spare CAPACITY at this location + S[i].append(j) # Assign this product to this location's set else: - assigning = False # Didn't find anything to assign - stop + assigning = False # Didn't find anything to assign - stop # Generate the weight cuts from the sets found above new_cuts = [] for i in LOCATIONS: - if len(S[i]) > 0: # If an item assigned to this location - con = LpAffineExpression() # Start a new constraint - con += sum(REQUIREMENT[j] * assign_vars[(i, j)] - for j in S[i]) - con += sum(max(0, REQUIREMENT[j] - mu[i]) * - assign_vars[(i, j)] 
for j in PRODUCTS - if j not in S[i]) + if len(S[i]) > 0: # If an item assigned to this location + con = LpAffineExpression() # Start a new constraint + con += sum(REQUIREMENT[j] * assign_vars[(i, j)] for j in S[i]) + con += sum( + max(0, REQUIREMENT[j] - mu[i]) * assign_vars[(i, j)] + for j in PRODUCTS + if j not in S[i] + ) new_cuts.append(con <= CAPACITY - mu[i]) # Return the set of cuts we created to DIP if len(new_cuts) > 0: return new_cuts + def first_fit_heuristic(): # Sort the items in descending weight order - productReqs = [(REQUIREMENT[j],j) for j in PRODUCTS] + productReqs = [(REQUIREMENT[j], j) for j in PRODUCTS] productReqs.sort(reverse=True) # Add items to locations, fitting in as much @@ -231,8 +248,8 @@ def first_fit_heuristic(): while j < len(productReqs): # Can we fit this product? if productReqs[j][0] <= waste: - currentLocation.append(productReqs[j][1]) # index - waste -= productReqs[j][0] # requirement + currentLocation.append(productReqs[j][1]) # index + waste -= productReqs[j][0] # requirement productReqs.pop(j) else: # Try to fit next item @@ -242,6 +259,7 @@ def first_fit_heuristic(): # Return a list of tuples: ([products],waste) return allLocations + def first_fit(): # Use generic first-fit heuristic that is shared # between heuristics and initial variable generation @@ -261,43 +279,47 @@ def first_fit(): for j in loc[0]: sol[assign_vars[(i, j)]] = 1 index += 1 - + return sol + def frac_fit(xhat): # Initialise solution sol = {} waste = {} for i in LOCATIONS: - for j in PRODUCTS: sol[assign_vars[(i, j)]] = 0 + for j in PRODUCTS: + sol[assign_vars[(i, j)]] = 0 sol[use_vars[i]] = 0 waste[i] = 0 - + # Get the list of non-zero fractional assignments - fracAssigns = [ (xhat[assign_vars[(i, j)]], (i, j)) - for i in LOCATIONS for j in PRODUCTS - if xhat[assign_vars[(i, j)]] > tol ] + fracAssigns = [ + (xhat[assign_vars[(i, j)]], (i, j)) + for i in LOCATIONS + for j in PRODUCTS + if xhat[assign_vars[(i, j)]] > tol + ] fracAssigns.sort() # Track which products and locations have been used - notAllocated = dict((j,True) for j in PRODUCTS) - notUsed = dict((i,True) for i in LOCATIONS) + notAllocated = dict((j, True) for j in PRODUCTS) + notUsed = dict((i, True) for i in LOCATIONS) while len(fracAssigns) > 0: - fracX = fracAssigns.pop() # Get best frac. assignment left - (i,j) = fracX[1] + fracX = fracAssigns.pop() # Get best frac. assignment left + (i, j) = fracX[1] if notAllocated[j]: - if notUsed[i]: # Create a new location if needed + if notUsed[i]: # Create a new location if needed notUsed[i] = False sol[use_vars[i]] = 1 waste[i] = CAPACITY - if REQUIREMENT[j] <= waste[i]: # Space left? + if REQUIREMENT[j] <= waste[i]: # Space left? 
sol[assign_vars[(i, j)]] = 1 notAllocated[j] = False waste[i] -= REQUIREMENT[j] - + # Allocate the remaining products - unallocated = [(REQUIREMENT[j],j) for j in PRODUCTS - if notAllocated[j]] + unallocated = [(REQUIREMENT[j], j) for j in PRODUCTS if notAllocated[j]] unallocated.sort(reverse=True) unused = [i for i in LOCATIONS if notUsed[i]] while len(unallocated) > 0: @@ -310,15 +332,17 @@ def frac_fit(xhat): unallocated.pop(index) sol[assign_vars[(loc, j)]] = 1 waste -= j_req - else: index += 1 + else: + index += 1 sol[use_vars[loc]] = 1 return sol + def heuristics(prob, xhat, cost): sols = [] if prob.root_heuristic: - prob.root_heuristic = False # Don't run twice + prob.root_heuristic = False # Don't run twice sol = first_fit() sols.append(sol) if prob.node_heuristic: @@ -327,7 +351,8 @@ def heuristics(prob, xhat, cost): if len(sols) > 0: return sols - + + def init_first_fit(prob): locations = first_fit_heuristic() @@ -346,6 +371,7 @@ def init_first_fit(prob): print(bvs) return bvs + def init_one_each(prob): bvs = [] if debug_print: @@ -362,51 +388,54 @@ def init_one_each(prob): print(bvs) return bvs + if debug_print_lp: - prob.writeLP('facility_main.lp') + prob.writeLP("facility_main.lp") for n, i in enumerate(LOCATIONS): - prob.writeRelaxed(n, 'facility_relax%s.lp' % i); + prob.writeRelaxed(n, "facility_relax%s.lp" % i) -#prob.writeFull('facility.lp', 'facility.dec') +# prob.writeFull('facility.lp', 'facility.dec') -#prob.relaxed_solver = solve_subproblem -#prob.init_vars = init_one_each -#prob.init_vars = init_first_fit -#prob.generate_cuts = generate_weight_cuts -#prob.heuristics = heuristics -#prob.root_heuristic = True -#prob.node_heuristic = True +# prob.relaxed_solver = solve_subproblem +# prob.init_vars = init_one_each +# prob.init_vars = init_first_fit +# prob.generate_cuts = generate_weight_cuts +# prob.heuristics = heuristics +# prob.root_heuristic = True +# prob.node_heuristic = True dippyOpts = {} -algo = 'doCut' +algo = "doCut" if len(sys.argv) > 1: algo = sys.argv[1] -if algo == 'PriceCut': - dippyOpts['doPriceCut'] = '1' - dippyOpts['CutCGL'] = '1' -elif algo == 'Price': - dippyOpts['doPriceCut'] = '1' - dippyOpts['CutCGL'] = '0' +if algo == "PriceCut": + dippyOpts["doPriceCut"] = "1" + dippyOpts["CutCGL"] = "1" +elif algo == "Price": + dippyOpts["doPriceCut"] = "1" + dippyOpts["CutCGL"] = "0" else: - dippyOpts['doCut'] = '1' + dippyOpts["doCut"] = "1" -dippyOpts['TolZero'] = '%s' % tol +dippyOpts["TolZero"] = "%s" % tol dippy.Solve(prob, dippyOpts) -if prob.display_mode != 'off': +if prob.display_mode != "off": numNodes = len(prob.Tree.get_node_list()) - if prob.Tree.attr['display'] == 'svg': - prob.Tree.write_as_svg(filename = "facility_node%d" % (numNodes + 1), - prevfile = "facility_node%d" % numNodes) + if prob.Tree.attr["display"] == "svg": + prob.Tree.write_as_svg( + filename="facility_node%d" % (numNodes + 1), + prevfile="facility_node%d" % numNodes, + ) prob.Tree.display() # print solution -print("Optimal solution found!") +print("Optimal solution found!") print("************************************") for i in LOCATIONS: if use_vars[i].varValue > 0: - print("Location ", i, " is assigned: ", end=' ') + print("Location ", i, " is assigned: ", end=" ") print([j for j in PRODUCTS if assign_vars[(i, j)].varValue > 0]) print("************************************") print() diff --git a/Dip/src/dippy/examples/coke/coke.py b/Dip/src/dippy/examples/coke/coke.py index 1cf178b6..4ee82820 100644 --- a/Dip/src/dippy/examples/coke/coke.py +++ 
b/Dip/src/dippy/examples/coke/coke.py @@ -5,7 +5,15 @@ from builtins import str from builtins import range from past.utils import old_div -from pulp import LpVariable, LpBinary, lpSum, value, LpProblem, LpMinimize, LpAffineExpression +from pulp import ( + LpVariable, + LpBinary, + lpSum, + value, + LpProblem, + LpMinimize, + LpAffineExpression, +) try: import src.dippy as dippy @@ -15,39 +23,17 @@ CC = 1.3 BIG_M = 1e10 -MINE_SUPPLY = { - "M1": 25.8, - "M2": 728, - "M3": 1456, - "M4": 49, - "M5": 36.9, - "M6": 1100, -} +MINE_SUPPLY = {"M1": 25.8, "M2": 728, "M3": 1456, "M4": 49, "M5": 36.9, "M6": 1100} MINES = list(MINE_SUPPLY.keys()) MINES.sort() LOCATIONS = ["L1", "L2", "L3", "L4", "L5", "L6"] -SIZE_COSTS = { - 0: 0, - 75: 4.4, - 150: 7.4, - 225: 10.5, - 300: 13.5, - 375: 16.5, - 450: 19.6, -} +SIZE_COSTS = {0: 0, 75: 4.4, 150: 7.4, 225: 10.5, 300: 13.5, 375: 16.5, 450: 19.6} SIZES = list(SIZE_COSTS.keys()) SIZES.sort() -CUSTOMER_DEMAND = { - "C1": 83, - "C2": 5.5, - "C3": 6.975, - "C4": 5.5, - "C5": 720.75, - "C6": 5.5, -} +CUSTOMER_DEMAND = {"C1": 83, "C2": 5.5, "C3": 6.975, "C4": 5.5, "C5": 720.75, "C6": 5.5} CUSTOMERS = list(CUSTOMER_DEMAND.keys()) CUSTOMERS.sort() @@ -71,6 +57,7 @@ C6 186302 189099 147026 164938 149836 286307 """ + def read_table(data, coerce, transpose=False): lines = data.splitlines() headings = lines[1].split() @@ -78,22 +65,25 @@ def read_table(data, coerce, transpose=False): for row in lines[2:]: items = row.split() for i, item in enumerate(items[1:]): - if transpose: key = (headings[i], items[0]) - else: key = (items[0], headings[i]) + if transpose: + key = (headings[i], items[0]) + else: + key = (items[0], headings[i]) result[key] = coerce(item) return result + MINE_TRANS = read_table(MINE_TRANS_DATA, int) for key in MINE_TRANS: MINE_TRANS[key] = MINE_TRANS[key] * CC -CUST_TRANS = read_table(CUST_TRANS_DATA, int, \ - transpose=True) +CUST_TRANS = read_table(CUST_TRANS_DATA, int, transpose=True) ARC_COSTS = dict(MINE_TRANS) ARC_COSTS.update(CUST_TRANS) ARCS = list(ARC_COSTS.keys()) + def cross(i1, i2): r = [] for a in i1: @@ -101,13 +91,13 @@ def cross(i1, i2): r.append((a, b)) return r + LOC_SIZES = cross(LOCATIONS, SIZES) prob = dippy.DipProblem("Coke", LpMinimize) # create variables -buildVars = LpVariable.dicts("Build", LOC_SIZES, None, \ - None, LpBinary) +buildVars = LpVariable.dicts("Build", LOC_SIZES, None, None, LpBinary) prob.buildVars = buildVars # create arcs @@ -118,15 +108,17 @@ def cross(i1, i2): prob.SIZES = SIZES # objective -prob += 1e6 * lpSum(buildVars[(l, s)] * SIZE_COSTS[s] \ - for (l, s) in LOC_SIZES) + \ - lpSum(flowVars[(s, d)] * ARC_COSTS[(s, d)] \ - for (s, d) in ARCS), "min" +prob += ( + 1e6 * lpSum(buildVars[(l, s)] * SIZE_COSTS[s] for (l, s) in LOC_SIZES) + + lpSum(flowVars[(s, d)] * ARC_COSTS[(s, d)] for (s, d) in ARCS), + "min", +) # plant availability for loc in LOCATIONS: - prob += lpSum(flowVars[(loc, i)] for i in CUSTOMERS) \ - <= lpSum(buildVars[(loc, s)] *s for s in SIZES) + prob += lpSum(flowVars[(loc, i)] for i in CUSTOMERS) <= lpSum( + buildVars[(loc, s)] * s for s in SIZES + ) # one size for loc in LOCATIONS: @@ -135,66 +127,62 @@ def cross(i1, i2): # conserve flow (mines) # flows are in terms of tonnes of coke for m in MINES: - prob += lpSum(flowVars[(m, j)] for j in LOCATIONS) <= \ - old_div(MINE_SUPPLY[m],CC) + prob += lpSum(flowVars[(m, j)] for j in LOCATIONS) <= old_div(MINE_SUPPLY[m], CC) for loc in LOCATIONS: - prob += lpSum(flowVars[(m, loc)] for m in MINES) - \ - lpSum(flowVars[(loc, c)] for c in CUSTOMERS) \ - 
>= 0 + prob += ( + lpSum(flowVars[(m, loc)] for m in MINES) + - lpSum(flowVars[(loc, c)] for c in CUSTOMERS) + >= 0 + ) for c in CUSTOMERS: - prob += lpSum(flowVars[(loc, c)] \ - for loc in LOCATIONS) \ - >= CUSTOMER_DEMAND[c] + prob += lpSum(flowVars[(loc, c)] for loc in LOCATIONS) >= CUSTOMER_DEMAND[c] + def do_branch(prob, sol): tol = 1e-10 SIZES = prob.SIZES buildVars = prob.buildVars for loc in LOCATIONS: - sol_size = sum(sol[buildVars[loc, size]] * \ - size for size in SIZES) + sol_size = sum(sol[buildVars[loc, size]] * size for size in SIZES) # smallest index and size larger than sol_size if abs(sol_size - SIZES[-1]) < tol: continue - i, bigger = [(i, s) for i, s in enumerate(SIZES) \ - if s > sol_size][0] + i, bigger = [(i, s) for i, s in enumerate(SIZES) if s > sol_size][0] if i == 0: smaller = 0 else: - smaller = SIZES[i-1] + smaller = SIZES[i - 1] if bigger - sol_size > tol and sol_size - smaller > tol: - down_branch_ub = dict([(buildVars[loc, SIZES[j]], 0) \ - for j in range(i, len(SIZES))]) - up_branch_ub = dict([(buildVars[loc, SIZES[j]], 0) \ - for j in range(0, i)]) + down_branch_ub = dict( + [(buildVars[loc, SIZES[j]], 0) for j in range(i, len(SIZES))] + ) + up_branch_ub = dict([(buildVars[loc, SIZES[j]], 0) for j in range(0, i)]) return ({}, down_branch_ub, {}, up_branch_ub) - + + prob.branch_method = do_branch -dippy.Solve(prob, { - 'CutCGL': '0', -}) +dippy.Solve(prob, {"CutCGL": "0"}) + def print_table(rows, cols, fn): print("\t", "\t".join(cols)) for r in rows: - print(r,"\t", "\t".join(str(fn(r,c)) for c in cols)) + print(r, "\t", "\t".join(str(fn(r, c)) for c in cols)) + def print_var_table(rows, cols, var, fn=lambda x: x): - print_table(rows, cols, lambda x, y: - fn(var[(x,y)].varValue)) + print_table(rows, cols, lambda x, y: fn(var[(x, y)].varValue)) + for (l, s) in LOC_SIZES: - if buildVars[(l,s)].varValue > 0: - print("Build %s %s (%s)" % \ - (l, s, buildVars[(l,s)].varValue)) + if buildVars[(l, s)].varValue > 0: + print("Build %s %s (%s)" % (l, s, buildVars[(l, s)].varValue)) print() -print_var_table(MINES, LOCATIONS, flowVars, \ - fn=lambda x: CC*x) +print_var_table(MINES, LOCATIONS, flowVars, fn=lambda x: CC * x) print() print_var_table(LOCATIONS, CUSTOMERS, flowVars) - diff --git a/Dip/src/dippy/examples/coke/coke_func.py b/Dip/src/dippy/examples/coke/coke_func.py index e9e711d9..4e47eec5 100644 --- a/Dip/src/dippy/examples/coke/coke_func.py +++ b/Dip/src/dippy/examples/coke/coke_func.py @@ -2,6 +2,7 @@ from builtins import str from builtins import range from builtins import object + CGL_cuts = True Advanced_branch = True @@ -27,34 +28,37 @@ except ImportError: import coinor.dippy as dippy + class CokeProb(object): - def __init__(self, supply, demand, LOCATIONS, build_costs, - conversion_factor, transport_costs): - self.MINES = list(supply.keys()) + def __init__( + self, supply, demand, LOCATIONS, build_costs, conversion_factor, transport_costs + ): + self.MINES = list(supply.keys()) self.MINES.sort() self.CUSTOMERS = list(demand.keys()) self.CUSTOMERS.sort() self.LOCATIONS = LOCATIONS - self.SIZES = list(build_costs.keys()) + self.SIZES = list(build_costs.keys()) self.SIZES.sort() - self.ARCS = list(transport_costs.keys()) + self.ARCS = list(transport_costs.keys()) self.conversion_factor = conversion_factor - self.supply = supply - self.demand = demand - self.build_costs = build_costs - self.transport_costs = transport_costs + self.supply = supply + self.demand = demand + self.build_costs = build_costs + self.transport_costs = transport_costs + def 
formulate(cp): - prob = dippy.DipProblem("Coke", - display_mode = 'xdot', -# layout = 'bak', - display_interval = None, - ) + prob = dippy.DipProblem( + "Coke", + display_mode="xdot", + # layout = 'bak', + display_interval=None, + ) # create variables - LOC_SIZES = [(l, s) for l in cp.LOCATIONS - for s in cp.SIZES] + LOC_SIZES = [(l, s) for l in cp.LOCATIONS for s in cp.SIZES] buildVars = LpVariable.dicts("Build", LOC_SIZES, cat=LpBinary) # create arcs @@ -64,16 +68,18 @@ def formulate(cp): flowVars[a].bounds(0, BIG_M) # objective - prob += 1e6 * lpSum(buildVars[(l, s)] * cp.build_costs[s] \ - for (l, s) in LOC_SIZES) + \ - lpSum(flowVars[(s, d)] * cp.transport_costs[(s, d)] \ - for (s, d) in cp.ARCS), "min" + prob += ( + 1e6 * lpSum(buildVars[(l, s)] * cp.build_costs[s] for (l, s) in LOC_SIZES) + + lpSum(flowVars[(s, d)] * cp.transport_costs[(s, d)] for (s, d) in cp.ARCS), + "min", + ) # plant availability - assumes that SIZES are numeric, # which they should be for loc in cp.LOCATIONS: - prob += lpSum(flowVars[(loc, i)] for i in cp.CUSTOMERS) \ - <= lpSum(buildVars[(loc, s)] * s for s in cp.SIZES) + prob += lpSum(flowVars[(loc, i)] for i in cp.CUSTOMERS) <= lpSum( + buildVars[(loc, s)] * s for s in cp.SIZES + ) # one size for loc in cp.LOCATIONS: @@ -82,74 +88,74 @@ def formulate(cp): # conserve flow (mines) # flows are in terms of tonnes of coke for m in cp.MINES: - prob += lpSum(flowVars[(m, j)] for j in cp.LOCATIONS) \ - <= cp.supply[m] + prob += lpSum(flowVars[(m, j)] for j in cp.LOCATIONS) <= cp.supply[m] # conserve flow (locations) # convert from coal to coke for loc in cp.LOCATIONS: - prob += lpSum(flowVars[(m, loc)] for m in cp.MINES) - \ - cp.conversion_factor * \ - lpSum(flowVars[(loc, c)] for c in cp.CUSTOMERS) \ - >= 0 + prob += ( + lpSum(flowVars[(m, loc)] for m in cp.MINES) + - cp.conversion_factor * lpSum(flowVars[(loc, c)] for c in cp.CUSTOMERS) + >= 0 + ) for c in cp.CUSTOMERS: - prob += lpSum(flowVars[(loc, c)] for loc in cp.LOCATIONS) \ - >= cp.demand[c] + prob += lpSum(flowVars[(loc, c)] for loc in cp.LOCATIONS) >= cp.demand[c] prob.cp = cp prob.buildVars = buildVars prob.flowVars = flowVars - + return prob - + + def do_branch(prob, sol): - tol = prob.tol + tol = prob.tol LOCATIONS = prob.cp.LOCATIONS - SIZES = prob.cp.SIZES + SIZES = prob.cp.SIZES buildVars = prob.buildVars - + for loc in LOCATIONS: - sol_size = sum(sol[buildVars[loc, size]] * \ - size for size in SIZES) + sol_size = sum(sol[buildVars[loc, size]] * size for size in SIZES) # smallest index and size larger than sol_size if abs(sol_size - SIZES[-1]) < tol: continue - i, bigger = [(i, s) for i, s in enumerate(SIZES) \ - if s > sol_size][0] + i, bigger = [(i, s) for i, s in enumerate(SIZES) if s > sol_size][0] if i == 0: smaller = 0 else: - smaller = SIZES[i-1] + smaller = SIZES[i - 1] down_lbs = {} down_ubs = {} up_lbs = {} up_ubs = {} if bigger - sol_size > tol and sol_size - smaller > tol: - down_ubs = dict([(buildVars[loc, SIZES[j]], 0) - for j in range(i, len(SIZES))]) - up_ubs = dict([(buildVars[loc, SIZES[j]], 0) - for j in range(0, i)]) + down_ubs = dict( + [(buildVars[loc, SIZES[j]], 0) for j in range(i, len(SIZES))] + ) + up_ubs = dict([(buildVars[loc, SIZES[j]], 0) for j in range(0, i)]) return down_lbs, down_ubs, up_lbs, up_ubs - + + def solve(prob): if Advanced_branch: - prob.branch_method = do_branch - + prob.branch_method = do_branch + dippyOpts = {} if not CGL_cuts: - dippyOpts['CutCGL'] = '0' - + dippyOpts["CutCGL"] = "0" + status, message, primals, duals = dippy.Solve(prob, dippyOpts) - 
+ if status == LpStatusOptimal: return dict((var, var.value()) for var in prob.variables()) else: return None + def read_table(data, coerce, transpose=False): lines = data.splitlines() headings = lines[1].split() @@ -157,16 +163,19 @@ def read_table(data, coerce, transpose=False): for row in lines[2:]: items = row.split() for i, item in enumerate(items[1:]): - if transpose: key = (headings[i], items[0]) - else: key = (items[0], headings[i]) + if transpose: + key = (headings[i], items[0]) + else: + key = (items[0], headings[i]) result[key] = coerce(item) return result + def print_table(rows, cols, fn): print("\t", "\t".join(cols)) for r in rows: - print(r,"\t", "\t".join(str(fn(r,c)) for c in cols)) + print(r, "\t", "\t".join(str(fn(r, c)) for c in cols)) + def print_var_table(rows, cols, var, fn=lambda x: x): - print_table(rows, cols, lambda x, y: - fn(var[(x,y)].varValue)) + print_table(rows, cols, lambda x, y: fn(var[(x, y)].varValue)) diff --git a/Dip/src/dippy/examples/coke/coke_instance.py b/Dip/src/dippy/examples/coke/coke_instance.py index 7144678e..990ff15c 100644 --- a/Dip/src/dippy/examples/coke/coke_instance.py +++ b/Dip/src/dippy/examples/coke/coke_instance.py @@ -2,41 +2,32 @@ from __future__ import print_function from __future__ import absolute_import from past.utils import old_div -from .coke_func import CokeProb, read_table, formulate, solve, \ - print_table, print_var_table +from .coke_func import ( + CokeProb, + read_table, + formulate, + solve, + print_table, + print_var_table, +) -if __name__ == '__main__': +if __name__ == "__main__": # Python starts here convert = 1.3 - mine_supply = { - "M1": 25.8, - "M2": 728, - "M3": 1456, - "M4": 49, - "M5": 36.9, - "M6": 1100, - } + mine_supply = {"M1": 25.8, "M2": 728, "M3": 1456, "M4": 49, "M5": 36.9, "M6": 1100} LOCATIONS = ["L1", "L2", "L3", "L4", "L5", "L6"] - build_costs = { - 0: 0, - 75: 4.4, - 150: 7.4, - 225: 10.5, - 300: 13.5, - 375: 16.5, - 450: 19.6, - } + build_costs = {0: 0, 75: 4.4, 150: 7.4, 225: 10.5, 300: 13.5, 375: 16.5, 450: 19.6} customer_demand = { - "C1": 83, - "C2": 5.5, - "C3": 6.975, - "C4": 5.5, + "C1": 83, + "C2": 5.5, + "C3": 6.975, + "C4": 5.5, "C5": 720.75, - "C6": 5.5, + "C6": 5.5, } mine_trans_data = """ @@ -61,37 +52,40 @@ mine_trans = read_table(mine_trans_data, int) - cust_trans = read_table(cust_trans_data, int, - transpose=True) + cust_trans = read_table(cust_trans_data, int, transpose=True) transport_costs = dict(mine_trans) transport_costs.update(cust_trans) - - cp = CokeProb(supply = mine_supply, demand = customer_demand, - LOCATIONS = LOCATIONS, build_costs = build_costs, - conversion_factor = convert, - transport_costs = transport_costs) - + + cp = CokeProb( + supply=mine_supply, + demand=customer_demand, + LOCATIONS=LOCATIONS, + build_costs=build_costs, + conversion_factor=convert, + transport_costs=transport_costs, + ) + prob = formulate(cp) - # Set a zero tolerance (Mike Saunders' "magic number") + # Set a zero tolerance (Mike Saunders' "magic number") prob.tol = pow(pow(2, -24), old_div(2.0, 3.0)) xopt = solve(prob) for l in cp.LOCATIONS: for s in cp.SIZES: - if xopt[prob.buildVars[(l,s)]] > 0: - print("Build %s %s (%s)" % \ - (l, s, xopt[prob.buildVars[(l,s)]])) + if xopt[prob.buildVars[(l, s)]] > 0: + print("Build %s %s (%s)" % (l, s, xopt[prob.buildVars[(l, s)]])) print() print_var_table(cp.MINES, LOCATIONS, prob.flowVars) print() print_var_table(cp.LOCATIONS, cp.CUSTOMERS, prob.flowVars) - if prob.display_mode != 'off': + if prob.display_mode != "off": numNodes = 
len(prob.Tree.get_node_list()) - if ((prob.Tree.attr['display'] == 'pygame') or - (prob.Tree.attr['display'] == 'xdot')): + if (prob.Tree.attr["display"] == "pygame") or ( + prob.Tree.attr["display"] == "xdot" + ): prob.Tree.display() diff --git a/Dip/src/dippy/examples/csp/cutting_stock.py b/Dip/src/dippy/examples/csp/cutting_stock.py index 8cd43437..5768ca7d 100644 --- a/Dip/src/dippy/examples/csp/cutting_stock.py +++ b/Dip/src/dippy/examples/csp/cutting_stock.py @@ -1,5 +1,6 @@ from __future__ import print_function from builtins import range + #!/usr/bin/env python from pulp import * @@ -11,19 +12,11 @@ import coinor.dippy as dippy from coinor.dippy import DipSolStatOptimal -length = { -"9cm": 9, -"7cm": 7, -"5cm": 5 -} +length = {"9cm": 9, "7cm": 7, "5cm": 5} ITEMS = list(length.keys()) -demand = { -"9cm": 3, -"7cm": 2, -"5cm": 2 -} +demand = {"9cm": 3, "7cm": 2, "5cm": 2} total_patterns = sum(demand[i] for i in ITEMS) @@ -32,6 +25,7 @@ total_length[p] = 20 PATTERNS = list(total_length.keys()) + def cross(i1, i2): r = [] for a in i1: @@ -39,6 +33,7 @@ def cross(i1, i2): r.append((a, b)) return r + CUTS = cross(PATTERNS, ITEMS) prob = dippy.DipProblem("Python", LpMinimize) @@ -56,43 +51,42 @@ def cross(i1, i2): # Meet demand for i in ITEMS: - prob += lpSum(cutVars[(p, i)] for p in PATTERNS) \ - >= demand[i] + prob += lpSum(cutVars[(p, i)] for p in PATTERNS) >= demand[i] # Ordering patterns for i, p in enumerate(PATTERNS): if p != PATTERNS[-1]: - prob += useVars[p] >= useVars[PATTERNS[i+1]] + prob += useVars[p] >= useVars[PATTERNS[i + 1]] for p in PATTERNS: - prob.relaxation[p] += \ - lpSum(length[i] * cutVars[(p, i)] for i in ITEMS) \ - <= total_length[p] * useVars[p] + prob.relaxation[p] += ( + lpSum(length[i] * cutVars[(p, i)] for i in ITEMS) + <= total_length[p] * useVars[p] + ) + def solve_subproblem(prob, keySub, redCosts, target): # get items with negative reduced cost - item_idx = [i for i in ITEMS \ - if redCosts[cutVars[(keySub, i)]] < 0] + item_idx = [i for i in ITEMS if redCosts[cutVars[(keySub, i)]] < 0] vars = [cutVars[(keySub, i)] for i in item_idx] obj = [-redCosts[cutVars[(keySub, i)]] for i in item_idx] weights = [length[i] for i in item_idx] z, solution = kp(obj, weights, total_length[p]) - - total_weight = sum(w * solution[i] \ - for i, w in enumerate(weights)) + + total_weight = sum(w * solution[i] for i, w in enumerate(weights)) assert total_weight <= total_length[p] # add in reduced cost of useVars - var_values = [(v, solution[i]) \ - for i, v in enumerate(vars) \ - if solution[i] > 0] + var_values = [(v, solution[i]) for i, v in enumerate(vars) if solution[i] > 0] var_values.append((useVars[keySub], 1)) return DipSolStatOptimal, [var_values] + prob.relaxed_solver = solve_subproblem + def kp(obj, weights, capacity): assert len(obj) == len(weights) n = len(obj) @@ -102,7 +96,7 @@ def kp(obj, weights, capacity): if capacity == 0: return 0, [0 for i in range(n)] - + n = len(obj) # Don't include item @@ -110,8 +104,7 @@ def kp(obj, weights, capacity): # Check all items for inclusion for i in range(n): if weights[i] <= capacity: - zyes, solyes = kp(obj, weights, \ - capacity - weights[i]) + zyes, solyes = kp(obj, weights, capacity - weights[i]) zyes += obj[i] solyes[i] += 1 if zbest > zyes: @@ -120,12 +113,16 @@ def kp(obj, weights, capacity): return zbest, solbest -dippy.Solve(prob, { - 'generateCuts': '1', - 'doPriceCut':'1', - 'SolveRelaxAsIp': '1', \ + +dippy.Solve( + prob, + { + "generateCuts": "1", + "doPriceCut": "1", + "SolveRelaxAsIp": "1", # use default IP to 
solve subproblems -}) + }, +) for i, var in list(useVars.items()): if var.varValue: @@ -134,11 +131,8 @@ def kp(obj, weights, capacity): for pat in PATTERNS: for i in ITEMS: if cutVars[(pat, i)].varValue: - print("Pat", pat, "item", i, \ - cutVars[(pat, i)].varValue) + print("Pat", pat, "item", i, cutVars[(pat, i)].varValue) ##for (pat, w), var in cutVars.items(): ## if var.varValue: ## print "Pat", pat, "item", w, var.varValue - - diff --git a/Dip/src/dippy/examples/cvpmp/CVPMP.py b/Dip/src/dippy/examples/cvpmp/CVPMP.py index a836f4d7..77f20a03 100755 --- a/Dip/src/dippy/examples/cvpmp/CVPMP.py +++ b/Dip/src/dippy/examples/cvpmp/CVPMP.py @@ -3,19 +3,21 @@ from __future__ import division from builtins import range from past.utils import old_div -__title__ = 'B&P-cut for the Capacitated Vertex p-Median Problem (CVPMP)' -__version__ = '1.0 Nov 2013' -__author__ = 'Dago Quevedo' -__email__ = 'dago@yalma.fime.uanl.mx' -import sys +__title__ = "B&P-cut for the Capacitated Vertex p-Median Problem (CVPMP)" +__version__ = "1.0 Nov 2013" +__author__ = "Dago Quevedo" +__email__ = "dago@yalma.fime.uanl.mx" + +import sys from pulp import LpVariable, LpBinary, lpSum, value, LpProblem, LpMaximize -from math import * +from math import * + try: import path except ImportError: pass - + try: import src.dippy as dippy from src.dippy import DipSolStatOptimal @@ -23,121 +25,120 @@ import coinor.dippy as dippy from coinor.dippy import DipSolStatOptimal -#Globar vars +# Globar vars -n = None -p = None -d = None -s = None -w = None -V = None -x = None -y = None +n = None +p = None +d = None +s = None +w = None +V = None +x = None +y = None -tol = pow(pow(2, -24), old_div(2.0, 3.0)) -display_mode = 'off' +tol = pow(pow(2, -24), old_div(2.0, 3.0)) +display_mode = "off" + + +def init(_n, _p, _d, _s, _w, _V): -def init(_n,_p,_d,_s,_w,_V): - global n, p, d, s, w, V - n = _n - p = _p - d = _d - s = _s - w = _w - V = _V + n = _n + p = _p + d = _d + s = _s + w = _w + V = _V + def solve_subproblem(prob, i, redCosts, target): - - vars = [x[(i, j)] for j in V] - obj = [max(-redCosts[x[(i, j)]], 0) for j in V] - weights= [w[j] for j in V] - #Solver a knapsack for i + vars = [x[(i, j)] for j in V] + obj = [max(-redCosts[x[(i, j)]], 0) for j in V] + weights = [w[j] for j in V] + + # Solver a knapsack for i z, solution = KP01(obj, weights, s[i]) rc = redCosts[y[i]] - z - - #Cost - cost = sum([d[i,V[j]] for j in solution]) - #Cluster of customers for i - var_val = dict([(vars[j], 1) for j in solution]) - var_val[y[i]] = 1 + # Cost + cost = sum([d[i, V[j]] for j in solution]) + + # Cluster of customers for i + var_val = dict([(vars[j], 1) for j in solution]) + var_val[y[i]] = 1 return DipSolStatOptimal, [var_val] + def KP01(obj, weights, capacity): - + assert len(obj) == len(weights) n = len(obj) if n == 0: return 0, [] - - c = [[0]*(capacity+1) for i in range(n)] - added = [[False]*(capacity+1) for i in range(n)] - + + c = [[0] * (capacity + 1) for i in range(n)] + added = [[False] * (capacity + 1) for i in range(n)] + for i in range(n): for j in range(capacity + 1): - if (weights[i] > j): - c[i][j] = c[i-1][j] + if weights[i] > j: + c[i][j] = c[i - 1][j] else: - c_add = obj[i] + c[i-1][j-weights[i]] - if c_add > c[i-1][j]: - c[i][j] = c_add + c_add = obj[i] + c[i - 1][j - weights[i]] + if c_add > c[i - 1][j]: + c[i][j] = c_add added[i][j] = True else: - c[i][j] = c[i-1][j] - - i = n-1 + c[i][j] = c[i - 1][j] + + i = n - 1 j = capacity - + solution = [] while i >= 0 and j >= 0: if added[i][j]: solution.append(i) j -= weights[i] 
i -= 1 - - return c[n-1][capacity], solution + return c[n - 1][capacity], solution def Solver(): - - global x,y - prob = dippy.DipProblem("CVPMP", display_mode = display_mode, - layout = 'dot', display_interval = 0) - + global x, y + + prob = dippy.DipProblem( + "CVPMP", display_mode=display_mode, layout="dot", display_interval=0 + ) + X = [(i, j) for i in V for j in V] x = LpVariable.dicts("x", X, 0, 1, LpBinary) y = LpVariable.dicts("y", V, 0, 1, LpBinary) prob += (lpSum(d[i, j] * x[(i, j)] for i in V for j in V), "min") - - #linking constraints + + # linking constraints for j in V: - prob += lpSum(x[(i,j)] for i in V) == 1 - - #non-relaxing + prob += lpSum(x[(i, j)] for i in V) == 1 + + # non-relaxing for i in V: - prob.relaxation[i] += lpSum(w[j] * x[(i, j)] - for j in V) <= s[i]*y[i] + prob.relaxation[i] += lpSum(w[j] * x[(i, j)] for j in V) <= s[i] * y[i] prob += lpSum(y[i] for i in V) == p prob.relaxed_solver = solve_subproblem - dippy.Solve(prob, { - 'TolZero' : '%s' % tol, - 'doCut' : '1', - 'generateInitVars' : '1', - 'CutCGL' : '1', - }) - + dippy.Solve( + prob, + {"TolZero": "%s" % tol, "doCut": "1", "generateInitVars": "1", "CutCGL": "1"}, + ) - #Make solution + # Make solution solution = [] for i in V: if y[i].varValue: @@ -146,8 +147,6 @@ def Solver(): if x[(i, j)].varValue: cluster.append(j) - solution.append((i,cluster)) - + solution.append((i, cluster)) return round(prob.objective.value()), solution - diff --git a/Dip/src/dippy/examples/cvpmp/Global.py b/Dip/src/dippy/examples/cvpmp/Global.py index efd1efe6..62b911bf 100755 --- a/Dip/src/dippy/examples/cvpmp/Global.py +++ b/Dip/src/dippy/examples/cvpmp/Global.py @@ -1,29 +1,32 @@ #!/usr/bin/env python -__title__ = 'General tools' -__version__ = '1.0 Nov 2013' -__author__ = 'Dago Quevedo' -__email__ = 'dago@yalma.fime.uanl.mx' +__title__ = "General tools" +__version__ = "1.0 Nov 2013" +__author__ = "Dago Quevedo" +__email__ = "dago@yalma.fime.uanl.mx" import math import os import sys import time -#import resource + +# import resource def euclidean(x1, x2, y1, y2): - return int(math.sqrt(math.pow(x1 - x2, 2)+math.pow(y1 - y2, 2))) + return int(math.sqrt(math.pow(x1 - x2, 2) + math.pow(y1 - y2, 2))) + def indexOf(L, value): try: i = L.index(value) except ValueError: i = -1 - + return i + def path(): - return os.path.abspath(os.path.split(sys.argv[0])[0]) \ No newline at end of file + return os.path.abspath(os.path.split(sys.argv[0])[0]) diff --git a/Dip/src/dippy/examples/cvpmp/__main__.py b/Dip/src/dippy/examples/cvpmp/__main__.py index c27d1602..e68c7d6e 100755 --- a/Dip/src/dippy/examples/cvpmp/__main__.py +++ b/Dip/src/dippy/examples/cvpmp/__main__.py @@ -2,48 +2,50 @@ from __future__ import absolute_import -__title__ = 'Main module of B&P-cut for the CVPMP and CVPCP' -__version__ = '1.0 Nov 2013' -__author__ = 'Dago Quevedo' -__email__ = 'dago@yalma.fime.uanl.mx' +__title__ = "Main module of B&P-cut for the CVPMP and CVPCP" +__version__ = "1.0 Nov 2013" +__author__ = "Dago Quevedo" +__email__ = "dago@yalma.fime.uanl.mx" -import sys -from . import draw -from . import Global + +import sys +from . import draw +from . import Global try: import path except ImportError: pass - -from . import CVPMP -from .readInstance import * + +from . 
import CVPMP +from .readInstance import * + def main(): - #read instance - #path=sys.argv[1] - id,n,p,d,s,w,cxy,V,mytype,id = read('Instances/pmedcap1.dat') - - CVPMP.init(n,p,d,s,w,V) + # read instance + # path=sys.argv[1] + id, n, p, d, s, w, cxy, V, mytype, id = read("Instances/pmedcap1.dat") + + CVPMP.init(n, p, d, s, w, V) z, solution = CVPMP.Solver() - #display solution - data = Global.path()+'/out.dat' - f = open(data,'w') - + # display solution + data = Global.path() + "/out.dat" + f = open(data, "w") + for s in solution: - i=s[0] + i = s[0] for j in s[1]: if i == j: mytype = 1 else: mytype = 0 - f.write('%d\t%d\t%d\t%d\t%d\n'% - (mytype, j, i, cxy[j][0], cxy[j][1])) + f.write("%d\t%d\t%d\t%d\t%d\n" % (mytype, j, i, cxy[j][0], cxy[j][1])) - f.close(); + f.close() draw.draw(data, mytype, id) + main() diff --git a/Dip/src/dippy/examples/cvpmp/draw.py b/Dip/src/dippy/examples/cvpmp/draw.py index a925164b..72a58ce2 100755 --- a/Dip/src/dippy/examples/cvpmp/draw.py +++ b/Dip/src/dippy/examples/cvpmp/draw.py @@ -3,38 +3,41 @@ from __future__ import division from future import standard_library + standard_library.install_aliases() from builtins import str from builtins import range from past.utils import old_div -__title__ = 'Display graphic solution for the CVPMP and CVPCP' -__version__ = '1.0 Nov 2013' -__author__ = 'Dago Quevedo' -__email__ = 'dago@yalma.fime.uanl.mx' +__title__ = "Display graphic solution for the CVPMP and CVPCP" +__version__ = "1.0 Nov 2013" +__author__ = "Dago Quevedo" +__email__ = "dago@yalma.fime.uanl.mx" + + +import string +from tkinter import * -import string -from tkinter import * def draw(path, mytype, id): - f = open(path,"r") + f = open(path, "r") V = [] P = [] - + for line in f: Z = string.split(line) - V.append([int(Z[0]),int(Z[1]),int(Z[2]),float(Z[3]),float(Z[4])]) - if int(Z[0]) == 1: - P.append([int(Z[0]),int(Z[1]),int(Z[2]),float(Z[3]),float(Z[4])]) - + V.append([int(Z[0]), int(Z[1]), int(Z[2]), float(Z[3]), float(Z[4])]) + if int(Z[0]) == 1: + P.append([int(Z[0]), int(Z[1]), int(Z[2]), float(Z[3]), float(Z[4])]) + f.close() - x_max = max(V,key = lambda x:x[3])[3] - y_max = max(V,key = lambda x:x[4])[4] - x_min = min(V,key = lambda x:x[3])[3] - y_min = min(V,key = lambda x:x[4])[4] - - #Normalizacion de valores + x_max = max(V, key=lambda x: x[3])[3] + y_max = max(V, key=lambda x: x[4])[4] + x_min = min(V, key=lambda x: x[3])[3] + y_min = min(V, key=lambda x: x[4])[4] + + # Normalizacion de valores if mytype == 1: scale = 600 @@ -43,37 +46,99 @@ def draw(path, mytype, id): if mytype == 0: scale = 650 delta = 20 - for i in range(len(V)): V[i][3] = ((old_div((V[i][3] - x_min), (x_max - x_min))) * scale) + delta V[i][4] = ((old_div((V[i][4] - y_min), (y_max - y_min))) * scale) + delta - if i < len(P): + if i < len(P): P[i][3] = ((old_div((P[i][3] - x_min), (x_max - x_min))) * scale) + delta P[i][4] = ((old_div((P[i][4] - y_min), (y_max - y_min))) * scale) + delta _d_ = 0 - - C = ["#87CEFA","#C0C0C0","#FFA500","#DDA0DD","#9ACD32", "#9ACD32", - "#E99699","#B8F7B8","#D8D8F9","#FAD6A5","#F4FBA6", "#FF434F", - "#87CEFA","#C0C0C0","#FFA500","#DDA0DD","#9ACD32", "#9ACD32", - "#E99699","#B8F7B8","#D8D8F9","#FAD6A5","#F4FBA6", "#FF434F", - "#87CEFA","#C0C0C0","#FFA500","#DDA0DD","#9ACD32", "#9ACD32", - "#E99699","#B8F7B8","#D8D8F9","#FAD6A5","#F4FBA6", "#FF434F", - "#87CEFA","#C0C0C0","#FFA500","#DDA0DD","#9ACD32", "#9ACD32", - "#E99699","#B8F7B8","#D8D8F9","#FAD6A5","#F4FBA6", "#FF434F", - "#87CEFA","#C0C0C0","#FFA500","#DDA0DD","#9ACD32", "#9ACD32", - 
"#E99699","#B8F7B8","#D8D8F9","#FAD6A5","#F4FBA6", "#FF434F", - "#87CEFA","#C0C0C0","#FFA500","#DDA0DD","#9ACD32", "#9ACD32", - "#E99699","#B8F7B8","#D8D8F9","#FAD6A5","#F4FBA6", "#FF434F"] - + + C = [ + "#87CEFA", + "#C0C0C0", + "#FFA500", + "#DDA0DD", + "#9ACD32", + "#9ACD32", + "#E99699", + "#B8F7B8", + "#D8D8F9", + "#FAD6A5", + "#F4FBA6", + "#FF434F", + "#87CEFA", + "#C0C0C0", + "#FFA500", + "#DDA0DD", + "#9ACD32", + "#9ACD32", + "#E99699", + "#B8F7B8", + "#D8D8F9", + "#FAD6A5", + "#F4FBA6", + "#FF434F", + "#87CEFA", + "#C0C0C0", + "#FFA500", + "#DDA0DD", + "#9ACD32", + "#9ACD32", + "#E99699", + "#B8F7B8", + "#D8D8F9", + "#FAD6A5", + "#F4FBA6", + "#FF434F", + "#87CEFA", + "#C0C0C0", + "#FFA500", + "#DDA0DD", + "#9ACD32", + "#9ACD32", + "#E99699", + "#B8F7B8", + "#D8D8F9", + "#FAD6A5", + "#F4FBA6", + "#FF434F", + "#87CEFA", + "#C0C0C0", + "#FFA500", + "#DDA0DD", + "#9ACD32", + "#9ACD32", + "#E99699", + "#B8F7B8", + "#D8D8F9", + "#FAD6A5", + "#F4FBA6", + "#FF434F", + "#87CEFA", + "#C0C0C0", + "#FFA500", + "#DDA0DD", + "#9ACD32", + "#9ACD32", + "#E99699", + "#B8F7B8", + "#D8D8F9", + "#FAD6A5", + "#F4FBA6", + "#FF434F", + ] + root = Tk() root.wm_attributes("-topmost", 1) root.title("Solution " + str(type) + " - " + str(id)) - canvas = Canvas(root,width = scale + (delta * 2), height = scale + (delta * 2), - bg = 'white') - canvas.pack(expand = YES, fill = BOTH) + canvas = Canvas( + root, width=scale + (delta * 2), height=scale + (delta * 2), bg="white" + ) + canvas.pack(expand=YES, fill=BOTH) for _k_ in range(len(P)): k = P[_k_] @@ -81,39 +146,48 @@ def draw(path, mytype, id): if j[2] == k[1]: if j[0] == 0: canvas.create_line( - k[3] + 10,k[4] + 10, - j[3] + 7, j[4] + 7, - dash = (3,3)) + k[3] + 10, k[4] + 10, j[3] + 7, j[4] + 7, dash=(3, 3) + ) else: canvas.create_line( - k[3] + 10,k[4] + 10, - j[3] + 7,j[4] + 7,fill = "red",width = 1.5, - dash = (3,3)) - - - if j[1] > 9:_d_ = 2 - else :_d_ = 0 + k[3] + 10, + k[4] + 10, + j[3] + 7, + j[4] + 7, + fill="red", + width=1.5, + dash=(3, 3), + ) + + if j[1] > 9: + _d_ = 2 + else: + _d_ = 0 canvas.create_oval( - j[3],j[4], - j[3] + 15,j[4] + 15, - width = 1, fill = C[_k_]) - + j[3], j[4], j[3] + 15, j[4] + 15, width=1, fill=C[_k_] + ) + canvas.create_text( - j[3] + (5 - _d_), - j[4] + 7, - text = str(j[1]),anchor = "w",fill = "black", - font = ("Arial", 10)) - canvas.create_rectangle( - k[3],k[4], - k[3] + 20,k[4] + 20, - width = 2, fill = C[_k_]) - - if k[1] > 9:_d_ = 3 - else :_d_ = 0 + j[3] + (5 - _d_), + j[4] + 7, + text=str(j[1]), + anchor="w", + fill="black", + font=("Arial", 10), + ) + canvas.create_rectangle(k[3], k[4], k[3] + 20, k[4] + 20, width=2, fill=C[_k_]) + + if k[1] > 9: + _d_ = 3 + else: + _d_ = 0 canvas.create_text( - k[3] + (6 - _d_),k[4] + 10, - text = str(k[1]),anchor = "w",fill = "black", - font = ("Arial", 14)) + k[3] + (6 - _d_), + k[4] + 10, + text=str(k[1]), + anchor="w", + fill="black", + font=("Arial", 14), + ) root.mainloop() - diff --git a/Dip/src/dippy/examples/cvpmp/readInstance.py b/Dip/src/dippy/examples/cvpmp/readInstance.py index 5ab2b76f..7c0b0e32 100755 --- a/Dip/src/dippy/examples/cvpmp/readInstance.py +++ b/Dip/src/dippy/examples/cvpmp/readInstance.py @@ -1,12 +1,13 @@ from __future__ import absolute_import from builtins import range + #!/usr/bin/env python -__title__ = 'Read instances for the CVPMP and CVPCP' -__version__ = '1.0 Nov 2013' -__author__ = 'Dago Quevedo' -__email__ = 'dago@yalma.fime.uanl.mx' +__title__ = "Read instances for the CVPMP and CVPCP" +__version__ = "1.0 Nov 2013" +__author__ = 
"Dago Quevedo" +__email__ = "dago@yalma.fime.uanl.mx" import string @@ -14,122 +15,117 @@ def read(path): - file = open(path,'r') + file = open(path, "r") line = string.split(file.readline()) type = int(line[0]) - cxy = None - - #1 - Beasly + cxy = None + + # 1 - Beasly if type == 1: - id = int(line[1]) - n = int(line[2]) - p = int(line[3]) + id = int(line[1]) + n = int(line[2]) + p = int(line[3]) line = string.split(file.readline()) - q = int(line[0]) - - cxy = {} - d = {} - s = {} - w = {} - V = [i for i in range(1, n + 1)] - + q = int(line[0]) + + cxy = {} + d = {} + s = {} + w = {} + V = [i for i in range(1, n + 1)] + for i in V: - line = string.split(file.readline()) - cxy[i] = [int(line[1]), int(line[2])] - s[i] = q - w[i] = int(line[3]) - - for i in range(1,n+1): - for j in range(i,n+1): + line = string.split(file.readline()) + cxy[i] = [int(line[1]), int(line[2])] + s[i] = q + w[i] = int(line[3]) + + for i in range(1, n + 1): + for j in range(i, n + 1): e = Global.euclidean(cxy[i][0], cxy[j][0], cxy[i][1], cxy[j][1]) - d[i,j] = e - d[j,i] = e - - - #2 - GalvaoReVelle + d[i, j] = e + d[j, i] = e + + # 2 - GalvaoReVelle if type == 2: id = int(line[1]) - n = int(line[2]) - p = int(line[3]) - - d = {} - s = {} - w = {} - V = [i for i in range(1,n+1)] - + n = int(line[2]) + p = int(line[3]) + + d = {} + s = {} + w = {} + V = [i for i in range(1, n + 1)] + i = 1 for s_ in string.split(file.readline()): s[i] = int(float(s_)) i += 1 - + i = 1 for w_ in string.split(file.readline()): w[i] = int(float(w_)) i += 1 - + for i in V: j = 1 for d_ in string.split(file.readline()): - d[i,j] = int(float(d_)) + d[i, j] = int(float(d_)) j += 1 - - - #3 - Lorena + + # 3 - Lorena if type == 3: - - id = int(line[1]) - n = int(line[2]) - p = int(line[3]) - + + id = int(line[1]) + n = int(line[2]) + p = int(line[3]) + cxy = {} - d = {} - s = {} - w = {} - V = [i for i in range(1,n+1)] - + d = {} + s = {} + w = {} + V = [i for i in range(1, n + 1)] + for i in V: - line = string.split(file.readline()) - cxy[i] = [int(line[0]), int(line[1])] - s[i] = int(line[2]) - w[i] = int(line[3]) - - for i in range(1,n+1): - for j in range(i,n+1): - e = Global.euclidean(cxy[i][0], cxy[j][0], cxy[i][1], cxy[j][1]) - d[i,j] = e - d[j,i] = e - - - - #4 - OR-Library + line = string.split(file.readline()) + cxy[i] = [int(line[0]), int(line[1])] + s[i] = int(line[2]) + w[i] = int(line[3]) + + for i in range(1, n + 1): + for j in range(i, n + 1): + e = Global.euclidean(cxy[i][0], cxy[j][0], cxy[i][1], cxy[j][1]) + d[i, j] = e + d[j, i] = e + + # 4 - OR-Library if type >= 4 and type <= 7: id = int(line[1]) - n = int(line[2]) - p = int(line[3]) + n = int(line[2]) + p = int(line[3]) - - d = {} - s = {} - w = {} - V = [i for i in range(1,n+1)] + d = {} + s = {} + w = {} + V = [i for i in range(1, n + 1)] for i in V: j = 1 for d_ in string.split(file.readline()): - d[i,j] = int(float(d_)) + d[i, j] = int(float(d_)) j += 1 i = 1 for w_ in string.split(file.readline()): w[i] = int(float(w_)) i += 1 - + i = 1 for s_ in string.split(file.readline()): s[i] = int(float(s_)) i += 1 - + file.close() - return id,n,p,d,s,w,cxy,V,id,type + return id, n, p, d, s, w, cxy, V, id, type diff --git a/Dip/src/dippy/examples/gap/gap.py b/Dip/src/dippy/examples/gap/gap.py index de8de0c7..dee5b61c 100755 --- a/Dip/src/dippy/examples/gap/gap.py +++ b/Dip/src/dippy/examples/gap/gap.py @@ -17,7 +17,7 @@ import path except ImportError: pass - + try: import src.dippy as dippy from src.dippy import DipSolStatOptimal @@ -62,28 +62,27 @@ 
v.append(LpVariable("M%dT%d" % (m, t), cat=LpBinary)) assignVars.append(v) -prob = dippy.DipProblem("GAP", - display_mode = 'off', - layout = 'dot', - display_interval = None, - ) +prob = dippy.DipProblem("GAP", display_mode="off", layout="dot", display_interval=None) # objective prob += lpSum(assignVars[m][t] * COSTS[m][t] for m, t in MACHINES_TASKS), "min" # machine capacity (knapsacks, relaxation) for m in MACHINES: - prob.relaxation[m] += lpSum(assignVars[m][t] * RESOURCE_USE[m][t] for t in TASKS) <= CAPACITIES[m] + prob.relaxation[m] += ( + lpSum(assignVars[m][t] * RESOURCE_USE[m][t] for t in TASKS) <= CAPACITIES[m] + ) # assignment for t in TASKS: prob += lpSum(assignVars[m][t] for m in MACHINES) == 1 + def solve_subproblem(prob, machine, redCosts, target): if debug_print: print("solve_subproblem...") print(redCosts) - + # get tasks which have negative reduced costs task_idx = [t for t in TASKS if redCosts[assignVars[machine][t]] < 0] var = [assignVars[machine][t] for t in task_idx] @@ -99,7 +98,7 @@ def solve_subproblem(prob, machine, redCosts, target): print("z, solution =", z, solution) print("rc", -z) - if z < -tol: # Zero solution is optimal + if z < -tol: # Zero solution is optimal if debug_print: print("Zero solution is optimal") return DipSolStatOptimal, [{}] @@ -110,53 +109,53 @@ def solve_subproblem(prob, machine, redCosts, target): rcCheck = 0.0 for v in list(var_values.keys()): rcCheck += redCosts[v] * var_values[v] - print("Checking rc calc", -z, rcCheck) + print("Checking rc calc", -z, rcCheck) print(var_values) - + return DipSolStatOptimal, [var_values] + def knapsack01(obj, weights, capacity): """ 0/1 knapsack solver, maximizes profit. weights and capacity integer """ - + debug_subproblem = False - + assert len(obj) == len(weights) n = len(obj) if n == 0: return 0, [] - if (debug_subproblem): - relaxation = LpProblem('relaxation', LpMaximize) + if debug_subproblem: + relaxation = LpProblem("relaxation", LpMaximize) relax_vars = [str(i) for i in range(n)] - var_dict = LpVariable.dicts("", relax_vars, 0, 1, LpBinary) + var_dict = LpVariable.dicts("", relax_vars, 0, 1, LpBinary) relaxation += lpSum(var_dict[str(i)] * weights[i] for i in range(n)) <= capacity relaxation += lpSum(var_dict[str(i)] * obj[i] for i in range(n)) relaxation.solve() relax_obj = value(relaxation.objective) - solution = [i for i in range(n) if var_dict[str(i)].varValue > tol ] + solution = [i for i in range(n) if var_dict[str(i)].varValue > tol] print(relax_obj, solution) - - c = [[0]*(capacity+1) for i in range(n)] - added = [[False]*(capacity+1) for i in range(n)] + c = [[0] * (capacity + 1) for i in range(n)] + added = [[False] * (capacity + 1) for i in range(n)] # c [items, remaining capacity] # important: this code assumes strictly positive objective values for i in range(n): - for j in range(capacity+1): - if (weights[i] > j): - c[i][j] = c[i-1][j] + for j in range(capacity + 1): + if weights[i] > j: + c[i][j] = c[i - 1][j] else: - c_add = obj[i] + c[i-1][j-weights[i]] - if c_add > c[i-1][j]: + c_add = obj[i] + c[i - 1][j - weights[i]] + if c_add > c[i - 1][j]: c[i][j] = c_add added[i][j] = True else: - c[i][j] = c[i-1][j] + c[i][j] = c[i - 1][j] # backtrack to find solution - i = n-1 + i = n - 1 j = capacity solution = [] @@ -165,26 +164,29 @@ def knapsack01(obj, weights, capacity): solution.append(i) j -= weights[i] i -= 1 - - return c[n-1][capacity], solution -#prob.relaxed_solver = solve_subproblem + return c[n - 1][capacity], solution + -dippy.Solve(prob, { - 'TolZero': '%s' % tol, - 
'doPriceCut': '1', -# 'logLevel': '3', -}) +# prob.relaxed_solver = solve_subproblem + +dippy.Solve( + prob, + { + "TolZero": "%s" % tol, + "doPriceCut": "1", + # 'logLevel': '3', + }, +) for m in MACHINES: - print() - print("Machine %d assigned tasks" %m, end=' ') + print() + print("Machine %d assigned tasks" % m, end=" ") for t in TASKS: v = assignVars[m][t].varValue if v: - print("%d" %t, end=' ') - -if prob.display_mode != 'off': - if (prob.Tree.attr['display'] == 'pygame') or (prob.Tree.attr['display'] == 'xdot'): - prob.Tree.display() + print("%d" % t, end=" ") +if prob.display_mode != "off": + if (prob.Tree.attr["display"] == "pygame") or (prob.Tree.attr["display"] == "xdot"): + prob.Tree.display() diff --git a/Dip/src/dippy/examples/milp/milp.py b/Dip/src/dippy/examples/milp/milp.py index e09b8ace..9493fee0 100644 --- a/Dip/src/dippy/examples/milp/milp.py +++ b/Dip/src/dippy/examples/milp/milp.py @@ -1,8 +1,8 @@ -''' +""" Created on Dec 29, 2013 @author: ted -''' +""" from __future__ import division from builtins import str from builtins import range @@ -18,27 +18,44 @@ import path except ImportError: pass - + try: import src.dippy as dippy except ImportError: import coinor.dippy as dippy -def GenerateRandomBlock(VARIABLES, CONSTRAINTS, density = 0.2, - maxObjCoeff = 10, maxConsCoeff = 10, - tightness = 2, rand_seed = 2): + +def GenerateRandomBlock( + VARIABLES, + CONSTRAINTS, + density=0.2, + maxObjCoeff=10, + maxConsCoeff=10, + tightness=2, + rand_seed=2, +): random.seed(rand_seed) OBJ = dict((i, random.randint(1, maxObjCoeff)) for i in VARIABLES) - MAT = dict(((i, j), random.randint(1, maxConsCoeff) - if random.random() <= density else 0) - for j in CONSTRAINTS for i in VARIABLES) - RHS = dict((i, random.randint(int(numVars*density*maxConsCoeff/tightness), - int(numVars*density*maxConsCoeff/1.5))) - for i in CONSTRAINTS) + MAT = dict( + ((i, j), random.randint(1, maxConsCoeff) if random.random() <= density else 0) + for j in CONSTRAINTS + for i in VARIABLES + ) + RHS = dict( + ( + i, + random.randint( + int(numVars * density * maxConsCoeff / tightness), + int(numVars * density * maxConsCoeff / 1.5), + ), + ) + for i in CONSTRAINTS + ) return OBJ, MAT, RHS -#display_mode = 'xdot' -#layout = 'dot' + +# display_mode = 'xdot' +# layout = 'dot' tol = pow(pow(2, -24), old_div(2.0, 3.0)) @@ -51,52 +68,58 @@ def GenerateRandomBlock(VARIABLES, CONSTRAINTS, density = 0.2, numVars = sum(numBlockVars) numCons = sum(numBlockCons) + numLinkingCons -VARIABLES = dict(((i, j), 0) for i in range(numBlocks) - for j in range(numBlockVars[i])) +VARIABLES = dict(((i, j), 0) for i in range(numBlocks) for j in range(numBlockVars[i])) CONSTRAINTS = [] for k in range(numBlocks): - CONSTRAINTS.append(["C"+str(k)+"_"+str(j) for j in range(numCons)]) -CONSTRAINTS.append(["C"+str(numBlocks)+"_"+str(j) for j in range(numCons)]) + CONSTRAINTS.append(["C" + str(k) + "_" + str(j) for j in range(numCons)]) +CONSTRAINTS.append(["C" + str(numBlocks) + "_" + str(j) for j in range(numCons)]) -#Generate random MILP +# Generate random MILP var = LpVariable.dicts("x", VARIABLES, 0, 1, LpBinary) numCons = len(CONSTRAINTS) numVars = len(VARIABLES) OBJ, MAT, RHS = GenerateRandomBlock(VARIABLES, CONSTRAINTS[numBlocks]) -prob += -lpSum([OBJ[i]*var[i] for i in var]), "Objective" +prob += -lpSum([OBJ[i] * var[i] for i in var]), "Objective" -#Linking constraints +# Linking constraints for j in CONSTRAINTS[numBlocks]: - prob += lpSum([MAT[i, j]*var[i] for i in var]) <= RHS[j], j + prob += lpSum([MAT[i, j] * var[i] for i in var]) 
<= RHS[j], j -#Blocks +# Blocks for k in range(numBlocks): - OBJ, MAT, RHS = GenerateRandomBlock([(k, i) for i in range(numBlockVars[k])], - CONSTRAINTS[k]) + OBJ, MAT, RHS = GenerateRandomBlock( + [(k, i) for i in range(numBlockVars[k])], CONSTRAINTS[k] + ) for j in CONSTRAINTS[k]: - prob.relaxation[k] += (lpSum([MAT[(k, i), j]*var[k, i] for i in range(numBlockVars[k])]) - <=RHS[j], j) - -dippy.Solve(prob,{ - 'TolZero': '%s' % tol, - 'doCut': '1', - 'CutCGL': '1', - 'SolveMasterAsIp': '0', - 'generateInitVars': '1', - 'LogDebugLevel': '3', - 'LogLevel': '4', - 'LogDumpModel': 5, - 'ALPS' : - {'msgLevel' : 3}} + prob.relaxation[k] += ( + lpSum([MAT[(k, i), j] * var[k, i] for i in range(numBlockVars[k])]) + <= RHS[j], + j, + ) + +dippy.Solve( + prob, + { + "TolZero": "%s" % tol, + "doCut": "1", + "CutCGL": "1", + "SolveMasterAsIp": "0", + "generateInitVars": "1", + "LogDebugLevel": "3", + "LogLevel": "4", + "LogDumpModel": 5, + "ALPS": {"msgLevel": 3}, + }, ) -if prob.display_mode != 'off': +if prob.display_mode != "off": numNodes = len(prob.Tree.get_node_list()) - if prob.Tree.attr['display'] == 'svg': - prob.Tree.write_as_svg(filename = "facility_node%d" % (numNodes + 1), - prevfile = "facility_node%d" % numNodes) + if prob.Tree.attr["display"] == "svg": + prob.Tree.write_as_svg( + filename="facility_node%d" % (numNodes + 1), + prevfile="facility_node%d" % numNodes, + ) prob.Tree.display() - diff --git a/Dip/src/dippy/examples/tsp/tsp.py b/Dip/src/dippy/examples/tsp/tsp.py index 63f5f9c1..8915e6e2 100644 --- a/Dip/src/dippy/examples/tsp/tsp.py +++ b/Dip/src/dippy/examples/tsp/tsp.py @@ -13,27 +13,37 @@ # 2d Euclidean TSP with extremely simple cut generation # x,y coords of cities -CITY_LOCS = [(0, 2), (0, 4), (1, 2), (1, 4), \ - (4, 1), (4, 4), (4, 5), (5, 0), \ - (5, 2), (5, 5)] +CITY_LOCS = [ + (0, 2), + (0, 4), + (1, 2), + (1, 4), + (4, 1), + (4, 4), + (4, 5), + (5, 0), + (5, 2), + (5, 5), +] CITIES = list(range(len(CITY_LOCS))) -ARCS = [] # list of arcs (no duplicates) -ARC_COSTS = {} # distance +ARCS = [] # list of arcs (no duplicates) +ARC_COSTS = {} # distance # for each city, list of arcs into/out of CITY_ARCS = [[] for i in CITIES] # use 2d euclidean distance def dist(x1, y1, x2, y2): - return sqrt((x1-x2)**2 + (y1-y2)**2) + return sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2) + # construct list of arcs for i in CITIES: i_x, i_y = CITY_LOCS[i] - for j in CITIES[i+1:]: + for j in CITIES[i + 1 :]: j_x, j_y = CITY_LOCS[j] - ARC_COSTS[(i,j)] = dist(i_x, i_y, j_x, j_y) + ARC_COSTS[(i, j)] = dist(i_x, i_y, j_x, j_y) ARCS.append((i, j)) CITY_ARCS[i].append((i, j)) CITY_ARCS[j].append((i, j)) @@ -47,8 +57,7 @@ def dist(x1, y1, x2, y2): # degree constraints for city in CITIES: - prob += lpSum(arc_vars[x] for x in CITY_ARCS[city]) \ - == 2 + prob += lpSum(arc_vars[x] for x in CITY_ARCS[city]) == 2 # generate subtour elimination constraints @@ -61,6 +70,7 @@ def dist(x1, y1, x2, y2): symmetric[(i, j)] = (i, j) symmetric[(j, i)] = (i, j) + def generate_cuts(prob, sol): cons = [] not_connected = set(CITIES) @@ -68,23 +78,26 @@ def generate_cuts(prob, sol): while not_connected: start = not_connected.pop() nodes, arcs = get_subtour(sol, start) - if len(nodes) == len(arcs) and \ - len(nodes) < len(CITIES): - cons.append( lpSum(arc_vars[a] for a in arcs) \ - <= len(arcs) - 1 ) + if len(nodes) == len(arcs) and len(nodes) < len(CITIES): + cons.append(lpSum(arc_vars[a] for a in arcs) <= len(arcs) - 1) nodes.remove(start) not_connected -= nodes return cons + + prob.generate_cuts = generate_cuts + def 
is_solution_feasible(prob, sol, tol): nodes, arcs = get_subtour(sol, 0) - return len(nodes) == len(arcs) and \ - len(nodes) == len(CITIES) + return len(nodes) == len(arcs) and len(nodes) == len(CITIES) + + prob.is_solution_feasible = is_solution_feasible + def get_subtour(sol, node): # returns: list of nodes and arcs # in subtour containing node @@ -99,20 +112,20 @@ def get_subtour(sol, node): while to_process: c = to_process.pop() not_processed.remove(c) - new_arcs = [ symmetric[(c, i)] \ - for i in not_processed \ - if sol[ \ - arc_vars[symmetric[(c, i)]]] - > one] - new_nodes = [ i for i in not_processed \ - if symmetric[(i, c)] in new_arcs ] + new_arcs = [ + symmetric[(c, i)] + for i in not_processed + if sol[arc_vars[symmetric[(c, i)]]] > one + ] + new_nodes = [i for i in not_processed if symmetric[(i, c)] in new_arcs] arcs |= set(new_arcs) nodes |= set(new_nodes) to_process |= set(new_nodes) return nodes, arcs -dippy.Solve(prob, {'doCut': '1'}) + +dippy.Solve(prob, {"doCut": "1"}) # print solution for arc, var in list(arc_vars.items()): diff --git a/Dip/src/dippy/examples/wedding/wedding.py b/Dip/src/dippy/examples/wedding/wedding.py index a89a8afd..eed5ff34 100644 --- a/Dip/src/dippy/examples/wedding/wedding.py +++ b/Dip/src/dippy/examples/wedding/wedding.py @@ -11,7 +11,7 @@ import path except ImportError: pass - + try: import src.dippy as dippy from src.dippy import DipSolStatOptimal @@ -23,7 +23,8 @@ max_tables = 5 max_table_size = 4 -guests = 'A B C D E F G I J K L M N O P Q R'.split() +guests = "A B C D E F G I J K L M N O P Q R".split() + def happiness(guest_a, guest_b): """ @@ -32,107 +33,114 @@ def happiness(guest_a, guest_b): """ return abs(ord(guest_a) - ord(guest_b)) -#create the set of possible tables + +# create the set of possible tables tables = list(range(max_tables)) -possible_seatings = [(g, t) for g in guests - for t in tables] +possible_seatings = [(g, t) for g in guests for t in tables] -#create a binary variable to model if a guest sits at a particular table -x = pulp.LpVariable.dicts('possible_seatings', possible_seatings, - lowBound = 0, - upBound = 1, - cat = pulp.LpInteger) +# create a binary variable to model if a guest sits at a particular table +x = pulp.LpVariable.dicts( + "possible_seatings", possible_seatings, lowBound=0, upBound=1, cat=pulp.LpInteger +) -seating_model = dippy.DipProblem("Wedding Seating Model (DIP)", pulp.LpMinimize, - display_mode = 'xdot', display_interval = 0) +seating_model = dippy.DipProblem( + "Wedding Seating Model (DIP)", + pulp.LpMinimize, + display_mode="xdot", + display_interval=0, +) -#specify the maximum number of guests per table +# specify the maximum number of guests per table for table in tables: - seating_model.relaxation[table] += sum([x[(guest, table)] - for guest in guests]) <= \ - max_table_size, \ - "Maximum_table_size_%s"%table + seating_model.relaxation[table] += ( + sum([x[(guest, table)] for guest in guests]) <= max_table_size, + "Maximum_table_size_%s" % table, + ) -#A guest must seated at one and only one table +# A guest must seated at one and only one table for guest in guests: - seating_model += (sum([x[(guest, table)] for table in tables]) == 1, - "Must_seat_%s"%guest) + seating_model += ( + sum([x[(guest, table)] for table in tables]) == 1, + "Must_seat_%s" % guest, + ) -#create a set of variables to model the objective function +# create a set of variables to model the objective function possible_pairs = [(a, b) for a in guests for b in guests if ord(a) < ord(b)] -happy = 
pulp.LpVariable.dicts('table_happiness', tables, - lowBound = 0, - upBound = None, - cat = pulp.LpContinuous) +happy = pulp.LpVariable.dicts( + "table_happiness", tables, lowBound=0, upBound=None, cat=pulp.LpContinuous +) seating_model += sum([happy[table] for table in tables]) -#create constraints for each possible pair +# create constraints for each possible pair for table in tables: for (a, b) in possible_pairs: - seating_model.relaxation[table] += \ - happy[table] >= (happiness(a, b) * (x[(a, table)] + - x[(b, table)] - 1)) + seating_model.relaxation[table] += happy[table] >= ( + happiness(a, b) * (x[(a, table)] + x[(b, table)] - 1) + ) + def relaxed_solver(prob, table, redCosts, target): """ Generate columns (tables) with negative reduced costs """ dvs = [] - neg_guests = [g for g in guests - if redCosts[x[(g,table)]] < 0.0] + neg_guests = [g for g in guests if redCosts[x[(g, table)]] < 0.0] neg_guests.sort() # find all possible tables between two end points - for pos1, pos2 in [(i, j) for i in range(len(neg_guests)) - for j in range(len(neg_guests)) - if j > i]: - # find the suitable guests that can be included in between the end + for pos1, pos2 in [ + (i, j) for i in range(len(neg_guests)) for j in range(len(neg_guests)) if j > i + ]: + # find the suitable guests that can be included in between the end # points - candidate_guests = [(redCosts[x[(g,table)]], g) - for g in neg_guests[pos1+1:pos2]] + candidate_guests = [ + (redCosts[x[(g, table)]], g) for g in neg_guests[pos1 + 1 : pos2] + ] candidate_guests.sort() # pick the best guests (ie those with the negative reduced costs) - possible_table_inner = [g - for _, g in candidate_guests[:max_table_size-2]] - #This is the best table between the end points - possible_table = [neg_guests[pos1]] + possible_table_inner +\ - [neg_guests[pos2]] + possible_table_inner = [g for _, g in candidate_guests[: max_table_size - 2]] + # This is the best table between the end points + possible_table = [neg_guests[pos1]] + possible_table_inner + [neg_guests[pos2]] # calculate the sum of the reduced costs for each of the guests neg_cost = sum(redCosts[x[(g, table)]] for g in possible_table) table_happiness = happiness(possible_table[0], possible_table[-1]) rc = neg_cost + table_happiness * redCosts[happy[table]] - var_values = [(x[(g, table)], 1) - for g in possible_table] + var_values = [(x[(g, table)], 1) for g in possible_table] var_values.append((happy[table], table_happiness)) dvs.append(dict(var_values)) if debug_print: - print('Table: ', table, 'Happiness: ', table_happiness, 'RC: ', rc) + print("Table: ", table, "Happiness: ", table_happiness, "RC: ", rc) return DipSolStatOptimal, dvs -#seating_model.relaxed_solver = relaxed_solver -#seating_model.writeLP('wedding_main.lp') -#for table in tables: +# seating_model.relaxed_solver = relaxed_solver + +# seating_model.writeLP('wedding_main.lp') +# for table in tables: # seating_model.writeRelaxed(table, 'wedding_relax%s.lp' % table); - -dippy.Solve(seating_model, { - 'doPriceCut' : '1', - 'CutCGL' : '1', + +dippy.Solve( + seating_model, + { + "doPriceCut": "1", + "CutCGL": "1", #'generateInitVars' : '1', - }) + }, +) -if seating_model.display_mode != 'off': +if seating_model.display_mode != "off": numNodes = len(seating_model.Tree.get_node_list()) - if seating_model.Tree.attr['display'] == 'svg': - seating_model.Tree.write_as_svg(filename = "facility_node%d" % (numNodes + 1), - prevfile = "facility_node%d" % numNodes) + if seating_model.Tree.attr["display"] == "svg": + 
seating_model.Tree.write_as_svg( + filename="facility_node%d" % (numNodes + 1), + prevfile="facility_node%d" % numNodes, + ) seating_model.Tree.display() for table in tables: - print(table, end=' ') + print(table, end=" ") for guest in guests: - if x[(guest,table)].value() >= 0.99: - print(guest, end=' ') + if x[(guest, table)].value() >= 0.99: + print(guest, end=" ") print(happy[table].value()) - diff --git a/Dip/src/dippy/tests/dippy_tests.py b/Dip/src/dippy/tests/dippy_tests.py index 7aa471bd..ed69a64e 100644 --- a/Dip/src/dippy/tests/dippy_tests.py +++ b/Dip/src/dippy/tests/dippy_tests.py @@ -6,9 +6,9 @@ from pulp import * import coinor.dippy as dippy -class DippyTestCase(unittest.TestCase): - def variable_feasibility_test(self, prob, tol = 1e-5): +class DippyTestCase(unittest.TestCase): + def variable_feasibility_test(self, prob, tol=1e-5): """ tests that the problem is feasible for its class of variables """ @@ -20,7 +20,7 @@ def variable_feasibility_test(self, prob, tol = 1e-5): if var.cat == LpInteger: self.assertAlmostEqual(int(var.value() + tol), var.value()) - def constraint_feasibility_test(self, prob, tol = 1e-5): + def constraint_feasibility_test(self, prob, tol=1e-5): """ tests that the problem is feasible for its class of variables """ @@ -28,9 +28,11 @@ def constraint_feasibility_test(self, prob, tol = 1e-5): for constraint_name, constraint in list(prob.constraints.items()): self.assertTrue(constraint.valid(tol)) -if __name__ == '__main__': + +if __name__ == "__main__": from .test_coke import * from .test_facility import * from .test_cutting_stock import * from .test_tsp import * + unittest.main() diff --git a/Dip/src/dippy/tests/test_coke.py b/Dip/src/dippy/tests/test_coke.py index 6ebbaea9..95c74599 100644 --- a/Dip/src/dippy/tests/test_coke.py +++ b/Dip/src/dippy/tests/test_coke.py @@ -7,7 +7,6 @@ class TestCokeProblem(DippyTestCase): - def setUp(self): """ sets up the coke problem as a unittest @@ -35,7 +34,6 @@ def test_dippy_branch(self): self.constraint_feasibility_test(self.prob) - def create_coke_problem(): """ creates and returns the coke problem @@ -44,28 +42,13 @@ def create_coke_problem(): CC = 1.3 BIG_M = 1e10 - MINE_SUPPLY = { - "M1": 25.8, - "M2": 728, - "M3": 1456, - "M4": 49, - "M5": 36.9, - "M6": 1100, - } + MINE_SUPPLY = {"M1": 25.8, "M2": 728, "M3": 1456, "M4": 49, "M5": 36.9, "M6": 1100} MINES = list(MINE_SUPPLY.keys()) MINES.sort() LOCATIONS = ["L1", "L2", "L3", "L4", "L5", "L6"] - SIZE_COSTS = { - 0: 0, - 75: 4.4, - 150: 7.4, - 225: 10.5, - 300: 13.5, - 375: 16.5, - 450: 19.6, - } + SIZE_COSTS = {0: 0, 75: 4.4, 150: 7.4, 225: 10.5, 300: 13.5, 375: 16.5, 450: 19.6} SIZES = list(SIZE_COSTS.keys()) SIZES.sort() @@ -107,92 +90,94 @@ def read_table(data, coerce, transpose=False): for row in lines[2:]: items = row.split() for i, item in enumerate(items[1:]): - if transpose: key = (headings[i], items[0]) - else: key = (items[0], headings[i]) + if transpose: + key = (headings[i], items[0]) + else: + key = (items[0], headings[i]) result[key] = coerce(item) return result MINE_TRANSPORT = read_table(MINE_TRANSPORT_DATA, int) - CUST_TRANSPORT = read_table(CUST_TRANSPORT_DATA, int, \ - transpose=True) + CUST_TRANSPORT = read_table(CUST_TRANSPORT_DATA, int, transpose=True) - #add both parts of the network together + # add both parts of the network together ARC_COSTS = MINE_TRANSPORT.copy() ARC_COSTS.update(CUST_TRANSPORT) ARCS = list(ARC_COSTS.keys()) - LOCATIONS_SIZES = [(l, s) for l in LOCATIONS - for s in SIZES] + LOCATIONS_SIZES = [(l, s) for l in LOCATIONS 
for s in SIZES] prob = dippy.DipProblem("Coke", LpMinimize) # create variables - buildVars = LpVariable.dicts("Build", LOCATIONS_SIZES, - cat = LpBinary) + buildVars = LpVariable.dicts("Build", LOCATIONS_SIZES, cat=LpBinary) # create arcs - flowVars = LpVariable.dicts("Arcs", ARCS, lowBound=0, - upBound = BIG_M) + flowVars = LpVariable.dicts("Arcs", ARCS, lowBound=0, upBound=BIG_M) # objective - prob += 1e6 * sum(buildVars[(l, s)] * SIZE_COSTS[s] - for (l, s) in LOCATIONS_SIZES) + \ - sum(flowVars[(s, d)] * ARC_COSTS[(s, d)] - for (s, d) in ARCS), \ - "cost_of_building_and_transport" + prob += ( + 1e6 * sum(buildVars[(l, s)] * SIZE_COSTS[s] for (l, s) in LOCATIONS_SIZES) + + sum(flowVars[(s, d)] * ARC_COSTS[(s, d)] for (s, d) in ARCS), + "cost_of_building_and_transport", + ) # plant availability for loc in LOCATIONS: - prob += sum(flowVars[(loc, c)] for c in CUSTOMERS) \ - <= sum(buildVars[(loc, s)] * s - for s in SIZES), \ - "Plant_%s_availability"%loc + prob += ( + sum(flowVars[(loc, c)] for c in CUSTOMERS) + <= sum(buildVars[(loc, s)] * s for s in SIZES), + "Plant_%s_availability" % loc, + ) # one size for loc in LOCATIONS: - prob += sum(buildVars[(loc, s)] for s in SIZES) == 1, \ - "Plant_%s_size"%loc + prob += sum(buildVars[(loc, s)] for s in SIZES) == 1, "Plant_%s_size" % loc # conserve flow (mines) # flows are in terms of tonnes of coke for m in MINES: - prob += sum(flowVars[(m, j)] for j in LOCATIONS) \ - <= MINE_SUPPLY[m], "Supply_mine_%s"%m + prob += ( + sum(flowVars[(m, j)] for j in LOCATIONS) <= MINE_SUPPLY[m], + "Supply_mine_%s" % m, + ) for loc in LOCATIONS: - prob += sum(flowVars[(m, loc)] for m in MINES) - \ - CC * sum(flowVars[(loc, c)] for c in CUSTOMERS) \ - == 0, "Conserve_flow_location_%s"%loc + prob += ( + sum(flowVars[(m, loc)] for m in MINES) + - CC * sum(flowVars[(loc, c)] for c in CUSTOMERS) + == 0, + "Conserve_flow_location_%s" % loc, + ) for c in CUSTOMERS: - prob += sum(flowVars[(loc, c)] for loc in LOCATIONS) \ - >= CUSTOMER_DEMAND[c], "Demand_cust_%s"%c + prob += ( + sum(flowVars[(loc, c)] for loc in LOCATIONS) >= CUSTOMER_DEMAND[c], + "Demand_cust_%s" % c, + ) def do_branch(prob, sol): tol = 1e-10 for loc in LOCATIONS: - sol_size = sum(sol[buildVars[loc, size]] * \ - size for size in SIZES) + sol_size = sum(sol[buildVars[loc, size]] * size for size in SIZES) # determine if solsize is bigger than the largest # or smaller tham the smallest - if (abs(sol_size - max(SIZES)) < tol - or abs(sol_size - min(SIZES))) < tol: + if (abs(sol_size - max(SIZES)) < tol or abs(sol_size - min(SIZES))) < tol: continue - #find the first one bigger or equal to sol_size - bigger = min([s for s in SIZES - if s >= sol_size - tol]) + # find the first one bigger or equal to sol_size + bigger = min([s for s in SIZES if s >= sol_size - tol]) if bigger - sol_size > tol: - down_branch_ub = dict([(buildVars[loc, s], 0) - for s in SIZES - if s <= sol_size]) - up_branch_ub = dict([(buildVars[loc, s], 0) - for s in SIZES - if s > sol_size]) + down_branch_ub = dict( + [(buildVars[loc, s], 0) for s in SIZES if s <= sol_size] + ) + up_branch_ub = dict( + [(buildVars[loc, s], 0) for s in SIZES if s > sol_size] + ) return {}, down_branch_ub, {}, up_branch_ub return prob, do_branch -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/Dip/src/dippy/tests/test_cutting_stock.py b/Dip/src/dippy/tests/test_cutting_stock.py index fe0a411e..c91711f8 100644 --- a/Dip/src/dippy/tests/test_cutting_stock.py +++ b/Dip/src/dippy/tests/test_cutting_stock.py @@ -8,8 +8,6 @@ class 
TestCuttingStockProblem(DippyTestCase): - - def test_pulp_solve(self): """ tests that pulp can solve the problem @@ -27,10 +25,7 @@ def test_dippy_relaxation(self): """ self.prob, self.relaxation = create_cutting_stock_problem(doRelaxed=True) self.prob.relaxed_solver = self.relaxation - dippy.Solve(self.prob, { - 'doPriceCut':1, - 'CutCGL': 0, - }) + dippy.Solve(self.prob, {"doPriceCut": 1, "CutCGL": 0}) self.assertAlmostEqual(self.prob.objective.value(), 2.0) self.variable_feasibility_test(self.prob) self.constraint_feasibility_test(self.prob) @@ -41,19 +36,11 @@ def create_cutting_stock_problem(doRelaxed=False): creates and returns the cutting_stock problem """ - length = { - "9cm": 9, - "7cm": 7, - "5cm": 5 - } + length = {"9cm": 9, "7cm": 7, "5cm": 5} ITEMS = list(length.keys()) - demand = { - "9cm": 1, - "7cm": 1, - "5cm": 4 - } + demand = {"9cm": 1, "7cm": 1, "5cm": 4} total_patterns = sum(demand[i] for i in ITEMS) @@ -62,8 +49,7 @@ def create_cutting_stock_problem(doRelaxed=False): total_length[p] = 20 PATTERNS = list(total_length.keys()) - CUTS = [(p, i) for p in PATTERNS - for i in ITEMS] + CUTS = [(p, i) for p in PATTERNS for i in ITEMS] prob = dippy.DipProblem("Sponge_Rolls", LpMinimize) @@ -78,68 +64,67 @@ def create_cutting_stock_problem(doRelaxed=False): # Meet demand for i in ITEMS: - prob += sum(cutVars[(p, i)] for p in PATTERNS) \ - >= demand[i] + prob += sum(cutVars[(p, i)] for p in PATTERNS) >= demand[i] # Ordering patterns for i, p in enumerate(PATTERNS): if p != PATTERNS[-1]: - prob += useVars[p] >= useVars[PATTERNS[i+1]] + prob += useVars[p] >= useVars[PATTERNS[i + 1]] # Cut patterns for p in PATTERNS: if doRelaxed: - prob.relaxation[p] += sum(length[i] * - cutVars[(p, i)] for i in ITEMS) \ - <= total_length[p] * useVars[p] + prob.relaxation[p] += ( + sum(length[i] * cutVars[(p, i)] for i in ITEMS) + <= total_length[p] * useVars[p] + ) else: - prob += sum(length[i] * cutVars[(p, i)] for i in ITEMS) \ - <= total_length[p] * useVars[p] - + prob += ( + sum(length[i] * cutVars[(p, i)] for i in ITEMS) + <= total_length[p] * useVars[p] + ) def relaxed_solver(prob, patt, redCosts, convexDual): - ## print patt, "in", PATTERNS - ## print "redCosts =", redCosts - ## print "convexDual =", convexDual + ## print patt, "in", PATTERNS + ## print "redCosts =", redCosts + ## print "convexDual =", convexDual # get items with negative reduced cost - item_idx = [i for i in ITEMS \ - if redCosts[cutVars[(patt, i)]] < 0] + item_idx = [i for i in ITEMS if redCosts[cutVars[(patt, i)]] < 0] vars = [cutVars[(patt, i)] for i in item_idx] obj = [-redCosts[cutVars[(patt, i)]] for i in item_idx] weights = [length[i] for i in item_idx] - ## print "Using knapsack heuristic" - ## print "item_idx =", item_idx - ## print "obj =", obj - ## print "weights =", weights - ## print "capacity =", total_length[patt] + ## print "Using knapsack heuristic" + ## print "item_idx =", item_idx + ## print "obj =", obj + ## print "weights =", weights + ## print "capacity =", total_length[patt] z, solution = kp(obj, weights, total_length[patt]) - ## print "Number of items = ", len(item_idx) - ## for i in range(len(item_idx)): - ## print "Item ", item_idx[i], " has profit ", obj[i], " and weight ", weights[i] - ## print "Knapsack has capacity ", total_length[patt] - ## print "Value = ", z - ## for i in range(len(item_idx)): - ## print "Included[", item_idx[i], "] = ", solution[i] - - total_weight = sum(w * solution[i] \ - for i, w in enumerate(weights)) + ## print "Number of items = ", len(item_idx) + ## for i in 
range(len(item_idx)): + ## print "Item ", item_idx[i], " has profit ", obj[i], " and weight ", weights[i] + ## print "Knapsack has capacity ", total_length[patt] + ## print "Value = ", z + ## for i in range(len(item_idx)): + ## print "Included[", item_idx[i], "] = ", solution[i] + + total_weight = sum(w * solution[i] for i, w in enumerate(weights)) assert total_weight <= total_length[patt] # add in reduced cost of useVars totalCut = sum(solution) - ## print "z, redCosts[useVars[", patt, "]], convexDual", z, redCosts[useVars[patt]], convexDual + ## print "z, redCosts[useVars[", patt, "]], convexDual", z, redCosts[useVars[patt]], convexDual if totalCut > 0: rc = -z + redCosts[useVars[patt]] - convexDual else: rc = -convexDual - ## print "rc =", rc - ## sys.stdout.flush() - if rc < 0: # Using this pattern - var_values = dict([(v, solution[i]) \ - for i, v in enumerate(vars) \ - if solution[i] > 0]) + ## print "rc =", rc + ## sys.stdout.flush() + if rc < 0: # Using this pattern + var_values = dict( + [(v, solution[i]) for i, v in enumerate(vars) if solution[i] > 0] + ) if totalCut > 0: var_values[useVars[patt]] = 1 cost = 1 @@ -168,8 +153,7 @@ def kp(obj, weights, capacity): # Check all items for inclusion for i in range(n): if weights[i] <= capacity: - zyes, solyes = kp(obj, weights, \ - capacity - weights[i]) + zyes, solyes = kp(obj, weights, capacity - weights[i]) zyes += obj[i] solyes[i] += 1 if zyes > zbest: @@ -181,5 +165,5 @@ def kp(obj, weights, capacity): return prob, relaxed_solver -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/Dip/src/dippy/tests/test_facility.py b/Dip/src/dippy/tests/test_facility.py index dcf08b10..5c83c372 100644 --- a/Dip/src/dippy/tests/test_facility.py +++ b/Dip/src/dippy/tests/test_facility.py @@ -10,7 +10,6 @@ class TestFacilityProblem(DippyTestCase): - def setUp(self): """ sets up the coke problem as a unittest @@ -28,26 +27,24 @@ def test_pulp_solve(self): self.variable_feasibility_test(self.prob) self.constraint_feasibility_test(self.prob) - #~ def test_dippy_branch(self): - #~ """ - #~ tests that the custom branch can solve the problem - #~ """ - #~ self.prob.branch_method = self.branch - #~ dippy.Solve(self.prob, { - #~ 'TolZero': '%s' % self.tol, - #~ }) - #~ self.assertAlmostEqual(self.prob.objective.value(), 212.0) - #~ self.variable_feasibility_test(self.prob) - #~ self.constraint_feasibility_test(self.prob) + # ~ def test_dippy_branch(self): + # ~ """ + # ~ tests that the custom branch can solve the problem + # ~ """ + # ~ self.prob.branch_method = self.branch + # ~ dippy.Solve(self.prob, { + # ~ 'TolZero': '%s' % self.tol, + # ~ }) + # ~ self.assertAlmostEqual(self.prob.objective.value(), 212.0) + # ~ self.variable_feasibility_test(self.prob) + # ~ self.constraint_feasibility_test(self.prob) def test_dippy_cuts(self): """ tests that the custom branch can solve the problem """ self.prob.generate_cuts = self.cuts - dippy.Solve(self.prob, { - 'TolZero': '%s' % self.tol, - }) + dippy.Solve(self.prob, {"TolZero": "%s" % self.tol}) self.assertAlmostEqual(self.prob.objective.value(), 212.0) self.variable_feasibility_test(self.prob) self.constraint_feasibility_test(self.prob) @@ -58,15 +55,12 @@ def test_dippy_branch_cuts(self): """ self.prob.branch_method = self.branch self.prob.generate_cuts = self.cuts - dippy.Solve(self.prob, { - 'TolZero': '%s' % self.tol, - }) + dippy.Solve(self.prob, {"TolZero": "%s" % self.tol}) self.assertAlmostEqual(self.prob.objective.value(), 212.0) 
self.variable_feasibility_test(self.prob) self.constraint_feasibility_test(self.prob) - def create_facility_problem(): """ creates and returns the facility problem @@ -78,25 +72,14 @@ def create_facility_problem(): # The requirements for the products - REQUIREMENT = { - 1 : 66, - 2 : 4, - 3 : 85, - 4 : 93, - 5 : 68, - 6 : 76, - 7 : 74, - 8 : 39, - 9 : 66, - 10 : 17, - } + REQUIREMENT = {1: 66, 2: 4, 3: 85, 4: 93, 5: 68, 6: 76, 7: 74, 8: 39, 9: 66, 10: 17} # Set of all products PRODUCTS = list(REQUIREMENT.keys()) PRODUCTS.sort() # Set of all locations - LOCATIONS = [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + LOCATIONS = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] LOCATIONS.sort() # The capacity of the facilities @@ -105,14 +88,11 @@ def create_facility_problem(): prob = dippy.DipProblem("Facility_Location") - assign_vars = LpVariable.dicts("AtLocation", - [(i, j) for i in LOCATIONS - for j in PRODUCTS], - 0, 1, LpBinary) - use_vars = LpVariable.dicts("UseLocation", - LOCATIONS, 0, 1, LpBinary) - waste_vars = LpVariable.dicts("Waste", - LOCATIONS, 0, CAPACITY) + assign_vars = LpVariable.dicts( + "AtLocation", [(i, j) for i in LOCATIONS for j in PRODUCTS], 0, 1, LpBinary + ) + use_vars = LpVariable.dicts("UseLocation", LOCATIONS, 0, 1, LpBinary) + waste_vars = LpVariable.dicts("Waste", LOCATIONS, 0, CAPACITY) # objective: minimise waste prob += lpSum(waste_vars[i] for i in LOCATIONS), "min" @@ -123,9 +103,11 @@ def create_facility_problem(): # aggregate CAPACITY constraints for i in LOCATIONS: - prob += lpSum(assign_vars[(i, j)] * REQUIREMENT[j] - for j in PRODUCTS) + waste_vars[i] == \ - CAPACITY * use_vars[i] + prob += ( + lpSum(assign_vars[(i, j)] * REQUIREMENT[j] for j in PRODUCTS) + + waste_vars[i] + == CAPACITY * use_vars[i] + ) # disaggregate CAPACITY constraints for i in LOCATIONS: @@ -135,26 +117,29 @@ def create_facility_problem(): # Ordering constraints for index, location in enumerate(LOCATIONS): if index > 0: - prob += use_vars[LOCATIONS[index-1]] >= use_vars[location] + prob += use_vars[LOCATIONS[index - 1]] >= use_vars[location] # Anti-symmetry branches def choose_antisymmetry_branch(prob, sol): num_locations = sum(sol[use_vars[i]] for i in LOCATIONS) - up = ceil(num_locations) # Round up to next nearest integer - down = floor(num_locations) # Round down - if (up - num_locations > tol) \ - and (num_locations - down > tol): # Is fractional? + up = ceil(num_locations) # Round up to next nearest integer + down = floor(num_locations) # Round down + if (up - num_locations > tol) and ( + num_locations - down > tol + ): # Is fractional? 
# Down branch: provide upper bounds, lower bounds are default - down_branch_ub = dict([(use_vars[LOCATIONS[n]], 0) - for n in range(int(down), len(LOCATIONS))]) + down_branch_ub = dict( + [(use_vars[LOCATIONS[n]], 0) for n in range(int(down), len(LOCATIONS))] + ) # Up branch: provide lower bounds, upper bounds are default - up_branch_lb = dict([(use_vars[LOCATIONS[n]], 1) - for n in range(0, int(up))]) + up_branch_lb = dict( + [(use_vars[LOCATIONS[n]], 1) for n in range(0, int(up))] + ) # Return the advanced branch to DIP return {}, down_branch_ub, up_branch_lb, {} def generate_weight_cuts(prob, sol): - ## print "In generate_weight_cuts, sol = ", sol + ## print "In generate_weight_cuts, sol = ", sol # Define mu and T for each knapsack mu = {} @@ -170,9 +155,10 @@ def generate_weight_cuts(prob, sol): bestAssign = None for i in LOCATIONS: for j in PRODUCTS: - if j not in S[i]: # If this product is not in the subset - if (sol[assign_vars[(i, j)]] > bestValue) \ - and (REQUIREMENT[j] <= mu[i]): + if j not in S[i]: # If this product is not in the subset + if (sol[assign_vars[(i, j)]] > bestValue) and ( + REQUIREMENT[j] <= mu[i] + ): # The assignment variable for this product is closer # to 1 than any other product checked, and "fits" in # this location's remaining space @@ -180,22 +166,23 @@ def generate_weight_cuts(prob, sol): bestAssign = (i, j) # Make the best assignment found across all products and locactions if bestAssign: - (i,j) = bestAssign - mu[i] -= REQUIREMENT[j] # Decrease spare CAPACITY at this location - S[i].append(j) # Assign this product to this location's set + (i, j) = bestAssign + mu[i] -= REQUIREMENT[j] # Decrease spare CAPACITY at this location + S[i].append(j) # Assign this product to this location's set else: - assigning = False # Didn't find anything to assign - stop + assigning = False # Didn't find anything to assign - stop # Generate the weight cuts from the sets found above new_cuts = [] for i in LOCATIONS: - if len(S[i]) > 0: # If an item assigned to this location - con = LpAffineExpression() # Start a new constraint - con += sum(REQUIREMENT[j] * assign_vars[(i, j)] - for j in S[i]) - con += sum(max(0, REQUIREMENT[j] - mu[i]) * - assign_vars[(i, j)] for j in PRODUCTS - if j not in S[i]) + if len(S[i]) > 0: # If an item assigned to this location + con = LpAffineExpression() # Start a new constraint + con += sum(REQUIREMENT[j] * assign_vars[(i, j)] for j in S[i]) + con += sum( + max(0, REQUIREMENT[j] - mu[i]) * assign_vars[(i, j)] + for j in PRODUCTS + if j not in S[i] + ) new_cuts.append(con <= CAPACITY - mu[i]) ## print new_cuts @@ -204,9 +191,8 @@ def generate_weight_cuts(prob, sol): return sols - return prob, choose_antisymmetry_branch, generate_weight_cuts -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/Dip/src/dippy/tests/test_tsp.py b/Dip/src/dippy/tests/test_tsp.py index 4b0b0745..6283b624 100644 --- a/Dip/src/dippy/tests/test_tsp.py +++ b/Dip/src/dippy/tests/test_tsp.py @@ -8,13 +8,10 @@ class TestTSPProblem(DippyTestCase): - def setUp(self): - self.prob, self.generate_cuts, self.is_solution_feasible = \ - create_tsp_problem() + self.prob, self.generate_cuts, self.is_solution_feasible = create_tsp_problem() self.tol = 1e-6 - def test_pulp_solve(self): """ tests that pulp can solve the problem @@ -25,7 +22,7 @@ def test_pulp_solve(self): self.variable_feasibility_test(self.prob) self.constraint_feasibility_test(self.prob) # would like this in here but not sure how to do it - #~ 
self.assertFalse(self.is_solution_feasible()) + # ~ self.assertFalse(self.is_solution_feasible()) def test_dippy_solve(self): """ @@ -40,6 +37,7 @@ def test_dippy_solve(self): # would like this in here but not sure how to do it self.assertTrue(self.is_solution_feasible(self.prob, sol, self.tol)) + def create_tsp_problem(doRelaxed=False): """ creates and returns the tsp problem @@ -50,27 +48,36 @@ def create_tsp_problem(doRelaxed=False): # 2d Euclidean TSP with extremely simple cut generation # x,y coords of cities - CITY_LOCS = [(0, 2), (0, 4), (1, 2), (1, 4), \ - (4, 1), (4, 4), (4, 5), (5, 0), \ - (5, 2), (5, 5)] + CITY_LOCS = [ + (0, 2), + (0, 4), + (1, 2), + (1, 4), + (4, 1), + (4, 4), + (4, 5), + (5, 0), + (5, 2), + (5, 5), + ] CITIES = list(range(len(CITY_LOCS))) - ARCS = [] # list of arcs (no duplicates) - ARC_COSTS = {} # distance + ARCS = [] # list of arcs (no duplicates) + ARC_COSTS = {} # distance # for each city, list of arcs into/out of CITY_ARCS = [[] for i in CITIES] # use 2d euclidean distance def dist(x1, y1, x2, y2): - return sqrt((x1-x2)**2 + (y1-y2)**2) + return sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2) # construct list of arcs for i in CITIES: i_x, i_y = CITY_LOCS[i] - for j in CITIES[i+1:]: + for j in CITIES[i + 1 :]: j_x, j_y = CITY_LOCS[j] - ARC_COSTS[(i,j)] = dist(i_x, i_y, j_x, j_y) + ARC_COSTS[(i, j)] = dist(i_x, i_y, j_x, j_y) ARCS.append((i, j)) CITY_ARCS[i].append((i, j)) CITY_ARCS[j].append((i, j)) @@ -102,30 +109,27 @@ def generate_cuts(prob, sol): not_connected = set(CITIES) while not_connected: - # print "not_connected", [n for n in not_connected] + # print "not_connected", [n for n in not_connected] start = not_connected.pop() nodes, arcs = get_subtour(sol, start) - if len(nodes) == len(arcs) and \ - len(nodes) < len(CITIES): - cons.append( sum(arc_vars[a] for a in arcs) \ - <= len(arcs) - 1 ) - # print "nodes", [n for n in nodes] - # print "arcs", [a for a in arcs] + if len(nodes) == len(arcs) and len(nodes) < len(CITIES): + cons.append(sum(arc_vars[a] for a in arcs) <= len(arcs) - 1) + # print "nodes", [n for n in nodes] + # print "arcs", [a for a in arcs] nodes.remove(start) not_connected -= nodes - ## print "# cons = ", len(cons) - ## print "cons = ", [ con for con in cons ] + ## print "# cons = ", len(cons) + ## print "cons = ", [ con for con in cons ] return cons def is_solution_feasible(prob, sol, tol): nodes, arcs = get_subtour(sol, 0) - # print "Checking: # nodes = ", len(nodes), \ - # ", # cities = ", len(CITIES) - # print "nodes", [n for n in nodes] + # print "Checking: # nodes = ", len(nodes), \ + # ", # cities = ", len(CITIES) + # print "nodes", [n for n in nodes] - return len(nodes) == len(arcs) and \ - len(nodes) == len(CITIES) + return len(nodes) == len(arcs) and len(nodes) == len(CITIES) def get_subtour(sol, node): # returns: list of nodes and arcs @@ -141,26 +145,25 @@ def get_subtour(sol, node): while to_process: c = to_process.pop() not_processed.remove(c) - new_arcs = [ symmetric[(c, i)] \ - for i in not_processed \ - if sol[ \ - arc_vars[symmetric[(c, i)]]] - > one] - new_nodes = [ i for i in not_processed \ - if symmetric[(i, c)] in new_arcs ] - # print "new_nodes", [n for n in new_nodes] - # print "new_arcs", [a for a in new_arcs] + new_arcs = [ + symmetric[(c, i)] + for i in not_processed + if sol[arc_vars[symmetric[(c, i)]]] > one + ] + new_nodes = [i for i in not_processed if symmetric[(i, c)] in new_arcs] + # print "new_nodes", [n for n in new_nodes] + # print "new_arcs", [a for a in new_arcs] arcs |= set(new_arcs) nodes |= 
set(new_nodes) to_process |= set(new_nodes) - # print "not_processed", [n for n in not_processed] - # print "nodes", [n for n in nodes] - # print "arcs", [a for a in arcs] + # print "not_processed", [n for n in not_processed] + # print "nodes", [n for n in nodes] + # print "arcs", [a for a in arcs] return nodes, arcs return prob, generate_cuts, is_solution_feasible -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/format-all-sources.sh b/format-all-sources.sh new file mode 100755 index 00000000..a051d1fd --- /dev/null +++ b/format-all-sources.sh @@ -0,0 +1,13 @@ +#!/bin/sh +# script to format all sources +# in subdirectories according +# to .clang-format - 2019 - Haroldo +# modified from Cbc - Simon + +echo formatting all c++ source using .clang-format +find ./ -iname '*.h' -o -iname '*.cpp' -exec clang-format -style=llvm -i {} + +echo formatting all python source using black with .flake8 +black Dip/src + +# Check for formatting. Use this in CI pipeline +# find ./ -iname '*.h' -o -iname '*.cpp' -exec clang-format -style=llvm -output-replacements-xml {} + | grep -c "<replacement " >/dev/null
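The commented-out command at the end of format-all-sources.sh points at how the same tools could serve as a CI gate rather than an in-place formatter. The snippet below is only a sketch of that idea, assuming a POSIX shell runner with clang-format and black available on the PATH; the variable names and the failure message are illustrative and not part of this PR.

#!/bin/sh
# Sketch of a CI formatting check: exit non-zero when any file would be
# reformatted, instead of rewriting files in place.

# Count pending clang-format replacements across the C++ sources. The
# trailing space in the grep pattern keeps the <replacements> wrapper
# element from being counted.
cpp_changes=$(find ./ \( -iname '*.h' -o -iname '*.cpp' \) \
  -exec clang-format -style=llvm -output-replacements-xml {} + \
  | grep -c "<replacement ")

# black --check lists files that would change and exits non-zero if any would.
black --check Dip/src
py_status=$?

if [ "$cpp_changes" -gt 0 ] || [ "$py_status" -ne 0 ]; then
  echo "formatting check failed - run ./format-all-sources.sh"
  exit 1
fi
echo "formatting check passed"

Note that the parentheses around the two -iname tests make -exec apply to both patterns; without them, find binds -exec only to the '*.cpp' test and header files are skipped.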