diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 2c7a7bd1..d317aae9 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -3,7 +3,7 @@ name: Bug report about: Create a report to help us improve title: '' labels: bug -assignees: cuihantao +assignees: jinningwang --- diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md index f12c71ce..7569b5f3 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -3,7 +3,7 @@ name: Feature request about: Suggest an idea for this project title: '' labels: '' -assignees: +assignees: jinningwang --- diff --git a/README.md b/README.md index dcad8692..d839c7f5 100644 --- a/README.md +++ b/README.md @@ -12,6 +12,16 @@ Python Software for Power System Dispatch Modeling and Co-Simulation with Dynani With the built-in interface with dynamic simulation engine, LTB ANDES, AMS enables Dynamics Interfaced Stability Constrained Production Cost and Market Operation Modeling. +AMS produces credible dispatch results. +The following table shows the comparison of DC optimal power flow results between AMS and other tools. + +| | AMS GUROBI | AMS ECOS | PYPOWER | pandapower | +|---------------|--------------|--------------|--------------|--------------| +| case39.m | 41,461.94 | 41,461.94 | 41,461.94 | 41,461.94 | +| ieee14_uced.xlsx | 45.72 | 45.72 | 45.72 | 45.72 | +| ieee39_uced.xlsx | 36,513.47 | 36,513.47 | 36,513.47 | 36,513.47 | +| npcc.m | 57,664,760.85| 57,664,760.85| 57,664,760.85| 57,664,760.85| + AMS is currently under active development. Use the following resources to get involved. @@ -41,6 +51,17 @@ See [GitHub contributors][GitHub contributors] for the contributor list. AMS is licensed under the [GPL v3 License](./LICENSE). +# Related Projects + ++ [CVXPY](https://github.com/cvxpy/cvxpy) ++ [Visualizations of Mittelmann benchmarks](https://mattmilten.github.io/mittelmann-plots/) ++ [CBC Solver](https://github.com/coin-or/Cbc) ++ [MATPOWER](https://github.com/MATPOWER/matpower) ++ [PYPOWER](https://github.com/rwl/PYPOWER) ++ [pandapower](https://github.com/e2nIEE/pandapower) + +Some commercial solvers provide academic licenses, such as COPT, GUROBI, CPLEX, and MOSEK. 
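The comparison table added to the README above can be reproduced with a short script. The sketch below is illustrative only: it assumes the ANDES-style `ams.load` / `ams.get_case` helpers and the `DCOPF.run(solver=...)` / `DCOPF.obj.v` attributes, and the solver name is passed through to whichever CVXPY backend (GUROBI, ECOS, ...) is installed and licensed.

```python
# Minimal sketch (not part of this diff) of reproducing one cell of the
# DCOPF comparison table; API names follow the ANDES-style conventions and
# the `obj.v` attribute for the objective value is an assumption.
import ams

# load one of the shipped cases listed in the table
sp = ams.load(ams.get_case("ieee14/ieee14_uced.xlsx"), setup=True)

# solve DC optimal power flow with a chosen CVXPY solver backend
sp.DCOPF.run(solver="ECOS")

# objective value to compare against the PYPOWER / pandapower columns
print(sp.DCOPF.obj.v)
```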
+ * * * [GitHub releases]: https://github.com/CURENT/ams/releases diff --git a/ams/cases/5bus/pjm5bus_uced.json b/ams/cases/5bus/pjm5bus_uced.json index 2f997f79..f688d112 100644 --- a/ams/cases/5bus/pjm5bus_uced.json +++ b/ams/cases/5bus/pjm5bus_uced.json @@ -110,7 +110,8 @@ "q0": 0.9861, "vmax": 1.1, "vmin": 0.9, - "owner": null + "owner": null, + "ctrl": 1.0 }, { "idx": "PQ_2", @@ -122,7 +123,8 @@ "q0": 0.9861, "vmax": 1.1, "vmin": 0.9, - "owner": null + "owner": null, + "ctrl": 1.0 }, { "idx": "PQ_3", @@ -134,7 +136,8 @@ "q0": 1.3147, "vmax": 1.1, "vmin": 0.9, - "owner": null + "owner": null, + "ctrl": 1.0 } ], "PV": [ @@ -286,7 +289,7 @@ { "idx": 0, "u": 1.0, - "name": "Line 1-2", + "name": "Line AB", "bus1": 0, "bus2": 1, "Sn": 100.0, @@ -304,9 +307,9 @@ "trans": 0.0, "tap": 1.0, "phi": 0.0, - "rate_a": 999.0, - "rate_b": 999.0, - "rate_c": 999.0, + "rate_a": 4.0, + "rate_b": 0.0, + "rate_c": 0.0, "owner": null, "xcoord": null, "ycoord": null, @@ -316,7 +319,7 @@ { "idx": 1, "u": 1.0, - "name": "Line 1-4", + "name": "Line AD", "bus1": 0, "bus2": 3, "Sn": 100.0, @@ -334,9 +337,9 @@ "trans": 0.0, "tap": 1.0, "phi": 0.0, - "rate_a": 999.0, - "rate_b": 999.0, - "rate_c": 999.0, + "rate_a": 0.0, + "rate_b": 0.0, + "rate_c": 0.0, "owner": null, "xcoord": null, "ycoord": null, @@ -346,7 +349,7 @@ { "idx": 2, "u": 1.0, - "name": "Line 1-5", + "name": "Line AE", "bus1": 0, "bus2": 4, "Sn": 100.0, @@ -364,9 +367,9 @@ "trans": 0.0, "tap": 1.0, "phi": 0.0, - "rate_a": 999.0, - "rate_b": 999.0, - "rate_c": 999.0, + "rate_a": 0.0, + "rate_b": 0.0, + "rate_c": 0.0, "owner": null, "xcoord": null, "ycoord": null, @@ -376,7 +379,7 @@ { "idx": 3, "u": 1.0, - "name": "Line 2-3", + "name": "Line BC", "bus1": 1, "bus2": 2, "Sn": 100.0, @@ -394,9 +397,9 @@ "trans": 0.0, "tap": 1.0, "phi": 0.0, - "rate_a": 999.0, - "rate_b": 999.0, - "rate_c": 999.0, + "rate_a": 0.0, + "rate_b": 0.0, + "rate_c": 0.0, "owner": null, "xcoord": null, "ycoord": null, @@ -406,7 +409,7 @@ { "idx": 4, "u": 1.0, - "name": "Line 3-4", + "name": "Line CD", "bus1": 2, "bus2": 3, "Sn": 100.0, @@ -424,9 +427,9 @@ "trans": 0.0, "tap": 1.0, "phi": 0.0, - "rate_a": 999.0, - "rate_b": 999.0, - "rate_c": 999.0, + "rate_a": 0.0, + "rate_b": 0.0, + "rate_c": 0.0, "owner": null, "xcoord": null, "ycoord": null, @@ -436,7 +439,7 @@ { "idx": 5, "u": 1.0, - "name": "Line 4-5", + "name": "Line DE", "bus1": 3, "bus2": 4, "Sn": 100.0, @@ -454,9 +457,9 @@ "trans": 0.0, "tap": 1.0, "phi": 0.0, - "rate_a": 999.0, - "rate_b": 999.0, - "rate_c": 999.0, + "rate_a": 2.4, + "rate_b": 0.0, + "rate_c": 0.0, "owner": null, "xcoord": null, "ycoord": null, @@ -466,7 +469,7 @@ { "idx": 6, "u": 1.0, - "name": "Line 1-2 (2)", + "name": "Line AB2", "bus1": 0, "bus2": 1, "Sn": 100.0, @@ -484,9 +487,9 @@ "trans": 0.0, "tap": 1.0, "phi": 0.0, - "rate_a": 999.0, - "rate_b": 999.0, - "rate_c": 999.0, + "rate_a": 4.0, + "rate_b": 0.0, + "rate_c": 0.0, "owner": null, "xcoord": null, "ycoord": null, @@ -583,7 +586,7 @@ "csu": 0.0, "csd": 0.0, "c2": 0.0, - "c1": 1450.0, + "c1": 0.145, "c0": 0.0 }, { @@ -595,7 +598,7 @@ "csu": 0.0, "csd": 0.0, "c2": 0.0, - "c1": 3000.0, + "c1": 0.3, "c0": 0.0 }, { @@ -607,7 +610,7 @@ "csu": 0.0, "csd": 0.0, "c2": 0.0, - "c1": 4000.0, + "c1": 0.4, "c0": 0.0 }, { @@ -619,7 +622,7 @@ "csu": 0.0, "csd": 0.0, "c2": 0.0, - "c1": 3000.0, + "c1": 0.1, "c0": 0.0 } ], @@ -717,6 +720,29 @@ "cnsr": 0.1 } ], + "DCost": [ + { + "idx": "DCost_1", + "u": 1.0, + "name": "Dcost 1", + "pq": "PQ_1", + "cdp": 1000.0 + }, + { + "idx": "DCost_2", + "u": 1.0, + 
"name": "Dcost 2", + "pq": "PQ_2", + "cdp": 1000.0 + }, + { + "idx": "DCost_3", + "u": 1.0, + "name": "Dcost 3", + "pq": "PQ_3", + "cdp": 1000.0 + } + ], "EDTSlot": [ { "idx": "EDT1", diff --git a/ams/cases/5bus/pjm5bus_uced.xlsx b/ams/cases/5bus/pjm5bus_uced.xlsx index 2a9446ca..5634cba9 100644 Binary files a/ams/cases/5bus/pjm5bus_uced.xlsx and b/ams/cases/5bus/pjm5bus_uced.xlsx differ diff --git a/ams/cases/5bus/pjm5bus_uced_esd1.xlsx b/ams/cases/5bus/pjm5bus_uced_esd1.xlsx index b0a1926d..4e35afe1 100644 Binary files a/ams/cases/5bus/pjm5bus_uced_esd1.xlsx and b/ams/cases/5bus/pjm5bus_uced_esd1.xlsx differ diff --git a/ams/cases/ieee123/ieee123_regcv1.xlsx b/ams/cases/ieee123/ieee123_regcv1.xlsx index 8bf6cfec..a8599d5a 100644 Binary files a/ams/cases/ieee123/ieee123_regcv1.xlsx and b/ams/cases/ieee123/ieee123_regcv1.xlsx differ diff --git a/ams/cases/ieee14/ieee14_uced.xlsx b/ams/cases/ieee14/ieee14_uced.xlsx index dc7a723b..a484cfb9 100644 Binary files a/ams/cases/ieee14/ieee14_uced.xlsx and b/ams/cases/ieee14/ieee14_uced.xlsx differ diff --git a/ams/cases/ieee39/ieee39.xlsx b/ams/cases/ieee39/ieee39.xlsx index 45d1e50d..eeb6d52f 100644 Binary files a/ams/cases/ieee39/ieee39.xlsx and b/ams/cases/ieee39/ieee39.xlsx differ diff --git a/ams/cases/ieee39/ieee39_uced.xlsx b/ams/cases/ieee39/ieee39_uced.xlsx index 1db5c73d..1bb21eab 100644 Binary files a/ams/cases/ieee39/ieee39_uced.xlsx and b/ams/cases/ieee39/ieee39_uced.xlsx differ diff --git a/ams/cases/ieee39/ieee39_uced_esd1.xlsx b/ams/cases/ieee39/ieee39_uced_esd1.xlsx index eb3cb8d9..362ca5be 100644 Binary files a/ams/cases/ieee39/ieee39_uced_esd1.xlsx and b/ams/cases/ieee39/ieee39_uced_esd1.xlsx differ diff --git a/ams/cases/ieee39/ieee39_uced_pvd1.xlsx b/ams/cases/ieee39/ieee39_uced_pvd1.xlsx new file mode 100644 index 00000000..2710b4d4 Binary files /dev/null and b/ams/cases/ieee39/ieee39_uced_pvd1.xlsx differ diff --git a/ams/cases/ieee39/ieee39_uced_vis.xlsx b/ams/cases/ieee39/ieee39_uced_vis.xlsx new file mode 100644 index 00000000..95475120 Binary files /dev/null and b/ams/cases/ieee39/ieee39_uced_vis.xlsx differ diff --git a/ams/cases/matpower/case39.m b/ams/cases/matpower/case39.m new file mode 100644 index 00000000..5a0836cd --- /dev/null +++ b/ams/cases/matpower/case39.m @@ -0,0 +1,205 @@ +function mpc = case39 +%CASE39 Power flow data for 39 bus New England system. +% Please see CASEFORMAT for details on the case file format. +% +% Data taken from [1] with the following modifications/additions: +% +% - renumbered gen buses consecutively (as in [2] and [4]) +% - added Pmin = 0 for all gens +% - added Qmin, Qmax for gens at 31 & 39 (copied from gen at 35) +% - added Vg based on V in bus data (missing for bus 39) +% - added Vg, Pg, Pd, Qd at bus 39 from [2] (same in [4]) +% - added Pmax at bus 39: Pmax = Pg + 100 +% - added line flow limits and area data from [4] +% - added voltage limits, Vmax = 1.06, Vmin = 0.94 +% - added identical quadratic generator costs +% - increased Pmax for gen at bus 34 from 308 to 508 +% (assumed typo in [1], makes initial solved case feasible) +% - re-solved power flow +% +% Notes: +% - Bus 39, its generator and 2 connecting lines were added +% (by authors of [1]) to represent the interconnection with +% the rest of the eastern interconnect, and did not include +% Vg, Pg, Qg, Pd, Qd, Pmin, Pmax, Qmin or Qmax. +% - As the swing bus, bus 31 did not include and Q limits. 
+% - The voltages, etc in [1] appear to be quite close to the +% power flow solution of the case before adding bus 39 with +% it's generator and connecting branches, though the solution +% is not exact. +% - Explicit voltage setpoints for gen buses are not given, so +% they are taken from the bus data, however this results in two +% binding Q limits at buses 34 & 37, so the corresponding +% voltages have probably deviated from their original setpoints. +% - The generator locations and types are as follows: +% 1 30 hydro +% 2 31 nuke01 +% 3 32 nuke02 +% 4 33 fossil02 +% 5 34 fossil01 +% 6 35 nuke03 +% 7 36 fossil04 +% 8 37 nuke04 +% 9 38 nuke05 +% 10 39 interconnection to rest of US/Canada +% +% This is a solved power flow case, but it includes the following +% violations: +% - Pmax violated at bus 31: Pg = 677.87, Pmax = 646 +% - Qmin violated at bus 37: Qg = -1.37, Qmin = 0 +% +% References: +% [1] G. W. Bills, et.al., "On-Line Stability Analysis Study" +% RP90-1 Report for the Edison Electric Institute, October 12, 1970, +% pp. 1-20 - 1-35. +% prepared by E. M. Gulachenski - New England Electric System +% J. M. Undrill - General Electric Co. +% "generally representative of the New England 345 KV system, but is +% not an exact or complete model of any past, present or projected +% configuration of the actual New England 345 KV system. +% [2] M. A. Pai, Energy Function Analysis for Power System Stability, +% Kluwer Academic Publishers, Boston, 1989. +% (references [3] as source of data) +% [3] Athay, T.; Podmore, R.; Virmani, S., "A Practical Method for the +% Direct Analysis of Transient Stability," IEEE Transactions on Power +% Apparatus and Systems , vol.PAS-98, no.2, pp.573-584, March 1979. +% URL: https://doi.org/10.1109/TPAS.1979.319407 +% (references [1] as source of data) +% [4] Data included with TC Calculator at http://www.pserc.cornell.edu/tcc/ +% for 39-bus system. 
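The two limit violations documented in the header comments can be confirmed directly from the `mpc.gen` rows defined later in this file; the quick check below is illustrative and not part of case39.m.

```python
# Illustrative check (not part of case39.m) of the two violations noted above,
# using values from the mpc.gen rows defined later in this file.
Pg_31, Pmax_31 = 677.871, 646.0      # generator at bus 31
Qg_37, Qmin_37 = -1.36945, 0.0       # generator at bus 37

assert Pg_31 > Pmax_31               # Pmax violated at bus 31
assert Qg_37 < Qmin_37               # Qmin violated at bus 37
```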
+ +% MATPOWER + +%% MATPOWER Case Format : Version 2 +mpc.version = '2'; + +%%----- Power Flow Data -----%% +%% system MVA base +mpc.baseMVA = 100; + +%% bus data +% bus_i type Pd Qd Gs Bs area Vm Va baseKV zone Vmax Vmin +mpc.bus = [ + 1 1 97.6 44.2 0 0 2 1.0393836 -13.536602 345 1 1.06 0.94; + 2 1 0 0 0 0 2 1.0484941 -9.7852666 345 1 1.06 0.94; + 3 1 322 2.4 0 0 2 1.0307077 -12.276384 345 1 1.06 0.94; + 4 1 500 184 0 0 1 1.00446 -12.626734 345 1 1.06 0.94; + 5 1 0 0 0 0 1 1.0060063 -11.192339 345 1 1.06 0.94; + 6 1 0 0 0 0 1 1.0082256 -10.40833 345 1 1.06 0.94; + 7 1 233.8 84 0 0 1 0.99839728 -12.755626 345 1 1.06 0.94; + 8 1 522 176.6 0 0 1 0.99787232 -13.335844 345 1 1.06 0.94; + 9 1 6.5 -66.6 0 0 1 1.038332 -14.178442 345 1 1.06 0.94; + 10 1 0 0 0 0 1 1.0178431 -8.170875 345 1 1.06 0.94; + 11 1 0 0 0 0 1 1.0133858 -8.9369663 345 1 1.06 0.94; + 12 1 8.53 88 0 0 1 1.000815 -8.9988236 345 1 1.06 0.94; + 13 1 0 0 0 0 1 1.014923 -8.9299272 345 1 1.06 0.94; + 14 1 0 0 0 0 1 1.012319 -10.715295 345 1 1.06 0.94; + 15 1 320 153 0 0 3 1.0161854 -11.345399 345 1 1.06 0.94; + 16 1 329 32.3 0 0 3 1.0325203 -10.033348 345 1 1.06 0.94; + 17 1 0 0 0 0 2 1.0342365 -11.116436 345 1 1.06 0.94; + 18 1 158 30 0 0 2 1.0315726 -11.986168 345 1 1.06 0.94; + 19 1 0 0 0 0 3 1.0501068 -5.4100729 345 1 1.06 0.94; + 20 1 680 103 0 0 3 0.99101054 -6.8211783 345 1 1.06 0.94; + 21 1 274 115 0 0 3 1.0323192 -7.6287461 345 1 1.06 0.94; + 22 1 0 0 0 0 3 1.0501427 -3.1831199 345 1 1.06 0.94; + 23 1 247.5 84.6 0 0 3 1.0451451 -3.3812763 345 1 1.06 0.94; + 24 1 308.6 -92.2 0 0 3 1.038001 -9.9137585 345 1 1.06 0.94; + 25 1 224 47.2 0 0 2 1.0576827 -8.3692354 345 1 1.06 0.94; + 26 1 139 17 0 0 2 1.0525613 -9.4387696 345 1 1.06 0.94; + 27 1 281 75.5 0 0 2 1.0383449 -11.362152 345 1 1.06 0.94; + 28 1 206 27.6 0 0 3 1.0503737 -5.9283592 345 1 1.06 0.94; + 29 1 283.5 26.9 0 0 3 1.0501149 -3.1698741 345 1 1.06 0.94; + 30 2 0 0 0 0 2 1.0499 -7.3704746 345 1 1.06 0.94; + 31 3 9.2 4.6 0 0 1 0.982 0 345 1 1.06 0.94; + 32 2 0 0 0 0 1 0.9841 -0.1884374 345 1 1.06 0.94; + 33 2 0 0 0 0 3 0.9972 -0.19317445 345 1 1.06 0.94; + 34 2 0 0 0 0 3 1.0123 -1.631119 345 1 1.06 0.94; + 35 2 0 0 0 0 3 1.0494 1.7765069 345 1 1.06 0.94; + 36 2 0 0 0 0 3 1.0636 4.4684374 345 1 1.06 0.94; + 37 2 0 0 0 0 2 1.0275 -1.5828988 345 1 1.06 0.94; + 38 2 0 0 0 0 3 1.0265 3.8928177 345 1 1.06 0.94; + 39 2 1104 250 0 0 1 1.03 -14.535256 345 1 1.06 0.94; +]; + +%% generator data +% bus Pg Qg Qmax Qmin Vg mBase status Pmax Pmin Pc1 Pc2 Qc1min Qc1max Qc2min Qc2max ramp_agc ramp_10 ramp_30 ramp_q apf +mpc.gen = [ + 30 250 161.762 400 140 1.0499 100 1 1040 0 0 0 0 0 0 0 0 0 0 0 0; + 31 677.871 221.574 300 -100 0.982 100 1 646 0 0 0 0 0 0 0 0 0 0 0 0; + 32 650 206.965 300 150 0.9841 100 1 725 0 0 0 0 0 0 0 0 0 0 0 0; + 33 632 108.293 250 0 0.9972 100 1 652 0 0 0 0 0 0 0 0 0 0 0 0; + 34 508 166.688 167 0 1.0123 100 1 508 0 0 0 0 0 0 0 0 0 0 0 0; + 35 650 210.661 300 -100 1.0494 100 1 687 0 0 0 0 0 0 0 0 0 0 0 0; + 36 560 100.165 240 0 1.0636 100 1 580 0 0 0 0 0 0 0 0 0 0 0 0; + 37 540 -1.36945 250 0 1.0275 100 1 564 0 0 0 0 0 0 0 0 0 0 0 0; + 38 830 21.7327 300 -150 1.0265 100 1 865 0 0 0 0 0 0 0 0 0 0 0 0; + 39 1000 78.4674 300 -100 1.03 100 1 1100 0 0 0 0 0 0 0 0 0 0 0 0; +]; + +%% branch data +% fbus tbus r x b rateA rateB rateC ratio angle status angmin angmax +mpc.branch = [ + 1 2 0.0035 0.0411 0.6987 600 600 600 0 0 1 -360 360; + 1 39 0.001 0.025 0.75 1000 1000 1000 0 0 1 -360 360; + 2 3 0.0013 0.0151 0.2572 500 500 500 0 0 1 -360 360; + 2 25 0.007 0.0086 0.146 
500 500 500 0 0 1 -360 360; + 2 30 0 0.0181 0 900 900 2500 1.025 0 1 -360 360; + 3 4 0.0013 0.0213 0.2214 500 500 500 0 0 1 -360 360; + 3 18 0.0011 0.0133 0.2138 500 500 500 0 0 1 -360 360; + 4 5 0.0008 0.0128 0.1342 600 600 600 0 0 1 -360 360; + 4 14 0.0008 0.0129 0.1382 500 500 500 0 0 1 -360 360; + 5 6 0.0002 0.0026 0.0434 1200 1200 1200 0 0 1 -360 360; + 5 8 0.0008 0.0112 0.1476 900 900 900 0 0 1 -360 360; + 6 7 0.0006 0.0092 0.113 900 900 900 0 0 1 -360 360; + 6 11 0.0007 0.0082 0.1389 480 480 480 0 0 1 -360 360; + 6 31 0 0.025 0 1800 1800 1800 1.07 0 1 -360 360; + 7 8 0.0004 0.0046 0.078 900 900 900 0 0 1 -360 360; + 8 9 0.0023 0.0363 0.3804 900 900 900 0 0 1 -360 360; + 9 39 0.001 0.025 1.2 900 900 900 0 0 1 -360 360; + 10 11 0.0004 0.0043 0.0729 600 600 600 0 0 1 -360 360; + 10 13 0.0004 0.0043 0.0729 600 600 600 0 0 1 -360 360; + 10 32 0 0.02 0 900 900 2500 1.07 0 1 -360 360; + 12 11 0.0016 0.0435 0 500 500 500 1.006 0 1 -360 360; + 12 13 0.0016 0.0435 0 500 500 500 1.006 0 1 -360 360; + 13 14 0.0009 0.0101 0.1723 600 600 600 0 0 1 -360 360; + 14 15 0.0018 0.0217 0.366 600 600 600 0 0 1 -360 360; + 15 16 0.0009 0.0094 0.171 600 600 600 0 0 1 -360 360; + 16 17 0.0007 0.0089 0.1342 600 600 600 0 0 1 -360 360; + 16 19 0.0016 0.0195 0.304 600 600 2500 0 0 1 -360 360; + 16 21 0.0008 0.0135 0.2548 600 600 600 0 0 1 -360 360; + 16 24 0.0003 0.0059 0.068 600 600 600 0 0 1 -360 360; + 17 18 0.0007 0.0082 0.1319 600 600 600 0 0 1 -360 360; + 17 27 0.0013 0.0173 0.3216 600 600 600 0 0 1 -360 360; + 19 20 0.0007 0.0138 0 900 900 2500 1.06 0 1 -360 360; + 19 33 0.0007 0.0142 0 900 900 2500 1.07 0 1 -360 360; + 20 34 0.0009 0.018 0 900 900 2500 1.009 0 1 -360 360; + 21 22 0.0008 0.014 0.2565 900 900 900 0 0 1 -360 360; + 22 23 0.0006 0.0096 0.1846 600 600 600 0 0 1 -360 360; + 22 35 0 0.0143 0 900 900 2500 1.025 0 1 -360 360; + 23 24 0.0022 0.035 0.361 600 600 600 0 0 1 -360 360; + 23 36 0.0005 0.0272 0 900 900 2500 1 0 1 -360 360; + 25 26 0.0032 0.0323 0.531 600 600 600 0 0 1 -360 360; + 25 37 0.0006 0.0232 0 900 900 2500 1.025 0 1 -360 360; + 26 27 0.0014 0.0147 0.2396 600 600 600 0 0 1 -360 360; + 26 28 0.0043 0.0474 0.7802 600 600 600 0 0 1 -360 360; + 26 29 0.0057 0.0625 1.029 600 600 600 0 0 1 -360 360; + 28 29 0.0014 0.0151 0.249 600 600 600 0 0 1 -360 360; + 29 38 0.0008 0.0156 0 1200 1200 2500 1.025 0 1 -360 360; +]; + +%%----- OPF Data -----%% +%% generator cost data +% 1 startup shutdown n x1 y1 ... xn yn +% 2 startup shutdown n c(n-1) ... 
c0 +mpc.gencost = [ + 2 0 0 3 0.01 0.3 0.2; + 2 0 0 3 0.01 0.3 0.2; + 2 0 0 3 0.01 0.3 0.2; + 2 0 0 3 0.01 0.3 0.2; + 2 0 0 3 0.01 0.3 0.2; + 2 0 0 3 0.01 0.3 0.2; + 2 0 0 3 0.01 0.3 0.2; + 2 0 0 3 0.01 0.3 0.2; + 2 0 0 3 0.01 0.3 0.2; + 2 0 0 3 0.01 0.3 0.2; +]; diff --git a/ams/cases/npcc/npcc_uced.xlsx b/ams/cases/npcc/npcc_uced.xlsx index 58d63a6b..09cdf7af 100644 Binary files a/ams/cases/npcc/npcc_uced.xlsx and b/ams/cases/npcc/npcc_uced.xlsx differ diff --git a/ams/cases/wecc/wecc_uced.xlsx b/ams/cases/wecc/wecc_uced.xlsx index cf3e6a29..c2d3d54b 100644 Binary files a/ams/cases/wecc/wecc_uced.xlsx and b/ams/cases/wecc/wecc_uced.xlsx differ diff --git a/ams/core/matprocessor.py b/ams/core/matprocessor.py index a94b7fa5..7f7ed62b 100644 --- a/ams/core/matprocessor.py +++ b/ams/core/matprocessor.py @@ -10,7 +10,7 @@ from scipy.sparse import csr_matrix as c_sparse from scipy.sparse import lil_matrix as l_sparse -from ams.pypower.make import makePTDF, makeBdc +from ams.pypower.make import makePTDF from ams.io.pypower import system2ppc from ams.opt.omodel import Param @@ -49,13 +49,14 @@ def __init__(self, info: Optional[str] = None, unit: Optional[str] = None, v: Optional[np.ndarray] = None, + sparse: Optional[bool] = False, ): - Param.__init__(self, name=name, info=info) self.name = name self.tex_name = tex_name if (tex_name is not None) else name self.info = info self.unit = unit self._v = v + self.sparse = sparse self.owner = None @property @@ -102,27 +103,40 @@ class MatProcessor: def __init__(self, system): self.system = system + self.Bbus = MParam(name='Bbus', tex_name=r'B_{bus}', + info='Bus admittance matrix', + v=None, sparse=True) + self.Bf = MParam(name='Bf', tex_name=r'B_{f}', + info='Bf matrix', + v=None, sparse=True) + self.Pbusinj = MParam(name='Pbusinj', tex_name=r'P_{bus}^{inj}', + info='Bus power injection vector', + v=None,) + self.Pfinj = MParam(name='Pfinj', tex_name=r'P_{f}^{inj}', + info='Line power injection vector', + v=None,) self.PTDF = MParam(name='PTDF', tex_name=r'P_{TDF}', info='Power transfer distribution factor', v=None) + self.Cft = MParam(name='Cft', tex_name=r'C_{ft}', - info='Connectivity matrix', - v=None) - self.pl = MParam(name='pl', tex_name=r'p_l', - info='Nodal active load', - v=None) - self.ql = MParam(name='ql', tex_name=r'q_l', - info='Nodal reactive load', - v=None) + info='Line connectivity matrix', + v=None, sparse=True) + self.CftT = MParam(name='CftT', tex_name=r'C_{ft}^{T}', + info='Line connectivity matrix transpose', + v=None, sparse=True) self.Cg = MParam(name='Cg', tex_name=r'C_g', info='Generator connectivity matrix', - v=None) + v=None, sparse=True) self.Cs = MParam(name='Cs', tex_name=r'C_s', info='Slack connectivity matrix', - v=None) + v=None, sparse=True) self.Cl = MParam(name='Cl', tex_name=r'Cl', info='Load connectivity matrix', - v=None) + v=None, sparse=True) + self.Csh = MParam(name='Csh', tex_name=r'C_{sh}', + info='Shunt connectivity matrix', + v=None, sparse=True) def make(self): """ @@ -132,30 +146,11 @@ def make(self): """ system = self.system ppc = system2ppc(system) - + # FIXME: PTDF sequence okay? self.PTDF._v = makePTDF(ppc['baseMVA'], ppc['bus'], ppc['branch']) - _, _, _, _, self.Cft._v = makeBdc(ppc['baseMVA'], ppc['bus'], ppc['branch']) - - # FIXME: sparsity? 
- # FIXME: hard coded here - gen_bus = system.StaticGen.get(src='bus', attr='v', - idx=system.StaticGen.get_idx()) - slack_bus = system.Slack.get(src='bus', attr='v', - idx=system.Slack.idx.v) - all_bus = system.Bus.idx.v - load_bus = system.StaticLoad.get(src='bus', attr='v', - idx=system.StaticLoad.get_idx()) - idx_PD = system.PQ.find_idx(keys="bus", values=all_bus, - allow_none=True, default=None) - self.pl._v = c_sparse(system.PQ.get(src='p0', attr='v', idx=idx_PD)) - self.ql._v = np.array(system.PQ.get(src='q0', attr='v', idx=idx_PD)) - - row, col = np.meshgrid(all_bus, slack_bus) - self.Cs._v = c_sparse((row == col).astype(int)) - row, col = np.meshgrid(all_bus, gen_bus) - self.Cg._v = c_sparse((row == col).astype(int)) - row, col = np.meshgrid(all_bus, load_bus) - self.Cl._v = c_sparse((row == col).astype(int)) + + self._makeC() + self._makeBdc() return True @@ -172,3 +167,116 @@ def n(self): To fit the RParam style. """ return 2 + + def _makeBdc(self): + """ + Make Bdc matrix. + + Call _makeC() before this method to ensure Cft is available. + """ + system = self.system + + # common variables + nb = system.Bus.n + nl = system.Line.n + + # line parameters + idx_line = system.Line.idx.v + x = system.Line.get(src='x', attr='v', idx=idx_line) + u_line = system.Line.get(src='u', attr='v', idx=idx_line) + b = u_line / x # series susceptance + + # in DC, tap is assumed to be 1 + tap0 = system.Line.get(src='tap', attr='v', idx=idx_line) + tap = np.ones(nl) + i = np.flatnonzero(tap0) + tap[i] = tap0[i] # assign non-zero tap ratios + b = b / tap # adjusted series susceptance + + # build Bf such that Bf * Va is the vector of real branch powers injected + # at each branch's "from" bus + f = system.Bus.idx2uid(system.Line.get(src='bus1', attr='v', idx=idx_line)) + t = system.Bus.idx2uid(system.Line.get(src='bus2', attr='v', idx=idx_line)) + ir = np.r_[range(nl), range(nl)] # double set of row indices + Bf = c_sparse((np.r_[b, -b], (ir, np.r_[f, t])), (nl, nb)) + + # build Cft, note that this Cft is different from the one in _makeC() + Cft = c_sparse((np.r_[np.ones(nl), -np.ones(nl)], (ir, np.r_[f, t])), (nl, nb)) + + # build Bbus + Bbus = Cft.T * Bf + + phi = system.Line.get(src='phi', attr='v', idx=idx_line) + Pfinj = b * (-phi) + Pbusinj = Cft.T * Pfinj + + self.Bbus._v, self.Bf._v, self.Pbusinj._v, self.Pfinj._v = Bbus, Bf, Pbusinj, Pfinj + return True + + def _makeC(self): + """ + Make connectivity matrix. 
+ """ + system = self.system + + # common variables + nb = system.Bus.n + ng = system.StaticGen.n + nl = system.Line.n + npq = system.PQ.n + nsh = system.Shunt.n + + # --- Cg --- + # bus indices: idx -> uid + idx_gen = system.StaticGen.get_idx() + u_gen = system.StaticGen.get(src='u', attr='v', idx=idx_gen) + on_gen = np.flatnonzero(u_gen) # uid of online generators + on_gen_idx = [idx_gen[i] for i in on_gen] # idx of online generators + on_gen_bus_idx = system.StaticGen.get(src='bus', attr='v', idx=on_gen_idx) + + row = np.array([system.Bus.idx2uid(x) for x in on_gen_bus_idx]) + col = np.array([system.StaticGen.idx2uid(x) for x in on_gen_idx]) + Cg = c_sparse((np.ones(len(on_gen_idx)), (row, col)), (nb, ng)) + + # --- Cl --- + # load indices: idx -> uid + idx_load = system.PQ.idx.v + u_load = system.PQ.get(src='u', attr='v', idx=idx_load) + on_load = np.flatnonzero(u_load) # uid of online loads + on_load_idx = [idx_load[i] for i in on_load] # idx of online loads + on_load_bus_idx = system.PQ.get(src='bus', attr='v', idx=on_load_idx) + + row = np.array([system.Bus.idx2uid(x) for x in on_load_bus_idx]) + col = np.array([system.PQ.idx2uid(x) for x in on_load_idx]) + Cl = c_sparse((np.ones(len(on_load_idx)), (row, col)), (nb, npq)) + + # --- Cft --- + # line indices: idx -> uid + idx_line = system.Line.idx.v + u_line = system.Line.get(src='u', attr='v', idx=idx_line) + on_line = np.flatnonzero(u_line) # uid of online lines + on_line_idx = [idx_line[i] for i in on_line] # idx of online lines + on_line_bus1_idx = system.Line.get(src='bus1', attr='v', idx=on_line_idx) + on_line_bus2_idx = system.Line.get(src='bus2', attr='v', idx=on_line_idx) + + data_line = np.ones(2*len(on_line_idx)) + data_line[len(on_line_idx):] = -1 + row_line = np.array([system.Bus.idx2uid(x) for x in on_line_bus1_idx + on_line_bus2_idx]) + col_line = np.array([system.Line.idx2uid(x) for x in on_line_idx + on_line_idx]) + Cft = c_sparse((data_line, (row_line, col_line)), (nb, nl)) + + # --- Csh --- + # shunt indices: idx -> uid + idx_shunt = system.Shunt.idx.v + u_shunt = system.Shunt.get(src='u', attr='v', idx=idx_shunt) + on_shunt = np.flatnonzero(u_shunt) # uid of online shunts + on_shunt_idx = [idx_shunt[i] for i in on_shunt] # idx of online shunts + on_shunt_bus_idx = system.Shunt.get(src='bus', attr='v', idx=on_shunt_idx) + + row = np.array([system.Bus.idx2uid(x) for x in on_shunt_bus_idx]) + col = np.array([system.Shunt.idx2uid(x) for x in on_shunt_idx]) + Csh = c_sparse((np.ones(len(on_shunt_idx)), (row, col)), (nb, nsh)) + + self.Cg._v, self.Cl._v, self.Cft._v, self.Csh._v = Cg, Cl, Cft, Csh + self.CftT._v = Cft.T + return True diff --git a/ams/core/param.py b/ams/core/param.py index 9b900577..e41a0b12 100644 --- a/ams/core/param.py +++ b/ams/core/param.py @@ -9,6 +9,7 @@ import numpy as np from scipy.sparse import issparse +from scipy.sparse import csr_matrix as c_sparse from andes.core import BaseParam, DataParam, IdxParam, NumParam, ExtParam # NOQA from andes.models.group import GroupBase # NOQA @@ -78,8 +79,8 @@ class RParam(Param): True to set the parameter as positive. neg: bool, optional True to set the parameter as negative. - sparsity: list, optional - Sparsity pattern of the parameter. + sparse: bool, optional + True to set the parameter as sparse. 
Examples -------- @@ -125,12 +126,12 @@ def __init__(self, integer: Optional[bool] = False, pos: Optional[bool] = False, neg: Optional[bool] = False, - sparsity: Optional[list] = None, + sparse: Optional[list] = None, ): Param.__init__(self, nonneg=nonneg, nonpos=nonpos, complex=complex, imag=imag, symmetric=symmetric, diag=diag, hermitian=hermitian, boolean=boolean, - integer=integer, pos=pos, neg=neg, sparsity=sparsity) + integer=integer, pos=pos, neg=neg, sparse=sparse) self.name = name self.tex_name = tex_name if (tex_name is not None) else name self.info = info @@ -145,10 +146,12 @@ def __init__(self, self.owner = None # instance of the owner model or group self.rtn = None # instance of the owner routine self.is_ext = False # indicate if the value is set externally + self._v = None # external value if v is not None: self._v = v self.is_ext = True + # FIXME: might need a better organization @property def v(self): """ @@ -160,6 +163,9 @@ def v(self): - The value will sort by the indexer if indexed, used for optmization modeling. """ out = None + if self.sparse and self.expand_dims is not None: + msg = 'Sparse matrix does not support expand_dims.' + raise NotImplementedError(msg) if self.indexer is None: if self.is_ext: if issparse(self._v): @@ -176,7 +182,9 @@ def v(self): try: imodel = getattr(self.rtn.system, self.imodel) except AttributeError: - raise AttributeError(f'Indexer source model <{self.imodel}> not found, likely a modeling error.') + msg = f'Indexer source model <{self.imodel}> not found, ' + msg += 'likely a modeling error.' + raise AttributeError(msg) try: sorted_idx = self.owner.find_idx(keys=self.indexer, values=imodel.get_idx()) except AttributeError: @@ -257,10 +265,13 @@ def get_idx(self): try: imodel = getattr(self.rtn.system, self.imodel) except AttributeError: - raise AttributeError(f'Indexer source model <{self.imodel}> not found, likely a modeling error.') + msg = f'Indexer source model <{self.imodel}> not found, ' + msg += 'likely a modeling error.' + raise AttributeError(msg) try: sorted_idx = self.owner.find_idx(keys=self.indexer, values=imodel.get_idx()) except AttributeError: - raise AttributeError(f'Indexer <{self.indexer}> not found in <{self.imodel}>, \ - likely a modeling error.') + msg = f'Indexer <{self.indexer}> not found in <{self.imodel}>, ' + msg += 'likely a modeling error.' + raise AttributeError(msg) return sorted_idx diff --git a/ams/core/service.py b/ams/core/service.py index 1b052710..f376e4f9 100644 --- a/ams/core/service.py +++ b/ams/core/service.py @@ -2,10 +2,11 @@ Service. """ -import logging # NOQA -from typing import Callable, Optional, Type, Union, Iterable # NOQA +import logging +from typing import Callable, Type -import numpy as np # NOQA +import numpy as np +import scipy.sparse as spr from andes.core.service import BaseService, BackRef, RefFlatten # NOQA @@ -36,6 +37,8 @@ class RBaseService(BaseService, Param): Model name. no_parse: bool, optional True to skip parsing the service. + sparse: bool, optional + True to return output as scipy csr_matrix. """ def __init__(self, @@ -45,6 +48,7 @@ def __init__(self, info: str = None, vtype: Type = None, no_parse: bool = False, + sparse: bool = False, ): Param.__init__(self, name=name, unit=unit, info=info, no_parse=no_parse) @@ -53,13 +57,14 @@ def __init__(self, self.export = False self.is_group = False self.rtn = None + self.sparse = sparse @property def shape(self): """ Return the shape of the service. 
""" - if isinstance(self.v, np.ndarray): + if isinstance(self.v, (np.ndarray, spr.csr_matrix)): return self.v.shape else: raise TypeError(f'{self.class_name}: {self.name} is not an array.') @@ -103,6 +108,8 @@ class ValueService(RBaseService): Variable type. model : str, optional Model name. + sparse: bool, optional + True to return output as scipy csr_matrix. """ def __init__(self, @@ -113,10 +120,11 @@ def __init__(self, info: str = None, vtype: Type = None, no_parse: bool = False, + sparse: bool = False, ): super().__init__(name=name, tex_name=tex_name, unit=unit, info=info, vtype=vtype, - no_parse=no_parse) + no_parse=no_parse, sparse=sparse) self._v = value @property @@ -124,6 +132,8 @@ def v(self): """ Value of the service. """ + if self.sparse: + return spr.csr_matrix(self._v) return self._v @@ -147,6 +157,8 @@ class ROperationService(RBaseService): Variable type. model : str, optional Model name. + sparse: bool, optional + True to return output as scipy csr_matrix. """ def __init__(self, @@ -156,10 +168,11 @@ def __init__(self, unit: str = None, info: str = None, vtype: Type = None, - no_parse: bool = False,): + no_parse: bool = False, + sparse: bool = False,): super().__init__(name=name, tex_name=tex_name, unit=unit, info=info, vtype=vtype, - no_parse=no_parse) + no_parse=no_parse, sparse=sparse) self.u = u @@ -181,6 +194,8 @@ class LoadScale(ROperationService): Unit. info : str, optional Description. + sparse: bool, optional + True to return output as scipy csr_matrix. """ def __init__(self, @@ -191,10 +206,12 @@ def __init__(self, unit: str = None, info: str = None, no_parse: bool = False, + sparse: bool = False, ): tex_name = tex_name if tex_name is not None else u.tex_name super().__init__(name=name, tex_name=tex_name, unit=unit, - info=info, u=u, no_parse=no_parse) + info=info, u=u, no_parse=no_parse, + sparse=sparse) self.sd = sd @property @@ -206,6 +223,8 @@ def v(self): u_yloc = np.array(sys.Region.idx2uid(u_zone)) p0s = np.multiply(self.sd.v[:, u_yloc].transpose(), self.u.v[:, np.newaxis]) + if self.sparse: + return spr.csr_matrix(p0s) return p0s @@ -242,6 +261,8 @@ class NumOp(ROperationService): Expand the dimensions of the output array along a specified axis. array_out : bool, optional Whether to force the output to be an array. + sparse: bool, optional + True to return output as scipy csr_matrix. """ def __init__(self, @@ -257,11 +278,12 @@ def __init__(self, rargs: dict = {}, expand_dims: int = None, array_out=True, - no_parse: bool = False,): + no_parse: bool = False, + sparse: bool = False,): tex_name = tex_name if tex_name is not None else u.tex_name super().__init__(name=name, tex_name=tex_name, unit=unit, info=info, vtype=vtype, u=u, - no_parse=no_parse) + no_parse=no_parse, sparse=sparse) self.fun = fun self.args = args self.rfun = rfun @@ -271,10 +293,7 @@ def __init__(self, @property def v0(self): - if isinstance(self.u, Iterable): - out = self.fun([u.v for u in self.u], **self.args) - else: - out = self.fun(self.u.v, **self.args) + out = self.fun(self.u.v, **self.args) if self.array_out: if not isinstance(out, np.ndarray): out = np.array([out]) @@ -290,9 +309,12 @@ def v1(self): @property def v(self): if self.expand_dims is not None: - return np.expand_dims(self.v1, axis=int(self.expand_dims)) + out = np.expand_dims(self.v1, axis=int(self.expand_dims)) else: - return self.v1 + out = self.v1 + if self.sparse: + return spr.csr_matrix(out) + return out class NumExpandDim(NumOp): @@ -318,6 +340,8 @@ class NumExpandDim(NumOp): Variable type. 
array_out : bool, optional Whether to force the output to be an array. + sparse: bool, optional + True to return output as scipy csr_matrix. """ def __init__(self, @@ -330,17 +354,21 @@ def __init__(self, info: str = None, vtype: Type = None, array_out: bool = True, - no_parse: bool = False,): + no_parse: bool = False, + sparse: bool = False,): super().__init__(name=name, tex_name=tex_name, unit=unit, info=info, vtype=vtype, u=u, fun=np.expand_dims, args=args, array_out=array_out, - no_parse=no_parse) + no_parse=no_parse, sparse=sparse) self.axis = axis @property def v(self): - return self.fun(self.u.v, axis=self.axis, **self.args) + out = self.fun(self.u.v, axis=self.axis, **self.args) + if self.sparse: + return spr.csr_matrix(out) + return out class NumOpDual(NumOp): @@ -378,6 +406,8 @@ class NumOpDual(NumOp): Expand the dimensions of the output array along a specified axis. array_out : bool, optional Whether to force the output to be an array. + sparse: bool, optional + True to return output as scipy csr_matrix. """ def __init__(self, @@ -394,7 +424,8 @@ def __init__(self, rargs: dict = {}, expand_dims: int = None, array_out=True, - no_parse: bool = False,): + no_parse: bool = False, + sparse: bool = False,): tex_name = tex_name if tex_name is not None else u.tex_name super().__init__(name=name, tex_name=tex_name, unit=unit, info=info, vtype=vtype, @@ -402,7 +433,7 @@ def __init__(self, rfun=rfun, rargs=rargs, expand_dims=expand_dims, array_out=array_out, - no_parse=no_parse) + no_parse=no_parse, sparse=sparse) self.u2 = u2 @property @@ -411,6 +442,8 @@ def v0(self): if self.array_out: if not isinstance(out, np.ndarray): out = np.array([out]) + if self.sparse: + return spr.csr_matrix(out) return out @@ -433,6 +466,8 @@ class MinDur(NumOpDual): Unit. info : str, optional Description. + sparse: bool, optional + True to return output as scipy csr_matrix. """ def __init__(self, @@ -443,14 +478,15 @@ def __init__(self, unit: str = None, info: str = None, vtype: Type = None, - no_parse: bool = False,): + no_parse: bool = False, + sparse: bool = False,): tex_name = tex_name if tex_name is not None else u.tex_name super().__init__(name=name, tex_name=tex_name, unit=unit, info=info, vtype=vtype, u=u, u2=u2, fun=None, args=None, rfun=None, rargs=None, expand_dims=None, - no_parse=no_parse) + no_parse=no_parse, sparse=sparse) if self.u.horizon is None: msg = f'{self.class_name} <{self.name}>.u: <{self.u.name}> ' msg += 'has no horizon, likely a modeling error.' @@ -471,6 +507,8 @@ def v(self): # Create a mask for valid time periods based on minimum duration valid_mask = (t + td[i] <= n_ts) tout[i[valid_mask], t[valid_mask]] = 1 + if self.sparse: + return spr.csr_matrix(tout) return tout @@ -500,6 +538,8 @@ class NumHstack(NumOp): Variable type. model : str, optional Model name. + sparse: bool, optional + True to return output as scipy csr_matrix. """ def __init__(self, @@ -513,12 +553,13 @@ def __init__(self, vtype: Type = None, rfun: Callable = None, rargs: dict = {}, - no_parse: bool = False,): + no_parse: bool = False, + sparse: bool = False,): super().__init__(name=name, tex_name=tex_name, unit=unit, info=info, vtype=vtype, u=u, fun=np.hstack, args=args, rfun=rfun, rargs=rargs, - no_parse=no_parse) + no_parse=no_parse, sparse=sparse) self.ref = ref @property @@ -584,6 +625,8 @@ class ZonalSum(NumOp): Variable type. model : str Model name. + sparse: bool, optional + True to return output as scipy csr_matrix. 
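The `sparse` keyword threaded through the services in this file follows a single pattern: the dense result is computed as before, and the `v` property converts it to a `scipy.sparse.csr_matrix` on the way out when requested. A minimal, hypothetical stand-in class showing the toggle:

```python
# Minimal sketch (hypothetical class, not from the diff) of the sparse-output
# toggle added to the AMS services in this file.
import numpy as np
import scipy.sparse as spr

class DemoService:
    """Stand-in for an AMS service with the new `sparse` flag."""

    def __init__(self, value, sparse=False):
        self._v = np.asarray(value)
        self.sparse = sparse

    @property
    def v(self):
        # dense computation unchanged; conversion happens only on output
        out = self._v
        return spr.csr_matrix(out) if self.sparse else out

dense = DemoService(np.eye(3)).v                  # ndarray, as before
sparse = DemoService(np.eye(3), sparse=True).v    # scipy.sparse.csr_matrix
```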
""" def __init__(self, @@ -597,12 +640,12 @@ def __init__(self, rfun: Callable = None, rargs: dict = {}, no_parse: bool = False, - ): + sparse: bool = False,): super().__init__(name=name, tex_name=tex_name, unit=unit, info=info, vtype=vtype, u=u, fun=None, args={}, rfun=rfun, rargs=rargs, - no_parse=no_parse) + no_parse=no_parse, sparse=sparse) self.zone = zone @property @@ -665,6 +708,8 @@ class RTED: Keyword arguments to pass to ``rfun``. array_out : bool, optional Whether to force the output to be an array. + sparse: bool, optional + True to return output as scipy csr_matrix. """ def __init__(self, @@ -680,12 +725,12 @@ def __init__(self, rargs: dict = {}, array_out: bool = True, no_parse: bool = False, - **kwargs - ): + sparse: bool = False, + **kwargs): super().__init__(name=name, tex_name=tex_name, unit=unit, info=info, vtype=vtype, u=u, fun=None, rfun=rfun, rargs=rargs, array_out=array_out, - no_parse=no_parse, + no_parse=no_parse, sparse=sparse, **kwargs) self.indexer = indexer self.gamma = gamma @@ -757,6 +802,8 @@ class VarReduction(NumOp): The variable type. model : str, optional The model name associated with the operation. + sparse: bool, optional + True to return output as scipy csr_matrix. """ def __init__(self, @@ -770,12 +817,12 @@ def __init__(self, rfun: Callable = None, rargs: dict = {}, no_parse: bool = False, - **kwargs - ): + sparse: bool = False, + **kwargs): super().__init__(name=name, tex_name=tex_name, unit=unit, info=info, vtype=vtype, u=u, fun=None, rfun=rfun, rargs=rargs, - no_parse=no_parse, + no_parse=no_parse, sparse=sparse, **kwargs) self.fun = fun @@ -813,6 +860,8 @@ class RampSub(NumOp): Variable type. model : str Model name. + sparse: bool, optional + True to return output as scipy csr_matrix. """ def __init__(self, @@ -825,11 +874,11 @@ def __init__(self, rfun: Callable = None, rargs: dict = {}, no_parse: bool = False, - ): + sparse: bool = False,): super().__init__(name=name, tex_name=tex_name, unit=unit, info=info, vtype=vtype, u=u, fun=None, rfun=rfun, rargs=rargs, - no_parse=no_parse,) + no_parse=no_parse, sparse=sparse,) @property def v0(self): @@ -842,4 +891,7 @@ def v1(self): @property def v(self): nr = self.u.horizon.n - return np.eye(nr, nr-1, k=-1) - np.eye(nr, nr-1, k=0) + out = np.eye(nr, nr-1, k=-1) - np.eye(nr, nr-1, k=0) + if self.sparse: + return spr.csr_matrix(out) + return out diff --git a/ams/core/symprocessor.py b/ams/core/symprocessor.py index c3e7d27b..a5651648 100644 --- a/ams/core/symprocessor.py +++ b/ams/core/symprocessor.py @@ -9,6 +9,8 @@ import sympy as sp +from ams.core.matprocessor import MatProcessor + logger = logging.getLogger(__name__) @@ -23,34 +25,17 @@ class SymProcessor: Attributes ---------- - x: sympy.Matrix - variables pretty print - c : sympy.Matrix - pretty print of variables coefficients - Aub : sympy.Matrix - Aub pretty print - Aeq : sympy.Matrix - Aeq pretty print - bub : sympy.Matrix - pretty print of inequality upper bound - beq : sympy.Matrix - pretty print of equality bound - lb : sympy.Matrix - pretty print of variables lower bound - ub : sympy.Matrix - pretty print of variables upper bound - inputs_dict : OrderedDict - All possible symbols in equations, including variables, parameters, and - config flags. - vars_dict : OrderedDict - variable-only symbols, which are useful when getting the Jacobian matrices. + sub_map : dict + Substitution map for symbolic processing. + tex_map : dict + Tex substitution map for documentation. + val_map : dict + Value substitution map for post-solving value evaluation. 
""" def __init__(self, parent): self.parent = parent self.inputs_dict = OrderedDict() - self.vars_dict = OrderedDict() - self.vars_list = list() # list of variable symbols, corresponding to `self.xy` self.services_dict = OrderedDict() self.config = parent.config self.class_name = parent.class_name @@ -96,6 +81,11 @@ def __init__(self, parent): (r'\bsum\b', 'SUM'), ]) + # mapping dict for evaluating expressions + self.val_map = OrderedDict([ + (r'\bcp.\b', 'np.'), + ]) + self.status = { 'optimal': 0, 'infeasible': 1, @@ -117,6 +107,7 @@ def generate_symbols(self, force_generate=False): if not force_generate and self.parent._syms: return True logger.debug(f'- Generating symbols for {self.parent.class_name}') + # process tex_names defined in routines # ----------------------------------------------------------- for key in self.parent.tex_names.keys(): @@ -126,18 +117,31 @@ def generate_symbols(self, force_generate=False): for vname, var in self.parent.vars.items(): tmp = sp.symbols(f'{var.name}') # tmp = sp.symbols(var.name) - self.vars_dict[vname] = tmp self.inputs_dict[vname] = tmp self.sub_map[rf"\b{vname}\b"] = f"self.om.{vname}" self.tex_map[rf"\b{vname}\b"] = rf'{var.tex_name}' + self.val_map[rf"\b{vname}\b"] = f"rtn.{vname}.v" # RParams for rpname, rparam in self.parent.rparams.items(): tmp = sp.symbols(f'{rparam.name}') self.inputs_dict[rpname] = tmp - sub_name = f'self.rtn.{rpname}.v' if rparam.no_parse else f'self.om.{rpname}' + sub_name = '' + if isinstance(rparam.owner, MatProcessor): + # sparse matrices are accessed from MatProcessor + # otherwise, dense matrices are accessed from Routine + if rparam.sparse: + sub_name = f'self.rtn.system.mats.{rpname}._v' + else: + sub_name = f'self.rtn.{rpname}.v' + elif rparam.no_parse: + sub_name = f'self.rtn.{rpname}.v' + else: + sub_name = f'self.om.{rpname}' self.sub_map[rf"\b{rpname}\b"] = sub_name self.tex_map[rf"\b{rpname}\b"] = f'{rparam.tex_name}' + if not rparam.no_parse: + self.val_map[rf"\b{rpname}\b"] = f"rtn.{rpname}.v" # Routine Services for sname, service in self.parent.services.items(): @@ -147,6 +151,8 @@ def generate_symbols(self, force_generate=False): sub_name = f'self.rtn.{sname}.v' if service.no_parse else f'self.om.{sname}' self.sub_map[rf"\b{sname}\b"] = sub_name self.tex_map[rf"\b{sname}\b"] = f'{service.tex_name}' + if not service.no_parse: + self.val_map[rf"\b{sname}\b"] = f"rtn.{sname}.v" # store tex names defined in `self.config` for key in self.config.as_dict(): @@ -170,8 +176,6 @@ def generate_symbols(self, force_generate=False): self.inputs_dict['sys_f'] = sp.symbols('sys_f') self.inputs_dict['sys_mva'] = sp.symbols('sys_mva') - self.vars_list = list(self.vars_dict.values()) # useful for ``.jacobian()`` - self.parent._syms = True return True diff --git a/ams/interop/andes.py b/ams/interop/andes.py index a4bb56b4..216d2a6f 100644 --- a/ams/interop/andes.py +++ b/ams/interop/andes.py @@ -2,28 +2,74 @@ Interface with ANDES """ -import os # NOQA -import logging # NOQA -from collections import OrderedDict, Counter # NOQA +import os +import logging +from collections import OrderedDict, Counter -from andes.shared import pd, np # NOQA -from andes.utils.misc import elapsed # NOQA -from andes import load as andes_load # NOQA +from andes.shared import pd, np +from andes.utils.misc import elapsed +from andes.system import System as andes_System from andes.interop.pandapower import make_link_table # NOQA -from ams.io import input_formats, json # NOQA -from ams.models.group import StaticGen # NOQA -from ams.models.static 
import PV, Slack # NOQA +from ams.io import input_formats +from ams.models.group import StaticGen +from ams.models.static import PV, Slack logger = logging.getLogger(__name__) +# Models used in ANDES PFlow +pflow_dict = OrderedDict([ + ('Bus', ['idx', 'u', 'name', + 'Vn', 'vmax', 'vmin', + 'v0', 'a0', 'xcoord', 'ycoord', + 'area', 'zone', 'owner']), + ('PQ', ['idx', 'u', 'name', + 'bus', 'Vn', 'p0', 'q0', + 'vmax', 'vmin', 'owner']), + ('PV', ['idx', 'u', 'name', 'Sn', + 'Vn', 'bus', 'busr', 'p0', 'q0', + 'pmax', 'pmin', 'qmax', 'qmin', + 'v0', 'vmax', 'vmin', 'ra', 'xs']), + ('Slack', ['idx', 'u', 'name', 'Sn', + 'Vn', 'bus', 'busr', 'p0', 'q0', + 'pmax', 'pmin', 'qmax', 'qmin', + 'v0', 'vmax', 'vmin', 'ra', 'xs', + 'a0']), + ('Shunt', ['idx', 'u', 'name', 'Sn', + 'Vn', 'bus', 'g', 'b', 'fn']), + ('Line', ['idx', 'u', 'name', + 'bus1', 'bus2', 'Sn', + 'fn', 'Vn1', 'Vn2', + 'r', 'x', 'b', 'g', 'b1', 'g1', 'b2', 'g2', + 'trans', 'tap', 'phi', + 'rate_a', 'rate_b', 'rate_c', + 'owner', 'xcoord', 'ycoord']), + ('Area', ['idx', 'u', 'name']), +]) + +# dict for guessing dynamic models given its idx +idx_guess = {'rego': 'RenGovernor', + 'ree': 'RenExciter', + 'rea': 'RenAerodynamics', + 'rep': 'RenPitch', + 'busf': 'BusFreq', + 'zone': 'Region', + 'gen': 'StaticGen', + 'pq': 'PQ', } + + def to_andes(system, setup=False, addfile=None, - overwrite=None, no_keep=True, **kwargs): """ Convert the AMS system to an ANDES system. + A preferred dynamic system file to be added has following features: + 1. The file contains both power flow and dynamic models. + 2. The file can run in ANDES natively. + 3. Power flow models are in the same shape as the AMS system. + 4. Dynamic models, if any, are in the same shape as the AMS system. + This function is wrapped as the ``System`` class method ``to_andes()``. Using the file conversion ``to_andes()`` will automatically link the AMS system instance to the converted ANDES system instance @@ -41,10 +87,6 @@ def to_andes(system, setup=False, addfile=None, Whether to call `setup()` after the conversion. Default is True. addfile : str, optional The additional file to be converted to ANDES dynamic mdoels. - overwrite : bool, optional - Whether to overwrite the existing file. - no_keep : bool, optional - True to remove the converted file after the conversion. **kwargs : dict Keyword arguments to be passed to `andes.system.System`. @@ -69,31 +111,30 @@ def to_andes(system, setup=False, addfile=None, 3. Index in the addfile is automatically adjusted when necessary. """ t0, _ = elapsed() - andes_file = system.files.name + '.json' - json.write(system, andes_file, - overwrite=overwrite, - to_andes=True, - ) + adsys = andes_System() + # FIXME: is there a systematic way to do this? Other config might be needed + adsys.config.freq = system.config.freq + adsys.config.mva = system.config.mva - _, s = elapsed(t0) - logger.info(f'System convert to ANDES in {s}, saved as "{andes_file}".') + for mdl_name, mdl_cols in pflow_dict.items(): + mdl = getattr(system, mdl_name) + for row in mdl.cache.df_in[mdl_cols].to_dict(orient='records'): + adsys.add(mdl_name, row) - adsys = andes_load(andes_file, setup=False, **kwargs) - - if no_keep: - logger.info('Converted file is removed. Set "no_keep=False" to keep it.') - os.remove(andes_file) + _, s = elapsed(t0) - # 1. 
additonal file for dynamic simulation + # additonal file for dynamic models if addfile: - t, _ = elapsed() + t_add, _ = elapsed() # --- parse addfile --- adsys = parse_addfile(adsys=adsys, amsys=system, addfile=addfile) - _, s = elapsed(t) - logger.info('Addfile parsed in %s.', s) + _, s_add = elapsed(t_add) + logger.info('Addfile parsed in %s.', s_add) + + logger.info(f'System converted to ANDES in {s}.') # finalize system.dyn = Dynamic(amsys=system, adsys=adsys) @@ -143,21 +184,19 @@ def parse_addfile(adsys, amsys, addfile): # Try parsing the addfile logger.info('Parsing additional file "%s"...', addfile) - # FIXME: hard-coded list of power flow models - # FIXME: this might be a problem in the future once we add more models - # FIXME: find a better way to handle this, e.g. REGCV1, or ESD1 - pflow_mdl = ['Bus', 'PQ', 'PV', 'Slack', 'Shunt', 'Line', 'Area'] - for mdl in pflow_mdl: - am_mdl = getattr(amsys, mdl) - if am_mdl.n == 0: - pflow_mdl.remove(mdl) - idx_map = OrderedDict([]) reader = pd.ExcelFile(addfile) - common_elements = set(reader.sheet_names) & set(pflow_mdl) - if common_elements: - msg = 'Power flow models exist in the addfile.' - msg += ' Only dynamic models will be used.' + + pflow_mdl = list(pflow_dict.keys()) + + pflow_mdls_overlap = [] + for mdl_name in pflow_dict.keys(): + if mdl_name in reader.sheet_names: + pflow_mdls_overlap.append(mdl_name) + + if len(pflow_mdls_overlap) > 0: + msg = 'Following PFlow models in addfile will be overwritten: ' + msg += ', '.join([f'<{mdl}>' for mdl in pflow_mdls_overlap]) logger.warning(msg) pflow_df_models = pd.read_excel(addfile, @@ -170,9 +209,13 @@ def parse_addfile(adsys, amsys, addfile): df.dropna(axis=0, how='all', inplace=True) # collect idx_map if difference exists + idx_map = OrderedDict([]) for name, df in pflow_df_models.items(): am_idx = amsys.models[name].idx.v ad_idx = df['idx'].values + if len(set(am_idx)) != len(set(ad_idx)): + msg = f'<{name}> has different number of rows in addfile.' + logger.warning(msg) if set(am_idx) != set(ad_idx): idx_map[name] = dict(zip(ad_idx, am_idx)) @@ -191,23 +234,14 @@ def parse_addfile(adsys, amsys, addfile): mdl = adsys.models[name] except KeyError: mdl = adsys.model_aliases[name] - # skip if no idx_params - if len(mdl.idx_params) == 0: + if len(mdl.idx_params) == 0: # skip if no idx_params continue for idxn, idxp in mdl.idx_params.items(): if idxp.model is None: # make a guess if no model is specified mdl_guess = idxn.capitalize() if mdl_guess not in adsys.models.keys(): - typical_map = {'rego': 'RenGovernor', - 'ree': 'RenExciter', - 'rea': 'RenAerodynamics', - 'rep': 'RenPitch', - 'busf': 'BusFreq', - 'zone': 'Region', - 'gen': 'StaticGen', - 'pq': 'PQ', } try: - mdl_guess = typical_map[idxp.name] + mdl_guess = idx_guess[idxp.name] except KeyError: # set the most frequent string as the model name split_list = [] for item in df[idxn].values: @@ -243,14 +277,30 @@ def parse_addfile(adsys, amsys, addfile): df[idxn] = df[idxn].replace(idx_map[mdl_guess]) logger.debug(f'Adjust {idxp.class_name} <{name}.{idxp.name}>') - # parse addfile and add models to ANDES system + # add dynamic models for name, df in df_models.items(): # drop rows that all nan df.dropna(axis=0, how='all', inplace=True) + # if the dynamic model also exists in AMS, use AMS parameters for overlap + if name in amsys.models.keys(): + if df.shape[0] != amsys.models[name].n: + msg = f'<{name}> has different number of rows in addfile.' 
+ logger.warning(msg) + am_params = set(amsys.models[name].params.keys()) + ad_params = set(df.columns) + overlap_params = list(am_params.intersection(ad_params)) + ad_rest_params = list(ad_params - am_params) + ['idx'] + msg = f'Following <{name}> parameters in addfile are overwriten: ' + msg += ', '.join(overlap_params) + logger.debug(msg) + tmp = amsys.models[name].cache.df_in[overlap_params] + df = pd.merge(left=tmp, right=df[ad_rest_params], + on='idx', how='left') for row in df.to_dict(orient='records'): adsys.add(name, row) # --- adjust SynGen Vn with Bus Vn --- + # NOTE: RenGen and DG have no Vn, so no need to adjust syg_idx = [] for _, syg in adsys.SynGen.models.items(): if syg.n > 0: @@ -500,31 +550,30 @@ def send(self, adsys=None, routine=None): # NOTE:if DC types, check if results are converted if rtn.type != 'ACED': if rtn.is_ac: - logger.debug(f'{rtn.class_name} results has been converted to AC.') + logger.info(f'{rtn.class_name} results has been converted to AC.') else: - msg = f'{rtn.class_name} has not been converted' - msg += ' to AC, error might occur!' + msg = f'<{rtn.class_name}> AC conversion failed or not done yet!' logger.error(msg) try: - logger.warning(f'Send {rtn.class_name} results to ANDES <{hex(id(sa))}>') + logger.info(f'Send <{rtn.class_name}> results to ANDES <{hex(id(sa))}>') except AttributeError: logger.warning('No solved routine found. Unable to sync with ANDES.') return False - # mapping dict + # map-to dict map2 = getattr(sp.recent, 'map2') if len(map2) == 0: logger.warning(f'Mapping dict "map2" of {sp.recent.class_name} is empty.') return True # check map2 - pv_slack = 'PV' in map2.keys() and 'SLACK' in map2.keys() + pv_slack = 'PV' in map2.keys() and 'Slack' in map2.keys() if pv_slack: - logger.warning('PV and SLACK are both in map2, this might cause error when mapping.') + logger.warning('PV and Slack are both in map2, this might cause error when mapping.') # 2. sync to dynamic if initialized if self.is_tds: - logger.warning('TDS is initialized, send to models.') + logger.info('TDS is initialized, send to models.') # 1) send Dynamic generator online status self._send_dgu(sa=sa, sp=sp) # 2) send other results @@ -541,7 +590,7 @@ def send(self, adsys=None, routine=None): if not is_tgr_set and isinstance(mdl, (StaticGen, PV, Slack)): self._send_tgr(sa=sa, sp=sp) is_tgr_set = True - logger.warning('Generator power reference are sent to dynamic devices') + logger.info('Generator power reference are sent to dynamic devices') continue # c. others, if any ams_var = getattr(sp.recent, ams_vname) @@ -561,31 +610,43 @@ def send(self, adsys=None, routine=None): msg = 'ANDES PFlow has been run, please re-run it' msg += ' before running TDS to avoid failed TDS initialization' logger.info(msg) - logger.warning('TDS is not initialized, send to models') + else: + logger.warning('TDS is not excuted, send to models') + # NOTE: mdl_name can be model name or group name + # it can be the case that an AMS model or group is not in ANDES for mdl_name, pmap in map2.items(): for ams_vname, andes_pname in map2[mdl_name].items(): - # a. voltage reference - if ams_vname in ['qg']: + # if qg in mapping, raise a warning + if (ams_vname == 'qg') & (mdl_name in ['PV', 'Slack', 'StaticGen']): logger.warning('Setting `qg` to ANDES dynamic does not take effect.') - # b. 
others, if any + ams_var = getattr(sp.recent, ams_vname) - # NOTE: here we stick with ANDES device idx - idx = ams_var.get_idx() + idx = ams_var.get_idx() # use AMS idx v_ams = sp.recent.get(src=ams_vname, attr='v', idx=idx) - mdl_andes = getattr(sa, mdl_name) + + # try to find the corresponding ANDES model + ad_mdl_grp = list(sa.models.keys()) + list(sa.model_aliases.keys()) + list(sa.groups.keys()) + if mdl_name in ad_mdl_grp: + mdl_andes = getattr(sa, mdl_name) + else: + msg = f'<{mdl_name}> is neither a ANDES model nor group!' + logger.error(msg) + continue + logger.debug(f'Send <{ams_vname}> to {mdl_andes.class_name}.{andes_pname}') mdl_andes.set(src=andes_pname, idx=idx, attr='v', value=v_ams) - logger.debug(f'Send <{ams_vname}> to {mdl_andes.class_name} {andes_pname}') - # correct StaticGen v0 with Bus v0 if any - try: - rtn.map2['Bus']['vBus'] == 'v0' - logger.warning('Adjust ANDES StaticGen v0 with Bus v0') - stg_idx = sa.PV.idx.v + sa.Slack.idx.v - stg_bus = sa.PV.bus.v + sa.Slack.bus.v - vBus = sp.Bus.get(src='v0', attr='v', idx=stg_bus) - sa.StaticGen.set(src='v0', attr='v', idx=stg_idx, value=vBus) - except KeyError: - msg = 'Skip adjusting ANDES StaticGen v0 because Bus v0 is not found in map2' - logger.warning(msg) + + # correct StaticGen v0 with Bus v0 if available + if 'Bus' in rtn.map2.keys(): + if 'vBus' in rtn.map2['Bus'].keys(): + try: + logger.warning('Adjust ANDES StaticGen.v0 with Bus.v0') + stg_idx = sa.PV.idx.v + sa.Slack.idx.v + stg_bus = sa.PV.bus.v + sa.Slack.bus.v + vBus = sp.Bus.get(src='v0', attr='v', idx=stg_bus) + sa.StaticGen.set(src='v0', attr='v', idx=stg_idx, value=vBus) + except KeyError: + msg = 'Failed to adjust ANDES StaticGen.v0 with Bus.v0' + logger.warning(msg) return True @require_link @@ -604,7 +665,7 @@ def receive(self, adsys=None): # 1. information try: rtn_name = sp.recent.class_name - logger.debug(f'Receiving ANDES <{hex(id(sa))}> results to {rtn_name}.') + logger.info(f'Receiving ANDES <{hex(id(sa))}> results to {rtn_name}.') except AttributeError: logger.warning('No target AMS routine found. Failed to sync with ANDES.') return False @@ -618,7 +679,7 @@ def receive(self, adsys=None): # 2. sync dynamic results if dynamic is initialized if self.is_tds: # TODO: dynamic results - logger.debug(f'Receiving results to {sp.recent.class_name}...') + logger.info(f'Receiving results to {sp.recent.class_name}...') # 1) receive models online status is_dgu_set = False for mname, mdl in self.amsys.models.items(): @@ -628,22 +689,24 @@ def receive(self, adsys=None): if mdl.n == 0: continue # a. dynamic generator online status - if not is_dgu_set and mdl.group in ['StaticGen']: + if (not is_dgu_set) & (mdl.group in ['StaticGen']): self._receive_dgu(sa=sa, sp=sp) is_dgu_set = True - logger.warning('Generator online status are received from dynamic generators.') continue + # FIXME: in AMS, dynamic generatos `u` is not in effect + # how to make this consistent with ANDES? # b. other models online status idx = mdl.idx.v - if mname in sa.models: + if (mname in sa.models) & (mdl.group not in ['StaticGen']): mdl_andes = getattr(sa, mname) # 1) receive models online status u_andes = mdl_andes.get(src='u', idx=idx, attr='v') mdl.set(src='u', idx=idx, attr='v', value=u_andes) + logger.debug(f'Receive {mdl.class_name}.u from {mname}.u') # 2) receive other results is_pe_set = False for mname, pmap in map1.items(): - mdl = getattr(sa, mname) + ams_vname = getattr(sa, mname) for ams_vname, andes_pname in pmap.items(): # a. 
output power if not is_pe_set and andes_pname == 'p' and mname == 'StaticGen': @@ -665,7 +728,7 @@ def receive(self, adsys=None): return True # 3. sync static results if dynamic is not initialized else: - logger.debug(f'Receiving results to {sp.recent.class_name}...') + logger.info(f'Receiving results to {sp.recent.class_name}...') for mname, mdl in sp.models.items(): # NOTE: skip models without idx: ``Summary`` if not hasattr(mdl, 'idx'): @@ -719,6 +782,9 @@ def _receive_pe(self, sa, sp): allow_none=True, default=0,) Pe = Pe_sg + Pe_dg + Pe_rg idx = sp.dyn.link['stg_idx'].replace(np.NaN, None).to_list() + dyg_names = 'SynGen' if sa.SynGen.n > 0 else '' + dyg_names += ', DG' if sa.DG.n > 0 else '' + dyg_names += ', RenGen' if sa.RenGen.n > 0 else '' return Pe, idx def _receive_dgu(self, sa, sp): @@ -742,4 +808,8 @@ def _receive_dgu(self, sa, sp): sp.StaticGen.set(idx=sp.dyn.link['stg_idx'].to_list(), src='u', attr='v', value=u_dyngen,) + dyg_names = 'SynGen' if sa.SynGen.n > 0 else '' + dyg_names += ', DG' if sa.DG.n > 0 else '' + dyg_names += ', RenGen' if sa.RenGen.n > 0 else '' + logger.debug(f'Receive StaticGen.u from of {dyg_names}') return True diff --git a/ams/io/json.py b/ams/io/json.py index df0a09ab..9ea4d9ac 100644 --- a/ams/io/json.py +++ b/ams/io/json.py @@ -62,7 +62,6 @@ def _dump_system(system, skip_empty, orient='records', to_andes=False): Dump parameters of each model into a json string and return them all in an OrderedDict. """ - na_models = [] ad_models = [] if to_andes: # Instantiate an ANDES system @@ -74,7 +73,9 @@ def _dump_system(system, skip_empty, orient='records', to_andes=False): for name, instance in system.models.items(): if skip_empty and instance.n == 0: continue - if to_andes and name in ad_models: + if to_andes: + if name not in ad_models: + continue # NOTE: ommit parameters that are not in ANDES skip_params = [] ams_params = list(instance.params.keys()) @@ -82,9 +83,7 @@ def _dump_system(system, skip_empty, orient='records', to_andes=False): skip_params = list(set(ams_params) - set(andes_params)) df = instance.cache.df_in.drop(skip_params, axis=1, errors='ignore') out[name] = df.to_dict(orient=orient) - continue - na_models.append(name) - - df = instance.cache.df_in - out[name] = df.to_dict(orient=orient) + else: + df = instance.cache.df_in + out[name] = df.to_dict(orient=orient) return json.dumps(out, indent=2) diff --git a/ams/io/matpower.py b/ams/io/matpower.py index c359abdf..46977f4c 100644 --- a/ams/io/matpower.py +++ b/ams/io/matpower.py @@ -58,7 +58,7 @@ def mpc2system(mpc: dict, system) -> bool: for data in mpc['bus']: # idx ty pd qd gs bs area vmag vang baseKV zone vmax vmin - # 0 1 2 3 4 5 6 7 8 9 10 11 12 + # 0 1 2 3 4 5 6 7 8 9 10 11 12 idx = int(data[0]) ty = data[1] if ty == 3: @@ -87,13 +87,23 @@ def mpc2system(mpc: dict, system) -> bool: system.add('Shunt', bus=idx, name='Shunt ' + str(idx), Vn=baseKV, g=gs, b=bs) gen_idx = 0 - for data in mpc['gen']: - # bus pg qg qmax qmin vg mbase status pmax pmin pc1 pc2 - # 0 1 2 3 4 5 6 7 8 9 10 11 - # qc1min qc1max qc2min qc2max ramp_agc ramp_10 ramp_30 ramp_q - # 12 13 14 15 16 17 18 19 - # apf - # 20 + if mpc['gen'].shape[1] <= 10: # missing data + mpc_gen = np.zeros((mpc['gen'].shape[0], 21), dtype=np.float64) + mpc_gen[:, :10] = mpc['gen'] + mbase = base_mva + mpc_gen[:, 16] = system.PV.Ragc.default * mbase / 60 + mpc_gen[:, 17] = system.PV.R10.default * mbase / 6 + mpc_gen[:, 18] = system.PV.R30.default * mbase / 2 + mpc_gen[:, 19] = system.PV.Rq.default * mbase / 60 + else: + mpc_gen = 
mpc['gen'] + for data in mpc_gen: + # bus pg qg qmax qmin vg mbase status pmax pmin + # 0 1 2 3 4 5 6 7 8 9 + # pc1 pc2 qc1min qc1max qc2min qc2max ramp_agc ramp_10 + # 10 11 12 13 14 15 16 17 + # ramp_30 ramp_q apf + # 18 19 20 bus_idx = int(data[0]) gen_idx += 1 @@ -151,9 +161,9 @@ def mpc2system(mpc: dict, system) -> bool: for data in mpc['branch']: # fbus tbus r x b rateA rateB rateC ratio angle - # 0 1 2 3 4 5 6 7 8 9 + # 0 1 2 3 4 5 6 7 8 9 # status angmin angmax Pf Qf Pt Qt - # 10 11 12 13 14 15 16 + # 10 11 12 13 14 15 16 fbus = int(data[0]) tbus = int(data[1]) r = data[2] @@ -195,7 +205,7 @@ def mpc2system(mpc: dict, system) -> bool: for data, gen in zip(mpc['gencost'], gen_idx): # NOTE: only type 2 costs are supported for now # type startup shutdown n c2 c1 c0 - # 0 1 2 3 4 5 6 + # 0 1 2 3 4 5 6 if data[0] != 2: raise ValueError('Only MODEL 2 costs are supported') # TODO: Add Model 1 @@ -256,15 +266,21 @@ def system2mpc(system) -> dict: In the ``gen`` section, slack generators preceeds PV generators. - This function is revised from ``andes.io.matpower.system2mpc``. - - Compared to the original one, this function includes the - generator cost data in the ``gencost`` section. + Compared to the ``andes.io.matpower.system2mpc``, + this function includes the generator cost data in the ``gencost`` + section. + Additionally, ``c2`` and ``c1`` are scaled by ``base_mva`` to match + MATPOWER unit ``MW``. Parameters ---------- system : ams.core.system.System AMS system + + Returns + ------- + mpc: dict + MATPOWER mpc dict """ mpc = dict(version='2', @@ -350,23 +366,28 @@ def system2mpc(system) -> dict: branch[:, 2] = system.Line.r.v branch[:, 3] = system.Line.x.v branch[:, 4] = system.Line.b.v - branch[:, 5] = system.Line.rate_a.v - branch[:, 6] = system.Line.rate_b.v - branch[:, 7] = system.Line.rate_c.v + branch[:, 5] = system.Line.rate_a.v * base_mva + branch[:, 6] = system.Line.rate_b.v * base_mva + branch[:, 7] = system.Line.rate_c.v * base_mva branch[:, 8] = system.Line.tap.v branch[:, 9] = system.Line.phi.v * rad2deg branch[:, 10] = system.Line.u.v # --- GCost --- + # NOTE: adjust GCost sequence to match the generator sequence if system.GCost.n > 0: + stg_idx = system.Slack.idx.v + system.PV.idx.v + gcost_idx = system.GCost.find_idx(keys=['gen'], + values=[stg_idx]) + gcost_uid = system.GCost.idx2uid(gcost_idx) gencost = mpc['gencost'] - gencost[:, 0] = system.GCost.type.v - gencost[:, 1] = system.GCost.csu.v - gencost[:, 2] = system.GCost.csd.v + gencost[:, 0] = system.GCost.type.v[gcost_uid] + gencost[:, 1] = system.GCost.csu.v[gcost_uid] + gencost[:, 2] = system.GCost.csd.v[gcost_uid] gencost[:, 3] = 3 - gencost[:, 4] = system.GCost.c2.v / base_mva / base_mva - gencost[:, 5] = system.GCost.c1.v / base_mva - gencost[:, 6] = system.GCost.c0.v / base_mva + gencost[:, 4] = system.GCost.c2.v[gcost_uid] / base_mva / base_mva + gencost[:, 5] = system.GCost.c1.v[gcost_uid] / base_mva + gencost[:, 6] = system.GCost.c0.v[gcost_uid] else: mpc.pop('gencost') diff --git a/ams/io/pypower.py b/ams/io/pypower.py index 98c82221..897e7860 100644 --- a/ams/io/pypower.py +++ b/ams/io/pypower.py @@ -80,8 +80,7 @@ def ppc2system(ppc: dict, system) -> bool: bool True if successful; False otherwise. 
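# system2mpc() rescales cost coefficients from the AMS per-unit base to the
# MATPOWER MW base (c2 by base_mva**2, c1 by base_mva, c0 unchanged). A quick
# stand-alone numeric check of that scaling with assumed coefficient values:
import numpy as np

base_mva = 100.0
c2_pu, c1_pu, c0 = 0.01, 0.3, 0.0        # $/(p.u.^2), $/(p.u.), $
c2_mw = c2_pu / base_mva / base_mva      # $/(MW^2)
c1_mw = c1_pu / base_mva                 # $/(MW)
pg_pu = 2.0                              # 200 MW on a 100 MVA base
pg_mw = pg_pu * base_mva
cost_pu = c2_pu * pg_pu**2 + c1_pu * pg_pu + c0
cost_mw = c2_mw * pg_mw**2 + c1_mw * pg_mw + c0
assert np.isclose(cost_pu, cost_mw)      # same $ cost in either unit system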
""" - mpc2system(ppc, system) - return True + return mpc2system(ppc, system) def system2ppc(system) -> dict: @@ -97,27 +96,9 @@ def system2ppc(system) -> dict: # Map the original bus indices to consecutive values # Adjust discontinuous bus indices BUS_I = mpc['bus'][:, 0].astype(int) - if np.max(mpc['bus'][:, 0]) > mpc['bus'].shape[0]: - # Find the unique indices in busi - old_bus = np.unique(BUS_I) - # Generate a mapping dictionary to map the unique indices to consecutive values - mapping = {busi0: i for i, busi0 in enumerate(old_bus)} - # Map the original bus indices to consecutive values - # BUS_I - mpc['bus'][:, 0] = np.array([mapping[busi0] for busi0 in BUS_I]) - # GEN_BUS - GEN_BUS = mpc['gen'][:, 0].astype(int) - mpc['gen'][:, 0] = np.array([mapping[busi0] for busi0 in GEN_BUS]) - # F_BUS - F_BUS = mpc['branch'][:, 0].astype(int) - mpc['branch'][:, 0] = np.array([mapping[busi0] for busi0 in F_BUS]) - # T_BUS - T_BUS = mpc['branch'][:, 1].astype(int) - mpc['branch'][:, 1] = np.array([mapping[busi0] for busi0 in T_BUS]) - # adjust the bus index to start from 0 - if np.min(mpc['bus'][:, 0]) > 0: - mpc['bus'][:, 0] -= 1 # BUS_I - mpc['gen'][:, 0] -= 1 # GEN_BUS - mpc['branch'][:, 0] -= 1 # F_BUS - mpc['branch'][:, 1] -= 1 # T_BUS + bus_map = {busi0: i for i, busi0 in enumerate(BUS_I)} + mpc['bus'][:, 0] = np.array([bus_map[busi0] for busi0 in BUS_I]) + mpc['gen'][:, 0] = np.array([bus_map[busi0] for busi0 in mpc['gen'][:, 0].astype(int)]) + mpc['branch'][:, 0] = np.array([bus_map[busi0] for busi0 in mpc['branch'][:, 0].astype(int)]) + mpc['branch'][:, 1] = np.array([bus_map[busi0] for busi0 in mpc['branch'][:, 1].astype(int)]) return mpc diff --git a/ams/io/xlsx.py b/ams/io/xlsx.py index 1e23ab7c..bc7826c4 100644 --- a/ams/io/xlsx.py +++ b/ams/io/xlsx.py @@ -61,7 +61,6 @@ def _write_system(system, writer, skip_empty, to_andes=False): Rewrite function ``andes.io.xlsx._write_system`` to skip non-andes sheets. 
""" - skip_params = [] ad_models = [] if to_andes: # Instantiate an ANDES system @@ -73,13 +72,16 @@ def _write_system(system, writer, skip_empty, to_andes=False): if skip_empty and instance.n == 0: continue instance.cache.refresh("df_in") - df = instance.cache.df_in - if to_andes and name in ad_models: + if to_andes: + if name not in ad_models: + continue # NOTE: ommit parameters that are not in ANDES skip_params = [] ams_params = list(instance.params.keys()) andes_params = list(sa.models[name].params.keys()) skip_params = list(set(ams_params) - set(andes_params)) df = instance.cache.df_in.drop(skip_params, axis=1, errors='ignore') + else: + df = instance.cache.df_in df.to_excel(writer, sheet_name=name, freeze_panes=(1, 0)) return writer diff --git a/ams/models/__init__.py b/ams/models/__init__.py index 7d0a38ad..fe37b201 100644 --- a/ams/models/__init__.py +++ b/ams/models/__init__.py @@ -11,12 +11,13 @@ ('static', ['PQ', 'PV', 'Slack']), ('shunt', ['Shunt']), ('line', ['Line']), - ('distributed', ['ESD1']), - ('renewable', ['REGCV1']), + ('distributed', ['PVD1', 'ESD1']), + ('renewable', ['REGCA1', 'REGCV1', 'REGCV2']), ('area', ['Area']), ('region', ['Region']), - ('reserve', ['SFR', 'SR', 'NSR']), - ('cost', ['GCost', 'SFRCost', 'SRCost', 'NSRCost', 'REGCV1Cost']), + ('reserve', ['SFR', 'SR', 'NSR', 'VSGR']), + ('cost', ['GCost', 'SFRCost', 'SRCost', 'NSRCost', 'VSGCost']), + ('cost', ['DCost']), ('timeslot', ['TimeSlot', 'EDTSlot', 'UCTSlot']), ]) diff --git a/ams/models/cost.py b/ams/models/cost.py index eb3d9535..4d0611ba 100644 --- a/ams/models/cost.py +++ b/ams/models/cost.py @@ -84,10 +84,10 @@ def __init__(self, system, config): self.gen = IdxParam(info="static generator index", model='StaticGen', mandatory=True,) - self.cru = NumParam(default=0, power=False, + self.cru = NumParam(default=0, tex_name=r'c_{r}', unit=r'$/(p.u.*h)', info='cost for RegUp reserve',) - self.crd = NumParam(default=0, power=False, + self.crd = NumParam(default=0, tex_name=r'c_{r}', unit=r'$/(p.u.*h)', info='cost for RegDn reserve',) @@ -103,7 +103,7 @@ def __init__(self, system, config): self.gen = IdxParam(info="static generator index", model='StaticGen', mandatory=True,) - self.csr = NumParam(default=0, power=False, + self.csr = NumParam(default=0, tex_name=r'c_{sr}', unit=r'$/(p.u.*h)', info='cost for spinning reserve',) @@ -119,12 +119,27 @@ def __init__(self, system, config): self.gen = IdxParam(info="static generator index", model='StaticGen', mandatory=True,) - self.cnsr = NumParam(default=0, power=False, + self.cnsr = NumParam(default=0, tex_name=r'c_{nsr}', unit=r'$/(p.u.*h)', info='cost for non-spinning reserve',) -class REGCV1CostData(ModelData): +class DCost(ModelData, Model): + """ + Linear cost model for dispatchable loads. 
+ """ + def __init__(self, system, config): + ModelData.__init__(self) + Model.__init__(self, system, config) + self.group = 'Cost' + self.pq = IdxParam(info="static load index", + mandatory=True,) + self.cdp = NumParam(default=999, + tex_name=r'c_{d,p}', unit=r'$/(p.u.*h)', + info='cost for unserve load penalty',) + + +class VSGCostData(ModelData): def __init__(self): super().__init__() self.reg = IdxParam(info="Renewable generator idx", @@ -133,24 +148,22 @@ def __init__(self): ) self.cm = NumParam(default=0, info='cost for emulated inertia (M)', - power=False, tex_name=r'c_{r}', unit=r'$/s', ) self.cd = NumParam(default=0, info='cost for emulated damping (D)', - power=False, tex_name=r'c_{r}', unit=r'$/p.u.', ) -class REGCV1Cost(REGCV1CostData, Model): +class VSGCost(VSGCostData, Model): """ - Linear cost model for :ref:`REGCV1` emulated inertia (M) and damping (D). + Linear cost model for VSG emulated inertia (M) and damping (D). """ def __init__(self, system, config): - REGCV1CostData.__init__(self) + VSGCostData.__init__(self) Model.__init__(self, system, config) self.group = 'Cost' diff --git a/ams/models/distributed/__init__.py b/ams/models/distributed/__init__.py index 5137d579..e07807ce 100644 --- a/ams/models/distributed/__init__.py +++ b/ams/models/distributed/__init__.py @@ -1 +1,2 @@ +from ams.models.distributed.pvd1 import PVD1 # NOQA from ams.models.distributed.esd1 import ESD1 # NOQA diff --git a/ams/models/distributed/esd1.py b/ams/models/distributed/esd1.py index baa69d9c..7ad133d2 100644 --- a/ams/models/distributed/esd1.py +++ b/ams/models/distributed/esd1.py @@ -2,44 +2,20 @@ Distributed energy storage model. """ -from andes.core.param import IdxParam, NumParam # NOQA -from andes.core.model import ModelData # NOQA +from andes.core.param import NumParam -from ams.core.model import Model # NOQA -from ams.core.var import Algeb # NOQA +from ams.core.model import Model +from ams.models.distributed.pvd1 import PVD1Data -class ESD1Data(ModelData): + +class ESD1Data(PVD1Data): """ ESD1 model data. """ def __init__(self): - ModelData.__init__(self) - - self.bus = IdxParam(model='Bus', - info="interface bus id", - mandatory=True, - ) - - self.gen = IdxParam(info="static generator index", - mandatory=True, - ) - - self.Sn = NumParam(default=100.0, tex_name='S_n', - info='device MVA rating', - unit='MVA', - ) - - self.gammap = NumParam(default=1.0, tex_name=r'\gamma_p', - info='Ratio of ESD1.pref0 w.r.t to that of static PV', - vrange='(0, 1]', - ) - - self.gammaq = NumParam(default=1.0, tex_name=r'\gamma_q', - info='Ratio of ESD1.qref0 w.r.t to that of static PV', - vrange='(0, 1]', - ) + PVD1Data.__init__(self) self.SOCmin = NumParam(default=0.0, tex_name='SOC_{min}', info='Minimum required value for SOC in limiter', @@ -69,41 +45,27 @@ def __init__(self): ) -class ESD1Model(Model): - """ - ESD1 model for dispatch. +class ESD1(ESD1Data, Model): """ + Distributed energy storage model, revised from ANDES ``ESD1`` model for + dispatch. - def __init__(self, system=None, config=None): - super().__init__(system, config) - - -class ESD1(ESD1Data, ESD1Model): - """ - Distributed energy storage model. - Revised from ANDES ``ESD1`` model for dispatch purpose. - - Notes - ----- - 1. some parameters are removed from the original dynamic model, including: - ``fn``, ``busf``, ``xc``, ``pqflag``, ``igreg``, ``v0``, ``v1``, ``dqdv``, - ``fdbd``, ``ddn``, ``ialim``, ``vt0``, ``vt1``, ``vt2``, ``vt3``, - ``vrflag``, ``ft0``, ``ft1``, ``ft2``, ``ft3``, ``frflag``, ``tip``, - ``tiq``, ``recflag``. 
+ Following parameters are omitted from the original dynamic model: + ``fn``, ``busf``, ``xc``, ``pqflag``, ``igreg``, ``v0``, ``v1``, + ``dqdv``, ``fdbd``, ``ddn``, ``ialim``, ``vt0``, ``vt1``, ``vt2``, + ``vt3``, ``vrflag``, ``ft0``, ``ft1``, ``ft2``, ``ft3``, ``frflag``, + ``tip``, ``tiq``, ``recflag``. Reference: - [1] Powerworld, Renewable Energy Electrical Control Model REEC_C - - [2] ANDES Documentation, ESD1 + [1] ANDES Documentation, ESD1 Available: - https://www.powerworld.com/WebHelp/Content/TransientModels_HTML/Exciter%20REEC_C.htm https://docs.andes.app/en/latest/groupdoc/DG.html#esd1 """ def __init__(self, system, config): ESD1Data.__init__(self) - ESD1Model.__init__(self, system, config) + Model.__init__(self, system, config) self.group = 'DG' diff --git a/ams/models/distributed/pvd1.py b/ams/models/distributed/pvd1.py new file mode 100644 index 00000000..2178dd18 --- /dev/null +++ b/ams/models/distributed/pvd1.py @@ -0,0 +1,67 @@ +""" +Distributed PV models. +""" + +from andes.core.param import IdxParam, NumParam +from andes.core.model import ModelData + +from ams.core.model import Model + + +class PVD1Data(ModelData): + """ + PVD1 model data. + """ + + def __init__(self): + ModelData.__init__(self) + + self.bus = IdxParam(model='Bus', + info="interface bus id (place holder)", + mandatory=True, + ) + + self.gen = IdxParam(info="static generator index", + mandatory=True, + ) + + self.Sn = NumParam(default=100.0, tex_name='S_n', + info='device MVA rating', + unit='MVA', + ) + + self.gammap = NumParam(default=1.0, tex_name=r'\gamma_p', + info='Ratio of ESD1.pref0 w.r.t to that of static PV', + vrange='(0, 1]', + ) + + self.gammaq = NumParam(default=1.0, tex_name=r'\gamma_q', + info='Ratio of ESD1.qref0 w.r.t to that of static PV', + vrange='(0, 1]', + ) + + +class PVD1(PVD1Data, Model): + """ + Distributed PV model, revised from ANDES ``PVD1`` model for + dispatch. + + Following parameters are omitted from the original dynamic model: + ``fn``, ``busf``, ``xc``, ``pqflag``, ``igreg``, ``v0``, ``v1``, + ``dqdv``, ``fdbd``, ``ddn``, ``ialim``, ``vt0``, ``vt1``, ``vt2``, + ``vt3``, ``vrflag``, ``ft0``, ``ft1``, ``ft2``, ``ft3``, ``frflag``, + ``tip``, ``tiq``, ``recflag``. + + Reference: + + [1] ANDES Documentation, PVD1 + + Available: + + https://docs.andes.app/en/latest/groupdoc/DG.html#pvd1 + """ + + def __init__(self, system, config): + PVD1Data.__init__(self) + Model.__init__(self, system, config) + self.group = 'DG' diff --git a/ams/models/group.py b/ams/models/group.py index 19e082eb..338c92a4 100644 --- a/ams/models/group.py +++ b/ams/models/group.py @@ -126,6 +126,19 @@ def __init__(self): self.common_vars.extend(('Pe', 'Qe')) +class VSG(GroupBase): + """ + Renewable generator with virtual synchronous generator (VSG) control group. + + Note that this is a group separate from ``RenGen`` for VSG dispatch study. + """ + + def __init__(self): + super().__init__() + self.common_params.extend(('bus', 'gen', 'Sn')) + self.common_vars.extend(('Pe', 'Qe')) + + class DG(GroupBase): """ Distributed generation (small-scale). diff --git a/ams/models/renewable/__init__.py b/ams/models/renewable/__init__.py index 7c7002b0..b9c28f31 100644 --- a/ams/models/renewable/__init__.py +++ b/ams/models/renewable/__init__.py @@ -2,4 +2,4 @@ Top-level package for renewable models. 
""" -from ams.models.renewable.regcv1 import REGCV1 # NOQA +from ams.models.renewable.regc import REGCA1, REGCV1, REGCV2 # NOQA diff --git a/ams/models/renewable/regc.py b/ams/models/renewable/regc.py new file mode 100644 index 00000000..eec0f5b5 --- /dev/null +++ b/ams/models/renewable/regc.py @@ -0,0 +1,111 @@ +""" +RenGen dispatch model. +""" + +from andes.core.param import NumParam, IdxParam, ExtParam +from andes.core.model import ModelData +from ams.core.model import Model + + +class REGCData(ModelData): + """ + Data container for RenGen dispatch model. + """ + + def __init__(self): + ModelData.__init__(self) + self.bus = IdxParam(model='Bus', + info="interface bus idx", + mandatory=True, + ) + self.gen = IdxParam(info="static generator index", + mandatory=True, + ) + self.Sn = NumParam(default=100.0, tex_name='S_n', + info='device MVA rating', + unit='MVA', + ) + self.gammap = NumParam(default=1.0, + info="P ratio of linked static gen", + tex_name=r'\gamma_P' + ) + self.gammaq = NumParam(default=1.0, + info="Q ratio of linked static gen", + tex_name=r'\gamma_Q' + ) + + +class REGCA1(REGCData, Model): + """ + Renewable generator dispatch model. + + Reference: + + [1] ANDES Documentation, REGCA1 + + Available: + https://docs.andes.app/en/latest/groupdoc/RenGen.html#regca1 + """ + + def __init__(self, system=None, config=None) -> None: + REGCData.__init__(self) + Model.__init__(self, system, config) + self.group = 'RenGen' + + +class REGCV1(REGCData, Model): + """ + Voltage-controlled converter model (virtual synchronous generator) with + inertia emulation. + + Here Mmax and Dmax are assumed to be constant, but they might subject to + the operating condition of the converter. + + Notes + ----- + - The generation is defined by group :ref:`StaticGen` + - Generation cost is defined by model :ref:`GCost` + - Inertia emulation cost is defined by model :ref:`VSGCost` + + Reference: + + [1] ANDES Documentation, REGCV1 + + Available: + + https://docs.andes.app/en/latest/groupdoc/RenGen.html#regcv1 + """ + + def __init__(self, system=None, config=None) -> None: + REGCData.__init__(self) + Model.__init__(self, system, config) + self.group = 'VSG' + self.zone = ExtParam(model='Bus', src='zone', + indexer=self.bus, export=False, + info='Retrieved zone idx', + vtype=str, default=None) + self.Mmax = NumParam(default=99, tex_name='M_{max}', + info='Maximum inertia emulation', + unit='s', + power=True,) + self.Dmax = NumParam(default=99, tex_name='D_{max}', + info='Maximum damping emulation', + unit='p.u.', + power=True,) + + +class REGCV2(REGCV1): + """ + Voltage-controlled VSC. + + Reference: + + [1] ANDES Documentation, REGCV2 + + Available: + + https://docs.andes.app/en/latest/groupdoc/RenGen.html#regcv2 + """ + + def __init__(self, system=None, config=None) -> None: + REGCV1.__init__(self, system, config) diff --git a/ams/models/renewable/regcv1.py b/ams/models/renewable/regcv1.py deleted file mode 100644 index 2afc1eed..00000000 --- a/ams/models/renewable/regcv1.py +++ /dev/null @@ -1,46 +0,0 @@ -""" -REGCV1 model. - -Voltage-controlled converter model (virtual synchronous generator) with inertia emulation. -""" - -from andes.core.param import NumParam, IdxParam # NOQA -from andes.core.model import ModelData # NOQA -from ams.core.model import Model # NOQA - - -class REGCV1(ModelData, Model): - """ - Voltage-controlled converter model (virtual synchronous generator) with inertia emulation. 
- - Notes - ----- - - The generation is defined by group :ref:`StaticGen` - - Generation cost is defined by model :ref:`GCost` - - Inertia emulation cost is defined by model :ref:`REGCV1Cost` - """ - - def __init__(self, system=None, config=None) -> None: - ModelData.__init__(self) - Model.__init__(self, system, config) - self.group = 'RenGen' - - self.bus = IdxParam(model='Bus', - info="interface bus id", - mandatory=True, - ) - self.gen = IdxParam(info="static generator index", - mandatory=True, - ) - self.Sn = NumParam(default=100.0, tex_name='S_n', - info='device MVA rating', - unit='MVA', - ) - self.gammap = NumParam(default=1.0, - info="P ratio of linked static gen", - tex_name=r'\gamma_P' - ) - self.gammaq = NumParam(default=1.0, - info="Q ratio of linked static gen", - tex_name=r'\gamma_Q' - ) diff --git a/ams/models/reserve.py b/ams/models/reserve.py index 22e85ceb..8959242f 100644 --- a/ams/models/reserve.py +++ b/ams/models/reserve.py @@ -71,3 +71,24 @@ def __init__(self, system, config): self.demand = NumParam(default=0.1, unit='%', info='Zonal non-spinning reserve demand', tex_name=r'd_{NSR}') + + +class VSGR(ReserveData, Model): + """ + Zonal VSG provided reserve model. + + Notes + ----- + - ``Zone`` model is required for this model, and zone is defined by Param ``Bus.zone``. + """ + + def __init__(self, system, config): + ReserveData.__init__(self) + Model.__init__(self, system, config) + self.group = 'Reserve' + self.dvm = NumParam(default=0, unit='s', + info='Zonal virtual inertia demand', + tex_name=r'd_{VM}') + self.dvd = NumParam(default=0, unit='p.u.', + info='Zonal virtual damping demand', + tex_name=r'd_{VD}') diff --git a/ams/models/static/pq.py b/ams/models/static/pq.py index 2d8645d6..6edbc148 100644 --- a/ams/models/static/pq.py +++ b/ams/models/static/pq.py @@ -1,7 +1,7 @@ -from collections import OrderedDict # NOQA +from collections import OrderedDict -from andes.core.param import ExtParam # NOQA -from andes.models.static.pq import PQData # NOQA +from andes.core.param import NumParam, ExtParam +from andes.models.static.pq import PQData from ams.core.model import Model @@ -56,3 +56,6 @@ def __init__(self, system, config): self.zone = ExtParam(model='Bus', src='zone', indexer=self.bus, export=False, info='Retrieved zone idx', vtype=str, default=None, ) + self.ctrl = NumParam(default=1, + info="load controllability", + tex_name=r'c_{trl}',) diff --git a/ams/opt/omodel.py b/ams/opt/omodel.py index c8af43d5..1a5d8d4e 100644 --- a/ams/opt/omodel.py +++ b/ams/opt/omodel.py @@ -8,10 +8,11 @@ import re import numpy as np +import scipy.sparse as spr +from scipy.sparse import csr_matrix as c_sparse # NOQA from andes.core.common import Config - -from ams.utils import timer +from andes.utils.misc import elapsed import cvxpy as cp @@ -124,8 +125,8 @@ class Param(OptzBase): True to set the parameter as positive. neg: bool, optional True to set the parameter as negative. - sparsity: list, optional - Sparsity pattern of the parameter. + sparse: bool, optional + True to set the parameter as sparse. 
""" def __init__(self, @@ -144,11 +145,11 @@ def __init__(self, integer: Optional[bool] = False, pos: Optional[bool] = False, neg: Optional[bool] = False, - sparsity: Optional[list] = None, + sparse: Optional[list] = False, ): OptzBase.__init__(self, name=name, info=info, unit=unit) self.no_parse = no_parse # True to skip parsing the parameter - self.sparsity = sparsity # sparsity pattern + self.sparse = sparse self.config = Config(name=self.class_name) # `config` that can be exported @@ -172,13 +173,19 @@ def parse(self): config = self.config.as_dict() # NOQA sub_map = self.om.rtn.syms.sub_map shape = np.shape(self.v) - code_param = f"tmp=param(shape={shape}, **config)" + # NOTE: it seems that there is no need to use re.sub here + code_param = f"self.optz=param(shape={shape}, **config)" for pattern, replacement, in sub_map.items(): code_param = re.sub(pattern, replacement, code_param) - exec(code_param) - exec("self.optz=tmp") + exec(code_param, globals(), locals()) try: - exec("self.optz.value = self.v") + msg = f"Parameter <{self.name}> is set as sparse, " + msg += "but the value is not sparse." + val = "self.v" + if self.sparse: + if not spr.issparse(self.v): + val = "c_sparse(self.v)" + exec(f"self.optz.value = {val}", globals(), locals()) except ValueError: msg = f"Parameter <{self.name}> has non-numeric value, " msg += "no_parse=True is applied." @@ -216,14 +223,13 @@ class Var(OptzBase): unit : str, optional Unit tex_name : str - LaTeX-formatted variable symbol. If is None, the value of `name` will be - used. + LaTeX-formatted variable symbol. Defaults to the value of ``name``. name : str, optional Variable name. One should typically assigning the name directly because it will be automatically assigned by the model. The value of ``name`` will be the symbol name to be used in expressions. src : str, optional - Source variable name. If is None, the value of `name` will be used. + Source variable name. Defaults to the value of ``name``. model : str, optional Name of the owner model or group. horizon : ams.routines.RParam, optional @@ -257,10 +263,10 @@ class Var(OptzBase): Attributes ---------- - a : array-like - variable address - v : array-like - local-storage of the variable value + a : np.ndarray + Variable address. + _v : np.ndarray + Local-storage of the variable value. rtn : ams.routines.Routine The owner routine instance. """ @@ -294,8 +300,10 @@ def __init__(self, self.unit = unit self.tex_name = tex_name if tex_name else name - self.owner = None # instance of the owner Model - self.id = None # variable internal index inside a model (assigned in run time) + # instance of the owner Model + self.owner = None + # variable internal index inside a model (assigned in run time) + self.id = None OptzBase.__init__(self, name=name, info=info, unit=unit) self.src = src self.is_group = False @@ -305,9 +313,7 @@ def __init__(self, self.horizon = horizon self._shape = shape self._v = None - - self.optz_lb = None - self.optz_ub = None + self.a: np.ndarray = np.array([], dtype=int) self.config = Config(name=self.class_name) # `config` that can be exported @@ -326,23 +332,25 @@ def __init__(self, ('neg', neg), ))) - self.id = None # variable internal index inside a model (assigned in run time) - - self.a: np.ndarray = np.array([], dtype=int) - @property def v(self): """ Return the CVXPY variable value. 
""" - if self.name in self.om.vars: - out = self.om.vars[self.name].value if self._v is None else self._v + if self.optz is None: + return None + if self.optz.value is None: + try: + shape = self.optz.shape + return np.zeros(shape) + except AttributeError: + return None else: - out = None - return out + return self.optz.value @v.setter def v(self, value): + # FIXME: is this safe? self._v = value def get_idx(self): @@ -391,11 +399,11 @@ def parse(self): nc = shape[1] if len(shape) > 1 else 0 else: raise ValueError(f"Invalid shape {self._shape}.") - code_var = f"tmp=var({shape}, **config)" + code_var = f"self.optz=var({shape}, **config)" for pattern, replacement, in sub_map.items(): code_var = re.sub(pattern, replacement, code_var) - exec(code_var) # build the Var object - exec("self.optz = tmp") # assign the to the optz attribute + # build the Var object + exec(code_var, globals(), locals()) return True def __repr__(self): @@ -418,29 +426,15 @@ class Constraint(OptzBase): info : str, optional Additional informational text about the constraint. type : str, optional - The type of constraint, defaults to 'uq'. It might represent - different types of mathematical relationships such as equality or - inequality. + The type of constraint, which determines the mathematical relationship. + Possible values include 'uq' (inequality, default) and 'eq' (equality). Attributes ---------- - name : str or None - Name of the constraint. - e_str : str or None - Expression string of the constraint. - info : str or None - Additional information about the constraint. - type : str - Type of the constraint. - rtn : ams.routines.Routine - The owner routine instance. is_disabled : bool Flag indicating if the constraint is disabled, False by default. - - Notes - ----- - - The attribute 'type' needs to be properly handled with predefined types. - - There is also a TODO for incorporating constraint information from the solver. + rtn : ams.routines.Routine + The owner routine instance. """ def __init__(self, @@ -451,9 +445,11 @@ def __init__(self, ): OptzBase.__init__(self, name=name, info=info) self.e_str = e_str - self.type = type # TODO: determine constraint type + self.type = type self.is_disabled = False - # TODO: add constraint info from solver + self.dual = None + self.code = None + # TODO: add constraint info from solver, maybe dual? def parse(self, no_code=True): """ @@ -464,9 +460,10 @@ def parse(self, no_code=True): no_code : bool, optional Flag indicating if the code should be shown, True by default. 
""" - sub_map = self.om.rtn.syms.sub_map if self.is_disabled: return True + # parse the expression str + sub_map = self.om.rtn.syms.sub_map code_constr = self.e_str for pattern, replacement in sub_map.items(): try: @@ -474,34 +471,72 @@ def parse(self, no_code=True): except TypeError as e: logger.error(f"Error in parsing constr <{self.name}>.") raise e - if self.type == 'uq': - code_constr = f'tmp = {code_constr} <= 0' - elif self.type == 'eq': - code_constr = f'tmp = {code_constr} == 0' - else: + # store the parsed expression str code + self.code = code_constr + # parse the constraint type + code_constr = "self.optz=" + code_constr + if self.type not in ['uq', 'eq']: raise ValueError(f'Constraint type {self.type} is not supported.') - logger.debug(f"Set constrs {self.name}: {self.e_str} {'<= 0' if self.type == 'uq' else '== 0'}") + code_constr += " <= 0" if self.type == 'uq' else " == 0" + msg = f"Parse Constr <{self.name}>: {self.e_str} " + msg += " <= 0" if self.type == 'uq' else " == 0" + logger.debug(msg) if not no_code: - logger.info(f"Code Constr: {code_constr}") - exec(code_constr) - exec("self.optz = tmp") + logger.info(f"<{self.name}> code: {code_constr}") + # set the parsed constraint + exec(code_constr, globals(), locals()) return True def __repr__(self): - enabled = 'ON' if self.name in self.om.constrs else 'OFF' - out = f"[{enabled}]: {self.e_str}" - out += " =0" if self.type == 'eq' else " <=0" + enabled = 'OFF' if self.is_disabled else 'ON' + out = f"{self.class_name}: {self.name} [{enabled}]" return out + @property + def v2(self): + """ + Return the calculated constraint LHS value. + Note that ``v`` should be used primarily as it is obtained + from the solver directly. + ``v2`` is for debugging purpose, and should be consistent with ``v``. + """ + if self.code is None: + logger.info(f"Constraint <{self.name}> is not parsed yet.") + return None + + val_map = self.om.rtn.syms.val_map + code = self.code + for pattern, replacement in val_map.items(): + try: + code = re.sub(pattern, replacement, code) + except TypeError as e: + logger.error(f"Error in parsing value for constr <{self.name}>.") + raise e + + try: + logger.debug(f"Value code: {code}") + out = eval(code) + return out + except Exception as e: + logger.error(f"Error in calculating constr <{self.name}>.") + logger.error(f"Original error: {e}") + return None + @property def v(self): """ Return the CVXPY constraint LHS value. """ - if self.name in self.om.constrs: - return self.om.constrs[self.name]._expr.value - else: + if self.optz is None: return None + if self.optz._expr.value is None: + try: + shape = self._expr.shape + return np.zeros(shape) + except AttributeError: + return None + else: + return self.optz._expr.value class Objective(OptzBase): @@ -521,19 +556,11 @@ class Objective(OptzBase): info : str, optional Additional informational text about the objective function. sense : str, optional - The sense of the objective function. It should be either 'min' - for minimization or 'max' for maximization. Default is 'min'. + The sense of the objective function, default to 'min'. + `min` for minimization and `max` for maximization. Attributes ---------- - name : str or None - Name of the objective function. - e_str : str or None - Expression string of the objective function. - info : str or None - Additional information about the objective function. - sense : str - Sense of the objective function ('min' or 'max'). v : NoneType The value of the objective function. It needs to be set through computation. 
@@ -550,13 +577,46 @@ def __init__(self, OptzBase.__init__(self, name=name, info=info, unit=unit) self.e_str = e_str self.sense = sense - self._v = None + self._v = 0 + self.code = None + + @property + def v2(self): + """ + Return the calculated objective value. + Note that ``v`` should be used primarily as it is obtained + from the solver directly. + ``v2`` is for debugging purpose, and should be consistent with ``v``. + """ + if self.code is None: + logger.info(f"Objective <{self.name}> is not parsed yet.") + return None + + val_map = self.om.rtn.syms.val_map + code = self.code + for pattern, replacement in val_map.items(): + try: + code = re.sub(pattern, replacement, code) + except TypeError as e: + logger.error(f"Error in parsing value for obj <{self.name}>.") + raise e + + try: + logger.debug(f"Value code: {code}") + out = eval(code) + return out + except Exception as e: + logger.error(f"Error in calculating obj <{self.name}>.") + logger.error(f"Original error: {e}") + return None @property def v(self): """ Return the CVXPY objective value. """ + if self.optz is None: + return None out = self.om.obj.value out = self._v if out is None else out return out @@ -574,31 +634,27 @@ def parse(self, no_code=True): no_code : bool, optional Flag indicating if the code should be shown, True by default. """ + # parse the expression str sub_map = self.om.rtn.syms.sub_map code_obj = self.e_str for pattern, replacement, in sub_map.items(): code_obj = re.sub(pattern, replacement, code_obj) - if self.sense == 'min': - code_obj = f'cp.Minimize({code_obj})' - elif self.sense == 'max': - code_obj = f'cp.Maximize({code_obj})' - else: + # store the parsed expression str code + self.code = code_obj + if self.sense not in ['min', 'max']: raise ValueError(f'Objective sense {self.sense} is not supported.') - code_obj = 'self.om.obj=' + code_obj - logger.debug(f"Set obj {self.name}: {self.sense}. {self.e_str}") + sense = 'cp.Minimize' if self.sense == 'min' else 'cp.Maximize' + code_obj = f"self.optz={sense}({code_obj})" + logger.debug(f"Parse Objective <{self.name}>: {self.sense.upper()}. {self.e_str}") if not no_code: - logger.info(f"Code Obj: {code_obj}") - exec(code_obj) + logger.info(f"Code: {code_obj}") + # set the parsed objective function + exec(code_obj, globals(), locals()) + exec("self.om.obj = self.optz", globals(), locals()) return True def __repr__(self): - name_str = f"{self.name}=" if self.name is not None else "obj=" - try: - v = self.v - except Exception: - v = None - value_str = f"{v:.4f}, " if v is not None else "" - return f"{name_str}{value_str}{self.e_str}" + return f"{self.class_name}: {self.name} [{self.sense.upper()}]" class OModel: @@ -612,7 +668,7 @@ class OModel: Attributes ---------- - mdl: cvxpy.Problem + prob: cvxpy.Problem Optimization model. params: OrderedDict Parameters. @@ -622,44 +678,33 @@ class OModel: Constraints. obj: Objective Objective function. - n: int - Number of decision variables. - m: int - Number of constraints. """ def __init__(self, routine): self.rtn = routine - self.mdl = None + self.prob = None self.params = OrderedDict() self.vars = OrderedDict() self.constrs = OrderedDict() self.obj = None - self.n = 0 # number of decision variables - self.m = 0 # number of constraints - - @timer - def setup(self, no_code=True, force_generate=False): + self.initialized = False + self._parsed = False + + def _parse(self, no_code=True): """ - Setup the optimziation model from symbolic description. - - Decision variables are the ``Var`` of a routine. 
- For example, the power outputs ``pg`` of routine ``DCOPF`` - are decision variables. - - Disabled constraints (indicated by attr ``is_disabled``) will not - be added to the optimization model. - + Parse the optimization model from the symbolic description. + Parameters ---------- no_code : bool, optional - Flag indicating if the code should be shown, True by default. - force : bool, optional - True to force generating symbols, False by default. + Flag indicating if the parsing code should be displayed, + True by default. """ rtn = self.rtn - rtn.syms.generate_symbols(force_generate=force_generate) + rtn.syms.generate_symbols(force_generate=False) + # --- add RParams and Services as parameters --- + t0, _ = elapsed() for key, val in rtn.params.items(): if not val.no_parse: try: @@ -669,7 +714,11 @@ def setup(self, no_code=True, force_generate=False): msg += f"Original error: {e}" raise Exception(msg) setattr(self, key, val.optz) + _, s = elapsed(t0) + logger.debug(f"Parse Params in {s}") + # --- add decision variables --- + t0, _ = elapsed() for key, val in rtn.vars.items(): try: val.parse() @@ -678,7 +727,11 @@ def setup(self, no_code=True, force_generate=False): msg += f"Original error: {e}" raise Exception(msg) setattr(self, key, val.optz) + _, s = elapsed(t0) + logger.debug(f"Parse Vars in {s}") + # --- add constraints --- + t0, _ = elapsed() for key, val in rtn.constrs.items(): try: val.parse(no_code=no_code) @@ -687,36 +740,78 @@ def setup(self, no_code=True, force_generate=False): msg += f"Original error: {e}" raise Exception(msg) setattr(self, key, val.optz) + _, s = elapsed(t0) + logger.debug(f"Parse Constrs in {s}") + # --- parse objective functions --- - if rtn.type == 'PF': - # NOTE: power flow type has no objective function - pass - elif rtn.obj is not None: - try: - rtn.obj.parse(no_code=no_code) - except Exception as e: - msg = f"Failed to parse Obj <{rtn.obj.name}>. " - msg += f"Original error: {e}" - raise Exception(msg) + t0, _ = elapsed() + if rtn.type != 'PF': + if rtn.obj is not None: + try: + rtn.obj.parse(no_code=no_code) + except Exception as e: + msg = f"Failed to parse Objective <{rtn.obj.name}>. " + msg += f"Original error: {e}" + raise Exception(msg) + else: + logger.warning(f"{rtn.class_name} has no objective function!") + _, s = elapsed(t0) + self._parsed = False + return self._parsed + _, s = elapsed(t0) + logger.debug(f"Parse Objective in {s}") + + self._parsed = True + return self._parsed + + def init(self, no_code=True): + """ + Set up the optimization model from the symbolic description. + + This method initializes the optimization model by parsing decision variables, + constraints, and the objective function from the associated routine. + Parameters + ---------- + no_code : bool, optional + Flag indicating if the parsing code should be displayed, + True by default. + + Returns + ------- + bool + Returns True if the setup is successful, False otherwise. 
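# init() above first parses Params, Vars, Constraints, and the Objective, then
# assembles the CVXPY problem while skipping disabled constraints. The same
# two-step flow written directly in CVXPY (a sketch, not the generated code):
import cvxpy as cp

x = cp.Variable(2, nonneg=True)                       # "parsed" decision vars
constrs = {'c1': cp.sum(x) >= 1, 'c2': None}          # None mimics a disabled constraint
obj = cp.Minimize(cp.sum_squares(x))                  # "parsed" objective
prob = cp.Problem(obj, [c for c in constrs.values() if c is not None])
prob.solve()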
+ """ + t_setup, _ = elapsed() + + if not self._parsed: + self._parse(no_code=no_code) + + if self.rtn.type != 'PF': # --- finalize the optimziation formulation --- - code_mdl = "problem(self.obj, [constr for constr in self.constrs.values()])" + code_prob = "self.prob = problem(self.obj, " + constrs_skip = [] + constrs_add = [] + for key, val in self.rtn.constrs.items(): + if (val is None) or (val.is_disabled): + constrs_skip.append(f'<{key}>') + else: + constrs_add.append(val.optz) + code_prob += f"[constr for constr in constrs_add])" for pattern, replacement in self.rtn.syms.sub_map.items(): - code_mdl = re.sub(pattern, replacement, code_mdl) - code_mdl = "self.mdl=" + code_mdl - exec(code_mdl) - - # --- count --- - n_list = [cpvar.size for cpvar in self.vars.values()] - self.n = np.sum(n_list) # number of decision variables - m_list = [cpconstr.size for cpconstr in self.constrs.values()] - self.m = np.sum(m_list) # number of constraints - - if rtn.type != 'PF' and rtn.obj is None: - logger.warning(f"{rtn.class_name} has no objective function.") - return False - - return True + code_prob = re.sub(pattern, replacement, code_prob) + msg = f"Finalize: {code_prob}" + if len(constrs_skip) > 0: + msg += f"; Skipped constrs: " + msg += ", ".join(constrs_skip) + logger.debug(msg) + exec(code_prob, globals(), locals()) + + _, s_setup = elapsed(t_setup) + self.initialized = True + logger.debug(f"OModel for <{self.rtn.class_name}> initialized in {s_setup}.") + + return self.initialized @property def class_name(self): diff --git a/ams/pypower/routines/opf.py b/ams/pypower/routines/opf.py index 3cad33b6..a2078ceb 100644 --- a/ams/pypower/routines/opf.py +++ b/ams/pypower/routines/opf.py @@ -2,29 +2,28 @@ Module to solve OPF. """ -import logging # NOQA -from copy import deepcopy # NOQA +import logging +from copy import deepcopy -import numpy as np # NOQA -from numpy import flatnonzero as find # NOQA +import numpy as np +from numpy import flatnonzero as find -import scipy.sparse as sp # NOQA -from scipy.sparse import csr_matrix as c_sparse # NOQA +import scipy.sparse as sp +from scipy.sparse import csr_matrix as c_sparse -from andes.shared import deg2rad # NOQA -from andes.utils.misc import elapsed # NOQA +from andes.shared import deg2rad +from andes.utils.misc import elapsed -from ams.pypower.core import ppoption, pipsopf_solver, ipoptopf_solver # NOQA -from ams.pypower.utils import isload, fairmax # NOQA -from ams.pypower.idx import IDX # NOQA -from ams.pypower.io import loadcase # NOQA -import ams.pypower.utils as putil # NOQA +from ams.pypower.core import (ppoption, pipsopf_solver, ipoptopf_solver) +from ams.pypower.utils import isload, fairmax +from ams.pypower.idx import IDX +from ams.pypower.io import loadcase from ams.pypower.make import (makeYbus, makeAvl, makeApq, - makeAang, makeAy) # NOQA + makeAang, makeAy) -import ams.pypower.routines.opffcns as opfcn # NOQA +import ams.pypower.routines.opffcns as opfcn -from ams.pypower.toggle import toggle_reserves # NOQA +from ams.pypower.toggle import toggle_reserves logger = logging.getLogger(__name__) @@ -40,8 +39,6 @@ def runopf(casedata, ppopt): sstats = dict(solver_name='PYPOWER', num_iters=1) # solver stats ppopt = ppoption(ppopt) - - # ----- run the optimal power flow ----- r = fopf(casedata, ppopt) sstats['solver_name'] = 'PYPOWER-PIPS' sstats['num_iters'] = r['raw']['output']['iterations'] @@ -1204,7 +1201,7 @@ def linear_constraints(self): # nnzA = nnzA + nnz(self.lin["data"].A.(self.lin.order{k})) if self.lin["N"]: - A = 
scipy.sparse.lil_matrix((self.lin["N"], self.var["N"])) + A = sp.sparse.lil_matrix((self.lin["N"], self.var["N"])) u = np.Inf * np.ones(self.lin["N"]) l = -u else: @@ -1259,7 +1256,6 @@ def userdata(self, name, val=None): else: return np.array([]) - def opf_execute(om, ppopt): """ Executes the OPF specified by an OPF model object. diff --git a/ams/pypower/routines/pflow.py b/ams/pypower/routines/pflow.py index 4516106a..991eae06 100644 --- a/ams/pypower/routines/pflow.py +++ b/ams/pypower/routines/pflow.py @@ -181,6 +181,8 @@ def runpf(casedata, ppopt): # update data matrices with solution bus, gen, branch = pfsoln(baseMVA, bus, gen, branch, Ybus, Yf, Yt, V, ref, pv, pq) + enforce_q_lims = ppopt["ENFORCE_Q_LIMS"] + logger.debug(f'ENFORCE_Q_LIMS={enforce_q_lims}') if ppopt["ENFORCE_Q_LIMS"]: # enforce generator Q limits # find gens with violated Q constraints gen_status = gen[:, IDX.gen.GEN_STATUS] > 0 @@ -192,14 +194,17 @@ def runpf(casedata, ppopt): if len(mx) > 0 or len(mn) > 0: # we have some Q limit violations # first check for INFEASIBILITY (all remaining gens violating) - infeas = np.union1d(mx, mn) - remaining = find(gen_status & - (bus[gen[:, IDX.gen.GEN_BUS], IDX.bus.BUS_TYPE] == IDX.bus.PV | - bus[gen[:, IDX.gen.GEN_BUS], IDX.bus.BUS_TYPE] == IDX.bus.REF)) - if len(infeas) == len(remaining) or all(infeas == remaining): - logger.warning( - 'All %d remaining gens exceed to their Q limits: INFEASIBLE PROBLEM\n' % len(infeas)) - + gen_violate = np.union1d(mx, mn) + gen_bus = gen[:, IDX.gen.GEN_BUS].astype(int) + cond_pv = bus[gen_bus, IDX.bus.BUS_TYPE] == IDX.bus.PV + cond_ref = bus[gen_bus, IDX.bus.BUS_TYPE] == IDX.bus.REF + gen_remain = find(gen_status & (cond_pv | cond_ref)) + msg = f'gen_violate = {gen_violate}\nremaining = {gen_remain}' + logger.debug(msg) + # TODO: condition can be improved, but not sure now + if len(gen_remain) <= 0: + msg = f'Infeasible: remaining {len(gen_violate)} gens exceed Q limits.' + logger.warning(msg) success = 0 break @@ -213,46 +218,50 @@ def runpf(casedata, ppopt): else: mx = mx[k] mn = [] - if mx: - logger.debug('Gen ' + ', '.join(str(i + 1) - for i in mx) + ' at upper Q limit, converting to PQ bus') - if mn: - logger.debug('Gen ' + ', '.join(str(i + 1) - for i in mn) + ' at lower Q limit, converting to PQ bus') + + if len(mx) > 0: + msg = 'Following Gen convert to PQ bus because' + msg += ' exceed Q upper limits:\n' + msg += ', '.join(str(i + 1) for i in mx) + logger.debug(msg) + if len(mn) > 0: + msg = 'Following Gen convert to PQ bus because' + msg += ' exceed Q lower limits:\n' + msg += ', '.join(str(i + 1) for i in mn) + logger.debug(msg) # save corresponding limit values fixedQg[mx] = gen[mx, IDX.gen.QMAX] fixedQg[mn] = gen[mn, IDX.gen.QMIN] mx = np.r_[mx, mn].astype(int) - # convert to IDX.bus.PQ bus - # Convert generators to IDX.bus.PQ bus + # convert to PQ bus + # Convert generators PV bus to PQ bus for i in range(len(mx)): idx = mx[i] gen[idx, IDX.gen.QG] = fixedQg[idx] # Set Qg to binding gen[idx, IDX.gen.GEN_STATUS] = 0 # Temporarily turn off generator bi = int(gen[idx, IDX.gen.GEN_BUS]) # Get the bus index - bus[bi, [IDX.bus.PD, IDX.bus.QD]] -= gen[idx, - [IDX.gen.PG, IDX.gen.QG]] # Adjust load + bus[bi, [IDX.bus.PD, IDX.bus.QD]] -= gen[idx, [IDX.gen.PG, IDX.gen.QG]] # Adjust load if len(ref) > 1 and any(bus[gen[mx, IDX.gen.GEN_BUS], IDX.bus.BUS_TYPE] == IDX.bus.REF): raise ValueError("PYPOWER cannot enforce Q limits for systems with multiple slack buses. 
" "Please ensure there is only one slack bus in the system.") - # & set bus type to IDX.bus.PQ + # set bus type to PQ bus[gen[mx, IDX.gen.GEN_BUS].astype(int), IDX.bus.BUS_TYPE] = IDX.bus.PQ # update bus index lists of each type of bus ref_temp = ref ref, pv, pq = putils.bustypes(bus, gen) - # previous line can modify lists to select new IDX.bus.REF bus + # previous line can modify lists to select new REF bus # if there was none, so we should update bus with these # just to keep them consistent if ref != ref_temp: bus[ref, IDX.bus.BUS_TYPE] = IDX.bus.REF bus[pv, IDX.bus.BUS_TYPE] = pv - logger.debug('Bus %d is new slack bus\n' % ref) + logger.debug(f'Bus<{ref[0]}> is new slack bus') limited = np.r_[limited, mx].astype(int) else: diff --git a/ams/pypower/utils.py b/ams/pypower/utils.py index cafd6fb1..23a9aaaa 100644 --- a/ams/pypower/utils.py +++ b/ams/pypower/utils.py @@ -58,11 +58,10 @@ def bustypes(bus, gen): # pick a new reference bus if for some reason there is none (may have been # shut down) - if len(ref) == 0: + if (len(ref) == 0) & (len(pv) > 0): ref = np.zeros(1, dtype=int) ref[0] = pv[0] # use the first PV bus pv = pv[1:] # take it off PV list - return ref, pv, pq diff --git a/ams/routines/__init__.py b/ams/routines/__init__.py index 8e2cb649..a638ba55 100644 --- a/ams/routines/__init__.py +++ b/ams/routines/__init__.py @@ -2,8 +2,8 @@ Dispatch routines. """ -from collections import OrderedDict # NOQA -from andes.utils.func import list_flatten # NOQA +from collections import OrderedDict +from andes.utils.func import list_flatten from ams.routines.routine import RoutineData, RoutineModel # NOQA all_routines = OrderedDict([ @@ -12,9 +12,9 @@ ('cpf', ['CPF']), ('acopf', ['ACOPF']), ('dcopf', ['DCOPF']), - ('ed', ['ED', 'EDES']), - ('rted', ['RTED', 'RTEDES']), - ('uc', ['UC', 'UCES']), + ('ed', ['ED', 'EDDG', 'EDES']), + ('rted', ['RTED', 'RTEDDG', 'RTEDES', 'RTEDVIS']), + ('uc', ['UC', 'UCDG', 'UCES']), ('dopf', ['DOPF', 'DOPFVIS']), ]) diff --git a/ams/routines/acopf.py b/ams/routines/acopf.py index bfa2a97d..350a4df4 100644 --- a/ams/routines/acopf.py +++ b/ams/routines/acopf.py @@ -1,17 +1,17 @@ """ ACOPF routines. 
""" -import logging # NOQA -from collections import OrderedDict # NOQA +import logging +from collections import OrderedDict -from ams.pypower import runopf # NOQA -from ams.pypower.core import ppoption # NOQA +from ams.pypower import runopf +from ams.pypower.core import ppoption -from ams.io.pypower import system2ppc # NOQA -from ams.core.param import RParam # NOQA +from ams.io.pypower import system2ppc +from ams.core.param import RParam -from ams.routines.dcpf import DCPFlowBase # NOQA -from ams.opt.omodel import Var, Constraint, Objective # NOQA +from ams.routines.dcpf import DCPFlowBase +from ams.opt.omodel import Var, Constraint, Objective logger = logging.getLogger(__name__) @@ -51,8 +51,18 @@ def unpack(self, res): """ super().unpack(res) + # --- Bus --- + bus_idx = self.vBus.get_idx() + self.vBus.optz.value = self.system.Bus.get(src='v', attr='v', idx=bus_idx) + self.aBus.optz.value = self.system.Bus.get(src='a', attr='v', idx=bus_idx) + + # --- Gen --- + gen_idx = self.pg.get_idx() + self.pg.optz.value = self.system.StaticGen.get(src='p', attr='v', idx=gen_idx) + self.qg.optz.value = self.system.StaticGen.get(src='q', attr='v', idx=gen_idx) + # --- Objective --- - self.obj.v = res['f'] # TODO: check unit + self.obj.v = res['f'] self.system.recent = self.system.routines[self.class_name] return True @@ -118,9 +128,9 @@ def __init__(self, system, config): unit=r'$', model='GCost', indexer='gen', imodel='StaticGen', no_parse=True) - self.ql = RParam(info='reactive power demand (system base)', - name='ql', tex_name=r'q_{l}', - model='mats', src='ql', + self.qd = RParam(info='reactive demand', + name='qd', tex_name=r'q_{d}', + model='StaticLoad', src='q0', unit='p.u.',) # --- bus --- self.aBus = Var(info='Bus voltage angle', @@ -143,12 +153,11 @@ def __init__(self, system, config): # --- constraints --- self.pb = Constraint(name='pb', info='power balance', - e_str='sum(pl) - sum(pg)', - type='eq', - ) + e_str='sum(pd) - sum(pg)', + type='eq',) # TODO: ACOPF formulation # --- objective --- - self.obj = Objective(name='tc', + self.obj = Objective(name='obj', info='total cost', e_str='sum(c2 * pg**2 + c1 * pg + c0)', sense='min',) diff --git a/ams/routines/dcopf.py b/ams/routines/dcopf.py index e311630d..4aa8f400 100644 --- a/ams/routines/dcopf.py +++ b/ams/routines/dcopf.py @@ -15,16 +15,35 @@ logger = logging.getLogger(__name__) -class DCOPFBase(RoutineModel): +class DCOPF(RoutineModel): """ - Base class for DCOPF dispatch model. + DC optimal power flow (DCOPF). - Overload the ``solve``, ``unpack``, and ``run`` methods. + Line flow variable `plf` is calculated as ``Bf@aBus + Pfinj`` + after solving the problem in ``_post_solve()`` . 
""" def __init__(self, system, config): RoutineModel.__init__(self, system, config) + self.info = 'DC Optimal Power Flow' + self.type = 'DCED' + # --- Data Section --- # --- generator cost --- + self.c2 = RParam(info='Gen cost coefficient 2', + name='c2', tex_name=r'c_{2}', + unit=r'$/(p.u.^2)', model='GCost', + indexer='gen', imodel='StaticGen', + nonneg=True, no_parse=True) + self.c1 = RParam(info='Gen cost coefficient 1', + name='c1', tex_name=r'c_{1}', + unit=r'$/(p.u.)', model='GCost', + indexer='gen', imodel='StaticGen',) + self.c0 = RParam(info='Gen cost coefficient 0', + name='c0', tex_name=r'c_{0}', + unit=r'$', model='GCost', + indexer='gen', imodel='StaticGen', + no_parse=True) + # --- generator --- self.ug = RParam(info='Gen connection status', name='ug', tex_name=r'u_{g}', model='StaticGen', src='u', @@ -45,21 +64,6 @@ def __init__(self, system, config): name='nctrle', tex_name=r'c_{trl,n,e}', u=self.nctrl, u2=self.ug, fun=np.multiply, no_parse=True) - self.c2 = RParam(info='Gen cost coefficient 2', - name='c2', tex_name=r'c_{2}', - unit=r'$/(p.u.^2)', model='GCost', - indexer='gen', imodel='StaticGen', - nonneg=True) - self.c1 = RParam(info='Gen cost coefficient 1', - name='c1', tex_name=r'c_{1}', - unit=r'$/(p.u.)', model='GCost', - indexer='gen', imodel='StaticGen',) - self.c0 = RParam(info='Gen cost coefficient 0', - name='c0', tex_name=r'c_{0}', - unit=r'$', model='GCost', - indexer='gen', imodel='StaticGen', - no_parse=True) - # --- generator --- self.pmax = RParam(info='Gen maximum active power', name='pmax', tex_name=r'p_{g, max}', unit='p.u.', model='StaticGen', @@ -69,62 +73,105 @@ def __init__(self, system, config): unit='p.u.', model='StaticGen', no_parse=False,) self.pg0 = RParam(info='Gen initial active power', - name='p0', tex_name=r'p_{g,0}', - unit='p.u.', model='StaticGen',) - + name='p0', tex_name=r'p_{g, 0}', + unit='p.u.', + model='StaticGen', src='pg0') # --- load --- self.pd = RParam(info='active demand', name='pd', tex_name=r'p_{d}', model='StaticLoad', src='p0', unit='p.u.',) - # --- line --- - self.x = RParam(info='line reactance', - name='x', tex_name=r'x', - model='Line', src='x', - unit='p.u.', no_parse=True,) self.rate_a = RParam(info='long-term flow limit', name='rate_a', tex_name=r'R_{ATEA}', - unit='MVA', model='Line') - + unit='MVA', model='Line',) + # --- shunt --- + self.gsh = RParam(info='shunt conductance', + name='gsh', tex_name=r'g_{sh}', + model='Shunt', src='g', + no_parse=True,) # --- connection matrix --- self.Cg = RParam(info='Gen connection matrix', name='Cg', tex_name=r'C_{g}', model='mats', src='Cg', - no_parse=True,) - self.Cs = RParam(info='Slack connection matrix', - name='Cs', tex_name=r'C_{s}', - model='mats', src='Cs', - no_parse=True,) + no_parse=True, sparse=True,) self.Cl = RParam(info='Load connection matrix', name='Cl', tex_name=r'C_{l}', model='mats', src='Cl', - no_parse=True,) + no_parse=True, sparse=True,) self.Cft = RParam(info='Line connection matrix', name='Cft', tex_name=r'C_{ft}', model='mats', src='Cft', - no_parse=True,) - self.PTDF = RParam(info='Power Transfer Distribution Factor', - name='PTDF', tex_name=r'P_{TDF}', - model='mats', src='PTDF', - no_parse=True,) - - self.Cgi = NumOp(u=self.Cg, fun=np.linalg.pinv, - name='Cgi', tex_name=r'C_{g}^{-1}', - info='inverse of Cg', - no_parse=True,) - self.Cli = NumOp(u=self.Cl, fun=np.linalg.pinv, - name='Cli', tex_name=r'C_{l}^{-1}', - info='inverse of Cl', - no_parse=True,) + no_parse=True, sparse=True,) + self.Csh = RParam(info='Shunt connection matrix', + 
name='Csh', tex_name=r'C_{sh}', + model='mats', src='Csh', + no_parse=True, sparse=True,) + # --- system matrix --- + self.Bbus = RParam(info='Bus admittance matrix', + name='Bbus', tex_name=r'B_{bus}', + model='mats', src='Bbus', + no_parse=True, sparse=True,) + self.Bf = RParam(info='Bf matrix', + name='Bf', tex_name=r'B_{f}', + model='mats', src='Bf', + no_parse=True, sparse=True,) + self.Pbusinj = RParam(info='Bus power injection vector', + name='Pbusinj', tex_name=r'P_{bus}^{inj}', + model='mats', src='Pbusinj', + no_parse=True,) + self.Pfinj = RParam(info='Line power injection vector', + name='Pfinj', tex_name=r'P_{f}^{inj}', + model='mats', src='Pfinj', + no_parse=True,) + + # --- Model Section --- + # --- generation --- + self.pg = Var(info='Gen active power', + unit='p.u.', + name='pg', tex_name=r'p_g', + model='StaticGen', src='p', + v0=self.pg0) + pglb = '-pg + mul(nctrle, pg0) + mul(ctrle, pmin)' + self.pglb = Constraint(name='pglb', info='pg min', + e_str=pglb, type='uq',) + pgub = 'pg - mul(nctrle, pg0) - mul(ctrle, pmax)' + self.pgub = Constraint(name='pgub', info='pg max', + e_str=pgub, type='uq',) + # --- bus --- + self.aBus = Var(info='Bus voltage angle', + unit='rad', + name='aBus', tex_name=r'\theta_{bus}', + model='Bus', src='a',) + # --- power balance --- + pb = 'Bbus@aBus + Pbusinj + Cl@pd + Csh@gsh - Cg@pg' + self.pb = Constraint(name='pb', info='power balance', + e_str=pb, type='eq',) + # --- line flow --- + self.plf = Var(info='Line flow', + unit='p.u.', + name='plf', tex_name=r'p_{lf}', + model='Line',) + self.plflb = Constraint(info='line flow lower bound', + name='plflb', type='uq', + e_str='-Bf@aBus - Pfinj - rate_a',) + self.plfub = Constraint(info='line flow upper bound', + name='plfub', type='uq', + e_str='Bf@aBus + Pfinj - rate_a',) + # --- objective --- + obj = 'sum(mul(c2, power(pg, 2)))' + obj += '+ sum(mul(c1, pg))' + obj += '+ sum(mul(ug, c0))' + self.obj = Objective(name='obj', + info='total cost', unit='$', + sense='min', e_str=obj,) def solve(self, **kwargs): """ Solve the routine optimization model. """ - res = self.om.mdl.solve(**kwargs) - return res + return self.om.prob.solve(**kwargs) def run(self, no_code=True, **kwargs): """ @@ -167,11 +214,18 @@ def run(self, no_code=True, **kwargs): """ return RoutineModel.run(self, no_code=no_code, **kwargs) + def _post_solve(self): + # --- post-solving calculations --- + # line flow: Bf@aBus + Pfinj + # using sparse matrix in MatProcessor is faster + self.plf.optz.value = self.system.mats.Bf._v@self.aBus.v + self.Pfinj.v + return True + def unpack(self, **kwargs): """ Unpack the results from CVXPY model. """ - # --- copy results from solver into routine algeb --- + # --- solver results to routine algeb --- for _, var in self.vars.items(): # --- copy results from routine algeb into system algeb --- if var.model is None: # if no owner @@ -188,69 +242,10 @@ def unpack(self, **kwargs): # NOTE: only unpack the variables that are in the model or group try: var.owner.set(src=var.src, attr='v', idx=idx, value=var.v) - except KeyError: # failed to find source var in the owner (model or group) - pass - except TypeError: # failed to find source var in the owner (model or group) + # failed to find source var in the owner (model or group) + except (KeyError, TypeError): pass + + # label the most recent solved routine self.system.recent = self.system.routines[self.class_name] return True - - -class DCOPF(DCOPFBase): - """ - Standard DC optimal power flow (DCOPF). 
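# A self-contained toy version of the DCOPF formulated above (nodal balance
# with Bbus, line flows from Bf) on an assumed 3-bus, 3-line, 2-generator
# system; an explicit angle reference is added here to keep the toy problem
# well posed:
import numpy as np
import cvxpy as cp

Cft = np.array([[1, -1, 0], [0, 1, -1], [1, 0, -1]])    # line-bus incidence
b = 10.0 * np.ones(3)                                   # line susceptances 1/x
Bf = np.diag(b) @ Cft
Bbus = Cft.T @ Bf
Cg = np.array([[1, 0], [0, 1], [0, 0]])                 # gens at buses 1 and 2
pd_bus = np.array([0.0, 0.0, 1.0])                      # 1.0 p.u. load at bus 3
c1 = np.array([0.3, 0.4])                               # linear gen costs
pmax = np.array([0.6, 0.6])
rate_a = 999.0 * np.ones(3)                             # non-binding line limits

pg = cp.Variable(2, nonneg=True)
aBus = cp.Variable(3)
constrs = [Bbus @ aBus + pd_bus - Cg @ pg == 0,         # nodal power balance
           Bf @ aBus <= rate_a, -Bf @ aBus <= rate_a,   # line flow limits
           pg <= pmax, aBus[0] == 0]                    # capacity + angle reference
prob = cp.Problem(cp.Minimize(c1 @ pg), constrs)
prob.solve()
print(pg.value, Bf @ aBus.value)                        # dispatch and line flows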
- - In this model, the bus injected power ``pn`` is used as internal variable - between generator output and load demand. - """ - - def __init__(self, system, config): - DCOPFBase.__init__(self, system, config) - self.info = 'DC Optimal Power Flow' - self.type = 'DCED' - # --- vars --- - self.pg = Var(info='Gen active power', - unit='p.u.', - name='pg', tex_name=r'p_{g}', - model='StaticGen', src='p', - v0=self.pg0) - # NOTE: `ug*pmin` results in unexpected error - pglb = '-pg + mul(nctrle, pg0) + mul(ctrle, pmin)' - self.pglb = Constraint(name='pglb', info='pg min', - e_str=pglb, type='uq',) - pgub = 'pg - mul(nctrle, pg0) - mul(ctrle, pmax)' - self.pgub = Constraint(name='pgub', info='pg max', - e_str=pgub, type='uq',) - - self.aBus = Var(info='Bus voltage angle', - name='aBus', tex_name=r'\theta_{n}', - unit='rad', model='Bus',) - - self.plf = Var(info='Line active power', - name='plf', tex_name=r'p_{lf}', - unit='p.u.', model='Line',) - self.plflb = Constraint(name='plflb', info='Line power lower bound', - e_str='-plf - rate_a', type='uq',) - self.plfub = Constraint(name='plfub', info='Line power upper bound', - e_str='plf - rate_a', type='uq',) - - # --- constraints --- - self.pb = Constraint(name='pb', info='power balance', - e_str='sum(pd) - sum(pg)', - type='eq',) - # TODO: add eqn to get aBus - self.aref = Constraint(name='aref', type='eq', - info='reference bus angle', - e_str='Cs@aBus',) - - self.pnb = Constraint(name='pnb', type='eq', - info='nodal power injection', - e_str='PTDF@(Cgi@pg - Cli@pd) - plf',) - - # --- objective --- - self.obj = Objective(name='tc', - info='total cost', unit='$', - sense='min',) - self.obj.e_str = 'sum_squares(mul(c2, pg))' \ - '+ sum(mul(c1, pg))' \ - '+ sum(mul(ug, c0))' diff --git a/ams/routines/dcopf0.py b/ams/routines/dcopf0.py new file mode 100644 index 00000000..eb92db26 --- /dev/null +++ b/ams/routines/dcopf0.py @@ -0,0 +1,253 @@ +""" +DCOPF routines. +""" +import logging + +import numpy as np +from ams.core.param import RParam +from ams.core.service import NumOp, NumOpDual + +from ams.routines.routine import RoutineModel + +from ams.opt.omodel import Var, Constraint, Objective + + +logger = logging.getLogger(__name__) + + +class DCOPF(RoutineModel): + """ + DC optimal power flow (DCOPF). + For large-scale convex problems, the Dual Simplex can be efficient. + + When using the GUROBI solver, the optimization method can be specified + through the `Method` parameter, and all available methods are: + 0: Primal Simplex; 1: Dual Simplex; 2: Barrier; + 3: Concurrent; 4: Deterministic Concurrent + + When using the CPLEX solver, the optimization method can also be + specified. + To specify the method in CPLEX, use the `cplex_params` argument with + `solver='CPLEX'` in the solve function. For example, to use the Dual + Simplex method, set `cplex_params={'lpmethod': 1}`. 
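+
+    A minimal call sketch (an illustration only, assuming this routine is
+    available on a loaded system instance ``sp`` and that the named solvers
+    are installed):
+
+    >>> sp.DCOPF.run(solver='GUROBI', Method=1)
+    >>> sp.DCOPF.run(solver='CPLEX', cplex_params={'lpmethod': 1})
+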
+ CPLEX supports the following methods: + 0: Primal Simplex; 1: Dual Simplex; 2: Barrier; + 3: Non-deterministic Concurrent; 4: Deterministic Concurrent; + 5: Network Simplex (suitable for network flow problems) + """ + + def __init__(self, system, config): + RoutineModel.__init__(self, system, config) + self.info = 'DC Optimal Power Flow' + self.type = 'DCED' + # --- Data Section --- + # --- generator cost --- + self.c2 = RParam(info='Gen cost coefficient 2', + name='c2', tex_name=r'c_{2}', + unit=r'$/(p.u.^2)', model='GCost', + indexer='gen', imodel='StaticGen', + nonneg=True) + self.c1 = RParam(info='Gen cost coefficient 1', + name='c1', tex_name=r'c_{1}', + unit=r'$/(p.u.)', model='GCost', + indexer='gen', imodel='StaticGen',) + self.c0 = RParam(info='Gen cost coefficient 0', + name='c0', tex_name=r'c_{0}', + unit=r'$', model='GCost', + indexer='gen', imodel='StaticGen', + no_parse=True) + # --- generator --- + self.ug = RParam(info='Gen connection status', + name='ug', tex_name=r'u_{g}', + model='StaticGen', src='u', + no_parse=True) + self.ctrl = RParam(info='Gen controllability', + name='ctrl', tex_name=r'c_{trl}', + model='StaticGen', src='ctrl', + no_parse=True) + self.ctrle = NumOpDual(info='Effective Gen controllability', + name='ctrle', tex_name=r'c_{trl, e}', + u=self.ctrl, u2=self.ug, + fun=np.multiply, no_parse=True) + self.nctrl = NumOp(u=self.ctrl, fun=np.logical_not, + name='nctrl', tex_name=r'c_{trl,n}', + info='Effective Gen uncontrollability', + no_parse=True,) + self.nctrle = NumOpDual(info='Effective Gen uncontrollability', + name='nctrle', tex_name=r'c_{trl,n,e}', + u=self.nctrl, u2=self.ug, + fun=np.multiply, no_parse=True) + self.pmax = RParam(info='Gen maximum active power', + name='pmax', tex_name=r'p_{g, max}', + unit='p.u.', model='StaticGen', + no_parse=False,) + self.pmin = RParam(info='Gen minimum active power', + name='pmin', tex_name=r'p_{g, min}', + unit='p.u.', model='StaticGen', + no_parse=False,) + self.pg0 = RParam(info='Gen initial active power', + name='p0', tex_name=r'p_{g, 0}', + unit='p.u.', + model='StaticGen', src='pg0') + # --- load --- + self.pd = RParam(info='active demand', + name='pd', tex_name=r'p_{d}', + model='StaticLoad', src='p0', + unit='p.u.',) + # --- line --- + self.x = RParam(info='line reactance', + name='x', tex_name=r'x', + model='Line', src='x', + unit='p.u.', no_parse=True,) + self.rate_a = RParam(info='long-term flow limit', + name='rate_a', tex_name=r'R_{ATEA}', + unit='MVA', model='Line',) + # --- connection matrix --- + self.Cg = RParam(info='Gen connection matrix', + name='Cg', tex_name=r'C_{g}', + model='mats', src='Cg', + no_parse=True, sparse=True,) + self.Cl = RParam(info='Load connection matrix', + name='Cl', tex_name=r'C_{l}', + model='mats', src='Cl', + no_parse=True, sparse=True,) + self.Cft = RParam(info='Line connection matrix', + name='Cft', tex_name=r'C_{ft}', + model='mats', src='Cft', + no_parse=True, sparse=True,) + self.PTDF = RParam(info='Power Transfer Distribution Factor', + name='PTDF', tex_name=r'P_{TDF}', + model='mats', src='PTDF', + no_parse=True,) + # --- Model Section --- + # --- generation --- + self.pg = Var(info='Gen active power', + unit='p.u.', + name='pg', tex_name=r'p_g', + model='StaticGen', src='p', + v0=self.pg0) + # NOTE: Var bounds need to set separately + pglb = '-pg + mul(nctrle, pg0) + mul(ctrle, pmin)' + self.pglb = Constraint(name='pglb', info='pg min', + e_str=pglb, type='uq',) + pgub = 'pg - mul(nctrle, pg0) - mul(ctrle, pmax)' + self.pgub = Constraint(name='pgub', info='pg 
max', + e_str=pgub, type='uq',) + # --- bus --- + self.png = Var(info='Bus active power from gen', + unit='p.u.', + name='png', tex_name=r'p_{ng}', + model='Bus',) + self.pnd = Var(info='Bus active power from load', + unit='p.u.', + name='pnd', tex_name=r'p_{nd}', + model='Bus',) + self.pngb = Constraint(name='pngb', type='eq', + e_str='Cg@png - pg', + info='Bus active power from gen',) + self.pndb = Constraint(name='pndb', type='eq', + e_str='Cl@pnd - pd', + info='Bus active power from load',) + # --- line --- + # NOTE: `ug*pmin` results in unexpected error + self.plf = Var(info='Line active power', + name='plf', tex_name=r'p_{lf}', + unit='p.u.', model='Line',) + self.plflb = Constraint(name='plflb', info='Line power lower bound', + e_str='-plf - rate_a', type='uq',) + self.plfub = Constraint(name='plfub', info='Line power upper bound', + e_str='plf - rate_a', type='uq',) + # --- shunt --- + self.gsh = RParam(info='shunt conductance', + name='gsh', tex_name=r'g_{sh}', + model='Shunt', src='g', + no_parse=True,) + # --- power balance --- + self.pb = Constraint(name='pb', info='power balance', + e_str='sum(pd) + sum(gsh) - sum(pg)', + type='eq',) + self.pnb = Constraint(name='pnb', type='eq', + info='nodal power injection', + e_str='PTDF@(png - pnd) - plf',) + # --- objective --- + obj = 'sum(mul(c2, power(pg, 2)))' + obj += '+ sum(mul(c1, pg))' + obj += '+ sum(c0)' + self.obj = Objective(name='obj', + info='total cost', unit='$', + sense='min', e_str=obj,) + + def solve(self, **kwargs): + """ + Solve the routine optimization model. + """ + return self.om.prob.solve(**kwargs) + + def run(self, no_code=True, **kwargs): + """ + Run the routine. + + Parameters + ---------- + no_code : bool, optional + If True, print the generated CVXPY code. Defaults to False. + + Other Parameters + ---------------- + solver: str, optional + The solver to use. For example, 'GUROBI', 'ECOS', 'SCS', or 'OSQP'. + verbose : bool, optional + Overrides the default of hiding solver output and prints logging + information describing CVXPY's compilation process. + gp : bool, optional + If True, parses the problem as a disciplined geometric program + instead of a disciplined convex program. + qcp : bool, optional + If True, parses the problem as a disciplined quasiconvex program + instead of a disciplined convex program. + requires_grad : bool, optional + Makes it possible to compute gradients of a solution with respect to Parameters + by calling problem.backward() after solving, or to compute perturbations to the variables + given perturbations to Parameters by calling problem.derivative(). + Gradients are only supported for DCP and DGP problems, not quasiconvex problems. + When computing gradients (i.e., when this argument is True), the problem must satisfy the DPP rules. + enforce_dpp : bool, optional + When True, a DPPError will be thrown when trying to solve a + non-DPP problem (instead of just a warning). + Only relevant for problems involving Parameters. Defaults to False. + ignore_dpp : bool, optional + When True, DPP problems will be treated as non-DPP, which may speed up compilation. Defaults to False. + method : function, optional + A custom solve method to use. + kwargs : keywords, optional + Additional solver specific arguments. See CVXPY documentation for details. + """ + return RoutineModel.run(self, no_code=no_code, **kwargs) + + def unpack(self, **kwargs): + """ + Unpack the results from CVXPY model. 
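+
+        After a successful ``run()``, the unpacked values can also be read from
+        the routine variables, e.g. ``sp.DCOPF.pg.v`` (a sketch, assuming this
+        routine is attached to a loaded system instance ``sp``).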
+ """ + # --- copy results from solver into routine algeb --- + for _, var in self.vars.items(): + # --- copy results from routine algeb into system algeb --- + if var.model is None: # if no owner + continue + if var.src is None: # if no source + continue + else: + try: + idx = var.owner.get_idx() + except AttributeError: + idx = var.owner.idx.v + else: + pass + # NOTE: only unpack the variables that are in the model or group + try: + var.owner.set(src=var.src, attr='v', idx=idx, value=var.v) + # failed to find source var in the owner (model or group) + except (KeyError, TypeError): + pass + # label the most recent solved routine + self.system.recent = self.system.routines[self.class_name] + return True diff --git a/ams/routines/dcpf.py b/ams/routines/dcpf.py index cd20ec27..5a4d305b 100644 --- a/ams/routines/dcpf.py +++ b/ams/routines/dcpf.py @@ -44,10 +44,10 @@ def __init__(self, system, config): unit='radian',) # --- load --- - self.pl = RParam(info='nodal active load (system base)', - name='pl', tex_name=r'p_{l}', + self.pd = RParam(info='active deman', + name='pd', tex_name=r'p_{d}', unit='p.u.', - model='mats', src='pl') + model='StaticLoad', src='p0') def unpack(self, res): """ @@ -70,7 +70,7 @@ def unpack(self, res): system.Slack.q.v = res['gen'][:system.Slack.n, 2] / mva # reactive power # --- copy results from system algeb into routine algeb --- - for raname, var in self.vars.items(): + for vname, var in self.vars.items(): owner = getattr(system, var.model) # instance of owner, Model or Group if var.src is None: # skip if no source variable is specified continue @@ -81,12 +81,14 @@ def unpack(self, res): idx = owner.get_idx() else: msg = f"Failed to find valid source variable `{owner.class_name}.{var.src}` for " - msg += f"{self.class_name}.{raname}, skip unpacking." + msg += f"{self.class_name}.{vname}, skip unpacking." logger.warning(msg) continue try: + logger.debug(f"Unpacking {vname} into {owner.class_name}.{var.src}.") var.v = owner.get(src=var.src, attr='v', idx=idx) except AttributeError: + logger.debug(f"Failed to unpack {vname} into {owner.class_name}.{var.src}.") continue self.system.recent = self.system.routines[self.class_name] return True @@ -132,13 +134,13 @@ def run(self, force_init=False, no_code=True, self.exit_code = 0 if success else 1 _, s = elapsed(t0) self.exec_time = float(s.split(' ')[0]) - self.unpack(res) n_iter = int(sstats['num_iters']) n_iter_str = f"{n_iter} iterations " if n_iter > 1 else f"{n_iter} iteration " if self.exit_code == 0: msg = f"{self.class_name} solved in {s}, converged after " msg += n_iter_str + f"using solver {sstats['solver_name']}." logger.info(msg) + self.unpack(res) return True else: msg = f"{self.class_name} failed after " diff --git a/ams/routines/dopf.py b/ams/routines/dopf.py index 298e453c..f850f629 100644 --- a/ams/routines/dopf.py +++ b/ams/routines/dopf.py @@ -14,6 +14,8 @@ class DOPF(DCOPF): """ Linearzied distribution OPF, where power loss are ignored. + UNDER DEVELOPMENT! + Reference: [1] L. Bai, J. Wang, C. Wang, C. Chen, and F. 
Li, “Distribution Locational Marginal Pricing (DLMP) @@ -26,29 +28,42 @@ def __init__(self, system, config): self.info = 'Linearzied distribution OPF' self.type = 'DED' - # --- params --- + # -- Data Section -- + # --- generator --- + self.qmax = RParam(info='generator maximum reactive power', + name='qmax', tex_name=r'q_{max}', unit='p.u.', + model='StaticGen', src='qmax',) + self.qmin = RParam(info='generator minimum reactive power', + name='qmin', tex_name=r'q_{min}', unit='p.u.', + model='StaticGen', src='qmin',) + self.CftT = RParam(info='Line connectivity matrix transpose', + name='CftT', tex_name=r'C_{ft}^{T}', + model='mats', src='CftT', no_parse=True,) + # --- load --- self.qd = RParam(info='reactive demand', name='qd', tex_name=r'q_{d}', unit='p.u.', model='StaticLoad', src='q0',) + # --- bus --- self.vmax = RParam(info="Bus voltage upper limit", - name='vmax', tex_name=r'\overline{v}', + name='vmax', tex_name=r'v_{max}', unit='p.u.', model='Bus', src='vmax', no_parse=True, ) self.vmin = RParam(info="Bus voltage lower limit", - name='vmin', tex_name=r'\underline{v}', + name='vmin', tex_name=r'v_{min}', unit='p.u.', model='Bus', src='vmin', no_parse=True,) + # --- line --- self.r = RParam(info='line resistance', name='r', tex_name='r', unit='p.u.', - model='Line', src='r') - self.qmax = RParam(info='generator maximum reactive power', - name='qmax', tex_name=r'q_{max}', unit='p.u.', - model='StaticGen', src='qmax',) - self.qmin = RParam(info='generator minimum reactive power', - name='qmin', tex_name=r'q_{min}', unit='p.u.', - model='StaticGen', src='qmin',) - # --- vars --- + model='Line', src='r', + no_parse=True,) + self.x = RParam(info='line reactance', + name='x', tex_name='x', unit='p.u.', + model='Line', src='x', + no_parse=True,) + # --- Model Section --- + # --- generator --- self.qg = Var(info='Gen reactive power', name='qg', tex_name=r'q_{g}', unit='p.u.', model='StaticGen', src='q',) @@ -58,7 +73,11 @@ def __init__(self, system, config): self.qgub = Constraint(name='qgub', type='uq', info='qg max', e_str='qg - mul(ug, qmax)',) - + # --- bus --- + self.v = Var(info='Bus voltage', + name='v', tex_name=r'v', + unit='p.u.', + model='Bus', src='v') self.vsq = Var(info='square of Bus voltage', name='vsq', tex_name=r'v^{2}', unit='p.u.', model='Bus',) @@ -70,29 +89,26 @@ def __init__(self, system, config): info='Voltage lower limit', e_str='-vsq + vmin**2', type='uq',) - + # --- line --- self.qlf = Var(info='line reactive power', name='qlf', tex_name=r'q_{lf}', unit='p.u.', model='Line',) - - # --- constraints --- - self.pnb.e_str = 'PTDF@(Cgi@pg - Cli@pd) - plf' - - self.qb = Constraint(name='qb', info='reactive power balance', - e_str='sum(qd) - sum(qg)', - type='eq',) - self.lvd = Constraint(name='lvd', - info='line voltage drop', - e_str='Cft@vsq - (r * plf + x * qlf)', - type='eq',) + self.lvd = Constraint(info='line voltage drop', + name='lvd', type='eq', + e_str='CftT@vsq - (r * plf + x * qlf)',) + # --- power balance --- + # NOTE: following Eqn seems to be wrong, double check + # g_Q(\Theta, V, Q_g) = B_{bus}V\Theta + Q_{bus,shift} + Q_d + B_{sh} - C_gQ_g = 0 + self.qb = Constraint(info='reactive power balance', + name='qb', type='eq', + e_str='sum(qd) - sum(qg)',) # --- objective --- # NOTE: no need to revise objective function - def unpack(self, **kwargs): - super().unpack(**kwargs) - vBus = np.sqrt(self.vsq.v) - self.system.Bus.set(src='v', attr='v', value=vBus, idx=self.vsq.get_idx()) + def _post_solve(self): + self.v.optz.value = np.sqrt(self.vsq.optz.value) + 
return super()._post_solve() class DOPFVIS(DOPF): @@ -116,20 +132,20 @@ def __init__(self, system, config): self.cm = RParam(info='Virtual inertia cost', name='cm', src='cm', tex_name=r'c_{m}', unit=r'$/s', - model='REGCV1Cost', - indexer='reg', imodel='REGCV1') + model='VSGCost', + indexer='reg', imodel='VSG') self.cd = RParam(info='Virtual damping cost', name='cd', src='cd', tex_name=r'c_{d}', unit=r'$/(p.u.)', - model='REGCV1Cost', - indexer='reg', imodel='REGCV1',) + model='VSGCost', + indexer='reg', imodel='VSG',) # --- vars --- self.M = Var(info='Emulated startup time constant (M=2H) from REGCV1', name='M', tex_name=r'M', unit='s', - model='REGCV1',) + model='VSG',) self.D = Var(info='Emulated damping coefficient from REGCV1', name='D', tex_name=r'D', unit='p.u.', - model='REGCV1',) + model='VSG',) obj = 'sum(c2 * pg**2 + c1 * pg + ug * c0 + cm * M + cd * D)' self.obj = Objective(name='tc', info='total cost', unit='$', diff --git a/ams/routines/ed.py b/ams/routines/ed.py index 88b4e8f2..011bab51 100644 --- a/ams/routines/ed.py +++ b/ams/routines/ed.py @@ -9,7 +9,7 @@ from ams.core.service import (NumOpDual, NumHstack, RampSub, NumOp, LoadScale) -from ams.routines.rted import RTED, ESD1Base +from ams.routines.rted import RTED, DGBase, ESD1Base from ams.opt.omodel import Var, Constraint @@ -20,7 +20,7 @@ class SRBase: """ Base class for spinning reserve. """ - + def __init__(self) -> None: self.dsrp = RParam(info='spinning reserve requirement in percentage', name='dsr', tex_name=r'd_{sr}', @@ -64,17 +64,12 @@ def __init__(self) -> None: name='sd', tex_name=r's_{d}', src='sd', model='EDTSlot') + # NOTE: update timeslot.model in dispatch model if necessary self.timeslot = RParam(info='Time slot for multi-period ED', name='timeslot', tex_name=r't_{s,idx}', src='idx', model='EDTSlot', no_parse=True) - self.pdsz = NumOpDual(u=self.sd, u2=self.pdz, - fun=np.multiply, rfun=np.transpose, - name='pds', tex_name=r'p_{d,s,z}', - unit='p.u.', - info='Scaled zonal total load') - self.tlv = NumOp(u=self.timeslot, fun=np.ones_like, args=dict(dtype=float), expand_dims=0, @@ -89,12 +84,13 @@ def __init__(self) -> None: self.R30 = RParam(info='30-min ramp rate', name='R30', tex_name=r'R_{30}', src='R30', unit='p.u./min', - model='StaticGen') + model='StaticGen', no_parse=True,) self.Mr = RampSub(u=self.pg, name='Mr', tex_name=r'M_{r}', - info='Subtraction matrix for ramping',) + info='Subtraction matrix for ramping', + no_parse=True, sparse=True,) self.RR30 = NumHstack(u=self.R30, ref=self.Mr, name='RR30', tex_name=r'R_{30,R}', - info='Repeated ramp rate',) + info='Repeated ramp rate', no_parse=True,) self.ctrl.expand_dims = 1 self.c0.expand_dims = 1 @@ -102,6 +98,15 @@ def __init__(self) -> None: self.pmin.expand_dims = 1 self.pg0.expand_dims = 1 self.rate_a.expand_dims = 1 + self.Pfinj.expand_dims = 1 + self.Pbusinj.expand_dims = 1 + self.gsh.expand_dims = 1 + + # NOTE: extend pg to 2D matrix: row for gen and col for timeslot + self.pg.horizon = self.timeslot + self.pg.info = '2D Gen power' + self.aBus.horizon = self.timeslot + self.aBus.info = '2D Bus angle' class ED(RTED): """ @@ -111,11 +116,9 @@ class ED(RTED): ED extends DCOPF as follows: - 1. Vars ``pg``, ``pru``, ``prd`` are extended to 2D - - 2. 2D Vars ``rgu`` and ``rgd`` are introduced - - 3. 
Param ``ug`` is sourced from ``EDTSlot.ug`` as commitment decisions + - Vars ``pg``, ``pru``, ``prd`` are extended to 2D + - 2D Vars ``rgu`` and ``rgd`` are introduced + - Param ``ug`` is sourced from ``EDTSlot.ug`` as commitment decisions Notes ----- @@ -146,16 +149,14 @@ def __init__(self, system, config): self.dud.expand_dims = 1 self.ddd.expand_dims = 1 - # --- params --- + # --- Data Section --- self.ugt = NumOp(u=self.ug, fun=np.transpose, name='ugt', tex_name=r'u_{g}', info='input ug transpose', no_parse=True) - # --- vars --- - # NOTE: extend pg to 2D matrix, where row is gen and col is timeslot - self.pg.horizon = self.timeslot - self.pg.info = '2D Gen power' + # --- Model Section --- + # --- gen --- self.ctrle.u2 = self.ugt self.nctrle.u2 = self.ugt pglb = '-pg + mul(mul(nctrle, pg0), tlv) ' @@ -165,37 +166,30 @@ def __init__(self, system, config): pgub += '- mul(mul(ctrle, tlv), pmax)' self.pgub.e_str = pgub - self.plf.horizon = self.timeslot - self.plf.info = '2D Line flow' - self.pru.horizon = self.timeslot self.pru.info = '2D RegUp power' - self.prd.horizon = self.timeslot self.prd.info = '2D RegDn power' self.prs.horizon = self.timeslot - - # --- constraints --- - # NOTE: Spg @ pg returns a row vector - self.pb.e_str = '- gs @ pg + pdsz' # power balance - - self.prsb.e_str = 'mul(ugt, mul(pmax, tlv) - pg) - prs' + self.prsb.e_str = 'mul(ugt, pmax@tlv - pg) - prs' self.rsr.e_str = '-gs@prs + dsr' - # --- bus power injection --- - self.pnb.e_str = 'PTDF@(Cgi@pg - Cli@pds) - plf' + # --- line --- + self.plf.horizon = self.timeslot + self.plf.info = '2D Line flow' + self.plflb.e_str = '-Bf@aBus - Pfinj@tlv - rate_a@tlv' + self.plfub.e_str = 'Bf@aBus + Pfinj@tlv - rate_a@tlv' - # --- line limits --- - self.plflb.e_str = '-plf - mul(rate_a, tlv)' - self.plfub.e_str = 'plf - mul(rate_a, tlv)' + # --- power balance --- + self.pb.e_str = 'Bbus@aBus + Pbusinj@tlv + Cl@pds + Csh@gsh@tlv - Cg@pg' # --- ramping --- self.rbu.e_str = 'gs@mul(ugt, pru) - mul(dud, tlv)' self.rbd.e_str = 'gs@mul(ugt, prd) - mul(ddd, tlv)' self.rru.e_str = 'mul(ugt, pg + pru) - mul(pmax, tlv)' - self.rrd.e_str = 'mul(ugt, -pg + prd) - mul(pmin, tlv)' + self.rrd.e_str = 'mul(ugt, -pg + prd) + mul(pmin, tlv)' self.rgu.e_str = 'pg @ Mr - t dot RR30' self.rgd.e_str = '-pg @ Mr - t dot RR30' @@ -217,6 +211,16 @@ def __init__(self, system, config): cost += ' + sum(csr@prs)' self.obj.e_str = cost + def _post_solve(self): + """ + Overwrite ``_post_solve``. + """ + # --- post-solving calculations --- + # line flow: Bf@aBus + Pfinj + mats = self.system.mats + self.plf.optz.value = mats.Bf._v@self.aBus.v + self.Pfinj.v@self.tlv.v + return True + def dc2ac(self, **kwargs): """ AC conversion ``dc2ac`` is not implemented yet for @@ -234,10 +238,26 @@ def unpack(self, **kwargs): """ return None -# TODO: add data check -# if has model ``TimeSlot``, mandatory -# if has model ``Region``, optional -# if ``Region``, if ``Bus`` has param ``zone``, optional, if none, auto fill + +class EDDG(ED, DGBase): + """ + ED with distributed generation :ref:`DG`. + + Note that EDDG only inlcudes DG output power. If ESD1 is included, + EDES should be used instead, otherwise there is no SOC. 
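+
+    A minimal usage sketch (an illustration, assuming the routine is registered
+    on a loaded system as ``sp.EDDG`` and the loaded case contains DG devices):
+
+    >>> sp.EDDG.run()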
+ """ + + def __init__(self, system, config): + ED.__init__(self, system, config) + DGBase.__init__(self) + + self.config.t = 1 # dispatch interval in hour + + self.info = 'Economic dispatch with distributed generation' + self.type = 'DCED' + + # NOTE: extend vars to 2D + self.pgdg.horizon = self.timeslot class ESD1MPBase(ESD1Base): @@ -248,9 +268,9 @@ class ESD1MPBase(ESD1Base): def __init__(self): ESD1Base.__init__(self) - self.Mre = RampSub(u=self.SOC, name='Mre', tex_name=r'M_{r,E}', + self.Mre = RampSub(u=self.SOC, name='Mre', tex_name=r'M_{r,ES}', info='Subtraction matrix for SOC', - no_parse=True) + no_parse=True, sparse=True,) self.EnR = NumHstack(u=self.En, ref=self.Mre, name='EnR', tex_name=r'E_{n,R}', info='Repeated En as 2D matrix, (ng, ng-1)') @@ -266,7 +286,7 @@ def __init__(self): SOCb0 = 'mul(En, SOC[:, 0] - SOCinit) - t dot mul(EtaC, zce[:, 0])' SOCb0 += ' + t dot mul(REtaD, zde[:, 0])' - self.SOCb0 = Constraint(name='SOCb', type='eq', + self.SOCb0 = Constraint(name='SOCb0', type='eq', info='ESD1 SOC initial balance', e_str=SOCb0,) @@ -275,7 +295,7 @@ def __init__(self): e_str='SOC[:, -1] - SOCinit',) -class EDES(ED, ESD1Base): +class EDES(ED, ESD1MPBase): """ ED with energy storage :ref:`ESD1`. The bilinear term in the formulation is linearized with big-M method. @@ -283,7 +303,7 @@ class EDES(ED, ESD1Base): def __init__(self, system, config): ED.__init__(self, system, config) - ESD1Base.__init__(self) + ESD1MPBase.__init__(self) self.config.t = 1 # dispatch interval in hour @@ -291,6 +311,7 @@ def __init__(self, system, config): self.type = 'DCED' # NOTE: extend vars to 2D + self.pgdg.horizon = self.timeslot self.SOC.horizon = self.timeslot self.pce.horizon = self.timeslot self.pde.horizon = self.timeslot diff --git a/ams/routines/pflow.py b/ams/routines/pflow.py index 9e689987..7be37028 100644 --- a/ams/routines/pflow.py +++ b/ams/routines/pflow.py @@ -1,15 +1,16 @@ """ Power flow routines. 
""" -import logging # NOQA +import logging +from collections import OrderedDict -from ams.pypower import runpf # NOQA +from ams.pypower import runpf -from ams.io.pypower import system2ppc # NOQA -from ams.pypower.core import ppoption # NOQA -from ams.core.param import RParam # NOQA +from ams.io.pypower import system2ppc +from ams.pypower.core import ppoption +from ams.core.param import RParam -from ams.routines.dcpf import DCPFlowBase # NOQA +from ams.routines.dcpf import DCPFlowBase from ams.opt.omodel import Var, Constraint # NOQA logger = logging.getLogger(__name__) @@ -32,49 +33,38 @@ def __init__(self, system, config): self.info = "AC Power Flow" self.type = "PF" - self.qd = RParam( - info="reactive power load in system base", - name="qd", - src="q0", - tex_name=r"q_{d}", - unit="p.u.", - model="PQ", - ) + self.config.add(OrderedDict((('qlim', 0), + ))) + self.config.add_extra("_help", + qlim="Enforce generator q limits", + ) + self.config.add_extra("_alt", + qlim=(0, 1, 2), + ) + + self.qd = RParam(info="reactive power load in system base", + name="qd", tex_name=r"q_{d}", + unit="p.u.", + model="StaticLoad", src="q0",) # --- bus --- - self.aBus = Var( - info="bus voltage angle", - unit="rad", - name="aBus", - src="a", - tex_name=r"a_{Bus}", - model="Bus", - ) - self.vBus = Var( - info="bus voltage magnitude", - unit="p.u.", - name="vBus", - src="v", - tex_name=r"v_{Bus}", - model="Bus", - ) + self.aBus = Var(info="bus voltage angle", + unit="rad", + name="aBus", tex_name=r"a_{Bus}", + model="Bus", src="a",) + self.vBus = Var(info="bus voltage magnitude", + unit="p.u.", + name="vBus", tex_name=r"v_{Bus}", + model="Bus", src="v",) # --- gen --- - self.pg = Var( - info="active power generation", - unit="p.u.", - name="pg", - src="p", - tex_name=r"p_{g}", - model="StaticGen", - ) - self.qg = Var( - info="reactive power generation", - unit="p.u.", - name="qg", - src="q", - tex_name=r"q_{g}", - model="StaticGen", - ) + self.pg = Var(info="active power generation", + unit="p.u.", + name="pg", tex_name=r"p_{g}", + model="StaticGen", src="p",) + self.qg = Var(info="reactive power generation", + unit="p.u.", + name="qg", tex_name=r"q_{g}", + model="StaticGen", src="q",) # NOTE: omit AC power flow formulation here def solve(self, method="newton"): @@ -91,7 +81,7 @@ def solve(self, method="newton"): if alg is None: msg = f"Invalid method `{method}` for PFlow." raise ValueError(msg) - ppopt = ppoption(PF_ALG=alg) + ppopt = ppoption(PF_ALG=alg, ENFORCE_Q_LIMS=self.config.qlim) res, success, sstats = runpf(casedata=ppc, ppopt=ppopt) return res, success, sstats @@ -126,9 +116,6 @@ def run(self, force_init=False, no_code=True, method="newton", **kwargs): exit_code : int Exit code of the routine. 
""" - return super().run( - force_init=force_init, - no_code=no_code, - method=method, - **kwargs, - ) + return super().run(force_init=force_init, + no_code=no_code, method=method, + **kwargs,) diff --git a/ams/routines/routine.py b/ams/routines/routine.py index f891b133..996f15ca 100644 --- a/ams/routines/routine.py +++ b/ams/routines/routine.py @@ -75,22 +75,16 @@ def __init__(self, system=None, config=None): self.config.load(config) # TODO: these default configs might to be revised self.config.add( - OrderedDict( - ( - ("sparselib", "klu"), - ("linsolve", 0), - ) + OrderedDict(( + ("sparselib", "klu"), + ) ) ) self.config.add_extra( - "_help", - sparselib="linear sparse solver name", - linsolve="solve symbolic factorization each step (enable when KLU segfaults)", + "_help", sparselib="linear sparse solver name", ) self.config.add_extra( - "_alt", - sparselib=("klu", "umfpack", "spsolve", "cupy"), - linsolve=(0, 1), + "_alt", sparselib=("klu", "umfpack", "spsolve", "cupy"), ) self.exec_time = 0.0 # recorded time to execute the routine in seconds @@ -246,27 +240,33 @@ def doc(self, max_width=78, export="plain"): """ return self.docum.get(max_width=max_width, export=export) - def _constr_check(self): + def _get_off_constrs(self): """ - Chcek if constraints are in-use. + Chcek if constraints are turned off. """ disabled = [] for cname, c in self.constrs.items(): if c.is_disabled: disabled.append(cname) - if len(disabled) > 0: - logger.warning(f"Disabled constraints: {disabled}") - return True + return disabled - def _data_check(self): + def _data_check(self, info=True): """ Check if data is valid for a routine. + + Parameters + ---------- + info: bool + Whether to print warning messages. """ no_input = [] owner_list = [] for rname, rparam in self.rparams.items(): if rparam.owner is not None: - if rparam.owner.n == 0: + # NOTE: skip checking Shunt.g + if (rparam.owner.class_name == 'Shunt') and (rparam.src == 'g'): + pass + elif rparam.owner.n == 0: no_input.append(rname) owner_list.append(rparam.owner.class_name) # TODO: add more data config check? @@ -274,15 +274,21 @@ def _data_check(self): if not np.all(rparam.v > 0): logger.warning(f"RParam <{rname}> should have all positive values.") if len(no_input) > 0: - msg = f"Following models are missing in input: {set(owner_list)}" - logger.warning(msg) + if info: + msg = f"Following models are missing in input: {set(owner_list)}" + logger.warning(msg) return False # TODO: add data validation for RParam, typical range, etc. return True - def init(self, force=True, no_code=True, **kwargs): + def init(self, force=False, no_code=True, **kwargs): """ - Setup optimization model. + Initialize the routine. + + Force initialization (`force=True`) will do the following: + - Rebuild the system matrices + - Enable all constraints + - Reinitialize the optimization model Parameters ---------- @@ -291,26 +297,53 @@ def init(self, force=True, no_code=True, **kwargs): no_code: bool Whether to show generated code. """ - if not force and self.initialized: + skip_all = (not force) and self.initialized and self.om.initialized + skip_ominit = (not force) and self.om.initialized + + if skip_all: logger.debug(f"{self.class_name} has already been initialized.") return True + + t0, _ = elapsed() + # --- data check --- if self._data_check(): logger.debug(f"{self.class_name} data check passed.") else: msg = f"{self.class_name} data check failed, setup may run into error!" 
logger.warning(msg) - self._constr_check() - # FIXME: build the system matrices every init might slow down - self.system.mats.make() - results, elapsed_time = self.om.setup(no_code=no_code) - common_msg = f"Routine <{self.class_name}> " - if results: - msg = f"initialized in {elapsed_time}." + + # --- matrix build --- + if force: + t_mat, _ = elapsed() + self.system.mats.make() + _, s_mat = elapsed(t_mat) + logger.debug(f"Built system matrices in {s_mat}.") + for constr in self.constrs.values(): + constr.is_disabled = False + + # --- constraint check --- + disabled = self._get_off_constrs() + if len(disabled) > 0: + msg = "Disabled constraints: " + d_str = [f'{constr}' for constr in disabled] + msg += ", ".join(d_str) + logger.warning(msg) + + if not skip_ominit: + om_init = self.om.init(no_code=no_code) + else: + om_init = True + _, s_init = elapsed(t0) + + msg = f"Routine <{self.class_name}> " + if om_init: + msg += f"initialized in {s_init}." self.initialized = True else: - msg = "initialization failed!" - logger.info(common_msg + msg) - return results + msg += "initialization failed!" + self.initialized = False + logger.info(msg) + return self.initialized def prepare(self): """ @@ -323,7 +356,6 @@ def solve(self, **kwargs): """ Solve the routine optimization model. """ - pass return True def unpack(self, **kwargs): @@ -332,10 +364,21 @@ def unpack(self, **kwargs): """ return None + def _post_solve(self): + """ + Post-solve calculation. + """ + return None + def run(self, force_init=False, no_code=True, **kwargs): """ Run the routine. + Force initialization (`force_init=True`) will do the following: + - Rebuild the system matrices + - Enable all constraints + - Reinitialize the optimization model + Parameters ---------- force_init: bool @@ -345,21 +388,15 @@ def run(self, force_init=False, no_code=True, **kwargs): """ # --- setup check --- self.init(force=force_init, no_code=no_code) - # NOTE: if the model data is altered, we need to re-setup the model - # this implementation if not efficient at large-scale - # FIXME: find a more efficient way to update the OModel values if - # the system data is altered - # elif self.exec_time > 0: - # self.init(no_code=no_code) # --- solve optimization --- t0, _ = elapsed() _ = self.solve(**kwargs) - status = self.om.mdl.status + status = self.om.prob.status self.exit_code = self.syms.status[status] self.system.exit_code = self.exit_code _, s = elapsed(t0) self.exec_time = float(s.split(" ")[0]) - sstats = self.om.mdl.solver_stats # solver stats + sstats = self.om.prob.solver_stats # solver stats if sstats.num_iters is None: n_iter = -1 else: @@ -370,6 +407,7 @@ def run(self, force_init=False, no_code=True, **kwargs): msg += n_iter_str + f"using solver {sstats.solver_name}." 
logger.warning(msg) self.unpack(**kwargs) + self._post_solve() return True else: msg = f"{self.class_name} failed as {status} after " @@ -595,12 +633,11 @@ def disable(self, name): for n in name: if n not in self.constrs: logger.warning(f"Constraint <{n}> not found.") - continue - if self.constrs[n].is_disabled: + elif self.constrs[n].is_disabled: logger.warning(f"Constraint <{n}> has already been disabled.") - continue - self.constrs[n].is_disabled = True - self.initialized = False + else: + self.constrs[n].is_disabled = True + self.om.initialized = False return True if name in self.constrs: @@ -608,7 +645,7 @@ def disable(self, name): logger.warning(f"Constraint <{name}> has already been disabled.") else: self.constrs[name].is_disabled = True - self.initialized = False + self.om.initialized = False logger.warning(f"Disable constraint <{name}>.") return True diff --git a/ams/routines/rted.py b/ams/routines/rted.py index b463830a..6175f6f4 100644 --- a/ams/routines/rted.py +++ b/ams/routines/rted.py @@ -1,28 +1,25 @@ """ Real-time economic dispatch. """ -import logging # NOQA -from collections import OrderedDict # NOQA -import numpy as np # NOQA +import logging +from collections import OrderedDict +import numpy as np -from ams.core.param import RParam # NOQA -from ams.core.service import ZonalSum, VarSelect, NumOp, NumOpDual # NOQA -from ams.routines.dcopf import DCOPFBase, DCOPF # NOQA +from ams.core.param import RParam +from ams.core.service import ZonalSum, VarSelect, NumOp, NumOpDual +from ams.routines.dcopf import DCOPF -from ams.opt.omodel import Var, Constraint # NOQA +from ams.opt.omodel import Var, Constraint logger = logging.getLogger(__name__) -class RTEDBase(DCOPF): +class RTEDBase: """ Base class for real-time economic dispatch (RTED). - - Overload ``dc2ac``, ``run``. """ - def __init__(self, system, config): - DCOPF.__init__(self, system, config) + def __init__(self): # --- region --- self.zg = RParam(info='Gen zone', name='zg', tex_name='z_{one,g}', @@ -34,7 +31,8 @@ def __init__(self, system, config): no_parse=True) self.gs = ZonalSum(u=self.zg, zone='Region', name='gs', tex_name=r'S_{g}', - info='Sum Gen vars vector in shape of zone') + info='Sum Gen vars vector in shape of zone', + no_parse=True, sparse=True) self.ds = ZonalSum(u=self.zd, zone='Region', name='ds', tex_name=r'S_{d}', info='Sum pd vector in shape of zone', @@ -46,7 +44,6 @@ def __init__(self, system, config): name='pdz', tex_name=r'p_{d,z}', unit='p.u.', info='zonal total load', no_parse=True,) - # --- generator --- self.R10 = RParam(info='10-min ramp rate', name='R10', tex_name=r'R_{10}', @@ -60,16 +57,16 @@ class SFRBase: """ def __init__(self): - - # 1. 
reserve - # --- reserve cost --- + # --- SFR cost --- self.cru = RParam(info='RegUp reserve coefficient', name='cru', tex_name=r'c_{r,u}', model='SFRCost', src='cru', + indexer='gen', imodel='StaticGen', unit=r'$/(p.u.)',) self.crd = RParam(info='RegDown reserve coefficient', name='crd', tex_name=r'c_{r,d}', model='SFRCost', src='crd', + indexer='gen', imodel='StaticGen', unit=r'$/(p.u.)',) # --- reserve requirement --- self.du = RParam(info='RegUp reserve requirement in percentage', @@ -80,26 +77,48 @@ def __init__(self): name='dd', tex_name=r'd_{d}', model='SFR', src='dd', unit='%', no_parse=True,) + self.dud = NumOpDual(u=self.pdz, u2=self.du, fun=np.multiply, + rfun=np.reshape, rargs=dict(newshape=(-1,)), + name='dud', tex_name=r'd_{u, d}', + info='zonal RegUp reserve requirement',) + self.ddd = NumOpDual(u=self.pdz, u2=self.dd, fun=np.multiply, + rfun=np.reshape, rargs=dict(newshape=(-1,)), + name='ddd', tex_name=r'd_{d, d}', + info='zonal RegDn reserve requirement',) + # --- SFR --- + self.pru = Var(info='RegUp reserve', + unit='p.u.', name='pru', tex_name=r'p_{r,u}', + model='StaticGen', nonneg=True,) + self.prd = Var(info='RegDn reserve', + unit='p.u.', name='prd', tex_name=r'p_{r,d}', + model='StaticGen', nonneg=True,) + # NOTE: define e_str in dispatch routine + self.rbu = Constraint(name='rbu', type='eq', + info='RegUp reserve balance',) + self.rbd = Constraint(name='rbd', type='eq', + info='RegDn reserve balance',) + self.rru = Constraint(name='rru', type='uq', + info='RegUp reserve source',) + self.rrd = Constraint(name='rrd', type='uq', + info='RegDn reserve source',) + self.rgu = Constraint(name='rgu', type='uq', + info='Gen ramping up',) + self.rgd = Constraint(name='rgd', type='uq', + info='Gen ramping down',) -class RTED(RTEDBase, SFRBase): +class RTED(DCOPF, RTEDBase, SFRBase): """ DC-based real-time economic dispatch (RTED). RTED extends DCOPF with: - 1. Param ``pg0``, which can be retrieved from dynamic simulation results. - - 2. RTED has mapping dicts to interface with ANDES. - - 3. RTED routine adds a function ``dc2ac`` to do the AC conversion using ACOPF - - 4. Vars for zonal SFR reserve: ``pru`` and ``prd``; - - 5. Param for linear cost of zonal SFR reserve ``cru`` and ``crd``; - - 6. Param for SFR requirement ``du`` and ``dd``; - - 7. Param for generator ramping: start point ``pg0`` and ramping limit ``R10``; + - Mapping dicts to interface with ANDES + - Function ``dc2ac`` to do the AC conversion + - Vars for SFR reserve: ``pru`` and ``prd`` + - Param for linear SFR cost: ``cru`` and ``crd`` + - Param for SFR requirement: ``du`` and ``dd`` + - Param for ramping: start point ``pg0`` and ramping limit ``R10`` + - Param ``pg0``, which can be retrieved from dynamic simulation results. The function ``dc2ac`` sets the ``vBus`` value from solved ACOPF. 
Without this conversion, dynamic simulation might fail due to the gap between @@ -113,7 +132,8 @@ class RTED(RTEDBase, SFRBase): """ def __init__(self, system, config): - RTEDBase.__init__(self, system, config) + DCOPF.__init__(self, system, config) + RTEDBase.__init__(self) SFRBase.__init__(self) self.config.add(OrderedDict((('t', 5/60), @@ -141,46 +161,23 @@ def __init__(self, system, config): self.info = 'Real-time economic dispatch' self.type = 'DCED' - # --- vars --- - self.pru = Var(info='RegUp reserve', - unit='p.u.', name='pru', tex_name=r'p_{r,u}', - model='StaticGen', nonneg=True,) - self.prd = Var(info='RegDn reserve', - unit='p.u.', name='prd', tex_name=r'p_{r,d}', - model='StaticGen', nonneg=True,) + # --- Model Section --- + # --- SFR --- + # RegUp/Dn reserve balance + self.rbu.e_str = 'gs @ mul(ug, pru) - dud' + self.rbd.e_str = 'gs @ mul(ug, prd) - ddd' + # RegUp/Dn reserve source + self.rru.e_str = 'mul(ug, pg + pru) - mul(ug, pmax)' + self.rrd.e_str = 'mul(ug, -pg + prd) + mul(ug, pmin)' + # Gen ramping up/down + self.rgu.e_str = 'mul(ug, pg-pg0-R10)' + self.rgd.e_str = 'mul(ug, -pg+pg0-R10)' - # --- constraints --- - self.dud = NumOpDual(u=self.pdz, u2=self.du, fun=np.multiply, - rfun=np.reshape, rargs=dict(newshape=(-1,)), - name='dud', tex_name=r'd_{u, d}', - info='zonal RegUp reserve requirement',) - self.ddd = NumOpDual(u=self.pdz, u2=self.dd, fun=np.multiply, - rfun=np.reshape, rargs=dict(newshape=(-1,)), - name='ddd', tex_name=r'd_{d, d}', - info='zonal RegDn reserve requirement',) - self.rbu = Constraint(name='rbu', type='eq', - info='RegUp reserve balance', - e_str='gs @ mul(ug, pru) - dud',) - self.rbd = Constraint(name='rbd', type='eq', - info='RegDn reserve balance', - e_str='gs @ mul(ug, prd) - ddd',) - self.rru = Constraint(name='rru', type='uq', - info='RegUp reserve source', - e_str='mul(ug, pg + pru) - mul(ug, pmax)',) - self.rrd = Constraint(name='rrd', type='uq', - info='RegDn reserve source', - e_str='mul(ug, -pg + prd) - mul(ug, pmin)',) - self.rgu = Constraint(name='rgu', type='uq', - info='Gen ramping up', - e_str='mul(ug, pg-pg0-R10)',) - self.rgd = Constraint(name='rgd', type='uq', - info='Gen ramping down', - e_str='mul(ug, -pg+pg0-R10)',) # --- objective --- self.obj.info = 'total generation and reserve cost' # NOTE: the product of dt and pg is processed using ``dot``, # because dt is a numnber - cost = 'sum_squares(mul(c2, pg))' + cost = 'sum(mul(c2, power(pg, 2)))' cost += '+ sum(c1 @ (t dot pg))' cost += '+ ug * c0' # constant cost cost += '+ sum(cru * pru + crd * prd)' # reserve cost @@ -209,10 +206,19 @@ def dc2ac(self, **kwargs): self.system.StaticGen.set(src='pmax', attr='v', idx=pr_idx, value=pmax) self.system.StaticGen.set(src='p0', attr='v', idx=pr_idx, value=self.pg.v) ACOPF.run() + if not ACOPF.exit_code == 0: + logger.warning(' did not converge, conversion failed.') + # NOTE: mock results to fit interface with ANDES + self.vBus = ACOPF.vBus + self.vBus.optz.value = np.ones(self.system.Bus.n) + self.aBus = ACOPF.aBus + self.aBus.optz.value = np.zeros(self.system.Bus.n) + return False self.pg.v = ACOPF.pg.v # NOTE: mock results to fit interface with ANDES self.vBus = ACOPF.vBus + self.aBus = ACOPF.aBus # reset pmin, pmax, p0 self.system.StaticGen.set(src='pmin', attr='v', idx=pr_idx, value=pmin0) @@ -221,7 +227,7 @@ def dc2ac(self, **kwargs): self.system.recent = self self.is_ac = True - logger.warning('RTED is converted to AC.') + logger.warning(f'<{self.class_name}> is converted to AC.') return True def run(self, no_code=True, 
**kwargs): @@ -273,12 +279,63 @@ def run(self, no_code=True, **kwargs): return super().run(**kwargs) -class ESD1Base: +class DGBase: + """ + Base class for DG used in DCED. + """ + + def __init__(self): + # --- params --- + self.gendg = RParam(info='gen of DG', + name='gendg', tex_name=r'g_{DG}', + model='DG', src='gen', + no_parse=True,) + info = 'Ratio of DG.pge w.r.t to that of static generator', + self.gammapdg = RParam(name='gammapd', tex_name=r'\gamma_{p,DG}', + model='DG', src='gammap', + no_parse=True, info=info) + + # --- vars --- + # TODO: maybe there will be constraints on pgd, maybe upper/lower bound? + # TODO: this might requre new device like DGSlot + self.pgdg = Var(info='DG output power', + unit='p.u.', name='pgdg', + tex_name=r'p_{g,DG}', + model='DG',) + + # --- constraints --- + self.cdg = VarSelect(u=self.pg, indexer='gendg', + name='cd', tex_name=r'C_{DG}', + info='Select DG power from pg', + gamma='gammapdg', + no_parse=True, sparse=True,) + self.cdgb = Constraint(name='cdgb', type='eq', + info='Select DG power from pg', + e_str='cdg @ pg - pgdg',) + + +class RTEDDG(RTED, DGBase): + """ + RTED with distributed generator :ref:`DG`. + + Note that RTEDDG only inlcudes DG output power. If ESD1 is included, + RTEDES should be used instead, otherwise there is no SOC. + """ + + def __init__(self, system, config): + RTED.__init__(self, system, config) + DGBase.__init__(self) + self.info = 'Real-time economic dispatch with DG' + self.type = 'DCED' + + +class ESD1Base(DGBase): """ Base class for ESD1 used in DCED. """ def __init__(self): + DGBase.__init__(self) # --- params --- self.En = RParam(info='Rated energy capacity', name='En', src='En', @@ -304,14 +361,14 @@ def __init__(self): name='EtaD', src='EtaD', tex_name=r'\eta_d', unit='%', model='ESD1', no_parse=True,) - self.gene = RParam(info='gen of ESD1', - name='gene', tex_name=r'g_{E}', - model='ESD1', src='gen', - no_parse=True,) - info = 'Ratio of ESD1.pge w.r.t to that of static generator', - self.gammape = RParam(name='gammape', tex_name=r'\gamma_{p,e}', - model='ESD1', src='gammap', - no_parse=True, info=info) + self.genesd = RParam(info='gen of ESD1', + name='genesd', tex_name=r'g_{ESD}', + model='ESD1', src='gen', + no_parse=True,) + info = 'Ratio of ESD1.pge w.r.t to that of static generator' + self.gammapesd = RParam(name='gammapesd', tex_name=r'\gamma_{p,ESD}', + model='ESD1', src='gammap', + no_parse=True, info=info) # --- service --- self.REtaD = NumOp(name='REtaD', tex_name=r'\frac{1}{\eta_d}', @@ -333,38 +390,40 @@ def __init__(self): self.SOCub = Constraint(name='SOCub', type='uq', info='SOC upper bound', e_str='SOC - SOCmax',) - self.ce = VarSelect(u=self.pg, indexer='gene', - name='ce', tex_name=r'C_{E}', - info='Select zue from pg', - gamma='gammape', no_parse=True,) self.pce = Var(info='ESD1 charging power', unit='p.u.', name='pce', - tex_name=r'p_{c,E}', + tex_name=r'p_{c,ESD}', model='ESD1', nonneg=True,) self.pde = Var(info='ESD1 discharging power', unit='p.u.', name='pde', - tex_name=r'p_{d,E}', + tex_name=r'p_{d,ESD}', model='ESD1', nonneg=True,) self.uce = Var(info='ESD1 charging decision', - name='uce', tex_name=r'u_{c,E}', + name='uce', tex_name=r'u_{c,ESD}', model='ESD1', boolean=True,) self.ude = Var(info='ESD1 discharging decision', - name='ude', tex_name=r'u_{d,E}', + name='ude', tex_name=r'u_{d,ESD}', model='ESD1', boolean=True,) - self.zce = Var(name='zce', tex_name=r'z_{c,E}', + self.zce = Var(name='zce', tex_name=r'z_{c,ESD}', model='ESD1', nonneg=True,) - self.zce.info = 'Aux var for 
charging, :math:`z_{c,e}=u_{c,E}*p_{c,E}`' - self.zde = Var(name='zde', tex_name=r'z_{d,E}', + self.zce.info = 'Aux var for charging, ' + self.zce.info += ':math:`z_{c,ESD}=u_{c,ESD}*p_{c,ESD}`' + self.zde = Var(name='zde', tex_name=r'z_{d,ESD}', model='ESD1', nonneg=True,) - self.zde.info = 'Aux var for discharging, :math:`z_{d,e}=u_{d,E}*p_{d,E}`' + self.zde.info = 'Aux var for discharging, ' + self.zde.info += ':math:`z_{d,ESD}=u_{d,ESD}*p_{d,ESD}`' # --- constraints --- - self.ceb = Constraint(name='ceb', type='eq', + self.cdb = Constraint(name='cdb', type='eq', info='Charging decision bound', e_str='uce + ude - 1',) - self.cpe = Constraint(name='cpe', type='eq', - info='Select pce from pg', - e_str='ce @ pg - zce - zde',) + self.ces = VarSelect(u=self.pg, indexer='genesd', + name='ce', tex_name=r'C_{ESD}', + info='Select zue from pg', + gamma='gammapesd', no_parse=True,) + self.cesb = Constraint(name='cesb', type='eq', + info='Select ESD1 power from pg', + e_str='ces @ pg + zce - zde',) self.zce1 = Constraint(name='zce1', type='uq', info='zce bound 1', e_str='-zce + pce',) @@ -398,3 +457,104 @@ def __init__(self, system, config): ESD1Base.__init__(self) self.info = 'Real-time economic dispatch with energy storage' self.type = 'DCED' + + +class VISBase: + """ + Base class for virtual inertia scheduling. + """ + + def __init__(self) -> None: + # --- Data Section --- + self.cm = RParam(info='Virtual inertia cost', + name='cm', src='cm', + tex_name=r'c_{m}', unit=r'$/s', + model='VSGCost', + indexer='reg', imodel='VSG') + self.cd = RParam(info='Virtual damping cost', + name='cd', src='cd', + tex_name=r'c_{d}', unit=r'$/(p.u.)', + model='VSGCost', + indexer='reg', imodel='VSG',) + self.zvsg = RParam(info='VSG zone', + name='zvsg', tex_name='z_{one,vsg}', + model='VSG', src='zone', + no_parse=True) + self.Mmax = RParam(info='Maximum inertia emulation', + name='Mmax', tex_name='M_{max}', + model='VSG', src='Mmax', + unit='s',) + self.Dmax = RParam(info='Maximum damping emulation', + name='Dmax', tex_name='D_{max}', + model='VSG', src='Dmax', + unit='p.u.',) + self.dvm = RParam(info='Emulated inertia requirement', + name='dvm', tex_name=r'd_{v,m}', + unit='s', + model='VSGR', src='dvm',) + self.dvd = RParam(info='Emulated damping requirement', + name='dvd', tex_name=r'd_{v,d}', + unit='p.u.', + model='VSGR', src='dvd',) + + # --- Model Section --- + self.M = Var(info='Emulated startup time constant (M=2H)', + name='M', tex_name=r'M', unit='s', + model='VSG', nonneg=True,) + self.D = Var(info='Emulated damping coefficient', + name='D', tex_name=r'D', unit='p.u.', + model='VSG', nonneg=True,) + + self.gvsg = ZonalSum(u=self.zvsg, zone='Region', + name='gvsg', tex_name=r'S_{g}', + info='Sum VSG vars vector in shape of zone', + no_parse=True) + self.Mub = Constraint(name='Mub', type='uq', + info='M upper bound', + e_str='M - Mmax',) + self.Dub = Constraint(name='Dub', type='uq', + info='D upper bound', + e_str='D - Dmax',) + self.Mreq = Constraint(name='Mreq', type='eq', + info='Emulated inertia requirement', + e_str='-gvsg@M + dvm',) + self.Dreq = Constraint(name='Dreq', type='eq', + info='Emulated damping requirement', + e_str='-gvsg@D + dvd',) + + # NOTE: revise the objective function to include virtual inertia cost + + +class RTEDVIS(RTED, VISBase): + """ + RTED with virtual inertia scheduling. + + Reference: + + [1] B. She, F. Li, H. Cui, J. Wang, Q. Zhang and R. 
Bo, "Virtual + Inertia Scheduling (VIS) for Real-time Economic Dispatch of + IBRs-penetrated Power Systems," in IEEE Transactions on + Sustainable Energy, doi: 10.1109/TSTE.2023.3319307. + """ + + def __init__(self, system, config): + RTED.__init__(self, system, config) + VISBase.__init__(self) + self.info = 'Real-time economic dispatch with virtual inertia scheduling' + self.type = 'DCED' + + # --- objective --- + self.obj.info = 'total generation and reserve cost' + gcost = 'sum(mul(c2, power(pg, 2)))' + gcost += '+ sum(c1 @ (t dot pg))' + gcost += '+ ug * c0 ' # constant cost + rcost = '+ sum(cru * pru + crd * prd) ' # reserve cost + vsgcost = '+ sum(cm * M + cd * D)' + self.obj.e_str = gcost + rcost + vsgcost + + self.map2.update({ + 'RenGen': { + 'M': 'M', + 'D': 'D', + }, + }) diff --git a/ams/routines/uc.py b/ams/routines/uc.py index 0b2d4435..115b8b24 100644 --- a/ams/routines/uc.py +++ b/ams/routines/uc.py @@ -8,8 +8,9 @@ from ams.core.param import RParam from ams.core.service import (NumOp, NumOpDual, MinDur) +from ams.routines.dcopf import DCOPF from ams.routines.rted import RTEDBase -from ams.routines.ed import SRBase, MPBase, ESD1MPBase +from ams.routines.ed import SRBase, MPBase, ESD1MPBase, DGBase from ams.opt.omodel import Var, Constraint @@ -51,18 +52,17 @@ def __init__(self) -> None: name='rnsr', type='uq',) -class UC(RTEDBase, MPBase, SRBase, NSRBase): +class UC(DCOPF, RTEDBase, MPBase, SRBase, NSRBase): """ - DC-based unit commitment (UC). + DC-based unit commitment (UC): The bilinear term in the formulation is linearized with big-M method. - Penalty for unserved load is introduced as ``config.cul`` (:math:`c_{ul, cfg}`), - 1000 [$/p.u.] by default. + Non-negative var `pdu` is introduced as unserved load with its penalty `cdp`. Constraints include power balance, ramping, spinning reserve, non-spinning reserve, minimum ON/OFF duration. The cost inludes generation cost, startup cost, shutdown cost, spinning reserve cost, - non-spinning reserve cost, and unserved energy penalty. + non-spinning reserve cost, and unserved load penalty. Method ``_initial_guess`` is used to make initial guess for commitment decision if all generators are online at initial. It is a simple heuristic method, which may not be optimal. 
@@ -84,23 +84,26 @@ class UC(RTEDBase, MPBase, SRBase, NSRBase): """ def __init__(self, system, config): - RTEDBase.__init__(self, system, config) + DCOPF.__init__(self, system, config) + RTEDBase.__init__(self) MPBase.__init__(self) SRBase.__init__(self) NSRBase.__init__(self) self.config.add(OrderedDict((('t', 1), - ('cul', 10000), ))) self.config.add_extra("_help", t="time interval in hours", - cul="penalty for unserved load, $/p.u.", ) self.info = 'unit commitment' self.type = 'DCUC' + # --- Data Section --- + # update timeslot model to UCTSlot + self.timeslot.info = 'Time slot for multi-period UC' self.timeslot.model = 'UCTSlot' + # --- reserve cost --- self.csu = RParam(info='startup cost', name='csu', tex_name=r'c_{su}', model='GCost', src='csu', @@ -109,6 +112,18 @@ def __init__(self, system, config): name='csd', tex_name=r'c_{sd}', model='GCost', src='csd', unit='$',) + # --- load --- + self.cdp = RParam(info='penalty for unserved load', + name='cdp', tex_name=r'c_{d,p}', + model='DCost', src='cdp', + no_parse=True, + unit=r'$/(p.u.*h)',) + self.dctrl = RParam(info='load controllability', + name='dctrl', tex_name=r'c_{trl,d}', + model='StaticLoad', src='ctrl', + expand_dims=1, + no_parse=True,) + # --- gen --- self.td1 = RParam(info='minimum ON duration', name='td1', tex_name=r't_{d1}', model='StaticGen', src='td1', @@ -121,24 +136,13 @@ def __init__(self, system, config): self.sd.info = 'zonal load factor for UC' self.sd.model = 'UCTSlot' - self.timeslot.info = 'Time slot for multi-period UC' - self.timeslot.model = 'UCTSlot' - self.ug.expand_dims = 1 - # NOTE: extend pg to 2D matrix, where row is gen and col is timeslot + # --- Model Section --- + # --- gen --- + # NOTE: extend pg to 2D matrix, where row for gen and col for timeslot self.pg.horizon = self.timeslot self.pg.info = '2D Gen power' - - self.plf.horizon = self.timeslot - self.plf.info = '2D Line flow' - - self.prs.horizon = self.timeslot - self.prs.info = '2D Spinning reserve' - - self.prns.horizon = self.timeslot - self.prns.info = '2D Non-spinning reserve' - # TODO: havn't test non-controllability? 
self.ctrle.u2 = self.tlv self.ctrle.info = 'Reshaped controllability' @@ -151,7 +155,6 @@ def __init__(self, system, config): pgub += '- mul(mul(ctrl, pmax), ugd)' self.pgub.e_str = pgub - # --- vars --- self.ugd = Var(info='commitment decision', horizon=self.timeslot, name='ugd', tex_name=r'u_{g,d}', @@ -167,13 +170,11 @@ def __init__(self, system, config): name='wgd', tex_name=r'w_{g,d}', model='StaticGen', src='u', boolean=True,) - self.zug = Var(info='Aux var, :math:`z_{ug} = u_{g,d} * p_g`', horizon=self.timeslot, name='zug', tex_name=r'z_{ug}', model='StaticGen', pos=True,) - - # NOTE: actions have two parts, one for initial status, another for the rest + # NOTE: actions have two parts: initial status and the rest self.actv = Constraint(name='actv', type='eq', info='startup action', e_str='ugd @ Mr - vgd[:, 1:]',) @@ -187,9 +188,11 @@ def __init__(self, system, config): info='initial shutdown action', e_str='-ugd[:, 0] + ug[:, 0] - wgd[:, 0]',) - # --- constraints --- - self.pb.e_str = 'gs @ zug - pdsz' # power balance - self.pb.type = 'uq' # soft constraint + self.prs.horizon = self.timeslot + self.prs.info = '2D Spinning reserve' + + self.prns.horizon = self.timeslot + self.prns.info = '2D Non-spinning reserve' # spinning reserve self.prsb.e_str = 'mul(ugd, mul(pmax, tlv)) - zug - prs' @@ -201,9 +204,6 @@ def __init__(self, system, config): # non-spinning reserve requirement self.rnsr.e_str = '-gs@prns + dnsr' - # --- bus power injection --- - self.pnb.e_str = 'PTDF@(Cgi@pg - Cli@pds) - plf' - # --- big M for ugd*pg --- self.Mzug = NumOp(info='10 times of max of pmax as big M for zug', name='Mzug', tex_name=r'M_{zug}', @@ -231,13 +231,37 @@ def __init__(self, system, config): name='doff', type='uq', e_str='multiply(Coff, wgd) - (1 - ugd)') + # --- line --- + self.plf.horizon = self.timeslot + self.plf.info = '2D Line flow' + self.plflb.e_str = '-plf - mul(rate_a, tlv)' + self.plfub.e_str = 'plf - mul(rate_a, tlv)' + + # --- unserved load --- + self.pdu = Var(info='unserved demand', + name='pdu', tex_name=r'p_{d,u}', + model='StaticLoad', unit='p.u.', + horizon=self.timeslot, + nonneg=True,) + self.pdsp = NumOp(u=self.pds, fun=np.clip, + args=dict(a_min=0, a_max=None), + info='positive demand', + name='pdsp', tex_name=r'p_{d,s}^{+}',) + self.pdumax = Constraint(info='unserved demand upper bound', + name='pdumax', type='uq', + e_str='pdu - mul(pdsp, dctrl@tlv)') + # --- power balance --- + # NOTE: nodal balance is also contributed by unserved load + pb = 'Bbus@aBus + Pbusinj@tlv + Cl@(pds-pdu) + Csh@gsh@tlv - Cg@pg' + self.pb.e_str = pb + # --- objective --- gcost = 'sum(c2 @ (t dot zug)**2 + c1 @ (t dot zug))' gcost += '+ sum(mul(mul(ug, c0), tlv))' acost = ' + sum(csu * vgd + csd * wgd)' # action srcost = ' + sum(csr @ prs)' # spinning reserve nsrcost = ' + sum(cnsr @ prns)' # non-spinning reserve - dcost = ' + sum(cul dot pos(pdsz - gs @ pg))' # unserved load + dcost = ' + sum(cdp @ pdu)' # unserved load self.obj.e_str = gcost + acost + srcost + nsrcost + dcost def _initial_guess(self): @@ -279,6 +303,16 @@ def _initial_guess(self): logger.warning(f'Turn off StaticGen {g_idx} as initial commitment guess.') return True + def _post_solve(self): + """ + Overwrite ``_post_solve``. 
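+
+        For every period, line flows are recovered from the solved bus angles
+        as ``plf = Bf @ aBus + Pfinj`` (a clarifying note; it mirrors the
+        calculation in the method body below).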
+        """
+        # --- post-solving calculations ---
+        # line flow: Bf@aBus + Pfinj
+        mats = self.system.mats
+        self.plf.optz.value = mats.Bf._v@self.aBus.v + self.Pfinj.v@self.tlv.v
+        return True
+
    def init(self, **kwargs):
        self._initial_guess()
        return super().init(**kwargs)

@@ -301,6 +335,25 @@ def unpack(self, **kwargs):
        return None


+class UCDG(UC, DGBase):
+    """
+    UC with distributed generation :ref:`DG`.
+
+    Note that UCDG only includes DG output power. If ESD1 is included,
+    UCES should be used instead, otherwise there is no SOC.
+    """
+
+    def __init__(self, system, config):
+        UC.__init__(self, system, config)
+        DGBase.__init__(self)
+
+        self.info = 'unit commitment with distributed generation'
+        self.type = 'DCUC'
+
+        # NOTE: extend vars to 2D
+        self.pgdg.horizon = self.timeslot
+
+
class UCES(UC, ESD1MPBase):
    """
    UC with energy storage :ref:`ESD1`.
diff --git a/ams/system.py b/ams/system.py
index 24ffbad9..9c37d7a6 100644
--- a/ams/system.py
+++ b/ams/system.py
@@ -5,7 +5,7 @@ import inspect
 import logging
 from collections import OrderedDict
-from typing import Dict, Optional, Tuple, Union  # NOQA
+from typing import Dict, Optional

 import numpy as np

@@ -32,7 +32,7 @@ def disable_method(func):
     def wrapper(*args, **kwargs):
-        logger.warning(f"Method `{func.__name__}` is included in ANDES System but not supported in AMS System.")
+        logger.warning("This method is included in ANDES but not supported in AMS.")
         return None
     return wrapper

@@ -126,7 +126,7 @@ def __init__(self,
         '_p_restore', '_store_calls', '_store_tf', '_to_orddct', '_v_to_dae',
         'save_config', 'collect_config', 'e_clear', 'f_update', 'fg_to_dae',
         'from_ipysheet', 'g_islands', 'g_update', 'get_z',
-        'init', 'j_islands', 'j_update', 'l_update_eq', 'summary',
+        'init', 'j_islands', 'j_update', 'l_update_eq', 'l_update_var',
         'precompile', 'prepare', 'reload', 'remove_pycapsule', 's_update_post',
         's_update_var', 'store_adder_setter', 'store_no_check_init',
         'store_sparse_pattern', 'store_switch_times', 'switch_action', 'to_ipysheet',
@@ -399,13 +399,21 @@ def setup(self):
        if not self.link_ext_param():
            ret = False

-        if self.Line.rate_a.v.max() == 0:
-            logger.info("Line rate_a is adjusted to large value automatically.")
-            self.Line.rate_a.v = 99
+        # --- model parameters range check ---
+        # TODO: there might be other parameters to check
+        # Line rate
+        adjusted_rate = []
+        default_rate = 999
+        for rate in [self.Line.rate_a, self.Line.rate_b, self.Line.rate_c]:
+            rate.v[rate.v == 0] = default_rate
+            adjusted_rate.append(rate.name)
+        adjusted_rate = ', '.join(adjusted_rate)
+        msg = f"Zero line rates detected in {adjusted_rate}, "
+        msg += f"adjusted to {default_rate}."
+        msg += "\nIf a line outage is expected, please set 'u' to 0."
+        logger.info(msg)

        # === no device addition or removal after this point ===
-        # TODO: double check calc_pu_coeff
        self.calc_pu_coeff()  # calculate parameters in system per units
-        # self.store_existing()  # store models with routine flags

        if ret is True:
            self.is_setup = True  # set `is_setup` if no error occurred
@@ -497,15 +505,15 @@ def connectivity(self, info=True):

        raise NotImplementedError

-    def to_andes(self, setup=True, addfile=None, overwite=None, no_keep=True,
-                 **kwargs):
+    def to_andes(self, setup=True, addfile=None, **kwargs):
        """
        Convert the AMS system to an ANDES system.

-        This function is a wrapper of ``ams.interop.andes.to_andes()``.
-        Using the file conversion ``sp.to_andes()`` will automatically
-        link the AMS system instance to the converted ANDES system instance
-        in the AMS system attribute ``sp.dyn``.
+        A preferred dynamic system file to be added has the following features:
+        1. The file contains both power flow and dynamic models.
+        2. The file can run in ANDES natively.
+        3. Power flow models are in the same shape as the AMS system.
+        4. Dynamic models, if any, are in the same shape as the AMS system.

        Parameters
        ----------
@@ -515,10 +523,6 @@ def to_andes(self, setup=True, addfile=None, overwite=None, no_keep=True,
            Whether to call `setup()` after the conversion. Default is True.
        addfile : str, optional
            The additional file to be converted to ANDES dynamic mdoels.
-        overwrite : bool, optional
-            Whether to overwrite the existing file.
-        no_keep : bool, optional
-            True to remove the converted file after the conversion.
        **kwargs : dict
            Keyword arguments to be passed to `andes.system.System`.

@@ -537,9 +541,45 @@ def to_andes(self, setup=True, addfile=None, overwite=None, no_keep=True,
        ...              overwrite=True, no_keep=True, no_output=True)
        """
        return to_andes(self, setup=setup, addfile=addfile,
-                        overwite=overwite, no_keep=no_keep,
                        **kwargs)

+    def summary(self):
+        """
+        Print out system summary.
+        """
+        # FIXME: add system connectivity check
+        # logger.info("-> System connectivity check results:")
+        rtn_check = OrderedDict((key, val._data_check(info=False)) for key, val in self.routines.items())
+        rtn_types = OrderedDict({tp: [] for tp in self.types.keys()})
+
+        for name, data_pass in rtn_check.items():
+            if data_pass:
+                type = self.routines[name].type
+                rtn_types[type].append(name)
+
+        nb = self.Bus.n
+        nl = self.Line.n
+        ng = self.StaticGen.n
+
+        pd = self.PQ.p0.v.sum() * self.config.mva
+        qd = self.PQ.q0.v.sum() * self.config.mva
+
+        out = list()
+
+        out.append(f"-> System size:")
+        out.append(f"{nb} Buses; {nl} Lines; {ng} Generators")
+        out.append(f"Active load: {pd:,.2f} MW; Reactive load: {qd:,.2f} MVar")
+
+        out.append("-> Data check results:")
+        for type, names in rtn_types.items():
+            if len(names) == 0:
+                continue
+            names = ", ".join(names)
+            out.append(f"{type}: {names}")
+
+        out_str = '\n'.join(out)
+        logger.info(out_str)
+

# --------------- Helper Functions ---------------
# NOTE: _config_numpy, load_config_rc are imported from andes.system
diff --git a/docs/source/release-notes.rst b/docs/source/release-notes.rst
index ddf55835..5d199b3c 100644
--- a/docs/source/release-notes.rst
+++ b/docs/source/release-notes.rst
@@ -9,6 +9,18 @@ The APIs before v3.0.0 are in beta and may change without prior notice.
Pre-v1.0.0 ========== +v0.8.0 (2024-01-09) +------------------- + +- Refactor ``DCED`` routines to improve performance + +v0.7.5 (2023-12-28) +------------------- + +- Refactor ``MatProcessor`` and ``DCED`` routines to improve performance +- Integrate sparsity pattern in ``RParam`` +- Rename energy storage routines ``RTED2``, ``ED2`` and ``UC2`` to ``RTEDES``, ``EDES`` and ``UCES`` + v0.7.4 (2023-11-29) ------------------- diff --git a/tests/test_case.py b/tests/test_case.py index a9d8be54..f209b69a 100644 --- a/tests/test_case.py +++ b/tests/test_case.py @@ -87,9 +87,9 @@ def test_alter_param_before_routine(self): """ self.ss.GCost.alter("c1", ['GCost_1', 'GCost_2'], [1500., 3100.]) - np.testing.assert_array_equal(self.ss.GCost.c1.v, [1500., 3100., 4000., 3000.]) + np.testing.assert_array_equal(self.ss.GCost.c1.v, [1500., 3100., 0.4, 0.1]) self.ss.ACOPF.run() - np.testing.assert_array_equal(self.ss.GCost.c1.v, [1500., 3100., 4000., 3000.]) + np.testing.assert_array_equal(self.ss.GCost.c1.v, [1500., 3100., 0.4, 0.1]) def test_alter_param_after_routine(self): """ @@ -98,9 +98,9 @@ def test_alter_param_after_routine(self): self.ss.ACOPF.run() self.ss.GCost.alter("c1", ['GCost_1', 'GCost_2'], [1500., 3100.]) - np.testing.assert_array_equal(self.ss.GCost.c1.v, [1500., 3100., 4000., 3000.]) + np.testing.assert_array_equal(self.ss.GCost.c1.v, [1500., 3100., 0.4, 0.1]) self.ss.ACOPF.run() - np.testing.assert_array_equal(self.ss.GCost.c1.v, [1500., 3100., 4000., 3000.]) + np.testing.assert_array_equal(self.ss.GCost.c1.v, [1500., 3100., 0.4, 0.1]) def test_multiple_disconnected_line(self): """ diff --git a/tests/test_dctypes.py b/tests/test_dctypes.py new file mode 100644 index 00000000..c28217e3 --- /dev/null +++ b/tests/test_dctypes.py @@ -0,0 +1,112 @@ +import unittest +from functools import wraps +import numpy as np + +import ams +import cvxpy as cp + + +def require_MIP_solver(f): + """ + Decorator for skipping tests that require MIP solver. + """ + def wrapper(*args, **kwargs): + all_solvers = cp.installed_solvers() + mip_solvers = ['CPLEX', 'GUROBI', 'MOSEK'] + if any(s in mip_solvers for s in all_solvers): + pass + else: + raise unittest.SkipTest("MIP solver is not available.") + return f(*args, **kwargs) + return wrapper + + +class TestDCED(unittest.TestCase): + """ + Test DCED types routine. + """ + + def setUp(self) -> None: + self.ss = ams.load(ams.get_case("ieee39/ieee39_uced_esd1.xlsx"), + default_config=True, + no_output=True, + ) + + def test_dcopf(self): + """ + Test DCOPF. + """ + init = self.ss.DCOPF.init() + self.assertTrue(init, "DCOPF initialization failed!") + self.ss.DCOPF.run(solver='ECOS') + np.testing.assert_equal(self.ss.DCOPF.exit_code, 0) + + def test_rted(self) -> None: + """ + Test RTED. + """ + init = self.ss.RTED.init() + self.assertTrue(init, "RTED initialization failed!") + self.ss.RTED.run(solver='ECOS') + np.testing.assert_equal(self.ss.RTED.exit_code, 0) + + def test_ed(self) -> None: + """ + Test ED. + """ + init = self.ss.ED.init() + self.assertTrue(init, "ED initialization failed!") + self.ss.ED.run(solver='ECOS') + np.testing.assert_equal(self.ss.ED.exit_code, 0) + + @require_MIP_solver + def test_rtedes(self) -> None: + """ + Test RTEDES. + """ + init = self.ss.RTEDES.init() + self.assertTrue(init, "RTEDES initialization failed!") + self.ss.RTEDES.run() + np.testing.assert_equal(self.ss.RTEDES.exit_code, 0) + + @require_MIP_solver + def test_edes(self) -> None: + """ + Test EDES. 
+ """ + init = self.ss.EDES.init() + self.assertTrue(init, "EDES initialization failed!") + self.ss.EDES.run() + np.testing.assert_equal(self.ss.EDES.exit_code, 0) + + +class test_DCUC(unittest.TestCase): + """ + Test DCUC types routine. + """ + + def setUp(self) -> None: + self.ss = ams.load(ams.get_case("ieee39/ieee39_uced_esd1.xlsx"), + default_config=True, + no_output=True, + ) + + @require_MIP_solver + def test_uc(self) -> None: + """ + Test UC. + """ + init = self.ss.UC.init() + self.assertTrue(init, "UC initialization failed!") + self.ss.UC.run() + np.testing.assert_equal(self.ss.UC.exit_code, 0) + + @require_MIP_solver + def test_uces(self) -> None: + """ + Test UCES. + """ + init = self.ss.UCES.init() + self.assertTrue(init, "UCES initialization failed!") + self.ss.UCES.run() + np.testing.assert_equal(self.ss.UCES.exit_code, 0) diff --git a/tests/test_omodel.py b/tests/test_omodel.py deleted file mode 100644 index 5ead7cb5..00000000 --- a/tests/test_omodel.py +++ /dev/null @@ -1,64 +0,0 @@ -import unittest -import numpy as np - -import ams -import cvxpy as cp - - -def require_MIP_solver(f): - """ - Decorator for skipping tests that require MIP solver. - """ - def wrapper(*args, **kwargs): - all_solvers = cp.installed_solvers() - mip_solvers = ['CBC', 'COPT', 'GLPK_MI', 'CPLEX', 'GUROBI', - 'MOSEK', 'SCIP', 'XPRESS', 'SCIPY'] - if any(s in mip_solvers for s in all_solvers): - pass - else: - raise unittest.SkipTest("MIP solver is not available.") - return f(*args, **kwargs) - return wrapper - - -class TestOMdel(unittest.TestCase): - """ - Test methods of `OModel`. - """ - def setUp(self) -> None: - self.ss = ams.load(ams.get_case("ieee39/ieee39_uced_esd1.xlsx"), - default_config=True, - no_output=True, - ) - - def test_var_access_brfore_solve(self): - """ - Test `Var` access before solve. - """ - self.ss.DCOPF.init() - self.assertIsNone(self.ss.DCOPF.pg.v) - - def test_var_access_after_solve(self): - """ - Test `Var` access after solve. - """ - self.ss.DCOPF.run() - np.testing.assert_equal(self.ss.DCOPF.pg.v, - self.ss.StaticGen.get(src='p', attr='v', - idx=self.ss.DCOPF.pg.get_idx())) - - def test_constr_access_brfore_solve(self): - """ - Test `Constr` access before solve. - """ - self.ss.DCOPF.init(force=True) - np.testing.assert_equal(self.ss.DCOPF.plflb.v, None) - - def test_constr_access_after_solve(self): - """ - Test `Constr` access after solve. - """ - self.ss.DCOPF.run() - self.assertIsInstance(self.ss.DCOPF.plflb.v, np.ndarray) - - # NOTE: add Var, Constr add functions diff --git a/tests/test_routine.py b/tests/test_routine.py index 7d317bc1..d2339d89 100644 --- a/tests/test_routine.py +++ b/tests/test_routine.py @@ -33,12 +33,28 @@ class TestRoutineMethods(unittest.TestCase): """ Test methods of `Routine`. """ + def setUp(self) -> None: - self.ss = ams.load(ams.get_case("ieee39/ieee39_uced_esd1.xlsx"), + self.ss = ams.load(ams.get_case("ieee39/ieee39_uced.xlsx"), default_config=True, no_output=True, ) + def test_data_check(self): + """ + Test `Routine._data_check()` method. + """ + + self.assertTrue(self.ss.DCOPF._data_check()) + self.assertFalse(self.ss.RTEDES._data_check()) + + def test_get_off_constrs(self): + """ + Test `Routine._get_off_constrs()` method. + """ + + self.assertIsInstance(self.ss.DCOPF._get_off_constrs(), list) + def test_routine_set(self): """ Test `Routine.set()` method. 
@@ -61,33 +77,12 @@ def test_routine_get(self):
        np.testing.assert_equal(self.ss.DCOPF.get('pg', 'PV_30', 'v'),
                                self.ss.StaticGen.get('p', 'PV_30', 'v'))

-
-class TestRoutineInit(unittest.TestCase):
-    """
-    Test solving routines.
-    """
-    def setUp(self) -> None:
-        self.s1 = ams.load(ams.get_case("ieee39/ieee39_uced_esd1.xlsx"),
-                           default_config=True,
-                           no_output=True,
-                           )
-        self.s2 = ams.load(ams.get_case("ieee123/ieee123_regcv1.xlsx"),
-                           default_config=True,
-                           no_output=True,
-                           )
-
-    def test_Init(self):
+    def test_routine_init(self):
        """
-        Test `routine.init()`.
+        Test `Routine.init()` method.
        """
-        # NOTE: for DED, using ieee123 as a test case
-        for rtn in self.s1.routines.values():
-            if not rtn._data_check():
-                rtn = getattr(self.s2, rtn.class_name)
-            if not rtn._data_check():
-                continue  # TODO: here should be a warning?
-            self.assertTrue(rtn.init(force=True),
-                            f"{rtn.class_name} initialization failed!")
+
+        self.assertTrue(self.ss.DCOPF.init(), "DCOPF initialization failed!")


@unittest.skipUnless(HAVE_IGRAPH, "igraph not available")
diff --git a/tests/test_service.py b/tests/test_service.py
index 44f37e04..12806545 100644
--- a/tests/test_service.py
+++ b/tests/test_service.py
@@ -1,8 +1,8 @@
 import unittest
 import numpy as np
-from scipy.sparse import csr_matrix as c_sparse  # NOQA
-from scipy.sparse import lil_matrix as l_sparse  # NOQA
+from scipy.sparse import csr_matrix as c_sparse
+from scipy.sparse import lil_matrix as l_sparse

 import ams
 from ams.core.matprocessor import MatProcessor, MParam
@@ -50,13 +50,9 @@ def test_Cft(self):
        """
        self.assertIsInstance(self.mats.Cft._v, (c_sparse, l_sparse))
        self.assertIsInstance(self.mats.Cft.v, np.ndarray)
-        self.assertEqual(self.mats.Cft.v.max(), 1)
-
-    def test_pl(self):
-        """
-        Test `pl`.
-        """
-        self.assertEqual(self.mats.pl.v.ndim, 1)
+        self.assertEqual(self.mats.Cft._v.max(), 1)
+        np.testing.assert_equal(self.mats.Cft._v.sum(axis=0),
+                                np.zeros((1, self.nl)))


class TestService(unittest.TestCase):
@@ -106,8 +102,8 @@ def test_NumOpDual(self):
        """
        Test `NumOpDual`.
        """
-        p_vec = MParam(v=self.ss.mats.pl.v)
-        one_vec = MParam(v=np.ones(self.ss.Bus.n))
+        p_vec = MParam(v=self.ss.DCOPF.pd.v)
+        one_vec = MParam(v=np.ones(self.ss.StaticLoad.n))
        p_sum = NumOpDual(u=p_vec, u2=one_vec,
                          fun=np.multiply, rfun=np.sum)
        self.assertEqual(p_sum.v, self.ss.PQ.p0.v.sum())
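For readers of the UC hunks above: the auxiliary variable ``zug`` stands in for the bilinear product of the binary commitment decision and the continuous dispatch, with ``Mzug`` supplying the big-M constant. A textbook big-M linearization of z = u * p, shown as a standalone CVXPY sketch rather than the exact AMS constraint set:

import cvxpy as cp

M = 10.0                           # big M, e.g. a multiple of the largest pmax
p = cp.Variable(nonneg=True)       # continuous dispatch
u = cp.Variable(boolean=True)      # on/off commitment decision
z = cp.Variable(nonneg=True)       # stands in for u * p

linearization = [
    z <= M * u,                    # z is forced to zero when the unit is off
    z <= p,                        # z never exceeds the dispatch
    z >= p - M * (1 - u),          # z equals p when the unit is on
]

With these three inequalities (plus z >= 0), z equals p whenever u = 1 and collapses to zero whenever u = 0, so the product never has to be modeled directly.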
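Similarly, the unserved-load handling added to UC (the per-load ``DCost.cdp`` penalty and the nonnegative ``pdu`` variable that relaxes the power balance) follows the usual load-shedding pattern: shortfall is allowed but priced. A self-contained single-bus, single-period sketch with hypothetical numbers, not the AMS equations:

import cvxpy as cp

pd = 3.0                    # hypothetical demand, p.u.
pmax = 2.5                  # hypothetical available generation, p.u.
c1, cdp = 20.0, 1000.0      # generation cost and unserved-load penalty

pg = cp.Variable(nonneg=True)
pdu = cp.Variable(nonneg=True)              # unserved demand

prob = cp.Problem(
    cp.Minimize(c1 * pg + cdp * pdu),       # shortfall is penalized, not infeasible
    [pg == pd - pdu,                        # balance relaxed by unserved load
     pg <= pmax,                            # generation limit
     pdu <= pd],                            # cannot shed more than the demand
)
prob.solve()
# expected: pg = 2.5 and pdu = 0.5, since demand exceeds available capacity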