From 35ac916254d8d4063bafed4aa61573b95c31e3c4 Mon Sep 17 00:00:00 2001 From: Patrick Vorgers Date: Thu, 24 Oct 2024 21:20:52 +0200 Subject: [PATCH] iSolarCloud (#65) --- AUTHORS.md | 7 +- Datasources/iSolarCloud/README.md | 24 ++ ...nthly.Report_PLANT_NAME_20241011100248.csv | 31 ++ .../elec_feed_in_tariff_1_high_resolution.csv | 30 ++ ...elec_feed_out_tariff_1_high_resolution.csv | 30 ++ .../elec_solar_high_resolution.csv | 30 ++ .../iSolarCloud/iSolarCloudDataPrepare.py | 364 ++++++++++++++++++ README.md | 3 + 8 files changed, 518 insertions(+), 1 deletion(-) create mode 100644 Datasources/iSolarCloud/README.md create mode 100644 Datasources/iSolarCloud/Sample files/Monthly.Report_PLANT_NAME_20241011100248.csv create mode 100644 Datasources/iSolarCloud/Sample files/elec_feed_in_tariff_1_high_resolution.csv create mode 100644 Datasources/iSolarCloud/Sample files/elec_feed_out_tariff_1_high_resolution.csv create mode 100644 Datasources/iSolarCloud/Sample files/elec_solar_high_resolution.csv create mode 100644 Datasources/iSolarCloud/iSolarCloudDataPrepare.py diff --git a/AUTHORS.md b/AUTHORS.md index 27f64dc..0ff6700 100644 --- a/AUTHORS.md +++ b/AUTHORS.md @@ -27,4 +27,9 @@ Please share scripts or how-to guides if you have built an integration with a ne * Slyoldfox (https://github.com/slyoldfox) * Implementation: Fluvius * TylonHH (https://github.com/TylonHH) - * Implementation: EnergyControl (app) \ No newline at end of file + * Implementation: EnergyControl (app) +* Nikolaj Hendel (https://github.com/nikolajhendel) + * How to for: iSolarCloud (Sungrow) + * Sample files for: iSolarCloud (Sungrow) + + \ No newline at end of file diff --git a/Datasources/iSolarCloud/README.md b/Datasources/iSolarCloud/README.md new file mode 100644 index 0000000..f27056e --- /dev/null +++ b/Datasources/iSolarCloud/README.md @@ -0,0 +1,24 @@ +# Energy provider: iSolarCloud + +iSolarCloud offers the option to export data from the [iSolarCloud](https://www.isolarcloud.com/) site. 
This data can be transformed and used to import into Home Assistant. + +**Data provided** +- Solar production - High resolution (day interval) - kWh + +**Tooling needed** +- Python 3 +- Pandas python library ```pip install pandas``` + + +**How-to** +- Export data from the [iSolarCloud](https://www.isolarcloud.com/) site + - Go to the [iSolarCloud](https://www.isolarcloud.com/) site + - Login with your account + - Select plant + - Scroll down to statistics section (default is day) and select month + - Click export and select csv + - Repeat this for each month you want to export + - The data for each selected month is downloaded to your PC as a separate file +- Download the ```iSolarCloudDataPrepare.py``` file and put it in the same directory as the iSolarCloud data +- Execute the python script with as parameter the name of the directory which contains the files with the exported data ```python iSolarCloudDataPrepare.py *.csv```. The python script creates the needed file for the generic import script. 
+- Follow the steps in the overall how-to \ No newline at end of file diff --git a/Datasources/iSolarCloud/Sample files/Monthly.Report_PLANT_NAME_20241011100248.csv b/Datasources/iSolarCloud/Sample files/Monthly.Report_PLANT_NAME_20241011100248.csv new file mode 100644 index 0000000..bda1205 --- /dev/null +++ b/Datasources/iSolarCloud/Sample files/Monthly.Report_PLANT_NAME_20241011100248.csv @@ -0,0 +1,31 @@ +Time,PV(kWh),Purchased Energy(kWh),Feed-in(kWh),Load(kWh) +2024-09-01,56.2,7,44.7,18.5 +2024-09-02,56.7,6.2,47.7,15.2 +2024-09-03,57.5,7.3,48.3,16.5 +2024-09-04,57.1,6.7,44.6,19.2 +2024-09-05,50.3,5.9,41.2,15 +2024-09-06,57.3,6.7,44.2,19.8 +2024-09-07,45.4,5.7,32.6,18.5 +2024-09-08,16.7,8.7,7.1,18.3 +2024-09-09,54.2,5.4,42.9,16.7 +2024-09-10,46.6,7.8,29.9,24.5 +2024-09-11,52.7,5.5,39.7,18.5 +2024-09-12,24,7.6,12.1,19.5 +2024-09-13,59.2,6,46.7,18.5 +2024-09-14,57.6,5.4,47,16 +2024-09-15,49.2,6.3,36.3,19.2 +2024-09-16,62.4,5.3,46.5,21.2 +2024-09-17,61.3,5.7,50.6,16.4 +2024-09-18,64.3,6.1,51.2,19.2 +2024-09-19,62.7,5.3,51.5,16.5 +2024-09-20,63.5,4.7,49.3,18.9 +2024-09-21,64.1,5.4,51.4,18.1 +2024-09-22,59.9,5.7,41.2,24.4 +2024-09-23,62,5.7,46.9,20.8 +2024-09-24,43.6,6.1,31,18.7 +2024-09-25,28.7,7,17.1,18.6 +2024-09-26,5.4,9.4,1.9,12.9 +2024-09-27,41.5,5.8,28.7,18.6 +2024-09-28,51.2,5,35.8,20.4 +2024-09-29,24.6,10.4,14.2,20.8 +2024-09-30,34.4,5.6,22.8,17.2 diff --git a/Datasources/iSolarCloud/Sample files/elec_feed_in_tariff_1_high_resolution.csv b/Datasources/iSolarCloud/Sample files/elec_feed_in_tariff_1_high_resolution.csv new file mode 100644 index 0000000..3a1fa75 --- /dev/null +++ b/Datasources/iSolarCloud/Sample files/elec_feed_in_tariff_1_high_resolution.csv @@ -0,0 +1,30 @@ +1725148800,44.7 +1725235200,92.4 +1725321600,140.7 +1725408000,185.3 +1725494400,226.5 +1725580800,270.7 +1725667200,303.3 +1725753600,310.4 +1725840000,353.3 +1725926400,383.2 +1726012800,422.9 +1726099200,435.0 +1726185600,481.7 +1726272000,528.7 +1726358400,565.0 +1726444800,611.5 
+1726531200,662.1 +1726617600,713.3 +1726704000,764.8 +1726790400,814.1 +1726876800,865.5 +1726963200,906.7 +1727049600,953.6 +1727136000,984.6 +1727222400,1001.7 +1727308800,1003.6 +1727395200,1032.3 +1727481600,1068.1 +1727568000,1082.3 +1727654400,1105.1 diff --git a/Datasources/iSolarCloud/Sample files/elec_feed_out_tariff_1_high_resolution.csv b/Datasources/iSolarCloud/Sample files/elec_feed_out_tariff_1_high_resolution.csv new file mode 100644 index 0000000..aab99ff --- /dev/null +++ b/Datasources/iSolarCloud/Sample files/elec_feed_out_tariff_1_high_resolution.csv @@ -0,0 +1,30 @@ +1725148800,7.0 +1725235200,13.2 +1725321600,20.5 +1725408000,27.2 +1725494400,33.1 +1725580800,39.8 +1725667200,45.5 +1725753600,54.2 +1725840000,59.6 +1725926400,67.4 +1726012800,72.9 +1726099200,80.5 +1726185600,86.5 +1726272000,91.9 +1726358400,98.2 +1726444800,103.5 +1726531200,109.2 +1726617600,115.3 +1726704000,120.6 +1726790400,125.3 +1726876800,130.7 +1726963200,136.4 +1727049600,142.1 +1727136000,148.2 +1727222400,155.2 +1727308800,164.6 +1727395200,170.4 +1727481600,175.4 +1727568000,185.8 +1727654400,191.4 diff --git a/Datasources/iSolarCloud/Sample files/elec_solar_high_resolution.csv b/Datasources/iSolarCloud/Sample files/elec_solar_high_resolution.csv new file mode 100644 index 0000000..694e6cf --- /dev/null +++ b/Datasources/iSolarCloud/Sample files/elec_solar_high_resolution.csv @@ -0,0 +1,30 @@ +1725148800,56.2 +1725235200,112.9 +1725321600,170.4 +1725408000,227.5 +1725494400,277.8 +1725580800,335.1 +1725667200,380.5 +1725753600,397.2 +1725840000,451.4 +1725926400,498.0 +1726012800,550.7 +1726099200,574.7 +1726185600,633.9 +1726272000,691.5 +1726358400,740.7 +1726444800,803.1 +1726531200,864.4 +1726617600,928.7 +1726704000,991.4 +1726790400,1054.9 +1726876800,1119.0 +1726963200,1178.9 +1727049600,1240.9 +1727136000,1284.5 +1727222400,1313.2 +1727308800,1318.6 +1727395200,1360.1 +1727481600,1411.3 +1727568000,1435.9 +1727654400,1470.3 diff --git 
a/Datasources/iSolarCloud/iSolarCloudDataPrepare.py b/Datasources/iSolarCloud/iSolarCloudDataPrepare.py new file mode 100644 index 0000000..f6c9cf7 --- /dev/null +++ b/Datasources/iSolarCloud/iSolarCloudDataPrepare.py @@ -0,0 +1,364 @@ +import datetime +import glob +import json +import math +import os +import sys +from collections import namedtuple +from typing import List + +import pandas as pd + +# DataFilter named tuple definition +# column: The name of the column on which the filter should be applied +# value: The value on which should be filtered (regular expressions can be used) +# equal: Boolean value indicating whether the filter should be inclusive or exclusive (True/False) +DataFilter = namedtuple("DataFilter", ["column", "value", "equal"]) + +# OutputFileDefinition named tuple definition +# outputFileName: The name of the output file +# valueColumnName: The name of the column holding the value +# dataFilters: A list of datafilters (see above the definition of a datafilter) +# recalculate: Boolean value indication whether the data should be recalculated, +# because the source is not an increasing value +OutputFileDefinition = namedtuple( + "OutputFileDefinition", + ["outputFileName", "valueColumnName", "dataFilters", "recalculate"], +) + +# --------------------------------------------------------------------------------------------------------------------- +# TEMPLATE SETUP +# --------------------------------------------------------------------------------------------------------------------- + +# Name of the energy provider +energyProviderName = "iSolarCloud" + +# Inputfile(s): filename extension +inputFileNameExtension = ".csv" +# Inputfile(s): Name of the column containing the date of the reading. +# Use this in case date and time is combined in one field. +inputFileDateColumnName = "Time" +# Inputfile(s): Name of the column containing the time of the reading. +# Leave empty in case date and time is combined in one field. 
+inputFileTimeColumnName = "" +# Inputfile(s): Date/time format used in the datacolumn. +# Combine the format of the date and time in case date and time are two seperate fields. +inputFileDateTimeColumnFormat = "%Y-%m-%d" +# Inputfile(s): Data seperator being used in the .csv input file +inputFileDataSeperator = "," +# Inputfile(s): Decimal token being used in the input file +inputFileDataDecimal = "." +# Inputfile(s): Number of header rows in the input file +inputFileNumHeaderRows = 0 +# Inputfile(s): Number of footer rows in the input file +inputFileNumFooterRows = 0 +# Inputfile(s): Json path of the records (only needed for json files) +# Example: inputFileJsonPath: List[str] = ['energy', 'values'] +inputFileJsonPath: List[str] = [] +# Inputfile(s): Name or index of the excel sheet (only needed for excel files containing more sheets, +# leave at 0 for the first sheet) +inputFileExcelSheetName = 0 + +# Name used for the temporary date/time field. +# This needs normally no change only when it conflicts with existing columns. +dateTimeColumnName = "_DateTime" + +# List of one or more output file definitions +outputFiles = [ + OutputFileDefinition( + "elec_feed_in_tariff_1_high_resolution.csv", + "Feed-in(kWh)", + [], + True, + ), + OutputFileDefinition( + "elec_feed_out_tariff_1_high_resolution.csv", + "Purchased Energy(kWh)", + [], + True, + ), + OutputFileDefinition( + "elec_solar_high_resolution.csv", + "PV(kWh)", + [], + True, + ), +] + +# Use the below functions in case data has to be manipulated after the data has been read. +# Use the customPrepareDataPre function in case the time/date data has to be manipulated. 
+# Use the customPrepareDataPost function in all other cases + + +# Prepare the input data (before date/time manipulation) +def customPrepareDataPre(dataFrame: pd.DataFrame) -> pd.DataFrame: + return dataFrame + + +# Prepare the input data (after date/time manipulation) +def customPrepareDataPost(dataFrame: pd.DataFrame) -> pd.DataFrame: + # Default no manipulation, add code if needed + + # Example: + # dataFrame["Energy Produced (Wh)"] = + # dataFrame["Energy Produced (Wh)"].str.replace(',', '').replace('\"', '').astype(int) + return dataFrame + + +# --------------------------------------------------------------------------------------------------------------------- + +# Template version number +versionNumber = "1.5.0" + + +# Prepare the input data +def prepareData(dataFrame: pd.DataFrame) -> pd.DataFrame: + print("Preparing data") + + # Handle any custom dataframe manipulation (Pre) + dataFrame = customPrepareDataPre(dataFrame) + + # Check if we have to combine a date and time field + if inputFileTimeColumnName != "": + # Take note that the format is changed in case the column was parsed as date. + # For excel change the type of the cell to text or adjust the format accordingly, + # use statement print(dataFrame) to get information about the used format. 
+ dataFrame[dateTimeColumnName] = pd.to_datetime( + dataFrame[inputFileDateColumnName].astype(str) + + " " + + dataFrame[inputFileTimeColumnName].astype(str), + format=inputFileDateTimeColumnFormat, + utc=True, + ) + else: + dataFrame[dateTimeColumnName] = pd.to_datetime( + dataFrame[inputFileDateColumnName], + format=inputFileDateTimeColumnFormat, + utc=True, + ) + # Remove the timezone (if it exists) + dataFrame[dateTimeColumnName] = dataFrame[dateTimeColumnName].dt.tz_localize(None) + + # Select only correct dates + df = dataFrame.loc[ + ( + dataFrame[dateTimeColumnName] + >= datetime.datetime.strptime("01-01-1970", "%d-%m-%Y") + ) + & ( + dataFrame[dateTimeColumnName] + <= datetime.datetime.strptime("31-12-2099", "%d-%m-%Y") + ) + ] + + # Make sure that the data is correctly sorted + df.sort_values(by=dateTimeColumnName, ascending=True, inplace=True) + + # Transform the date into unix timestamp for Home-Assistant + df[dateTimeColumnName] = ( + df[dateTimeColumnName].astype("int64") / 1000000000 + ).astype("int64") + + # Handle any custom dataframe manipulation (Post) + df = customPrepareDataPost(df) + + return df + + +# Filter the data based on the provided dataFilter(s) +def filterData(dataFrame: pd.DataFrame, filters: List[DataFilter]) -> pd.DataFrame: + df = dataFrame + # Iterate all the provided filters + for dataFilter in filters: + # Determine the subset based on the provided filter (regular expression) + series = ( + df[dataFilter.column].astype(str).str.contains(dataFilter.value, regex=True) + ) + + # Validate whether the data is included or excluded + if not dataFilter.equal: + series = ~series + + df = df[series] + + return df + + +# Recalculate the data so that the value increases +def recalculateData(dataFrame: pd.DataFrame, dataColumnName: str) -> pd.DataFrame: + df = dataFrame + + # Make the value column increasing (skip first row) + previousRowIndex = -1 + for index, _ in df.iterrows(): + # Check if the current row contains a valid value + if 
math.isnan(df.at[index, dataColumnName]): + df.at[index, dataColumnName] = 0.0 + + if previousRowIndex > -1: + # Add the value of the previous row to the current row + df.at[index, dataColumnName] = round( + df.at[index, dataColumnName] + df.at[previousRowIndex, dataColumnName], + 3, + ) + previousRowIndex = index + + return df + + +# Generate the datafile which can be imported +def generateImportDataFile( + dataFrame: pd.DataFrame, + outputFile: str, + dataColumnName: str, + filters: list[DataFilter], + recalculate: bool, +): + # Check if the column exists + if dataColumnName in dataFrame.columns: + print("Creating file: " + outputFile) + dataFrameFiltered = filterData(dataFrame, filters) + + # Check if we have to recalculate the data + if recalculate: + dataFrameFiltered = recalculateData(dataFrameFiltered, dataColumnName) + + # Select only the needed data + dataFrameFiltered = dataFrameFiltered.filter( + [dateTimeColumnName, dataColumnName] + ) + + # Create the output file + dataFrameFiltered.to_csv( + outputFile, sep=",", decimal=".", header=False, index=False + ) + else: + print( + "Could not create file: " + + outputFile + + " because column: " + + dataColumnName + + " does not exist" + ) + + +# Read the inputfile +def readInputFile(inputFileName: str) -> pd.DataFrame: + # Read the specified file + print("Loading data: " + inputFileName) + + # Check if we have a supported extension + if inputFileNameExtension == ".csv": + # Read the CSV file + df = pd.read_csv( + inputFileName, + sep=inputFileDataSeperator, + decimal=inputFileDataDecimal, + skiprows=inputFileNumHeaderRows, + skipfooter=inputFileNumFooterRows, + index_col=False, + engine="python", + ) + elif (inputFileNameExtension == ".xlsx") or (inputFileNameExtension == ".xls"): + # Read the XLSX/XLS file + df = pd.read_excel( + inputFileName, + sheet_name=inputFileExcelSheetName, + decimal=inputFileDataDecimal, + skiprows=inputFileNumHeaderRows, + skipfooter=inputFileNumFooterRows, + ) + elif 
inputFileNameExtension == ".json": + # Read the JSON file + jsonData = json.load(open(inputFileName)) + df = pd.json_normalize(jsonData, record_path=inputFileJsonPath) + else: + raise Exception("Unsupported extension: " + inputFileNameExtension) + + return df + + +# Check if all the provided files have the correct extension +def correctFileExtensions(fileNames: list[str]) -> bool: + # Check all filenames for the right extension + for fileName in fileNames: + _, fileNameExtension = os.path.splitext(fileName) + if fileNameExtension != inputFileNameExtension: + return False + return True + + +# Generate the datafiles which can be imported +def generateImportDataFiles(inputFileNames: str): + # Find the file(s) + fileNames = glob.glob(inputFileNames) + if len(fileNames) > 0: + print("Found files based on: " + inputFileNames) + + # Check if all the found files are of the correct type + if correctFileExtensions(fileNames): + # Read all the found files and concat the data + dataFrame = pd.concat( + map(readInputFile, fileNames), ignore_index=True, sort=True + ) + + # Prepare the data + dataFrame = prepareData(dataFrame) + + # Create the output files + for outputFile in outputFiles: + generateImportDataFile( + dataFrame, + outputFile.outputFileName, + outputFile.valueColumnName, + outputFile.dataFilters, + outputFile.recalculate, + ) + + print("Done") + else: + print("Only " + inputFileNameExtension + " datafiles are allowed") + else: + print("No files found based on : " + inputFileNames) + + +# Validate that the script is started from the command prompt +if __name__ == "__main__": + print(energyProviderName + " Data Prepare") + print("") + print( + "This python script prepares " + + energyProviderName + + " data for import into Home Assistant." + ) + print( + "The files will be prepared in the current directory any previous files will be overwritten!" 
+ ) + print("") + if len(sys.argv) == 2: + if ( + input("Are you sure you want to continue [Y/N]?: ").lower().strip()[:1] + == "y" + ): + generateImportDataFiles(sys.argv[1]) + else: + print(energyProviderName + "PrepareData usage:") + print( + energyProviderName + + "PrepareData <" + + energyProviderName + + " " + + inputFileNameExtension + + " filename (wildcard)>" + ) + print() + print( + "Enclose the path/filename in quotes in case wildcards are being used on Linux based systems." + ) + print( + "Example: " + + energyProviderName + + 'PrepareData "*' + + inputFileNameExtension + + '"' + ) diff --git a/README.md b/README.md index 0776124..9ef6e9d 100644 --- a/README.md +++ b/README.md @@ -228,6 +228,9 @@ If you want to contribute to this please read the [Contribution guidelines](CONT * Implementation: Fluvius * TylonHH (https://github.com/TylonHH) * Implementation: EnergyControl (app) +* Nikolaj Hendel (https://github.com/nikolajhendel) + * How to for: iSolarCloud (Sungrow) + * Sample files for: iSolarCloud (Sungrow)

(back to top)