print tr("Mot-clé : %s obligatoire non valorisé")
-print tr("None n'est pas ujjjjjjjjjne valeur autorisée")
+print tr("None n'est pas une valeur autorisée")
print tr("Mot-cle simple : ")
print tr("Fin Mot-clé simple : ")
IMPORTANT
_________
-Il est nécessaire d'effectuer un pull avant de faire un push. L'action de pull va nous placer dans la branche dédiée au DER.
-
-
-
-Présentation d'un cas de figure pouvant poser problème
-______________________________________________________
-L'utilisateur effecte un pull puis ferme l'application Git Bash. Il travaille sur ce dossier pendant plusieurs jours sans effectuer de push.
-
-Lorsqu'il va vouloir finalement push ce nouveau dossier modifié, il sera obligé d'effectuer un pull au préalable ce qui risque d'écraser son dossier actuel.
-
-Il faut alors renommer son dossier modifié avant d'effectuer ce pull. Ensuite, il peut transférer les fichiers modifiés dans le dossier qu'il vient de pull.
-
-Il effectue ensute son push selon les modalités ci-dessus.
+Il est nécessaire d'effectuer un pull avant de faire un push. L'action de pull va nous placer dans la branche du DER.
for k in dico['CONTINGENCY_PROCESSING'].keys():
- print(k)
+ # print(k)
if k[0:19] == 'Component_List_For_' or k[0:21] =='Contingency_List_For_' :
- print(k)
+ # print(k)
newK=k.replace('__',' ')
- print(newK)
+ # print(newK)
l="'"+str(newK)+"'"
- print(l)
+ # print(l)
dico['CONTINGENCY_PROCESSING'][l]=dico['CONTINGENCY_PROCESSING'][k]
- print(dico['CONTINGENCY_PROCESSING'])
+ # print(dico['CONTINGENCY_PROCESSING'])
del dico['CONTINGENCY_PROCESSING'][k]
###to delete
# Subprocess dans la fonction PFExtractGeneratorLoadLineandTransfoDico
########################################################################################################################
import pdb
-from math import *
NoBreakersandSwitches = True
PF_PATH = r'C:\Program Files\DIgSILENT\PowerFactory 2016 SP2\Python\3.5'
def PFExtractData(NetworkFile, PF_PATH):
- #PSEN sous PowerFactory, extract data from Gen, Load, Bus, Branch, Transfo, Motor
import os
import sys
import numpy as np
"""
app = pf.GetApplication()
user = app.GetCurrentUser()
- ComImp = user.CreateObject('ComPFDIMPORT') # Object to import pfd file
+ ComImp = user.CreateObject('ComPFDIMPORT') # Objet permettant l'import d'un .pfd
- app.SetWriteCacheEnabled(1) # Disable consistency check
+ app.SetWriteCacheEnabled(1) # Desactive le controle de coherence
ComImp.g_file = NetworkFile
- ComImp.g_target = user # Project is imported under the user account
- err = ComImp.Execute() # Execute command starts the import process
+ ComImp.g_target = user # Le projet est importé sous le compte utilisateur
+ err = ComImp.Execute() # Lance le processus d'import
ComImp.Delete()
- app.SetWriteCacheEnabled(0) # Enable consistency check
+ app.SetWriteCacheEnabled(0) # Active le controle de coherence
prjs = user.GetContents('*.IntPrj')
prjs.sort(key=lambda x: x.gnrl_modif, reverse=True)
for key in tmp:
Options.ContFullList.append(key)
tmp = list(Options.TransfoBaseList.keys())
-## trs = []
-## for tr in tmp:
-## if tr.split("__")[-1].startswith("3WNDTR"):
-## b = tr.split('__')
-## for j,val in enumerate(b):
-## if val.startswith("Wnd"):
-## del b[j]
-## tfo = '__'.join(b)
-## trs.append(tfo)
+
tmp.sort()
for key in tmp:
Options.ContFullList.append(key)
Options.CustomContingencies.append(MatList)
def checkIfBorder(graph, key, depth, tmplist):
- #print "in checkifBorder"
- #print "depth ",depth
- #print graph
+
if key in tmplist:
return True
if depth == 0:
mon_depickler = pickle.Unpickler(fichier)
data_file = mon_depickler.load()
- # def convert(data):
- # if isinstance(data, basestring):
- # return str(data)
- # elif isinstance(data, collections.Mapping):
- # return dict(map(convert, data.iteritems()))
- # elif isinstance(data, collections.Iterable):
- # return type(data)(map(convert, data))
- # else:
- # return data
- #
- # data = convert(data_file)
def convert_keys_to_string(dictionary):
"""Recursively converts dictionary keys to strings."""
if not isinstance(dictionary, dict):
if ii[2] not in Options.IsolatedGenList:
Options.IsolatedGenList.append(ii[2])
- # for i in range(len(carray[0])):
- # if carray[0][i] in tmplist:
- # if iarray[0][i] not in Options.IsolatedGenList:
- # Options.IsolatedGenList.append(iarray[0][i])
- # if carray[1][i] in tmplist:
- # if iarray[1][i] not in Options.IsolatedGenList:
- # Options.IsolatedGenList.append(iarray[1][i])
- #
- # for i in range(len(darray[0])):
- # if darray[0][i] in tmplist:
- # if jarray[0][i] not in Options.IsolatedGenList:
- # Options.IsolatedGenList.append(jarray[0][i])
- # if darray[1][i] in tmplist:
- # if jarray[1][i] not in Options.IsolatedGenList:
- # Options.IsolatedGenList.append(jarray[1][i])
- # if darray[2][i] in tmplist:
- # if jarray[1][i] not in Options.IsolatedGenList:
- # Options.IsolatedGenList.append(jarray[1][i])
-
lines = []
outLines = []
for line in LineDico.keys():
lines.append(line)
else:
outLines.append(line)
- # for i in range(len(iarray[0])):
- # #name = iarray[0][i] + ' - ' + iarray[1][i]
- # idname = iarray[2][i].strip()
- # if '@' in idname:
- # idname = idname.replace('@','BR')
- # elif '*' in idname:
- # idname = idname.replace('*','SW')
- # else:
- # #separate lines and transfos
- # U1 = Options.BusBaseList[iarray[0][i]]
- # U2 = Options.BusBaseList[iarray[1][i]]
- # if U1==U2:
- # typecode = 'LI'
- # else:
- # typecode= 'TR'
- # try:
- # idname = typecode + str(int(idname))
- # except:
- # idname = typecode + idname
- # linename = iarray[0][i].strip() + "__" + iarray[1][i].strip() + "__" + idname
- # linename = linename.replace(" ","_")
- # linename = linename.replace("-","_")
- # linename = linename.replace(".","_")
- # linename = linename.replace("&","and")
- # try:
- # int(linename[0])
- # linename="_" + linename
- # except:
- # pass
- #
- # if '@' in iarray[2][i] or '*' in iarray[2][i]:
- # outLines.append(linename)
- # elif iarray[0][i] not in Options.IsolatedGenList and iarray[1][i] not in Options.IsolatedGenList:
- # lines.append(linename)
- # else:
- # outLines.append(linename)
-
- # for i in range(len(jarray[0])):
- # idname = '3WNDTR' + darray[6][i].strip()
- # tfoname = darray[0][i].strip() + "__" + darray[1][i].strip() + "__" + darray[2][i].strip() + "__" + idname
- # tfoname = tfoname.replace(" ","_")
- # tfoname = tfoname.replace("-","_")
- # tfoname = tfoname.replace(".","_")
- # tfoname = tfoname.replace("&","and")
- # try:
- # int(tfoname[0])
- # tfoname="_" + tfoname
- # except:
- # pass
- # if jarray[0][i] not in Options.IsolatedGenList and jarray[1][i] not in Options.IsolatedGenList and jarray[2][i] not in Options.IsolatedGenList:
- # lines.append(tfoname)
- # else:
- # outLines.append(tfoname)
+
Options.TrueLines = lines
Options.RadialLines = outLines
- #pdb.set_trace()
+
return lines, outLines
NoBreakersandSwitches = True
path1 = os.getcwd()
- # path_temp=os.path.dirname(NetworkFile)
filew = open('temp.txt', 'w')
filew.write(NetworkFile + '\n')
filew.write(PF_PATH + '\n')
filew.write(Python3_path + '\n')
filew.close()
- # print('changer le chemin de Python3 executable')
lancer = [Python3_path + '/python.exe', path1 + '/com_base.py'] # changer le chemin de Python3 executable
proc = subprocess.Popen(lancer)
proc.wait()
LineDico = data['LineDico']
TfoDico = data['TransfoDico']
MotorDico = data['MotorDico']
- # os.remove('Data_for_interface')
return MachineDico, LoadDico, LineDico, TfoDico, MotorDico
-
-#NetworkFile= ""
-#PF_PATH=
-#Python3_path=
-#MachineDico, LoadDico, LineDico, TfoDico, MotorDico = PFExtractGeneratorLoadLineandTransfoDico(0, 0, NetworkFile, PF_PATH,Python3_path)
\ No newline at end of file
PSEN_results_folder = SIMP(statut="o", typ="Repertoire"),
PSEN_results_csvfile = SIMP(statut='o', typ = ('Fichier', 'CSV file (*.csv);;All Files (*)',),),
PSEN_results_csvfile_cleaned = SIMP ( statut = "o",typ=bool,defaut=False,),
- DecimalSeparator = SIMP(statut="o",typ='TXM',into=[',','.'],defaut='.',),
+ DecimalSeparator = SIMP(statut="o",typ='TXM',into=[',','.'],defaut=',',),
BusesList = SIMP(statut = 'f', typ = 'R', min = 0, max = '**', defaut = (), homo = 'SansOrdreNiDoublon'),
LinesList = SIMP(statut = 'f', typ = 'R', min = 0, max = '**', defaut = (), homo = 'SansOrdreNiDoublon'),
TripLines = SIMP(statut = 'o', typ = bool, defaut = True),
TripTransfos = SIMP(statut = 'o', typ = bool, defaut = True),
- TripGenerators = SIMP(statut = 'o', typ = bool, defaut = True),
- #TripBuses = SIMP(statut = 'o', typ = bool, defaut = False),
-
- #consigne = SIMP(statut='o',homo='information',typ = "TXM",defaut = 'Once the TripComponent key is selected above, all voltage levels will be included in the creation of N-1 contingencies by default, unless specific voltage levels are selected below.'),
- #N1AreaList = SIMP(statut = 'o', typ = 'I', min = 0, max = '**', defaut = (), homo = 'SansOrdreNiDoublon'),
- #N1BusesList = SIMP(statut = 'o', typ = 'R', min = 0, max = '**', defaut = (), homo = 'SansOrdreNiDoublon'),
- #N1LinesList = SIMP(statut = 'o', typ = 'R', min = 0, max = '**', defaut = (), homo = 'SansOrdreNiDoublon'),
- #N1TransformersList = SIMP(statut = 'o', typ = 'TXM', min = 0, max = '**', defaut = (), homo = 'SansOrdreNiDoublon'),
-
+ TripGenerators = SIMP(statut = 'o', typ = bool, defaut = True),
),
Automatic_N_2_Selection = FACT(statut='f',
for bus in liste_de_bus:
processedData[0].append(bus)
- print('ok1')
for contingency in liste_de_contingences:
- print('ok2')
liste = [contingency]
for n in range(len(liste_de_bus)):
- print('ok3')
liste.append('')
processedData.append(liste)
- print('ok4')
-
- print(processedData)
- print(data)
- print(data[0])
+
+
for ligne in data:
if ligne[0] not in liste_de_bus:
continue
import os
import xlsxwriter
+import os
+import pandas as pd
+import win32com.client as win32
def getXLSinfo(filename):
wb = xlrd.open_workbook(filename)
def processXLS(dico):
print('je suis dans processor')
-
+
UpdateProcessorOptions(dico)
indexes = {}
toGather = {}
try:
- nomColonne = "'"+'Component_List_For_'+str(name)+"'"
+ nomColonne = 'Component_List_For_'+str(name)
nomColonne = nomColonne.replace('_ ',' _')
- nomLigne = "'"+'Contingency_List_For_'+str(name)+"'"
+ nomLigne = 'Contingency_List_For_'+str(name)
nomLigne = nomLigne.replace('_ ',' _')
+
if nomColonne not in dico['CONTINGENCY_PROCESSING'].keys():
continue
--- /dev/null
+import xlrd # XLS read
+import xlwt # XLS write
+import csv
+import pdb
+
+import Options
+import Compute
+#from Run import *
+import pickle
+from UpdateOptions import UpdateProcessorOptions
+#from itertools import izip_longest # Reverse the double array
+#from future.moves.itertools import zip_longest
+import itertools
+import os
+import xlsxwriter
+
+import os
+import pandas as pd
+import win32com.client as win32
+
+
+def getXLSinfo(filename):
+ wb = xlrd.open_workbook(filename)
+ sheets = wb.sheet_names()
+ ret = {}
+ for name in sheets:
+ sheet = wb.sheet_by_name(name)
+ ret[name] = [[],[]]
+ for i in range(0, sheet.nrows):
+ data = str(sheet.cell_value(i, 0))
+ if data not in ret[name][0]:
+ ret[name][0].append(data)
+ data = str(sheet.cell_value(i, 1))
+ if data not in ret[name][1]:
+ ret[name][1].append(data)
+ return ret
+
+def getCSVinfo(csvfilename):
+ foldername = os.path.dirname(csvfilename)
+ sheets =[]
+ for file in os.listdir(foldername):
+ if file.endswith('.csv') and (' Voltage ' in file or ' FlowsDif ' in file or ' Flows ' in file or ' LoadShed ' in file) and 'processed_' not in file.lower():
+ sheets.append(file[0:-4])
+ ret = {}
+ for name in sheets:
+ ACCCresultsfile = os.path.join(foldername, name + '.csv')
+ try: #python 2 compatible
+ h = open(ACCCresultsfile,"rb")
+ crd = csv.reader(h,delimiter=";")
+ ret[name] = [[],[]]
+ for i, row in enumerate(crd):
+ if len(row)>2:
+ data = str(row[0])
+ if data not in ret[name][0]:
+ ret[name][0].append(data)
+ data = str(row[1])
+ if data not in ret[name][1]:
+ ret[name][1].append(data)
+ h.close()
+ except: #python 3 compatible
+ h = open(ACCCresultsfile,"r",newline='')
+ crd = csv.reader(h,delimiter=";")
+ ret[name] = [[],[]]
+ for i, row in enumerate(crd):
+ if len(row)>2:
+ data = str(row[0])
+ if data not in ret[name][0]:
+ ret[name][0].append(data)
+ data = str(row[1])
+ if data not in ret[name][1]:
+ ret[name][1].append(data)
+ h.close()
+ return ret
+
+def processXLS(dico):
+ print('je suis dans processor')
+
+
+ # """
+ # On renseigne les chemins vers les fichiers d'entrée et de sortie
+ # """
+
+ # input_path = dico['CONTINGENCY_PROCESSING']['XLS_file']
+
+ # filename = dico['CONTINGENCY_SELECTION']['case_name'] + '.xlsx'
+ # output_path = os.path.join(dico['CASE_SELECTION']['PSEN_results_folder'],filename)
+
+
+ # """
+ # Cette commande va permettre d'ouvrir le fichier résultat dans lequel on va enregistrer différents onglets
+ # Uniquement à la fin de toutes les écritures, nous viendrons le sauvegarder
+ # """
+ # writer = pd.ExcelWriter(output_path, engine='xlsxwriter')
+
+
+
+ # """
+ # On importe le fichier excel et on crée une DataFrame pour chaque Onglet/Sheet du fichier
+ # On récupère également les noms des Onglets/Sheets afin de pouvoir adapter les intitulés des composants et des valeurs
+
+ # Voltage ==> 'Bus' ; 'Max Voltage'
+ # Flows ==> 'Branch' ; 'Max Violation'
+ # """
+ # input_excel = pd.ExcelFile(input_path)
+
+ # sheet_names_all = dico['CONTINGENCY_PROCESSING']['TabList']
+
+
+ # for sheet in sheet_names_all:
+
+
+ # """
+ # On crée une DataFrame pour l'onglet/sheet actuel
+ # Selon le nom de l'onglet/sheet, on précise l'intitulé de la valeur que l'on va récupérer
+
+
+ # On crée des listes répertoriant les noms des composants et contingences en faisant appel aux éléments sélectionnés par l'utilisateur
+ # Ces éléments sont stockés dans dicoN1_process
+
+ # """
+
+ # df = input_excel.parse(sheet)
+
+ # conting_label = 'Contingency'
+
+ # if 'Voltage' in sheet:
+
+ # compo_label = 'Bus'
+ # value_label = 'Max Voltage'
+
+ # for k in dico['CONTINGENCY_PROCESSING'].keys():
+
+ # if 'Voltage' in k and 'Component' in k:
+ # compo = dico['CONTINGENCY_PROCESSING'][k]
+
+ # elif 'Voltage' in k and 'Contingency' in k:
+ # conting = dico['CONTINGENCY_PROCESSING'][k]
+
+
+ # elif 'Flows' in sheet:
+
+ # compo_label = 'Branch'
+ # value_label = 'Max Violation'
+
+ # for k in dico['CONTINGENCY_PROCESSING'].keys():
+
+ # if 'Flows' in k and 'Component' in k:
+ # compo = dico['CONTINGENCY_PROCESSING'][k]
+
+ # elif 'Flows' in k and 'Contingency' in k:
+ # conting = dico['CONTINGENCY_PROCESSING'][k]
+
+
+ # """
+ # On range ces listes par ordre alphabétique
+ # """
+ # compo.sort()
+ # conting.sort()
+
+ # """
+ # On vient créer le squelette de notre matrice, on la remplit de 0
+ # """
+ # output_excel = pd.DataFrame(index = compo, columns = conting)
+ # output_excel = output_excel.fillna(0)
+
+
+ # """
+ # On vient ranger nos lignes et colonnes par ordre alphabétique, de la même manière que les listes compo et conting
+ # """
+ # output_excel.sort_index(axis = 1, ascending = True, inplace =True)
+ # output_excel.sort_index(axis = 0, ascending = True, inplace = True)
+
+
+ # for i in range(len(compo)):
+
+ # for j in range(len(conting)):
+ # """
+ # Cette commande permet de venir selectionner la valeur du composant X impacté par la contingence Y
+
+ # """
+ # valeur = df[(df[compo_label] == compo[i]) & (df[conting_label] == conting[j])][value_label]
+
+
+ # """
+ # Cette commande permet de venir remplir notre matrice avec les valeurs récupérées dans la DataFrame d'origine
+ # """
+ # try:
+ # output_excel.loc[compo[i], conting[j]] = float(valeur)
+ # except:
+ # pass
+
+
+ # """
+ # On importe notre matrice au format excel
+ # """
+ # output_excel.to_excel(writer, sheet_name = sheet)
+
+ # writer.save()
+
+ # """
+ # Ajustez la taille des colonnes et lignes automatiquement
+ # """
+
+ # excel = win32.gencache.EnsureDispatch('Excel.Application')
+ # wb = excel.Workbooks.Open(output_path)
+
+ # for sheet_to_autofit in sheet_names_all:
+ # ws = wb.Worksheets(sheet_to_autofit)
+ # ws.Columns.AutoFit()
+
+ # wb.Save()
+ # excel.Application.Quit()
+
+
+def processXLS_out(dico):
+
+ UpdateProcessorOptions(dico)
+ indexes = {}
+ toGather = {}
+ data = {}
+ totalData = {}
+ # pdb.set_trace()
+
+ if Options.csvFileName.endswith('xls'):
+ # Step 1 : get the indexes of each columns to process
+ wb = xlrd.open_workbook(Options.csvFileName)
+ sheets = wb.sheet_names()
+ # Now get data from the selected columns. data and TotalData are filled in gatherxlsData and are accessible here
+ gatherXlsData(wb, sheets, data, totalData)
+ elif Options.csvFileName.endswith('csv'):
+
+ ACCCresultsfolder = os.path.dirname(Options.csvFileName) #os.path.join(Options.FolderList[0], "ACCCresults")
+ sheets =[]
+ for file in os.listdir(ACCCresultsfolder):
+ if file.endswith('.csv') and (' Voltage ' in file or ' FlowsDif ' in file or ' Flows ' in file or ' LoadShed ' in file) :
+ # print(file[0:-4])
+ name = str(file[0:-4])
+ nomCle = "'"+'Component_List_For_'+str(name)+"'"
+ nomCle = nomCle.replace('_ ',' _')
+ if nomCle in dico['CONTINGENCY_PROCESSING'].keys():
+ sheets.append(file[0:-4])
+
+ gatherCsvData(sheets, data, totalData)
+
+ # Now we process the gathered data depending on the required calculus
+ processedData = {}
+
+ for name in sheets:
+
+ try:
+
+ nomColonne = "'"+'Component_List_For_'+str(name)+"'"
+ nomColonne = nomColonne.replace('_ ',' _')
+
+ nomLigne = "'"+'Contingency_List_For_'+str(name)+"'"
+ nomLigne = nomLigne.replace('_ ',' _')
+
+
+ if nomColonne not in dico['CONTINGENCY_PROCESSING'].keys():
+ continue
+
+ Options.selectedDoubleCol[str(name)] = dico['CONTINGENCY_PROCESSING'][nomColonne]
+ Options.selectedDoubleRow[str(name)] = dico['CONTINGENCY_PROCESSING'][nomLigne]
+
+ processedData[name] = [[]]
+
+ processedData[name] = Compute.createDoubleArray(totalData[name], processedData[name], name)
+
+ except KeyError:
+ print("error dans ecriture acc results")
+ pass
+
+ xlsToOutput(processedData)
+
+def gatherXlsData(wb, sheets, data, totalData):
+ for name in sheets:
+ sheet = wb.sheet_by_name(name)
+ data[name] = []
+ totalData[name] = []
+
+ for i in range(0, sheet.nrows):
+ totalData[name].append([])
+ data[name].append([])
+ for j in range(0, sheet.ncols):
+ # Store data anyway in totalData
+ if i == 0:
+ totalData[name][i] = [j]
+ try:
+ totalData[name][i].append(float(sheet.cell_value(i, j)))
+ except:
+ totalData[name][i].append(sheet.cell_value(i, j))
+ try:
+ if j == 0:
+ try:
+ if sheet.cell_value(i, 0) in Options.selectedDoubleRow[name] and sheet.cell_value(i, 1) in Options.selectedDoubleCol[name]:
+ pass
+ else:
+ break
+ except:
+ break
+ if i == 0:
+ data[name][i] = [j]
+ data[name][i].append(float(sheet.cell_value(i, j)))
+ except:
+ data[name][i].append('N/A')
+
+def gatherCsvData(sheets, data, totalData):
+ # try: #python 2
+ for name in sheets:
+ ACCCresultsfolder = os.path.dirname(Options.csvFileName)
+ ACCCresultsfile = os.path.join(ACCCresultsfolder,name + '.csv')
+ h = open(ACCCresultsfile,"rb")
+ crd = csv.reader(h,delimiter=";")
+
+ data[name] = []
+ totalData[name] = []
+
+ for i, row in enumerate(crd):
+
+ totalData[name].append([])
+ data[name].append([])
+
+ for j in range(len(row)):
+ # Store data anyway in totalData
+ if i == 0:
+ totalData[name][i] = [j]
+ continue
+ try:
+ totalData[name][i].append(float(row[j]))
+ except:
+ totalData[name][i].append(row[j])
+
+
+
+ h.close()
+
+
+def isData(row):
+ for item in row:
+ try:
+ v = float(item)
+ if v > 0:
+ return True
+ except:
+ try:
+ v = float(item['mean'])
+ if v >= 0: #used to be > 0 but want to keep zero cases!!
+ return True
+ except:
+ pass
+ return False
+
+
+def xlsToOutput(data):
+ ACCCresultsfolder = os.path.dirname(Options.csvFileName)
+ filename = os.path.join(ACCCresultsfolder,"ACCCresults_processed.xlsx")
+ workbook = xlsxwriter.Workbook(filename)
+ worksheet = workbook.add_worksheet()
+ row = 0
+
+ for colonne in data:
+ col=0
+ for cellule in colonne:
+ worksheet.write(col, row, cellule)
+ col = col+1
+ row = row+1
+ workbook.close()
+
+
+def xlsToCsv(indexes, data): #if too much data to be written to xls file, output a csv
+ for name in data:
+ if Options.csvFileName.endswith('.csv'):
+ ACCCresultsfolder = os.path.dirname(Options.csvFileName)
+ newSheet = os.path.join(ACCCresultsfolder,"Processed_" + name +'.csv')
+ totalsSheet = os.path.join(ACCCresultsfolder,"Processed_" + name + '_Total.csv')
+ if 'voltage' in name.lower() and 'loadshed' not in name.lower():
+ zerosSheet = os.path.join(ACCCresultsfolder,"Processed_" + name + '_Zeros.csv')
+ recapSheet = os.path.join(ACCCresultsfolder,"Processed_" + name + '_Recap.csv')
+ elif Options.csvFileName.endswith('.xls') or Options.csvFileName.endswith('.xlsx'):
+ newSheet = Options.csvFileName[:-4] + '_processed_' + name + '.csv'
+ totalsSheet = Options.csvFileName[:-4] + '_processed_' + name + '_Total.csv'
+ if 'voltage' in name.lower() and 'loadshed' not in name.lower():
+ zerosSheet = Options.csvFileName[:-4] + '_processed_' + name + '_Zeros.csv'
+ recapSheet = Options.csvFileName[:-4] + '_processed_' + name + '_Recap.csv'
+ with open(newSheet, 'wb') as csvfile:
+ writer = csv.writer(csvfile, delimiter = ';')
+ flatData = []
+ # Flatten data to remove all dict items
+ for row in data[name]:
+ newRow = []
+ for item in row:
+ if type(item) == dict:
+ try:
+ newRow.append(item['mean'])
+ except:
+ print(item)
+ else:
+ newRow.append(item)
+ flatData.append(newRow)
+ for row in flatData:
+ writer.writerow(row)
+ print('A file has been saved under ' + newSheet + '.')
+
+ with open(totalsSheet, 'wb') as csvfile:
+ writer = csv.writer(csvfile, delimiter = ';')
+ flatData = []
+ # Flatten data to remove all dict items
+ for row in data[name]:
+ newRow = []
+ for item in row:
+ if type(item) == dict:
+ try:
+ newRow.append(item['badcase'])
+ except:
+ print(item)
+ else:
+ newRow.append(item)
+ flatData.append(newRow)
+ for row in flatData:
+ writer.writerow(row)
+ print ('A file has been saved under ' + totalsSheet + '.')
+
+ if ' voltage ' in name.lower() and ' loadshed ' not in name.lower() and ' flows ' not in name.lower():
+ with open(zerosSheet, 'wb') as csvfile:
+ writer = csv.writer(csvfile, delimiter = ';')
+ flatData = []
+ # Flatten data to remove all dict items
+ for row in data[name]:
+ newRow = []
+ for item in row:
+ if type(item) == dict:
+ try:
+ newRow.append(item['zerocase'])
+ except:
+ print (item)
+ else:
+ newRow.append(item)
+ flatData.append(newRow)
+ for row in flatData:
+ writer.writerow(row)
+ print( 'A file has been saved under ' + zerosSheet + '.')
+
+ with open(recapSheet, 'wb') as csvfile:
+ writer = csv.writer(csvfile, delimiter = ';')
+ flatData = []
+ # Flatten data to remove all dict items
+ for row in data[name]:
+ newRow = []
+ for item in row:
+ if type(item) == dict:
+ try:
+ if ' voltage ' in name.lower() and ' loadshed ' not in name.lower() and ' flows ' not in name.lower():
+ newRow.append(str(item['mean']) + ' / ' + str(item['badcase']) + ' / ' + str(item['zerocase']))
+ else:
+ newRow.append(str(item['mean']) + ' / ' + str(item['badcase']) )
+ except:
+ print (item)
+ else:
+ newRow.append(item)
+ flatData.append(newRow)
+ for row in flatData:
+ writer.writerow(row)
+ print( 'A file has been saved under ' + recapSheet + '.')
+
+ print( 'Processing over.')
+
+def xlsToCsvPython3(indexes, data): #if too much data to be written to xls file, output a csv
+ for name in data:
+ if Options.csvFileName.endswith('.csv'):
+ ACCCresultsfolder = os.path.dirname(Options.csvFileName)
+ newSheet = os.path.join(ACCCresultsfolder,"Processed_" + name +'.csv')
+ totalsSheet = os.path.join(ACCCresultsfolder,"Processed_" + name + '_Total.csv')
+ if 'voltage' in name.lower() and 'loadshed' not in name.lower():
+ zerosSheet = os.path.join(ACCCresultsfolder,"Processed_" + name + '_Zeros.csv')
+ recapSheet = os.path.join(ACCCresultsfolder,"Processed_" + name + '_Recap.csv')
+ elif Options.csvFileName.endswith('.xls') or Options.csvFileName.endswith('.xlsx'):
+ newSheet = Options.csvFileName[:-4] + '_processed_' + name + '.csv'
+ totalsSheet = Options.csvFileName[:-4] + '_processed_' + name + '_Total.csv'
+ if 'voltage' in name.lower() and 'loadshed' not in name.lower():
+ zerosSheet = Options.csvFileName[:-4] + '_processed_' + name + '_Zeros.csv'
+ recapSheet = Options.csvFileName[:-4] + '_processed_' + name + '_Recap.csv'
+ with open(newSheet, 'w', newline='') as csvfile:
+ writer = csv.writer(csvfile, delimiter = ';')
+ flatData = []
+ # Flatten data to remove all dict items
+ for row in data[name]:
+ newRow = []
+ for item in row:
+ if type(item) == dict:
+ try:
+ newRow.append(item['mean'])
+ except:
+ print(item)
+ else:
+ newRow.append(item)
+ flatData.append(newRow)
+ for row in flatData:
+ writer.writerow(row)
+ print('A file has been saved under ' + newSheet + '.')
+
+ with open(totalsSheet, 'w', newline='') as csvfile:
+ writer = csv.writer(csvfile, delimiter = ';')
+ flatData = []
+ # Flatten data to remove all dict items
+ for row in data[name]:
+ #print( row)
+ newRow = []
+ for item in row:
+ if type(item) == dict:
+ try:
+ newRow.append(item['badcase'])
+ except:
+ print( item)
+ else:
+ newRow.append(item)
+ flatData.append(newRow)
+ for row in flatData:
+ writer.writerow(row)
+ print ('A file has been saved under ' + totalsSheet + '.')
+
+ if ' voltage ' in name.lower() and ' loadshed ' not in name.lower() and ' flows ' not in name.lower():
+ with open(zerosSheet, 'w', newline='') as csvfile:
+ writer = csv.writer(csvfile, delimiter = ';')
+ flatData = []
+ # Flatten data to remove all dict items
+ for row in data[name]:
+ newRow = []
+ for item in row:
+ if type(item) == dict:
+ try:
+ newRow.append(item['zerocase'])
+ except:
+ print (item)
+ else:
+ newRow.append(item)
+ flatData.append(newRow)
+ for row in flatData:
+ writer.writerow(row)
+ print( 'A file has been saved under ' + zerosSheet + '.')
+
+ with open(recapSheet, 'w', newline='') as csvfile:
+ writer = csv.writer(csvfile, delimiter = ';')
+ flatData = []
+ # Flatten data to remove all dict items
+ for row in data[name]:
+ newRow = []
+ for item in row:
+ if type(item) == dict:
+ try:
+ if ' voltage ' in name.lower() and ' loadshed ' not in name.lower() and ' flows ' not in name.lower():
+ newRow.append(str(item['mean']) + ' / ' + str(item['badcase']) + ' / ' + str(item['zerocase']))
+ else:
+ newRow.append(str(item['mean']) + ' / ' + str(item['badcase']) )
+ except:
+ print (item)
+ else:
+ newRow.append(item)
+ flatData.append(newRow)
+ for row in flatData:
+ writer.writerow(row)
+ print( 'A file has been saved under ' + recapSheet + '.')
+
+ print( 'Processing over.')
+
+def xlsToXls(indexes, data):
+
+ print('xlsToXls')
+
+ palette = []
+ newWb = xlwt.Workbook(style_compression = 2)
+ color = 8
+ for name in data:
+ # print( name)
+ newSheet = newWb.add_sheet(name)
+ totalsSheet = newWb.add_sheet(name + '_Total')
+ if ' voltage ' in name.lower() and ' loadshed ' not in name.lower() and ' flows ' not in name.lower():
+ zerosSheet = newWb.add_sheet(name + '_Zeros')
+ recapSheet = newWb.add_sheet(name + '_Recap')
+ i = 0
+ j = 0
+ for row in data[name]:
+
+ n = 0
+ for item in row:
+
+ try:
+ newSheet.write(i, n, item)
+ totalsSheet.write(i, n, item)
+ if ' voltage ' in name.lower() and ' loadshed ' not in name.lower() and ' flows ' not in name.lower():
+ zerosSheet.write(i, n, item)
+ recapSheet.write(i, n, item)
+ except:
+ # item is not a cell, it's a dict -> display color
+ try:
+ if item['color'] == 0x55FF55:
+ newSheet.write(i, n, item['mean'])
+ totalsSheet.write(i, n, item['badcase'])
+ if ' voltage ' in name.lower() and ' loadshed ' not in name.lower() and ' flows ' not in name.lower():
+ zerosSheet.write(i, n, item['zerocase'])
+ recapSheet.write(i, n, str(item['mean']) + ' / ' + str(item['badcase']) + ' / ' + str(item['zerocase']) )
+ else:
+ recapSheet.write(i, n, str(item['mean']) + ' / ' + str(item['badcase']) )
+ else:
+ if item['color'] in palette:
+ style = xlwt.easyxf('pattern: pattern solid, fore_colour custom' + str(item['color']))
+ newSheet.write(i, n, item['mean'], style)
+ totalsSheet.write(i, n, item['badcase'], style)
+ if ' voltage ' in name.lower() and ' loadshed ' not in name.lower() and ' flows ' not in name.lower():
+ zerosSheet.write(i, n, item['zerocase'], style)
+ recapSheet.write(i, n, str(item['mean']) + ' / ' + str(item['badcase']) + ' / ' + str(item['zerocase']), style)
+ else:
+ recapSheet.write(i, n, str(item['mean']) + ' / ' + str(item['badcase']), style)
+ else:
+ R = item['color'] / 65536
+ G = item['color'] / 256 - R * 256
+ B = 0x55
+
+ palette.append(item['color'])
+ xlwt.add_palette_colour('custom' + str(item['color']), color)
+ if R>-0.01 and R<256.01 and G>-0.01 and G<256.01 and B>-0.01 and B<256.01:
+ newWb.set_colour_RGB(color, R, G, B)
+ style = xlwt.easyxf('pattern: pattern solid, fore_colour custom' + str(item['color']))
+ newSheet.write(i, n, item['mean'], style)
+ totalsSheet.write(i, n, item['badcase'], style)
+ if ' voltage ' in name.lower() and ' loadshed ' not in name.lower() and ' flows ' not in name.lower():
+ zerosSheet.write(i, n, item['zerocase'], style)
+ recapSheet.write(i, n, str(item['mean']) + ' / ' + str(item['badcase']) + ' / ' + str(item['zerocase']), style)
+ else:
+ recapSheet.write(i, n, str(item['mean']) + ' / ' + str(item['badcase']), style)
+ color += 1
+ else:
+ newSheet.write(i, n, item['mean'])
+ totalsSheet.write(i, n, item['badcase'])
+ if ' voltage ' in name.lower() and ' loadshed ' not in name.lower() and ' flows ' not in name.lower():
+ zerosSheet.write(i, n, item['zerocase'])
+ recapSheet.write(i, n, str(item['mean']) + ' / ' + str(item['badcase']) + ' / ' + str(item['zerocase']) )
+ else:
+ recapSheet.write(i, n, str(item['mean']) + ' / ' + str(item['badcase']) )
+
+ except Exception as e:
+ print(e)
+ n += 1
+ continue
+ n += 1
+ i += 1
+ if Options.outFileName == '':
+ if Options.ACCcsv:
+ name = os.path.join(os.path.dirname(Options.csvFileName),'ACCCresults_processed.xls')
+ name = name.replace("/","\\")
+ else:
+ name = Options.csvFileName[:-4] + '_processed.xls'
+ name = name.replace("/","\\")
+ else:
+ name = Options.outFileName
+
+ newWb.save(name)
+ print('Processing over. The file has been saved under ' + name + '.')
+
+if __name__ == '__main__':
+
+ from dicodicoN1_process import Dico as dico
+
+ processXLS(dico)
# import powerfactory
import powerfactory as pf
app = pf.GetApplication()
- # app.Show()
user = app.GetCurrentUser()
- ComImp = user.CreateObject('ComPFDIMPORT') # objet pour importer pfd file
+ ComImp = user.CreateObject('ComPFDIMPORT') # objet pour importer pfd file
- app.SetWriteCacheEnabled(1) # Disable consistency check
+ app.SetWriteCacheEnabled(1) # Disable consistency check
ComImp.g_file = CaseFile
- ComImp.g_target = user # project is imported under the user account
- err = ComImp.Execute() # Execute command starts the import process
+ ComImp.g_target = user # project is imported under the user account
+ err = ComImp.Execute() # Execute command starts the import process
ComImp.Delete()
- app.SetWriteCacheEnabled(0) # Enable consistency check
+ app.SetWriteCacheEnabled(0) # Enable consistency check
+
prjs = user.GetContents('*.IntPrj')
prjs.sort(key=lambda x: x.gnrl_modif, reverse=True)
+
prj = prjs[0]
prj.Activate()
+
studycase0 = prj.GetContents('BaseCase.IntCase', 1)[0] # app.GetActiveStudyCase()
studycase0.Activate()
fScen = app.GetProjectFolder('scen') # Dossier contient triggers
print('Error: Power Factory Contingency Analysis ended in error. Network probably does not converge for load-flows.')
resFile = study.GetContents('Analyse de contingences*.ElmRes', 1)[0]
-
- # try:
- # resFile = study.GetContents('Contingency Analysis*.ElmRes', 1)[0]
- # resFile = study.GetContents('Analyse de contingences*.ElmRes', 1)[0]
- # except:
- # pass
-
+
app.PrintPlain(resFile)
resFile.Load()
nbrow = resFile.GetNumberOfRows()
# -*- coding: utf-8 -*-
"""
-Created on Wed May 29 15:00:00 2019
-
-@author: H92579
+On importe nos modules et on renseigne les chemins vers les fichiers d'entrée et de sortie
"""
import os
-import sys
+import pandas as pd
+import win32com.client as win32
+from dicoN1_process import Dico as dico
+
+input_path = dico['CONTINGENCY_PROCESSING']['XLS_file']
+
+filename = dico['CONTINGENCY_SELECTION']['case_name'] + '.xlsx'
+output_path = os.path.join(dico['CASE_SELECTION']['PSEN_results_folder'],filename)
+
+
+"""
+Cette commande va permettre d'ouvrir le fichier résultat dans lequel on va enregistrer différents onglets
+Uniquement à la fin de toutes les écritures, nous viendrons le sauvegarder
+"""
+writer = pd.ExcelWriter(output_path, engine='xlsxwriter')
+
+"""
+On importe le fichier excel et on crée une DataFrame pour chaque Onglet/Sheet du fichier
+On récupère également les noms des Onglets/Sheets afin de pouvoir adapter les intitulés des composants et des valeurs
+
+Voltage ==> 'Bus' ; 'Max Voltage'
+Flows ==> 'Branch' ; 'Max Violation'
+"""
+input_excel = pd.ExcelFile(input_path)
+
+sheet_names_all = dico['CONTINGENCY_PROCESSING']['TabList']
-PF_PATH = r'C:\Program Files\DIgSILENT\PowerFactory 2017 SP1\Python\3.5'
-NetworkFile = r'C:\Users\H92579\Documents\PSEN_simu\ResultatSimu\N_20190529_09h33m33\package0_N_20190529_09h33m33/AllCase.pfd'
-(filepath, filename) = os.path.split(NetworkFile)
-sys.path.append(PF_PATH)
-os.environ['PATH'] += ';' + os.path.dirname(os.path.dirname(PF_PATH)) + ';'
+for sheet in sheet_names_all:
+
+ """
+ On crée une DataFrame pour l'onglet/sheet actuel
+ Selon le nom de l'onglet/sheet, on précise l'intitulé de la valeur que l'on va récupérer
+
-import powerfactory
+ On crée des listes répertoriant les noms des composants et contingences en faisant appel aux éléments sélectionnés par l'utilisateur
+ Ces éléments sont stockés dans dicoN1_process
+
+ """
+
+ df = input_excel.parse(sheet)
+
+ conting_label = 'Contingency'
+
+ if 'Voltage' in sheet:
+
+ compo_label = 'Bus'
+ value_label = 'Max Voltage'
+
+ for k in dico['CONTINGENCY_PROCESSING'].keys():
+
+ if 'Voltage' in k and 'Component' in k:
+ compo = dico['CONTINGENCY_PROCESSING'][k]
+
+ elif 'Voltage' in k and 'Contingency' in k:
+ conting = dico['CONTINGENCY_PROCESSING'][k]
-app = powerfactory.GetApplication()
-user = app.GetCurrentUser()
-study = app.GetActiveStudyCase()
+
+ elif 'Flows' in sheet:
+
+ compo_label = 'Branch'
+ value_label = 'Max Violation'
+
+ for k in dico['CONTINGENCY_PROCESSING'].keys():
+
+ if 'Flows' in k and 'Component' in k:
+ compo = dico['CONTINGENCY_PROCESSING'][k]
+
+ elif 'Flows' in k and 'Contingency' in k:
+ conting = dico['CONTINGENCY_PROCESSING'][k]
-ComImp = user.CreateObject('ComPFDIMPORT')
-app.SetWriteCacheEnabled(1) # Disable consistency check
-ComImp.g_file = NetworkFile
-ComImp.g_target = user # Project is imported under the user account
-err = ComImp.Execute() # Execute command starts the import process
-app.SetWriteCacheEnabled(0) # Enable consistency check
+
+ """
+ On range ces listes par ordre alphabétique
+ """
+ compo.sort()
+ conting.sort()
+
+ """
+ On vient créer le squelette de notre matrice, on la remplit de 0
+ """
+ output_excel = pd.DataFrame(index = compo, columns = conting)
+ output_excel = output_excel.fillna(0)
+
+
+ """
+ On vient ranger nos lignes et colonnes par ordre alphabétique, de la même manière que les listes compo et conting
+ """
+ output_excel.sort_index(axis = 1, ascending = True, inplace =True)
+ output_excel.sort_index(axis = 0, ascending = True, inplace = True)
+
+
+ for i in range(len(compo)):
+
+ for j in range(len(conting)):
+ """
+ Cette commande permet de venir sélectionner la valeur du composant X impacté par la contingence Y
+
+ """
+ valeur = df[(df[compo_label] == compo[i]) & (df[conting_label] == conting[j])][value_label]
+
+
+ """
+ Cette commande permet de venir remplir notre matrice avec les valeurs récupérées dans la DataFrame d'origine
+ """
+ try:
+ output_excel.loc[compo[i], conting[j]] = float(valeur)
+ except:
+ pass
+
+
+ """
+ On exporte notre matrice au format excel
+ """
+ output_excel.to_excel(writer, sheet_name = sheet)
+
+writer.save()
+
+"""
+Ajuster automatiquement la taille des colonnes et des lignes
+"""
-prjs = user.GetContents('*.IntPrj')
-prjs.sort(key=lambda x: x.gnrl_modif, reverse=True)
-prj = prjs[0]
-prj.Activate()
+excel = win32.gencache.EnsureDispatch('Excel.Application')
+wb = excel.Workbooks.Open(output_path)
-resFile = study.GetContents('Contingency Analysis*.ElmRes', 1)[0]
\ No newline at end of file
+for sheet_to_autofit in sheet_names_all:
+ ws = wb.Worksheets(sheet_to_autofit)
+ ws.Columns.AutoFit()
+
+wb.Save()
+excel.Application.Quit()
\ No newline at end of file
-# -*- coding: utf-8 -*-
-"""
-Created on Mon Jun 3 09:19:43 2019
+import Options
+import Compute
+import pickle
+from UpdateOptions import UpdateProcessorOptions
+import xlrd # XLS read
+import xlwt # XLS write
+import itertools
+import os
+import xlsxwriter
+from dicoN1_process import Dico as dico
-@author: H92579
-"""
-import os
+outputExcel = r'C:\Users\H92579\Documents\PSEN_simu\ResultatSimu\N_20190529_09h33m33/ACCCresults.xls'
+Options.csvFileName = outputExcel
-folder = r'C:\Users\H92579\Documents\PSEN_simu\ResultatSimu\N_20190529_09h33m33'
+wb = xlrd.open_workbook(Options.csvFileName)
+sheets = wb.sheet_names()
-try:
- with open(os.path.join(folder,'N2_Processed.xlsx')): pass
-except IOError:
- print('Erreur! Le fichier n a pas pu être ouvert')
-
-filew = open(os.path.dirname(os.path.realpath(__file__))+'/iteration.txt', 'w')
-filew.write(str(0))
-filew.close()
+data = {}
+totalData = {}
-fichier = open (os.path.dirname(os.path.realpath(__file__))+'/iteration.txt', 'r')
-compteur=fichier.read()
-fichier.close()
+def gatherXlsData(wb, sheets, data, totalData):
+ for name in sheets:
+ sheet = wb.sheet_by_name(name)
+ data[name] = []
+ totalData[name] = []
-filew = open(os.path.dirname(os.path.realpath(__file__))+'/iteration.txt', 'w')
-filew.write(str(1))
-filew.close()
\ No newline at end of file
+ for i in range(0, sheet.nrows):
+ totalData[name].append([])
+ data[name].append([])
+ for j in range(0, sheet.ncols):
+ # Store data anyway in totalData
+ if i == 0:
+ totalData[name][i] = [j]
+ try:
+ totalData[name][i].append(float(sheet.cell_value(i, j)))
+ except:
+ totalData[name][i].append(sheet.cell_value(i, j))
+ try:
+ if j == 0:
+ try:
+ if sheet.cell_value(i, 0) in Options.selectedDoubleRow[name] and sheet.cell_value(i, 1) in Options.selectedDoubleCol[name]:
+ pass
+ else:
+ break
+ except:
+ break
+ if i == 0:
+ data[name][i] = [j]
+ data[name][i].append(float(sheet.cell_value(i, j)))
+ except:
+ data[name][i].append('N/A')
+
+gatherXlsData(wb, sheets, data, totalData)
\ No newline at end of file
-Dico ={'CONTINGENCY_SELECTION': {'TripLines': True, 'csv_file': 'C:/Users/H92579/Documents/PSEN_simu/ResultatSimu/N_20190529_09h33m33/Test.csv', 'SelectionMethod': 'CaseSelectionFromFile', 'case_name': 'trois_cas', 'TripTransfos': False, 'TripGenerators': True}, 'CASE_SELECTION': {'TransformersList': [], 'PSEN_results_csvfile': 'C:/Users/H92579/Documents/PSEN_simu/ResultatSimu/N_20190529_09h33m33/simulationDClog_complete_09h33m33.csv', 'DecimalSeparator': ',', 'MaxDepth': 5, 'NewCsvFile': 'CleanedData.csv', 'PSEN_results_csvfile_cleaned': False, 'Python3_path': 'C:/Python35', 'PF_path': 'C:\\Program Files\\DIgSILENT\\PowerFactory 2017 SP1\\Python\\3.5', 'LinesList': ['90.0'], 'PSEN_results_folder': 'C:/Users/H92579/Documents/PSEN_simu/ResultatSimu/N_20190529_09h33m33', 'OutputNewCsv': False, 'BusesList': ['90.0']}, 'CONTINGENCY_OPTIONS': {'ActiveLimits': True, 'Vmin': 0.95, 'FlowLimitTransformers': 100, 'AdjustTaps': False, 'VarLimits': True, 'FlowLimitLines': 100, 'FlatStart': False, 'AdjustShunts': False, 'Vmax': 1.05, 'output_file_format': 'xls', 'DispatchMode': 'ReferenceMachine'}}
\ No newline at end of file
+Dico ={'CASE_SELECTION': {'TransformersList': [], 'PSEN_results_csvfile': 'C:/Users/H92579/Documents/PSEN_simu/ResultatSimu/N_20190621_07h31m35/simulationDClog_complete_07h31m35.csv', 'DecimalSeparator': ',', 'MaxDepth': 5, 'NewCsvFile': 'CleanedData.csv', 'PSEN_results_csvfile_cleaned': False, 'Python3_path': 'C:/Python35', 'PF_path': 'C:\\Program Files\\DIgSILENT\\PowerFactory 2017 SP1\\Python\\3.5', 'LinesList': ['90.0'], 'PSEN_results_folder': 'C:/Users/H92579/Documents/PSEN_simu/ResultatSimu/N_20190621_07h31m35', 'OutputNewCsv': False, 'BusesList': ['90.0']}, 'N_PROCESSING_OPTIONS': {'Output_bus_values': False, 'Output_transformer_values': False, 'Output_lines_values': True}}
\ No newline at end of file
-Dico ={'CONTINGENCY_PROCESSING': {'XLS_file': 'C:/Users/H92579/Documents/PSEN_simu/ResultatSimu/N_20190529_09h33m33/ACCCresults.xls', 'TabList': ['MinAvgVolt Voltage 0'], "'Contingency_List_For_MinAvgVolt Voltage 0'": ['FURIANI_ZI_OLETTA', 'VAZ_G11 [Vazzio]', 'VAZ_G5 [Vazzio]', 'CORTE_MOROSAGLIA', 'VAZ_G4 [Vazzio]', 'TOLLA_G2 [Tolla]', 'CORSCIA_SOVENZIA', 'LORETO_SAGONE', 'VAZ_G1 [Vazzio]', 'CASTIRLA_CORSICA', 'CASAMOZZA_MOROSAGLIA', 'VAZ_G10 [Vazzio]', 'RIZZANESE1 [Propriano]', 'OCANA_VAZZIO', 'VAZ_G12 [Vazzio]', 'CORTE_OCANA', 'RIZZANESE2 [Propriano]', 'VAZ_G2 [Vazzio]', 'SOVENZ_G [Sovenzia]', 'ILE_ROUSSE_ZI_OLETTA', 'VAZ_G6 [Vazzio]', 'CALDANICCIA_OCANA', 'PRORIANO_PO_VO', 'BONIFACCIO_PO_VO2', 'PROPRIANO_ZSSS6', 'SAGONE_SOVENZIA', 'VAZ_G3 [Vazzio]', 'PIETROSELLA_PROPRIANO', 'Vazzio_TAC [Vazzio]', 'CORTE_SAMPOLO', 'GHISONACCIA_STE_LUCIE', 'TOLLA_G1 [Tolla]', 'FURIANI_LUCCIANA2', 'CORSICA_G [Corsica]', 'OCANA_ZSSS6', 'FURIANI_LUCCIANA1', 'BASTIA_FURIANI2', 'BASTIA_FURIANI1', 'TOLLA_G3 [Tolla]', 'PO_VO_STE_LUCIE', 'ASPRETTO_LORETTO', 'ASPRETTO_VAZZIO', 'SAMPO_G2 [Sampolo]', 'PONT_VANNA [Ocana]', 'OCANA_PIETROSELLA', 'OCA_G2 [Ocana]', 'OCA_G1 [Ocana]', 'OCA_G3 [Ocana]', 'Caldanicci_PV [Caldanicci]', 'CASTI_G2 [Castirla]', 'CASTI_G1 [Castirla]', 'SAMPO_G1 [Sampolo]', 'CASTIRLA_ILE_ROUSSE', 'LUCCIANA_HTB_2 [Lucciana]', 'CASAMOZZA_TAGLIO', 'CERVIONE_TAGLIO', 'BONIFACCIO_PO_VO1', 'CERVIONE_GHISONACCIA', 'Corte_PV [Corte]', 'Loretto_PV [Loretto]', 'LORETO_VAZZIO', 'LUCCIANA_HTB [Lucciana]', 'PROPRIA_BT [Propriano]'], "'Component_List_For_MinAvgVolt Voltage 0'": ['Castirla_Castirla', 'Corsica_Corsica', 'Vazzio_Vazzio', 'Pietrosella_Pietrosella', 'Aspretto_Aspretto', 'IleRousse_IleRousse', 'Oletta_Oletta', 'Caldanicci_Caldanicci', 'Sagone_Sagone', 'SainteMarieSicche_SainteMarieSicche', 'Ocana_Ocana', 'Sovenzia_Sovenzia', 'Loretto_Loretto']}, 'CONTINGENCY_SELECTION': {'TripTransfos': False, 'TripLines': True, 'AvgLowVoltage': 1, 'SelectionMethod': 'SelectWorstCases', 
'TripGenerators': True}, 'CASE_SELECTION': {'TransformersList': [], 'PSEN_results_csvfile': 'C:/Users/H92579/Documents/PSEN_simu/ResultatSimu/N_20190529_09h33m33/simulationDClog_complete_09h33m33.csv', 'DecimalSeparator': ',', 'MaxDepth': 5, 'NewCsvFile': 'CleanedData.csv', 'PSEN_results_csvfile_cleaned': False, 'Python3_path': 'C:/Python35', 'PF_path': 'C:\\Program Files\\DIgSILENT\\PowerFactory 2017 SP1\\Python\\3.5', 'LinesList': ['90.0'], 'PSEN_results_folder': 'C:/Users/H92579/Documents/PSEN_simu/ResultatSimu/N_20190529_09h33m33', 'OutputNewCsv': False, 'BusesList': ['90.0']}, 'CONTINGENCY_OPTIONS': {'ActiveLimits': True, 'Vmin': 0.95, 'FlowLimitTransformers': 100, 'AdjustTaps': False, 'VarLimits': True, 'FlowLimitLines': 100, 'FlatStart': False, 'AdjustShunts': False, 'Vmax': 1.05, 'output_file_format': 'xls', 'DispatchMode': 'ReferenceMachine'}}
\ No newline at end of file
+Dico ={'CONTINGENCY_PROCESSING': {'XLS_file': 'C:/Users/H92579/Documents/PSEN_simu/ResultatSimu/N_20190621_07h31m35/ACCCresults.xls', "'Contingency_List_For_testuno Flows 0'": ['CERVIONE_GHISONACCIA'], "'Component_List_For_testuno Flows 0'": ['Aspretto_Aspretto_Vazzio_Vazzio_ASPRETTO_VAZZIO__LI'], 'TabList': ['testuno Voltage 0', 'testuno Flows 0'], "'Component_List_For_testuno Voltage 0'": ['Corsica_Corsica'], "'Contingency_List_For_testuno Voltage 0'": ['CASTIRLA_CORSICA']}, 'CONTINGENCY_SELECTION': {'TripLines': True, 'csv_file': 'C:/Users/H92579/Documents/PSEN_simu/ResultatSimu/N_20190621_07h31m35/Test.csv', 'SelectionMethod': 'CaseSelectionFromFile', 'case_name': 'testuno', 'TripTransfos': False, 'TripGenerators': True}, 'CASE_SELECTION': {'TransformersList': [], 'PSEN_results_csvfile': 'C:/Users/H92579/Documents/PSEN_simu/ResultatSimu/N_20190621_07h31m35/simulationDClog_complete_07h31m35.csv', 'DecimalSeparator': ',', 'MaxDepth': 5, 'NewCsvFile': 'CleanedData.csv', 'PSEN_results_csvfile_cleaned': False, 'Python3_path': 'C:/Python35', 'PF_path': 'C:\\Program Files\\DIgSILENT\\PowerFactory 2017 SP1\\Python\\3.5', 'LinesList': ['90.0'], 'PSEN_results_folder': 'C:/Users/H92579/Documents/PSEN_simu/ResultatSimu/N_20190621_07h31m35', 'OutputNewCsv': False, 'BusesList': ['90.0']}, 'CONTINGENCY_OPTIONS': {'ActiveLimits': True, 'Vmin': 0.95, 'FlowLimitTransformers': 100, 'AdjustTaps': False, 'VarLimits': True, 'FlowLimitLines': 100, 'FlatStart': False, 'AdjustShunts': False, 'Vmax': 1.05, 'output_file_format': 'xls', 'DispatchMode': 'ReferenceMachine'}}
\ No newline at end of file
Ces convertisseurs sont implémentés sous forme de plugins
"""
-
-
-
-
tous.extend(grid.obj_id.GetContents( '*.ElmTerm', 1))
bus = []
for noeud in tous:
- # if ((noeud.iUsage == 0) and (noeud.outserv == 0)): # eliminer tous les noeuds out-service
- # if ((noeud.iUsage == 0)or(noeud.iUsage == 1)) :
bus.append(noeud)
noeuds = sorted(bus, key=lambda x: x.cStatName)
buses = []
TfoDico[tfoname]['3NUMBER'] = bus3_number
TfoDico[tfoname]['#WIND'] = 3
- print ("Read data OK")
+ print ("Read data double OK")
prj.Delete()
return MachineDico, LoadDico, LineDico, TfoDico, MotorDico,buses
filer=open('temp.txt','r')
def EficasProcessXLS(listeparam) :
- #print "dans processXLS"
item=listeparam[0]
dico=item.process_N1()
- #print dico.keys()
+
if 'PF_path' in dico['CASE_SELECTION']:
- #from ProcessorPF import processXLS
from Processor import processXLS
else:
from Processor import processXLS
- # processXLS(dico)
- print(dico) ## CM
+
import cProfile, pstats, StringIO
pr = cProfile.Profile()
pr.enable()
ps.print_stats()
print(s.getvalue())
- #if nouvelleVal != [] : prob.set_valeur(nouvelleVal)
-
# le dictionnaire des commandes a la structure suivante :
# la clef est la commande qui va proposer l action
# puis un tuple qui contient
-# - la fonction a appeler
+# - la fonction a appeler
# - le label dans le menu du clic droit
-# - un tuple contenant les parametres attendus par la fonction
-# - appelable depuis Salome uniquement -)
-# - appelable depuis un item valide uniquement
-# - toolTip
+# - un tuple contenant les parametres attendus par la fonction
+# - appelable depuis Salome uniquement -)
+# - appelable depuis un item valide uniquement
+# - toolTip
dict_commandes={
'CONTINGENCY_PROCESSING': (
(EficasProcessXLS,"process",('editor','item',),False,True,"process values "),
#
from PFExtractGeneratorLoadLineandTransfoDico import *
-#from ExtractGeneratorLoadLineandTransfoDico import ExtractGeneratorLoadLineandTransfoDico2
path1 = os.path.abspath(os.path.join(os.path.abspath(__file__), '../','TreatOutputs'))
sys.path.append(path1)
exc_type, exc_obj, exc_tb = sys.exec_info()
print(e)
print(exc_type, exc_tb.tb_lineno)
- #print ""
- #print MachineDico,LoadDico,LineDico,TransfoDico,MotorDico,BusDico,BranchesDico,BusNominal
for e in self.jdc.etapes:
if e.nom == 'CASE_SELECTION' :
self.jdc.appli.changeIntoMC(e, 'BusesList', BusList)
self.jdc.appli.changeIntoMC(e, 'LinesList', LinesList)
self.jdc.appli.changeIntoMC(e, 'TransformersList', TransfosList)
-
- # self.jdc.appli.changeIntoDefMC('CONTINGENCY_SELECTION', ('Automatic_N_2_Selection', 'BusesList'), BusList)
self.jdc.appli.changeIntoDefMC('CONTINGENCY_SELECTION', ('Automatic_N_2_Selection', 'LinesList'), LinesList)
self.jdc.appli.changeIntoDefMC('CONTINGENCY_SELECTION', ('Automatic_N_2_Selection', 'TransformersList'), TransfosList)
if not (hasattr(self,'sheets')) :
from Processor import getXLSinfo, getCSVinfo
- #from Processor_Storage import *
- #print getSheets
- #getSheets()
- #ComponentList, ContingencyList = getComponentandContingencyList(Storage.sheets[0])
- #print ComponentList
- #print ContingencyList
- #Storage.selectedDoubleRow[Storage.sheets[0]]=['PV MATIMBA']
- #Storage.selectedDoubleCol[Storage.sheets[0]]=['MAZENOD_MHDAM_LI1_']
- #self.jdc.appli.changeIntoMC(self,'TabList',Storage.sheets)
- #self.sheets=Storage.sheets
- #self.OngletsValeurs=[]
if not (XLS_file == "" or XLS_file == None):
#XLSinfo = getXLSinfo(XLS_file)
if XLS_file.endswith('.xls') or XLS_file.endswith('.xlsx'):
elif XLS_file.endswith('.csv'):
XLSinfo = getCSVinfo(XLS_file)
self.sheets=XLSinfo
- #self.sheets={'a':(('a','b','c'),('f','g','h'))}
- #v pascale
self.jdc.editor.changeIntoMC(self,'TabList',self.sheets.keys(),('b_TabList',))
-
-
-## self.jdc.appli.changeIntoMC(self,'TabList',self.sheets.keys())
-##
-## for k in self.sheets.keys():
-## nom='Component_List_For_'+k
-## monInto=self.sheets[k][0]
-## self.jdc.appli.ajoutDefinitionMC('CONTINGENCY_PROCESSING',nom,'TXM',min=0, max='**', into=monInto, homo= 'SansOrdreNiDoublon')
-## nom='Contingency_List_For_'+k
-## monInto=self.sheets[k][1]
-## self.jdc.appli.ajoutDefinitionMC('CONTINGENCY_PROCESSING',nom,'TXM',min=0, max='**', into=monInto, homo= 'SansOrdreNiDoublon')
-
self.MCAjoutes=[]
self.OngletsSelectionnes=[]
else :
# On teste si on a modifie la liste des onglets
- nouveauxOngletsSelectionnes = self.get_child('b_TabList').get_child('TabList').valeur
-# print (self.get_child('b_TabList'))
-# print (self.get_child('b_TabList').get_child('TabList'))
-# print (dir(self.get_child('b_TabList').get_child('TabList')))
-# print (self.get_child('b_TabList').get_child('TabList').valeur)
- #print('nouveauxOngletsSelectionnes',nouveauxOngletsSelectionnes)
+ nouveauxOngletsSelectionnes = self.get_child('b_TabList').get_child('TabList').valeur
if nouveauxOngletsSelectionnes == self.OngletsSelectionnes: return
- #print (6)
+
if nouveauxOngletsSelectionnes == () or nouveauxOngletsSelectionnes ==[]:
for MC in self.MCAjoutes : self.jdc.editor.deleteMC(self,MC,('b_TabList',))
self.MCAjoutes==[]
self.OngletsSelectionnes=[]
self.jdc.editor.fenetreCentraleAffichee.reaffiche()
- return
-
-# TabList= self.get_child('b_TabList').get_child('TabList').valeur
-# nouveauxOngletsSelectionnes = []
-# for tab in TabList:
-# nouveauxOngletsSelectionnes.append(tab.replace(' ','___'))
-#
-# if nouveauxOngletsSelectionnes==self.OngletsSelectionnes : return
-#
-# if nouveauxOngletsSelectionnes==() or nouveauxOngletsSelectionnes == [] :
-# for MC in self.MCAjoutes : self.jdc.editor.deleteMC(self,MC,('b_TabList',))
-# self.MCAjoutes=[]
-# self.OngletsSelectionnes=[]
-
+ return
for Onglet in nouveauxOngletsSelectionnes:
self.MCAjoutes.remove(MCFils)
self.OngletsSelectionnes=nouveauxOngletsSelectionnes
- self.jdc.editor.fenetreCentraleAffichee.reaffiche()
-## nouveauxOngletsSelectionnes= self.get_child('TabList').getval()
-## if nouveauxOngletsSelectionnes==self.OngletsSelectionnes : return
-## if nouveauxOngletsSelectionnes==() or nouveauxOngletsSelectionnes == [] :
-## for MC in self.MCAjoutes :
-## self.jdc.appli.deleteMC(self,MC)
-## self.MCAjoutes=[]
-## self.OngletsSelectionnes=[]
-## return
-##
-## for Onglet in nouveauxOngletsSelectionnes:
-## if Onglet in self.OngletsSelectionnes : continue
-##
-## MCFils='Contingency_List_For_'+Onglet
-## self.jdc.appli.ajoutMC(self,MCFils,[])
-## self.MCAjoutes.append(MCFils)
-## MCFils='Component_List_For_'+Onglet
-## self.jdc.appli.ajoutMC(self,MCFils,[])
-## self.MCAjoutes.append(MCFils)
-##
-##
-## for Onglet in self.OngletsSelectionnes:
-## if Onglet in nouveauxOngletsSelectionnes : continue
-##
-## MCFils='Contingency_List_For_'+Onglet
-## self.jdc.appli.deleteMC(self,MCFils)
-## self.MCAjoutes.remove(MCFils)
-##
-## MCFils='Component_List_For_'+Onglet
-## self.jdc.appli.deleteMC(self,MCFils)
-## self.MCAjoutes.remove(MCFils)
-##
-## self.OngletsSelectionnes=nouveauxOngletsSelectionnes
-##
-##
+ self.jdc.editor.fenetreCentraleAffichee.reaffiche()
\ No newline at end of file
# Modules Eficas
import sys,os
-#sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)),'..'))
import prefs
name='prefs_'+prefs.code
__import__(name)
-#acceder scripts de Lucie
-#path1 = os.path.abspath(os.path.join(os.path.abspath(__file__),'TreatOutputs'))
-#path1 = 'C:\\Logiciels DER\\PSEN_V16\\Code\\ProcessOutputs_Eficas\TreatOutputs'
-#sys.path.append(path1)
-
from InterfaceQT4 import eficas_go
if __name__=='__main__':
-C:/Users/H92579/Documents/PSEN_simu/ResultatSimu/N_20190529_09h33m33\package0_N_20190529_09h33m33\BaseCase.pfd
+C:/Users/H92579/Documents/PSEN_simu/ResultatSimu/N_20190621_07h31m35\package0_N_20190621_07h31m35\BaseCase.pfd
C:\Program Files\DIgSILENT\PowerFactory 2017 SP1\Python\3.5
C:/Python35