7 changed files with 1 addition and 448,952 deletions
@@ -0,0 +1 @@
/output/ready/WQM_re_evaluated_data_2021_2022.zip
@@ -1,110 +0,0 @@
from datetime import datetime
import glob


def fino_data(datei_in, datei_out):
    fh_in = open(datei_in[0], "r")
    j_max = 40
    index = datei_out.find(".")  # index of the dot before the file extension

    # The variable hour indicates whether these are the hourly measurements. If so, the
    # current-meter (AWAC) data are also of interest and are written to extra files.
    # If not, only two files are needed, for CTD and O2.
    # The row length matters as well (the whole row, or only 80 fields for the two files).
    if datei_in[0] == "/insida/files_to_be_converted/F2_hydro_1h.txt":
        hour = 1
    else:
        hour = 0

    if hour:
        files_out = [open(datei_out, "a"),
                     open(datei_out[:index] + "_2" + datei_out[index:], "a"),
                     open(datei_out[:index] + "_3" + datei_out[index:], "a"),
                     open(datei_out[:index] + "_4" + datei_out[index:], "a")]
    else:
        files_out = [open(datei_out, "a"),
                     open(datei_out[:index] + "_2" + datei_out[index:], "a")]

    lines = fh_in.readlines()
    for line in lines:
        j = 0
        ###### Skip header lines
        if ("DD.MM.YYYY HH:MM:SS" in line or "Zeitpunkt" in line or "DD.MM.YY HH:MM" in line
                or "DD.MM.YYYY HH:MM" in line or "DD.MM.YY HH:00" in line or "date" in line
                or len(line) < 2):
            continue
        else:
            ### Normalise the delimiter
            line = line.replace(",", "\t")
            line = line.split("\t")
            # print(str(len(line)) + " values in file: " + str(file_handler))

            ### Split the parameters; convert the date first
            datum = line[0]
            monat = int(datum[3:5])
            tag = int(datum[0:2])
            jahr = int(datum[6:10])
            stunde = int(datum[11:13])
            minute = int(datum[14:16])

            for i in files_out:
                i.write("{:%Y%m%d%H%M} ".format(datetime(jahr, monat, tag, stunde, minute)))

            j += 1

            writeto = files_out[0]
            windex = 0
            if hour:  # hourly measurement: whole row incl. AWAC, otherwise only 80 fields for two files
                length = len(line)
            else:
                length = 80

            for i in range(1, length):
                len_test = line[i].split(".")
                if len(len_test[0]) > 0:
                    try:
                        data = float(line[i])
                        ### Adapt parameters with more than 4 digits before the decimal point
                        ### to the fixed-width formatting (max. 8 characters per column)
                        if len(len_test[0].rstrip()) == 8:
                            writeto.write("{:8.0f}".format(data))
                        elif len(len_test[0].rstrip()) == 7:
                            writeto.write("{:8.0f}".format(data))
                        elif len(len_test[0].rstrip()) == 6:
                            writeto.write("{:8.1f}".format(data))
                        elif len(len_test[0].rstrip()) == 5:
                            writeto.write("{:8.2f}".format(data))
                        else:
                            writeto.write("{:8.3f}".format(data))
                    except ValueError:  # non-numeric value: write a blank 8-character field
                        writeto.write("{:8}".format(''))
                else:
                    writeto.write("{:8}".format(''))

                j = j + 1

                if i == length - 1:  # last element of the row, so start a new line in this file
                    writeto.write("\r")
                    break

                if j % j_max == 0:  # after j_max columns, continue in the next output file
                    writeto.write("\r")
                    windex += 1
                    writeto = files_out[windex]

    for i in files_out:
        i.close()

    fh_in.close()


# Definition of the hourly measurements
# datei_in = glob.glob("C:/Users/FINO2/Documents/Insida/FINO/OnlineData/UeberFTP/F2_hydro_1h.txt")
# datei_out = "C:/Users/FINO2/Documents/Insida/FINO/OnlineData/UeberFTP/F2_hydro_1h.dat"
datei_in = glob.glob("/insida/files_to_be_converted/F2_hydro_1h.txt")
datei_out = "/insida/import/IOW/inbox/F2_hydro_1h.dat"
fino_data(datei_in, datei_out)

# Definition of the 10-minute measurements
# datei_in = glob.glob("C:/Users/FINO2/Documents/Insida/FINO/OnlineData/UeberFTP/F2_hydro_10min.txt")
# datei_out = "C:/Users/FINO2/Documents/Insida/FINO/OnlineData/UeberFTP/F2_hydro_10min.dat"
datei_in = glob.glob("/insida/files_to_be_converted/F2_hydro_10min.txt")
datei_out = "/insida/import/IOW/inbox/F2_hydro_10min.dat"
fino_data(datei_in, datei_out)
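A minimal illustrative sketch of the column-width rule used in fino_data above; the helper name format_field and the sample values are hypothetical, not part of the deleted script:

# Illustrative sketch only: reproduces the 8-character column rule of fino_data.
def format_field(value):
    digits = len(str(value).split(".")[0].rstrip())
    if digits >= 7:
        return "{:8.0f}".format(value)   # 7-8 integer digits: drop the decimals
    elif digits == 6:
        return "{:8.1f}".format(value)
    elif digits == 5:
        return "{:8.2f}".format(value)
    else:
        return "{:8.3f}".format(value)   # short values keep three decimals

print(format_field(7.123456))   # '   7.123'
print(format_field(12345.67))   # '12345.67'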
@@ -1,64 +0,0 @@
### Program to download the MARNET data from the FTP server and unpack them into the designated folder
from ftplib import FTP
import zipfile
from datetime import datetime
import fnmatch

file = ["/insida/files_to_be_converted/F2_Hydro_1h.zip",
        "/insida/files_to_be_converted/F2_Hydro_10min.zip",
        "/insida/files_to_be_converted/F2_WQM_10m.zip"]
# file = ["C:/Users/FINO2/Documents/Insida/FINO/OnlineData/UeberFTP/Hydro_1h.zip", "C:/Users/FINO2/Documents/Insida/FINO/OnlineData/UeberFTP/Hydro_10min.zip", "C:/Users/FINO2/Documents/Insida/FINO/OnlineData/UeberFTP/WQM_10m.zip"]

now = datetime.utcnow()

date = str(now.date().year)

if now.date().month < 10:
    date += "0" + str(now.date().month)
else:
    date += str(now.date().month)

if now.date().day < 10:
    date += "0" + str(now.date().day)
else:
    date += str(now.date().day)

hournow = now.time().hour
if hournow < 10:
    hour = "0" + str(hournow)
else:
    hour = str(hournow)

# The txt file names start with date and time and vary after that, hence the *.
# The matching file is therefore looked up by date and hour in the list of all files.
file1_guess = "FINO2_Hydro_ftp_1h_" + date + "_" + hour + "*(1).txt.zip"
file2_guess = "FINO2_Hydro_ftp_10min_" + date + "_" + hour + "*(2).txt.zip"
file3_guess = "FINO2_WQM_ftp_10min_" + date + "_" + hour + "*(3).txt.zip"

ftp = FTP("fino.go-sys.de")  # server address
ftp.login("iow_hydro", "hy73!!iow")  # login credentials
ftp.cwd("in")  # data directory

all_files = ftp.nlst()
file1 = fnmatch.filter(all_files, file1_guess)[0]
file2 = fnmatch.filter(all_files, file2_guess)[0]
file3 = fnmatch.filter(all_files, file3_guess)[0]

filename = [file1, file2, file3]
# print(filename)
textnames = ["F2_hydro_1h.txt", "F2_hydro_10min.txt", "F2_WQM_10min.txt"]

for i in range(len(file)):
    try:
        with open(file[i], 'wb') as fh_out:
            ftp.retrbinary('RETR %s' % filename[i], fh_out.write)
    except Exception as err:
        print("Error downloading " + filename[i] + ": " + str(err))

    # Rename the single entry inside the archive and extract it to the conversion folder
    zipdata = zipfile.ZipFile(file[i])
    zipinfo = zipdata.infolist()
    zipinfo[0].filename = textnames[i]
    # zipdata.extract(zipinfo[0], path="C:/Users/FINO2/Documents/Insida/FINO/OnlineData/UeberFTP/")
    zipdata.extract(zipinfo[0], path="/insida/files_to_be_converted/")
    zipdata.close()


ftp.quit()
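The zero-padded date and hour strings above could also be produced directly with datetime.strftime; a minimal sketch under that assumption, with the file1_guess pattern repeated from the script only for context:

# Illustrative sketch only: same "YYYYMMDD" / "HH" strings as the manual padding above.
from datetime import datetime

now = datetime.utcnow()
date = now.strftime("%Y%m%d")   # e.g. "20220712"
hour = now.strftime("%H")       # e.g. "09"
file1_guess = "FINO2_Hydro_ftp_1h_" + date + "_" + hour + "*(1).txt.zip"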
@@ -1,69 +0,0 @@
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from datetime import datetime

plt.style.use('seaborn-white')
fig_size = (15, 9)
param = "CHL"

# %% import and format data
print("loading data...")
F2_microcat_10m_data = pd.read_csv(r'D:\Python\check_insida_data\data\WQM_alle_insida_daten_seit_2013.csv')
# F2_data10m_df = pd.read_csv(r'D:\Python\Create_figures_from_insida_data\data\oneDay_in_tenMinutes.csv')
# F2_data10m_df = pd.read_csv(r'D:\Python\Create_figures_from_insida_data\data\FINO2_10m 2022-06-12-2022-07-12 1657614455364.csv')

# F2_data1h_df = pd.read_csv(r'/srv/insida-export/FINO2/CSV/oneMonth_in_oneHour.csv')
# F2_data10m_df = pd.read_csv(r'/srv/insida-export/FINO2/CSV/oneDay_in_tenMinutes.csv')

B = [datetime.strptime(a, '%Y-%m-%d %H:%M:%S')
     for a in list(F2_microcat_10m_data["date"])]
F2_microcat_10m_data["date"] = B

# For every timestamp, count how many of the three depth levels report exactly the same value
# (0 = no pair identical, -3 = all pairs identical).
is_identical_array = []
is_identical = 0

for idx, datapoint in enumerate(F2_microcat_10m_data['date']):
    if F2_microcat_10m_data[param + ' (2.0m)'][idx] == F2_microcat_10m_data[param + ' (12.0m)'][idx]:
        is_identical += -1
    if F2_microcat_10m_data[param + ' (12.0m)'][idx] == F2_microcat_10m_data[param + ' (20.0m)'][idx]:
        is_identical += -1
    if F2_microcat_10m_data[param + ' (2.0m)'][idx] == F2_microcat_10m_data[param + ' (20.0m)'][idx]:
        is_identical += -1
    is_identical_array.append(is_identical)
    is_identical = 0


fig, (ax1) = plt.subplots(1, 1, figsize=fig_size)

# ----------------- scatter plot over all depth levels -------------------------
ax1.scatter(F2_microcat_10m_data['date'], F2_microcat_10m_data[param + ' (2.0m)'], label="2m", marker='+', linewidths=0.5)
ax1.scatter(F2_microcat_10m_data['date'], F2_microcat_10m_data[param + ' (12.0m)'], label="12m", marker='x', linewidths=0.5)
ax1.scatter(F2_microcat_10m_data['date'], F2_microcat_10m_data[param + ' (20.0m)'], label="20m", marker='.', linewidths=0.5)
ax1.scatter(F2_microcat_10m_data['date'], is_identical_array, label="is_identical", marker='.', linewidths=0.1)

ax1.legend(ncol=5, facecolor='white', frameon=True, loc=(0.02, 0.92),
           prop={'size': 'large'}, columnspacing=1, handlelength=0.7,
           handletextpad=0.2, borderpad=0.2)
ax1.set_title(param, size='xx-large', y=1.0, x=0.05, fontweight='bold')
ax1.grid(linestyle='--')

ax1.xaxis.set_major_formatter(mdates.DateFormatter("%d.%m.%y"))
ax1.xaxis.set_major_locator(mdates.DayLocator(interval=1))

# ax1.xaxis.set_major_formatter(mdates.DateFormatter("%m.%y"))
# ax1.xaxis.set_major_locator(mdates.MonthLocator(interval=1))

plt.gcf().autofmt_xdate()

plt.tight_layout()
plt.show()
File diff suppressed because one or more lines are too long