NotesWhat is notes.io?

Notes brand slogan

Notes - notes.io

import glob
import hashlib
import os
import re
from datetime import datetime
import app
import pandas as pd
from flask import request, jsonify

from src.utils.constants import LOG_DIR
from src.databaseinterface.get.database_management import DatabaseManagement


def get_filename_hash(file_name, file_size=None):
    """
    Calculate sha224 hashes out of a measurement file name.

    :param file_name: Measurement file name (pytch ".h5" or rrec naming scheme)
    :param file_size: Measurement file size as a string, optional
    :return: file_name_hash: sourceFileMappingId, used for mapping converted files with sources
    :return: file_name_size_hash: sourceFileUniqueId, used as primary key ("" when file_size is None)
    :return: meas_recording_datetime: sourceFileRecordingTime, time when the measurement was recorded.
             (False, False, False) is returned when the name matches neither naming scheme.
    """
    # NOTE(review): the digit classes below had lost their backslashes
    # (e.g. r"d{4}" instead of r"\d{4}") so no filename could ever match and
    # the function always fell through to (False, False, False); restored.
    regex_mi5id_pattern = r"\d*$"  # Pytch converted file format pattern
    regex_mi5id_pattern1 = r"mi_[0-9]*"  # rrec source file pattern
    rrec_regex_date_pattern = r"\d{4}\.\d{2}\.\d{2}_at_\d{2}\.\d{2}\.\d{2}"
    h5_regex_date_pattern = r"\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}"

    file_name, ext = os.path.splitext(file_name)
    mi5_id_string = os.path.basename(file_name).split("__")[0]
    extracted_mi5_id = re.compile(regex_mi5id_pattern).findall(mi5_id_string)
    extracted_datetime = re.compile(h5_regex_date_pattern).findall(file_name)
    if file_name.startswith("MAN_DASimon"):
        # Fixed id for MAN_DASimon recordings; the trailing "" mimics the
        # extra empty match that findall(r"\d*$") produces, so len(...) > 1.
        extracted_mi5_id = ["789", ""]
    if len(extracted_mi5_id) > 1 and len(extracted_datetime) > 0:
        # Pytch-converted file: "<id>__..._YYYY-MM-DD_HH-MM-SS"
        extracted_mi5_id = extracted_mi5_id[0]
        extracted_datetime = extracted_datetime[0]
    else:
        # rrec source file: "mi_<id>..._YYYY.MM.DD_at_HH.MM.SS"
        extracted_mi5_id = re.compile(regex_mi5id_pattern1).findall(file_name)
        extracted_datetime = re.compile(rrec_regex_date_pattern).findall(file_name)
        if len(extracted_mi5_id) > 0 and len(extracted_datetime) > 0:
            extracted_mi5_id = extracted_mi5_id[0].split("_")[1]
            extracted_datetime = extracted_datetime[0]
            date, time1 = extracted_datetime.split("_at_")
            date = date.replace(".", "-")
            time1 = time1.replace(".", "-")
            extracted_datetime = date + "_" + time1  # Pytch format
        else:
            return False, False, False

    meas_recording_datetime = datetime.strptime(extracted_datetime, "%Y-%m-%d_%H-%M-%S")
    if file_size is None:
        file_name_size_hash = ""
    else:
        # file_size is expected to already be a string here — TODO confirm with callers.
        hash_string_size = extracted_mi5_id + extracted_datetime + file_size
        file_name_size_hash = hashlib.sha224(hash_string_size.lower().encode("utf-8")).hexdigest()

    hash_string = extracted_mi5_id + extracted_datetime
    hash_md5 = hashlib.sha224(hash_string.lower().encode("utf-8"))
    return hash_md5.hexdigest(), file_name_size_hash, meas_recording_datetime


def updated_hashtags(logger):
    """
    Apply an edited hashtag row from the request payload to the database.

    Reads uniqueId, hashtagRow (comma-separated string) and fileName from the
    JSON body, normalises the hashtags into a stripped list (empty list when
    the row is blank) and persists them for the given measurement id.

    :param logger: Logger used to record the update.
    :return: Result of DatabaseManagement().update_hashtags.
    """
    payload = request.get_json()
    measurement_id = payload["uniqueId"]
    raw_hashtags = payload["hashtagRow"]
    source_file = payload["fileName"]
    tags = [tag.strip() for tag in raw_hashtags.split(',')]
    if tags == ['']:
        # A blank row means "remove all hashtags", not a single empty tag.
        tags = []
    outcome = DatabaseManagement().update_hashtags(measurement_id, tags)
    logger.info(f'Hashtags updated for:{source_file}, Updated Hashtags : {tags}.')
    return outcome


def updated_external_links(logger):
    """
    Apply an edited external-links row from the request payload to the database.

    Reads uniqueId, fileName and externalLinkRow (comma-separated string) from
    the JSON body, normalises the links into a stripped list (empty list when
    the row is blank) and persists them for the given measurement id.

    :param logger: Logger used to record the update.
    :return: Result of DatabaseManagement().update_external_links.
    """
    payload = request.get_json()
    measurement_id = payload["uniqueId"]
    source_file = payload["fileName"]
    raw_links = payload["externalLinkRow"]
    links = [link.strip() for link in raw_links.split(',')]
    if links == ['']:
        # A blank row means "remove all links", not a single empty link.
        links = []
    outcome = DatabaseManagement().update_external_links(measurement_id, links)
    logger.info(f'External link updated for:{source_file}, New ExternalLinks : {raw_links}.')
    return outcome


# def import_hashtags(logger):
# data = request.get_json()
# version_name = data["importFormat"]["format"]
# df = pd.DataFrame(data["data"])

# if version_name != "v1.0.0":
# logger.error("InvalidSchemaVersion: Select valid Schema version for provided excel.")
# return

# # Extract necessary columns from DataFrame
# files = df.iloc[:, 0].apply(lambda x: str(x).strip())
# print("files",files)
# hashtags = df.iloc[:, 1].apply(lambda x: str(x).strip() if pd.notna(x) and pd.notnull(x) else [])
# print("hashtag", hashtags)
# overrides = df.iloc[:, 2].apply(lambda x: str(x).strip().lower() if pd.notna(x) and pd.notnull(x) else None)
# print(overrides)
# # Batch processing of hashtag updates
# batch_data = []
# for file_name, hashtag, override in zip(files, hashtags, overrides):
# print("sourcefiles",file_name)
# print("hashtags", hashtags)

# source_file_mapping_id, _, _ = get_filename_hash(os.path.basename(file_name))
# if source_file_mapping_id:
# print("source_id",source_file_mapping_id)
# file_name = os.path.basename(file_name)
# batch_data.append((file_name, source_file_mapping_id, hashtag))
# print("batch_data",batch_data)
# if batch_data:
# imported_items, record_found = DatabaseManagement().update_hashtags_bulk(batch_data)
# # You might want to handle the result as needed





# elif version_name == "v2.0.0":
# try:
# if pd.isna(df.iloc[i, 0]) and pd.isnull(df.iloc[i, 0]):
# continue
# file_name = str(df.iloc[i, 0]).strip()
# except:
# continue
# try:
# if not pd.isna(df.iloc[i, 1]) and not pd.isnull(df.iloc[i, 1]):
# hashtag = str(df.iloc[i, 1]).strip()
# else:
# hashtag = []
# except:
# logger.error("InvalidSchemaVersion: Select valid Schema version for the given excel")
# return
# try:
# if not pd.isna(df.iloc[i, 2]) and not pd.isnull(df.iloc[i, 2]):
# external_link_name = str(df.iloc[i, 2]).strip()
# else:
# external_link_name = []
# except:
# logger.error("InvalidSchemaVersion: Select valid schema version for the given excel")
# return
# try:
# if not pd.isna(df.iloc[i, 3]) and not pd.isnull(df.iloc[i, 3]):
# external_link_url = str(df.iloc[i, 3]).strip()
# else:
# external_link_url = []
# except:
# logger.error("InvalidSchemaVersion: Select valid schema version for the given excel")
# return
# try:
# if not pd.isna(df.iloc[i, 4]) and not pd.isnull(df.iloc[i, 4]):
# override = str(df.iloc[i, 4]).strip().lower()
# else:
# override = None
# except:
# override = None
# source_file_mapping_id, _, _ = get_filename_hash(os.path.basename(file_name))
# if source_file_mapping_id is not False:
# file_name = os.path.basename(file_name)
# if override is None or len(override) > 2:
# (importedItems, recordFound) = DatabaseManagement().update_hashtags_import(
# file_name, source_file_mapping_id, hashtag, external_link_name, external_link_url)
# elif "e" in override or "h" in override:
# (importedItems, recordFound) = DatabaseManagement().update_hashtags_import(
# file_name, source_file_mapping_id, hashtag, external_link_name, external_link_url, override)
# else:
# logger.error("Invalid input. Please give appropriate input.")
# continue
# if not recordFound:
# logger.warning("Not Modified File {} not found in database".format(file_name))
# elif importedItems:
# logger.info("Updated {} with an items {}".format(file_name, importedItems))
# else:
# if hashtag is not None:
# logger.warning("Not Modified {} with hashtags {}".format(file_name, hashtag))
# if external_link_url is not None:
# logger.warning(
# "Not Modified {} with externalLinks {}".format(file_name, external_link_url))
# else:
# logger.warning("Invalid source file name " + file_name)

# if recordFound:


def import_hashtags(logger):
    """
    Import hashtags from an uploaded sheet (v1.0.0 schema) and apply them in bulk.

    Expects a JSON payload with "importFormat.format" (schema version) and
    tabular "data" where column 0 is the source file name, column 1 the
    hashtag cell and column 2 an optional override flag ('h' forces an
    individual override update instead of the bulk path).

    :param logger: Logger used for progress and validation messages.
    :return: None. Results are reported through the logger.
    """
    # NOTE(review): the previous version had literal SyntaxErrors — the
    # "(importedItems, recordFound) =" assignments were split from their
    # right-hand sides — and unconditionally ran the bulk update (and logged a
    # possibly-undefined file_name) even when nothing was collected. Fixed.
    data = request.get_json()
    version_name = data["importFormat"]["format"]
    df = pd.DataFrame(data["data"])

    bulk_updates = []

    for i in range(len(df)):
        if version_name != "v1.0.0":
            # Only the v1.0.0 schema is currently supported (v2.0.0 handling
            # was commented out upstream).
            continue
        try:
            if pd.isna(df.iloc[i, 0]) or pd.isnull(df.iloc[i, 0]):
                continue  # no source file name on this row
            if not pd.isna(df.iloc[i, 1]) and not pd.isnull(df.iloc[i, 1]):
                hashtag = str(df.iloc[i, 1]).strip()
            else:
                hashtag = []
            file_name = str(df.iloc[i, 0]).strip()
        except Exception:
            logger.error("InvalidSchemaVersion: Select valid Schema version for provided excel.")
            return
        try:
            if not pd.isna(df.iloc[i, 2]) and not pd.isnull(df.iloc[i, 2]):
                override = str(df.iloc[i, 2]).strip().lower()
            else:
                override = None
        except Exception:
            # Override column is optional; missing column means no override.
            override = None

        source_file_mapping_id, _, _ = get_filename_hash(os.path.basename(file_name))
        if source_file_mapping_id is not False:
            file_name = os.path.basename(file_name)
            if override is None:
                # Default path: collect for a single bulk database call.
                bulk_updates.append((file_name, source_file_mapping_id, hashtag))
            elif override == 'h':
                # Explicit hashtag override: update this record individually.
                (importedItems, recordFound) = DatabaseManagement().update_hashtags_import(
                    file_name, source_file_mapping_id, hashtag, override=override)
            else:
                logger.error("Invalid input. Please give appropriate input.")
                continue
        else:
            logger.warning("Record not found " + file_name)

    if bulk_updates:
        (importedItems, recordFound) = DatabaseManagement().update_hashtags_import(bulk_updates)
        if importedItems:
            logger.info("Imported hashtags for {} file(s): {}".format(len(bulk_updates), importedItems))
# elif version_name == "v2.0.0":
# try:
# if pd.isna(df.iloc[i, 0]) and pd.isnull(df.iloc[i, 0]):
# continue
# file_name = str(df.iloc[i, 0]).strip()
# except:
# continue
# try:
# if not pd.isna(df.iloc[i, 1]) and not pd.isnull(df.iloc[i, 1]):
# hashtag = str(df.iloc[i, 1]).strip()
# else:
# hashtag = []
# except:
# logger.error("InvalidSchemaVersion: Select valid Schema version for the given excel")
# return
# try:
# if not pd.isna(df.iloc[i, 2]) and not pd.isnull(df.iloc[i, 2]):
# external_link_name = str(df.iloc[i, 2]).strip()
# else:
# external_link_name = []
# except:
# logger.error("InvalidSchemaVersion: Select valid schema version for the given excel")
# return
# try:
# if not pd.isna(df.iloc[i, 3]) and not pd.isnull(df.iloc[i, 3]):
# external_link_url = str(df.iloc[i, 3]).strip()
# else:
# external_link_url = []
# except:
# logger.error("InvalidSchemaVersion: Select valid schema version for the given excel")
# return
# try:
# if not pd.isna(df.iloc[i, 4]) and not pd.isnull(df.iloc[i, 4]):
# override = str(df.iloc[i, 4]).strip().lower()
# else:
# override = None
# except:
# override = None
# source_file_mapping_id, _, _ = get_filename_hash(os.path.basename(file_name))
# if source_file_mapping_id is not False:
# file_name = os.path.basename(file_name)
# if override is None or len(override) > 2:
# (importedItems, recordFound) = DatabaseManagement().update_hashtags_import(
# file_name, source_file_mapping_id, hashtag, external_link_name, external_link_url)
# elif "e" in override or "h" in override:
# (importedItems, recordFound) = DatabaseManagement().update_hashtags_import(
# file_name, source_file_mapping_id, hashtag, external_link_name, external_link_url, override)
# else:
# logger.error("Invalid input. Please give appropriate input.")
# continue
# if not recordFound:
# logger.warning("Not Modified File {} not found in database".format(file_name))
# elif importedItems:
# logger.info("Updated {} with an items {}".format(file_name, importedItems))
# else:
# if hashtag is not None:
# logger.warning("Not Modified {} with hashtags {}".format(file_name, hashtag))
# if external_link_url is not None:
# logger.warning(
# "Not Modified {} with externalLinks {}".format(file_name, external_link_url))
# else:
# logger.warning("Invalid source file name " + file_name)

# if recordFound:
# return True




# def import_hashtags(logger):
# try:
# data = request.get_json()
# version_name = data["importFormat"]["format"]
# data_rows = data["data"]

# if version_name not in ["v1.0.0", "v2.0.0"]:
# logger.error("InvalidSchemaVersion: Select valid Schema version for provided data.")
# return

# # Create a DataFrame from the JSON data
# df = pd.DataFrame(data_rows)

# update_data = []

# for index, row in df.iterrows():
# try:
# file_name = str(row['Sourcefiles']).strip()
# hashtags = [tag.strip() for tag in str(row['Hashtags']).split(',')] if 'Hashtags' in row else []

# # Additional fields for v2.0.0 schema
# if version_name == "v2.0.0":
# external_link_name = str(row.get('ExternalLinkName', '')).strip()
# external_link_url = str(row.get('ExternalLinkURL', '')).strip()
# override = str(row.get('Override', '')).strip().lower()
# update_data.append((file_name, hashtags, external_link_name, external_link_url, override))
# else:
# update_data.append((file_name, hashtags))

# except Exception as e:
# logger.error(f"Error processing row {index}: {e}")
# continue

# # Perform bulk updates
# print("updatedata is:",update_data)
# for update_record in update_data:
# try:
# file_name = update_record[0]
# hashtags = update_record[1]

# if version_name == "v1.0.0":
# (importedItems, recordFound) = DatabaseManagement().update_hashtags_import(
# file_name, hashtags)
# elif version_name == "v2.0.0":
# external_link_name = update_record[2]
# external_link_url = update_record[3]
# override = update_record[4]
# (importedItems, recordFound) = DatabaseManagement().update_hashtags_import(
# file_name, hashtags, external_link_name, external_link_url, override)
# else:
# continue

# if not recordFound:
# logger.warning("Not Modified File {} not found in database".format(file_name))
# elif importedItems:
# logger.info("Updated {} with items {}".format(file_name, importedItems))
# else:
# if hashtags:
# logger.warning("Not Modified {} with hashtags {}".format(file_name, hashtags))
# # Add logging for external links if needed

# except Exception as e:
# logger.error(f"Error updating data: {e}")
# continue

# except Exception as e:
# logger.error(f"Error processing data: {e}")
# return




# def import_hashtags(logger):
# try:
# data = request.get_json()
# version_name = data["importFormat"]["format"]
# data_rows = data["data"]

# if version_name not in ["v1.0.0", "v2.0.0"]:
# logger.error("InvalidSchemaVersion: Select valid Schema version for provided data.")
# return

# # Create a DataFrame from the JSON data
# df = pd.DataFrame(data_rows)

# update_data = []

# for index, row in df.iterrows():
# try:
# file_name = str(row['Sourcefiles']).strip()
# hashtags = [tag.strip() for tag in str(row['Hashtags']).split(',')] if 'Hashtags' in row else []

# # Additional fields for v2.0.0 schema
# if version_name == "v2.0.0":
# external_link_name = str(row.get('ExternalLinkName', '')).strip()
# external_link_url = str(row.get('ExternalLinkURL', '')).strip()
# override = str(row.get('Override', '')).strip().lower()
# update_data.append((file_name, hashtags, external_link_name, external_link_url, override))
# else:
# update_data.append((file_name, hashtags))

# except Exception as e:
# logger.error(f"Error processing row {index}: {e}")
# continue

# # Perform bulk updates
# for update_record in update_data:
# try:
# file_name = update_record[0]
# hashtags = update_record[1]

# # Get source_file_mapping_id, hashtag, external_link_name, external_link_url using get_filename_hash
# source_file_mapping_id, _,_ = get_filename_hash(file_name)

# if source_file_mapping_id is False:
# logger.warning(f"File {file_name} not found or invalid in database")
# continue

# if version_name == "v1.0.0":
# (importedItems, recordFound) = DatabaseManagement().update_hashtags_import(
# file_name, source_file_mapping_id, hashtags)
# elif version_name == "v2.0.0":
# if len(update_record) < 5:
# logger.error("Missing override field in v2.0.0 schema")
# continue
# override = update_record[4]

# if override is None or len(override) > 2:
# (importedItems, recordFound) = DatabaseManagement().update_hashtags_import(
# file_name, source_file_mapping_id, hashtags, external_link_name, external_link_url)
# elif "e" in override or "h" in override:
# (importedItems, recordFound) = DatabaseManagement().update_hashtags_import(
# file_name, source_file_mapping_id, hashtags, external_link_name, external_link_url, override)
# else:
# logger.error("Invalid input. Please give appropriate input.")
# continue
# else:
# continue

# if not recordFound:
# logger.warning(f"Not Modified File {file_name} not found in database")
# elif importedItems:
# logger.info(f"Updated {file_name} with items {importedItems}")
# else:
# if hashtags:
# logger.warning(f"Not Modified {file_name} with hashtags {hashtags}")
# # Add logging for external links if needed

# except Exception as e:
# logger.error(f"Error updating data: {e}")
# continue

# except Exception as e:
# logger.error(f"Error processing data: {e}")
# return

# def import_hashtags(logger):
# try:
# data = request.get_json()
# version_name = data["importFormat"]["format"]
# data_rows = data["data"]

# if version_name not in ["v1.0.0", "v2.0.0"]:
# logger.error("InvalidSchemaVersion: Select valid Schema version for provided data.")
# return

# # Create a DataFrame from the JSON data
# df = pd.DataFrame(data_rows)

# update_data = []

# for index, row in df.iterrows():
# try:
# file_name = str(row['Sourcefiles']).strip()

# # Handle hashtags field
# if 'Hashtags' in row:
# hashtags = row['Hashtags']
# if isinstance(hashtags, list):
# hashtags = ','.join(map(str, hashtags)) # Convert list to comma-separated string
# hashtags = [tag.strip() for tag in str(hashtags).split(',')]
# else:
# hashtags = []

# # Additional fields for v2.0.0 schema
# if version_name == "v2.0.0":
# external_link_name = str(row.get('ExternalLinkName', '')).strip()
# external_link_url = str(row.get('ExternalLinkURL', '')).strip()
# override = str(row.get('Override', '')).strip().lower()
# update_data.append((file_name, hashtags, external_link_name, external_link_url, override))
# else:
# update_data.append((file_name, hashtags))

# except Exception as e:
# logger.error(f"Error processing row {index}: {e}")
# continue
# print( "update data", update_data)

# # Perform bulk updates
# for update_record in update_data:
# try:
# file_name = update_record[0]
# hashtags = update_record[1]

# # Get source_file_mapping_id, hashtag, external_link_name, external_link_url using get_filename_hash
# source_file_mapping_id, _,_ = get_filename_hash(file_name)
# print(source_file_mapping_id,file_name,hashtags)
# if source_file_mapping_id is False:
# logger.warning(f"File {file_name} not found or invalid in database")
# continue

# if version_name == "v1.0.0":
# (importedItems, recordFound) = DatabaseManagement().update_hashtags_import(
# file_name, source_file_mapping_id, hashtags)
# elif version_name == "v2.0.0":
# if len(update_record) < 5:
# logger.error("Missing override field in v2.0.0 schema")
# continue
# override = update_record[4]

# if override is None or len(override) > 2:
# (importedItems, recordFound) = DatabaseManagement().update_hashtags_import(
# file_name, source_file_mapping_id, hashtags, external_link_name, external_link_url)
# elif "e" in override or "h" in override:
# (importedItems, recordFound) = DatabaseManagement().update_hashtags_import(
# file_name, source_file_mapping_id, hashtags, external_link_name, external_link_url, override)
# else:
# logger.error("Invalid input. Please give appropriate input.")
# continue
# else:
# continue

# if not recordFound:
# logger.warning(f"Not Modified File {file_name} not found in database")
# elif importedItems:
# logger.info(f"Updated {file_name} with items {importedItems}")
# else:
# if hashtags:
# logger.warning(f"Not Modified {file_name} with hashtags {hashtags}")
# # Add logging for external links if needed

# except Exception as e:
# logger.error(f"Error updating data: {e}")
# continue

# except Exception as e:
# logger.error(f"Error processing data: {e}")
# return




def add_completeness_regex():
    """
    Persist a list of completeness regexes for the selected hashtag.

    Reads "regex_list" and "completeness_hashtag" from the JSON body and
    stores them via DatabaseManagement.

    :return: JSON response with a boolean "status" and a "message".
    """
    payload = request.get_json()
    patterns = payload["regex_list"]
    hashtag = payload["completeness_hashtag"]
    rows_affected = DatabaseManagement().add_function_regex_into_db(hashtag, patterns)
    if rows_affected >= 1:
        return jsonify({"status": True, "message": "Regex added Successfully"})
    return jsonify({"status": False, "message": "Regex Not Added"})


def revert_record_history():
    """
    Revert a measurement record to an earlier point in its history.

    Reads "revert_timestamp" and the first entry of "sourceFileUniqueId"
    from the JSON body and delegates the revert to DatabaseManagement.

    :return: Response from DatabaseManagement().revert_record_history.
    """
    payload = request.get_json()
    target_timestamp = payload["revert_timestamp"]
    measurement_id = payload["sourceFileUniqueId"][0]
    return DatabaseManagement().revert_record_history(target_timestamp, measurement_id)


def clear_logs(session_id):
    """
    Delete the current session's log file from the application log directory.

    :param session_id: Session identifier; the log file is "<session_id>.log"
                       inside app.log_dir.
    :return: (True, "Success") on success, (False, <error message>) on failure.
    """
    log_file = str(session_id) + ".log"
    try:
        os.remove(os.path.join(app.log_dir, log_file))
        return True, "Success"
    except Exception as e:
        # e.args[1] (the old code) raises IndexError for exceptions that do
        # not carry an (errno, strerror) pair — fall back to str(e) instead
        # of masking the original error.
        return False, e.args[1] if len(e.args) > 1 else str(e)

def update_favorites():
    """
    Store the user's favorite entries in the database.

    Reads "favorites" and "username" from the JSON body and returns the
    database result serialised as JSON.

    :return: JSON response with the result of add_user_favorites.
    """
    payload = request.get_json()
    favorite_items = payload["favorites"]
    account_name = payload["username"]
    outcome = DatabaseManagement().add_user_favorites(favorite_items, account_name)
    return jsonify(outcome)

     
 
what is notes.io
 

Notes is a web-based application for taking notes online. You can take your notes and share them with other people. If you like taking long notes, notes.io is designed for you. To date, over 8,000,000,000+ notes have been created and counting...

With notes.io;

  • * You can take a note from anywhere and any device with internet connection.
  • * You can share the notes in social platforms (YouTube, Facebook, Twitter, instagram etc.).
  • * You can quickly share your contents without website, blog and e-mail.
  • * You don't need to create any Account to share a note. As you wish you can use quick, easy and best shortened notes with sms, websites, e-mail, or messaging services (WhatsApp, iMessage, Telegram, Signal).
  • * Notes.io has fabulous infrastructure design for a short link and allows you to share the note as an easy and understandable link.

Fast: Notes.io is built for speed and performance. You can take notes quickly and browse your archive.

Easy: Notes.io doesn’t require installation. Just write and share note!

Short: Notes.io’s url just 8 character. You’ll get shorten link of your note when you want to share. (Ex: notes.io/q )

Free: Notes.io works for 14 years and has been free since the day it was started.


You immediately create your first note and start sharing with the ones you wish. If you want to contact us, you can use the following communication channels;


Email: [email protected]

Twitter: http://twitter.com/notesio

Instagram: http://instagram.com/notes.io

Facebook: http://facebook.com/notesio



Regards;
Notes.io Team

     
 
Shortened Note Link
 
 
Loading Image
 
     
 
Long File
 
 

Notes larger than 18KB cannot be shortened.

To stay under 18KB, please reorganize your notes, or sign in.