Add exceptions when reading from file

merge-requests/30/head
Bob Mottram 2024-07-14 11:56:31 +01:00
parent 0423a296eb
commit dabd58dc1e
1 changed file with 107 additions and 74 deletions
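This commit wraps direct file reads in try/except OSError, so a missing or unreadable file is logged with an 'EX:' prefix instead of raising an unhandled exception. A minimal sketch of the pattern, using an illustrative path and function name rather than anything taken from the diff:

    # sketch of the guarded-read pattern applied throughout this commit
    some_filename = '/tmp/example.txt'  # illustrative path, not from the commit
    data = ''
    try:
        with open(some_filename, 'r', encoding='utf-8') as fp_example:
            data = fp_example.read()
    except OSError:
        # log in the codebase's 'EX:' style and continue with empty data
        print('EX: example_read unable to read ' + some_filename)
    if data:
        print('read ' + str(len(data)) + ' bytes')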


@@ -1328,8 +1328,8 @@ def load_json(filename: str) -> {}:
     # load from file
     try:
-        with open(filename, 'r', encoding='utf-8') as json_file:
-            data = json_file.read()
+        with open(filename, 'r', encoding='utf-8') as fp_json:
+            data = fp_json.read()
     except OSError as exc:
         print('EX: load_json exception ' + str(filename) + ' ' + str(exc))
         return json_object
@@ -1358,8 +1358,8 @@ def load_json_onionify(filename: str, domain: str, onion_domain: str,
     tries = 0
     while tries < 5:
         try:
-            with open(filename, 'r', encoding='utf-8') as json_file:
-                data = json_file.read()
+            with open(filename, 'r', encoding='utf-8') as fp_json:
+                data = fp_json.read()
                 if data:
                     data = data.replace(domain, onion_domain)
                     data = data.replace('https:', 'http:')
@ -1961,6 +1961,7 @@ def _set_default_pet_name(base_dir: str, nickname: str, domain: str,
petnames_filename) petnames_filename)
return return
try:
with open(petnames_filename, 'r', encoding='utf-8') as petnames_file: with open(petnames_filename, 'r', encoding='utf-8') as petnames_file:
petnames_str = petnames_file.read() petnames_str = petnames_file.read()
if petnames_str: if petnames_str:
@@ -1969,6 +1970,8 @@ def _set_default_pet_name(base_dir: str, nickname: str, domain: str,
-                if pet.startswith(follow_nickname + ' '):
-                    # petname already exists
-                    return
+                    if pet.startswith(follow_nickname + ' '):
+                        # petname already exists
+                        return
+    except OSError:
+        print('EX: _set_default_pet_name unable to read ' + petnames_filename)
     # petname doesn't already exist
     with open(petnames_filename, 'a+', encoding='utf-8') as petnames_file:
         petnames_file.write(petname_lookup_entry)
@@ -2141,6 +2144,7 @@ def locate_news_arrival(base_dir: str, domain: str,
     account_dir = data_dir(base_dir) + '/news@' + domain + '/'
     post_filename = account_dir + 'outbox/' + post_url
     if os.path.isfile(post_filename):
-        with open(post_filename, 'r', encoding='utf-8') as arrival_file:
-            arrival = arrival_file.read()
-            if arrival:
+        try:
+            with open(post_filename, 'r', encoding='utf-8') as arrival_file:
+                arrival = arrival_file.read()
+                if arrival:
@@ -2148,6 +2152,8 @@ def locate_news_arrival(base_dir: str, domain: str,
-                    date_from_string_format(arrival,
-                                            ["%Y-%m-%dT%H:%M:%S%z"])
-                return arrival_date
+                        date_from_string_format(arrival,
+                                                ["%Y-%m-%dT%H:%M:%S%z"])
+                    return arrival_date
+        except OSError:
+            print('EX: locate_news_arrival unable to read ' + post_filename)
     return None
@ -2249,11 +2255,15 @@ def get_reply_interval_hours(base_dir: str, nickname: str, domain: str,
reply_interval_filename = \ reply_interval_filename = \
acct_dir(base_dir, nickname, domain) + '/.reply_interval_hours' acct_dir(base_dir, nickname, domain) + '/.reply_interval_hours'
if os.path.isfile(reply_interval_filename): if os.path.isfile(reply_interval_filename):
try:
with open(reply_interval_filename, 'r', with open(reply_interval_filename, 'r',
encoding='utf-8') as interval_file: encoding='utf-8') as fp_interval:
hours_str = interval_file.read() hours_str = fp_interval.read()
if hours_str.isdigit(): if hours_str.isdigit():
return int(hours_str) return int(hours_str)
except OSError:
print('EX: get_reply_interval_hours unable to read ' +
reply_interval_filename)
return default_reply_interval_hrs return default_reply_interval_hrs
@@ -2417,6 +2427,7 @@ def _delete_post_remove_replies(base_dir: str, nickname: str, domain: str,
         return
     if debug:
         print('DEBUG: removing replies to ' + post_filename)
-    with open(replies_filename, 'r', encoding='utf-8') as replies_file:
-        for reply_id in replies_file:
-            reply_file = locate_post(base_dir, nickname, domain, reply_id)
+    try:
+        with open(replies_filename, 'r', encoding='utf-8') as replies_file:
+            for reply_id in replies_file:
+                reply_file = locate_post(base_dir, nickname, domain, reply_id)
@@ -2426,6 +2437,9 @@ def _delete_post_remove_replies(base_dir: str, nickname: str, domain: str,
-                delete_post(base_dir, http_prefix,
-                            nickname, domain, reply_file, debug,
-                            recent_posts_cache, manual)
+                    delete_post(base_dir, http_prefix,
+                                nickname, domain, reply_file, debug,
+                                recent_posts_cache, manual)
+    except OSError:
+        print('EX: _delete_post_remove_replies unable to read ' +
+              replies_filename)
     # remove the replies file
     try:
         os.remove(replies_filename)
@@ -2520,8 +2534,12 @@ def _remove_post_id_from_tag_index(tag_index_filename: str,
     """Remove post_id from the tag index file
     """
     lines = None
-    with open(tag_index_filename, 'r', encoding='utf-8') as index_file:
-        lines = index_file.readlines()
+    try:
+        with open(tag_index_filename, 'r', encoding='utf-8') as fp_index:
+            lines = fp_index.readlines()
+    except OSError:
+        print('EX: _remove_post_id_from_tag_index unable to read ' +
+              tag_index_filename)
     if not lines:
         return
     newlines = ''
@@ -2606,16 +2624,20 @@ def _delete_conversation_post(base_dir: str, nickname: str, domain: str,
     if not os.path.isfile(conversation_filename):
         return False
     conversation_str = ''
-    with open(conversation_filename, 'r', encoding='utf-8') as conv_file:
-        conversation_str = conv_file.read()
+    try:
+        with open(conversation_filename, 'r', encoding='utf-8') as fp_conv:
+            conversation_str = fp_conv.read()
+    except OSError:
+        print('EX: _delete_conversation_post unable to read ' +
+              conversation_filename)
     if post_id + '\n' not in conversation_str:
         return False
     conversation_str = conversation_str.replace(post_id + '\n', '')
     if conversation_str:
         try:
             with open(conversation_filename, 'w+',
-                      encoding='utf-8') as conv_file:
-                conv_file.write(conversation_str)
+                      encoding='utf-8') as fp_conv:
+                fp_conv.write(conversation_str)
         except OSError:
             print('EX: _delete_conversation_post unable to write ' +
                   conversation_filename)
@@ -2968,13 +2990,17 @@ def no_of_active_accounts_monthly(base_dir: str, months: int) -> bool:
                 dir_str + '/' + account + '/.lastUsed'
             if not os.path.isfile(last_used_filename):
                 continue
-            with open(last_used_filename, 'r',
-                      encoding='utf-8') as last_used_file:
-                last_used = last_used_file.read()
-                if last_used.isdigit():
-                    time_diff = curr_time - int(last_used)
-                    if time_diff < month_seconds:
-                        account_ctr += 1
+            try:
+                with open(last_used_filename, 'r',
+                          encoding='utf-8') as fp_last_used:
+                    last_used = fp_last_used.read()
+                    if last_used.isdigit():
+                        time_diff = curr_time - int(last_used)
+                        if time_diff < month_seconds:
+                            account_ctr += 1
+            except OSError:
+                print('EX: no_of_active_accounts_monthly unable to read ' +
+                      last_used_filename)
         break
     return account_ctr
@@ -3158,9 +3184,12 @@ def get_css(base_dir: str, css_filename: str) -> str:
     if not os.path.isfile(css_filename):
         return None
-    with open(css_filename, 'r', encoding='utf-8') as fp_css:
-        css = fp_css.read()
-        return css
+    try:
+        with open(css_filename, 'r', encoding='utf-8') as fp_css:
+            css = fp_css.read()
+            return css
+    except OSError:
+        print('EX: get_css unable to read ' + css_filename)
     return None
@@ -3211,10 +3240,11 @@ def _search_virtual_box_posts(base_dir: str, nickname: str, domain: str,
         search_words = [search_str]
     res = []
-    with open(index_filename, 'r', encoding='utf-8') as index_file:
-        post_filename = 'start'
-        while post_filename:
-            post_filename = index_file.readline()
-            if not post_filename:
-                break
-            if '.json' not in post_filename:
+    try:
+        with open(index_filename, 'r', encoding='utf-8') as fp_index:
+            post_filename = 'start'
+            while post_filename:
+                post_filename = fp_index.readline()
+                if not post_filename:
+                    break
+                if '.json' not in post_filename:
@@ -3222,8 +3252,8 @@ def _search_virtual_box_posts(base_dir: str, nickname: str, domain: str,
-            post_filename = path + '/' + post_filename.strip()
-            if not os.path.isfile(post_filename):
-                continue
-            with open(post_filename, 'r', encoding='utf-8') as post_file:
-                data = post_file.read().lower()
-                not_found = False
-                for keyword in search_words:
+                post_filename = path + '/' + post_filename.strip()
+                if not os.path.isfile(post_filename):
+                    continue
+                with open(post_filename, 'r', encoding='utf-8') as fp_post:
+                    data = fp_post.read().lower()
+                    not_found = False
+                    for keyword in search_words:
@@ -3236,6 +3266,9 @@ def _search_virtual_box_posts(base_dir: str, nickname: str, domain: str,
-                res.append(post_filename)
-                if len(res) >= max_results:
-                    return res
+                    res.append(post_filename)
+                    if len(res) >= max_results:
+                        return res
+    except OSError as exc:
+        print('EX: _search_virtual_box_posts unable to read ' +
+              index_filename + ' ' + str(exc))
     return res
@@ -3267,8 +3300,8 @@ def search_box_posts(base_dir: str, nickname: str, domain: str,
         for fname in fnames:
             file_path = os.path.join(root, fname)
             try:
-                with open(file_path, 'r', encoding='utf-8') as post_file:
-                    data = post_file.read().lower()
+                with open(file_path, 'r', encoding='utf-8') as fp_post:
+                    data = fp_post.read().lower()
                 not_found = False
                 for keyword in search_words: