From 21e20956960eaed33a33ad24cfcaa7d01e6d2e88 Mon Sep 17 00:00:00 2001 From: Bob Mottram Date: Sat, 13 Jul 2024 15:38:11 +0100 Subject: [PATCH] Exception handling when reading from file --- daemon_get_post.py | 4 +- desktop_client.py | 11 +- epicyon.py | 16 +- follow.py | 48 ++-- happening.py | 376 ++++++++++++++++--------------- inbox.py | 64 ++++-- keys.py | 14 +- manualapprove.py | 200 +++++++++-------- mastoapiv1.py | 32 +-- media.py | 10 +- migrate.py | 32 ++- newsdaemon.py | 65 +++--- newswire.py | 140 ++++++------ person.py | 122 ++++++---- posts.py | 540 +++++++++++++++++++++++++-------------------- question.py | 75 ++++--- reaction.py | 3 +- roles.py | 30 +-- shares.py | 28 ++- siteactive.py | 3 +- speaker.py | 65 +++--- theme.py | 40 ++-- 22 files changed, 1082 insertions(+), 836 deletions(-) diff --git a/daemon_get_post.py b/daemon_get_post.py index c9c3a681b..08315872a 100644 --- a/daemon_get_post.py +++ b/daemon_get_post.py @@ -286,7 +286,7 @@ def show_individual_post(self, ssml_getreq: bool, authorized: bool, with open(ssml_filename, 'r', encoding='utf-8') as fp_ssml: ssml_str = fp_ssml.read() except OSError: - pass + print('EX: unable to read ssml file ' + ssml_filename) if ssml_str: msg = ssml_str.encode('utf-8') msglen = len(msg) @@ -615,7 +615,7 @@ def show_individual_at_post(self, ssml_getreq: bool, authorized: bool, with open(ssml_filename, 'r', encoding='utf-8') as fp_ssml: ssml_str = fp_ssml.read() except OSError: - pass + print('EX: unable to read ssml file 2 ' + ssml_filename) if ssml_str: msg = ssml_str.encode('utf-8') msglen = len(msg) diff --git a/desktop_client.py b/desktop_client.py index 5e1e0f4c1..8e658a4e7 100644 --- a/desktop_client.py +++ b/desktop_client.py @@ -330,10 +330,13 @@ def _desktop_show_banner() -> None: banner_filename = 'theme/' + banner_theme + '/banner.txt' if not os.path.isfile(banner_filename): return - with open(banner_filename, 'r', encoding='utf-8') as banner_file: - banner = banner_file.read() - if banner: - print(banner + '\n') + try: + with open(banner_filename, 'r', encoding='utf-8') as fp_banner: + banner = fp_banner.read() + if banner: + print(banner + '\n') + except OSError: + print('EX: unable to read banner file ' + banner_filename) def _desktop_wait_for_cmd(timeout: int, debug: bool) -> str: diff --git a/epicyon.py b/epicyon.py index 0bb02f831..d1ed2adf5 100644 --- a/epicyon.py +++ b/epicyon.py @@ -1715,12 +1715,16 @@ def _command_options() -> None: approve_follows_filename = accounts_dir + '/followrequests.txt' approve_ctr = 0 if os.path.isfile(approve_follows_filename): - with open(approve_follows_filename, 'r', - encoding='utf-8') as approvefile: - for approve in approvefile: - approve1 = remove_eol(approve) - print(approve1) - approve_ctr += 1 + try: + with open(approve_follows_filename, 'r', + encoding='utf-8') as approvefile: + for approve in approvefile: + approve1 = remove_eol(approve) + print(approve1) + approve_ctr += 1 + except OSError: + print('EX: unable to read follow approvals file ' + + approve_follows_filename) if approve_ctr == 0: print('There are no follow requests pending approval.') sys.exit() diff --git a/follow.py b/follow.py index 10d6b1bbc..bd5292e89 100644 --- a/follow.py +++ b/follow.py @@ -1598,26 +1598,30 @@ def pending_followers_timeline_json(actor: str, base_dir: str, follow_requests_filename = \ acct_dir(base_dir, nickname, domain) + '/followrequests.txt' if os.path.isfile(follow_requests_filename): - with open(follow_requests_filename, 'r', - encoding='utf-8') as req_file: - for follower_handle 
in req_file: - if len(follower_handle) == 0: - continue - follower_handle = remove_eol(follower_handle) - foll_domain, _ = get_domain_from_actor(follower_handle) - if not foll_domain: - continue - foll_nickname = get_nickname_from_actor(follower_handle) - if not foll_nickname: - continue - follow_activity_filename = \ - acct_dir(base_dir, nickname, domain) + \ - '/requests/' + \ - foll_nickname + '@' + foll_domain + '.follow' - if not os.path.isfile(follow_activity_filename): - continue - follow_json = load_json(follow_activity_filename) - if not follow_json: - continue - result_json['orderedItems'].append(follow_json) + try: + with open(follow_requests_filename, 'r', + encoding='utf-8') as fp_req: + for follower_handle in fp_req: + if len(follower_handle) == 0: + continue + follower_handle = remove_eol(follower_handle) + foll_domain, _ = get_domain_from_actor(follower_handle) + if not foll_domain: + continue + foll_nickname = get_nickname_from_actor(follower_handle) + if not foll_nickname: + continue + follow_activity_filename = \ + acct_dir(base_dir, nickname, domain) + \ + '/requests/' + \ + foll_nickname + '@' + foll_domain + '.follow' + if not os.path.isfile(follow_activity_filename): + continue + follow_json = load_json(follow_activity_filename) + if not follow_json: + continue + result_json['orderedItems'].append(follow_json) + except OSError as exc: + print('EX: unable to read follow requests ' + + follow_requests_filename + ' ' + str(exc)) return result_json diff --git a/happening.py b/happening.py index 9ad2d107e..75047a82c 100644 --- a/happening.py +++ b/happening.py @@ -77,15 +77,23 @@ def _remove_event_from_timeline(event_id: str, """ if not text_in_file(event_id + '\n', tl_events_filename): return + events_timeline = '' with open(tl_events_filename, 'r', encoding='utf-8') as fp_tl: events_timeline = fp_tl.read().replace(event_id + '\n', '') + + if events_timeline: try: with open(tl_events_filename, 'w+', encoding='utf-8') as fp2: fp2.write(events_timeline) except OSError: print('EX: ERROR: unable to save events timeline') + elif os.path.isfile(tl_events_filename): + try: + os.remove(tl_events_filename) + except OSError: + print('EX: ERROR: unable to remove events timeline') def save_event_post(base_dir: str, handle: str, post_id: str, @@ -289,74 +297,78 @@ def get_todays_events(base_dir: str, nickname: str, domain: str, calendar_post_ids = [] recreate_events_file = False - with open(calendar_filename, 'r', encoding='utf-8') as events_file: - for post_id in events_file: - post_id = remove_eol(post_id) - post_filename = locate_post(base_dir, nickname, domain, post_id) - if not post_filename: - recreate_events_file = True - continue - - post_json_object = load_json(post_filename) - if not _is_happening_post(post_json_object): - continue - - content_language = system_language - if post_json_object.get('object'): - content = None - if post_json_object['object'].get('contentMap'): - sys_lang = system_language - if post_json_object['object']['contentMap'].get(sys_lang): - content = \ - post_json_object['object']['contentMap'][sys_lang] - content_language = sys_lang - if not content: - if post_json_object['object'].get('content'): - content = post_json_object['object']['content'] - if content: - if not _event_text_match(content, text_match): - continue - - public_event = is_public_post(post_json_object) - - post_event = [] - day_of_month = None - for tag in post_json_object['object']['tag']: - if not _is_happening_event(tag): + try: + with open(calendar_filename, 'r', 
encoding='utf-8') as events_file: + for post_id in events_file: + post_id = remove_eol(post_id) + post_filename = \ + locate_post(base_dir, nickname, domain, post_id) + if not post_filename: + recreate_events_file = True continue - # this tag is an event or a place - if tag['type'] == 'Event': - # tag is an event - if not tag.get('startTime'): - continue - event_time = \ - date_from_string_format(tag['startTime'], - ["%Y-%m-%dT%H:%M:%S%z"]) - if int(event_time.strftime("%Y")) == year and \ - int(event_time.strftime("%m")) == month_number and \ - int(event_time.strftime("%d")) == day_number: - day_of_month = str(int(event_time.strftime("%d"))) - if '#statuses#' in post_id: - # link to the id so that the event can be - # easily deleted - tag['post_id'] = post_id.split('#statuses#')[1] - tag['id'] = post_id.replace('#', '/') - tag['sender'] = post_id.split('#statuses#')[0] - tag['sender'] = tag['sender'].replace('#', '/') - tag['public'] = public_event - tag['language'] = content_language - post_event.append(tag) - else: - # tag is a place - post_event.append(tag) - if post_event and day_of_month: - calendar_post_ids.append(post_id) - if not events.get(day_of_month): - events[day_of_month] = [] - events[day_of_month].append(post_event) - events[day_of_month] = \ - _sort_todays_events(events[day_of_month]) + post_json_object = load_json(post_filename) + if not _is_happening_post(post_json_object): + continue + + content_language = system_language + if post_json_object.get('object'): + content = None + if post_json_object['object'].get('contentMap'): + sys_lang = system_language + content_map = post_json_object['object']['contentMap'] + if content_map.get(sys_lang): + content = content_map[sys_lang] + content_language = sys_lang + if not content: + if post_json_object['object'].get('content'): + content = post_json_object['object']['content'] + if content: + if not _event_text_match(content, text_match): + continue + + public_event = is_public_post(post_json_object) + + post_event = [] + day_of_month = None + for tag in post_json_object['object']['tag']: + if not _is_happening_event(tag): + continue + # this tag is an event or a place + if tag['type'] == 'Event': + # tag is an event + if not tag.get('startTime'): + continue + event_time = \ + date_from_string_format(tag['startTime'], + ["%Y-%m-%dT%H:%M:%S%z"]) + if int(event_time.strftime("%Y")) == year and \ + int(event_time.strftime("%m")) == month_number and \ + int(event_time.strftime("%d")) == day_number: + day_of_month = str(int(event_time.strftime("%d"))) + if '#statuses#' in post_id: + # link to the id so that the event can be + # easily deleted + tag['post_id'] = post_id.split('#statuses#')[1] + tag['id'] = post_id.replace('#', '/') + tag['sender'] = post_id.split('#statuses#')[0] + tag['sender'] = tag['sender'].replace('#', '/') + tag['public'] = public_event + tag['language'] = content_language + post_event.append(tag) + else: + # tag is a place + post_event.append(tag) + + if post_event and day_of_month: + calendar_post_ids.append(post_id) + if not events.get(day_of_month): + events[day_of_month] = [] + events[day_of_month].append(post_event) + events[day_of_month] = \ + _sort_todays_events(events[day_of_month]) + except OSError: + print('EX: get_todays_events failed to read ' + calendar_filename) # if some posts have been deleted then regenerate the calendar file if recreate_events_file: @@ -592,37 +604,41 @@ def day_events_check(base_dir: str, nickname: str, domain: str, return False events_exist = False - with 
open(calendar_filename, 'r', encoding='utf-8') as events_file: - for post_id in events_file: - post_id = remove_eol(post_id) - post_filename = locate_post(base_dir, nickname, domain, post_id) - if not post_filename: - continue + try: + with open(calendar_filename, 'r', encoding='utf-8') as events_file: + for post_id in events_file: + post_id = remove_eol(post_id) + post_filename = \ + locate_post(base_dir, nickname, domain, post_id) + if not post_filename: + continue - post_json_object = load_json(post_filename) - if not _is_happening_post(post_json_object): - continue + post_json_object = load_json(post_filename) + if not _is_happening_post(post_json_object): + continue - for tag in post_json_object['object']['tag']: - if not _is_happening_event(tag): - continue - # this tag is an event or a place - if tag['type'] != 'Event': - continue - # tag is an event - if not tag.get('startTime'): - continue - event_time = \ - date_from_string_format(tag['startTime'], - ["%Y-%m-%dT%H:%M:%S%z"]) - if int(event_time.strftime("%d")) != day_number: - continue - if int(event_time.strftime("%m")) != month_number: - continue - if int(event_time.strftime("%Y")) != year: - continue - events_exist = True - break + for tag in post_json_object['object']['tag']: + if not _is_happening_event(tag): + continue + # this tag is an event or a place + if tag['type'] != 'Event': + continue + # tag is an event + if not tag.get('startTime'): + continue + event_time = \ + date_from_string_format(tag['startTime'], + ["%Y-%m-%dT%H:%M:%S%z"]) + if int(event_time.strftime("%d")) != day_number: + continue + if int(event_time.strftime("%m")) != month_number: + continue + if int(event_time.strftime("%Y")) != year: + continue + events_exist = True + break + except OSError: + print('EX: day_events_check failed to read ' + calendar_filename) return events_exist @@ -648,42 +664,46 @@ def get_this_weeks_events(base_dir: str, nickname: str, domain: str) -> {}: calendar_post_ids = [] recreate_events_file = False - with open(calendar_filename, 'r', encoding='utf-8') as events_file: - for post_id in events_file: - post_id = remove_eol(post_id) - post_filename = locate_post(base_dir, nickname, domain, post_id) - if not post_filename: - recreate_events_file = True - continue - - post_json_object = load_json(post_filename) - if not _is_happening_post(post_json_object): - continue - - post_event = [] - week_day_index = None - for tag in post_json_object['object']['tag']: - if not _is_happening_event(tag): + try: + with open(calendar_filename, 'r', encoding='utf-8') as events_file: + for post_id in events_file: + post_id = remove_eol(post_id) + post_filename = \ + locate_post(base_dir, nickname, domain, post_id) + if not post_filename: + recreate_events_file = True continue - # this tag is an event or a place - if tag['type'] == 'Event': - # tag is an event - if not tag.get('startTime'): + + post_json_object = load_json(post_filename) + if not _is_happening_post(post_json_object): + continue + + post_event = [] + week_day_index = None + for tag in post_json_object['object']['tag']: + if not _is_happening_event(tag): continue - event_time = \ - date_from_string_format(tag['startTime'], - ["%Y-%m-%dT%H:%M:%S%z"]) - if now <= event_time <= end_of_week: - week_day_index = (event_time - now).days() + # this tag is an event or a place + if tag['type'] == 'Event': + # tag is an event + if not tag.get('startTime'): + continue + event_time = \ + date_from_string_format(tag['startTime'], + ["%Y-%m-%dT%H:%M:%S%z"]) + if now <= event_time <= end_of_week: 
+                            week_day_index = (event_time - now).days
+                            post_event.append(tag)
+                        else:
+                            # tag is a place
+                            post_event.append(tag)
-        if post_event and week_day_index:
-            calendar_post_ids.append(post_id)
-            if not events.get(week_day_index):
-                events[week_day_index] = []
-            events[week_day_index].append(post_event)
+                if post_event and week_day_index:
+                    calendar_post_ids.append(post_id)
+                    if not events.get(week_day_index):
+                        events[week_day_index] = []
+                    events[week_day_index].append(post_event)
+    except OSError:
+        print('EX: get_this_weeks_events failed to read ' + calendar_filename)
 
     # if some posts have been deleted then regenerate the calendar file
     if recreate_events_file:
@@ -717,60 +737,64 @@ def get_calendar_events(base_dir: str, nickname: str, domain: str,
 
     calendar_post_ids = []
     recreate_events_file = False
-    with open(calendar_filename, 'r', encoding='utf-8') as events_file:
-        for post_id in events_file:
-            post_id = remove_eol(post_id)
-            post_filename = locate_post(base_dir, nickname, domain, post_id)
-            if not post_filename:
-                recreate_events_file = True
-                continue
-
-            post_json_object = load_json(post_filename)
-            if not post_json_object:
-                continue
-            if not _is_happening_post(post_json_object):
-                continue
-            if only_show_reminders:
-                if not is_reminder(post_json_object):
+    try:
+        with open(calendar_filename, 'r', encoding='utf-8') as events_file:
+            for post_id in events_file:
+                post_id = remove_eol(post_id)
+                post_filename = \
+                    locate_post(base_dir, nickname, domain, post_id)
+                if not post_filename:
+                    recreate_events_file = True
                     continue
 
-            if post_json_object.get('object'):
-                if post_json_object['object'].get('content'):
-                    content = post_json_object['object']['content']
-                    if not _event_text_match(content, text_match):
+                post_json_object = load_json(post_filename)
+                if not post_json_object:
+                    continue
+                if not _is_happening_post(post_json_object):
+                    continue
+                if only_show_reminders:
+                    if not is_reminder(post_json_object):
                         continue
 
-            post_event = []
-            day_of_month = None
-            for tag in post_json_object['object']['tag']:
-                if not _is_happening_event(tag):
-                    continue
-                # this tag is an event or a place
-                if tag['type'] == 'Event':
-                    # tag is an event
-                    if not tag.get('startTime'):
+                if post_json_object.get('object'):
+                    if post_json_object['object'].get('content'):
+                        content = post_json_object['object']['content']
+                        if not _event_text_match(content, text_match):
+                            continue
+
+                post_event = []
+                day_of_month = None
+                for tag in post_json_object['object']['tag']:
+                    if not _is_happening_event(tag):
                         continue
-                event_time = \
-                    date_from_string_format(tag['startTime'],
-                                            ["%Y-%m-%dT%H:%M:%S%z"])
-                if int(event_time.strftime("%Y")) == year and \
-                   int(event_time.strftime("%m")) == month_number:
-                    day_of_month = str(int(event_time.strftime("%d")))
-                    if '#statuses#' in post_id:
-                        tag['post_id'] = post_id.split('#statuses#')[1]
-                        tag['id'] = post_id.replace('#', '/')
-                        tag['sender'] = post_id.split('#statuses#')[0]
-                        tag['sender'] = tag['sender'].replace('#', '/')
+                    # this tag is an event or a place
+                    if tag['type'] == 'Event':
+                        # tag is an event
+                        if not tag.get('startTime'):
+                            continue
+                        event_time = \
+                            date_from_string_format(tag['startTime'],
+                                                    ["%Y-%m-%dT%H:%M:%S%z"])
+                        if int(event_time.strftime("%Y")) == year and \
+                           int(event_time.strftime("%m")) == month_number:
+                            day_of_month = str(int(event_time.strftime("%d")))
+                            if '#statuses#' in post_id:
+                                tag['post_id'] = post_id.split('#statuses#')[1]
+                                tag['id'] = post_id.replace('#', '/')
+                                tag['sender'] 
= post_id.split('#statuses#')[0] + tag['sender'] = tag['sender'].replace('#', '/') + post_event.append(tag) + else: + # tag is a place post_event.append(tag) - else: - # tag is a place - post_event.append(tag) - if post_event and day_of_month: - calendar_post_ids.append(post_id) - if not events.get(day_of_month): - events[day_of_month] = [] - events[day_of_month].append(post_event) + if post_event and day_of_month: + calendar_post_ids.append(post_id) + if not events.get(day_of_month): + events[day_of_month] = [] + events[day_of_month].append(post_event) + except OSError: + print('EX: get_calendar_events failed to read ' + calendar_filename) # if some posts have been deleted then regenerate the calendar file if recreate_events_file: @@ -807,7 +831,7 @@ def remove_calendar_event(base_dir: str, nickname: str, domain: str, with open(calendar_filename, 'r', encoding='utf-8') as fp_cal: lines_str = fp_cal.read() except OSError: - print('EX: unable to read calendar file ' + + print('EX: remove_calendar_event unable to read calendar file ' + calendar_filename) if not lines_str: return diff --git a/inbox.py b/inbox.py index bba79f8cd..f677c169f 100644 --- a/inbox.py +++ b/inbox.py @@ -433,7 +433,7 @@ def store_hash_tags(base_dir: str, nickname: str, domain: str, with open(tags_filename, 'r', encoding='utf-8') as tags_file: content = tags_file.read() except OSError: - pass + print('EX: store_hash_tags failed to read ' + tags_filename) if post_url not in content: content = tag_line + content try: @@ -1226,11 +1226,14 @@ def _notify_moved(base_dir: str, domain_full: str, prev_actor_handle + ' ' + new_actor_handle + ' ' + url if os.path.isfile(moved_file): - with open(moved_file, 'r', - encoding='utf-8') as fp_move: - prev_moved_str = fp_move.read() - if prev_moved_str == moved_str: - continue + try: + with open(moved_file, 'r', + encoding='utf-8') as fp_move: + prev_moved_str = fp_move.read() + if prev_moved_str == moved_str: + continue + except OSError: + print('EX: _notify_moved unable to read ' + moved_file) try: with open(moved_file, 'w+', encoding='utf-8') as fp_move: fp_move.write(moved_str) @@ -3920,10 +3923,13 @@ def _like_notify(base_dir: str, domain: str, # was there a previous like notification? if os.path.isfile(prev_like_file): # is it the same as the current notification ? - with open(prev_like_file, 'r', encoding='utf-8') as fp_like: - prev_like_str = fp_like.read() - if prev_like_str == like_str: - return + try: + with open(prev_like_file, 'r', encoding='utf-8') as fp_like: + prev_like_str = fp_like.read() + if prev_like_str == like_str: + return + except OSError: + print('EX: _like_notify unable to read ' + prev_like_file) try: with open(prev_like_file, 'w+', encoding='utf-8') as fp_like: fp_like.write(like_str) @@ -3985,10 +3991,13 @@ def _reaction_notify(base_dir: str, domain: str, onion_domain: str, # was there a previous reaction notification? if os.path.isfile(prev_reaction_file): # is it the same as the current notification ? 
- with open(prev_reaction_file, 'r', encoding='utf-8') as fp_react: - prev_reaction_str = fp_react.read() - if prev_reaction_str == reaction_str: - return + try: + with open(prev_reaction_file, 'r', encoding='utf-8') as fp_react: + prev_reaction_str = fp_react.read() + if prev_reaction_str == reaction_str: + return + except OSError: + print('EX: _reaction_notify unable to read ' + prev_reaction_file) try: with open(prev_reaction_file, 'w+', encoding='utf-8') as fp_react: fp_react.write(reaction_str) @@ -4015,10 +4024,13 @@ def _notify_post_arrival(base_dir: str, handle: str, url: str) -> None: notify_file = account_dir + '/.newNotifiedPost' if os.path.isfile(notify_file): # check that the same notification is not repeatedly sent - with open(notify_file, 'r', encoding='utf-8') as fp_notify: - existing_notification_message = fp_notify.read() - if url in existing_notification_message: - return + try: + with open(notify_file, 'r', encoding='utf-8') as fp_notify: + existing_notification_message = fp_notify.read() + if url in existing_notification_message: + return + except OSError: + print('EX: _notify_post_arrival unable to read ' + notify_file) try: with open(notify_file, 'w+', encoding='utf-8') as fp_notify: fp_notify.write(url) @@ -4297,12 +4309,16 @@ def _update_last_seen(base_dir: str, handle: str, actor: str) -> None: days_since_epoch = (curr_time - date_epoch()).days # has the value changed? if os.path.isfile(last_seen_filename): - with open(last_seen_filename, 'r', - encoding='utf-8') as last_seen_file: - days_since_epoch_file = last_seen_file.read() - if int(days_since_epoch_file) == days_since_epoch: - # value hasn't changed, so we can save writing anything to file - return + try: + with open(last_seen_filename, 'r', + encoding='utf-8') as last_seen_file: + days_since_epoch_file = last_seen_file.read() + if int(days_since_epoch_file) == days_since_epoch: + # value hasn't changed, so we can save writing + # anything to file + return + except OSError: + print('EX: _update_last_seen unable to read ' + last_seen_filename) try: with open(last_seen_filename, 'w+', encoding='utf-8') as last_seen_file: diff --git a/keys.py b/keys.py index 83ebe4115..384374ba4 100644 --- a/keys.py +++ b/keys.py @@ -19,8 +19,11 @@ def _get_local_private_key(base_dir: str, nickname: str, domain: str) -> str: key_filename = base_dir + '/keys/private/' + handle.lower() + '.key' if not os.path.isfile(key_filename): return None - with open(key_filename, 'r', encoding='utf-8') as pem_file: - return pem_file.read() + try: + with open(key_filename, 'r', encoding='utf-8') as fp_pem: + return fp_pem.read() + except OSError: + print('EX: _get_local_private_key unable to read ' + key_filename) return None @@ -33,8 +36,11 @@ def _get_local_public_key(base_dir: str, nickname: str, domain: str) -> str: key_filename = base_dir + '/keys/public/' + handle.lower() + '.key' if not os.path.isfile(key_filename): return None - with open(key_filename, 'r', encoding='utf-8') as pem_file: - return pem_file.read() + try: + with open(key_filename, 'r', encoding='utf-8') as fp_pem: + return fp_pem.read() + except OSError: + print('EX: _get_local_public_key unable to read ' + key_filename) return None diff --git a/manualapprove.py b/manualapprove.py index 083bc72bf..affda317b 100644 --- a/manualapprove.py +++ b/manualapprove.py @@ -178,8 +178,12 @@ def manual_approve_follow_request(session, session_onion, session_i2p, # is the handle in the requests file? 
approve_follows_str = '' - with open(approve_follows_filename, 'r', encoding='utf-8') as fp_foll: - approve_follows_str = fp_foll.read() + try: + with open(approve_follows_filename, 'r', encoding='utf-8') as fp_foll: + approve_follows_str = fp_foll.read() + except OSError: + print('EX: manual_approve_follow_request unable to read ' + + approve_follows_filename) exists = False approve_handle_full = approve_handle if approve_handle in approve_follows_str: @@ -213,101 +217,107 @@ def manual_approve_follow_request(session, session_onion, session_i2p, '" ' + approve_follows_filename) return - with open(approve_follows_filename + '.new', 'w+', - encoding='utf-8') as approvefilenew: - update_approved_followers = False - follow_activity_filename = None - with open(approve_follows_filename, 'r', - encoding='utf-8') as approvefile: - for handle_of_follow_requester in approvefile: - # is this the approved follow? - approve_handl = approve_handle_full - if not handle_of_follow_requester.startswith(approve_handl): - # this isn't the approved follow so it will remain - # in the requests file - approvefilenew.write(handle_of_follow_requester) - continue + try: + with open(approve_follows_filename + '.new', 'w+', + encoding='utf-8') as approvefilenew: + update_approved_followers = False + follow_activity_filename = None + with open(approve_follows_filename, 'r', + encoding='utf-8') as approvefile: + for handle_of_follow_requester in approvefile: + # is this the approved follow? + appr_handl = approve_handle_full + if not handle_of_follow_requester.startswith(appr_handl): + # this isn't the approved follow so it will remain + # in the requests file + approvefilenew.write(handle_of_follow_requester) + continue - handle_of_follow_requester = \ - remove_eol(handle_of_follow_requester) - handle_of_follow_requester = \ - handle_of_follow_requester.replace('\r', '') - port2 = port - if ':' in handle_of_follow_requester: - port2 = get_port_from_domain(handle_of_follow_requester) - requests_dir = account_dir + '/requests' - follow_activity_filename = \ - requests_dir + '/' + handle_of_follow_requester + '.follow' - if not os.path.isfile(follow_activity_filename): + handle_of_follow_requester = \ + remove_eol(handle_of_follow_requester) + handle_of_follow_requester = \ + handle_of_follow_requester.replace('\r', '') + port2 = port + if ':' in handle_of_follow_requester: + port2 = \ + get_port_from_domain(handle_of_follow_requester) + requests_dir = account_dir + '/requests' + follow_activity_filename = \ + requests_dir + '/' + \ + handle_of_follow_requester + '.follow' + if not os.path.isfile(follow_activity_filename): + update_approved_followers = True + continue + follow_json = load_json(follow_activity_filename) + if not follow_json: + update_approved_followers = True + continue + approve_nickname = approve_handle.split('@')[0] + approve_domain = approve_handle.split('@')[1] + approve_domain = remove_eol(approve_domain) + approve_domain = approve_domain.replace('\r', '') + approve_port = port2 + if ':' in approve_domain: + approve_port = get_port_from_domain(approve_domain) + approve_domain = remove_domain_port(approve_domain) + + curr_domain = domain + curr_port = port + curr_session = session + curr_http_prefix = http_prefix + curr_proxy_type = proxy_type + if onion_domain and \ + not curr_domain.endswith('.onion') and \ + approve_domain.endswith('.onion'): + curr_domain = onion_domain + curr_port = 80 + approve_port = 80 + curr_session = session_onion + curr_http_prefix = 'http' + curr_proxy_type = 'tor' + 
elif (i2p_domain and + not curr_domain.endswith('.i2p') and + approve_domain.endswith('.i2p')): + curr_domain = i2p_domain + curr_port = 80 + approve_port = 80 + curr_session = session_i2p + curr_http_prefix = 'http' + curr_proxy_type = 'i2p' + + if not curr_session: + curr_session = create_session(curr_proxy_type) + + print('Manual follow accept: Sending Accept for ' + + handle + ' follow request from ' + + approve_nickname + '@' + approve_domain) + actor_url = get_actor_from_post(follow_json) + followed_account_accepts(curr_session, base_dir, + curr_http_prefix, + nickname, + curr_domain, curr_port, + approve_nickname, + approve_domain, + approve_port, + actor_url, + federation_list, + follow_json, + send_threads, post_log, + cached_webfingers, + person_cache, + debug, + project_version, False, + signing_priv_key_pem, + domain, + onion_domain, + i2p_domain, + followers_sync_cache, + sites_unavailable, + system_language) update_approved_followers = True - continue - follow_json = load_json(follow_activity_filename) - if not follow_json: - update_approved_followers = True - continue - approve_nickname = approve_handle.split('@')[0] - approve_domain = approve_handle.split('@')[1] - approve_domain = remove_eol(approve_domain) - approve_domain = approve_domain.replace('\r', '') - approve_port = port2 - if ':' in approve_domain: - approve_port = get_port_from_domain(approve_domain) - approve_domain = remove_domain_port(approve_domain) - - curr_domain = domain - curr_port = port - curr_session = session - curr_http_prefix = http_prefix - curr_proxy_type = proxy_type - if onion_domain and \ - not curr_domain.endswith('.onion') and \ - approve_domain.endswith('.onion'): - curr_domain = onion_domain - curr_port = 80 - approve_port = 80 - curr_session = session_onion - curr_http_prefix = 'http' - curr_proxy_type = 'tor' - elif (i2p_domain and - not curr_domain.endswith('.i2p') and - approve_domain.endswith('.i2p')): - curr_domain = i2p_domain - curr_port = 80 - approve_port = 80 - curr_session = session_i2p - curr_http_prefix = 'http' - curr_proxy_type = 'i2p' - - if not curr_session: - curr_session = create_session(curr_proxy_type) - - print('Manual follow accept: Sending Accept for ' + - handle + ' follow request from ' + - approve_nickname + '@' + approve_domain) - actor_url = get_actor_from_post(follow_json) - followed_account_accepts(curr_session, base_dir, - curr_http_prefix, - nickname, - curr_domain, curr_port, - approve_nickname, - approve_domain, - approve_port, - actor_url, - federation_list, - follow_json, - send_threads, post_log, - cached_webfingers, - person_cache, - debug, - project_version, False, - signing_priv_key_pem, - domain, - onion_domain, - i2p_domain, - followers_sync_cache, - sites_unavailable, - system_language) - update_approved_followers = True + except OSError as exc: + print('EX: manual_approve_follow_request unable to write ' + + approve_follows_filename + '.new ' + str(exc)) followers_filename = account_dir + '/followers.txt' if update_approved_followers: diff --git a/mastoapiv1.py b/mastoapiv1.py index 9f71b8ae5..42f0d92b1 100644 --- a/mastoapiv1.py +++ b/mastoapiv1.py @@ -44,20 +44,24 @@ def _meta_data_instance_v1(show_accounts: bool, rules_list = [] rules_filename = data_dir(base_dir) + '/tos.md' if os.path.isfile(rules_filename): - with open(rules_filename, 'r', encoding='utf-8') as fp_rules: - rules_lines = fp_rules.readlines() - rule_ctr = 1 - for line in rules_lines: - line = line.strip() - if not line: - continue - if line.startswith('#'): - continue - 
rules_list.append({ - 'id': str(rule_ctr), - 'text': line - }) - rule_ctr += 1 + try: + with open(rules_filename, 'r', encoding='utf-8') as fp_rules: + rules_lines = fp_rules.readlines() + rule_ctr = 1 + for line in rules_lines: + line = line.strip() + if not line: + continue + if line.startswith('#'): + continue + rules_list.append({ + 'id': str(rule_ctr), + 'text': line + }) + rule_ctr += 1 + except OSError: + print('EX: _meta_data_instance_v1 unable to read ' + + rules_filename) is_bot = False is_group = False diff --git a/media.py b/media.py index 3c0b21d93..f57948dd0 100644 --- a/media.py +++ b/media.py @@ -328,8 +328,11 @@ def _spoof_meta_data(base_dir: str, nickname: str, domain: str, decoy_seed_filename = acct_dir(base_dir, nickname, domain) + '/decoyseed' decoy_seed = 63725 if os.path.isfile(decoy_seed_filename): - with open(decoy_seed_filename, 'r', encoding='utf-8') as fp_seed: - decoy_seed = int(fp_seed.read()) + try: + with open(decoy_seed_filename, 'r', encoding='utf-8') as fp_seed: + decoy_seed = int(fp_seed.read()) + except OSError: + print('EX: _spoof_meta_data unable to read ' + decoy_seed_filename) else: decoy_seed = randint(10000, 10000000000000000) try: @@ -337,7 +340,8 @@ def _spoof_meta_data(base_dir: str, nickname: str, domain: str, encoding='utf-8') as fp_seed: fp_seed.write(str(decoy_seed)) except OSError: - print('EX: unable to write ' + decoy_seed_filename) + print('EX: _spoof_meta_data unable to write ' + + decoy_seed_filename) if os.path.isfile('/usr/bin/exiftool'): print('Spoofing metadata in ' + output_filename + ' using exiftool') diff --git a/migrate.py b/migrate.py index 04c90fec2..f67b63384 100644 --- a/migrate.py +++ b/migrate.py @@ -36,16 +36,20 @@ def _move_following_handles_for_account(base_dir: str, acct_dir(base_dir, nickname, domain) + '/following.txt' if not os.path.isfile(following_filename): return ctr - with open(following_filename, 'r', encoding='utf-8') as fp_foll: - following_handles = fp_foll.readlines() - for follow_handle in following_handles: - follow_handle = follow_handle.strip("\n").strip("\r") - ctr += \ - _update_moved_handle(base_dir, nickname, domain, - follow_handle, session, - http_prefix, cached_webfingers, - debug, signing_priv_key_pem, - block_federated) + try: + with open(following_filename, 'r', encoding='utf-8') as fp_foll: + following_handles = fp_foll.readlines() + for follow_handle in following_handles: + follow_handle = follow_handle.strip("\n").strip("\r") + ctr += \ + _update_moved_handle(base_dir, nickname, domain, + follow_handle, session, + http_prefix, cached_webfingers, + debug, signing_priv_key_pem, + block_federated) + except OSError: + print('EX: _move_following_handles_for_account unable to read ' + + following_filename) return ctr @@ -135,8 +139,12 @@ def _update_moved_handle(base_dir: str, nickname: str, domain: str, acct_dir(base_dir, nickname, domain) + '/following.txt' if os.path.isfile(following_filename): following_handles = [] - with open(following_filename, 'r', encoding='utf-8') as foll1: - following_handles = foll1.readlines() + try: + with open(following_filename, 'r', encoding='utf-8') as foll1: + following_handles = foll1.readlines() + except OSError: + print('EX: _update_moved_handle unable to read ' + + following_filename) moved_to_handle = moved_to_nickname + '@' + moved_to_domain_full handle_lower = handle.lower() diff --git a/newsdaemon.py b/newsdaemon.py index 50c9ce108..5085c6af3 100644 --- a/newsdaemon.py +++ b/newsdaemon.py @@ -394,8 +394,12 @@ def 
_newswire_hashtag_processing(base_dir: str, post_json_object: {}, if not os.path.isfile(rules_filename): return True rules = [] - with open(rules_filename, 'r', encoding='utf-8') as fp_rules: - rules = fp_rules.readlines() + try: + with open(rules_filename, 'r', encoding='utf-8') as fp_rules: + rules = fp_rules.readlines() + except OSError: + print('EX: _newswire_hashtag_processing unable to read ' + + rules_filename) domain_full = get_full_domain(domain, port) @@ -467,35 +471,44 @@ def _create_news_mirror(base_dir: str, domain: str, # no index for mirrors found return True removals = [] - with open(mirror_index_filename, 'r', encoding='utf-8') as index_file: - # remove the oldest directories - ctr = 0 - while no_of_dirs > max_mirrored_articles: - ctr += 1 - if ctr > 5000: - # escape valve - break + try: + with open(mirror_index_filename, 'r', + encoding='utf-8') as fp_index: + # remove the oldest directories + ctr = 0 + while no_of_dirs > max_mirrored_articles: + ctr += 1 + if ctr > 5000: + # escape valve + break - post_id = index_file.readline() - if not post_id: - continue - post_id = post_id.strip() - mirror_article_dir = mirror_dir + '/' + post_id - if os.path.isdir(mirror_article_dir): - rmtree(mirror_article_dir, - ignore_errors=False, onexc=None) - removals.append(post_id) - no_of_dirs -= 1 + post_id = fp_index.readline() + if not post_id: + continue + post_id = post_id.strip() + mirror_article_dir = mirror_dir + '/' + post_id + if os.path.isdir(mirror_article_dir): + rmtree(mirror_article_dir, + ignore_errors=False, onexc=None) + removals.append(post_id) + no_of_dirs -= 1 + except OSError as exc: + print('EX: _create_news_mirror unable to read ' + + mirror_index_filename + ' ' + str(exc)) # remove the corresponding index entries if removals: index_content = '' - with open(mirror_index_filename, 'r', - encoding='utf-8') as index_file: - index_content = index_file.read() - for remove_post_id in removals: - index_content = \ - index_content.replace(remove_post_id + '\n', '') + try: + with open(mirror_index_filename, 'r', + encoding='utf-8') as index_file: + index_content = index_file.read() + for remove_post_id in removals: + index_content = \ + index_content.replace(remove_post_id + '\n', '') + except OSError: + print('EX: _create_news_mirror unable to read ' + + mirror_index_filename) try: with open(mirror_index_filename, 'w+', encoding='utf-8') as index_file: diff --git a/newswire.py b/newswire.py index 1f7f03c01..210cfe0ed 100644 --- a/newswire.py +++ b/newswire.py @@ -385,9 +385,14 @@ def load_hashtag_categories(base_dir: str, language: str) -> None: if not os.path.isfile(hashtag_categories_filename): return - with open(hashtag_categories_filename, 'r', encoding='utf-8') as fp_cat: - xml_str = fp_cat.read() - _xml2str_to_hashtag_categories(base_dir, xml_str, 1024, True) + try: + with open(hashtag_categories_filename, 'r', + encoding='utf-8') as fp_cat: + xml_str = fp_cat.read() + _xml2str_to_hashtag_categories(base_dir, xml_str, 1024, True) + except OSError: + print('EX: load_hashtag_categories unable to read ' + + hashtag_categories_filename) def _xml2str_to_hashtag_categories(base_dir: str, xml_str: str, @@ -1618,68 +1623,73 @@ def _add_account_blogs_to_newswire(base_dir: str, nickname: str, domain: str, if os.path.isfile(moderated_filename): moderated = True - with open(index_filename, 'r', encoding='utf-8') as index_file: - post_filename = 'start' - ctr = 0 - while post_filename: - post_filename = index_file.readline() - if post_filename: - # if this is a full path then 
remove the directories - if '/' in post_filename: - post_filename = post_filename.split('/')[-1] + try: + with open(index_filename, 'r', encoding='utf-8') as index_file: + post_filename = 'start' + ctr = 0 + while post_filename: + post_filename = index_file.readline() + if post_filename: + # if this is a full path then remove the directories + if '/' in post_filename: + post_filename = post_filename.split('/')[-1] - # filename of the post without any extension or path - # This should also correspond to any index entry in - # the posts cache - post_url = remove_eol(post_filename) - post_url = post_url.replace('.json', '').strip() + # filename of the post without any extension or path + # This should also correspond to any index entry in + # the posts cache + post_url = remove_eol(post_filename) + post_url = post_url.replace('.json', '').strip() - # read the post from file - full_post_filename = \ - locate_post(base_dir, nickname, - domain, post_url, False) - if not full_post_filename: - print('Unable to locate post for newswire ' + post_url) - ctr += 1 - if ctr >= max_blogs_per_account: - break - continue + # read the post from file + full_post_filename = \ + locate_post(base_dir, nickname, + domain, post_url, False) + if not full_post_filename: + print('Unable to locate post for newswire ' + post_url) + ctr += 1 + if ctr >= max_blogs_per_account: + break + continue - post_json_object = None - if full_post_filename: - post_json_object = load_json(full_post_filename) - if _is_newswire_blog_post(post_json_object): - published = post_json_object['object']['published'] - published = published.replace('T', ' ') - published = published.replace('Z', '+00:00') - votes = [] - if os.path.isfile(full_post_filename + '.votes'): - votes = load_json(full_post_filename + '.votes') - content = \ - get_base_content_from_post(post_json_object, - system_language) - description = first_paragraph_from_string(content) - description = remove_html(description) - tags_from_post = _get_hashtags_from_post(post_json_object) - summary = post_json_object['object']['summary'] - url_str = \ - get_url_from_post(post_json_object['object']['url']) - url2 = remove_html(url_str) - fediverse_handle = '' - extra_links = [] - _add_newswire_dict_entry(base_dir, - newswire, published, - summary, url2, - votes, full_post_filename, - description, moderated, False, - tags_from_post, - max_tags, session, debug, - None, system_language, - fediverse_handle, extra_links) + post_json_object = None + if full_post_filename: + post_json_object = load_json(full_post_filename) + if _is_newswire_blog_post(post_json_object): + published = post_json_object['object']['published'] + published = published.replace('T', ' ') + published = published.replace('Z', '+00:00') + votes = [] + if os.path.isfile(full_post_filename + '.votes'): + votes = load_json(full_post_filename + '.votes') + content = \ + get_base_content_from_post(post_json_object, + system_language) + description = first_paragraph_from_string(content) + description = remove_html(description) + tags_from_post = \ + _get_hashtags_from_post(post_json_object) + summary = post_json_object['object']['summary'] + url2 = post_json_object['object']['url'] + url_str = get_url_from_post(url2) + url3 = remove_html(url_str) + fediverse_handle = '' + extra_links = [] + _add_newswire_dict_entry(base_dir, + newswire, published, + summary, url3, + votes, full_post_filename, + description, moderated, False, + tags_from_post, + max_tags, session, debug, + None, system_language, + fediverse_handle, 
extra_links)
 
-                ctr += 1
-                if ctr >= max_blogs_per_account:
-                    break
+                    ctr += 1
+                    if ctr >= max_blogs_per_account:
+                        break
+    except OSError as exc:
+        print('EX: _add_account_blogs_to_newswire unable to read ' +
+              index_filename + ' ' + str(exc))
 
 
 def _add_blogs_to_newswire(base_dir: str, domain: str, newswire: {},
@@ -1755,8 +1765,12 @@ def get_dict_from_newswire(session, base_dir: str, domain: str,
 
     # add rss feeds
     rss_feed = []
-    with open(subscriptions_filename, 'r', encoding='utf-8') as fp_sub:
-        rss_feed = fp_sub.readlines()
+    try:
+        with open(subscriptions_filename, 'r', encoding='utf-8') as fp_sub:
+            rss_feed = fp_sub.readlines()
+    except OSError:
+        print('EX: get_dict_from_newswire unable to read ' +
+              subscriptions_filename)
     result = {}
     for url in rss_feed:
         url = url.strip()
diff --git a/person.py b/person.py
index 120894aed..8ece38a7e 100644
--- a/person.py
+++ b/person.py
@@ -1273,8 +1273,11 @@ def reenable_account(base_dir: str, nickname: str) -> None:
     suspended_filename = data_dir(base_dir) + '/suspended.txt'
     if os.path.isfile(suspended_filename):
         lines = []
-        with open(suspended_filename, 'r', encoding='utf-8') as fp_sus:
-            lines = fp_sus.readlines()
+        try:
+            with open(suspended_filename, 'r', encoding='utf-8') as fp_sus:
+                lines = fp_sus.readlines()
+        except OSError:
+            print('EX: reenable_account unable to read ' + suspended_filename)
         try:
             with open(suspended_filename, 'w+', encoding='utf-8') as fp_sus:
                 for suspended in lines:
@@ -1298,8 +1301,11 @@ def suspend_account(base_dir: str, nickname: str, domain: str) -> None:
     # Don't suspend moderators
     moderators_file = data_dir(base_dir) + '/moderators.txt'
     if os.path.isfile(moderators_file):
-        with open(moderators_file, 'r', encoding='utf-8') as fp_mod:
-            lines = fp_mod.readlines()
+        try:
+            with open(moderators_file, 'r', encoding='utf-8') as fp_mod:
+                lines = fp_mod.readlines()
+        except OSError:
+            print('EX: suspend_account unable to read ' + moderators_file)
         for moderator in lines:
             if moderator.strip('\n').strip('\r') == nickname:
                 return
@@ -1319,8 +1325,11 @@
 
     suspended_filename = data_dir(base_dir) + '/suspended.txt'
     if os.path.isfile(suspended_filename):
-        with open(suspended_filename, 'r', encoding='utf-8') as fp_sus:
-            lines = fp_sus.readlines()
+        try:
+            with open(suspended_filename, 'r', encoding='utf-8') as fp_sus:
+                lines = fp_sus.readlines()
+        except OSError:
+            print('EX: suspend_account unable to read 2 ' + suspended_filename)
         for suspended in lines:
             if suspended.strip('\n').strip('\r') == nickname:
                 return
@@ -1356,8 +1365,12 @@ def can_remove_post(base_dir: str,
 
     # is the post by a moderator?
moderators_file = data_dir(base_dir) + '/moderators.txt' if os.path.isfile(moderators_file): - with open(moderators_file, 'r', encoding='utf-8') as fp_mod: - lines = fp_mod.readlines() + lines = [] + try: + with open(moderators_file, 'r', encoding='utf-8') as fp_mod: + lines = fp_mod.readlines() + except OSError: + print('EX: can_remove_post unable to read ' + moderators_file) for moderator in lines: if domain_full + '/users/' + \ moderator.strip('\n') + '/' in post_id: @@ -1389,8 +1402,12 @@ def _remove_tags_for_nickname(base_dir: str, nickname: str, if not text_in_file(match_str, tag_filename): continue lines = [] - with open(tag_filename, 'r', encoding='utf-8') as fp_tag: - lines = fp_tag.readlines() + try: + with open(tag_filename, 'r', encoding='utf-8') as fp_tag: + lines = fp_tag.readlines() + except OSError: + print('EX: _remove_tags_for_nickname unable to read ' + + tag_filename) try: with open(tag_filename, 'w+', encoding='utf-8') as tag_file: for tagline in lines: @@ -1415,8 +1432,12 @@ def remove_account(base_dir: str, nickname: str, # Don't remove moderators moderators_file = data_dir(base_dir) + '/moderators.txt' if os.path.isfile(moderators_file): - with open(moderators_file, 'r', encoding='utf-8') as fp_mod: - lines = fp_mod.readlines() + lines = [] + try: + with open(moderators_file, 'r', encoding='utf-8') as fp_mod: + lines = fp_mod.readlines() + except OSError: + print('EX: remove_account unable to read ' + moderators_file) for moderator in lines: if moderator.strip('\n') == nickname: return False @@ -1542,26 +1563,32 @@ def is_person_snoozed(base_dir: str, nickname: str, domain: str, return False # remove the snooze entry if it has timed out replace_str = None - with open(snoozed_filename, 'r', encoding='utf-8') as snoozed_file: - for line in snoozed_file: - # is this the entry for the actor? - if line.startswith(snooze_actor + ' '): - snoozed_time_str1 = line.split(' ')[1] - snoozed_time_str = remove_eol(snoozed_time_str1) - # is there a time appended? - if snoozed_time_str.isdigit(): - snoozed_time = int(snoozed_time_str) - curr_time = int(time.time()) - # has the snooze timed out? - if int(curr_time - snoozed_time) > 60 * 60 * 24: + try: + with open(snoozed_filename, 'r', encoding='utf-8') as fp_snoozed: + for line in fp_snoozed: + # is this the entry for the actor? + if line.startswith(snooze_actor + ' '): + snoozed_time_str1 = line.split(' ')[1] + snoozed_time_str = remove_eol(snoozed_time_str1) + # is there a time appended? + if snoozed_time_str.isdigit(): + snoozed_time = int(snoozed_time_str) + curr_time = int(time.time()) + # has the snooze timed out? 
+ if int(curr_time - snoozed_time) > 60 * 60 * 24: + replace_str = line + else: replace_str = line - else: - replace_str = line - break + break + except OSError: + print('EX: is_person_snoozed unable to read ' + snoozed_filename) if replace_str: content = None - with open(snoozed_filename, 'r', encoding='utf-8') as snoozed_file: - content = snoozed_file.read().replace(replace_str, '') + try: + with open(snoozed_filename, 'r', encoding='utf-8') as fp_snoozed: + content = fp_snoozed.read().replace(replace_str, '') + except OSError: + print('EX: is_person_snoozed unable to read 2 ' + snoozed_filename) if content: try: with open(snoozed_filename, 'w+', @@ -1610,15 +1637,21 @@ def person_unsnooze(base_dir: str, nickname: str, domain: str, if not text_in_file(snooze_actor + ' ', snoozed_filename): return replace_str = None - with open(snoozed_filename, 'r', encoding='utf-8') as snoozed_file: - for line in snoozed_file: - if line.startswith(snooze_actor + ' '): - replace_str = line - break + try: + with open(snoozed_filename, 'r', encoding='utf-8') as fp_snoozed: + for line in fp_snoozed: + if line.startswith(snooze_actor + ' '): + replace_str = line + break + except OSError: + print('EX: person_unsnooze unable to read ' + snoozed_filename) if replace_str: content = None - with open(snoozed_filename, 'r', encoding='utf-8') as snoozed_file: - content = snoozed_file.read().replace(replace_str, '') + try: + with open(snoozed_filename, 'r', encoding='utf-8') as fp_snoozed: + content = fp_snoozed.read().replace(replace_str, '') + except OSError: + print('EX: person_unsnooze unable to read 2 ' + snoozed_filename) if content is not None: try: with open(snoozed_filename, 'w+', @@ -1658,9 +1691,13 @@ def get_person_notes(base_dir: str, nickname: str, domain: str, acct_dir(base_dir, nickname, domain) + \ '/notes/' + handle + '.txt' if os.path.isfile(person_notes_filename): - with open(person_notes_filename, 'r', - encoding='utf-8') as fp_notes: - person_notes = fp_notes.read() + try: + with open(person_notes_filename, 'r', + encoding='utf-8') as fp_notes: + person_notes = fp_notes.read() + except OSError: + print('EX: get_person_notes unable to read ' + + person_notes_filename) return person_notes @@ -1907,8 +1944,11 @@ def get_person_avatar_url(base_dir: str, person_url: str, if ext != 'svg': return im_path content = '' - with open(im_filename, 'r', encoding='utf-8') as fp_im: - content = fp_im.read() + try: + with open(im_filename, 'r', encoding='utf-8') as fp_im: + content = fp_im.read() + except OSError: + print('EX: get_person_avatar_url unable to read ' + im_filename) if not dangerous_svg(content, False): return im_path diff --git a/posts.py b/posts.py index bf64e510e..a20835fb9 100644 --- a/posts.py +++ b/posts.py @@ -168,18 +168,23 @@ def is_moderator(base_dir: str, nickname: str) -> bool: return True return False - with open(moderators_file, 'r', encoding='utf-8') as fp_mod: - lines = fp_mod.readlines() - if len(lines) == 0: - admin_name = get_config_param(base_dir, 'admin') - if not admin_name: - return False - if admin_name == nickname: - return True - for moderator in lines: - moderator = moderator.strip('\n').strip('\r') - if moderator == nickname: - return True + lines = [] + try: + with open(moderators_file, 'r', encoding='utf-8') as fp_mod: + lines = fp_mod.readlines() + except OSError: + print('EX: is_moderator unable to read ' + moderators_file) + + if len(lines) == 0: + admin_name = get_config_param(base_dir, 'admin') + if not admin_name: + return False + if admin_name == nickname: + 
return True + for moderator in lines: + moderator = moderator.strip('\n').strip('\r') + if moderator == nickname: + return True return False @@ -193,13 +198,16 @@ def no_of_followers_on_domain(base_dir: str, handle: str, return 0 ctr = 0 - with open(filename, 'r', encoding='utf-8') as followers_file: - for follower_handle in followers_file: - if '@' in follower_handle: - follower_domain = follower_handle.split('@')[1] - follower_domain = remove_eol(follower_domain) - if domain == follower_domain: - ctr += 1 + try: + with open(filename, 'r', encoding='utf-8') as followers_file: + for follower_handle in followers_file: + if '@' in follower_handle: + follower_domain = follower_handle.split('@')[1] + follower_domain = remove_eol(follower_domain) + if domain == follower_domain: + ctr += 1 + except OSError: + print('EX: no_of_followers_on_domain unable to read ' + filename) return ctr @@ -1991,8 +1999,12 @@ def get_pinned_post_as_json(base_dir: str, http_prefix: str, actor = local_actor_url(http_prefix, nickname, domain_full) if os.path.isfile(pinned_filename): pinned_content = None - with open(pinned_filename, 'r', encoding='utf-8') as pin_file: - pinned_content = pin_file.read() + try: + with open(pinned_filename, 'r', encoding='utf-8') as fp_pin: + pinned_content = fp_pin.read() + except OSError: + print('EX: get_pinned_post_as_json unable to read ' + + pinned_filename) if pinned_content: pinned_post_json = { 'atomUri': actor + '/pinned', @@ -2214,23 +2226,28 @@ def _append_citations_to_blog_post(base_dir: str, if not os.path.isfile(citations_filename): return citations_separator = '#####' - with open(citations_filename, 'r', encoding='utf-8') as fp_cit: - citations = fp_cit.readlines() - for line in citations: - if citations_separator not in line: - continue - sections = line.strip().split(citations_separator) - if len(sections) != 3: - continue - # date_str = sections[0] - title = sections[1] - link = sections[2] - tag_json = { - "type": "Article", - "name": title, - "url": link - } - blog_json['object']['tag'].append(tag_json) + citations = [] + try: + with open(citations_filename, 'r', encoding='utf-8') as fp_cit: + citations = fp_cit.readlines() + except OSError: + print('EX: _append_citations_to_blog_post unable to read ' + + citations_filename) + for line in citations: + if citations_separator not in line: + continue + sections = line.strip().split(citations_separator) + if len(sections) != 3: + continue + # date_str = sections[0] + title = sections[1] + link = sections[2] + tag_json = { + "type": "Article", + "name": title, + "url": link + } + blog_json['object']['tag'].append(tag_json) def create_blog_post(base_dir: str, @@ -2634,36 +2651,39 @@ def create_report_post(base_dir: str, moderators_list = [] moderators_file = data_dir(base_dir) + '/moderators.txt' if os.path.isfile(moderators_file): - with open(moderators_file, 'r', encoding='utf-8') as fp_mod: - for line in fp_mod: - line = line.strip('\n').strip('\r') - if line.startswith('#'): - continue - if line.startswith('/users/'): - line = line.replace('users', '') - if line.startswith('@'): - line = line[1:] - if '@' in line: - nick = line.split('@')[0] - moderator_actor = \ - local_actor_url(http_prefix, nick, domain_full) - if moderator_actor not in moderators_list: - moderators_list.append(moderator_actor) - continue - if line.startswith('http') or \ - line.startswith('ipfs') or \ - line.startswith('ipns') or \ - line.startswith('hyper'): - # must be a local address - no remote moderators - if '://' + domain_full + '/' in 
line:
-                    if line not in moderators_list:
-                        moderators_list.append(line)
-            else:
-                if '/' not in line:
-                    moderator_actor = \
-                        local_actor_url(http_prefix, line, domain_full)
-                    if moderator_actor not in moderators_list:
-                        moderators_list.append(moderator_actor)
+    try:
+        with open(moderators_file, 'r', encoding='utf-8') as fp_mod:
+            for line in fp_mod:
+                line = line.strip('\n').strip('\r')
+                if line.startswith('#'):
+                    continue
+                if line.startswith('/users/'):
+                    line = line.replace('users', '')
+                if line.startswith('@'):
+                    line = line[1:]
+                if '@' in line:
+                    nick = line.split('@')[0]
+                    moderator_actor = \
+                        local_actor_url(http_prefix, nick, domain_full)
+                    if moderator_actor not in moderators_list:
+                        moderators_list.append(moderator_actor)
+                    continue
+                if line.startswith('http') or \
+                   line.startswith('ipfs') or \
+                   line.startswith('ipns') or \
+                   line.startswith('hyper'):
+                    # must be a local address - no remote moderators
+                    if '://' + domain_full + '/' in line:
+                        if line not in moderators_list:
+                            moderators_list.append(line)
+                else:
+                    if '/' not in line:
+                        moderator_actor = \
+                            local_actor_url(http_prefix, line, domain_full)
+                        if moderator_actor not in moderators_list:
+                            moderators_list.append(moderator_actor)
+    except OSError:
+        print('EX: create_report_post unable to read ' + moderators_file)
     if len(moderators_list) == 0:
         # if there are no moderators then the admin becomes the moderator
         admin_nickname = get_config_param(base_dir, 'admin')
@@ -3305,17 +3325,21 @@ def group_followers_by_domain(base_dir: str, nickname: str, domain: str) -> {}:
     if not os.path.isfile(followers_filename):
         return None
     grouped = {}
-    with open(followers_filename, 'r', encoding='utf-8') as foll_file:
-        for follower_handle in foll_file:
-            if '@' not in follower_handle:
-                continue
-            fhandle1 = follower_handle.strip()
-            fhandle = remove_eol(fhandle1)
-            follower_domain = fhandle.split('@')[1]
-            if not grouped.get(follower_domain):
-                grouped[follower_domain] = [fhandle]
-            else:
-                grouped[follower_domain].append(fhandle)
+    try:
+        with open(followers_filename, 'r', encoding='utf-8') as fp_foll:
+            for follower_handle in fp_foll:
+                if '@' not in follower_handle:
+                    continue
+                fhandle1 = follower_handle.strip()
+                fhandle = remove_eol(fhandle1)
+                follower_domain = fhandle.split('@')[1]
+                if not grouped.get(follower_domain):
+                    grouped[follower_domain] = [fhandle]
+                else:
+                    grouped[follower_domain].append(fhandle)
+    except OSError:
+        print('EX: group_followers_by_domain unable to read ' +
+              followers_filename)
     return grouped
 
@@ -4339,6 +4363,8 @@ def create_outbox(base_dir: str, nickname: str, domain: str,
 def create_moderation(base_dir: str, nickname: str, domain: str, port: int,
                       http_prefix: str, items_per_page: int,
                       header_only: bool, page_number: int) -> {}:
+    """ Creates a moderation timeline
+    """
     box_dir = create_person_dir(nickname, domain, base_dir, 'inbox')
     boxname = 'moderation'
 
@@ -4369,9 +4395,14 @@
     if is_moderator(base_dir, nickname):
         moderation_index_file = data_dir(base_dir) + '/moderation.txt'
         if os.path.isfile(moderation_index_file):
-            with open(moderation_index_file, 'r',
-                      encoding='utf-8') as index_file:
-                lines = index_file.readlines()
+            lines = []
+            try:
+                with open(moderation_index_file, 'r',
+                          encoding='utf-8') as index_file:
+                    lines = index_file.readlines()
+            except OSError:
+                print('EX: create_moderation unable to read ' +
+                      moderation_index_file)
             box_header['totalItems'] = len(lines)
             if header_only:
                 return box_header
@@ -4499,23 +4530,29 @@ def _add_post_to_timeline(file_path: str, boxname: str,
                           posts_in_box: [], box_actor: str) -> bool:
     """ Reads a post from file and 
@@ -4499,23 +4530,29 @@ def _add_post_to_timeline(file_path: str, boxname: str,
                           posts_in_box: [], box_actor: str) -> bool:
     """ Reads a post from file and decides whether it is valid
     """
-    with open(file_path, 'r', encoding='utf-8') as post_file:
-        post_str = post_file.read()
+    post_str = ''
+    try:
+        with open(file_path, 'r', encoding='utf-8') as fp_post:
+            post_str = fp_post.read()
+    except OSError:
+        print('EX: _add_post_to_timeline unable to read ' + file_path)
 
-        if file_path.endswith('.json'):
-            replies_filename = file_path.replace('.json', '.replies')
-            if os.path.isfile(replies_filename):
-                # append a replies identifier, which will later be removed
-                post_str += '<hasReplies>'
+    if not post_str:
+        return False
 
-            mitm_filename = file_path.replace('.json', '.mitm')
-            if os.path.isfile(mitm_filename):
-                # append a mitm identifier, which will later be removed
-                post_str += '<postmitm>'
+    if file_path.endswith('.json'):
+        replies_filename = file_path.replace('.json', '.replies')
+        if os.path.isfile(replies_filename):
+            # append a replies identifier, which will later be removed
+            post_str += '<hasReplies>'
 
-        return _add_post_string_to_timeline(post_str, boxname, posts_in_box,
-                                            box_actor)
-    return False
+        mitm_filename = file_path.replace('.json', '.mitm')
+        if os.path.isfile(mitm_filename):
+            # append a mitm identifier, which will later be removed
+            post_str += '<postmitm>'
+
+    return _add_post_string_to_timeline(post_str, boxname,
+                                        posts_in_box, box_actor)
 
 
 def remove_post_interactions(post_json_object: {}, force: bool) -> bool:
@@ -4641,111 +4678,115 @@ def _create_box_items(base_dir: str,
         first_post_id = first_post_id.replace('--', '#')
         first_post_id = first_post_id.replace('/', '#')
 
-    with open(index_filename, 'r', encoding='utf-8') as index_file:
-        posts_added_to_timeline = 0
-        while posts_added_to_timeline < items_per_page:
-            post_filename = index_file.readline()
+    try:
+        with open(index_filename, 'r', encoding='utf-8') as index_file:
+            posts_added_to_timeline = 0
+            while posts_added_to_timeline < items_per_page:
+                post_filename = index_file.readline()
 
-            if not post_filename:
-                break
+                if not post_filename:
+                    break
 
-            # if a first post is specified then wait until it is found
-            # before starting to generate the timeline
-            if first_post_id and total_posts_count == 0:
-                if first_post_id not in post_filename:
-                    continue
-                total_posts_count = \
-                    int((page_number - 1) * items_per_page)
+                # if a first post is specified then wait until it is found
+                # before starting to generate the timeline
+                if first_post_id and total_posts_count == 0:
+                    if first_post_id not in post_filename:
+                        continue
+                    total_posts_count = \
+                        int((page_number - 1) * items_per_page)
 
-            # Has this post passed through the newswire voting stage?
-            if not _passed_newswire_voting(newswire_votes_threshold,
-                                           base_dir, domain,
-                                           post_filename,
-                                           positive_voting,
-                                           voting_time_mins):
-                continue
+                # Has this post passed through the newswire voting stage?
+ if not _passed_newswire_voting(newswire_votes_threshold, + base_dir, domain, + post_filename, + positive_voting, + voting_time_mins): continue - # if this is a full path then remove the directories - if '/' in post_filename: - post_filename = post_filename.split('/')[-1] + # Skip through any posts previous to the current page + if not first_post_id: + if total_posts_count < \ + int((page_number - 1) * items_per_page): + total_posts_count += 1 + continue - # filename of the post without any extension or path - # This should also correspond to any index entry in - # the posts cache - post_url = remove_eol(post_filename) - post_url = post_url.replace('.json', '').strip() + # if this is a full path then remove the directories + if '/' in post_filename: + post_filename = post_filename.split('/')[-1] - # is this a duplicate? - if post_url in post_urls_in_box: - continue + # filename of the post without any extension or path + # This should also correspond to any index entry in + # the posts cache + post_url = remove_eol(post_filename) + post_url = post_url.replace('.json', '').strip() - # is the post cached in memory? - if recent_posts_cache.get('index'): - if post_url in recent_posts_cache['index']: - if recent_posts_cache['json'].get(post_url): - url = recent_posts_cache['json'][post_url] - if _add_post_string_to_timeline(url, - boxname, - posts_in_box, - box_actor): - total_posts_count += 1 - posts_added_to_timeline += 1 - post_urls_in_box.append(post_url) - continue - print('Post not added to timeline') - - # read the post from file - full_post_filename = \ - locate_post(base_dir, nickname, - original_domain, post_url, False) - if full_post_filename: - # has the post been rejected? - if os.path.isfile(full_post_filename + '.reject'): + # is this a duplicate? + if post_url in post_urls_in_box: continue - if _add_post_to_timeline(full_post_filename, boxname, - posts_in_box, box_actor): - posts_added_to_timeline += 1 - total_posts_count += 1 - post_urls_in_box.append(post_url) + # is the post cached in memory? + if recent_posts_cache.get('index'): + if post_url in recent_posts_cache['index']: + if recent_posts_cache['json'].get(post_url): + url = recent_posts_cache['json'][post_url] + if _add_post_string_to_timeline(url, + boxname, + posts_in_box, + box_actor): + total_posts_count += 1 + posts_added_to_timeline += 1 + post_urls_in_box.append(post_url) + continue + print('Post not added to timeline') + + # read the post from file + full_post_filename = \ + locate_post(base_dir, nickname, + original_domain, post_url, False) + if full_post_filename: + # has the post been rejected? 
+ if os.path.isfile(full_post_filename + '.reject'): + continue + + if _add_post_to_timeline(full_post_filename, boxname, + posts_in_box, box_actor): + posts_added_to_timeline += 1 + total_posts_count += 1 + post_urls_in_box.append(post_url) + else: + print('WARN: Unable to add post ' + post_url + + ' nickname ' + nickname + + ' timeline ' + boxname) else: - print('WARN: Unable to add post ' + post_url + - ' nickname ' + nickname + - ' timeline ' + boxname) - else: - if timeline_nickname != nickname: - # if this is the features timeline - full_post_filename = \ - locate_post(base_dir, timeline_nickname, - original_domain, post_url, False) - if full_post_filename: - if _add_post_to_timeline(full_post_filename, - boxname, - posts_in_box, box_actor): - posts_added_to_timeline += 1 - total_posts_count += 1 - post_urls_in_box.append(post_url) + if timeline_nickname != nickname: + # if this is the features timeline + full_post_filename = \ + locate_post(base_dir, timeline_nickname, + original_domain, post_url, False) + if full_post_filename: + if _add_post_to_timeline(full_post_filename, + boxname, + posts_in_box, box_actor): + posts_added_to_timeline += 1 + total_posts_count += 1 + post_urls_in_box.append(post_url) + else: + print('WARN: Unable to add features post ' + + post_url + ' nickname ' + nickname + + ' timeline ' + boxname) else: - print('WARN: Unable to add features post ' + - post_url + ' nickname ' + nickname + - ' timeline ' + boxname) + print('WARN: features timeline. ' + + 'Unable to locate post ' + post_url) else: - print('WARN: features timeline. ' + - 'Unable to locate post ' + post_url) - else: - if timeline_nickname == 'news': - print('WARN: Unable to locate news post ' + - post_url + ' nickname ' + nickname) - else: - print('WARN: Unable to locate post ' + post_url + - ' nickname ' + nickname) + if timeline_nickname == 'news': + print('WARN: Unable to locate news post ' + + post_url + ' nickname ' + nickname) + else: + print('WARN: Unable to locate post ' + post_url + + ' nickname ' + nickname) + except OSError as exc: + print('EX: _create_box_items unable to read ' + index_filename + + ' ' + str(exc)) return total_posts_count, posts_added_to_timeline @@ -5732,8 +5773,12 @@ def get_public_post_domains_blocked(session, base_dir: str, # read the blocked domains as a single string blocked_str = '' - with open(blocking_filename, 'r', encoding='utf-8') as fp_block: - blocked_str = fp_block.read() + try: + with open(blocking_filename, 'r', encoding='utf-8') as fp_block: + blocked_str = fp_block.read() + except OSError: + print('EX: get_public_post_domains_blocked unable to read ' + + blocking_filename) blocked_domains = [] for domain_name in post_domains: @@ -5784,9 +5829,13 @@ def check_domains(session, base_dir: str, update_follower_warnings = False follower_warning_str = '' if os.path.isfile(follower_warning_filename): - with open(follower_warning_filename, 'r', - encoding='utf-8') as fp_warn: - follower_warning_str = fp_warn.read() + try: + with open(follower_warning_filename, 'r', + encoding='utf-8') as fp_warn: + follower_warning_str = fp_warn.read() + except OSError: + print('EX: check_domains unable to read ' + + follower_warning_filename) if single_check: # checks a single random non-mutual @@ -5852,61 +5901,66 @@ def populate_replies_json(base_dir: str, nickname: str, domain: str, pub_str = 'https://www.w3.org/ns/activitystreams#Public' # populate the items list with replies replies_boxes = ('outbox', 'inbox') - with open(post_replies_filename, 'r', encoding='utf-8') as 
replies_file: - for message_id in replies_file: - reply_found = False - # examine inbox and outbox - for boxname in replies_boxes: - message_id2 = remove_eol(message_id) - search_filename = \ - acct_dir(base_dir, nickname, domain) + '/' + \ - boxname + '/' + \ - message_id2.replace('/', '#') + '.json' - if os.path.isfile(search_filename): - if authorized or \ - text_in_file(pub_str, search_filename): - post_json_object = load_json(search_filename) - if post_json_object: - if post_json_object['object'].get('cc'): + try: + with open(post_replies_filename, 'r', + encoding='utf-8') as replies_file: + for message_id in replies_file: + reply_found = False + # examine inbox and outbox + for boxname in replies_boxes: + message_id2 = remove_eol(message_id) + search_filename = \ + acct_dir(base_dir, nickname, domain) + '/' + \ + boxname + '/' + \ + message_id2.replace('/', '#') + '.json' + if os.path.isfile(search_filename): + if authorized or \ + text_in_file(pub_str, search_filename): + post_json_object = load_json(search_filename) + if post_json_object: pjo = post_json_object - if (authorized or - (pub_str in pjo['object']['to'] or - pub_str in pjo['object']['cc'])): - replies_json['orderedItems'].append(pjo) - reply_found = True - else: - if authorized or \ - pub_str in post_json_object['object']['to']: - pjo = post_json_object - replies_json['orderedItems'].append(pjo) - reply_found = True - break - # if not in either inbox or outbox then examine the shared inbox - if not reply_found: - message_id2 = remove_eol(message_id) - search_filename = \ - data_dir(base_dir) + '/inbox@' + \ - domain + '/inbox/' + \ - message_id2.replace('/', '#') + '.json' - if os.path.isfile(search_filename): - if authorized or \ - text_in_file(pub_str, search_filename): - # get the json of the reply and append it to - # the collection - post_json_object = load_json(search_filename) - if post_json_object: - if post_json_object['object'].get('cc'): + ordered_items = replies_json['orderedItems'] + if pjo['object'].get('cc'): + if (authorized or + (pub_str in pjo['object']['to'] or + pub_str in pjo['object']['cc'])): + ordered_items.append(pjo) + reply_found = True + else: + if authorized or \ + pub_str in pjo['object']['to']: + ordered_items.append(pjo) + reply_found = True + break + # if not in either inbox or outbox then examine the + # shared inbox + if not reply_found: + message_id2 = remove_eol(message_id) + search_filename = \ + data_dir(base_dir) + '/inbox@' + \ + domain + '/inbox/' + \ + message_id2.replace('/', '#') + '.json' + if os.path.isfile(search_filename): + if authorized or \ + text_in_file(pub_str, search_filename): + # get the json of the reply and append it to + # the collection + post_json_object = load_json(search_filename) + if post_json_object: pjo = post_json_object - if (authorized or - (pub_str in pjo['object']['to'] or - pub_str in pjo['object']['cc'])): - pjo = post_json_object - replies_json['orderedItems'].append(pjo) - else: - if authorized or \ - pub_str in post_json_object['object']['to']: - pjo = post_json_object - replies_json['orderedItems'].append(pjo) + ordered_items = replies_json['orderedItems'] + if pjo['object'].get('cc'): + if (authorized or + (pub_str in pjo['object']['to'] or + pub_str in pjo['object']['cc'])): + ordered_items.append(pjo) + else: + if authorized or \ + pub_str in pjo['object']['to']: + ordered_items.append(pjo) + except OSError: + print('EX: populate_replies_json unable to read ' + + post_replies_filename) def _reject_announce(announce_filename: str, diff 
--git a/question.py b/question.py
index 85573a14f..8d56ef07c 100644
--- a/question.py
+++ b/question.py
@@ -145,33 +145,39 @@ def question_update_votes(base_dir: str, nickname: str, domain: str,
             print('EX: unable to append to voters file ' +
                   voters_filename)
     else:
         # change an entry in the voters file
-        with open(voters_filename, 'r',
-                  encoding='utf-8') as voters_file:
-            lines = voters_file.readlines()
-            newlines = []
-            save_voters_file = False
-            for vote_line in lines:
-                if vote_line.startswith(actor_url +
-                                        voters_file_separator):
-                    new_vote_line = actor_url + \
-                        voters_file_separator + reply_vote + '\n'
-                    if vote_line == new_vote_line:
-                        break
-                    save_voters_file = True
-                    newlines.append(new_vote_line)
-                else:
-                    newlines.append(vote_line)
-            if save_voters_file:
-                try:
-                    with open(voters_filename, 'w+',
-                              encoding='utf-8') as voters_file:
-                        for vote_line in newlines:
-                            voters_file.write(vote_line)
-                except OSError:
-                    print('EX: unable to write voters file2 ' +
-                          voters_filename)
+        lines = []
+        try:
+            with open(voters_filename, 'r',
+                      encoding='utf-8') as voters_file:
+                lines = voters_file.readlines()
+        except OSError:
+            print('EX: question_update_votes unable to read ' +
+                  voters_filename)
+
+        newlines = []
+        save_voters_file = False
+        for vote_line in lines:
+            if vote_line.startswith(actor_url +
+                                    voters_file_separator):
+                new_vote_line = actor_url + \
+                    voters_file_separator + reply_vote + '\n'
+                if vote_line == new_vote_line:
+                    break
+                save_voters_file = True
+                newlines.append(new_vote_line)
             else:
-                return None, None
+                newlines.append(vote_line)
+        if save_voters_file:
+            try:
+                with open(voters_filename, 'w+',
+                          encoding='utf-8') as voters_file:
+                    for vote_line in newlines:
+                        voters_file.write(vote_line)
+            except OSError:
+                print('EX: unable to write voters file2 ' +
+                      voters_filename)
+        else:
+            return None, None
 
     # update the vote counts
     question_totals_changed = False
@@ -179,12 +185,17 @@ def question_update_votes(base_dir: str, nickname: str, domain: str,
         if not possible_answer.get('name'):
             continue
         total_items = 0
-        with open(voters_filename, 'r', encoding='utf-8') as voters_file:
-            lines = voters_file.readlines()
-            for vote_line in lines:
-                if vote_line.endswith(voters_file_separator +
-                                      possible_answer['name'] + '\n'):
-                    total_items += 1
+        lines = []
+        try:
+            with open(voters_filename, 'r', encoding='utf-8') as fp_voters:
+                lines = fp_voters.readlines()
+        except OSError:
+            print('EX: question_update_votes unable to read ' +
+                  voters_filename)
+        for vote_line in lines:
+            if vote_line.endswith(voters_file_separator +
+                                  possible_answer['name'] + '\n'):
+                total_items += 1
         if possible_answer['replies']['totalItems'] != total_items:
             possible_answer['replies']['totalItems'] = total_items
             question_totals_changed = True
diff --git a/reaction.py b/reaction.py
index 618d02fd0..882217e5d 100644
--- a/reaction.py
+++ b/reaction.py
@@ -472,7 +472,8 @@ def _update_common_reactions(base_dir: str, emoji_content: str) -> None:
                       encoding='utf-8') as fp_react:
                 common_reactions = fp_react.readlines()
         except OSError:
-            print('EX: unable to load common reactions file')
+            print('EX: unable to load common reactions file ' +
+                  common_reactions_filename)
     if common_reactions:
         new_common_reactions = []
         reaction_found = False
diff --git a/roles.py b/roles.py
index 6bca519f0..75d5e8677 100644
--- a/roles.py
+++ b/roles.py
@@ -283,19 +283,23 @@ def is_devops(base_dir: str, nickname: str) -> bool:
             return True
         return False
 
-    with open(devops_file, 'r', encoding='utf-8') as fp_mod:
-        lines = fp_mod.readlines()
-        if 
len(lines) == 0: - # if there is nothing in the file - admin_name = get_config_param(base_dir, 'admin') - if not admin_name: - return False - if admin_name == nickname: - return True - for devops in lines: - devops = devops.strip('\n').strip('\r') - if devops == nickname: - return True + lines = [] + try: + with open(devops_file, 'r', encoding='utf-8') as fp_mod: + lines = fp_mod.readlines() + except OSError: + print('EX: is_devops unable to read ' + devops_file) + if len(lines) == 0: + # if there is nothing in the file + admin_name = get_config_param(base_dir, 'admin') + if not admin_name: + return False + if admin_name == nickname: + return True + for devops in lines: + devops = devops.strip('\n').strip('\r') + if devops == nickname: + return True return False diff --git a/shares.py b/shares.py index 1471c47a2..f787ec05e 100644 --- a/shares.py +++ b/shares.py @@ -1770,11 +1770,15 @@ def _generate_next_shares_token_update(base_dir: str, token_update_filename = token_update_dir + '/.tokenUpdate' next_update_sec = None if os.path.isfile(token_update_filename): - with open(token_update_filename, 'r', encoding='utf-8') as fp_tok: - next_update_str = fp_tok.read() - if next_update_str: - if next_update_str.isdigit(): - next_update_sec = int(next_update_str) + try: + with open(token_update_filename, 'r', encoding='utf-8') as fp_tok: + next_update_str = fp_tok.read() + if next_update_str: + if next_update_str.isdigit(): + next_update_sec = int(next_update_str) + except OSError: + print('EX: _generate_next_shares_token_update unable to read ' + + token_update_filename) curr_time = int(time.time()) updated = False if next_update_sec: @@ -1818,11 +1822,15 @@ def _regenerate_shares_token(base_dir: str, domain_full: str, if not os.path.isfile(token_update_filename): return next_update_sec = None - with open(token_update_filename, 'r', encoding='utf-8') as fp_tok: - next_update_str = fp_tok.read() - if next_update_str: - if next_update_str.isdigit(): - next_update_sec = int(next_update_str) + try: + with open(token_update_filename, 'r', encoding='utf-8') as fp_tok: + next_update_str = fp_tok.read() + if next_update_str: + if next_update_str.isdigit(): + next_update_sec = int(next_update_str) + except OSError: + print('EX: _regenerate_shares_token unable to read ' + + token_update_filename) if not next_update_sec: return curr_time = int(time.time()) diff --git a/siteactive.py b/siteactive.py index ce785f6be..5aec4e91f 100644 --- a/siteactive.py +++ b/siteactive.py @@ -180,5 +180,6 @@ def load_unavailable_sites(base_dir: str) -> []: encoding='utf-8') as fp_sites: sites_unavailable = fp_sites.read().split('\n') except OSError: - print('EX: unable to save unavailable sites') + print('EX: unable to read unavailable sites ' + + unavailable_sites_filename) return sites_unavailable diff --git a/speaker.py b/speaker.py index 51428c8b0..9fbdee335 100644 --- a/speaker.py +++ b/speaker.py @@ -150,24 +150,28 @@ def _speaker_pronounce(base_dir: str, say_text: str, translate: {}) -> str: ")": "," } if os.path.isfile(pronounce_filename): - with open(pronounce_filename, 'r', encoding='utf-8') as fp_pro: - pronounce_list = fp_pro.readlines() - for conversion in pronounce_list: - separator = None - if '->' in conversion: - separator = '->' - elif ';' in conversion: - separator = ';' - elif ':' in conversion: - separator = ':' - elif ',' in conversion: - separator = ',' - if not separator: - continue + try: + with open(pronounce_filename, 'r', encoding='utf-8') as fp_pro: + pronounce_list = fp_pro.readlines() + for 
conversion in pronounce_list: + separator = None + if '->' in conversion: + separator = '->' + elif ';' in conversion: + separator = ';' + elif ':' in conversion: + separator = ':' + elif ',' in conversion: + separator = ',' + if not separator: + continue - text = conversion.split(separator)[0].strip() - converted = conversion.split(separator)[1].strip() - convert_dict[text] = converted + text = conversion.split(separator)[0].strip() + converted = conversion.split(separator)[1].strip() + convert_dict[text] = converted + except OSError: + print('EX: _speaker_pronounce unable to read ' + + pronounce_filename) for text, converted in convert_dict.items(): if text in say_text: say_text = say_text.replace(text, converted) @@ -528,13 +532,18 @@ def _post_to_speaker_json(base_dir: str, http_prefix: str, accounts_dir = acct_dir(base_dir, nickname, domain_full) approve_follows_filename = accounts_dir + '/followrequests.txt' if os.path.isfile(approve_follows_filename): - with open(approve_follows_filename, 'r', encoding='utf-8') as fp_foll: - follows = fp_foll.readlines() - if len(follows) > 0: - follow_requests_exist = True - for i, _ in enumerate(follows): - follows[i] = follows[i].strip() - follow_requests_list = follows + try: + with open(approve_follows_filename, 'r', + encoding='utf-8') as fp_foll: + follows = fp_foll.readlines() + if len(follows) > 0: + follow_requests_exist = True + for i, _ in enumerate(follows): + follows[i] = follows[i].strip() + follow_requests_list = follows + except OSError: + print('EX: _post_to_speaker_json unable to read ' + + approve_follows_filename) post_dm = False dm_filename = accounts_dir + '/.newDM' if os.path.isfile(dm_filename): @@ -546,8 +555,12 @@ def _post_to_speaker_json(base_dir: str, http_prefix: str, liked_by = '' like_filename = accounts_dir + '/.newLike' if os.path.isfile(like_filename): - with open(like_filename, 'r', encoding='utf-8') as fp_like: - liked_by = fp_like.read() + try: + with open(like_filename, 'r', encoding='utf-8') as fp_like: + liked_by = fp_like.read() + except OSError: + print('EX: _post_to_speaker_json unable to read 2 ' + + like_filename) calendar_filename = accounts_dir + '/.newCalendar' post_cal = os.path.isfile(calendar_filename) share_filename = accounts_dir + '/.newShare' diff --git a/theme.py b/theme.py index 0b1c03c7f..2c91fd979 100644 --- a/theme.py +++ b/theme.py @@ -46,25 +46,29 @@ def import_theme(base_dir: str, filename: str) -> bool: ' missing from imported theme') return False new_theme_name = None - with open(temp_theme_dir + '/name.txt', 'r', - encoding='utf-8') as fp_theme: - new_theme_name1 = fp_theme.read() - new_theme_name = remove_eol(new_theme_name1) - if len(new_theme_name) > 20: - print('WARN: Imported theme name is too long') - return False - if len(new_theme_name) < 2: - print('WARN: Imported theme name is too short') - return False - new_theme_name = new_theme_name.lower() - forbidden_chars = ( - ' ', ';', '/', '\\', '?', '!', '#', '@', - ':', '%', '&', '"', '+', '<', '>', '$' - ) - for char in forbidden_chars: - if char in new_theme_name: - print('WARN: theme name contains forbidden character') + try: + with open(temp_theme_dir + '/name.txt', 'r', + encoding='utf-8') as fp_theme: + new_theme_name1 = fp_theme.read() + new_theme_name = remove_eol(new_theme_name1) + if len(new_theme_name) > 20: + print('WARN: Imported theme name is too long') return False + if len(new_theme_name) < 2: + print('WARN: Imported theme name is too short') + return False + new_theme_name = new_theme_name.lower() + 
forbidden_chars = ( + ' ', ';', '/', '\\', '?', '!', '#', '@', + ':', '%', '&', '"', '+', '<', '>', '$' + ) + for char in forbidden_chars: + if char in new_theme_name: + print('WARN: theme name contains forbidden character') + return False + except OSError: + print('EX: import_theme unable to read ' + + temp_theme_dir + '/name.txt') if not new_theme_name: return False
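The shares.py and theme.py hunks show the other half of the pattern: validate whatever was read before acting on it. A short sketch of the shares.py token shape, with an assumed filename that is not part of this patch:

    next_update_sec = None
    try:
        with open('.tokenUpdate', 'r', encoding='utf-8') as fp_tok:
            next_update_str = fp_tok.read()
            if next_update_str and next_update_str.isdigit():
                next_update_sec = int(next_update_str)
    except OSError:
        print('EX: unable to read .tokenUpdate')
    # next_update_sec stays None on any failure, so callers test for None
    # instead of handling the exception themselves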