mirror of https://gitlab.com/bashrc2/epicyon
More efficient detection of non-empty lists
parent c431556df2
commit fccaaca07f
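This commit replaces explicit length comparisons with Python's truthiness rule: an empty list or dictionary is falsy, so a bare if some_list: test behaves the same as if len(some_list) > 0: without the extra len() call, and nested guards of that form collapse to a single test. A minimal sketch of the idiom, using an illustrative value rather than code from the repository:

cc_list = ['https://example.net/users/alice/followers']

# old style: explicit length comparison
if len(cc_list) > 0:
    print('cc present')

# new style: same behaviour, relying on empty sequences being falsy
if cc_list:
    print('cc present')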
@@ -303,7 +303,6 @@ def bookmark_post(recent_posts_cache: {},
         'object': object_url
     }
     if cc_list:
-        if len(cc_list) > 0:
             new_bookmark_json['cc'] = cc_list

     # Extract the domain and nickname from a statuses link
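In the hunk above, and in the undo_bookmark_post and _reactionpost hunks below, the outer if cc_list: already guarantees a non-empty list, so the nested length check is removed. A short sketch of the collapsed guard, with an illustrative bookmark dictionary rather than the commit's full Bookmark activity:

new_bookmark_json = {'type': 'Bookmark', 'object': 'https://example.net/status/1'}
cc_list = ['https://example.net/users/alice/followers']

# a single truthiness test covers both None and an empty list
if cc_list:
    new_bookmark_json['cc'] = cc_list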
|
@@ -364,7 +363,6 @@ def undo_bookmark_post(recent_posts_cache: {},
         }
     }
     if cc_list:
-        if len(cc_list) > 0:
             new_undo_bookmark_json['cc'] = cc_list
             new_undo_bookmark_json['object']['cc'] = cc_list

cache.py
@@ -154,7 +154,7 @@ def expire_person_cache(person_cache: {}):
         days_since_cached = (curr_time - cache_time).days
         if days_since_cached > 2:
             removals.append(person_url)
-    if len(removals) > 0:
+    if removals:
         for person_url in removals:
             del person_cache[person_url]
         print(str(len(removals)) + ' actors were expired from the cache')
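expire_person_cache keeps its two-pass structure: expired URLs are collected first, because the dictionary cannot be changed while it is being iterated, and the truthiness test is then applied to the collected list. A runnable sketch, assuming each cache entry carries a timestamp (the real person_cache stores more than this):

from datetime import datetime, timedelta

person_cache = {
    'https://example.net/users/alice': {'timestamp': datetime.utcnow() - timedelta(days=5)},
    'https://example.net/users/bob': {'timestamp': datetime.utcnow()}
}

removals = []
for person_url, cached in person_cache.items():
    # entries older than two days are marked for removal
    if (datetime.utcnow() - cached['timestamp']).days > 2:
        removals.append(person_url)

# truthiness test instead of len(removals) > 0
if removals:
    for person_url in removals:
        del person_cache[person_url]
    print(str(len(removals)) + ' actors were expired from the cache')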
@@ -97,7 +97,7 @@ def html_watch_points_graph(base_dir: str, fitness: {}, fitness_id: str,

     # get the maximum time
     max_average_time = float(1)
-    if len(watch_points_list) > 0:
+    if watch_points_list:
         max_average_time = float(watch_points_list[0].split(' ')[0])
     for watch_point in watch_points_list:
         average_time = float(watch_point.split(' ')[0])
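html_watch_points_graph takes the maximum average time from the first list entry, which relies on each watch point being a string that begins with its average time and on the list arriving sorted largest-first; the default of float(1) covers the empty case. A sketch with hypothetical entries:

# hypothetical watch point entries: '<average time> <name>', sorted largest-first
watch_points_list = ['12.5 inbox', '3.1 outbox', '0.4 cache']

max_average_time = float(1)
if watch_points_list:
    max_average_time = float(watch_points_list[0].split(' ')[0])
print(max_average_time)   # 12.5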
inbox.py
@@ -3800,7 +3800,7 @@ def run_inbox_queue(server,
         # Copy any posts addressed to followers into the shared inbox
         # this avoid copying file multiple times to potentially many
         # individual inboxes
-        if len(recipients_dict_followers) > 0:
+        if len(recipients_dict_followers.items()) > 0:
             shared_inbox_post_filename = \
                 queue_json['destination'].replace(inbox_handle, inbox_handle)
             if not os.path.isfile(shared_inbox_post_filename):
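run_inbox_queue copies a post into the shared inbox only when some of its recipients are followers; recipients_dict_followers is a dictionary, and an empty dictionary is also falsy, although this hunk keeps an explicit length test. A sketch under the assumption that the dictionary maps follower handles to inbox paths:

# assumed shape: handle -> inbox filename
recipients_dict_followers = {
    'alice@example.net': '/accounts/alice@example.net/inbox'
}

# an empty dict is falsy, so a bare truthiness test would behave the same
if len(recipients_dict_followers.items()) > 0:
    print('copy the post into the shared inbox once')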
posts.py
@@ -4606,7 +4606,7 @@ def create_moderation(base_dir: str, nickname: str, domain: str, port: int,
         return box_header

     page_lines: list[str] = []
-    if len(lines) > 0:
+    if lines:
         end_line_number = \
             len(lines) - 1 - int(items_per_page * page_number)
         end_line_number = max(end_line_number, 0)
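create_moderation pages through the moderation feed by stepping end_line_number back from the last line by items_per_page for each page, clamped at zero so deep page numbers cannot go negative. A worked example with hypothetical report lines:

lines = ['report %d' % i for i in range(25)]   # hypothetical moderation reports
items_per_page = 10
page_number = 1

if lines:
    end_line_number = len(lines) - 1 - int(items_per_page * page_number)
    end_line_number = max(end_line_number, 0)
    print(end_line_number)   # 25 - 1 - 10 * 1 = 14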
@@ -103,7 +103,6 @@ def _reactionpost(recent_posts_cache: {},
         'content': emoji_content
     }
     if cc_list:
-        if len(cc_list) > 0:
             new_reaction_json['cc'] = cc_list

     # Extract the domain and nickname from a statuses link
@@ -545,7 +545,6 @@ def _post_to_speaker_json(base_dir: str, http_prefix: str,
             print('EX: _post_to_speaker_json unable to read ' +
                   approve_follows_filename)
         if follows:
-            if len(follows) > 0:
                 follow_requests_exist = True
                 for i, _ in enumerate(follows):
                     follows[i] = follows[i].strip()
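_post_to_speaker_json applies the same simplification: follows holds the lines read from approve_follows_filename, so the if follows: guard alone establishes that follow requests exist before the entries are stripped in place. A sketch with hypothetical file content supplied inline:

# hypothetical contents of the approve-follows file, one handle per line
follows = ['alice@example.net\n', 'bob@example.org\n']

follow_requests_exist = False
if follows:
    follow_requests_exist = True
    for i, _ in enumerate(follows):
        follows[i] = follows[i].strip()
print(follow_requests_exist, follows)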