merge-requests/30/head
Bob Mottram 2023-02-20 11:55:54 +00:00
parent 5ef4446f29
commit 18bbc4f647
2 changed files with 11 additions and 29 deletions

@@ -18941,9 +18941,17 @@ class PubServer(BaseHTTPRequestHandler):
                 self.server.getreq_busy = False
                 return
             else:
-                # redirect to the hashtag url
-                self._redirect_headers(hashtag_url, None, calling_domain)
-                self.server.getreq_busy = False
+                hashtag = urllib.parse.unquote(hashtag_url.split('/')[-1])
+                tags_filename = \
+                    self.server.base_dir + '/tags/' + hashtag + '.txt'
+                if os.path.isfile(tags_filename):
+                    # redirect to the local hashtag screen
+                    self.server.getreq_busy = False
+                    self._redirect_headers(hashtag_url, cookie, calling_domain)
+                else:
+                    # redirect to the upstream hashtag url
+                    self.server.getreq_busy = False
+                    self._redirect_headers(hashtag_url, None, calling_domain)
             return
         # hashtag search

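Both branches of the added code redirect to the same hashtag_url; what changes is whether the session cookie is attached. If a local tags index exists for the hashtag (base_dir/tags/<hashtag>.txt) the browser is sent to the local hashtag screen with its cookie, otherwise the redirect carries no cookie and falls through to the upstream hashtag url. A minimal standalone sketch of that decision follows; decide_hashtag_redirect is a hypothetical helper written for illustration and is not part of the codebase.

import os
import urllib.parse


def decide_hashtag_redirect(base_dir: str, hashtag_url: str,
                            cookie: str) -> tuple:
    """Decide how to redirect a clicked hashtag link.

    Hypothetical helper mirroring the branch added above: returns the
    redirect target together with the cookie to attach, which is kept
    only when a local tags index exists for the hashtag.
    """
    # the hashtag is the last path component of the url
    hashtag = urllib.parse.unquote(hashtag_url.split('/')[-1])
    tags_filename = base_dir + '/tags/' + hashtag + '.txt'
    if os.path.isfile(tags_filename):
        # local hashtag screen: keep the session cookie
        return hashtag_url, cookie
    # upstream hashtag url: no cookie
    return hashtag_url, None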
@@ -1103,34 +1103,8 @@ def html_hashtag_search_remote(nickname: str, domain: str, port: int,
             print('No orderedItems in hashtag collection ' + str(hashtag_json))
     else:
         print('WARN: no hashtags returned for url ' + hashtag_url)
-    if not lines:
-        # look for local hashtags
-        tags_filename = base_dir + '/tags/' + hashtag + '.txt'
-        if not os.path.isfile(tags_filename):
-            return ''
-        try:
-            with open(tags_filename, 'r', encoding='utf-8') as fp_tags:
-                lines = fp_tags.read().splitlines()
-        except OSError:
-            print('EX: unable to read hashtag file ' + tags_filename)
-        if lines:
-            curr_page = 1
-            item_ctr = 0
-            new_lines = []
-            for line in lines:
-                section = line.split(' ')
-                if len(section) < 3:
-                    continue
-                if curr_page == page_number:
-                    new_lines.append(section[2].replace('#', '/'))
-                item_ctr += 1
-                if item_ctr >= posts_per_page:
-                    item_ctr = 0
-                    curr_page += 1
-            lines = new_lines
     if not lines:
         return ''
-    print('DEBUG: hashtag lines: ' + str(lines))
     separator_str = html_post_separator(base_dir, None)
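The lines deleted here were the local fallback inside html_hashtag_search_remote: when the remote collection yielded nothing, the function read base_dir/tags/<hashtag>.txt itself and paginated it, keeping the third space-separated field of each entry (apparently a post id with '/' stored as '#') for the requested page. With the fallback gone the function simply returns '' when there are no remote results, and the local case is presumably covered by the redirect added in the first file. For reference, a standalone sketch of the removed pagination; paginate_tag_lines is an illustrative name only, not a function in the codebase.

def paginate_tag_lines(lines: list, page_number: int,
                       posts_per_page: int) -> list:
    """Sketch of the pagination performed by the removed fallback.

    Each entry is split on spaces and the third field, with '#'
    translated back to '/', is collected for the requested page.
    """
    curr_page = 1
    item_ctr = 0
    new_lines = []
    for line in lines:
        section = line.split(' ')
        if len(section) < 3:
            # malformed entry, skip it
            continue
        if curr_page == page_number:
            new_lines.append(section[2].replace('#', '/'))
        item_ctr += 1
        if item_ctr >= posts_per_page:
            # move on to the next page
            item_ctr = 0
            curr_page += 1
    return new_lines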