epicyon/newsdaemon.py

__filename__ = "newsdaemon.py"
__author__ = "Bob Mottram"
__license__ = "AGPL3+"
__version__ = "1.1.0"
__maintainer__ = "Bob Mottram"
__email__ = "bob@freedombone.net"
__status__ = "Production"
import os
import time
import datetime
from collections import OrderedDict
from newswire import getDictFromNewswire
# from posts import sendSignedJson
from posts import createNewsPost
from content import removeHtmlTag
from content import dangerousMarkup
from content import validHashTag
from utils import loadJson
from utils import saveJson
from utils import getStatusNumber
from inbox import storeHashTags


def updateFeedsOutboxIndex(baseDir: str, domain: str, postId: str) -> None:
    """Updates the index used for imported RSS feeds
    """
basePath = baseDir + '/accounts/news@' + domain
indexFilename = basePath + '/outbox.index'
    if os.path.isfile(indexFilename):
        # prepend the post id if it isn't already in the index
        with open(indexFilename, 'r') as feedsFile:
            content = feedsFile.read()
        if postId not in content:
            try:
                with open(indexFilename, 'w+') as feedsFile:
                    feedsFile.write(postId + '\n' + content)
                    print('DEBUG: feeds post added to index')
            except Exception as e:
                print('WARN: Failed to write entry to feeds posts index ' +
                      indexFilename + ' ' + str(e))
    else:
        # create a new index containing only this post id
        with open(indexFilename, 'w+') as feedsFile:
            feedsFile.write(postId + '\n')


def saveArrivedTime(baseDir: str, postFilename: str, arrived: str) -> None:
    """Saves the time when an rss post arrived to a file
    """
    with open(postFilename + '.arrived', 'w+') as arrivedFile:
        arrivedFile.write(arrived)


def removeControlCharacters(content: str) -> str:
    """Replaces a small set of numeric HTML entities with ASCII
    equivalents
    TODO: this is hacky and a better solution is needed; the feed
    text contains unicode entities which are not being decoded
    """
lookups = {
"8211": "-",
"8230": "...",
"8216": "'",
"8217": "'",
"8220": '"',
"8221": '"'
}
for code, ch in lookups.items():
content = content.replace('&' + code + ';', ch)
content = content.replace('&#' + code + ';', ch)
return content
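

# A more general alternative (a sketch only, not what the function above
# uses): the standard library can decode any numeric HTML entity, e.g.
#     import html
#     html.unescape('&#8220;quote&#8221;')  # yields unicode curly quotes
# although it produces the original unicode characters rather than the
# ASCII substitutions above, and it does not handle the malformed
# '&8220;' form (without the '#') which the lookup table also covers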


def hashtagRuleResolve(tree: [], hashtags: []) -> bool:
"""Returns whether the tree for a hashtag rule evaluates to true or false
"""
if not tree:
return False
if tree[0] == 'not':
if len(tree) == 2:
if isinstance(tree[1], str):
return tree[1] not in hashtags
elif isinstance(tree[1], list):
                return not hashtagRuleResolve(tree[1], hashtags)
elif tree[0] == 'and':
if len(tree) == 3:
firstArg = False
if isinstance(tree[1], str):
firstArg = (tree[1] in hashtags)
elif isinstance(tree[1], list):
                firstArg = (hashtagRuleResolve(tree[1], hashtags))
secondArg = False
if isinstance(tree[2], str):
secondArg = (tree[2] in hashtags)
elif isinstance(tree[2], list):
                secondArg = (hashtagRuleResolve(tree[2], hashtags))
return firstArg and secondArg
elif tree[0] == 'or':
if len(tree) == 3:
firstArg = False
if isinstance(tree[1], str):
firstArg = (tree[1] in hashtags)
elif isinstance(tree[1], list):
                firstArg = (hashtagRuleResolve(tree[1], hashtags))
secondArg = False
if isinstance(tree[2], str):
secondArg = (tree[2] in hashtags)
elif isinstance(tree[2], list):
                secondArg = (hashtagRuleResolve(tree[2], hashtags))
return firstArg or secondArg
elif tree[0].startswith('#') and len(tree) == 1:
return tree[0] in hashtags
return False
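

# Illustrative example (comments only, not part of the original source):
# hashtagRuleResolve(['and', ['#news'], ['not', ['#spam']]], ['#news'])
# evaluates to True, because '#news' is present and '#spam' is absent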


def hashtagRuleTree(operators: [],
                    conditionsStr: str,
                    tagsInConditions: []) -> []:
    """Parses a rule condition string into a tree of operators
    and hashtags, recording any tags found in the conditions
    """
if not operators and conditionsStr:
conditionsStr = conditionsStr.strip()
if conditionsStr.startswith('#') or conditionsStr in operators:
if conditionsStr.startswith('#'):
if conditionsStr not in tagsInConditions:
if ' ' not in conditionsStr:
tagsInConditions.append(conditionsStr)
return [conditionsStr.strip()]
else:
return None
if not operators or not conditionsStr:
return None
tree = None
conditionsStr = conditionsStr.strip()
if conditionsStr.startswith('#') or conditionsStr in operators:
if conditionsStr.startswith('#'):
if conditionsStr not in tagsInConditions:
if ' ' not in conditionsStr:
tagsInConditions.append(conditionsStr)
tree = [conditionsStr.strip()]
ctr = 0
while ctr < len(operators):
op = operators[ctr]
if op not in conditionsStr:
ctr += 1
continue
else:
tree = [op]
sections = conditionsStr.split(op)
for subConditionStr in sections:
result = hashtagRuleTree(operators[ctr + 1:], subConditionStr,
tagsInConditions)
if result:
tree.append(result)
break
return tree
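

# Illustrative example (comments only, not part of the original source):
# tags = []
# hashtagRuleTree(('not', 'and', 'or'), '#foo or #bar', tags)
# returns ['or', ['#foo'], ['#bar']] and tags becomes ['#foo', '#bar']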


def newswireHashtagProcessing(session, baseDir: str, postJsonObject: {},
                              hashtags: [], httpPrefix: str,
                              domain: str, port: int,
                              personCache: {},
                              cachedWebfingers: {},
                              federationList: [],
                              sendThreads: [], postLog: []) -> bool:
"""Applies hashtag rules to a news post.
Returns true if the post should be saved to the news timeline
of this instance
"""
rulesFilename = baseDir + '/accounts/hashtagrules.txt'
if not os.path.isfile(rulesFilename):
return True
rules = []
with open(rulesFilename, "r") as f:
rules = f.readlines()
domainFull = domain
if port:
if port != 80 and port != 443:
domainFull = domain + ':' + str(port)
actionOccurred = False
operators = ('not', 'and', 'or')
for ruleStr in rules:
if not ruleStr:
continue
if not ruleStr.startswith('if '):
continue
if ' then ' not in ruleStr:
continue
conditionsStr = ruleStr.split('if ', 1)[1]
conditionsStr = conditionsStr.split(' then ')[0]
tagsInConditions = []
tree = hashtagRuleTree(operators, conditionsStr, tagsInConditions)
# does the rule contain any hashtags?
if not tagsInConditions:
continue
        if not hashtagRuleResolve(tree, hashtags):
continue
# the condition matches, so do something
actionStr = ruleStr.split(' then ')[1].strip()
# add a hashtag
if actionStr.startswith('add '):
addHashtag = actionStr.split('add ', 1)[1].strip()
if addHashtag.startswith('#'):
if addHashtag not in hashtags:
hashtags.append(addHashtag)
htId = addHashtag.replace('#', '')
if validHashTag(htId):
hashtagUrl = \
httpPrefix + "://" + domainFull + "/tags/" + htId
postJsonObject['object']['tag'][htId] = {
'href': hashtagUrl,
'name': addHashtag,
'type': 'Hashtag'
}
                        hashtagHtml = \
                            " <a href=\"" + hashtagUrl + \
                            "\" class=\"mention hashtag\" " + \
                            "rel=\"tag\">#<span>" + \
                            htId + "</span></a>"
content = postJsonObject['object']['content']
if content.endswith('</p>'):
content = \
content[:len(content) - len('</p>')] + \
hashtagHtml + '</p>'
else:
content += hashtagHtml
postJsonObject['object']['content'] = content
storeHashTags(baseDir, 'news', postJsonObject)
actionOccurred = True
# remove a hashtag
if actionStr.startswith('remove '):
rmHashtag = actionStr.split('remove ', 1)[1].strip()
if rmHashtag.startswith('#'):
if rmHashtag in hashtags:
hashtags.remove(rmHashtag)
                    htId = rmHashtag.replace('#', '')
hashtagUrl = \
httpPrefix + "://" + domainFull + "/tags/" + htId
hashtagHtml = \
"<a href=\"" + hashtagUrl + \
"\" class=\"mention hashtag\" " + \
"rel=\"tag\">#<span>" + \
htId + "</span></a>"
content = postJsonObject['object']['content']
if hashtagHtml in content:
                        content = \
                            content.replace(hashtagHtml, '').replace('  ', ' ')
postJsonObject['object']['content'] = content
del postJsonObject['object']['tag'][htId]
actionOccurred = True
# TODO
# If routing to another instance
# sendSignedJson(postJsonObject: {}, session, baseDir: str,
# nickname: str, domain: str, port: int,
# toNickname: str, toDomain: str, toPort: int, cc: str,
# httpPrefix: str, False, False,
# federationList: [],
# sendThreads: [], postLog: [], cachedWebfingers: {},
# personCache: {}, False, __version__) -> int:
    # note: posts are currently always saved to the news timeline;
    # actionOccurred is retained for possible future routing to
    # other instances (see the TODO above)
    return True
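

# Example rules as they might appear in accounts/hashtagrules.txt
# (illustrative only; the format is "if <condition> then <action>",
# parsed by hashtagRuleTree above):
#   if #politics or #elections then add #news
#   if #nsfw then remove #news
# note that operator matching is substring-based, so hashtags which
# contain 'and', 'or' or 'not' within them may parse unexpectedly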


def convertRSStoActivityPub(baseDir: str, httpPrefix: str,
                            domain: str, port: int,
                            newswire: {},
                            translate: {},
                            recentPostsCache: {}, maxRecentPosts: int,
                            session, cachedWebfingers: {},
                            personCache: {},
                            federationList: [],
                            sendThreads: [], postLog: []) -> None:
"""Converts rss items in a newswire into posts
"""
basePath = baseDir + '/accounts/news@' + domain + '/outbox'
if not os.path.isdir(basePath):
os.mkdir(basePath)
    # oldest items first
    newswireReverse = \
        OrderedDict(sorted(newswire.items(), reverse=False))

    for dateStr, item in newswireReverse.items():
originalDateStr = dateStr
# convert the date to the format used by ActivityPub
dateStr = dateStr.replace(' ', 'T')
dateStr = dateStr.replace('+00:00', 'Z')
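        # e.g. '2020-10-07 12:05:49+00:00' becomes '2020-10-07T12:05:49Z'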
statusNumber, published = getStatusNumber(dateStr)
newPostId = \
httpPrefix + '://' + domain + \
'/users/news/statuses/' + statusNumber
# file where the post is stored
filename = basePath + '/' + newPostId.replace('/', '#') + '.json'
if os.path.isfile(filename):
            # don't create the post if it already exists
            # set the url
            newswire[originalDateStr][1] = \
                '/users/news/statuses/' + statusNumber
            # set the filename
            newswire[originalDateStr][3] = filename
            continue
rssTitle = removeControlCharacters(item[0])
url = item[1]
if dangerousMarkup(url) or dangerousMarkup(rssTitle):
continue
        # get the rss description if it exists
        rssDescription = removeControlCharacters(item[4])
        if rssDescription.startswith('<![CDATA['):
            rssDescription = rssDescription.replace('<![CDATA[', '')
            rssDescription = rssDescription.replace(']]>', '')
            rssDescription = '<p>' + rssDescription + '</p>'
# add the off-site link to the description
        if rssDescription and not dangerousMarkup(rssDescription):
            rssDescription += \
                '<br><a href="' + url + '">' + \
                translate['Read more...'] + '</a>'
        else:
            rssDescription = \
                '<a href="' + url + '">' + \
                translate['Read more...'] + '</a>'
# remove image dimensions
if '<img' in rssDescription:
rssDescription = removeHtmlTag(rssDescription, 'width')
rssDescription = removeHtmlTag(rssDescription, 'height')

        followersOnly = False
useBlurhash = False
# NOTE: the id when the post is created will not be
# consistent (it's based on the current time, not the
# published time), so we change that later
        blog = createNewsPost(baseDir,
                              domain, port, httpPrefix,
                              rssDescription,
                              followersOnly, False,
                              None, None, None, useBlurhash,
                              rssTitle)
if not blog:
continue
idStr = \
httpPrefix + '://' + domain + '/users/news' + \
'/statuses/' + statusNumber + '/replies'
blog['news'] = True
# note the time of arrival
currTime = datetime.datetime.utcnow()
blog['object']['arrived'] = currTime.strftime("%Y-%m-%dT%H:%M:%SZ")
# change the id, based upon the published time
blog['object']['replies']['id'] = idStr
blog['object']['replies']['first']['partOf'] = idStr
blog['id'] = newPostId + '/activity'
blog['object']['id'] = newPostId
blog['object']['atomUri'] = newPostId
blog['object']['url'] = \
httpPrefix + '://' + domain + '/@news/' + statusNumber
blog['object']['published'] = dateStr
postId = newPostId.replace('/', '#')
moderated = item[5]
hashtags = item[6]
savePost = newswireHashtagProcessing(session, baseDir, blog, hashtags,
httpPrefix, domain, port,
personCache, cachedWebfingers,
federationList,
sendThreads, postLog)

        # save the post and update the index
if savePost:
newswire[originalDateStr][6] = hashtags
storeHashTags(baseDir, 'news', blog)
if saveJson(blog, filename):
updateFeedsOutboxIndex(baseDir, domain, postId + '.json')
# Save a file containing the time when the post arrived
# this can then later be used to construct the news timeline
# excluding items during the voting period
if moderated:
saveArrivedTime(baseDir, filename,
blog['object']['arrived'])
else:
if os.path.isfile(filename + '.arrived'):
os.remove(filename + '.arrived')
# set the url
newswire[originalDateStr][1] = \
'/users/news/statuses/' + statusNumber
# set the filename
newswire[originalDateStr][3] = filename


def mergeWithPreviousNewswire(oldNewswire: {}, newNewswire: {}) -> None:
    """Preserve any votes or generated activitypub post filename
    as rss feeds are updated
    """
for published, fields in oldNewswire.items():
if not newNewswire.get(published):
continue
for i in range(1, 5):
newNewswire[published][i] = fields[i]


def runNewswireDaemon(baseDir: str, httpd,
                      httpPrefix: str, domain: str, port: int,
                      translate: {}) -> None:
"""Periodically updates RSS feeds
"""
newswireStateFilename = baseDir + '/accounts/.newswirestate.json'
2020-10-09 09:02:01 +00:00
2020-10-07 12:05:49 +00:00
# initial sleep to allow the system to start up
time.sleep(50)
while True:
# has the session been created yet?
if not httpd.session:
print('Newswire daemon waiting for session')
time.sleep(60)
continue
# try to update the feeds
newNewswire = None
try:
newNewswire = \
getDictFromNewswire(httpd.session, baseDir,
httpd.maxNewswirePostsPerSource,
httpd.maxNewswireFeedSizeKb)
except Exception as e:
print('WARN: unable to update newswire ' + str(e))
time.sleep(120)
continue
if not httpd.newswire:
if os.path.isfile(newswireStateFilename):
httpd.newswire = loadJson(newswireStateFilename)
mergeWithPreviousNewswire(httpd.newswire, newNewswire)
httpd.newswire = newNewswire
saveJson(httpd.newswire, newswireStateFilename)
print('Newswire updated')
convertRSStoActivityPub(baseDir,
httpPrefix, domain, port,
newNewswire, translate,
httpd.recentPostsCache,
httpd.maxRecentPosts,
httpd.session,
httpd.cachedWebfingers,
httpd.personCache,
httpd.federationList,
httpd.sendThreads,
httpd.postLog)
print('Newswire feed converted to ActivityPub')
# wait a while before the next feeds update
time.sleep(1200)


def runNewswireWatchdog(projectVersion: str, httpd) -> None:
    """This tries to keep the newswire update thread running even if it dies
    """
    print('Starting newswire watchdog')
    newswireOriginal = \
        httpd.thrNewswireDaemon.clone(runNewswireDaemon)
    httpd.thrNewswireDaemon.start()
while True:
time.sleep(50)
        if not httpd.thrNewswireDaemon.is_alive():
httpd.thrNewswireDaemon.kill()
httpd.thrNewswireDaemon = \
newswireOriginal.clone(runNewswireDaemon)
httpd.thrNewswireDaemon.start()
print('Restarting newswire daemon...')
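

# A sketch of how this watchdog might be wired up from the main daemon
# (illustrative only; the actual startup code lives elsewhere, and the
# threadWithTrace wrapper from threads.py is assumed here):
#     httpd.thrNewswireDaemon = \
#         threadWithTrace(target=runNewswireDaemon,
#                         args=(baseDir, httpd, httpPrefix,
#                               domain, port, translate), daemon=True)
#     runNewswireWatchdog(projectVersion, httpd)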