From 0765d21b2426604f3b118587c131a33cfcbf25e5 Mon Sep 17 00:00:00 2001
From: Bob Mottram
Date: Sat, 29 Jun 2019 21:21:37 +0100
Subject: [PATCH] Following and followers feeds

---
 daemon.py  |  19 ++++++++-
 epicyon.py |  24 +++++++++---
 follow.py  | 111 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 147 insertions(+), 7 deletions(-)

diff --git a/daemon.py b/daemon.py
index 5f1abb3e1..23e1bf1c7 100644
--- a/daemon.py
+++ b/daemon.py
@@ -19,6 +19,7 @@ from person import personLookup
 from person import personKeyLookup
 from person import personOutboxJson
 from inbox import inboxPermittedMessage
+from follow import getFollowingFeed
 import os
 import sys
 
@@ -34,6 +35,9 @@ maxMessageLength=5000
 # maximum number of posts to list in outbox feed
 maxPostsInFeed=20
 
+# number of follows/followers per page
+followsPerPage=2
+
 # Whether to use https
 useHttps=True
 
@@ -49,6 +53,7 @@ def readFollowList(filename: str):
         username,domain = parseHandle(u)
         if username:
             followlist.append(username+'@'+domain)
+    followUsers.close()
     return followlist
 
 class PubServer(BaseHTTPRequestHandler):
@@ -112,6 +117,18 @@ class PubServer(BaseHTTPRequestHandler):
             self._set_headers('application/json')
             self.wfile.write(json.dumps(outboxFeed).encode('utf-8'))
             self.GETbusy=False
+            return
+        following=getFollowingFeed(thisDomain,self.path,useHttps,followsPerPage)
+        if following:
+            self._set_headers('application/json')
+            self.wfile.write(json.dumps(following).encode('utf-8'))
+            self.GETbusy=False
+            return
+        followers=getFollowingFeed(thisDomain,self.path,useHttps,followsPerPage,'followers')
+        if followers:
+            self._set_headers('application/json')
+            self.wfile.write(json.dumps(followers).encode('utf-8'))
+            self.GETbusy=False
             return
         # look up a person
         getPerson = personLookup(thisDomain,self.path)
@@ -127,12 +144,12 @@ class PubServer(BaseHTTPRequestHandler):
             self.GETbusy=False
             return
         # check that a json file was requested
-        baseDir=os.getcwd()
         if not self.path.endswith('.json'):
             self._404()
             self.GETbusy=False
             return
         # check that the file exists
+        baseDir=os.getcwd()
         filename=baseDir+self.path
         if os.path.isfile(filename):
             self._set_headers('application/json')
diff --git a/epicyon.py b/epicyon.py
index 829ee8c89..15a1991b6 100644
--- a/epicyon.py
+++ b/epicyon.py
@@ -25,6 +25,7 @@ from httpsig import testHttpsig
 from daemon import runDaemon
 import socket
 from follow import clearFollows
+from follow import clearFollowers
 from follow import followPerson
 from follow import followerOfPerson
 from follow import unfollowPerson
@@ -43,14 +44,25 @@ clearFollows(username,domain)
 followPerson(username,domain,'badger','wild.com')
 followPerson(username,domain,'squirrel','secret.com')
 followPerson(username,domain,'rodent','drainpipe.com')
-unfollowPerson(username,domain,'squirrel','secret.com')
-sys.exit()
+followPerson(username,domain,'batman','mesh.com')
+followPerson(username,domain,'giraffe','trees.com')
 
-asHeader = {'Accept': 'application/ld+json; profile="https://www.w3.org/ns/activitystreams"'}
+clearFollowers(username,domain)
+followerOfPerson(username,domain,'badger','wild.com')
+followerOfPerson(username,domain,'squirrel','secret.com')
+followerOfPerson(username,domain,'rodent','drainpipe.com')
+followerOfPerson(username,domain,'batman','mesh.com')
+followerOfPerson(username,domain,'giraffe','trees.com')
+
+#unfollowPerson(username,domain,'squirrel','secret.com')
+#sys.exit()
+
+#asHeader = {'Accept': 'application/ld+json; profile="https://www.w3.org/ns/activitystreams"'}
 #userFollowing = getJson(session,"https://mastodon.social/users/Gargron/followers?page=true",asHeader,None)
-userFollowing = getJson(session,"https://mastodon.social/users/Gargron/following?page=true",asHeader,None)
-pprint(userFollowing)
-sys.exit()
+#userFollowing = getJson(session,"https://mastodon.social/users/Gargron/following",asHeader,None)
+#userFollowing = getJson(session,"https://mastodon.social/users/Gargron/following?page=1",asHeader,None)
+#pprint(userFollowing)
+#sys.exit()
 
 privateKeyPem,publicKeyPem,person,wfEndpoint=createPerson(username,domain,https,True)
 
diff --git a/follow.py b/follow.py
index 9d48954ba..dbb570b30 100644
--- a/follow.py
+++ b/follow.py
@@ -10,6 +10,7 @@ import json
 from pprint import pprint
 import os
 import sys
+from person import validUsername
 
 def followPerson(username: str, domain: str, followUsername: str, followDomain: str, followFile='following.txt') -> None:
     """Adds a person to the follow list
@@ -67,3 +68,113 @@ def clearFollows(username: str, domain: str,followFile='following.txt') -> None:
 
 def clearFollowers(username: str, domain: str) -> None:
     clearFollows(username, domain,'followers.txt')
+
+def getNoOfFollows(username: str,domain: str, followFile='following.txt') -> int:
+    handle=username.lower()+'@'+domain.lower()
+    baseDir=os.getcwd()
+    filename=baseDir+'/accounts/'+handle+'/'+followFile
+    if not os.path.isfile(filename):
+        return 0
+    ctr = 0
+    with open(filename, "r") as f:
+        lines = f.readlines()
+        for line in lines:
+            if '#' not in line:
+                if '@' in line and '.' in line and not line.startswith('http'):
+                    ctr += 1
+                elif line.startswith('http') and '/users/' in line:
+                    ctr += 1
+    return ctr
+
+def getNoOfFollowers(username: str,domain: str) -> int:
+    return getNoOfFollows(username,domain,'followers.txt')
+
+def getFollowingFeed(domain: str,path: str,https: bool,followsPerPage=12,followFile='following') -> {}:
+    if '/'+followFile not in path:
+        return None
+    # handle page numbers
+    headerOnly=True
+    pageNumber=None
+    if '?page=' in path:
+        pageNumber=path.split('?page=')[1]
+        if pageNumber=='true':
+            pageNumber=1
+        else:
+            try:
+                pageNumber=int(pageNumber)
+            except:
+                pass
+        path=path.split('?page=')[0]
+        headerOnly=False
+
+    if not path.endswith('/'+followFile):
+        return None
+    username=None
+    if path.startswith('/users/'):
+        username=path.replace('/users/','',1).replace('/'+followFile,'')
+    if path.startswith('/@'):
+        username=path.replace('/@','',1).replace('/'+followFile,'')
+    if not username:
+        return None
+    if not validUsername(username):
+        return None
+
+    prefix='https'
+    if not https:
+        prefix='http'
+
+    if headerOnly:
+        following = {
+            '@context': 'https://www.w3.org/ns/activitystreams',
+            'first': prefix+'://'+domain+'/users/'+username+'/'+followFile+'?page=1',
+            'id': prefix+'://'+domain+'/users/'+username+'/'+followFile,
+            'totalItems': getNoOfFollows(username,domain),
+            'type': 'OrderedCollection'}
+        return following
+
+    if not pageNumber:
+        pageNumber=1
+
+    nextPageNumber=int(pageNumber+1)
+    following = {
+        '@context': 'https://www.w3.org/ns/activitystreams',
+        'id': prefix+'://'+domain+'/users/'+username+'/'+followFile+'?page='+str(pageNumber),
+        'orderedItems': [],
+        'partOf': prefix+'://'+domain+'/users/'+username+'/'+followFile,
+        'totalItems': 0,
+        'type': 'OrderedCollectionPage'}
+
+    baseDir=os.getcwd()
+    handle=username.lower()+'@'+domain.lower()
+    filename=baseDir+'/accounts/'+handle+'/'+followFile+'.txt'
+    if not os.path.isfile(filename):
+        return following
+    currPage=1
+    pageCtr=0
+    totalCtr=0
+    with open(filename, "r") as f:
+        lines = f.readlines()
+        for line in lines:
+            if '#' not in line:
+                if '@' in line and '.' in line and not line.startswith('http'):
+                    pageCtr += 1
+                    totalCtr += 1
+                    if currPage==pageNumber:
+                        url = prefix + '://' + line.lower().replace('\n','').split('@')[1] + \
+                            '/users/' + line.lower().replace('\n','').split('@')[0]
+                        following['orderedItems'].append(url)
+                elif line.startswith('http') and '/users/' in line:
+                    pageCtr += 1
+                    totalCtr += 1
+                    if currPage==pageNumber:
+                        following['orderedItems'].append(line.lower().replace('\n',''))
+            if pageCtr>=followsPerPage:
+                pageCtr=0
+                currPage += 1
+    following['totalItems']=totalCtr
+    lastPage=int(totalCtr/followsPerPage)
+    if lastPage<1:
+        lastPage=1
+    if nextPageNumber>lastPage:
+        following['next']=prefix+'://'+domain+'/users/'+username+'/'+followFile+'?page='+str(lastPage)
+    return following
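
Not part of the patch: below is a minimal sketch of how the new getFollowingFeed function could be exercised from a local test script, in the same style as the test calls in epicyon.py above. The handle testuser@somedomain.net, the domain and the page size are made-up values, and it assumes that accounts/testuser@somedomain.net/following.txt and followers.txt already contain a few entries (for example created with followPerson/followerOfPerson as above).

from pprint import pprint
from follow import getFollowingFeed

domain='somedomain.net'   # hypothetical test domain
https=False
followsPerPage=2          # same page size that daemon.py sets

# collection header: 'totalItems' plus a 'first' link to page 1
pprint(getFollowingFeed(domain,'/users/testuser/following',https,followsPerPage))

# one OrderedCollectionPage of follows
pprint(getFollowingFeed(domain,'/users/testuser/following?page=1',https,followsPerPage))

# the same function also serves the followers collection
pprint(getFollowingFeed(domain,'/users/testuser/followers?page=1',https,followsPerPage,'followers'))

With the daemon running, the same JSON should be returned over HTTP at /users/<name>/following and /users/<name>/followers via the new blocks added to the GET handler in daemon.py.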