Compare commits

...

6 Commits
0.2...master

| SHA1 | Message | Date |
|------------|--------------------------|-----------------------------|
| f167669bc4 | readme updates | 2024-04-13 23:35:11 -04:00 |
| 0187f66627 | update readme files | 2024-04-13 23:33:58 -04:00 |
| 7b8a6a34fe | update README.md | 2024-04-13 23:24:16 -04:00 |
| b9e3591299 | update README.md | 2024-04-13 23:22:36 -04:00 |
| 73a80f6d3d | add support for TinyURL | 2024-04-13 23:03:50 -04:00 |
| e5b701aa55 | update console messages | 2024-04-13 22:29:15 -04:00 |
7 changed files with 94 additions and 26 deletions

View File

@@ -1,2 +1,24 @@
# Unshorten
# Unshortener
A Firefox extension and API server for revealing links behind shortened URLs.
**Supported sites**:
- t.co
- tinyurl.com
## Usage
In order to deal with CORS, Unshorten must send links to a resolver API.
```shell
cd server
make build
docker run -d --name unshortener -p 8000:8000 unshortener-api
```
Build the extension and install it in Firefox. Right-click a link and choose
"Unshorten Link"; the result will be copied to the clipboard.
```shell
cd extension
make zip
```
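As a quick sanity check that the resolver is reachable, you can hit the API directly. A minimal sketch, not part of the repository: it assumes the container above is listening on port 8000 and uses the same `?url=` query parameter the extension sends.
```python
# Sketch: query the resolver API directly once the container above is running.
import requests

short_link = "https://tinyurl.com/example"  # hypothetical shortened URL
res = requests.get("http://localhost:8000/", params={"url": short_link}, timeout=5)
# On success the API returns the resolved URL as JSON; otherwise an {"error": ...} object.
print(res.json())
```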

View File

@@ -1,2 +1,14 @@
# Unshorten
A Firefox extension to unshorten links from sites like Twitter.
# Unshortener Extension
A Firefox extension to reveal links behind URL shorteners like Twitter's t.co and TinyURL.
**Supported sites:**
- t.co
- tinyurl.com
## Usage
Build the extension and install it in Firefox. Right-clicking a link reveals a new
"Unshorten link" option that resolves the link and copies it to the clipboard.
```shell
make zip
```

View File

@@ -1,5 +1,6 @@
const shortenerDomains = [
    "t.co"
    "t.co",
    "tinyurl.com"
];
browser.contextMenus.create({
@@ -31,13 +32,14 @@ function unshortenUrl(linkUrl) {
fetch("http://localhost:8000/?url=" + linkUrl)
.then(res => {
if (!res.ok) {
console.log("error fetching result");
console.log("Couldn't unshorten URL: " + res.statusText);
}
return res.json();
})
.then(unshortenedUrl => {
console.log("unshortened: " + unshortenedUrl)
navigator.clipboard.writeText(unshortenedUrl)
.catch(err => console.error("couldn't copy to clipboard", err));
});
.catch(err => console.error("Couldn't copy to clipboard: ", err));
})
.catch(err => {console.error("Couldn't contact server:", err)});
}

View File

@@ -1,7 +1,7 @@
{
    "manifest_version": 2,
    "name": "Unshortener",
    "version": "0.2",
    "version": "0.3",
    "description": "Unshorten links from Twitter.",

View File

@@ -1,2 +1,14 @@
# Unshorten Server
Simple FastAPI app to unshorten URLs.
# Unshortener Resolver API
Simple FastAPI app to resolve links behind shortened URLs.
**Supported sites**:
- t.co
- tinyurl.com
## Usage
Build and run the Docker container:
```shell
make build
docker run -d --name unshortener -p 8000:8000 unshortener-api
```

View File

@@ -3,10 +3,11 @@ from typing import Optional
from urllib.parse import urlparse
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from unshorteners import unshorten_twitter
from unshorteners import unshorten_twitter, unshorten_tinyurl
UNSHORTEN = {
    't.co': unshorten_twitter
    't.co': unshorten_twitter,
    'tinyurl.com': unshorten_tinyurl
}
CACHE = {}
@@ -32,9 +33,11 @@ async def receive_url(url: Optional[str] = None):
return {"error": f"cannot unshorten {domain}"}
if url in CACHE:
unshortened = CACHE[url]
else:
unshortened = UNSHORTEN[domain](url)
CACHE[url] = unshortened
return CACHE[url]
return unshortened
result = UNSHORTEN[domain](url)
if result:
CACHE[url] = result
return result
return {"error": "server error"}

View File

@@ -1,19 +1,36 @@
"""Unshortening functions"""
import re
from typing import Optional
import requests
def unshorten_twitter(url: str):
def unshorten_tinyurl(url: str) -> Optional[str]:
"""Retrieve the actual URL behind a TinyURL."""
try:
response = requests.get(url, timeout=4, allow_redirects=False)
except requests.RequestException:
return None
if response.status_code == 301:
return response.headers.get("location", None)
return None
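The helper above can be exercised on its own; a minimal sketch with a hypothetical link follows. The code treats a 301 response as the expected case and reads the destination from the Location header rather than following the redirect.
```python
# Sketch: calling unshorten_tinyurl directly with a hypothetical link.
from unshorteners import unshorten_tinyurl

resolved = unshorten_tinyurl("https://tinyurl.com/example")  # hypothetical shortened URL
print(resolved or "could not resolve")  # destination URL, or None on error / non-301 reply
```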
def unshorten_twitter(url: str) -> Optional[str]:
"""Retrieve the actual URL behind a Twitter URL."""
pattern = re.compile(r"<title>(.*?)<\/title>")
response = requests.get(
url=url,
headers={
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:124.0) Gecko/20100101 Firefox/124.0"
},
timeout=4
)
try:
response = requests.get(
url=url,
headers={
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:124.0) Gecko/20100101 Firefox/124.0"
},
timeout=4
)
except requests.RequestException:
return None
match = pattern.search(response.text)
if match: