Compare commits
204 Commits
yotter-dev
...
dev-indep
Author | SHA1 | Date | |
---|---|---|---|
|
b43a72ab7b | ||
|
71949b8536 | ||
|
b2b4abc541 | ||
|
d174cecdd4 | ||
|
b07957dcb1 | ||
|
4bc4993c2f | ||
|
faf1be26f9 | ||
|
09fbf47ed8 | ||
|
3d56fed2eb | ||
|
dedfe652af | ||
|
da9892333a | ||
|
beb3758961 | ||
|
ebe9740684 | ||
|
664a07f766 | ||
|
4b7e99e9d1 | ||
|
d92f028e54 | ||
|
612114ff3d | ||
|
37fbf298dd | ||
|
8216a9b1f6 | ||
|
f90ce55b1e | ||
|
6e7da09047 | ||
|
b1f538ec14 | ||
|
7bb75909f4 | ||
|
789d5d16f7 | ||
|
11acb12aea | ||
|
f0a5d2f167 | ||
|
bb38b8d9d7 | ||
|
2edcab95a4 | ||
|
43f2904329 | ||
|
0176703f88 | ||
|
2ad6540b57 | ||
|
432d12a695 | ||
|
4370e70258 | ||
|
86e6132266 | ||
|
913506df59 | ||
|
d0aa476f70 | ||
|
1d52e68b3e | ||
|
255c162e6c | ||
|
652b889db9 | ||
|
5770913103 | ||
|
39fb6294d7 | ||
|
34a57e7d05 | ||
|
3aae43e41b | ||
|
3688ad517e | ||
|
07796eae25 | ||
|
005547cb82 | ||
|
bd78ea8b80 | ||
|
46faa07273 | ||
|
d3d67858a6 | ||
|
44d69394f6 | ||
|
7d4bff599b | ||
|
bf0647e95a | ||
|
7feef8c07f | ||
|
fe31152a4f | ||
|
1c2e1a6a00 | ||
|
59211be961 | ||
|
c06a71a086 | ||
|
34afa311aa | ||
|
2821e8859f | ||
|
b7abd7900f | ||
|
117842c5e0 | ||
|
95a19fc76d | ||
|
5b26cb7f2e | ||
|
1a68eb15fb | ||
|
3847244b93 | ||
|
af0872ae12 | ||
|
70abdbfcac | ||
|
92374f2690 | ||
|
e08d8ade7a | ||
|
50a59a41b7 | ||
|
1d732b9e9c | ||
|
ba1f23d77e | ||
|
e0f8ac3ee4 | ||
|
a8b05edba1 | ||
|
d2f3585b6a | ||
|
4525922ee0 | ||
|
887d2177e4 | ||
|
6639f888cd | ||
|
1a8e730a0a | ||
|
4a4cf2bd78 | ||
|
2962e68bc0 | ||
|
b64a50d4ab | ||
|
cd8c632838 | ||
|
cc6d6ec503 | ||
|
8bbdd81df0 | ||
|
1cbfbc03c8 | ||
|
77fef07a6a | ||
|
a9151ebd46 | ||
|
ba28c82ab6 | ||
|
b2c3287144 | ||
|
85c24975e0 | ||
|
9fb1e5597e | ||
|
2b20fa7503 | ||
|
d39ebac719 | ||
|
fac46ee853 | ||
|
d0428db939 | ||
|
bd109b5df3 | ||
|
28176c5823 | ||
|
0bf1a53c7a | ||
|
92689b954c | ||
|
0454c711d4 | ||
|
86b80502b6 | ||
|
94ccbc4f99 | ||
|
8f6c5e4463 | ||
|
5e9f04a64d | ||
|
1fc218605e | ||
|
34d6491a8a | ||
|
9b68d04e5a | ||
|
dcbff244b1 | ||
|
aa54096ad5 | ||
|
2d2f58791a | ||
|
61a694e1d4 | ||
|
889faa776c | ||
|
e4e8754fc8 | ||
|
c3f4247c31 | ||
|
a8b621acd7 | ||
|
7316c3a70c | ||
|
7b0f178fd5 | ||
|
417af1a5f1 | ||
|
36294cb0a9 | ||
|
a7c081174b | ||
|
4b098d7f52 | ||
|
4d4d44ca55 | ||
|
39dafeaade | ||
|
e58fbb873f | ||
|
1162f2d489 | ||
|
b285081ca8 | ||
|
3d8818a818 | ||
|
6514e80ea4 | ||
|
eda19ffebb | ||
|
b004a2da52 | ||
|
c0fc666d83 | ||
|
b84dc6dcc4 | ||
|
b5a0f84628 | ||
|
3564c19838 | ||
|
1585b47a76 | ||
|
9e7af17c73 | ||
|
784fe27b3c | ||
|
a97b7429de | ||
|
933d0b2c83 | ||
|
98b0b16f5a | ||
|
923c3f2a62 | ||
|
64dc9b6658 | ||
|
22b9c84d76 | ||
|
6375ec6626 | ||
|
84b5008937 | ||
|
b5bfca0280 | ||
|
d7ad71bb7f | ||
|
d654ed132d | ||
|
027f35983b | ||
|
ff2d8e67d3 | ||
|
2b496ad70e | ||
|
158117a66a | ||
|
78d6044dd2 | ||
|
016bcd4775 | ||
|
be30aa5c82 | ||
|
2cb60b6cb1 | ||
|
21df1b4182 | ||
|
3bbdede351 | ||
|
1342e34383 | ||
|
aa2372cd13 | ||
|
11d9e07eae | ||
|
b85876e136 | ||
|
1692d08983 | ||
|
dbe13e21ea | ||
|
e1cacd160a | ||
|
d7839fb31b | ||
|
92aa6b55c6 | ||
|
d915f171be | ||
|
3a74e4add8 | ||
|
294c8530de | ||
|
fa893f2d05 | ||
|
4b153cf7e9 | ||
|
3846264dd4 | ||
|
e220f02aad | ||
|
29297a0be4 | ||
|
686a9bef91 | ||
|
ec8f2d8dde | ||
|
9ac7be7a3b | ||
|
4e3a1f4ed3 | ||
|
bd5d300386 | ||
|
36abcbd0d7 | ||
|
4d44cd267f | ||
|
d279413d8f | ||
|
98e817e3db | ||
|
90b8018228 | ||
|
691e35c22c | ||
|
fde55caaf5 | ||
|
efd389f3b7 | ||
|
023798abce | ||
|
d6501f4cb9 | ||
|
0f573dc7df | ||
|
627e5e366f | ||
|
8866791251 | ||
|
450a1c21d9 | ||
|
de0bd653d4 | ||
|
d028a2c343 | ||
|
eef05cc769 | ||
|
f2badcef55 | ||
|
fb2d3a962b | ||
|
7a71b6914a | ||
|
6e8e3b2131 | ||
|
eb9bd592f1 | ||
|
03f8e22667 |
@ -1,3 +1,4 @@
|
||||
.circleci
|
||||
.git
|
||||
.github
|
||||
.gitignore
|
||||
@ -6,3 +7,5 @@ Dockerfile
|
||||
docker-compose.yml
|
||||
LICENSE
|
||||
*.md
|
||||
dockerhash.txt
|
||||
app/static
|
||||
|
37
.github/ISSUE_TEMPLATE/bug_report.md
vendored
Normal file
37
.github/ISSUE_TEMPLATE/bug_report.md
vendored
Normal file
@ -0,0 +1,37 @@
|
||||
---
|
||||
name: Bug report
|
||||
about: Create a report to help us improve
|
||||
title: ''
|
||||
labels: bug
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
**Describe the bug**
|
||||
A clear and concise description of what the bug is.
|
||||
|
||||
**To Reproduce**
|
||||
Steps to reproduce the behavior:
|
||||
1. Go to '...'
|
||||
2. Click on '....'
|
||||
3. Scroll down to '....'
|
||||
4. See error
|
||||
|
||||
**Expected behavior**
|
||||
A clear and concise description of what you expected to happen.
|
||||
|
||||
**Screenshots**
|
||||
If applicable, add screenshots to help explain your problem.
|
||||
|
||||
**Desktop (please complete the following information):**
|
||||
- OS: [e.g. iOS]
|
||||
- Browser [e.g. chrome, safari]
|
||||
- Version [e.g. 22]
|
||||
|
||||
**Smartphone (please complete the following information):**
|
||||
- OS: [e.g. iOS8.1]
|
||||
- Browser [e.g. stock browser, safari]
|
||||
- Version [e.g. 22]
|
||||
|
||||
**Additional context**
|
||||
Add any other context about the problem here.
|
20
.github/ISSUE_TEMPLATE/feature_request.md
vendored
Normal file
20
.github/ISSUE_TEMPLATE/feature_request.md
vendored
Normal file
@ -0,0 +1,20 @@
|
||||
---
|
||||
name: Feature request
|
||||
about: Suggest an idea for this project
|
||||
title: ''
|
||||
labels: feature request
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
**Is your feature request related to a problem? Please describe.**
|
||||
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
|
||||
|
||||
**Describe the solution you'd like**
|
||||
A clear and concise description of what you want to happen.
|
||||
|
||||
**Describe alternatives you've considered**
|
||||
A clear and concise description of any alternative solutions or features you've considered.
|
||||
|
||||
**Additional context**
|
||||
Add any other context or screenshots about the feature request here.
|
106
.github/workflows/docker-build.yml
vendored
106
.github/workflows/docker-build.yml
vendored
@ -8,33 +8,39 @@ on:
|
||||
- dev-indep
|
||||
|
||||
jobs:
|
||||
build-docker:
|
||||
cpython-build-docker:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/checkout@v2.3.4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v1
|
||||
uses: docker/setup-qemu-action@v1.2.0
|
||||
with:
|
||||
platforms: all
|
||||
- name: Set up Docker Buildx
|
||||
id: buildx
|
||||
uses: docker/setup-buildx-action@v1
|
||||
uses: docker/setup-buildx-action@v1.5.1
|
||||
with:
|
||||
version: latest
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v1
|
||||
uses: docker/login-action@v1.10.0
|
||||
with:
|
||||
username: ${{ secrets.DOCKER_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||
- name: Get hash of latest image
|
||||
run: docker pull python:3-alpine && docker inspect --format='{{index .RepoDigests 0}}' python:3-alpine > dockerhash.txt
|
||||
- name: Write the current version to a file
|
||||
run: "{ git describe --tags --abbrev=0 & date +\"%d-%m-%y\" & git rev-list HEAD --max-count=1 --abbrev-commit;} > version.txt"
|
||||
- name: cache docker cache
|
||||
uses: actions/cache@v2.1.1
|
||||
uses: actions/cache@v2.1.6
|
||||
with:
|
||||
path: ${{ github.workspace }}/cache
|
||||
key: ${{ runner.os }}-docker-${{ hashFiles('**/requirements.txt') }}
|
||||
key: ${{ runner.os }}-docker-cpython-${{ hashFiles('**/requirements.txt') }}-${{ hashFiles('**/dockerhash.txt') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-docker-${{ hashFiles('**/requirements.txt') }}
|
||||
${{ runner.os }}-docker-cpython-
|
||||
- name: Build and push
|
||||
uses: docker/build-push-action@v2
|
||||
uses: docker/build-push-action@v2.6.1
|
||||
with:
|
||||
context: .
|
||||
file: ./Dockerfile
|
||||
@ -43,3 +49,85 @@ jobs:
|
||||
tags: ytorg/yotter:latest
|
||||
cache-from: type=local,src=cache
|
||||
cache-to: type=local,dest=cache
|
||||
pypy-build-docker:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2.3.4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v1.2.0
|
||||
with:
|
||||
platforms: all
|
||||
- name: Set up Docker Buildx
|
||||
id: buildx
|
||||
uses: docker/setup-buildx-action@v1.5.1
|
||||
with:
|
||||
version: latest
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v1.10.0
|
||||
with:
|
||||
username: ${{ secrets.DOCKER_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||
- name: Get hash of latest image
|
||||
run: docker pull pypy:3-slim-buster && docker inspect --format='{{index .RepoDigests 0}}' pypy:3-slim-buster > dockerhash.txt
|
||||
- name: Write the current version to a file
|
||||
run: "{ git describe --tags --abbrev=0 & date +\"%d-%m-%y\" & git rev-list HEAD --max-count=1 --abbrev-commit;} > version.txt"
|
||||
- name: cache docker cache
|
||||
uses: actions/cache@v2.1.6
|
||||
with:
|
||||
path: ${{ github.workspace }}/cache
|
||||
key: ${{ runner.os }}-docker-pypy-${{ hashFiles('**/requirements.txt') }}-${{ hashFiles('**/dockerhash.txt') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-docker-pypy-
|
||||
- name: Build and push
|
||||
uses: docker/build-push-action@v2.6.1
|
||||
with:
|
||||
context: .
|
||||
file: ./pypy.Dockerfile
|
||||
platforms: linux/amd64,linux/arm64
|
||||
push: true
|
||||
tags: ytorg/yotter:pypy
|
||||
cache-from: type=local,src=cache
|
||||
cache-to: type=local,dest=cache
|
||||
nginx-build-docker:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2.3.4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v1.2.0
|
||||
with:
|
||||
platforms: all
|
||||
- name: Set up Docker Buildx
|
||||
id: buildx
|
||||
uses: docker/setup-buildx-action@v1.5.1
|
||||
with:
|
||||
version: latest
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v1.10.0
|
||||
with:
|
||||
username: ${{ secrets.DOCKER_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||
- name: Get hash of latest image
|
||||
run: docker pull nginx:mainline-alpine && docker inspect --format='{{index .RepoDigests 0}}' nginx:mainline-alpine > dockerhash.txt
|
||||
- name: Write the current version to a file
|
||||
run: "{ git describe --tags --abbrev=0 & date +\"%d-%m-%y\" & git rev-list HEAD --max-count=1 --abbrev-commit;} > version.txt"
|
||||
- name: cache docker cache
|
||||
uses: actions/cache@v2.1.6
|
||||
with:
|
||||
path: ${{ github.workspace }}/cache
|
||||
key: ${{ runner.os }}-docker-nginx-${{ hashFiles('**/dockerhash.txt') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-docker-nginx-
|
||||
- name: Build and push
|
||||
uses: docker/build-push-action@v2.6.1
|
||||
with:
|
||||
context: .
|
||||
file: ./Dockerfile
|
||||
platforms: linux/amd64,linux/arm64
|
||||
push: true
|
||||
tags: ytorg/nginx:latest
|
||||
cache-from: type=local,src=cache
|
||||
cache-to: type=local,dest=cache
|
||||
|
2
.gitignore
vendored
2
.gitignore
vendored
@ -157,3 +157,5 @@ app/cache/*
|
||||
app/cache
|
||||
./app/cache/*
|
||||
./app/cache
|
||||
.github/ISSUE_TEMPLATE/
|
||||
.github/ISSUE_TEMPLATE/*
|
||||
|
190
CHANGELOG.md
190
CHANGELOG.md
@ -1,190 +0,0 @@
|
||||
## Changelog is not updated anymore. [Check commits](https://github.com/ytorg/Yotter/commits/dev-indep) to see new features / fixes.
|
||||
|
||||
## [0.2.7] - 2020.09.26
|
||||
### How to update:
|
||||
1. See [UPDATE](https://github.com/ytorg/Yotter/blob/dev-indep/SELF-HOSTING.md#updating-the-server) if you are on a manual installation. Docker can be updated normally.
|
||||
|
||||
2. **IMPORTANT** You will need to change the nginx config file (`/etc/nginx/sites-enabled/yotter`) by adding `/a` on the proxy rule so it should look like this:
|
||||
```
|
||||
location ~ (/videoplayback|/vi/|/a/) {
|
||||
proxy_buffering off;
|
||||
resolver 1.1.1.1;
|
||||
proxy_pass https://$arg_host;
|
||||
proxy_set_header Host $arg_host;
|
||||
add_header Access-Control-Allow-Origin *;
|
||||
}
|
||||
```
|
||||
|
||||
3. Reload nginx `sudo service nginx reload`
|
||||
### Added
|
||||
- [x] Admins can add a donation link to the Instance - Check `yotter-config.json` after update.
|
||||
- [x] You can now use `<instance>/registrations_status/icon` and `<instance>/registrations_status/text` to get registrations info.
|
||||
### Fixed
|
||||
- [x] Channel images were not proxied.
|
||||
|
||||
## [0.2.6] - 2020.09.20
|
||||
### How to update
|
||||
1. See [UPDATE](https://github.com/ytorg/Yotter/blob/dev-indep/SELF-HOSTING.md#updating-the-server) if you are on a manual installation. Docker can be updated normally.
|
||||
|
||||
2. **IMPORTANT** You will need to change the nginx config file (`/etc/nginx/sites-enabled/yotter`) and it should look [like this](https://bin.nixnet.services/?414267b3cfd753b4#EX8Zwpj4iXTBjkGThCiaV88bYZfzvmmaG2sokLKKYQFu).
|
||||
|
||||
3. Reload nginx `sudo service nginx reload`
|
||||
|
||||
### Added
|
||||
- [x] Admin tools - Delete inactive users.
|
||||
- [x] Youtube: Play only audio.
|
||||
|
||||
### Changed
|
||||
- [x] Database character limits.
|
||||
|
||||
## [0.2.5] - 2020.09.10
|
||||
### How to update
|
||||
1. `sudo supervisorctl stop yotter`
|
||||
2. `cd </path/to>/Yotter`
|
||||
3. `git pull`
|
||||
4. `source venv/bin/activate`
|
||||
5. `pip install -r requirements.txt`
|
||||
6. `sudo nano /etc/nginx/sites-enabled/yotter`
|
||||
* Add the following lines right below the `location /static {.....}` block:
|
||||
```
|
||||
location /videoplayback {
|
||||
proxy_buffering off;
|
||||
resolver 1.1.1.1;
|
||||
proxy_pass https://$arg_hostname;
|
||||
proxy_set_header Host $arg_hostname;
|
||||
add_header Access-Control-Allow-Origin *;
|
||||
}
|
||||
```
|
||||
|
||||
> Your `/etc/nginx/sites-enabled/yotter` file should look something [like this](https://bin.nixnet.services/?d319d06feb1f5756#HgsMgpN9kob7vB5GpUtdTtqZeCdb5SEggLzwr4YAaYo).
|
||||
7. Edit your `yotter-config.json` file and enable the `nginxVideoStream` option.
|
||||
8. `sudo service nginx reload`
|
||||
9. `sudo supervisorctl start yotter`
|
||||
|
||||
### Added
|
||||
- [x] Nginx video streaming for better experience - [See how to activate it]()
|
||||
- [x] New data extractor module for videos.
|
||||
- [x] Show deleted tweets as deleted instead of error.
|
||||
|
||||
### Fixed
|
||||
- [x] Youtube feed not loading due to datetime not found
|
||||
|
||||
## [0.2.4] - 2020.09.07
|
||||
### Changed
|
||||
- [x] Remove non implemented settings from settings page.
|
||||
- [x] Changed video streaming chunk size.
|
||||
- [x] Video streaming now has a smaller load for the server.
|
||||
- [x] Video streaming is (a bit) more efficient - YoutubeDL moved on /watch instead.
|
||||
- [x] Settings page improved.
|
||||
- [x] Default DB to .env or environment DATABASE_URI and fallback to app.db.
|
||||
|
||||
### Added
|
||||
- [x] Add instance info on settings page.
|
||||
- [x] Documentation: Contributing section.
|
||||
- [x] Option to delete an account.
|
||||
- [x] Show video title on browser tab title on `/watch`.
|
||||
- [x] Ability for admins to allow non-registered users to use the service.
|
||||
- [x] Added last_seen - See #35
|
||||
- [x] See last 40 minutes active users on settings.
|
||||
- [x] Video streaming headers - #27
|
||||
|
||||
### Fixed
|
||||
- [x] Video descriptions overflowing the description box.
|
||||
- [x] Twitter: mentioned users on feed lead to broken links.
|
||||
|
||||
## [0.2.3] - 2020-09-04
|
||||
### Added
|
||||
- [x] Youtube: Proxy all images through Yotter.
|
||||
- [x] General: Add server config file.
|
||||
- [x] General: @Sn0wed1 added a Docker file and Docker installation instructions.
|
||||
|
||||
## [0.2.2] - 2020-08-27
|
||||
### Changed
|
||||
- [x] Twitter: Scrap nitter pages instead of using RSS.
|
||||
- [x] Youtube: Improved video UI
|
||||
- [x] General: Following management page UI improved.
|
||||
### Added
|
||||
- [x] Twitter: Quotations are now shown
|
||||
- [x] Youtube: Ability to seek videos!
|
||||
### Fixed
|
||||
- [x] Twitter: Empty feed was showing an ugly text
|
||||
|
||||
## [0.2.1] - 2020-08-25
|
||||
### Changed
|
||||
- [x] Twitter: Improve general UI and efficiency
|
||||
- [x] Twitter & Youtube: Posts older than 15 days are not shown anymore
|
||||
- [x] Youtube: All channel links now link within Parasitter
|
||||
- [x] Twitter: Improve database logic
|
||||
- [x] Twitter: Remove Following tab and move it to 'following number'
|
||||
- [x] General: Ability to import accounts from exported JSON file
|
||||
|
||||
### Added
|
||||
- [x] General: Welcome page
|
||||
- [x] Youtube: Ability to view a channel page: /channel/<id>
|
||||
- [x] Youtube: Ability to search with full text: Channels and videos.
|
||||
- [x] Youtube: Ability to Follow and Unfollow a user from the channel profile.
|
||||
- [x] Youtube: Manage followed accounts clicking on 'following number'
|
||||
|
||||
### Fixed
|
||||
- [x] Youtube: Channel link on channel videos not working.
|
||||
- [x] General: Most usernames were already taken.
|
||||
|
||||
## [0.2.1a] - 2020-08-16
|
||||
#### Breaking dependence with Invidious.
|
||||
### Changed
|
||||
- [x] Get video info through `youtube-dl`
|
||||
- [x] Stream video to client through local proxy.
|
||||
- [x] List videos without Invidious RSS feed.
|
||||
- [x] Use Video.js player.
|
||||
- [x] Search no longer depends on Invidious / APIs
|
||||
|
||||
## [0.2.0] - 2020-07-29
|
||||
### Added
|
||||
- [x] Export your followed accounts (Youtube and Twitter) to a JSON file
|
||||
- [x] Youtube: follow Youtube accounts.
|
||||
- [x] Youtube: Manage suscriptions
|
||||
- [x] Youtube: Show video time duration
|
||||
|
||||
### Changed
|
||||
- [x] Efficiency improvements. ~1s reduction on fetching time.
|
||||
- [x] Minor UI changes.
|
||||
|
||||
### Fixed
|
||||
- [x] Saving posts didn't work on 2020.07.24 update.
|
||||
|
||||
## [0.1.0] - 2020-07-19
|
||||
### Added
|
||||
- [x] Ability to save posts.
|
||||
- [x] Ability to remove a saved post.
|
||||
- [x] Error pages: Error 500, Error 405
|
||||
- [x] Open the original post on Nitter.
|
||||
|
||||
### Changed
|
||||
- [x] Significant improvement on fetching feed efficiency - Parallelism applied.
|
||||
- [x] Changelogs now using [Keep a changelog](https://keepachangelog.com/en/1.0.0/) style.
|
||||
|
||||
### Fixed
|
||||
- [x] "Saved" menu element logged out instead of showing saved posts.
|
||||
|
||||
## [0.0.2] - 2020-07-14
|
||||
### Added
|
||||
- [x] First implementation of saved posts - Not working yet.
|
||||
- [x] Error 404 page.
|
||||
- [x] Empty feed now shows a notice.
|
||||
- [x] Requirements.txt file for a better installation and update experience.
|
||||
- [x] Pinned posts are now marked as it.
|
||||
|
||||
### Changed
|
||||
- [x] More flexible user search. Search by username and show a list of possible results.
|
||||
- [x] Minor UI fixes.
|
||||
- [x] Fetching of accounts in a slightly more efficient way.
|
||||
|
||||
|
||||
|
||||
## [0.0.1] - 2020-07-13
|
||||
### Added
|
||||
- [x] Ability to follow accounts.
|
||||
- [x] Ability to unfollow accounts.
|
||||
- [x] Ability to register users.
|
||||
- [x] Ability to visit a user profile.
|
||||
- [x] Search a user by its exact username.
|
@ -8,7 +8,7 @@ WORKDIR /usr/src/app
|
||||
COPY ./requirements.txt /usr/src/app
|
||||
|
||||
# Build Dependencies
|
||||
RUN apk --no-cache add gcc musl-dev libffi-dev openssl-dev libxml2-dev libxslt-dev file llvm-dev make g++
|
||||
RUN apk --no-cache add gcc musl-dev libffi-dev openssl-dev libxml2-dev libxslt-dev file llvm-dev make g++ cargo rust
|
||||
|
||||
# Python Dependencies
|
||||
RUN pip install --no-cache-dir --prefix=/install wheel cryptography gunicorn pymysql
|
||||
|
101
README.md
101
README.md
@ -1,8 +1,12 @@
|
||||
## This project is no longer maintained. Visit [this repo](https://github.com/TeamPiped/Piped) for an alternative.
|
||||
|
||||
<p align="center"> <img width="700" src="app/static/img/banner.png"> </img></p>
|
||||
<p align="center">
|
||||
<a href="https://www.gnu.org/licenses/gpl-3.0"><img alt="License: GPL v3" src="https://img.shields.io/badge/License-AGPLv3-blue.svg"></img></a>
|
||||
<a href="https://github.com/pluja/Yotter"><img alt="Development state" src="https://img.shields.io/badge/State-Beta-blue.svg"></img></a>
|
||||
<a href="https://github.com/pluja/Yotter/pulls"><img alt="Pull Requests Welcome" src="https://img.shields.io/badge/PRs-Welcome-green.svg"></img></a>
|
||||
<a href="https://git.kavin.rocks/kavin/Yotter"><img alt="Mirror 1" src="https://img.shields.io/badge/Mirror1-git.kavin.rocks-teal"></img></a>
|
||||
<a href="https://84.38.177.154/libremirrors/ytorg/Yotter"><img alt="Mirror 2" src="https://img.shields.io/badge/Mirror2-git.rip-purple"></img></a>
|
||||
</p>
|
||||
|
||||
Yotter allows you to follow and gather all the content from your favorite Twitter and YouTube accounts in a *beautiful* feed so you can stay up to date without compromising your privacy at all. Yotter is written with Python and Flask and uses Semantic-UI as its CSS framework.
|
||||
@ -12,15 +16,16 @@ Yotter is possible thanks to several open-source projects that are listed on the
|
||||
# Index:
|
||||
* [Why](#why)
|
||||
* [Features](#features)
|
||||
* [Roadmap](#roadmap)
|
||||
* [FAQ](#FAQ)
|
||||
* [Privacy and Security](#privacy)
|
||||
* [Public instances](#public-instances)
|
||||
* [Self hosting](https://github.com/ytorg/Yotter/blob/dev-indep/SELF-HOSTING.md)
|
||||
* [Update the server](https://github.com/ytorg/Yotter/blob/dev-indep/SELF-HOSTING.md#updating-the-server)
|
||||
* [Contributing and contact](#contributing)
|
||||
* [Powered by](#powered-by)
|
||||
* [Donate](#donate)
|
||||
* [Screenshots](#screenshots)
|
||||
* [Redirect Extensions](#redirect)
|
||||
|
||||
# Why
|
||||
Youtube and Twitter are well-known by their invasive and data-stealing *'privacy policies'*. You give them a **lot** of data; from ideology to music taste, your likes and dislikes, your free-time schedule, and much more than you think. This much data gives such companies a control over you that you would never thought.
|
||||
@ -29,7 +34,7 @@ With the *particular* data about you, they can get money from the highest bidder
|
||||
|
||||
Further more, they don't care about **what you in particular watch**, this is only sold to the highest bidder who then may or may not do the harm. What they care more about is **what people watch** this is the important data and the one that allows to manipulate, bias, censor, etc.
|
||||
|
||||
So we need platforms and spaces where we can freely watch and listen content without these watchful eyes upon us. Ideally, everyone would use a free (as in freedom) and decentralized platform like [Peertube](https://joinpeertube.org/), [LBRY](https://lbry.tv/), [Mastodon](https://joinmastodon.org/) or [Pleroma](https://pleroma.social/) but things are not like this. The main multimedia content factory is Youtube and the microblogging king is Twitter. So we will do whatever is possible to be able to watch and read the content and avoid the surveillance that seeks us these days. We will resist.
|
||||
So we need platforms and spaces where we can freely watch and listen content without these watchful eyes upon us. Ideally, everyone would use a free (as in freedom) and decentralized platform like [Peertube](https://joinpeertube.org/), [Odysee](https://odysee.com/), [Mastodon](https://joinmastodon.org/) or [Pleroma](https://pleroma.social/) but things are not like this. The main multimedia content factory is Youtube and the microblogging king is Twitter. So we will do whatever is possible to be able to watch and read the content and avoid the surveillance that seeks us these days. We will resist.
|
||||
|
||||
# Features:
|
||||
- [x] No Ads.
|
||||
@ -44,15 +49,28 @@ So we need platforms and spaces where we can freely watch and listen content wit
|
||||
- [x] Save your favourite Tweets.
|
||||
- [x] Tor-friendly.
|
||||
- [x] Terminal-browser friendly.
|
||||
- [x] Fair non-adictive UX - No recommendations, no trending, no tops. Just your feed and your searches.
|
||||
|
||||
*Video player is VideoJS, which uses JavaScript. But if JavaScript is disabled Yotter still works perfectly and uses the default HTML video player.
|
||||
> And many more to come!
|
||||
|
||||
### Roadmap
|
||||
The following features are planned to be implemented in the near future:
|
||||
* [ ] Improve performance and efficiency
|
||||
|
||||
#### Youtube specific:
|
||||
* [ ] Subtitles
|
||||
* [ ] > 720p Quality
|
||||
* [ ] Save youtube videos
|
||||
* [ ] Support for live streams
|
||||
|
||||
#### Twitter specific:
|
||||
* [ ] Translations
|
||||
|
||||
# FAQ
|
||||
### What's the difference between this and Invidious?
|
||||
At first I started working on this project as a solution for following Twitter accounts (a thing that can't be done with Nitter) and getting a Twitter-like feed. Weeks later the leader of Invidious, Omar Roth, announced that he was stepping away from the project. As an Invidious active user, this made me think that a new alternative was needed for the community and also an alternative with an easier programmin language for most people (as Invidious is written in Crystal). So I started developing a '*written-in-python Invidious alternative*' and it went quite well.
|
||||
At first I started working on this project as a solution for following Twitter accounts (a thing that can't be done with Nitter) and getting a Twitter-like feed. Weeks later the leader of Invidious, Omar Roth, announced that he was stepping away from the project. As an Invidious active user, this made me think that a new alternative was needed for the community and also an alternative with an easier programming language for most people (as Invidious is written in Crystal). So I started developing a '*written-in-python Invidious alternative*' and it went quite well.
|
||||
|
||||
I hope that this project can prosperate, gain contributors, new instances and create a good community around it.
|
||||
I hope that this project can prosper, gain contributors, new instances and create a good community around it.
|
||||
|
||||
### Why do I have to register to use Yotter?
|
||||
|
||||
@ -64,6 +82,15 @@ Registering has two main reasons:
|
||||
|
||||
Admins are allowed to remove restrictions on any page they want. [Check this section](https://github.com/ytorg/Yotter/blob/dev-indep/SELF-HOSTING.md#removing-log-in-restrictions) to learn how.
|
||||
|
||||
If you want to use Yotter, it is recommended to self-host your own instance. You can use it for personal use or open it to the world. Self-hosting makes Yotter stronger and gives you full power. See [self hosting guide](https://github.com/ytorg/Yotter/blob/dev-indep/SELF-HOSTING.md).
|
||||
|
||||
### Will you ever implement video recommendations, trending videos, etc?
|
||||
No. From my point of view, these are toxic features. I, and every user, should be using all *social media* to get the content **they** want. Recomendations, trending, autoplay next video, etc. are all features designed to trap users on using the app, to make them forget about the time spent there and to create an addiction to it. No, I won't implement any toxic features on Yotter. Yotter will keep the UI clean, fast and simple.
|
||||
|
||||
You get your feed from followed accounts and you can search for any video you like. Only thing I would consider implementing would be some kind of page where you can ask for recommendations for a particular video. This way the user would, voluntarily, ask for the recommendations rather than having a temptation to click on a new, youtube-bias-recommended video.
|
||||
|
||||
Please read: [1](https://arxiv.org/abs/1912.11211), [2](https://medium.com/dataseries/how-youtube-is-addictive-259d5c575883), [3](https://www.their.tube/), [4](https://www.sciencedirect.com/science/article/pii/S1386505619308743?via%3Dihub)
|
||||
|
||||
# Privacy
|
||||
#### Connections
|
||||
Yotter cares about your privacy, and for this it will never make any connection to Twitter or Youtube on the client. Every request is proxied through the Yotter server; video streaming, photos, data gathering, scrapping, etc.
|
||||
@ -90,11 +117,11 @@ I always recommend self-hosting, as you will be the only person with access to y
|
||||
> Important note: The **client** never connects to Google / Youtube however, the server does in order to gather all the necessary things!
|
||||
|
||||
# Public Instances
|
||||
| name |server location|Capacity|registrations|
|
||||
| ------------ | ------------ | ------------ |------------|
|
||||
| https://yotter.xyz/ |Germany| [Check](https://yotter.xyz/registrations_status/text)|<img src="https://yotter.xyz/registrations_status/icon?4" width="17">|
|
||||
| https://yotter.kavin.rocks/ |India| [Check](https://yotter.kavin.rocks/registrations_status/text) |<img src="https://yotter.kavin.rocks/registrations_status/icon?4" width="15">|
|
||||
| https://yotter.jank.media/ |Germany| [Check](https://yotter.jank.media/registrations_status/text)|<img src="https://yotter.jank.media/registrations_status/icon?4" width="15">|
|
||||
| Name |Server location|Status & Register|
|
||||
| ------------ | ------------ | ------------ |
|
||||
| https://yotter.xyz/ |Germany| [Go](https://yotter.xyz/status)|
|
||||
| https://yotter.kavin.rocks/ |India| [Go](https://yotter.kavin.rocks/status)|
|
||||
| https://yotter.jank.media/ |Germany| [Go](https://yotter.jank.media/status)|
|
||||
|
||||
# Contributing
|
||||
Contributors are always welcome. You can help in many ways: Coding, organizing, designing, [donating](#donate), maintaining... You choose what you want to help with!
|
||||
@ -107,35 +134,53 @@ We have a [Matrix](https://matrix.org/) room where we discuss anything related w
|
||||
<a href="https://reddit.com/r/Yotter"><img alt="Join Matrix" src="https://img.shields.io/badge/Reddit-r/Yotter-orange.svg">
|
||||
|
||||
# Powered by:
|
||||
These are projects that either make Yotter possible as an essential part of it or that served as inspiration for some parts of the code.
|
||||
These are projects that either make Yotter possible as an **essential part** of it or that served as **inspiration for some parts** of the code.
|
||||
|
||||
* [Nitter](https://nitter.net/)
|
||||
* [youtube-dl](https://github.com/ytdl-org/youtube-dl)
|
||||
* [Youtube-local](https://github.com/user234683/youtube-local)
|
||||
* [youtube-dlc](https://github.com/blackjack4494/yt-dlc)
|
||||
* [Flask](https://flask.palletsprojects.com/)
|
||||
* [SQLAlchemy](https://docs.sqlalchemy.org/en/13/)
|
||||
* [Semantic-UI](https://semantic-ui.com)
|
||||
* [requests-futures](https://github.com/ross/requests-futures)
|
||||
* [microblog](https://github.com/miguelgrinberg/microblog)
|
||||
* [Video.js](https://videojs.com/)
|
||||
* [My fork of youtube_search](https://github.com/pluja/youtube_search-fork)
|
||||
* [Youtube-local](https://github.com/user234683/youtube-local)
|
||||
* [Invidious](https://github.com/iv-org/invidious)
|
||||
|
||||
# Donate
|
||||
# [Donate](https://github.com/pluja/pluja/blob/main/SUPPORT.md)
|
||||
|
||||
[Click here to see donation options](https://github.com/pluja/pluja/blob/main/SUPPORT.md)
|
||||
|
||||
This project is completely free and Open Source and will always be.
|
||||
|
||||
All donations are used to maintain the [yotter.xyz](https://yotter.xyz/) public instance. [This is the server](https://www.netcup.eu/bestellen/produkt.php?produkt=2598) that I have rented for now.
|
||||
|
||||
#### Crypto (preferred):
|
||||
- **Bitcoin**: `bc1q5y3g907ju0pt40me7dr9fy5uhfhfgfv9k3kh3z`
|
||||
- **Monero**: `48nQGAXaC6eFK2Wo7SVVyF9xL333gDHjzdmRL3XETEqbU3w4CcKjjHVUZPU4W3dg1oJL8be3iGtUAQsgV88dzbS7QNpZjC2`
|
||||
#### Fiat:
|
||||
- <a href="https://liberapay.com/pluja/donate"><img alt="Donate using Liberapay" src="https://liberapay.com/assets/widgets/donate.svg"></a>
|
||||
Donations are used to maintain the [yotter.xyz](https://yotter.xyz/) public instance. [This is the server](https://www.netcup.eu/bestellen/produkt.php?produkt=2598) that I have rented for now.
|
||||
|
||||
## Screenshots
|
||||
<p align="center"> <img width="720" src="https://i.imgur.com/6AfXO57.png"> </img></p>
|
||||
<p align="center"> <img width="720" src="https://i.imgur.com/jipjySH.png"> </img></p>
|
||||
<p align="center"> <img width="720" src="https://i.imgur.com/JMUW6VH.png"> </img></p>
|
||||
<p align="center"> <img width="720" src="https://i.imgur.com/a7rM4sv.png"> </img></p>
|
||||
<p align="center"> <img width="720" src="https://i.imgur.com/skXFMqE.png"> </img></p>
|
||||
<p align="center"> <img width="720" src="https://i.imgur.com/AurVw5M.png"> </img></p>
|
||||
#### Twitter / Tweets / Profiles
|
||||
<p align="center"> <img width="720" src="https://i.imgur.com/tA15ciH.png"> </img></p>
|
||||
<p align="center"> <img width="720" src="https://i.imgur.com/BYswFy6.png"> </img></p>
|
||||
|
||||
#### Twitter search
|
||||
<p align="center"> <img width="720" src="https://i.imgur.com/KalBDa5.png"> </img></p>
|
||||
|
||||
#### Youtube feed
|
||||
<p align="center"> <img width="720" src="https://i.imgur.com/rHsKl0e.png"> </img></p>
|
||||
|
||||
#### Youtube video page / Comments
|
||||
<p align="center"> <img width="720" src="https://i.imgur.com/pQhLcvI.png"> </img></p>
|
||||
<p align="center"> <img width="720" src="https://i.imgur.com/kZPGUdq.png"> </img></p>
|
||||
|
||||
#### Youtube channel page
|
||||
<p align="center"> <img width="720" src="https://i.imgur.com/zybRB7X.png"> </img></p>
|
||||
|
||||
#### Youtube search
|
||||
<p align="center"> <img width="720" src="https://i.imgur.com/XHCSNTP.png"> </img></p>
|
||||
|
||||
## Redirect
|
||||
If you want to worry less and enjoy Yotter more, you can use any of the following extensions to redirect Youtube to Yotter automatically:
|
||||
|
||||
* [Youtter](https://addons.mozilla.org/en-US/firefox/addon/youtter/?utm_source=addons.mozilla.org&utm_medium=referral&utm_content=search) - Firefox
|
||||
* [Privacy Redirect](https://github.com/SimonBrazell/privacy-redirect) - Chromium
|
||||
* Set Yotter as an Invidious instance in the extension options.
|
||||
* [HTTPSEverywhere](https://www.eff.org/https-everywhere) - Both
|
||||
* You can set up redirects. Use a "http://youtube.com/ -> https://yotterinstance.xyz/" redirect.
|
||||
|
@ -61,12 +61,13 @@ Now you should be logged in. Make sure to set up a good password. It is recommen
|
||||
```
|
||||
git clone https://github.com/ytorg/Yotter && cd Yotter
|
||||
docker-compose up -d
|
||||
chown -R www-data:www-data /var/run/ytproxy
|
||||
```
|
||||
> You may need to use `sudo` for turning up the docker-compose
|
||||
2. Configure nginx as a reverse proxy to your docker container:
|
||||
* Create a new nginx configuration file:
|
||||
- `sudo nano /etc/nginx/sites-enabled/yotter`
|
||||
* Paste the content of [this file](https://paste.ubuntu.com/p/Bzd9SRCJSG/) to the config file.
|
||||
* Paste the content of [this file](https://paste.ubuntu.com/p/248hh6crWH/) to the config file.
|
||||
- Change `<example.com>` by your domain.
|
||||
* Generate a ssl certificate:
|
||||
- Follow [Let's Encrypt](https://certbot.eff.org/lets-encrypt/ubuntufocal-nginx) guide **(Recommended)**
|
||||
@ -122,7 +123,7 @@ If after the MySQL-server installation you have not been prompted to create a pa
|
||||
* `pip install cryptography`
|
||||
* `pip install -r requirements.txt`
|
||||
|
||||
> You can edit the `yotter-config.json` file. [Check out all the options here](https://github.com/ytorg/Yotter/blob/dev-indep/README.md#configure-the-server)
|
||||
> You can edit the `yotter-config.json` file. [Check out all the options here](#configure-the-server)
|
||||
|
||||
5. Install gunicorn (production web server for Python apps) and pymysql:
|
||||
`pip install gunicorn pymysql`
|
||||
@ -202,9 +203,69 @@ killasgroup=true
|
||||
After you write this configuration file, you have to reload the supervisor service for it to be imported:
|
||||
`sudo supervisorctl reload`
|
||||
|
||||
#### Step 4: Nginx set up and HTTPS
|
||||
#### Step 4: Set up Nginx, http3 proxy and HTTPS
|
||||
The Yotter application server, powered by gunicorn, is now running privately on port 8000. Now we need to expose the application to the outside world by enabling a public-facing web server on ports 80 and 443; these two ports also need to be opened on the firewall to handle the web traffic of the application. I want this to be a secure deployment, so I'm going to configure port 80 to forward all traffic to port 443, which is going to be encrypted. [ref](https://blog.miguelgrinberg.com/post/the-flask-mega-tutorial-part-xvii-deployment-on-linux).
|
||||
|
||||
First we will get and set up the `http3-ytproxy`. For this we will need to [install go](https://github.com/golang/go/wiki/Ubuntu) but if you are on Ubuntu 20.04 or you have `snap` installed you can just run `sudo snap install --classic go` to get `go` installed.
|
||||
|
||||
Then you will need to run the following commands:
|
||||
```
|
||||
cd $HOME
|
||||
git clone https://github.com/FireMasterK/http3-ytproxy
|
||||
cd http3-ytproxy
|
||||
go build -ldflags "-s -w" main.go
|
||||
mv main http3-ytproxy
|
||||
mkdir socket
|
||||
chown -R www-data:www-data socket
|
||||
```
|
||||
|
||||
Now we will configure a `systemd` service to run the http3-ytproxy. For this you will need to run `sudo nano /lib/systemd/system/http3-ytproxy.service` to start the `nano` text editor. Now copy and paste this and save:
|
||||
|
||||
> IMPORTANT: You may need to change some paths to fit your system!
|
||||
|
||||
```
|
||||
[Unit]
|
||||
Description=Sleep service
|
||||
ConditionPathExists=/home/ubuntu/http3-ytproxy/http3-ytproxy
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=www-data
|
||||
Group=www-data
|
||||
LimitNOFILE=1024
|
||||
|
||||
Restart=on-failure
|
||||
RestartSec=10
|
||||
|
||||
WorkingDirectory=/home/ubuntu/http3-ytproxy
|
||||
ExecStart=/home/ubuntu/http3-ytproxy/http3-ytproxy
|
||||
|
||||
# make sure log directory exists and owned by syslog
|
||||
PermissionsStartOnly=true
|
||||
ExecStartPre=/bin/mkdir -p /var/log/http3-ytproxy
|
||||
ExecStartPre=/bin/chown syslog:adm /var/log/http3-ytproxy
|
||||
ExecStartPre=/bin/chmod 755 /var/log/http3-ytproxy
|
||||
StandardOutput=syslog
|
||||
StandardError=syslog
|
||||
SyslogIdentifier=http3-ytproxy
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
```
|
||||
|
||||
> IMPORTANT NOTE: Some distros have the Nginx user as `nginx` instead of `www-data`, if this is the case you should change the `User=` and `Group=` variables from the service file.
|
||||
|
||||
Now you are ready to enable and start the service:
|
||||
```
|
||||
sudo systemctl enable http3-ytproxy.service
|
||||
sudo systemctl start http3-ytproxy.service
|
||||
```
|
||||
|
||||
If you did everything ok you should see no errors when running `sudo journalctl -f -u http3-ytproxy`.
|
||||
|
||||
Now we will set up Nginx. To do so:
|
||||
|
||||
* `sudo rm /etc/nginx/sites-enabled/default`
|
||||
|
||||
Create a new Nginx site, you can run `sudo nano /etc/nginx/sites-enabled/yotter`
|
||||
@ -224,20 +285,26 @@ server {
|
||||
expires 30d;
|
||||
}
|
||||
|
||||
location ~ (/videoplayback|/vi/|/a/) {
|
||||
proxy_buffering off;
|
||||
resolver 1.1.1.1;
|
||||
proxy_pass https://$arg_host;
|
||||
proxy_set_header Host $arg_host;
|
||||
add_header Access-Control-Allow-Origin *;
|
||||
}
|
||||
location ~ (^/videoplayback$|/videoplayback/|/vi/|/a/|/ytc/) {
|
||||
proxy_pass http://unix:/home/ubuntu/http3-ytproxy/socket/http-proxy.sock;
|
||||
add_header Access-Control-Allow-Origin *;
|
||||
sendfile on;
|
||||
tcp_nopush on;
|
||||
aio_write on;
|
||||
aio threads=default;
|
||||
directio 512;
|
||||
proxy_http_version 1.1;
|
||||
proxy_set_header Connection "";
|
||||
}
|
||||
}
|
||||
```
|
||||
Make sure to replace `<yourdomain>` by the domain you are willing to use for your instance (i.e example.com). You can now edit `yotter-config.json` and set `nginxVideoStream` to `true`.
|
||||
> Note: You may need to change the proxy-pass line to fit your system. It should point to the socket created on the `http3-ytproxy/socket` folder.
|
||||
|
||||
Make sure to replace `<yourdomain>` by the domain you are willing to use for your instance (i.e example.com). You can now edit `yotter-config.json` and set `isInstance` to `true`.
|
||||
|
||||
You will also need to change the `</path/to>` after `alias` to fit your system. You have to point to the Yotter folder, in this set up it would be `/home/ubuntu` as it is the location where we cloned the Yotter app. This alias is created to handle static files directly, without forwarding to the application.
|
||||
|
||||
Once done, you can run `sudo service nginx reload`. If everything so far went OK, you can now set the `nginxVideoStream` to `true` on the `yotter-config.json` file.
|
||||
Once done, you can run `sudo service nginx reload`. If everything so far went OK, you can now set the `isInstance` to `true` on the `yotter-config.json` file.
|
||||
|
||||
Now you need to install an SSL certificate on your server so you can use HTTPS. If you are running Ubuntu 20LTS or already have `snap` installed, you can proceed as follows:
|
||||
|
||||
@ -250,6 +317,8 @@ Now we will run certbot and we need to tell that we run an nginx server. Here yo
|
||||
|
||||
[Follow this instructions to install certbot and generate an ssl certificate so your server can use HTTPS](https://certbot.eff.org/lets-encrypt/ubuntufocal-nginx)
|
||||
|
||||
Finally, once this is done, you should edit the `yotter` nginx config and change the `listen 443 ssl;` line to `listen 443 ssl http2;`
|
||||
|
||||
#### Updating the server
|
||||
Updating the server should always be pretty easy. These steps need to be run on the Yotter folder and with the python virtual env activated.
|
||||
|
||||
@ -274,7 +343,7 @@ Currently available config is:
|
||||
* **maxInstanceUsers**: Max users on the instance. When set to `0` it closes registrations.
|
||||
* **serverLocation**: Location of the server.
|
||||
* **restrictPublicUsage**: When set to `false` the instance allows non-registered users to use some routes (i.e /watch?v=..., /ytsearch, /channel...). See [this section](https://github.com/pluja/Yotter/blob/dev-indep/SELF-HOSTING.md#removing-log-in-restrictions)
|
||||
* **nginxVideoStream**: Whether or not to use Nginx as the streaming engine. It is recommended for public instances. [See this link]()
|
||||
* **isInstance**: If your installation is on a server using Nginx, it must be set to `true`. Set it to `false` only if running on a local machine. [See this link]()
|
||||
* **maintenance_mode**: Activates a message on the server warning users of maintenance mode.
|
||||
* **show_admin_message**: Shows a message from the admin with title as `admin_message_title` and body as `admin_message`
|
||||
* **admin_user**: Username of the admin user.
|
||||
|
@ -27,7 +27,7 @@ class User(UserMixin, db.Model):
|
||||
posts = db.relationship('Post', backref='author', lazy='dynamic')
|
||||
|
||||
def __repr__(self):
|
||||
return '<User {}>'.format(self.username)
|
||||
return f'<User {self.username}>'
|
||||
|
||||
def set_last_seen(self):
|
||||
self.last_seen = datetime.utcnow()
|
||||
@ -153,7 +153,7 @@ class youtubeFollow(db.Model):
|
||||
back_populates="youtubeFollowed")
|
||||
|
||||
def __repr__(self):
|
||||
return '<youtubeFollow {}>'.format(self.channelName)
|
||||
return f'<youtubeFollow {self.channelName}>'
|
||||
|
||||
class twitterFollow(db.Model):
|
||||
__tablename__ = 'twitterAccount'
|
||||
@ -164,7 +164,7 @@ class twitterFollow(db.Model):
|
||||
back_populates="twitterFollowed")
|
||||
|
||||
def __repr__(self):
|
||||
return '<twitterFollow {}>'.format(self.username)
|
||||
return f'<twitterFollow {self.username}>'
|
||||
|
||||
class Post(db.Model):
|
||||
id = db.Column(db.Integer, primary_key=True)
|
||||
@ -175,5 +175,4 @@ class Post(db.Model):
|
||||
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
|
||||
|
||||
def __repr__(self):
|
||||
return '<Post {}>'.format(self.body)
|
||||
|
||||
return f'<Post {self.body}>'
|
||||
|
276
app/routes.py
276
app/routes.py
@ -7,6 +7,8 @@ import random
|
||||
import re
|
||||
import time
|
||||
import urllib
|
||||
import subprocess
|
||||
from multiprocessing import Process
|
||||
from concurrent.futures import as_completed
|
||||
|
||||
import bleach
|
||||
@ -27,8 +29,13 @@ from youtube_search import YoutubeSearch
|
||||
from app import app, db
|
||||
from app.forms import LoginForm, RegistrationForm, EmptyForm, SearchForm, ChannelForm
|
||||
from app.models import User, twitterPost, ytPost, Post, youtubeFollow, twitterFollow
|
||||
|
||||
from youtube import comments, utils, channel as ytch, search as yts
|
||||
from youtube import watch as ytwatch
|
||||
from youtube import video as ytvid
|
||||
|
||||
from nitter import feed as nitterfeed
|
||||
from nitter import user as nitteruser
|
||||
|
||||
#########################################
|
||||
|
||||
@ -78,37 +85,41 @@ def twitter(page=0):
|
||||
followCount = len(followingList)
|
||||
page = int(page)
|
||||
avatarPath = "img/avatars/1.png"
|
||||
|
||||
followList = []
|
||||
for f in followingList:
|
||||
followList.append(f.username)
|
||||
posts = []
|
||||
|
||||
cache_file = glob.glob("app/cache/{}_*".format(current_user.username))
|
||||
cache_file = glob.glob(f"app/cache/{current_user.username}_*")
|
||||
if (len(cache_file) > 0):
|
||||
time_diff = round(time.time() - os.path.getmtime(cache_file[0]))
|
||||
else:
|
||||
time_diff = 999
|
||||
# If cache file is more than 1 minute old
|
||||
if page == 0 and time_diff > 60:
|
||||
|
||||
# If cache file is older than 30 minute old
|
||||
if page == 0 and time_diff > 30:
|
||||
if cache_file:
|
||||
for f in cache_file:
|
||||
os.remove(f)
|
||||
feed = getFeed(followingList)
|
||||
cache_file = "{u}_{d}.json".format(u=current_user.username, d=time.strftime("%Y%m%d-%H%M%S"))
|
||||
with open("app/cache/{}".format(cache_file), 'w') as fp:
|
||||
feed = nitterfeed.get_feed(followList)
|
||||
cache_file = f"{current_user.username}_{time.strftime('%Y%m%d-%H%M%S')}.json"
|
||||
with open(f"app/cache/{cache_file}", 'w') as fp:
|
||||
json.dump(feed, fp)
|
||||
|
||||
# Else, refresh feed
|
||||
else:
|
||||
try:
|
||||
cache_file = glob.glob("app/cache/{}*".format(current_user.username))[0]
|
||||
cache_file = glob.glob(f"app/cache/{current_user.username}*")[0]
|
||||
with open(cache_file, 'r') as fp:
|
||||
feed = json.load(fp)
|
||||
except:
|
||||
feed = getFeed(followingList)
|
||||
cache_file = "{u}_{d}.json".format(u=current_user.username, d=time.strftime("%Y%m%d-%H%M%S"))
|
||||
with open("app/cache/{}".format(cache_file), 'w') as fp:
|
||||
feed = nitterfeed.get_feed(followList)
|
||||
cache_file = f"{current_user.username}_{time.strftime('%Y%m%d-%H%M%S')}.json"
|
||||
with open(f"app/cache/{cache_file}", 'w') as fp:
|
||||
json.dump(feed, fp)
|
||||
|
||||
posts.extend(feed)
|
||||
posts.sort(key=lambda x: datetime.datetime.strptime(x['timeStamp'], '%d/%m/%Y %H:%M:%S'), reverse=True)
|
||||
|
||||
# Items range per page
|
||||
page_items = page * 16
|
||||
offset = page_items + 16
|
||||
@ -126,13 +137,7 @@ def twitter(page=0):
|
||||
posts = posts[page_items:offset]
|
||||
else:
|
||||
posts = posts[page_items:]
|
||||
|
||||
if not posts:
|
||||
profilePic = avatarPath
|
||||
else:
|
||||
profilePic = posts[0]['profilePic']
|
||||
return render_template('twitter.html', title='Yotter | Twitter', posts=posts, avatar=avatarPath,
|
||||
profilePic=profilePic, followedCount=followCount, form=form, config=config,
|
||||
return render_template('twitter.html', title='Yotter | Twitter', posts=posts, followedCount=followCount, form=form, config=config,
|
||||
pages=total_pages, init_page=init_page, actual_page=page)
|
||||
|
||||
|
||||
@ -182,7 +187,7 @@ def follow(username):
|
||||
form = EmptyForm()
|
||||
if form.validate_on_submit():
|
||||
if followTwitterAccount(username):
|
||||
flash("{} followed!".format(username))
|
||||
flash(f"{username} followed!")
|
||||
return redirect(request.referrer)
|
||||
|
||||
|
||||
@ -197,7 +202,7 @@ def followTwitterAccount(username):
|
||||
db.session.commit()
|
||||
return True
|
||||
except:
|
||||
flash("Twitter: Couldn't follow {}. Already followed?".format(username))
|
||||
flash(f"Twitter: Couldn't follow {username}. Already followed?")
|
||||
return False
|
||||
else:
|
||||
flash("Something went wrong... try again")
|
||||
@ -210,7 +215,7 @@ def unfollow(username):
|
||||
form = EmptyForm()
|
||||
if form.validate_on_submit():
|
||||
if twUnfollow(username):
|
||||
flash("{} unfollowed!".format(username))
|
||||
flash(f"{username} unfollowed!")
|
||||
return redirect(request.referrer)
|
||||
|
||||
|
||||
@ -243,31 +248,44 @@ def search():
|
||||
if results:
|
||||
return render_template('search.html', form=form, results=results, config=config)
|
||||
else:
|
||||
flash("User {} not found...".format(user))
|
||||
flash(f"User {user} not found...")
|
||||
return redirect(request.referrer)
|
||||
else:
|
||||
return render_template('search.html', form=form, config=config)
|
||||
|
||||
|
||||
@app.route('/u/<username>')
|
||||
@app.route('/<username>')
|
||||
@app.route('/<username>/<page>')
|
||||
@login_required
|
||||
def u(username):
|
||||
def u(username, page=1):
|
||||
page=int(page)
|
||||
if username == "favicon.ico":
|
||||
return redirect(url_for('static', filename='favicons/favicon.ico'))
|
||||
form = EmptyForm()
|
||||
avatarPath = "img/avatars/{}.png".format(str(random.randint(1, 12)))
|
||||
user = getTwitterUserInfo(username)
|
||||
avatarPath = f"img/avatars/{str(random.randint(1, 12))}.png"
|
||||
user = nitteruser.get_user_info(username)
|
||||
if not user:
|
||||
flash("This user is not on Twitter.")
|
||||
return redirect(request.referrer)
|
||||
|
||||
posts = []
|
||||
posts.extend(getPosts(username))
|
||||
if not posts:
|
||||
user['profilePic'] = avatarPath
|
||||
tweets=nitteruser.get_tweets(username, page)
|
||||
if tweets == 'Empty feed':
|
||||
posts = False
|
||||
elif tweets == 'Protected feed':
|
||||
posts = 'Protected'
|
||||
else:
|
||||
posts.extend(tweets)
|
||||
|
||||
return render_template('user.html', posts=posts, user=user, form=form, config=config)
|
||||
if page-1 < 0:
|
||||
prev_page = 0
|
||||
else:
|
||||
prev_page = page-1
|
||||
|
||||
if page > 2:
|
||||
page =2
|
||||
|
||||
return render_template('user.html', posts=posts, user=user, form=form, config=config, page=page, prev_page=prev_page)
|
||||
|
||||
|
||||
#########################
|
||||
@ -282,7 +300,7 @@ def youtube():
|
||||
videos = getYoutubePosts(ids)
|
||||
if videos:
|
||||
videos.sort(key=lambda x: x.date, reverse=True)
|
||||
print("--- {} seconds fetching youtube feed---".format(time.time() - start_time))
|
||||
print(f"--- {time.time() - start_time} seconds fetching youtube feed---")
|
||||
return render_template('youtube.html', title="Yotter | Youtube", videos=videos, followCount=followCount,
|
||||
config=config)
|
||||
|
||||
@ -319,22 +337,21 @@ def ytsearch():
|
||||
filters = {"time": 0, "type": 0, "duration": 0}
|
||||
results = yts.search_by_terms(query, page, autocorrect, sort, filters)
|
||||
|
||||
next_page = "/ytsearch?q={q}&s={s}&p={p}".format(q=query, s=sort, p=int(page) + 1)
|
||||
next_page = f"/ytsearch?q={query}&s={sort}&p={int(page)+1}"
|
||||
if int(page) == 1:
|
||||
prev_page = "/ytsearch?q={q}&s={s}&p={p}".format(q=query, s=sort, p=1)
|
||||
prev_page = f"/ytsearch?q={query}&s={sort}&p={1}"
|
||||
else:
|
||||
prev_page = "/ytsearch?q={q}&s={s}&p={p}".format(q=query, s=sort, p=int(page) - 1)
|
||||
prev_page = f"/ytsearch?q={query}&s={sort}&p={int(page)-1}"
|
||||
|
||||
for video in results['videos']:
|
||||
hostname = urllib.parse.urlparse(video['videoThumb']).netloc
|
||||
video['videoThumb'] = video['videoThumb'].replace("https://{}".format(hostname), "") + "&host=" + hostname
|
||||
video['videoThumb'] = video['videoThumb'].replace(f"https://{hostname}", "") + "&host=" + hostname
|
||||
|
||||
for channel in results['channels']:
|
||||
if config['nginxVideoStream']:
|
||||
if config['isInstance']:
|
||||
channel['thumbnail'] = channel['thumbnail'].replace("~", "/")
|
||||
hostName = urllib.parse.urlparse(channel['thumbnail']).netloc
|
||||
channel['thumbnail'] = channel['thumbnail'].replace("https://{}".format(hostName),
|
||||
"") + "?host=" + hostName
|
||||
channel['thumbnail'] = channel['thumbnail'].replace(f"https://{hostName}", "") + "?host=" + hostName
|
||||
return render_template('ytsearch.html', form=form, btform=button_form, results=results,
|
||||
restricted=config['restrictPublicUsage'], config=config, npage=next_page,
|
||||
ppage=prev_page)
|
||||
@ -351,26 +368,27 @@ def ytfollow(channelId):
|
||||
|
||||
def followYoutubeChannel(channelId):
|
||||
try:
|
||||
channelData = YoutubeSearch.channelInfo(channelId, False)
|
||||
try:
|
||||
if not current_user.is_following_yt(channelId):
|
||||
channelData = ytch.get_channel_tab(channelId, tab='about')
|
||||
if channelData == False:
|
||||
return False
|
||||
follow = youtubeFollow()
|
||||
follow.channelId = channelId
|
||||
follow.channelName = channelData[0]['name']
|
||||
follow.channelName = channelData['channel_name']
|
||||
follow.followers.append(current_user)
|
||||
db.session.add(follow)
|
||||
db.session.commit()
|
||||
flash("{} followed!".format(channelData[0]['name']))
|
||||
flash(f"{channelData['channel_name']} followed!")
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
except Exception as e:
|
||||
print(e)
|
||||
flash("Youtube: Couldn't follow {}. Already followed?".format(channelData[0]['name']))
|
||||
return False
|
||||
except KeyError as ke:
|
||||
print("KeyError: {}:'{}' could not be found".format(ke, channelId))
|
||||
flash("Youtube: ChannelId '{}' is not valid".format(channelId))
|
||||
print(f"KeyError: {ke}:'{channelId}' could not be found")
|
||||
flash(f"Youtube: ChannelId '{channelId}' is not valid")
|
||||
return False
|
||||
|
||||
|
||||
@ -391,7 +409,7 @@ def unfollowYoutubeChannel(channelId):
|
||||
if channel:
|
||||
db.session.delete(channel)
|
||||
db.session.commit()
|
||||
flash("{} unfollowed!".format(name))
|
||||
flash(f"{name} unfollowed!")
|
||||
except:
|
||||
flash("There was an error unfollowing the user. Try again.")
|
||||
|
||||
@ -411,30 +429,30 @@ def channel(id):
|
||||
if sort is None:
|
||||
sort = 3
|
||||
|
||||
data = ytch.get_channel_tab_info(id, page, sort)
|
||||
|
||||
data = ytch.get_channel_tab(id, page, sort)
|
||||
for video in data['items']:
|
||||
if config['nginxVideoStream']:
|
||||
if config['isInstance']:
|
||||
hostName = urllib.parse.urlparse(video['thumbnail'][1:]).netloc
|
||||
video['thumbnail'] = video['thumbnail'].replace("https://{}".format(hostName), "")[1:].replace("hqdefault",
|
||||
"mqdefault") + "&host=" + hostName
|
||||
video['thumbnail'] = video['thumbnail'].replace(f"https://{hostName}", "")[1:].replace("hqdefault",
|
||||
"mqdefault") + "&host=" + hostName
|
||||
else:
|
||||
video['thumbnail'] = video['thumbnail'].replace('/', '~')
|
||||
|
||||
if config['nginxVideoStream']:
|
||||
if config['isInstance']:
|
||||
hostName = urllib.parse.urlparse(data['avatar'][1:]).netloc
|
||||
data['avatar'] = data['avatar'].replace("https://{}".format(hostName), "")[1:] + "?host=" + hostName
|
||||
data['avatar'] = data['avatar'].replace(f"https://{hostName}", "")[1:] + "?host=" + hostName
|
||||
else:
|
||||
data['avatar'] = data['avatar'].replace('/', '~')
|
||||
|
||||
next_page = "/channel/{q}?s={s}&p={p}".format(q=id, s=sort, p=int(page) + 1)
|
||||
next_page = f"/channel/{id}?s={sort}&p={int(page)+1}"
|
||||
if int(page) == 1:
|
||||
prev_page = "/channel/{q}?s={s}&p={p}".format(q=id, s=sort, p=1)
|
||||
prev_page = f"/channel/{id}?s={sort}&p={1}"
|
||||
else:
|
||||
prev_page = "/channel/{q}?s={s}&p={p}".format(q=id, s=sort, p=int(page) - 1)
|
||||
prev_page = f"/channel/{id}?s={sort}&p={int(page)-1}"
|
||||
|
||||
return render_template('channel.html', form=form, btform=button_form, data=data,
|
||||
restricted=config['restrictPublicUsage'], config=config, next_page=next_page, prev_page=prev_page)
|
||||
restricted=config['restrictPublicUsage'], config=config, next_page=next_page,
|
||||
prev_page=prev_page)
|
||||
|
||||
|
||||
def get_best_urls(urls):
|
||||
@ -463,49 +481,38 @@ def get_live_urls(urls):
|
||||
@login_required
|
||||
def watch():
|
||||
id = request.args.get('v', None)
|
||||
info = ytwatch.extract_info(id, False, playlist_id=None, index=None)
|
||||
if info == 'Captcha':
|
||||
return render_template('captcha.html', origin=request.referrer)
|
||||
retry = 3
|
||||
while retry != 0 and info['playability_error'] == 'Could not find player':
|
||||
info=ytwatch.extract_info(id, False, playlist_id=None, index=None)
|
||||
retry -= 1
|
||||
info = ytvid.get_info(id)
|
||||
|
||||
vsources = ytwatch.get_video_sources(info, False)
|
||||
# Retry 3 times if no sources are available.
|
||||
retry = 3
|
||||
while retry != 0 and len(vsources) == 0:
|
||||
vsources = ytwatch.get_video_sources(info, False)
|
||||
retry -= 1
|
||||
if info['error'] == False:
|
||||
for format in info['formats']:
|
||||
hostName = urllib.parse.urlparse(format['url']).netloc
|
||||
format['url'] = format['url'].replace(f"https://{hostName}", "") + "&host=" + hostName
|
||||
|
||||
for source in vsources:
|
||||
hostName = urllib.parse.urlparse(source['src']).netloc
|
||||
source['src'] = source['src'].replace("https://{}".format(hostName), "") + "&host=" + hostName
|
||||
for format in info['audio_formats']:
|
||||
hostName = urllib.parse.urlparse(format['url']).netloc
|
||||
format['url'] = format['url'].replace(f"https://{hostName}", "") + "&host=" + hostName
|
||||
|
||||
# Parse video formats
|
||||
for v_format in info['formats']:
|
||||
hostName = urllib.parse.urlparse(v_format['url']).netloc
|
||||
v_format['url'] = v_format['url'].replace("https://{}".format(hostName), "") + "&host=" + hostName
|
||||
if v_format['audio_bitrate'] is not None and v_format['vcodec'] is None:
|
||||
v_format['audio_valid'] = True
|
||||
# Markup description
|
||||
try:
|
||||
info['description'] = Markup(bleach.linkify(info['description'].replace("\n", "<br>"))).replace(
|
||||
'www.youtube.com', config['serverName']).replace('youtube.com', config['serverName']).replace("/join",
|
||||
"")
|
||||
except AttributeError or TypeError:
|
||||
print(info['description'])
|
||||
|
||||
# Markup description
|
||||
try:
|
||||
info['description'] = Markup(bleach.linkify(info['description'].replace("\n", "<br>")))
|
||||
except AttributeError or TypeError:
|
||||
print(info['description'])
|
||||
# Get comments
|
||||
if not info['is_live']:
|
||||
videocomments = comments.video_comments(id, sort=0, offset=0, lc='', secret_key='')
|
||||
videocomments = utils.post_process_comments_info(videocomments)
|
||||
if videocomments is not None:
|
||||
videocomments.sort(key=lambda x: x['likes'], reverse=True)
|
||||
else:
|
||||
videocomments = False
|
||||
|
||||
return render_template("video.html", info=info, title=info['title'], config=config,
|
||||
videocomments=videocomments)
|
||||
|
||||
# Get comments
|
||||
videocomments = comments.video_comments(id, sort=0, offset=0, lc='', secret_key='')
|
||||
videocomments = utils.post_process_comments_info(videocomments)
|
||||
if videocomments is not None:
|
||||
videocomments.sort(key=lambda x: x['likes'], reverse=True)
|
||||
|
||||
# Calculate rating %
|
||||
info['rating'] = str((info['like_count'] / (info['like_count'] + info['dislike_count'])) * 100)[0:4]
|
||||
return render_template("video.html", info=info, title='{}'.format(info['title']), config=config,
|
||||
videocomments=videocomments, vsources=vsources)
|
||||
return render_template("video.html", info=info, title='Scheduled Video', config=config)
|
||||
|
||||
|
||||
def markupString(string):
|
||||
@ -684,16 +691,26 @@ def importdata():
|
||||
flash('No selected file')
|
||||
return redirect(request.referrer)
|
||||
else:
|
||||
option = request.form['import_format']
|
||||
if option == 'yotter':
|
||||
importYotterSubscriptions(file)
|
||||
elif option == 'youtube':
|
||||
importYoutubeSubscriptions(file)
|
||||
flash("Data is being imported. You can keep using Yotter.")
|
||||
importdataasync(file)
|
||||
return redirect(request.referrer)
|
||||
|
||||
return redirect(request.referrer)
|
||||
|
||||
|
||||
def importdataasync(file):
|
||||
p = Process(target=importdataforprocess, args=(file,))
|
||||
p.start()
|
||||
|
||||
|
||||
def importdataforprocess(file):
|
||||
option = request.form['import_format']
|
||||
if option == 'yotter':
|
||||
importYotterSubscriptions(file)
|
||||
elif option == 'youtube':
|
||||
importYoutubeSubscriptions(file)
|
||||
|
||||
|
||||
@app.route('/deleteme', methods=['GET', 'POST'])
|
||||
@login_required
|
||||
def deleteme():
|
||||
@ -764,11 +781,28 @@ def status():
|
||||
filen = url_for('static', filename='img/open.png')
|
||||
caniregister = True
|
||||
|
||||
return render_template('status.html', title='STATUS', count=count, max=config['maxInstanceUsers'], file=filen, cani=caniregister)
|
||||
try:
|
||||
with open ("version.txt", "r") as versionFile:
|
||||
ver=versionFile.readlines()
|
||||
hsh = ver[0]
|
||||
update= ver[1]
|
||||
tag=ver[2]
|
||||
except:
|
||||
try:
|
||||
tag = str(subprocess.check_output(["git", "describe", "--tags", "--abbrev=0"]).strip())[2:-1]
|
||||
hsh = str(subprocess.check_output(["git", "rev-parse", "--short", "HEAD"]).strip())[2:-1]
|
||||
update = str(subprocess.check_output(["git", "log", "-1", "--format=%cd"]).strip())[2:-7]
|
||||
except:
|
||||
hsh="Unknown"
|
||||
update="Unknown"
|
||||
tag="Unknown"
|
||||
return render_template('status.html', title='STATUS', count=count, max=config['maxInstanceUsers'], file=filen,
|
||||
cani=caniregister, hash=hsh, update=update, tag=tag)
|
||||
|
||||
|
||||
@app.route('/error/<errno>')
|
||||
def error(errno):
|
||||
return render_template('{}.html'.format(str(errno)), config=config)
|
||||
return render_template(f'{str(errno)}.html', config=config)
|
||||
|
||||
|
||||
def getTimeDiff(t):
|
||||
@ -776,24 +810,26 @@ def getTimeDiff(t):
|
||||
|
||||
if diff.days == 0:
|
||||
if diff.seconds > 3599:
|
||||
timeString = "{}h".format(int((diff.seconds / 60) / 60))
|
||||
num = int((diff.seconds / 60) / 60)
|
||||
timeString = f"{num}h"
|
||||
else:
|
||||
timeString = "{}m".format(int(diff.seconds / 60))
|
||||
num = int(diff.seconds / 60)
|
||||
timeString = f"{num}m"
|
||||
else:
|
||||
timeString = "{}d".format(diff.days)
|
||||
timeString = f"{diff.days}d"
|
||||
return timeString
|
||||
|
||||
|
||||
def isTwitterUser(username):
|
||||
response = requests.get('{instance}{user}/rss'.format(instance=NITTERINSTANCE, user=username))
|
||||
response = requests.get(f'{NITTERINSTANCE}{username}/rss')
|
||||
if response.status_code == 404:
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
def twitterUserSearch(terms):
|
||||
response = urllib.request.urlopen(
|
||||
'{instance}search?f=users&q={user}'.format(instance=NITTERINSTANCE, user=urllib.parse.quote(terms))).read()
|
||||
url = f'{NITTERINSTANCE}search?f=users&q={urllib.parse.quote(terms)}'
|
||||
response = urllib.request.urlopen(url).read()
|
||||
html = BeautifulSoup(str(response), "lxml")
|
||||
|
||||
results = []
|
||||
@ -807,14 +843,14 @@ def twitterUserSearch(terms):
|
||||
'unicode_escape').encode('latin_1').decode('utf8'),
|
||||
"username": item.find('a', attrs={'class': 'username'}).getText().encode('latin_1').decode(
|
||||
'unicode_escape').encode('latin_1').decode('utf8'),
|
||||
'avatar': "{i}{s}".format(i=NITTERINSTANCE, s=item.find('img', attrs={'class': 'avatar'})['src'][1:])
|
||||
'avatar': NITTERINSTANCE + item.find('img', attrs={'class': 'avatar'})['src'][1:],
|
||||
}
|
||||
results.append(user)
|
||||
return results
|
||||
|
||||
|
||||
def getTwitterUserInfo(username):
|
||||
response = urllib.request.urlopen('{instance}{user}'.format(instance=NITTERINSTANCE, user=username)).read()
|
||||
response = urllib.request.urlopen('{NITTERINSTANCE}{username}').read()
|
||||
# rssFeed = feedparser.parse(response.content)
|
||||
|
||||
html = BeautifulSoup(str(response), "lxml")
|
||||
@ -845,9 +881,7 @@ def getTwitterUserInfo(username):
|
||||
"followers": numerize.numerize(
|
||||
int(html.find_all('span', attrs={'class': 'profile-stat-num'})[2].string.replace(",", ""))),
|
||||
"likes": html.find_all('span', attrs={'class': 'profile-stat-num'})[3].string,
|
||||
"profilePic": "{instance}{pic}".format(instance=NITTERINSTANCE,
|
||||
pic=html.find('a', attrs={'class': 'profile-card-avatar'})['href'][
|
||||
1:])
|
||||
"profilePic": NITTERINSTANCE + html.find('a', attrs={'class': 'profile-card-avatar'})['href'][1:],
|
||||
}
|
||||
return user
|
||||
|
||||
@ -855,9 +889,9 @@ def getTwitterUserInfo(username):
|
||||
def getFeed(urls):
|
||||
feedPosts = []
|
||||
with FuturesSession() as session:
|
||||
futures = [session.get('{instance}{user}'.format(instance=NITTERINSTANCE, user=u.username)) for u in urls]
|
||||
futures = [session.get(f'{NITTERINSTANCE}{u.username}') for u in urls]
|
||||
for future in as_completed(futures):
|
||||
res = future.result().content.decode('utf-8')
|
||||
res= future.result().content
|
||||
html = BeautifulSoup(res, "html.parser")
|
||||
userFeed = html.find_all('div', attrs={'class': 'timeline-item'})
|
||||
if userFeed != []:
|
||||
@ -876,7 +910,8 @@ def getFeed(urls):
|
||||
newPost["twitterName"] = post.find('a', attrs={'class': 'fullname'}).text
|
||||
newPost["timeStamp"] = date_time_str
|
||||
newPost["date"] = post.find('span', attrs={'class': 'tweet-date'}).find('a').text
|
||||
newPost["content"] = Markup(post.find('div', attrs={'class': 'tweet-content'}))
|
||||
content = post.find('div', attrs={'class': 'tweet-content'})
|
||||
newPost["content"] = Markup(str(content).replace("\n", "<br>"))
|
||||
|
||||
if post.find('div', attrs={'class': 'retweet-header'}):
|
||||
newPost["username"] = post.find('div', attrs={'class': 'retweet-header'}).find('div', attrs={
|
||||
@ -923,7 +958,7 @@ def getPosts(account):
|
||||
feedPosts = []
|
||||
|
||||
# Gather profile info.
|
||||
rssFeed = urllib.request.urlopen('{instance}{user}'.format(instance=NITTERINSTANCE, user=account)).read()
|
||||
rssFeed = urllib.request.urlopen(f'{NITTERINSTANCE}{account}').read()
|
||||
# Gather feedPosts
|
||||
res = rssFeed.decode('utf-8')
|
||||
html = BeautifulSoup(res, "html.parser")
|
||||
@ -981,8 +1016,7 @@ def getPosts(account):
|
||||
def getYoutubePosts(ids):
|
||||
videos = []
|
||||
with FuturesSession() as session:
|
||||
futures = [session.get('https://www.youtube.com/feeds/videos.xml?channel_id={id}'.format(id=id.channelId)) for
|
||||
id in ids]
|
||||
futures = [session.get(f'https://www.youtube.com/feeds/videos.xml?channel_id={id.channelId}') for id in ids]
|
||||
for future in as_completed(futures):
|
||||
resp = future.result()
|
||||
rssFeed = feedparser.parse(resp.content)
|
||||
@ -1013,7 +1047,7 @@ def getYoutubePosts(ids):
|
||||
video.timeStamp = getTimeDiff(vid.published_parsed)
|
||||
except:
|
||||
if time != 0:
|
||||
video.timeStamp = "{} days".format(str(time.days))
|
||||
video.timeStamp = f"{str(time.days)} days"
|
||||
else:
|
||||
video.timeStamp = "Unknown"
|
||||
|
||||
@ -1022,9 +1056,9 @@ def getYoutubePosts(ids):
|
||||
video.channelUrl = vid.author_detail.href
|
||||
video.id = vid.yt_videoid
|
||||
video.videoTitle = vid.title
|
||||
if config['nginxVideoStream']:
|
||||
if config['isInstance']:
|
||||
hostName = urllib.parse.urlparse(vid.media_thumbnail[0]['url']).netloc
|
||||
video.videoThumb = vid.media_thumbnail[0]['url'].replace("https://{}".format(hostName), "").replace(
|
||||
video.videoThumb = vid.media_thumbnail[0]['url'].replace(f"https://{hostName}", "").replace(
|
||||
"hqdefault", "mqdefault") + "?host=" + hostName
|
||||
else:
|
||||
video.videoThumb = vid.media_thumbnail[0]['url'].replace('/', '~')
|
||||
|
1
app/static/quality-selector.css
Normal file
1
app/static/quality-selector.css
Normal file
@ -0,0 +1 @@
|
||||
.vjs-quality-selector .vjs-menu-button{margin:0;padding:0;height:100%;width:100%}.vjs-quality-selector .vjs-icon-placeholder{font-family:'VideoJS';font-weight:normal;font-style:normal}.vjs-quality-selector .vjs-icon-placeholder:before{content:'\f110'}.vjs-quality-changing .vjs-big-play-button{display:none}.vjs-quality-changing .vjs-control-bar{display:-webkit-box;display:-webkit-flex;display:-ms-flexbox;display:flex;visibility:visible;opacity:1}
|
4
app/static/videojs-quality-selector.min.js
vendored
Normal file
4
app/static/videojs-quality-selector.min.js
vendored
Normal file
File diff suppressed because one or more lines are too long
3
app/static/videojs.hotkeys.min.js
vendored
Normal file
3
app/static/videojs.hotkeys.min.js
vendored
Normal file
@ -0,0 +1,3 @@
|
||||
/* videojs-hotkeys v0.2.27 - https://github.com/ctd1500/videojs-hotkeys */
|
||||
!function(e,t){"undefined"!=typeof window&&window.videojs?t(window.videojs):"function"==typeof define&&define.amd?define("videojs-hotkeys",["video.js"],function(e){return t(e.default||e)}):"undefined"!=typeof module&&module.exports&&(module.exports=t(require("video.js")))}(0,function(e){"use strict";"undefined"!=typeof window&&(window.videojs_hotkeys={version:"0.2.27"});(e.registerPlugin||e.plugin)("hotkeys",function(t){function n(e){return"function"==typeof s?s(e):s}function o(e){null!=e&&"function"==typeof e.then&&e.then(null,function(e){})}var r=this,u=r.el(),l=document,i={volumeStep:.1,seekStep:5,enableMute:!0,enableVolumeScroll:!0,enableHoverScroll:!1,enableFullscreen:!0,enableNumbers:!0,enableJogStyle:!1,alwaysCaptureHotkeys:!1,captureDocumentHotkeys:!1,documentHotkeysFocusElementFilter:function(){return!1},enableModifiersForNumbers:!0,enableInactiveFocus:!0,skipInitialFocus:!1,playPauseKey:function(e){return 32===e.which||179===e.which},rewindKey:function(e){return 37===e.which||177===e.which},forwardKey:function(e){return 39===e.which||176===e.which},volumeUpKey:function(e){return 38===e.which},volumeDownKey:function(e){return 40===e.which},muteKey:function(e){return 77===e.which},fullscreenKey:function(e){return 70===e.which},customKeys:{}},c=e.mergeOptions||e.util.mergeOptions,a=(t=c(i,t||{})).volumeStep,s=t.seekStep,m=t.enableMute,y=t.enableVolumeScroll,f=t.enableHoverScroll,v=t.enableFullscreen,d=t.enableNumbers,p=t.enableJogStyle,b=t.alwaysCaptureHotkeys,h=t.captureDocumentHotkeys,w=t.documentHotkeysFocusElementFilter,k=t.enableModifiersForNumbers,S=t.enableInactiveFocus,K=t.skipInitialFocus,F=e.VERSION;u.hasAttribute("tabIndex")||u.setAttribute("tabIndex","-1"),u.style.outline="none",!b&&r.autoplay()||K||r.one("play",function(){u.focus()}),S&&r.on("userinactive",function(){var e=function(){clearTimeout(t)},t=setTimeout(function(){r.off("useractive",e);var 
t=l.activeElement,n=u.querySelector(".vjs-control-bar");t&&t.parentElement==n&&u.focus()},10);r.one("useractive",e)}),r.on("play",function(){var e=u.querySelector(".iframeblocker");e&&""===e.style.display&&(e.style.display="block",e.style.bottom="39px")});var q=function(e){var i,c,s=e.which,y=e.preventDefault.bind(e),f=r.duration();if(r.controls()){var S=l.activeElement;if(b||h&&w(S)||S==u||S==u.querySelector(".vjs-tech")||S==u.querySelector(".vjs-control-bar")||S==u.querySelector(".iframeblocker"))switch(g(e,r)){case 1:y(),(b||h)&&e.stopPropagation(),r.paused()?o(r.play()):r.pause();break;case 2:i=!r.paused(),y(),i&&r.pause(),(c=r.currentTime()-n(e))<=0&&(c=0),r.currentTime(c),i&&o(r.play());break;case 3:i=!r.paused(),y(),i&&r.pause(),(c=r.currentTime()+n(e))>=f&&(c=i?f-.001:f),r.currentTime(c),i&&o(r.play());break;case 5:y(),p?(c=r.currentTime()-1,r.currentTime()<=1&&(c=0),r.currentTime(c)):r.volume(r.volume()-a);break;case 4:y(),p?((c=r.currentTime()+1)>=f&&(c=f),r.currentTime(c)):r.volume(r.volume()+a);break;case 6:m&&r.muted(!r.muted());break;case 7:v&&(r.isFullscreen()?r.exitFullscreen():r.requestFullscreen());break;default:if((s>47&&s<59||s>95&&s<106)&&(k||!(e.metaKey||e.ctrlKey||e.altKey))&&d){var K=48;s>95&&(K=96);var F=s-K;y(),r.currentTime(r.duration()*F*.1)}for(var q in t.customKeys){var j=t.customKeys[q];j&&j.key&&j.handler&&j.key(e)&&(y(),j.handler(r,t,e))}}}},j=!1,T=u.querySelector(".vjs-volume-menu-button")||u.querySelector(".vjs-volume-panel");null!=T&&(T.onmouseover=function(){j=!0},T.onmouseout=function(){j=!1});var E=function(e){if(f)t=0;else var t=l.activeElement;if(r.controls()&&(b||t==u||t==u.querySelector(".vjs-tech")||t==u.querySelector(".iframeblocker")||t==u.querySelector(".vjs-control-bar")||j)&&y){e=window.event||e;var n=Math.max(-1,Math.min(1,e.wheelDelta||-e.detail));e.preventDefault(),1==n?r.volume(r.volume()+a):-1==n&&r.volume(r.volume()-a)}},g=function(e,n){return 
t.playPauseKey(e,n)?1:t.rewindKey(e,n)?2:t.forwardKey(e,n)?3:t.volumeUpKey(e,n)?4:t.volumeDownKey(e,n)?5:t.muteKey(e,n)?6:t.fullscreenKey(e,n)?7:void 0};return r.on("keydown",q),r.on("dblclick",function(e){if(null!=F&&F<="7.1.0"&&r.controls()){var t=e.relatedTarget||e.toElement||l.activeElement;t!=u&&t!=u.querySelector(".vjs-tech")&&t!=u.querySelector(".iframeblocker")||v&&(r.isFullscreen()?r.exitFullscreen():r.requestFullscreen())}}),r.on("mousewheel",E),r.on("DOMMouseScroll",E),h&&document.addEventListener("keydown",function(e){q(e)}),this})});
|
||||
//# sourceMappingURL=videojs.hotkeys.min.js.map
|
@ -5,5 +5,6 @@
|
||||
|
||||
<div class="ui row">
|
||||
<h2 class="ui header">Registrations are currently closed.</h2>
|
||||
<h5 class="ui centered header"><a href="https://github.com/ytorg/Yotter#why-do-i-have-to-register-to-use-yotter">Why do I have to register?</a></h5>
|
||||
</div>
|
||||
</div>
|
@ -19,35 +19,67 @@
|
||||
<span class="category"><i class="retweet icon"></i> {{post.username}}</span>
|
||||
{%endif%}
|
||||
</div>
|
||||
<div class="description break-word">
|
||||
<div style="margin-bottom: 15px;" class="description break-word">
|
||||
<p>{{post.content | safe}}</p>
|
||||
</div>
|
||||
<div class="extra content">
|
||||
{% if post.attachedImg %}
|
||||
<a target="_blank" href="{{post.attachedImg}}"><img alt="Image attachment" class="ui centered fluid rounded medium image" src="{{post.attachedImg}}">
|
||||
<div class="content">
|
||||
{% if post.attachedImages %}
|
||||
{%for img in post.attachedImages %}
|
||||
<a target="_blank" href="{{img}}">
|
||||
<img alt="Image attachment" class="ui centered fluid rounded medium image" src="{{img}}">
|
||||
</a>
|
||||
{%endfor%}
|
||||
{% endif %}
|
||||
{% if post.attachedVideo %}
|
||||
<div class="ui segment"><p><i class="file video icon"></i> <b>This tweet has an attached video.</b></p></div>
|
||||
{%endif%}
|
||||
{% if post.isReply %}
|
||||
<div class="ui card">
|
||||
<div class="content">
|
||||
<div class="header"><a href="/{{post.replyingUser}}">{{post.replyingUser}}</a></div>
|
||||
<div class="meta">{{post.replyingUser}}</div>
|
||||
<div class="description break-word">
|
||||
{{post.replyingTweetContent | safe}}
|
||||
{% if post.replyAttachedImg %}
|
||||
<a target="_blank" href="{{post.replyAttachedImg}}"><img alt="Image attachment" class="ui centered fluid rounded medium image" src="{{post.replyAttachedImg}}"></a>
|
||||
{% endif %}
|
||||
{%if post.unavailableReply%}
|
||||
<div class="ui card">
|
||||
<div class="content">
|
||||
<p> This tweet is unavailable. </p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
{% endif %}
|
||||
{%else%}
|
||||
<div class="ui card">
|
||||
<div class="content">
|
||||
<div class="header"><a href="/{{post.replyingUser}}">{{post.replyingUser}}</a></div>
|
||||
<div class="meta">{{post.replyingUser}}</div>
|
||||
<div class="description break-word">
|
||||
{{post.replyingTweetContent | safe}}
|
||||
|
||||
{% if post.replyAttachedImg %}
|
||||
<a target="_blank" href="{{post.replyAttachedImg}}">
|
||||
<img alt="Image attachment" class="ui centered fluid rounded medium image" src="{{post.replyAttachedImg}}">
|
||||
</a>
|
||||
{% endif %}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
{%endif%}
|
||||
{% endif %}
|
||||
<p>
|
||||
<form class="ui form" action="{{ url_for('savePost', url=post.url.replace('/', '~')) }}" method="post">
|
||||
<button type="submit" class="ui icon button">
|
||||
<button type="submit" class="mini ui icon button">
|
||||
<i class="bookmark outline icon"></i>
|
||||
</button>
|
||||
</button>
|
||||
</form>
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
<div class="extra content">
|
||||
<span class="left floated">
|
||||
<i class="red heart like icon"></i>
|
||||
{{post.likes}}
|
||||
<span> </span>
|
||||
<i class="grey comment icon"></i>
|
||||
{{post.comments}}
|
||||
</span>
|
||||
<span class="right floated">
|
||||
<i class="blue retweet icon"></i>
|
||||
{{post.retweets}}
|
||||
<i class="grey quote left icon"></i>
|
||||
{{post.quotes}}
|
||||
</span>
|
||||
</div>
|
||||
</div> <!--End tweet-->
|
@ -1,6 +1,6 @@
|
||||
<div class="ui card">
|
||||
<a class="image" href="{{url_for('watch', v=video.id, _method='GET')}}">
|
||||
<img src="https://yotter.xyz{{video.videoThumb}}">
|
||||
<img src="{{video.videoThumb}}">
|
||||
</a>
|
||||
<div class="content">
|
||||
<a class="header" href="{{url_for('watch', v=video.id, _method='GET')}}">{{video.videoTitle}}</a>
|
||||
|
@ -1,17 +0,0 @@
|
||||
{% extends "base.html" %}
|
||||
{% block content %}
|
||||
|
||||
<div class="ui text container center aligned centered">
|
||||
<div class="ui icon negative message">
|
||||
<i class="meh outline icon"></i>
|
||||
<div class="content">
|
||||
<div class="header">
|
||||
Ahh... Here we go again!
|
||||
</div>
|
||||
<p>Google is asking to solve a Captcha. As we don't want you to do it, we'll do it for you. <b> Please, try again in a few seconds.</b></p>
|
||||
<a href="{{origin}}"> Click here to reload </a>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{%endblock%}
|
@ -9,11 +9,13 @@
|
||||
{{data.channel_name}}
|
||||
</h2>
|
||||
</div>
|
||||
{% if data.short_description %}
|
||||
<div class="ui vertical segment">
|
||||
<p>{{data.short_description}}</p>
|
||||
</div>
|
||||
{%endif%}
|
||||
<div class="ui vertical segment">
|
||||
<div class="ui tiny statistic">
|
||||
<!--<div class="ui tiny statistic">
|
||||
<div class="value">
|
||||
{%if data.approx_suscriber_count == None%}
|
||||
<i class="user icon"></i> ?
|
||||
@ -24,20 +26,20 @@
|
||||
<div class="label">
|
||||
Followers
|
||||
</div>
|
||||
</div>
|
||||
</div>-->
|
||||
{% if restricted or current_user.is_authenticated %}
|
||||
{% if not current_user.is_following_yt(data.channel_id) %}
|
||||
<form action="{{ url_for('ytfollow', channelId=data.channel_id) }}" method="post">
|
||||
<button type="submit" value="Submit" class="ui red button">
|
||||
<i class="user icon"></i>
|
||||
Suscribe
|
||||
Subscribe
|
||||
</button>
|
||||
</form>
|
||||
{% else %}
|
||||
<form action="{{ url_for('ytunfollow', channelId=data.channel_id) }}" method="post">
|
||||
<button type="submit" value="Submit" class="ui red active button">
|
||||
<i class="user icon"></i>
|
||||
Unsuscribe
|
||||
Unsubscribe
|
||||
</button>
|
||||
</form>
|
||||
{%endif%}
|
||||
@ -54,7 +56,7 @@
|
||||
{% for video in data['items'] %}
|
||||
<div class="ui card">
|
||||
<a class="image" href="{{url_for('watch', v=video.id, _method='GET')}}">
|
||||
<img src="https://yotter.xyz{{video.thumbnail}}">
|
||||
<img src="{{video.thumbnail}}">
|
||||
</a>
|
||||
<div class="content">
|
||||
<a class="header" href="{{url_for('watch', v=video.id, _method='GET')}}">{{video.title}}</a>
|
||||
|
@ -41,9 +41,9 @@
|
||||
<div class="text container ui">
|
||||
<div class="ui warning message">
|
||||
<div class="header">
|
||||
{{config.admin_message_title}}
|
||||
{{config.admin_message_title|safe}}
|
||||
</div>
|
||||
{{config.admin_message}}
|
||||
{{config.admin_message|safe}}
|
||||
</div>
|
||||
</div>
|
||||
{% endif %}
|
||||
|
@ -1,8 +1,9 @@
|
||||
{% extends "base.html" %}
|
||||
|
||||
{% block content %}
|
||||
{% if registrations %}
|
||||
{% if registrations %}
|
||||
<h2 class="ui centered header">Register</h2>
|
||||
<h5 class="ui centered header"><a href="https://github.com/ytorg/Yotter#why-do-i-have-to-register-to-use-yotter">Why do I have to register?</a></h5>
|
||||
<div class="ui text container" id="container">
|
||||
<form class="ui form" action="" method="post">
|
||||
{{ form.hidden_tag() }}
|
||||
|
@ -6,7 +6,7 @@
|
||||
{{ form.hidden_tag() }}
|
||||
<p>
|
||||
{{ form.username.label }}<br>
|
||||
{{ form.username(size=32) }}<br>
|
||||
{{ form.username(size=32, autofocus=true) }}<br>
|
||||
{% for error in form.username.errors %}
|
||||
<span style="color: red;">[{{ error }}]</span>
|
||||
{% endfor %}
|
||||
|
@ -26,6 +26,7 @@
|
||||
<div class="ui icon header">
|
||||
<i class="user circle outline icon"></i>
|
||||
Can I register?
|
||||
<h5 class="ui centered header"><a href="https://github.com/ytorg/Yotter#why-do-i-have-to-register-to-use-yotter">Why do I have to register?</a></h5>
|
||||
</div>
|
||||
|
||||
{%if cani%}
|
||||
@ -42,5 +43,13 @@
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="ui text container center aligned centered">
|
||||
<div class="ui segments">
|
||||
<div class="ui segment">
|
||||
<p>Yotter version: <b><a href="https://github.com/ytorg/Yotter/tags">{{tag}}</a></b></p>
|
||||
<p>Latest update: <b><a href="https://github.com/ytorg/Yotter/commits/">{{update}}</a></b></p>
|
||||
<p>Commit hash: <b><a href="https://github.com/ytorg/Yotter/commits/">{{hash}}</a></b></p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
{%endblock%}
|
@ -1,60 +1,108 @@
|
||||
{% extends "base.html" %}
|
||||
|
||||
{% block content %}
|
||||
<div class="blue ui centered card">
|
||||
<div class="content">
|
||||
<div class="center aligned author">
|
||||
<img alt="Profile picture" class="ui avatar image" src="{{user.profilePic}}">
|
||||
<div class="ui text container center aligned">
|
||||
<div class="ui segments">
|
||||
<div class="ui centered vertical segment">
|
||||
<h2 class="ui header">
|
||||
<img src="{{user.profilePic}}" class="ui circular image">
|
||||
{{user.profileFullName}} <span style="color:grey;font-size: small;">({{user.profileUsername}})</span>
|
||||
</h2>
|
||||
</div>
|
||||
<div class="center aligned header"><a href="https://nitter.net/{{ user.profileUsername.replace('@','') }}">
|
||||
{%if user.profileFullName%}
|
||||
{{user.profileFullName}}
|
||||
{%else%}
|
||||
{{user.profileUsername}}
|
||||
{%endif%}
|
||||
</a></div>
|
||||
<div class="center aligned description">
|
||||
<div class="ui horizontal segments">
|
||||
<div class="ui segment">
|
||||
<div class="ui centered vertical segment">
|
||||
<p>{{user.profileBio}}</p>
|
||||
</div>
|
||||
</div>
|
||||
<div class="ui segment">
|
||||
{% if not current_user.is_following_tw(user.profileUsername.replace('@','')) %}
|
||||
<p>
|
||||
<form action="{{ url_for('follow', username=user.profileUsername.replace('@','')) }}" method="post">
|
||||
{{ form.hidden_tag() }}
|
||||
{{ form.submit(value='Follow') }}
|
||||
</form>
|
||||
</p>
|
||||
{% else %}
|
||||
<p>
|
||||
<form action="{{ url_for('unfollow', username=user.profileUsername.replace('@','')) }}" method="post">
|
||||
{{ form.hidden_tag() }}
|
||||
{{ form.submit(value='Unfollow') }}
|
||||
</form>
|
||||
</p>
|
||||
{% endif %}
|
||||
</div>
|
||||
</div>
|
||||
<div class="ui horizontal segments">
|
||||
<div class="ui segment">
|
||||
<div class="statistic">
|
||||
<div class="value">
|
||||
<i class="users icon"></i>{{user.followers}}
|
||||
<b>{{user.followers}}</b>
|
||||
</div>
|
||||
<div class="label">
|
||||
Followers
|
||||
<b>FOLLOWERS</b>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="ui segment">
|
||||
<div class="statistic">
|
||||
<div class="value">
|
||||
<b>{{user.following}}</b>
|
||||
</div>
|
||||
<div class="label">
|
||||
<b>FOLLOWING</b>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="ui segment">
|
||||
<div class="statistic">
|
||||
<div class="value">
|
||||
<b>{{user.tweets}}</b>
|
||||
</div>
|
||||
<div class="label">
|
||||
<b>TWEETS</b>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="ui segment">
|
||||
<div class="statistic">
|
||||
<div class="value">
|
||||
<b>{{user.likes}}</b>
|
||||
</div>
|
||||
<div class="label">
|
||||
<b>LIKES</b>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="center aligned extra content">
|
||||
{% if not current_user.is_following_tw(user.profileUsername.replace('@','')) %}
|
||||
<p>
|
||||
<form action="{{ url_for('follow', username=user.profileUsername.replace('@','')) }}" method="post">
|
||||
{{ form.hidden_tag() }}
|
||||
{{ form.submit(value='Follow') }}
|
||||
</form>
|
||||
</p>
|
||||
{% else %}
|
||||
<p>
|
||||
<form action="{{ url_for('unfollow', username=user.profileUsername.replace('@','')) }}" method="post">
|
||||
{{ form.hidden_tag() }}
|
||||
{{ form.submit(value='Unfollow') }}
|
||||
</form>
|
||||
</p>
|
||||
{% endif %}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="text container" id="card-container">
|
||||
<div style="margin-top: 15px;" class="text container" id="card-container">
|
||||
{% if not posts %}
|
||||
{% include '_empty_feed.html' %}
|
||||
<div style="margin-top: 20px;" class="ui container center aligned">
|
||||
<h2> <i class="window close outline icon"></i> This feed is empty. </h3>
|
||||
</div>
|
||||
{% elif posts == 'Protected' %}
|
||||
<div style="margin-top: 20px;" class="ui container center aligned">
|
||||
<h2> <i class="lock icon"></i> This account's tweets are protected. </h3>
|
||||
</div>
|
||||
{% else %}
|
||||
{% for post in posts %}
|
||||
{% include '_twitter_post.html' %}
|
||||
{% endfor %}
|
||||
<div class="scroller">
|
||||
<a href="#top" class="ui button">
|
||||
<i style="margin: 0;" class="chevron up icon"></i>
|
||||
</a>
|
||||
</div>
|
||||
<br>
|
||||
<div class="ui center aligned text container">
|
||||
<a href="/{{user.profileUsername}}/{{prev_page}}"> <button class="ui left attached button"><i class="angle blue left icon"></i></button> </a>
|
||||
<a href="/{{user.profileUsername}}/{{page+1}}"> <button class="right attached ui button"><i class="angle blue right icon"></i></button></a>
|
||||
</div>
|
||||
<br>
|
||||
{% endif %}
|
||||
<div class="scroller">
|
||||
<a href="#top" class="ui button">
|
||||
<i style="margin: 0;" class="chevron up icon"></i>
|
||||
</a>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{% endblock %}
|
@ -1,106 +1,275 @@
|
||||
<head>
|
||||
<link rel="stylesheet" type= "text/css" href="{{ url_for('static',filename='video-js.min.css') }}">
|
||||
<link rel="stylesheet" type= "text/css" href="{{ url_for('static',filename='video-js.min.css') }}">
|
||||
<script src="{{ url_for('static',filename='video.min.js') }}"></script>
|
||||
|
||||
<link rel="stylesheet" type= "text/css" href="{{ url_for('static',filename='quality-selector.css') }}">
|
||||
<script src="{{ url_for('static',filename='videojs-quality-selector.min.js') }}"></script>
|
||||
</head>
|
||||
{% extends "base.html" %}
|
||||
{% block content %}
|
||||
<div class="ui text container">
|
||||
{% if info.error != None or info.playability_error != None %}
|
||||
<div style="width: 80%;" class="ui container">
|
||||
|
||||
{% if info.error == True %}
|
||||
<div class="ui center aligned text container">
|
||||
<div class="ui segment">
|
||||
<h4 class="ui header">ERROR WITH VIDEO</h4>
|
||||
<h3 class="ui header"><i class="times icon"></i> ERROR WITH VIDEO </h3>
|
||||
<h5 class="ui header">Try to reload the page. Most times this solves the error.</h5>
|
||||
<h4 class="ui header">Other reasons.</h4>
|
||||
<div class="ui list">
|
||||
<div class="item">
|
||||
<div class="header"><i class="calendar icon"> </i>Scheduled Video</div>
|
||||
Scheduled videos are not supported.
|
||||
</div>
|
||||
<div class="item">
|
||||
<div class="header"><i class="red circle icon"> </i>Livestream video</div>
|
||||
Livestream videos are not yet supported.
|
||||
</div>
|
||||
<div class="item">
|
||||
<div class="header">Other reasons</div>
|
||||
If none of the above is the case, you might have found a bug. <a href="https://github.com/ytorg/Yotter/issues/new/choose">Report it!</a>
|
||||
</div>
|
||||
</div>
|
||||
<p>Sorry for the inconveninet. Yotter is in a Beta state, so expect errors!</p>
|
||||
</div>
|
||||
</div>
|
||||
{% elif info.playability_status != None %}
|
||||
<div class="ui center aligned text container">
|
||||
<div class="ui segment">
|
||||
<h4 class="ui header">SCHEDULED VIDEO</h4>
|
||||
<h5 class="ui header">{{video.premieres}}</h5>
|
||||
</div>
|
||||
</div>
|
||||
{% elif info.live %}
|
||||
<div class="video-js-responsive-container vjs-hd">
|
||||
<video-js id=live width="1080" class="video-js vjs-default-skin" controls>
|
||||
<source
|
||||
src="#"
|
||||
type="application/x-mpegURL">
|
||||
</video-js>
|
||||
</div>
|
||||
<div class="ui center aligned text container">
|
||||
{% else %}
|
||||
{% if info.start_time != None %}
|
||||
{% elif info.is_live != None %}
|
||||
<!--<div class="video-js-responsive-container vjs-hd">
|
||||
<video-js id=live width="1080" class="video-js vjs-default-skin" controls>
|
||||
<source
|
||||
src="#"
|
||||
type="application/x-mpegURL">
|
||||
</video-js>
|
||||
</div>-->
|
||||
<div class="ui center aligned text container">
|
||||
<div class="ui segment">
|
||||
<h3 class="ui header"><i class="red small circle icon"></i> LIVESTREAM VIDEO</h3>
|
||||
<h4 class="ui header">FEATURE AVAILABLE SOON</h4>
|
||||
<h5 class="ui header">Livestreams are under developent and still not supported on Yotter.</h5>
|
||||
</div>
|
||||
</div>
|
||||
{%else%}
|
||||
<div class="video-js-responsive-container vjs-hd">
|
||||
<video-js id="video-1" class="video-js vjs-default-skin vjs-big-play-centered"
|
||||
qualitySelector
|
||||
controls
|
||||
autofocus
|
||||
data-setup='{ "playbackRates": [0.5, 1, 1.25, 1.5, 1.75, 2] }'
|
||||
width="1080"
|
||||
buffered
|
||||
preload="none">
|
||||
{% if config.isInstance %}
|
||||
{% for source in info.formats %}
|
||||
<source src="{{source.url}}" type="video/{{source.ext}}" label="{{source.format_note}}">
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
<p class="vjs-no-js">To view this video please enable JavaScript, and consider upgrading to a web browser that
|
||||
<a href="http://videojs.com/html5-video-support/" target="_blank">supports HTML5 video</a></p>
|
||||
</video-js>
|
||||
</div>
|
||||
{%endif%}
|
||||
|
||||
<div class="ui segments">
|
||||
<div class="ui segment">
|
||||
<h3 class="ui header"><i class="red small circle icon"></i> LIVESTREAM VIDEO</h3>
|
||||
<h4 class="ui header">FEATURE AVAILABLE SOON</h4>
|
||||
<h5 class="ui header">Livestreams are under developent and still not supported on Yotter.</h5>
|
||||
<h2 class="ui header break-word">{{info.title}}</h2>
|
||||
</div>
|
||||
<div class="ui horizontal segments">
|
||||
<div class="center aligned ui segment">
|
||||
<a href="{{ url_for('channel', id=info.channel_id)}}">
|
||||
<i class="user icon"></i> <b>{{info.uploader}}</b>
|
||||
</a>
|
||||
<div class="label">
|
||||
<i class="user icon"></i>{{info.subscriber_count}}
|
||||
</div>
|
||||
</div>
|
||||
<div class="center aligned ui segment">
|
||||
<div class="ui mini statistic">
|
||||
<div class="value">
|
||||
<i class="grey eye icon"></i> <b>{{info.view_count}}</b>
|
||||
</div>
|
||||
<div class="label">
|
||||
views
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="center aligned ui segment">
|
||||
{% if info.average_rating | int > 2.5 %}
|
||||
<div class="ui mini statistic">
|
||||
<div class="value">
|
||||
<i class="green thumbs up icon"></i> <b>{{info.average_rating}}/5</b>
|
||||
</div>
|
||||
<div class="label">
|
||||
Total: {{info.total_likes}} votes
|
||||
</div>
|
||||
</div>
|
||||
{% else %}
|
||||
<div class="ui mini statistic">
|
||||
<div class="value">
|
||||
<i class="red thumbs down icon"></i> <b>{{info.average_rating}}/5</b>
|
||||
</div>
|
||||
<div class="label">
|
||||
Total: {{info.total_likes}} votes
|
||||
</div>
|
||||
</div>
|
||||
{% endif %}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="ui raised center aligned segment break-word">
|
||||
<p><i class="grey music icon"></i><b>Audio Only</b></p>
|
||||
<audio controls>
|
||||
{% for format in info.audio_formats %}
|
||||
<source src="{{format.url}}">
|
||||
{%endfor%}
|
||||
No audio available.
|
||||
</audio>
|
||||
</div>
|
||||
|
||||
<div class="ui raised segment break-word">
|
||||
<p>{{info.description}}</p>
|
||||
</div>
|
||||
</div>
|
||||
{%else%}
|
||||
<div class="video-js-responsive-container vjs-hd">
|
||||
<video-js autofocus class="video-js vjs-default-skin"
|
||||
data-setup='{ "playbackRates": [0.5, 0.75, 1, 1.25,1.5, 1.75, 2] }'
|
||||
width="1080"
|
||||
controls
|
||||
buffered
|
||||
preload="none">
|
||||
{% if config.nginxVideoStream %}
|
||||
{% for source in vsources %}
|
||||
<source src="{{source.src}}" type="{{source.type}}">
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
</video-js>
|
||||
</div>
|
||||
{%endif%}
|
||||
|
||||
<div class="ui segments">
|
||||
<div class="ui segment">
|
||||
<h2 class="ui header break-word">{{info.title}}</h2>
|
||||
</div>
|
||||
<div class="ui horizontal segments">
|
||||
<div class="center aligned ui segment">
|
||||
<a href="{{ url_for('channel', id=info.author_id)}}">
|
||||
<i class="user icon"></i> {{info.author}}
|
||||
</a>
|
||||
</div>
|
||||
<div class="center aligned ui segment">
|
||||
<h4 class="ui header"><i class="grey eye icon"></i>{{info.view_count}}</h4>
|
||||
</div>
|
||||
<div class="center aligned ui segment">
|
||||
{% if info.rating | int > 49 %}
|
||||
<h4 class="ui header"><i class="green thumbs up icon"></i> {{info.rating}}%</h4>
|
||||
{% else %}
|
||||
<h4 class="ui header"><i class="red thumbs down icon"></i> {{info.rating}}%</h4>
|
||||
{% endif %}
|
||||
</div>
|
||||
{%if videocomments%}
|
||||
<div class="ui comments">
|
||||
<h3 class="ui dividing header">Comments</h3>
|
||||
{% for comment in videocomments %}
|
||||
{% include '_video_comment.html' %}
|
||||
{% endfor %}
|
||||
</div>
|
||||
{%endif%}
|
||||
|
||||
<div class="ui raised center aligned segment break-word">
|
||||
<p><i class="grey music icon"></i><b>Audio Only</b></p>
|
||||
<audio controls>
|
||||
{% for format in info.formats %}
|
||||
{% if format.audio_valid %}
|
||||
<source src="{{format.url}}">
|
||||
{%endif%}
|
||||
{%endfor%}
|
||||
No audio available.
|
||||
</audio>
|
||||
</div>
|
||||
|
||||
<div class="ui raised segment break-word">
|
||||
<p>{{info.description}}</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="ui comments">
|
||||
<h3 class="ui dividing header">Comments</h3>
|
||||
{% for comment in videocomments %}
|
||||
{% include '_video_comment.html' %}
|
||||
{% endfor %}
|
||||
</div>
|
||||
|
||||
<script src="{{ url_for('static',filename='video.min.js') }}"></script>
|
||||
{% if info.live %}
|
||||
{% if info.live %}
|
||||
<script src="{{ url_for('static',filename='videojs-http-streaming.min.js')}}"></script>
|
||||
<script>
|
||||
var player = videojs('live');
|
||||
player.play();
|
||||
</script>
|
||||
{% endif %}
|
||||
{%endif%}
|
||||
|
||||
<!-- SETUP QUALITY SELECTOR -->
|
||||
<script>
|
||||
videojs("video-1", {}, function() {
|
||||
var player = this;
|
||||
|
||||
player.controlBar.addChild('QualitySelector');
|
||||
});
|
||||
</script>
|
||||
|
||||
<!-- SETUP CONTROL HOTKEYS -->
|
||||
<script src="{{ url_for('static',filename='videojs.hotkeys.min.js') }}"></script>
|
||||
<script>
|
||||
// initialize the plugin
|
||||
|
||||
videojs('video-1').ready(function() {
|
||||
this.hotkeys({
|
||||
volumeStep: 0.1,
|
||||
seekStep: 5,
|
||||
enableMute: true,
|
||||
enableFullscreen: true,
|
||||
enableNumbers: false,
|
||||
enableVolumeScroll: true,
|
||||
enableHoverScroll: true,
|
||||
|
||||
// Mimic VLC seek behavior, and default to 5.
|
||||
seekStep: function(e) {
|
||||
if (e.ctrlKey && e.altKey) {
|
||||
return 5*60;
|
||||
} else if (e.ctrlKey) {
|
||||
return 60;
|
||||
} else if (e.altKey) {
|
||||
return 10;
|
||||
} else {
|
||||
return 5;
|
||||
}
|
||||
},
|
||||
|
||||
// Enhance existing simple hotkey with a complex hotkey
|
||||
fullscreenKey: function(e) {
|
||||
// fullscreen with the F key or Ctrl+Enter
|
||||
return ((e.which === 70) || (e.ctrlKey && e.which === 13));
|
||||
},
|
||||
|
||||
// Custom Keys
|
||||
customKeys: {
|
||||
|
||||
// Add new simple hotkey
|
||||
simpleKey: {
|
||||
key: function(e) {
|
||||
// Toggle something with S Key
|
||||
return (e.which === 83);
|
||||
},
|
||||
handler: function(player, options, e) {
|
||||
// Example
|
||||
if (player.paused()) {
|
||||
player.play();
|
||||
} else {
|
||||
player.pause();
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
// Add new complex hotkey
|
||||
complexKey: {
|
||||
key: function(e) {
|
||||
// Toggle something with CTRL + D Key
|
||||
return (e.ctrlKey && e.which === 68);
|
||||
},
|
||||
handler: function(player, options, event) {
|
||||
// Example
|
||||
if (options.enableMute) {
|
||||
player.muted(!player.muted());
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
// Override number keys example from https://github.com/ctd1500/videojs-hotkeys/pull/36
|
||||
numbersKey: {
|
||||
key: function(event) {
|
||||
// Override number keys
|
||||
return ((event.which > 47 && event.which < 59) || (event.which > 95 && event.which < 106));
|
||||
},
|
||||
handler: function(player, options, event) {
|
||||
// Do not handle if enableModifiersForNumbers set to false and keys are Ctrl, Cmd or Alt
|
||||
if (options.enableModifiersForNumbers || !(event.metaKey || event.ctrlKey || event.altKey)) {
|
||||
var sub = 48;
|
||||
if (event.which > 95) {
|
||||
sub = 96;
|
||||
}
|
||||
var number = event.which - sub;
|
||||
player.currentTime(player.duration() * number * 0.1);
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
emptyHotkey: {
|
||||
// Empty
|
||||
},
|
||||
|
||||
withoutKey: {
|
||||
handler: function(player, options, event) {
|
||||
console.log('withoutKey handler');
|
||||
}
|
||||
},
|
||||
|
||||
withoutHandler: {
|
||||
key: function(e) {
|
||||
return true;
|
||||
}
|
||||
},
|
||||
|
||||
malformedKey: {
|
||||
key: function() {
|
||||
console.log('I have a malformed customKey. The Key function must return a boolean.');
|
||||
},
|
||||
handler: function(player, options, event) {
|
||||
//Empty
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
});
|
||||
</script>
|
||||
{% endif %}
|
||||
{% endblock %}
|
@ -4,7 +4,7 @@
|
||||
<div class="ui center aligned text container">
|
||||
<form action="{{url_for('ytsearch', _method='GET')}}">
|
||||
<div class="ui search">
|
||||
<input class="prompt" name="q" type="text" placeholder="Search...">
|
||||
<input class="prompt" name="q" type="text" placeholder="Search..." autofocus>
|
||||
<select name="s" id="sort">
|
||||
<option value="0">Relevance</option>
|
||||
<option value="3">Views</option>
|
||||
@ -14,7 +14,7 @@
|
||||
</div>
|
||||
</form>
|
||||
</div>
|
||||
<div class="ui text container"></div>
|
||||
<div class="ui text container">
|
||||
{% if results %}
|
||||
{% if results.channels %}
|
||||
<h3 class="ui dividing header">Users</h3>
|
||||
@ -23,7 +23,7 @@
|
||||
{% for res in results.channels %}
|
||||
<div class="item">
|
||||
<div class="image">
|
||||
{% if config.nginxVideoStream %}
|
||||
{% if config.isInstance %}
|
||||
<img src="{{res.thumbnail}}" alt="Avatar">
|
||||
{% else %}
|
||||
<img alt="Avatar" src="{{ url_for('img', url=res.thumbnail) }}">
|
||||
@ -93,5 +93,5 @@
|
||||
</div>
|
||||
{%endif%}
|
||||
</div>
|
||||
|
||||
</div>
|
||||
{% endblock %}
|
@ -1,4 +1,4 @@
|
||||
version: '3.8'
|
||||
version: "3.8"
|
||||
services:
|
||||
mariadb:
|
||||
image: mariadb:10.5
|
||||
@ -9,9 +9,27 @@ services:
|
||||
MYSQL_PASSWORD: changeme
|
||||
restart: unless-stopped
|
||||
volumes:
|
||||
- mysql:/var/lib/mysql
|
||||
- mysql:/var/lib/mysql
|
||||
healthcheck:
|
||||
test: ["CMD", "mysqladmin", "ping", "--silent"]
|
||||
test: ["CMD", "mysqladmin", "ping", "--silent"]
|
||||
nginx:
|
||||
image: ytorg/nginx:latest
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
HOSTNAME: 'changeme.example.com'
|
||||
HTTP_PORT: 8080
|
||||
YOTTER_ADDRESS: 'http://yotter:5000'
|
||||
YTPROXY_ADDRESS: 'http://unix:/var/run/ytproxy/http-proxy.sock'
|
||||
ports:
|
||||
- "127.0.0.1:8080:8080"
|
||||
volumes:
|
||||
- "/var/run/ytproxy:/app/socket/"
|
||||
ytproxy:
|
||||
image: 1337kavin/ytproxy:latest
|
||||
restart: unless-stopped
|
||||
volumes:
|
||||
- "/var/run/ytproxy:/app/socket/"
|
||||
network_mode: host
|
||||
yotter:
|
||||
image: ytorg/yotter:latest
|
||||
restart: unless-stopped
|
||||
@ -21,9 +39,14 @@ services:
|
||||
DATABASE_URL: mysql+pymysql://yotter:changeme@mariadb:3306/yotter
|
||||
depends_on:
|
||||
- mariadb
|
||||
- ytproxy
|
||||
volumes:
|
||||
- migrations:/usr/src/app/migrations
|
||||
- ./yotter-config.json:/usr/src/app/yotter-config.json
|
||||
- migrations:/usr/src/app/migrations
|
||||
- ./yotter-config.json:/usr/src/app/yotter-config.json
|
||||
healthcheck:
|
||||
test: ["CMD", "wget" ,"--no-verbose", "--tries=1", "--spider", "http://localhost:5000"]
|
||||
interval: 1m
|
||||
timeout: 3s
|
||||
volumes:
|
||||
mysql:
|
||||
migrations:
|
||||
|
12
nginx.Dockerfile
Normal file
12
nginx.Dockerfile
Normal file
@ -0,0 +1,12 @@
|
||||
FROM nginx:mainline-alpine
|
||||
|
||||
WORKDIR /var/www
|
||||
COPY ./app/static ./static
|
||||
COPY ./nginx.conf.tmpl /nginx.conf.tmpl
|
||||
|
||||
ENV HOSTNAME= \
|
||||
HTTP_PORT=80 \
|
||||
YOTTER_ADDRESS=http://127.0.0.1:5000 \
|
||||
YTPROXY_ADDRESS=http://unix:/var/run/ytproxy/http-proxy.sock
|
||||
|
||||
CMD ["/bin/sh", "-c", "envsubst '${HOSTNAME} ${HTTP_PORT} ${YOTTER_ADDRESS} ${YTPROXY_ADDRESS}' < /nginx.conf.tmpl > /etc/nginx/conf.d/default.conf && nginx -g 'daemon off;'"]
|
10
nginx.Dockerfile.dockerignore
Normal file
10
nginx.Dockerfile.dockerignore
Normal file
@ -0,0 +1,10 @@
|
||||
.circleci
|
||||
.git
|
||||
.github
|
||||
.gitignore
|
||||
cache
|
||||
Dockerfile
|
||||
docker-compose.yml
|
||||
LICENSE
|
||||
*.md
|
||||
dockerhash.txt
|
30
nginx.conf.tmpl
Normal file
30
nginx.conf.tmpl
Normal file
@ -0,0 +1,30 @@
|
||||
server {
|
||||
listen ${HTTP_PORT};
|
||||
server_name ${HOSTNAME};
|
||||
access_log off;
|
||||
|
||||
location / {
|
||||
proxy_pass ${YOTTER_ADDRESS};
|
||||
proxy_set_header Host $host;
|
||||
proxy_http_version 1.1;
|
||||
proxy_set_header Connection "";
|
||||
}
|
||||
|
||||
location /static/ {
|
||||
root /var/www;
|
||||
sendfile on;
|
||||
aio threads=default;
|
||||
}
|
||||
|
||||
location ~ (^/videoplayback$|/videoplayback/|/vi/|/a/|/ytc|/vi_webp/|/sb/) {
|
||||
proxy_pass ${YTPROXY_ADDRESS};
|
||||
add_header Access-Control-Allow-Origin *;
|
||||
sendfile on;
|
||||
tcp_nopush on;
|
||||
aio_write on;
|
||||
aio threads=default;
|
||||
directio 512;
|
||||
proxy_http_version 1.1;
|
||||
proxy_set_header Connection "";
|
||||
}
|
||||
}
|
107
nitter/README.md
Normal file
107
nitter/README.md
Normal file
@ -0,0 +1,107 @@
|
||||
- [user.py](#userpy)
|
||||
- [feed.py](#feedpy)
|
||||
- [Tweets format examples](#tweets-format-examples)
|
||||
|
||||
## user.py
|
||||
|
||||
### get_user_info(username)
|
||||
Returns the info of a particular Twitter user without tweets. If the user does not exist, it returns `False`.
|
||||
|
||||
##### Return example:
|
||||
|
||||
`user.get_user_info("Snowden")`
|
||||
```
|
||||
{
|
||||
'profileFullName': 'Edward Snowden',
|
||||
'profileUsername': '@Snowden',
|
||||
'profileBio': 'I used to work for the government. Now I work for the public. President at @FreedomofPress.',
|
||||
'tweets': '5,009',
|
||||
'following': '1',
|
||||
'followers': '4.41M',
|
||||
'likes': '473',
|
||||
'profilePic': 'https://nitter.net/pic/profile_images%2F648888480974508032%2F66_cUYfj.jpg'
|
||||
}
|
||||
```
|
||||
|
||||
### get_tweets(user, page=1)
|
||||
Returns a list with the tweets on the user feed from the specified page (default is 1).
|
||||
|
||||
Example usage: `user.get_tweets("Snowden")`
|
||||
|
||||
### get_feed_tweets(html)
|
||||
This function is used by `get_tweets`. This should not be used as it is a utility function. If you want to know more, you can explore the code.
|
||||
|
||||
## feed.py
|
||||
|
||||
### get_feed(usernames, daysMaxOld=10, includeRT=True)
|
||||
This function returns a chronologically ordered feed given a list of usernames (i.e ['Snowden', 'DanielMicay', 'FluffyPony']). Optional parameters are:
|
||||
* `daysMaxOld`: sets the maximum number of days that the feed posts that will be returned can be.
|
||||
* `includeRT`: If `False` retweets will be excluded from the feed.
|
||||
|
||||
## Tweets format examples:
|
||||
**Normal tweet**:
|
||||
```
|
||||
{
|
||||
'op': '@Snowden',
|
||||
'twitterName': 'Edward Snowden',
|
||||
'timeStamp': '2020-11-03 23:11:40',
|
||||
'date': 'Nov 3',
|
||||
'content': Markup('Vote. There is still time.'),
|
||||
'username': '@Snowden',
|
||||
'isRT': False,
|
||||
'profilePic': 'https://nitter.net/pic/profile_images%2F648888480974508032%2F66_cUYfj_normal.jpg',
|
||||
'url': 'https://nitter.net/Snowden/status/1323764814817218560#m'
|
||||
}
|
||||
```
|
||||
|
||||
**Retweet**:
|
||||
```
|
||||
{
|
||||
'op': '@StellaMoris1',
|
||||
'twitterName': 'Stella Moris',
|
||||
'timeStamp': '2020-11-02 10:21:09',
|
||||
'date': 'Nov 2',
|
||||
'content': Markup("Spoke to Julian. A friend of his killed himself in the early hours of this morning. His body is still in the cell on Julian's wing. Julian is devastated.\n\nManoel Santos was gay. He'd lived in UK for 20 years. The Home Office served him with a deportation notice to Brazil.(Thread)"),
|
||||
'username': ' Edward Snowden retweeted',
|
||||
'isRT': True,
|
||||
'profilePic': 'https://nitter.net/pic/profile_images%2F1303198488184975360%2FiH4BdNIT_normal.jpg',
|
||||
'url': 'https://nitter.net/StellaMoris1/status/1323208519315849217#m'
|
||||
}
|
||||
```
|
||||
|
||||
**Tweet / Retweet with images**:
|
||||
```
|
||||
{
|
||||
'op': '@Reuters',
|
||||
'twitterName': 'Reuters',
|
||||
'timeStamp': '2020-11-02 10:35:07',
|
||||
'date': 'Nov 2',
|
||||
'content': Markup('U.S. whistleblower Edward Snowden seeks Russian passport for sake of future son <a href="http://reut.rs/3mNZQuf">reut.rs/3mNZQuf</a>'),
|
||||
'username': ' Edward Snowden retweeted',
|
||||
'isRT': True,
|
||||
'profilePic': 'https://nitter.net/pic/profile_images%2F1194751949821939712%2F3VBu4_Sa_normal.jpg',
|
||||
'url': 'https://nitter.net/Reuters/status/1323212031978295298#m',
|
||||
'attachedImages': ['https://nitter.net/pic/media%2FElz-VKLWkAAvTf8.jpg%3Fname%3Dorig']
|
||||
}
|
||||
```
|
||||
|
||||
**Tweet quoting another user**
|
||||
```
|
||||
{
|
||||
'op': '@lsjourneys',
|
||||
'twitterName': 'Lsjourney',
|
||||
'timeStamp': '2020-10-28 21:17:09',
|
||||
'date': 'Oct 28',
|
||||
'content': Markup('citizenfive 👶'),
|
||||
'username': ' Edward Snowden retweeted',
|
||||
'isRT': True,
|
||||
'profilePic': 'https://nitter.net/pic/profile_images%2F647551437875101696%2FBA2I4vuf_normal.jpg',
|
||||
'url': 'https://nitter.net/lsjourneys/status/1321561665117310979#m',
|
||||
'isReply': True,
|
||||
'replyingTweetContent': Markup('<div class="quote-text">A long time in the making: our greatest collaboration is coming soon.</div>'),
|
||||
'replyAttachedImages': ['https://nitter.net/pic/media%2FElcdC-BXgAwtT79.jpg%3Fname%3Dorig'],
|
||||
'replyingUser': '@lsjourneys'
|
||||
}
|
||||
```
|
||||
|
||||
> Video is not fully supported yet. A parameter `'attachedVideo': True` is added when a video is present on the tweet.
|
50
nitter/feed.py
Normal file
50
nitter/feed.py
Normal file
@ -0,0 +1,50 @@
|
||||
from requests_futures.sessions import FuturesSession
|
||||
from multiprocessing import Process
|
||||
from werkzeug.datastructures import Headers
|
||||
from concurrent.futures import as_completed
|
||||
from numerize import numerize
|
||||
from bs4 import BeautifulSoup
|
||||
from operator import itemgetter, attrgetter
|
||||
from re import findall
|
||||
from nitter import user
|
||||
import time, datetime
|
||||
import requests
|
||||
import bleach
|
||||
import urllib
|
||||
import json
|
||||
import re
|
||||
|
||||
# Load the instance configuration once at import time. Using a context
# manager closes the file handle (the original bare open() leaked it).
with open('yotter-config.json') as _config_file:
    config = json.load(_config_file)
|
||||
|
||||
def get_feed(usernames, daysMaxOld=10, includeRT=True):
    '''
    Return a chronologically ordered (newest first) list of tweets for the
    given usernames, fetched concurrently from the configured Nitter instance.

    :param usernames: iterable of Twitter usernames (without '@').
    :param daysMaxOld: maximum post age in days.
        NOTE(review): currently unused -- kept for interface compatibility;
        age filtering is not implemented. TODO: confirm and implement.
    :param includeRT: when False, retweets are excluded from the result.
    '''
    feedTweets = []
    # Fire all profile requests concurrently; parse each page as it completes.
    with FuturesSession() as session:
        futures = [session.get(f'{config["nitterInstance"]}{u}') for u in usernames]
        for future in as_completed(futures):
            res = future.result().content.decode('utf-8')
            html = BeautifulSoup(res, "html.parser")
            feedPosts = user.get_feed_tweets(html)
            feedTweets.append(feedPosts)

    userFeed = []
    for feed in feedTweets:
        # get_feed_tweets may return sentinels instead of a tweet list:
        # a dict ({"emptyFeed": True}) or a string ('Empty feed',
        # 'Protected feed'). Concatenating those would pollute the feed
        # with dict keys / single characters, so skip them outright.
        if not isinstance(feed, list):
            continue
        if includeRT:
            userFeed += feed
        else:
            userFeed.extend(tweet for tweet in feed if not tweet['isRT'])

    try:
        # Drop any stray sentinel entries by building a new list -- the
        # original removed items from userFeed while iterating it, which
        # skips elements. Then sort newest-first by timestamp.
        userFeed = [uf for uf in userFeed if uf != 'emptyFeed']
        userFeed.sort(key=lambda item: item['timeStamp'], reverse=True)
    except (KeyError, TypeError):
        # Narrowed from a bare except; a malformed entry should not crash
        # the whole feed -- return whatever was collected, unsorted.
        print("Error sorting feed - nitter/feed.py")
    return userFeed
|
175
nitter/user.py
Normal file
175
nitter/user.py
Normal file
@ -0,0 +1,175 @@
|
||||
from flask import Markup
|
||||
from requests_futures.sessions import FuturesSession
|
||||
from werkzeug.datastructures import Headers
|
||||
from concurrent.futures import as_completed
|
||||
from numerize import numerize
|
||||
from bs4 import BeautifulSoup
|
||||
from re import findall
|
||||
import time, datetime
|
||||
import requests
|
||||
import bleach
|
||||
import urllib
|
||||
import json
|
||||
import re
|
||||
|
||||
##########################
|
||||
#### Config variables ####
|
||||
##########################
|
||||
# Load the instance configuration once at import time. Using a context
# manager closes the file handle (the original bare open() leaked it).
with open('yotter-config.json') as _config_file:
    config = json.load(_config_file)
# Fail fast at import when the required key is missing (otherwise the
# KeyError would only surface on first use deep inside a request).
config['nitterInstance']
|
||||
|
||||
def get_user_info(username):
    """Fetch a Twitter user's profile from the configured Nitter instance.

    Returns a dict of profile fields (name, bio, counters, avatar URL), or
    False when Nitter renders an error panel (account missing/unavailable).
    NOTE(review): performs a blocking HTTP request; no timeout is set.
    """
    response = urllib.request.urlopen(f'{config["nitterInstance"]}{username}').read()
    #rssFeed = feedparser.parse(response.content)

    # str(response) stringifies the raw bytes (b'...'), so non-ASCII text
    # arrives as escape sequences; the encode/decode chains below undo that.
    html = BeautifulSoup(str(response), "lxml")
    if html.body.find('div', attrs={'class':'error-panel'}):
        # Nitter shows an error panel when the account cannot be loaded.
        return False
    else:
        html = html.body.find('div', attrs={'class':'profile-card'})

        if html.find('a', attrs={'class':'profile-card-fullname'}):
            # Repair the escaped bytes introduced by str(response) above.
            fullName = html.find('a', attrs={'class':'profile-card-fullname'}).getText().encode('latin1').decode('unicode_escape').encode('latin1').decode('utf8')
        else:
            fullName = None

        if html.find('div', attrs={'class':'profile-bio'}):
            profileBio = html.find('div', attrs={'class':'profile-bio'}).getText().encode('latin1').decode('unicode_escape').encode('latin1').decode('utf8')
        else:
            profileBio = None

        user = {
            "profileFullName":fullName,
            "profileUsername":html.find('a', attrs={'class':'profile-card-username'}).string.encode('latin_1').decode('unicode_escape').encode('latin_1').decode('utf8'),
            "profileBio":profileBio,
            # profile-stat-num order on Nitter pages: tweets, following,
            # followers, likes -- hence the fixed indices below.
            "tweets":html.find_all('span', attrs={'class':'profile-stat-num'})[0].string,
            "following":html.find_all('span', attrs={'class':'profile-stat-num'})[1].string,
            # numerize abbreviates large counts (e.g. 4412076 -> '4.41M').
            "followers":numerize.numerize(int(html.find_all('span', attrs={'class':'profile-stat-num'})[2].string.replace(",",""))),
            "likes":html.find_all('span', attrs={'class':'profile-stat-num'})[3].string,
            # href is site-relative ('/pic/...'); [1:] drops the leading slash.
            "profilePic":config['nitterInstance'] + html.find('a', attrs={'class':'profile-card-avatar'})['href'][1:],
        }
        return user
||||
|
||||
def get_tweets(user, page=1):
    '''
    Return the tweets on *user*'s profile for the requested page.

    Page 1 is the profile itself; for any page > 1 the Nitter "show more"
    cursor link is followed (page - 1) times. The original code handled
    only page == 2 (any higher value silently returned page 1) and left a
    debug print in; both are fixed here. Page 1 and page 2 results are
    unchanged.
    '''
    # Local import: the module only does 'import urllib' at the top, which
    # does not reliably provide urllib.request on its own.
    from urllib.request import urlopen

    feed = urlopen(f'{config["nitterInstance"]}{user}').read()
    # Gather feedPosts from the profile page.
    res = feed.decode('utf-8')
    html = BeautifulSoup(res, "html.parser")
    feedPosts = get_feed_tweets(html)

    # Follow the pagination cursor once per additional page; each iteration
    # re-parses so the next cursor comes from the page just fetched.
    for _ in range(page - 1):
        nextPage = html.find('div', attrs={'class': 'show-more'}).find('a')['href']
        url = f'{config["nitterInstance"]}{user}{nextPage}'
        feed = urlopen(url).read()
        res = feed.decode('utf-8')
        html = BeautifulSoup(res, "html.parser")
        feedPosts = get_feed_tweets(html)
    return feedPosts
|
||||
|
||||
def yotterify(text):
    """Strip known external service URLs from *text*.

    The input is coerced to str first, so BeautifulSoup tags and other
    objects are accepted. Bug fix: the original called
    ``text.replace(url, "")`` and discarded the result (strings are
    immutable), so nothing was ever stripped; the result is now assigned
    back.

    :param text: any object; its str() form is processed.
    :return: the text with every URL in URLS removed.
    """
    URLS = ['https://youtube.com']
    text = str(text)
    for url in URLS:
        text = text.replace(url, "")
    return text
|
||||
|
||||
def get_feed_tweets(html):
    """Parse a Nitter profile page (BeautifulSoup tree) into tweet dicts.

    Returns:
        list[dict]: parsed tweets (field layout documented in
        nitter/README.md), or one of three sentinels:
        'Empty feed' / 'Protected feed' (str) for empty/protected accounts,
        {"emptyFeed": True} (dict) when no timeline items are present.
        NOTE(review): feed.py compares feed entries against the string
        'emptyFeed', which none of these sentinels equals directly --
        confirm the intended contract.
    """
    feedPosts = []
    # Sentinel pages rendered by Nitter itself.
    if 'No items found' in str(html.body):
        return 'Empty feed'
    if "This account's tweets are protected." in str(html.body):
        return 'Protected feed'
    userFeed = html.find_all('div', attrs={'class':'timeline-item'})
    if userFeed != []:
        # The last timeline-item is the pagination element; skip it.
        for post in userFeed[:-1]:
            if 'show-more' in str(post):
                continue
            # Full timestamp from the date link's title; commas stripped so
            # strptime below can parse it.
            date_time_str = post.find('span', attrs={'class':'tweet-date'}).find('a')['title'].replace(",","")

            # Skip pinned tweets -- they are out of chronological order.
            if post.find('div', attrs={'class':'pinned'}):
                if post.find('div', attrs={'class':'pinned'}).find('span', attrs={'icon-pin'}):
                    continue

            tweet = {}
            tweet['op'] = post.find('a', attrs={'class':'username'}).text
            tweet['twitterName'] = post.find('a', attrs={'class':'fullname'}).text
            tweet['timeStamp'] = str(datetime.datetime.strptime(date_time_str, '%d/%m/%Y %H:%M:%S'))
            tweet['date'] = post.find('span', attrs={'class':'tweet-date'}).find('a').text
            # Inner HTML with newlines rendered as <br>; yotterify strips
            # known external URLs before the markup is declared safe.
            tweet['content'] = Markup(yotterify(post.find('div', attrs={'class':'tweet-content'}).decode_contents().replace("\n", "<br>")))

            # Retweet: the header carries the retweeting account's name.
            if post.find('div', attrs={'class':'retweet-header'}):
                tweet['username'] = post.find('div', attrs={'class':'retweet-header'}).find('div', attrs={'class':'icon-container'}).text
                tweet['isRT'] = True
            else:
                tweet['username'] = tweet['op']
                tweet['isRT'] = False

            # hrefs/srcs are site-relative ('/...'); [1:] drops the slash.
            tweet['profilePic'] = config['nitterInstance']+post.find('a', attrs={'class':'tweet-avatar'}).find('img')['src'][1:]
            tweet['url'] = config['nitterInstance'] + post.find('a', attrs={'class':'tweet-link'})['href'][1:]

            # Is quoting another tweet
            if post.find('div', attrs={'class':'quote'}):
                tweet['isReply'] = True
                quote = post.find('div', attrs={'class':'quote'})

                if 'unavailable' in str(quote):
                    tweet['unavailableReply'] = True
                else:
                    tweet['unavailableReply'] = False

                if not tweet['unavailableReply']:
                    if quote.find('div', attrs={'class':'quote-text'}):
                        try:
                            # NOTE(review): a bs4 Tag has no .replace(), so
                            # this likely raises and always falls into the
                            # except branch; probably meant
                            # .decode_contents().replace(...) -- confirm
                            # before changing.
                            tweet['replyingTweetContent'] = Markup(quote.find('div', attrs={'class':'quote-text'}).replace("\n", "<br>"))
                        except:
                            tweet['replyingTweetContent'] = Markup(quote.find('div', attrs={'class':'quote-text'}))

                    if quote.find('a', attrs={'class':'still-image'}):
                        tweet['replyAttachedImages'] = []
                        images = quote.find_all('a', attrs={'class':'still-image'})
                        for img in images:
                            img = BeautifulSoup(str(img), "lxml")
                            url = config['nitterInstance'] + img.find('a')['href'][1:]
                            tweet['replyAttachedImages'].append(url)
                    tweet['replyingUser']=quote.find('a', attrs={'class':'username'}).text
                # Remove the quote subtree so its content is not re-parsed
                # by the attachment handling below.
                post.find('div', attrs={'class':'quote'}).decompose()
            else:
                tweet['isReply'] = False

            # Has attachments (images / video)
            if post.find('div', attrs={'class':'attachments'}):
                # Images
                if post.find('div', attrs={'class':'attachments'}).find('a', attrs={'class':'still-image'}):
                    tweet['attachedImages'] = []
                    images = post.find('div', attrs={'class':'attachments'}).find_all('a', attrs={'class':'still-image'})
                    for img in images:
                        img = BeautifulSoup(str(img), 'lxml')
                        url = config['nitterInstance'] + img.find('a')['href'][1:]
                        tweet['attachedImages'].append(url)
                else:
                    # Mixed types: list of URLs when present, False otherwise.
                    tweet['attachedImages'] = False
                # Videos
                # NOTE(review): attrs={'attachments'} passes a *set* instead
                # of {'class': 'attachments'} as used elsewhere -- verify
                # bs4 treats this as the intended class match.
                if post.find('div', attrs={'attachments'}).find('div', attrs={'gallery-video'}):
                    tweet['attachedVideo'] = True
                else:
                    tweet['attachedVideo'] = False
            else:
                tweet['attachedVideo'] = False
                tweet['attachedImages'] = False

            # Engagement counters: comments / retweets / likes / quotes.
            if post.find('div', attrs={'class':'tweet-stats'}):
                stats = post.find('div', attrs={'class':'tweet-stats'}).find_all('span', attrs={'class':'tweet-stat'})
                for stat in stats:
                    if 'comment' in str(stat):
                        tweet['comments'] = stat.find('div',attrs={'class':'icon-container'}).text
                    elif 'retweet' in str(stat):
                        tweet['retweets'] = stat.find('div',attrs={'class':'icon-container'}).text
                    elif 'heart' in str(stat):
                        tweet['likes'] = stat.find('div',attrs={'class':'icon-container'}).text
                    else:
                        tweet['quotes'] = stat.find('div',attrs={'class':'icon-container'}).text
            feedPosts.append(tweet)
    else:
        return {"emptyFeed": True}
    return feedPosts
|
46
pypy.Dockerfile
Normal file
46
pypy.Dockerfile
Normal file
@ -0,0 +1,46 @@
|
||||
FROM pypy:3-slim-buster AS base
|
||||
|
||||
# Image to Build Dependencies
|
||||
FROM base AS builder
|
||||
|
||||
WORKDIR /usr/src/app
|
||||
|
||||
COPY ./requirements.txt /usr/src/app
|
||||
|
||||
# Build Dependencies
|
||||
RUN apt-get update \
|
||||
&& apt-get install -yq build-essential libssl-dev libffi-dev libxml2-dev libxslt-dev zlib1g-dev curl \
|
||||
&& rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/*
|
||||
|
||||
# install rust toolchain
|
||||
RUN curl https://sh.rustup.rs -sSf | \
|
||||
sh -s -- --default-toolchain stable -y
|
||||
|
||||
ENV PATH=/root/.cargo/bin:$PATH
|
||||
|
||||
# Python Dependencies
|
||||
RUN pip install --no-warn-script-location --ignore-installed --no-cache-dir --prefix=/install wheel cryptography gunicorn pymysql
|
||||
RUN pip install --no-warn-script-location --ignore-installed --no-cache-dir --prefix=/install -r requirements.txt
|
||||
|
||||
# Runtime Environment Image
|
||||
FROM base
|
||||
|
||||
WORKDIR /usr/src/app
|
||||
|
||||
COPY --from=builder /install/bin /usr/local/bin
|
||||
COPY --from=builder /install/site-packages /opt/pypy/site-packages
|
||||
|
||||
RUN apt-get update && apt-get install -y \
|
||||
libxml2 libxslt1.1 \
|
||||
&& rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/*
|
||||
|
||||
COPY . .
|
||||
|
||||
RUN flask db init
|
||||
|
||||
CMD flask db stamp head \
|
||||
&& flask db migrate \
|
||||
&& flask db upgrade \
|
||||
&& gunicorn -b 0.0.0.0:5000 -k gevent -w 4 yotter:app
|
||||
|
||||
EXPOSE 5000
|
@ -1,65 +1,44 @@
|
||||
alembic==1.4.3
|
||||
astroid==2.4.2
|
||||
async-timeout==3.0.1
|
||||
attrs==20.2.0
|
||||
beautifulsoup4==4.9.2
|
||||
bleach==3.2.1
|
||||
Brotli==1.0.9
|
||||
bs4==0.0.1
|
||||
cachetools==4.1.1
|
||||
certifi==2020.6.20
|
||||
beautifulsoup4==4.9.3
|
||||
bleach==3.3.0
|
||||
cachetools==4.2.0
|
||||
certifi==2020.12.5
|
||||
chardet==3.0.4
|
||||
click==7.1.2
|
||||
defusedxml==0.6.0
|
||||
dnspython==2.0.0
|
||||
email-validator==1.1.1
|
||||
feedparser==6.0.1
|
||||
feedparser==6.0.2
|
||||
Flask==1.1.2
|
||||
Flask-Caching==1.9.0
|
||||
Flask-Login==0.5.0
|
||||
Flask-Migrate==2.5.3
|
||||
Flask-SQLAlchemy==2.4.4
|
||||
Flask-WTF==0.14.3
|
||||
future==0.18.2
|
||||
gevent==20.9.0
|
||||
greenlet==0.4.17
|
||||
idna==2.10
|
||||
isort==5.5.3
|
||||
itsdangerous==1.1.0
|
||||
Jinja2==2.11.2
|
||||
lazy-object-proxy==1.5.1
|
||||
llvmlite==0.34.0
|
||||
lxml==4.5.2
|
||||
Jinja2==2.11.3
|
||||
lxml>=4.6.3
|
||||
Mako==1.1.3
|
||||
MarkupSafe==1.1.1
|
||||
mccabe==0.6.1
|
||||
multidict==4.7.6
|
||||
numerize==0.12
|
||||
numpy==1.19.2
|
||||
packaging==20.4
|
||||
pylint==2.6.0
|
||||
PyMySQL==0.10.1
|
||||
packaging==20.8
|
||||
pyparsing==2.4.7
|
||||
PySocks==1.7.1
|
||||
python-anticaptcha==0.7.1
|
||||
python-dateutil==2.8.1
|
||||
python-dotenv==0.14.0
|
||||
python-dotenv==0.15.0
|
||||
python-editor==1.0.4
|
||||
requests==2.24.0
|
||||
requests==2.25.1
|
||||
requests-futures==1.0.0
|
||||
sgmllib3k==1.0.0
|
||||
six==1.15.0
|
||||
socks==0
|
||||
soupsieve==2.0.1
|
||||
SQLAlchemy==1.3.19
|
||||
style==1.1.6
|
||||
toml==0.10.1
|
||||
urllib3==1.25.10
|
||||
SQLAlchemy==1.3.22
|
||||
urllib3==1.26.5
|
||||
webencodings==0.5.1
|
||||
Werkzeug==1.0.1
|
||||
wrapt==1.12.1
|
||||
WTForms==2.3.3
|
||||
yarl==1.6.0
|
||||
youtube-dl==2020.9.20
|
||||
youtube-dlc==2020.11.11.post3
|
||||
youtube-search-fork==1.2.5
|
||||
zope.event==4.5.0
|
||||
zope.interface==5.1.0
|
||||
zope.interface==5.2.0
|
||||
|
@ -1,75 +0,0 @@
|
||||
from requests_futures.sessions import FuturesSession
|
||||
from werkzeug.datastructures import Headers
|
||||
from flask import Markup
|
||||
from concurrent.futures import as_completed
|
||||
from numerize import numerize
|
||||
from bs4 import BeautifulSoup
|
||||
from re import findall
|
||||
import time, datetime
|
||||
import requests
|
||||
import bleach
|
||||
import urllib
|
||||
import json
|
||||
import re
|
||||
|
||||
NITTERINSTANCE = "https://nitter.net/"
|
||||
|
||||
def get_feed(usernames, maxOld):
    '''
    Returns feed tweets given a set of usernames

    (Legacy implementation removed in this change -- superseded by
    nitter/feed.py.)
    :param usernames: iterable of Twitter usernames (without '@').
    :param maxOld: posts older than this many days are skipped.
    '''
    feedTweets = []
    # Fetch all profile pages concurrently.
    with FuturesSession() as session:
        futures = [session.get('{instance}{user}'.format(instance=NITTERINSTANCE, user=u)) for u in usernames]
        for future in as_completed(futures):
            res = future.result().content.decode('utf-8')
            html = BeautifulSoup(res, "html.parser")
            userFeed = html.find_all('div', attrs={'class':'timeline-item'})
            if userFeed != []:
                # The last timeline-item is the pagination element; skip it.
                for post in userFeed[:-1]:
                    tweet = {}
                    date_time_str = post.find('span', attrs={'class':'tweet-date'}).find('a')['title'].replace(",","")
                    # Age filter. NOTE(review): this local 'time' shadows the
                    # 'time' module imported at the top of the file.
                    time = datetime.datetime.now() - datetime.datetime.strptime(date_time_str, '%d/%m/%Y %H:%M:%S')
                    if time.days >= maxOld:
                        continue

                    # Skip pinned tweets -- out of chronological order.
                    if post.find('div', attrs={'class':'pinned'}):
                        if post.find('div', attrs={'class':'pinned'}).find('span', attrs={'icon-pin'}):
                            continue

                    tweet['originalPoster'] = post.find('a', attrs={'class':'username'}).text
                    tweet['twitterName'] = post.find('a', attrs={'class':'fullname'}).text
                    tweet['timeStamp'] = datetime.datetime.strptime(date_time_str, '%d/%m/%Y %H:%M:%S')
                    tweet['date'] = post.find('span', attrs={'class':'tweet-date'}).find('a').text
                    tweet['content'] = Markup(post.find('div', attrs={'class':'tweet-content'}))

                    # Retweet header carries the retweeting account's name.
                    if post.find('div', attrs={'class':'retweet-header'}):
                        tweet['username'] = post.find('div', attrs={'class':'retweet-header'}).find('div', attrs={'class':'icon-container'}).text
                        tweet['isRT'] = True
                    else:
                        tweet['username'] = tweet['originalPoster']
                        tweet['isRT'] = False

                    tweet['profilePic'] = NITTERINSTANCE+post.find('a', attrs={'class':'tweet-avatar'}).find('img')['src'][1:]
                    # NOTE(review): 'url' is computed but never stored on the
                    # tweet dict -- appears to be a dropped assignment.
                    url = NITTERINSTANCE + post.find('a', attrs={'class':'tweet-link'})['href'][1:]
                    if post.find('div', attrs={'class':'quote'}):
                        tweet['isReply'] = True
                        tweet['quote'] = post.find('div', attrs={'class':'quote'})
                        if tweet['quote'].find('div', attrs={'class':'quote-text'}):
                            tweet['replyingTweetContent'] = Markup(tweet['quote'].find('div', attrs={'class':'quote-text'}))

                        if tweet['quote'].find('a', attrs={'class':'still-image'}):
                            tweet['replyAttachedImg'] = NITTERINSTANCE+tweet['quote'].find('a', attrs={'class':'still-image'})['href'][1:]

                        if tweet['quote'].find('div', attrs={'class':'unavailable-quote'}):
                            tweet['replyingUser']="Unavailable"
                        else:
                            tweet['replyingUser']=tweet['quote'].find('a', attrs={'class':'username'}).text
                        # Remove the quote subtree so it is not re-parsed below.
                        post.find('div', attrs={'class':'quote'}).decompose()

                    if post.find('div', attrs={'class':'attachments'}):
                        if not post.find(class_='quote'):
                            if post.find('div', attrs={'class':'attachments'}).find('a', attrs={'class':'still-image'}):
                                # NOTE(review): 'attachedImg' is computed but
                                # never attached to the tweet -- likely a
                                # dropped line.
                                attachedImg = NITTERINSTANCE + post.find('div', attrs={'class':'attachments'}).find('a')['href'][1:]
                    feedTweets.append(tweet)
    return feedTweets
|
116
tw_data/user.py
116
tw_data/user.py
@ -1,116 +0,0 @@
|
||||
from flask import Markup
|
||||
from requests_futures.sessions import FuturesSession
|
||||
from werkzeug.datastructures import Headers
|
||||
from concurrent.futures import as_completed
|
||||
from numerize import numerize
|
||||
from bs4 import BeautifulSoup
|
||||
from re import findall
|
||||
import time, datetime
|
||||
import requests
|
||||
import bleach
|
||||
import urllib
|
||||
import json
|
||||
import re
|
||||
|
||||
##########################
|
||||
#### Config variables ####
|
||||
##########################
|
||||
NITTERINSTANCE = 'https://nitter.net/'
|
||||
|
||||
def get_uer_info(username):
    """Fetch a Twitter user's profile from the hard-coded Nitter instance.

    (Legacy implementation removed in this change -- superseded by
    nitter/user.py. NOTE(review): the name 'get_uer_info' is a typo for
    'get_user_info'; kept as-is for caller compatibility.)
    Returns a dict of profile fields, or False when Nitter renders an
    error panel (account missing/unavailable).
    """
    response = urllib.request.urlopen('{instance}{user}'.format(instance=NITTERINSTANCE, user=username)).read()
    #rssFeed = feedparser.parse(response.content)

    # str(response) stringifies the raw bytes, so non-ASCII text arrives as
    # escape sequences; the encode/decode chains below undo that.
    html = BeautifulSoup(str(response), "lxml")
    if html.body.find('div', attrs={'class':'error-panel'}):
        return False
    else:
        html = html.body.find('div', attrs={'class':'profile-card'})

        if html.find('a', attrs={'class':'profile-card-fullname'}):
            fullName = html.find('a', attrs={'class':'profile-card-fullname'}).getText().encode('latin1').decode('unicode_escape').encode('latin1').decode('utf8')
        else:
            fullName = None

        if html.find('div', attrs={'class':'profile-bio'}):
            profileBio = html.find('div', attrs={'class':'profile-bio'}).getText().encode('latin1').decode('unicode_escape').encode('latin1').decode('utf8')
        else:
            profileBio = None

        user = {
            "profileFullName":fullName,
            "profileUsername":html.find('a', attrs={'class':'profile-card-username'}).string.encode('latin_1').decode('unicode_escape').encode('latin_1').decode('utf8'),
            "profileBio":profileBio,
            # profile-stat-num order on Nitter pages: tweets, following,
            # followers, likes -- hence the fixed indices below.
            "tweets":html.find_all('span', attrs={'class':'profile-stat-num'})[0].string,
            "following":html.find_all('span', attrs={'class':'profile-stat-num'})[1].string,
            "followers":numerize.numerize(int(html.find_all('span', attrs={'class':'profile-stat-num'})[2].string.replace(",",""))),
            "likes":html.find_all('span', attrs={'class':'profile-stat-num'})[3].string,
            # href is site-relative ('/pic/...'); [1:] drops the leading slash.
            "profilePic":"{instance}{pic}".format(instance=NITTERINSTANCE, pic=html.find('a', attrs={'class':'profile-card-avatar'})['href'][1:])
        }
        return user
|
||||
|
||||
def get_tweets(user, page=1):
    """Return tweets from *user*'s Nitter profile.

    (Legacy implementation removed in this change -- superseded by
    nitter/user.py.) Only page 1 and page 2 are handled; any other page
    value returns the page 1 results.
    """
    feed = urllib.request.urlopen('{instance}{user}'.format(instance=NITTERINSTANCE, user=user)).read()
    #Gather feedPosts
    res = feed.decode('utf-8')
    html = BeautifulSoup(res, "html.parser")
    feedPosts = get_feed_tweets(html)

    # Page 2: follow the 'show more' cursor link and re-parse that page.
    if page == 2:
        nextPage = html.find('div', attrs={'class':'show-more'}).find('a')['href']
        # NOTE(review): debug trace left in production code.
        print('{instance}{user}{page}'.format(instance=NITTERINSTANCE, user=user, page=nextPage))
        feed = urllib.request.urlopen('{instance}{user}{page}'.format(instance=NITTERINSTANCE, user=user, page=nextPage)).read()
        res = feed.decode('utf-8')
        html = BeautifulSoup(res, "html.parser")
        feedPosts = get_feed_tweets(html)
    return feedPosts
|
||||
|
||||
def get_feed_tweets(html):
    """Extract tweets from a parsed Nitter timeline page.

    html: BeautifulSoup document of a Nitter profile/timeline page.
    Returns a list of tweet dicts, or {"emptyFeed": True} when the page
    contains no timeline items.
    """
    feedPosts = []
    userFeed = html.find_all('div', attrs={'class':'timeline-item'})
    if userFeed != []:
        # The last item is excluded — presumably the 'load more' footer
        # rendered as a timeline-item; TODO confirm against Nitter markup.
        for post in userFeed[:-1]:
            # 'show-more' entries are pagination controls, not tweets.
            if 'show-more' in str(post):
                continue
            # Tweet date, e.g. '14/10/2020 18:30:02' after stripping commas.
            date_time_str = post.find('span', attrs={'class':'tweet-date'}).find('a')['title'].replace(",","")

            # Skip pinned tweets so the feed stays chronological.
            if post.find('div', attrs={'class':'pinned'}):
                if post.find('div', attrs={'class':'pinned'}).find('span', attrs={'icon-pin'}):
                    continue

            tweet = {}
            # 'op' is the original poster shown on the tweet itself.
            tweet['op'] = post.find('a', attrs={'class':'username'}).text
            tweet['twitterName'] = post.find('a', attrs={'class':'fullname'}).text
            tweet['timeStamp'] = str(datetime.datetime.strptime(date_time_str, '%d/%m/%Y %H:%M:%S'))
            tweet['date'] = post.find('span', attrs={'class':'tweet-date'}).find('a').text
            # Inner HTML of the tweet body, marked safe for the template.
            tweet['content'] = Markup(post.find('div', attrs={'class':'tweet-content'}).decode_contents())

            # Retweets carry a retweet-header naming the retweeting account.
            if post.find('div', attrs={'class':'retweet-header'}):
                tweet['username'] = post.find('div', attrs={'class':'retweet-header'}).find('div', attrs={'class':'icon-container'}).text
                tweet['isRT'] = True
            else:
                tweet['username'] = tweet['op']
                tweet['isRT'] = False

            # Relative hrefs from Nitter are re-rooted on the instance URL
            # (the [1:] drops the leading '/').
            tweet['profilePic'] = NITTERINSTANCE+post.find('a', attrs={'class':'tweet-avatar'}).find('img')['src'][1:]
            tweet['url'] = NITTERINSTANCE + post.find('a', attrs={'class':'tweet-link'})['href'][1:]
            # Quoted tweets (treated as replies here) get their own fields,
            # then the quote markup is removed from the main content.
            if post.find('div', attrs={'class':'quote'}):
                tweet['isReply'] = True
                quote = post.find('div', attrs={'class':'quote'})
                if quote.find('div', attrs={'class':'quote-text'}):
                    tweet['replyingTweetContent'] = Markup(quote.find('div', attrs={'class':'quote-text'}))

                if quote.find('a', attrs={'class':'still-image'}):
                    tweet['replyAttachedImg'] = NITTERINSTANCE+quote.find('a', attrs={'class':'still-image'})['href'][1:]

                tweet['replyingUser']=quote.find('a', attrs={'class':'username'}).text
                post.find('div', attrs={'class':'quote'}).decompose()

            # Attached image — only when the tweet is not a quote (the
            # quote was decomposed above, so this re-check guards ordering).
            if post.find('div', attrs={'class':'attachments'}):
                if not post.find(class_='quote'):
                    if post.find('div', attrs={'class':'attachments'}).find('a', attrs={'class':'still-image'}):
                        tweet['attachedImg'] = NITTERINSTANCE + post.find('div', attrs={'class':'attachments'}).find('a')['href'][1:]
            feedPosts.append(tweet)
    else:
        return {"emptyFeed": True}
    return feedPosts
|
@ -1,16 +1,15 @@
|
||||
{
|
||||
"serverName": "yotter.xyz",
|
||||
"nitterInstance": "https://nitter.net/",
|
||||
"maxInstanceUsers": 100,
|
||||
"nitterInstance": "https://nitter.mastodont.cat/",
|
||||
"maxInstanceUsers": 200,
|
||||
"serverLocation": "Germany",
|
||||
"restrictPublicUsage":true,
|
||||
"nginxVideoStream":true,
|
||||
"isInstance":true,
|
||||
"maintenance_mode":false,
|
||||
"show_admin_message":false,
|
||||
"admin_message_title":"Message from the admin",
|
||||
"admin_message":"Message from the admin text",
|
||||
"admin_user":"admin_username",
|
||||
"max_old_user_days": 60,
|
||||
"donate_url": "",
|
||||
"anticaptcha":""
|
||||
"donate_url": ""
|
||||
}
|
||||
|
@ -105,25 +105,36 @@ def channel_ctoken_v1(channel_id, page, sort, tab, view=1):
|
||||
|
||||
return base64.urlsafe_b64encode(pointless_nest).decode('ascii')
|
||||
|
||||
def get_channel_tab_info(channel_id, page="1", sort=3, tab='videos', view=1, print_status=True):
|
||||
def get_channel_tab(channel_id, page="1", sort=3, tab='videos', view=1,
|
||||
ctoken=None, print_status=True):
|
||||
message = 'Got channel tab' if print_status else None
|
||||
|
||||
if int(sort) == 2 and int(page) > 1:
|
||||
ctoken = channel_ctoken_v1(channel_id, page, sort, tab, view)
|
||||
ctoken = ctoken.replace('=', '%3D')
|
||||
url = ('https://www.youtube.com/channel/' + channel_id + '/' + tab
|
||||
+ '?action_continuation=1&continuation=' + ctoken
|
||||
+ '&pbj=1')
|
||||
content = util.fetch_url(url, headers_desktop + real_cookie,
|
||||
debug_name='channel_tab', report_text=message)
|
||||
else:
|
||||
if not ctoken:
|
||||
ctoken = channel_ctoken_v3(channel_id, page, sort, tab, view)
|
||||
ctoken = ctoken.replace('=', '%3D')
|
||||
url = 'https://www.youtube.com/browse_ajax?ctoken=' + ctoken
|
||||
content = util.fetch_url(url,
|
||||
headers_desktop + generic_cookie,
|
||||
debug_name='channel_tab', report_text=message)
|
||||
|
||||
# Not sure what the purpose of the key is or whether it will change
|
||||
# For now it seems to be constant for the API endpoint, not dependent
|
||||
# on the browsing session or channel
|
||||
key = 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8'
|
||||
url = 'https://www.youtube.com/youtubei/v1/browse?key=' + key
|
||||
|
||||
data = {
|
||||
'context': {
|
||||
'client': {
|
||||
'hl': 'en',
|
||||
'gl': 'US',
|
||||
'clientName': 'WEB',
|
||||
'clientVersion': '2.20180830',
|
||||
},
|
||||
},
|
||||
'continuation': ctoken,
|
||||
}
|
||||
|
||||
content_type_header = (('Content-Type', 'application/json'),)
|
||||
content = util.fetch_url(
|
||||
url, headers_desktop + content_type_header,
|
||||
data=json.dumps(data), debug_name='channel_tab', report_text=message)
|
||||
info = yt_data_extract.extract_channel_info(json.loads(content), tab)
|
||||
if info['error'] is not None:
|
||||
return False
|
||||
@ -174,12 +185,31 @@ def get_number_of_videos_general(base_url):
|
||||
return get_number_of_videos_channel(get_channel_id(base_url))
|
||||
|
||||
def get_channel_search_json(channel_id, query, page):
    """Search inside a channel via YouTube's youtubei/v1/browse endpoint.

    channel_id: UC... id of the channel to search in.
    query: search terms.
    page: 1-based result page; encoded as a 30-results-per-page offset.
    Returns the raw JSON bytes from util.fetch_url.

    Fix: the previous text contained both the obsolete browse_ajax
    request (str(page)-based params) and its youtubei replacement merged
    together; only the newer, coherent version is kept.
    """
    offset = proto.unpadded_b64encode(proto.uint(3, (page-1)*30))
    params = proto.string(2, 'search') + proto.string(15, offset)
    params = proto.percent_b64encode(params)
    ctoken = proto.string(2, channel_id) + proto.string(3, params) + proto.string(11, query)
    ctoken = base64.urlsafe_b64encode(proto.nested(80226972, ctoken)).decode('ascii')

    # This API key appears constant for the endpoint, not tied to the
    # browsing session or channel — TODO confirm it stays stable.
    key = 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8'
    url = 'https://www.youtube.com/youtubei/v1/browse?key=' + key

    data = {
        'context': {
            'client': {
                'hl': 'en',
                'gl': 'US',
                'clientName': 'WEB',
                'clientVersion': '2.20180830',
            },
        },
        'continuation': ctoken,
    }

    content_type_header = (('Content-Type', 'application/json'),)
    polymer_json = util.fetch_url(
        url, headers_desktop + content_type_header,
        data=json.dumps(data), debug_name='channel_search')

    return polymer_json
|
||||
@ -258,5 +288,3 @@ def get_channel_page_general_url(base_url, tab, request, channel_id=None):
|
||||
parameters_dictionary = request.args,
|
||||
**info
|
||||
)
|
||||
|
||||
|
||||
|
@ -155,13 +155,13 @@ def get_info_grid_video_item(item, channel=None):
|
||||
'timeStamp':published,
|
||||
'duration':duration,
|
||||
'channelName':channel['username'],
|
||||
'authorUrl':"/channel/{}".format(channel['channelId']),
|
||||
'authorUrl':f"/channel/{channel['channelId']}",
|
||||
'channelId':channel['channelId'],
|
||||
'id':item['videoId'],
|
||||
'videoUrl':"/watch?v={}".format(item['videoId']),
|
||||
'videoUrl':f"/watch?v={item['videoId']}",
|
||||
'isLive':isLive,
|
||||
'isUpcoming':isUpcoming,
|
||||
'videoThumb':item['thumbnail']['thumbnails'][0]['url']
|
||||
'videoThumb':item['thumbnail']['thumbnails'][0]['url'],
|
||||
}
|
||||
return video
|
||||
|
||||
@ -172,18 +172,18 @@ def get_author_info_from_channel(content):
|
||||
channel = {
|
||||
"channelId": cmd['channelId'],
|
||||
"username": cmd['title'],
|
||||
"thumbnail": "https:{}".format(cmd['avatar']['thumbnails'][0]['url'].replace("/", "~")),
|
||||
"thumbnail": f"https:{cmd['avatar']['thumbnails'][0]['url'].replace('/', '~')}",
|
||||
"description":description,
|
||||
"suscribers": cmd['subscriberCountText']['runs'][0]['text'].split(" ")[0],
|
||||
"banner": cmd['banner']['thumbnails'][0]['url']
|
||||
"banner": cmd['banner']['thumbnails'][0]['url'],
|
||||
}
|
||||
return channel
|
||||
|
||||
def get_channel_info(channelId, videos=True, page=1, sort=3):
|
||||
if id_or_username(channelId) == "channel":
|
||||
videos = []
|
||||
ciUrl = "https://www.youtube.com/channel/{}".format(channelId)
|
||||
mainUrl = "https://www.youtube.com/browse_ajax?ctoken={}".format(channel_ctoken_desktop(channelId, page, sort, "videos"))
|
||||
ciUrl = f"https://www.youtube.com/channel/{channelId}"
|
||||
mainUrl = f"https://www.youtube.com/browse_ajax?ctoken={channel_ctoken_desktop(channelId, page, sort, 'videos')}"
|
||||
content = json.loads(requests.get(mainUrl, headers=headers).text)
|
||||
req = requests.get(ciUrl, headers=headers).text
|
||||
|
||||
@ -210,4 +210,4 @@ def get_channel_info(channelId, videos=True, page=1, sort=3):
|
||||
return {"channel":authorInfo}
|
||||
|
||||
else:
|
||||
baseUrl = "https://www.youtube.com/user/{}".format(channelId)
|
||||
baseUrl = f"https://www.youtube.com/user/{channelId}"
|
||||
|
@ -159,10 +159,9 @@ def get_video_renderer_item_info(item):
|
||||
'authorUrl':"/channel/{}".format(item['ownerText']['runs'][0]['navigationEndpoint']['browseEndpoint']['browseId']),
|
||||
'channelId':item['ownerText']['runs'][0]['navigationEndpoint']['browseEndpoint']['browseId'],
|
||||
'id':item['videoId'],
|
||||
'videoUrl':"/watch?v={}".format(item['videoId']),
|
||||
'videoUrl':f"/watch?v={item['videoId']}",
|
||||
'isLive':isLive,
|
||||
'isUpcoming':isUpcoming,
|
||||
'videoThumb':item['thumbnail']['thumbnails'][0]['url']
|
||||
'videoThumb':item['thumbnail']['thumbnails'][0]['url'],
|
||||
}
|
||||
return video
|
||||
|
||||
|
180
youtube/util.py
180
youtube/util.py
@ -1,13 +1,9 @@
|
||||
import gzip
|
||||
|
||||
import requests
|
||||
from bs4 import BeautifulSoup
|
||||
|
||||
from youtube import yt_data_extract
|
||||
|
||||
try:
|
||||
import brotli
|
||||
|
||||
have_brotli = True
|
||||
except ImportError:
|
||||
have_brotli = False
|
||||
@ -19,7 +15,7 @@ import json
|
||||
import gevent
|
||||
import gevent.queue
|
||||
import gevent.lock
|
||||
from python_anticaptcha import AnticaptchaClient, NoCaptchaTaskProxylessTask
|
||||
|
||||
# The trouble with the requests library: It ships its own certificate bundle via certifi
|
||||
# instead of using the system certificate store, meaning self-signed certificates
|
||||
# configured by the user will not work. Some draconian networks block TLS unless a corporate
|
||||
@ -55,12 +51,13 @@ import urllib3.contrib.socks
|
||||
|
||||
URL_ORIGIN = "/https://www.youtube.com"
|
||||
|
||||
connection_pool = urllib3.PoolManager(cert_reqs='CERT_REQUIRED')
|
||||
connection_pool = urllib3.PoolManager(cert_reqs = 'CERT_REQUIRED')
|
||||
|
||||
def get_pool(use_tor):
|
||||
return connection_pool
|
||||
|
||||
class HTTPAsymmetricCookieProcessor(urllib.request.BaseHandler):
|
||||
'''Separate cookiejars for receiving and sending'''
|
||||
|
||||
def __init__(self, cookiejar_send=None, cookiejar_receive=None):
|
||||
self.cookiejar_send = cookiejar_send
|
||||
self.cookiejar_receive = cookiejar_receive
|
||||
@ -78,7 +75,6 @@ class HTTPAsymmetricCookieProcessor(urllib.request.BaseHandler):
|
||||
https_request = http_request
|
||||
https_response = http_response
|
||||
|
||||
|
||||
class FetchError(Exception):
|
||||
def __init__(self, code, reason='', ip=None):
|
||||
Exception.__init__(self, 'HTTP error during request: ' + code + ' ' + reason)
|
||||
@ -86,7 +82,6 @@ class FetchError(Exception):
|
||||
self.reason = reason
|
||||
self.ip = ip
|
||||
|
||||
|
||||
def decode_content(content, encoding_header):
|
||||
encodings = encoding_header.replace(' ', '').split(',')
|
||||
for encoding in reversed(encodings):
|
||||
@ -98,68 +93,6 @@ def decode_content(content, encoding_header):
|
||||
content = gzip.decompress(content)
|
||||
return content
|
||||
|
||||
|
||||
def bypass_captcha(session, response, url, cookies):
    """Attempt to solve YouTube's consent reCAPTCHA via the anti-captcha.com API.

    session: requests.Session whose cookies are reused for the final POST.
    response: the response whose body contained the captcha page.
    url: the originally requested URL (reported to anti-captcha).
    cookies: passed by the caller but not read here — TODO confirm intent.

    NOTE(review): structure reconstructed from flattened text; everything
    below the g-recaptcha check is treated as inside that branch, since
    the site key lookup is only valid when the captcha div is present.
    """
    print("vvv COOKIES DICT vvv")
    inputs = {}
    html = BeautifulSoup(str(response.text), "lxml")

    # If there's a captcha and we need to solve it...
    if html.body.find('div', attrs={'class': 'g-recaptcha'}):
        # Get the captcha form
        form = html.body.find('form', attrs={"action": "/das_captcha"})

        # Set up form inputs for request; inputs without a value (e.g. the
        # submit button) raise KeyError and are skipped.
        for _input in form.find_all('input'):
            try:
                print(_input["name"] + " -> " + _input["value"])
                inputs[_input["name"]] = _input["value"]
            except KeyError:
                continue
        print("\n vvv Form inputs created vvv ")
        print(inputs)

        # Get CAPTCHA keys
        site_key = html.body.find('div', attrs={'class': 'g-recaptcha'})['data-sitekey']
        s_value = html.body.find('input', attrs={'name': 'session_token'})['value']

        # Get anti-captcha API key from config
        config = json.load(open('yotter-config.json'))
        # Generate anti-captcha request payload
        body = {'clientKey': config['anticaptcha']}
        task = {'type': "NoCaptchaTaskProxyless",
                'websiteURL': url,
                'websiteKey': site_key,
                'recaptchaDataSValue': s_value}
        body['task'] = task

        # Create the task.
        response = requests.post("https://api.anti-captcha.com/createTask", json=body).json()
        task_id = response["taskId"]
        print("Task was created: {}. Waiting...".format(task_id))

        # Wait until task is completed. NOTE(review): busy-polls with no
        # delay or timeout — could hammer the API if the task stalls.
        body = {"clientKey": config['anticaptcha'], "taskId": task_id}
        response = requests.post("https://api.anti-captcha.com/getTaskResult", json=body).json()
        ready = response["status"] == "ready"
        while not ready:
            print(response['status'])
            response = requests.post("https://api.anti-captcha.com/getTaskResult", json=body).json()
            ready = response["status"] == "ready"

        inputs['g-recaptcha-response'] = response['solution']['gRecaptchaResponse']
        print(response)
        # Print POST request headers
        yt_rq = requests.post("https://youtube.com/das_captcha", data=inputs,
                              headers={"Content-Type": "application/x-www-form-urlencoded",
                                       "Accept-Language": "en-US,en;q=0.5",
                                       "User-Agent": 'Mozilla/5.0 (Windows NT 10.0; rv:78.0) Gecko/20100101 Firefox/78.0',
                                       "Referer": "https://www.youtube.com/das_captcha",
                                       "Origin": "https://www.youtube.com"}, cookies=session.cookies).headers
        print(yt_rq['Cookie'])
||||
|
||||
def fetch_url_response(url, headers=(), timeout=15, data=None,
|
||||
cookiejar_send=None, cookiejar_receive=None,
|
||||
use_tor=True, max_redirects=None):
|
||||
@ -172,7 +105,7 @@ def fetch_url_response(url, headers=(), timeout=15, data=None,
|
||||
When both are set to the same object, cookies will be sent from the object,
|
||||
and response cookies will be merged into it.
|
||||
'''
|
||||
headers = dict(headers) # Note: Calling dict() on a dict will make a copy
|
||||
headers = dict(headers) # Note: Calling dict() on a dict will make a copy
|
||||
if have_brotli:
|
||||
headers['Accept-Encoding'] = 'gzip, br'
|
||||
else:
|
||||
@ -187,74 +120,57 @@ def fetch_url_response(url, headers=(), timeout=15, data=None,
|
||||
if data is not None:
|
||||
method = "POST"
|
||||
if isinstance(data, str):
|
||||
data = data.encode('ascii')
|
||||
data = data.encode('utf-8')
|
||||
elif not isinstance(data, bytes):
|
||||
data = urllib.parse.urlencode(data).encode('ascii')
|
||||
data = urllib.parse.urlencode(data).encode('utf-8')
|
||||
|
||||
if cookiejar_send is not None or cookiejar_receive is not None: # Use urllib
|
||||
if cookiejar_send is not None or cookiejar_receive is not None: # Use urllib
|
||||
req = urllib.request.Request(url, data=data, headers=headers)
|
||||
cookie_processor = HTTPAsymmetricCookieProcessor(cookiejar_send=cookiejar_send,
|
||||
cookiejar_receive=cookiejar_receive)
|
||||
|
||||
cookie_processor = HTTPAsymmetricCookieProcessor(cookiejar_send=cookiejar_send, cookiejar_receive=cookiejar_receive)
|
||||
opener = urllib.request.build_opener(cookie_processor)
|
||||
|
||||
response = opener.open(req, timeout=timeout)
|
||||
cleanup_func = (lambda r: None)
|
||||
|
||||
else: # Use a urllib3 pool. Cookies can't be used since urllib3 doesn't have easy support for them.
|
||||
else: # Use a urllib3 pool. Cookies can't be used since urllib3 doesn't have easy support for them.
|
||||
# default: Retry.DEFAULT = Retry(3)
|
||||
# (in connectionpool.py in urllib3)
|
||||
# According to the documentation for urlopen, a redirect counts as a
|
||||
# retry. So there are 3 redirects max by default.
|
||||
|
||||
session = requests.Session()
|
||||
print("Starting python GET request to "+url+"...")
|
||||
response = session.get(url, headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; rv:78.0) Gecko/20100101 Firefox/78.0', "Accept-Language": "en-US,en;q=0.5"})
|
||||
|
||||
# Strings that appear when there's a Captcha.
|
||||
string_de = "Fülle das folgende Feld aus, um YouTube weiter zu nutzen."
|
||||
string_en = "To continue with your YouTube experience, please fill out the form below."
|
||||
# If there's a captcha, bypass it.
|
||||
if string_de in response.text or string_en in response.text:
|
||||
bypass_captcha(session, response, url, session.cookies)
|
||||
return "Captcha", "Captcha"
|
||||
|
||||
if max_redirects:
|
||||
retries = urllib3.Retry(3 + max_redirects, redirect=max_redirects)
|
||||
retries = urllib3.Retry(3+max_redirects, redirect=max_redirects)
|
||||
else:
|
||||
retries = urllib3.Retry(3)
|
||||
|
||||
pool = connection_pool
|
||||
response = pool.request(method, url, headers=headers,
|
||||
pool = get_pool(use_tor)
|
||||
response = pool.request(method, url, headers=headers, body=data,
|
||||
timeout=timeout, preload_content=False,
|
||||
decode_content=False, retries=retries)
|
||||
|
||||
cleanup_func = (lambda r: r.release_conn())
|
||||
|
||||
return response, cleanup_func
|
||||
|
||||
|
||||
def fetch_url(url, headers=(), timeout=15, report_text=None, data=None,
|
||||
cookiejar_send=None, cookiejar_receive=None, use_tor=True,
|
||||
debug_name=None):
|
||||
start_time = time.time()
|
||||
|
||||
response, cleanup_func = fetch_url_response(
|
||||
url, headers, timeout=timeout,
|
||||
url, headers, timeout=timeout, data=data,
|
||||
cookiejar_send=cookiejar_send, cookiejar_receive=cookiejar_receive,
|
||||
use_tor=use_tor)
|
||||
print(response)
|
||||
|
||||
if response == "Captcha":
|
||||
return "Captcha"
|
||||
response_time = time.time()
|
||||
|
||||
content = response.read()
|
||||
read_finish = time.time()
|
||||
|
||||
cleanup_func(response) # release_connection for urllib3
|
||||
|
||||
if (response.status == 429
|
||||
and content.startswith(b'<!DOCTYPE')
|
||||
and b'Our systems have detected unusual traffic' in content):
|
||||
ip = re.search(br'IP address: ((?:[\da-f]*:)+[\da-f]+|(?:\d+\.)+\d+)',
|
||||
content)
|
||||
content)
|
||||
ip = ip.group(1).decode('ascii') if ip else None
|
||||
raise FetchError('429', reason=response.reason, ip=ip)
|
||||
|
||||
@ -262,14 +178,12 @@ def fetch_url(url, headers=(), timeout=15, report_text=None, data=None,
|
||||
raise FetchError(str(response.status), reason=response.reason, ip=None)
|
||||
|
||||
if report_text:
|
||||
print(report_text, ' Latency:', round(response_time - start_time, 3), ' Read time:',
|
||||
round(read_finish - response_time, 3))
|
||||
print(report_text, ' Latency:', round(response_time - start_time,3), ' Read time:', round(read_finish - response_time,3))
|
||||
content = decode_content(content, response.getheader('Content-Encoding', default='identity'))
|
||||
return content
|
||||
|
||||
|
||||
def head(url, use_tor=False, report_text=None, max_redirects=10):
|
||||
pool = connection_pool
|
||||
pool = get_pool(use_tor)
|
||||
start_time = time.time()
|
||||
|
||||
# default: Retry.DEFAULT = Retry(3)
|
||||
@ -277,21 +191,24 @@ def head(url, use_tor=False, report_text=None, max_redirects=10):
|
||||
# According to the documentation for urlopen, a redirect counts as a retry
|
||||
# So there are 3 redirects max by default. Let's change that
|
||||
# to 10 since googlevideo redirects a lot.
|
||||
retries = urllib3.Retry(3 + max_redirects, redirect=max_redirects,
|
||||
raise_on_redirect=False)
|
||||
retries = urllib3.Retry(3+max_redirects, redirect=max_redirects,
|
||||
raise_on_redirect=False)
|
||||
headers = {'User-Agent': 'Python-urllib'}
|
||||
response = pool.request('HEAD', url, headers=headers, retries=retries)
|
||||
if report_text:
|
||||
print(report_text, ' Latency:', round(time.time() - start_time, 3))
|
||||
print(report_text, ' Latency:', round(time.time() - start_time,3))
|
||||
return response
|
||||
|
||||
|
||||
mobile_user_agent = 'Mozilla/5.0 (Linux; Android 7.0; Redmi Note 4 Build/NRD90M) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Mobile Safari/537.36'
|
||||
mobile_ua = (('User-Agent', mobile_user_agent),)
|
||||
desktop_user_agent = 'Mozilla/5.0 (Windows NT 6.1; rv:52.0) Gecko/20100101 Firefox/52.0'
|
||||
desktop_ua = (('User-Agent', desktop_user_agent),)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
class RateLimitedQueue(gevent.queue.Queue):
|
||||
''' Does initial_burst (def. 30) at first, then alternates between waiting waiting_period (def. 5) seconds and doing subsequent_bursts (def. 10) queries. After 5 seconds with nothing left in the queue, resets rate limiting. '''
|
||||
|
||||
@ -308,8 +225,9 @@ class RateLimitedQueue(gevent.queue.Queue):
|
||||
self.empty_start = 0
|
||||
gevent.queue.Queue.__init__(self)
|
||||
|
||||
|
||||
def get(self):
|
||||
self.lock.acquire() # blocks if another greenlet currently has the lock
|
||||
self.lock.acquire() # blocks if another greenlet currently has the lock
|
||||
if self.count_since_last_wait >= self.subsequent_bursts and self.surpassed_initial:
|
||||
gevent.sleep(self.waiting_period)
|
||||
self.count_since_last_wait = 0
|
||||
@ -325,7 +243,7 @@ class RateLimitedQueue(gevent.queue.Queue):
|
||||
self.currently_empty = True
|
||||
self.empty_start = time.monotonic()
|
||||
|
||||
item = gevent.queue.Queue.get(self) # blocks when nothing left
|
||||
item = gevent.queue.Queue.get(self) # blocks when nothing left
|
||||
|
||||
if self.currently_empty:
|
||||
if time.monotonic() - self.empty_start >= self.waiting_period:
|
||||
@ -339,6 +257,7 @@ class RateLimitedQueue(gevent.queue.Queue):
|
||||
return item
|
||||
|
||||
|
||||
|
||||
def download_thumbnail(save_directory, video_id):
|
||||
url = "https://i.ytimg.com/vi/" + video_id + "/mqdefault.jpg"
|
||||
save_location = os.path.join(save_directory, video_id + ".jpg")
|
||||
@ -350,23 +269,26 @@ def download_thumbnail(save_directory, video_id):
|
||||
try:
|
||||
f = open(save_location, 'wb')
|
||||
except FileNotFoundError:
|
||||
os.makedirs(save_directory, exist_ok=True)
|
||||
os.makedirs(save_directory, exist_ok = True)
|
||||
f = open(save_location, 'wb')
|
||||
f.write(thumbnail)
|
||||
f.close()
|
||||
return True
|
||||
|
||||
|
||||
def download_thumbnails(save_directory, ids):
    """Download thumbnails for *ids* into *save_directory*, 5 at a time.

    Fix: the diff merge had duplicated whitespace-variant copies of the
    loop and joinall lines; this keeps a single coherent version.
    """
    if not isinstance(ids, (list, tuple)):
        ids = list(ids)
    # only do 5 at a time
    # do the n where n is divisible by 5
    i = -1  # stays -1 when there are fewer than 10 ids, so the remainder pass starts at 0
    for i in range(0, int(len(ids) / 5) - 1):
        gevent.joinall([gevent.spawn(download_thumbnail, save_directory, ids[j]) for j in range(i * 5, i * 5 + 5)])
    # do the remainders (< 5)
    gevent.joinall([gevent.spawn(download_thumbnail, save_directory, ids[j]) for j in range(i * 5 + 5, len(ids))])
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
def dict_add(*dicts):
|
||||
@ -374,7 +296,6 @@ def dict_add(*dicts):
|
||||
dicts[0].update(dictionary)
|
||||
return dicts[0]
|
||||
|
||||
|
||||
def video_id(url):
|
||||
url_parts = urllib.parse.urlparse(url)
|
||||
return urllib.parse.parse_qs(url_parts.query)['v'][0]
|
||||
@ -384,11 +305,10 @@ def video_id(url):
|
||||
def get_thumbnail_url(video_id):
|
||||
return "/i.ytimg.com/vi/" + video_id + "/mqdefault.jpg"
|
||||
|
||||
|
||||
def seconds_to_timestamp(seconds):
|
||||
seconds = int(seconds)
|
||||
hours, seconds = divmod(seconds, 3600)
|
||||
minutes, seconds = divmod(seconds, 60)
|
||||
hours, seconds = divmod(seconds,3600)
|
||||
minutes, seconds = divmod(seconds,60)
|
||||
if hours != 0:
|
||||
timestamp = str(hours) + ":"
|
||||
timestamp += str(minutes).zfill(2) # zfill pads with zeros
|
||||
@ -399,32 +319,31 @@ def seconds_to_timestamp(seconds):
|
||||
return timestamp
|
||||
|
||||
|
||||
|
||||
def update_query_string(query_string, items):
    """Merge *items* into the parameters parsed from *query_string* and re-encode."""
    combined = {**urllib.parse.parse_qs(query_string), **dict(items)}
    return urllib.parse.urlencode(combined, doseq=True)
|
||||
|
||||
def uppercase_escape(s):
    """Replace literal \\UXXXXXXXX escape sequences in *s* with the characters they encode."""
    def _to_char(match):
        return chr(int(match.group(1), base=16))
    return re.sub(r'\\U([0-9a-fA-F]{8})', _to_char, s)
||||
|
||||
def prefix_url(url):
    """Normalize *url* to start with exactly one '/'; None passes through."""
    if url is None:
        return None
    # some urls have // before them, which has a special meaning
    return '/' + url.lstrip('/')
|
||||
|
||||
def left_remove(string, substring):
    '''removes substring from the start of string, if present'''
    if not string.startswith(substring):
        return string
    return string[len(substring):]
|
||||
|
||||
def concat_or_none(*strings):
|
||||
'''Concatenates strings. Returns None if any of the arguments are None'''
|
||||
result = ''
|
||||
@ -446,7 +365,6 @@ def prefix_urls(item):
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
|
||||
def add_extra_html_info(item):
|
||||
if item['type'] == 'video':
|
||||
item['url'] = (URL_ORIGIN + '/watch?v=' + item['id']) if item.get('id') else None
|
||||
@ -465,7 +383,6 @@ def add_extra_html_info(item):
|
||||
elif item['type'] == 'channel':
|
||||
item['url'] = (URL_ORIGIN + "/channel/" + item['id']) if item.get('id') else None
|
||||
|
||||
|
||||
def parse_info_prepare_for_html(renderer, additional_info={}):
|
||||
item = yt_data_extract.extract_item_info(renderer, additional_info)
|
||||
prefix_urls(item)
|
||||
@ -473,7 +390,6 @@ def parse_info_prepare_for_html(renderer, additional_info={}):
|
||||
|
||||
return item
|
||||
|
||||
|
||||
def check_gevent_exceptions(*tasks):
|
||||
for task in tasks:
|
||||
if task.exception:
|
||||
|
@ -29,7 +29,7 @@ def parse_comment(raw_comment):
|
||||
cmnt = {}
|
||||
imgHostName = urllib.parse.urlparse(raw_comment['author_avatar'][1:]).netloc
|
||||
cmnt['author'] = raw_comment['author']
|
||||
cmnt['thumbnail'] = raw_comment['author_avatar'].replace("https://{}".format(imgHostName),"")[1:] + "?host=" + imgHostName
|
||||
cmnt['thumbnail'] = raw_comment['author_avatar'].replace(f"https://{imgHostName}","")[1:] + "?host=" + imgHostName
|
||||
|
||||
print(cmnt['thumbnail'])
|
||||
cmnt['channel'] = raw_comment['author_url']
|
||||
|
78
youtube/video.py
Normal file
78
youtube/video.py
Normal file
@ -0,0 +1,78 @@
|
||||
from youtube_dlc import YoutubeDL
import json

# youtube-dlc extractor configured once at import time and reused per request.
options = {
    'ignoreerrors': True,
    'quiet': True,
    'skip_download': True
}
ydl = YoutubeDL(options)
ydl.add_default_info_extractors()

# Instance configuration. Fix: use a context manager so the config file
# handle is closed instead of leaked by json.load(open(...)).
with open('yotter-config.json') as _config_file:
    config = json.load(_config_file)
|
||||
def get_info(url):
    """Fetch video metadata for *url* with youtube-dlc.

    Returns a dict of video fields; on any extraction failure the dict's
    'error' key is True and the metadata fields are absent.
    """
    video = {}
    video['error'] = False
    info = None  # fix: 'info' was unbound (NameError below) when extract_info raised

    try:
        info = ydl.extract_info(url, download=False)
    except Exception:  # narrowed from a bare except; extraction stays best-effort
        video['error'] = True

    if info is None:  # 'is None' instead of '== None'
        video['error'] = True
    if not video['error'] and info is not None:
        video['uploader'] = info['uploader']
        video['uploader_id'] = info['uploader_id']
        video['channel_id'] = info['channel_id']
        video['upload_date'] = info['upload_date']
        video['title'] = info['title']
        video['thumbnails'] = info['thumbnails']
        video['description'] = info['description']
        video['categories'] = info['categories']
        video['subtitles'] = info['subtitles']
        video['duration'] = info['duration']
        video['view_count'] = info['view_count']

        # Missing counts default to 0 so the total below never sees None.
        if info['like_count'] is None:
            video['like_count'] = 0
        else:
            video['like_count'] = int(info['like_count'])

        if info['dislike_count'] is None:
            video['dislike_count'] = 0
        else:
            video['dislike_count'] = int(info['dislike_count'])

        video['total_likes'] = video['dislike_count'] + video['like_count']

        # Truncate to at most 4 characters, e.g. '4.85'.
        video['average_rating'] = str(info['average_rating'])[0:4]
        video['formats'] = get_video_formats(info['formats'])
        video['audio_formats'] = get_video_formats(info['formats'], audio=True)
        video['is_live'] = info['is_live']
        video['start_time'] = info['start_time']
        video['end_time'] = info['end_time']
        video['series'] = info['series']
        video['subscriber_count'] = info['subscriber_count']
    return video
|
||||
def get_video_formats(formats, audio=False):
    """Split youtube-dl format dicts into combined A/V or audio-only lists.

    formats: list of format dicts (each with 'vcodec' and 'acodec' keys;
        'none' marks an absent stream).
    audio: when True return audio-only formats, otherwise combined
        video+audio formats.
    Returns a list of the selected format dicts, in input order.
    """
    best_formats = []
    audio_formats = []
    # 'fmt' rather than 'format' — fix: the original name shadowed the builtin.
    for fmt in formats:
        has_video = fmt['vcodec'] != 'none'
        has_audio = fmt['acodec'] != 'none'
        if has_video and has_audio:
            # Skip 144p as too low quality; .get is a robustness fix so a
            # format dict without 'format_note' no longer raises KeyError.
            if fmt.get('format_note') != '144p':
                best_formats.append(fmt)
        elif has_audio:
            # Audio only
            audio_formats.append(fmt)
        # Video-only formats are ignored.
    return audio_formats if audio else best_formats
|
@ -164,18 +164,14 @@ headers = (
|
||||
('X-YouTube-Client-Version', '2.20180830'),
|
||||
) + util.mobile_ua
|
||||
def extract_info(video_id, use_invidious, playlist_id=None, index=None):
|
||||
# bpctr=9999999999 will bypass are-you-sure dialogs for controversial videos
|
||||
# bpctr=9999999999 will bypass are-you-sure dialogs for controversial
|
||||
# videos
|
||||
url = 'https://m.youtube.com/watch?v=' + video_id + '&pbj=1&bpctr=9999999999'
|
||||
if playlist_id:
|
||||
url += '&list=' + playlist_id
|
||||
if index:
|
||||
url += '&index=' + index
|
||||
polymer_json = util.fetch_url(url, headers=headers, debug_name='watch')
|
||||
|
||||
# If there's a captcha... Return word Captcha
|
||||
if polymer_json == 'Captcha':
|
||||
return 'Captcha'
|
||||
|
||||
polymer_json = polymer_json.decode('utf-8')
|
||||
# TODO: Decide whether this should be done in yt_data_extract.extract_watch_info
|
||||
try:
|
||||
@ -270,5 +266,3 @@ def format_bytes(bytes):
|
||||
suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent]
|
||||
converted = float(bytes) / float(1024 ** exponent)
|
||||
return '%.2f%s' % (converted, suffix)
|
||||
|
||||
|
||||
|
@ -329,6 +329,11 @@ def extract_item_info(item, additional_info={}):
|
||||
|
||||
def extract_response(polymer_json):
|
||||
'''return response, error'''
|
||||
# /youtubei/v1/browse endpoint returns response directly
|
||||
if isinstance(polymer_json, dict) and 'responseContext' in polymer_json:
|
||||
# this is the response
|
||||
return polymer_json, None
|
||||
|
||||
response = multi_deep_get(polymer_json, [1, 'response'], ['response'])
|
||||
if response is None:
|
||||
return None, 'Failed to extract response'
|
||||
|
@ -177,9 +177,8 @@ def _extract_watch_info_mobile(top_level):
|
||||
author_id = deep_get(playlist, 'longBylineText', 'runs', 0,
|
||||
'navigationEndpoint', 'browseEndpoint', 'browseId')
|
||||
info['playlist']['author_id'] = author_id
|
||||
if author_id:
|
||||
info['playlist']['author_url'] = concat_or_none(
|
||||
'https://www.youtube.com/channel/', author_id)
|
||||
info['playlist']['author_url'] = concat_or_none(
|
||||
'https://www.youtube.com/channel/', author_id)
|
||||
info['playlist']['id'] = playlist.get('playlistId')
|
||||
info['playlist']['url'] = concat_or_none(
|
||||
'https://www.youtube.com/playlist?list=',
|
||||
@ -447,7 +446,8 @@ def _extract_playability_error(info, player_response, error_prefix=''):
|
||||
|
||||
SUBTITLE_FORMATS = ('srv1', 'srv2', 'srv3', 'ttml', 'vtt')
|
||||
def extract_watch_info(polymer_json):
|
||||
info = {'playability_error': None, 'error': None}
|
||||
info = {'playability_error': None, 'error': None,
|
||||
'player_response_missing': None}
|
||||
|
||||
if isinstance(polymer_json, dict):
|
||||
top_level = polymer_json
|
||||
@ -509,6 +509,10 @@ def extract_watch_info(polymer_json):
|
||||
if not info['formats']:
|
||||
_extract_formats(info, player_response)
|
||||
|
||||
# see https://github.com/user234683/youtube-local/issues/22#issuecomment-706395160
|
||||
info['player_urls_missing'] = (
|
||||
not info['formats'] and not embedded_player_response)
|
||||
|
||||
# playability errors
|
||||
_extract_playability_error(info, player_response)
|
||||
|
||||
@ -565,6 +569,84 @@ def extract_watch_info(polymer_json):
|
||||
info['author_url'] = 'https://www.youtube.com/channel/' + info['author_id'] if info['author_id'] else None
|
||||
return info
|
||||
|
||||
single_char_codes = {
|
||||
'n': '\n',
|
||||
'\\': '\\',
|
||||
'"': '"',
|
||||
"'": "'",
|
||||
'b': '\b',
|
||||
'f': '\f',
|
||||
'n': '\n',
|
||||
'r': '\r',
|
||||
't': '\t',
|
||||
'v': '\x0b',
|
||||
'0': '\x00',
|
||||
'\n': '', # backslash followed by literal newline joins lines
|
||||
}
|
||||
def js_escape_replace(match):
|
||||
r'''Resolves javascript string escape sequences such as \x..'''
|
||||
# some js-strings in the watch page html include them for no reason
|
||||
# https://mathiasbynens.be/notes/javascript-escapes
|
||||
escaped_sequence = match.group(1)
|
||||
if escaped_sequence[0] in ('x', 'u'):
|
||||
return chr(int(escaped_sequence[1:], base=16))
|
||||
|
||||
# In javascript, if it's not one of those escape codes, it's just the
|
||||
# literal character. e.g., "\a" = "a"
|
||||
return single_char_codes.get(escaped_sequence, escaped_sequence)
|
||||
|
||||
# works but complicated and unsafe:
|
||||
#PLAYER_RESPONSE_RE = re.compile(r'<script[^>]*?>[^<]*?var ytInitialPlayerResponse = ({(?:"(?:[^"\\]|\\.)*?"|[^"])+?});')
|
||||
|
||||
# Because there are sometimes additional statements after the json object
|
||||
# so we just capture all of those until end of script and tell json decoder
|
||||
# to ignore extra stuff after the json object
|
||||
PLAYER_RESPONSE_RE = re.compile(r'<script[^>]*?>[^<]*?var ytInitialPlayerResponse = ({.*?)</script>')
|
||||
INITIAL_DATA_RE = re.compile(r"<script[^>]*?>var ytInitialData = '(.+?[^\\])';")
|
||||
BASE_JS_RE = re.compile(r'jsUrl":\s*"([\w\-\./]+?/base.js)"')
|
||||
JS_STRING_ESCAPE_RE = re.compile(r'\\([^xu]|x..|u....)')
|
||||
def extract_watch_info_from_html(watch_html):
|
||||
base_js_match = BASE_JS_RE.search(watch_html)
|
||||
player_response_match = PLAYER_RESPONSE_RE.search(watch_html)
|
||||
initial_data_match = INITIAL_DATA_RE.search(watch_html)
|
||||
|
||||
if base_js_match is not None:
|
||||
base_js_url = base_js_match.group(1)
|
||||
else:
|
||||
base_js_url = None
|
||||
|
||||
if player_response_match is not None:
|
||||
decoder = json.JSONDecoder()
|
||||
# this will make it ignore extra stuff after end of object
|
||||
player_response = decoder.raw_decode(player_response_match.group(1))[0]
|
||||
else:
|
||||
return {'error': 'Could not find ytInitialPlayerResponse'}
|
||||
player_response = None
|
||||
|
||||
if initial_data_match is not None:
|
||||
initial_data = initial_data_match.group(1)
|
||||
initial_data = JS_STRING_ESCAPE_RE.sub(js_escape_replace, initial_data)
|
||||
initial_data = json.loads(initial_data)
|
||||
else:
|
||||
print('extract_watch_info_from_html: failed to find initialData')
|
||||
initial_data = None
|
||||
|
||||
# imitate old format expected by extract_watch_info
|
||||
fake_polymer_json = {
|
||||
'player': {
|
||||
'args': {},
|
||||
'assets': {
|
||||
'js': base_js_url
|
||||
}
|
||||
},
|
||||
'playerResponse': player_response,
|
||||
'response': initial_data,
|
||||
}
|
||||
|
||||
return extract_watch_info(fake_polymer_json)
|
||||
|
||||
|
||||
|
||||
def get_caption_url(info, language, format, automatic=False, translation_language=None):
|
||||
'''Gets the url for captions with the given language and format. If automatic is True, get the automatic captions for that language. If translation_language is given, translate the captions from `language` to `translation_language`. If automatic is true and translation_language is given, the automatic captions will be translated.'''
|
||||
url = info['_captions_base_url']
|
||||
@ -580,7 +662,8 @@ def get_caption_url(info, language, format, automatic=False, translation_languag
|
||||
return url
|
||||
|
||||
def update_with_age_restricted_info(info, video_info_page):
|
||||
ERROR_PREFIX = 'Error bypassing age-restriction: '
|
||||
'''Inserts urls from 'player_response' in get_video_info page'''
|
||||
ERROR_PREFIX = 'Error getting missing player or bypassing age-restriction: '
|
||||
|
||||
video_info = urllib.parse.parse_qs(video_info_page)
|
||||
player_response = deep_get(video_info, 'player_response', 0)
|
||||
@ -603,7 +686,9 @@ def requires_decryption(info):
|
||||
# adapted from youtube-dl and invidious:
|
||||
# https://github.com/omarroth/invidious/blob/master/src/invidious/helpers/signatures.cr
|
||||
decrypt_function_re = re.compile(r'function\(a\)\{(a=a\.split\(""\)[^\}{]+)return a\.join\(""\)\}')
|
||||
op_with_arg_re = re.compile(r'[^\.]+\.([^\(]+)\(a,(\d+)\)')
|
||||
# gives us e.g. rt, .xK, 5 from rt.xK(a,5) or rt, ["xK"], 5 from rt["xK"](a,5)
|
||||
# (var, operation, argument)
|
||||
var_op_arg_re = re.compile(r'(\w+)(\.\w+|\["[^"]+"\])\(a,(\d+)\)')
|
||||
def extract_decryption_function(info, base_js):
|
||||
'''Insert decryption function into info. Return error string if not successful.
|
||||
Decryption function is a list of list[2] of numbers.
|
||||
@ -617,10 +702,11 @@ def extract_decryption_function(info, base_js):
|
||||
if not function_body:
|
||||
return 'Empty decryption function body'
|
||||
|
||||
var_name = get(function_body[0].split('.'), 0)
|
||||
if var_name is None:
|
||||
var_with_operation_match = var_op_arg_re.fullmatch(function_body[0])
|
||||
if var_with_operation_match is None:
|
||||
return 'Could not find var_name'
|
||||
|
||||
var_name = var_with_operation_match.group(1)
|
||||
var_body_match = re.search(r'var ' + re.escape(var_name) + r'=\{(.*?)\};', base_js, flags=re.DOTALL)
|
||||
if var_body_match is None:
|
||||
return 'Could not find var_body'
|
||||
@ -649,13 +735,13 @@ def extract_decryption_function(info, base_js):
|
||||
|
||||
decryption_function = []
|
||||
for op_with_arg in function_body:
|
||||
match = op_with_arg_re.fullmatch(op_with_arg)
|
||||
match = var_op_arg_re.fullmatch(op_with_arg)
|
||||
if match is None:
|
||||
return 'Could not parse operation with arg'
|
||||
op_name = match.group(1)
|
||||
op_name = match.group(2).strip('[].')
|
||||
if op_name not in operation_definitions:
|
||||
return 'Unknown op_name: ' + op_name
|
||||
op_argument = match.group(2)
|
||||
return 'Unknown op_name: ' + str(op_name)
|
||||
op_argument = match.group(3)
|
||||
decryption_function.append([operation_definitions[op_name], int(op_argument)])
|
||||
|
||||
info['decryption_function'] = decryption_function
|
||||
|
@ -1,2 +0,0 @@
|
||||
### Youtube_data
|
||||
This is the module for extracting data from Youtube. It uses a scrapping method through the Youtube JSON headers. It aims to replace the Youtube-API and also my `youtube_search-fork` that I am using right now.
|
@ -1,213 +0,0 @@
|
||||
from youtube_data import proto
|
||||
from flask import Markup as mk
|
||||
import requests
|
||||
import base64
|
||||
import json
|
||||
import re
|
||||
|
||||
# From: https://github.com/user234683/youtube-local/blob/master/youtube/channel.py
|
||||
# SORT:
|
||||
# videos:
|
||||
# Popular - 1
|
||||
# Oldest - 2
|
||||
# Newest - 3
|
||||
# playlists:
|
||||
# Oldest - 2
|
||||
# Newest - 3
|
||||
# Last video added - 4
|
||||
|
||||
# view:
|
||||
# grid: 0 or 1
|
||||
# list: 2
|
||||
|
||||
headers = {
|
||||
'Host': 'www.youtube.com',
|
||||
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64)',
|
||||
'Accept': '*/*',
|
||||
'Accept-Language': 'en-US,en;q=0.5',
|
||||
'X-YouTube-Client-Name': '1',
|
||||
'X-YouTube-Client-Version': '2.20180418',
|
||||
}
|
||||
real_cookie = (('Cookie', 'VISITOR_INFO1_LIVE=8XihrAcN1l4'),)
|
||||
generic_cookie = (('Cookie', 'VISITOR_INFO1_LIVE=ST1Ti53r4fU'),)
|
||||
|
||||
|
||||
def channel_ctoken_desktop(channel_id, page, sort, tab, view=1):
|
||||
# see https://github.com/iv-org/invidious/issues/1319#issuecomment-671732646
|
||||
# page > 1 doesn't work when sorting by oldest
|
||||
offset = 30*(int(page) - 1)
|
||||
schema_number = {
|
||||
3: 6307666885028338688,
|
||||
2: 17254859483345278706,
|
||||
1: 16570086088270825023,
|
||||
}[int(sort)]
|
||||
page_token = proto.string(61, proto.unpadded_b64encode(proto.string(1,
|
||||
proto.uint(1, schema_number) + proto.string(2,
|
||||
proto.string(1, proto.unpadded_b64encode(proto.uint(1,offset)))
|
||||
)
|
||||
)))
|
||||
|
||||
tab = proto.string(2, tab )
|
||||
sort = proto.uint(3, int(sort))
|
||||
#page = proto.string(15, str(page) )
|
||||
|
||||
shelf_view = proto.uint(4, 0)
|
||||
view = proto.uint(6, int(view))
|
||||
continuation_info = proto.string(3,
|
||||
proto.percent_b64encode(tab + sort + shelf_view + view + page_token)
|
||||
)
|
||||
|
||||
channel_id = proto.string(2, channel_id )
|
||||
pointless_nest = proto.string(80226972, channel_id + continuation_info)
|
||||
|
||||
return base64.urlsafe_b64encode(pointless_nest).decode('ascii')
|
||||
|
||||
def channel_ctoken_mobile(channel_id, page, sort, tab, view=1):
|
||||
tab = proto.string(2, tab )
|
||||
sort = proto.uint(3, int(sort))
|
||||
page = proto.string(15, str(page) )
|
||||
# example with shelves in videos tab: https://www.youtube.com/channel/UCNL1ZadSjHpjm4q9j2sVtOA/videos
|
||||
shelf_view = proto.uint(4, 0)
|
||||
view = proto.uint(6, int(view))
|
||||
continuation_info = proto.string( 3, proto.percent_b64encode(tab + view + sort + shelf_view + page) )
|
||||
|
||||
channel_id = proto.string(2, channel_id )
|
||||
pointless_nest = proto.string(80226972, channel_id + continuation_info)
|
||||
|
||||
return base64.urlsafe_b64encode(pointless_nest).decode('ascii')
|
||||
|
||||
|
||||
def id_or_username(string):
|
||||
cidRegex = "^UC.{22}$"
|
||||
if re.match(cidRegex, string):
|
||||
return "channel"
|
||||
else:
|
||||
return "user"
|
||||
|
||||
def get_channel_videos_tab(content):
|
||||
tabs = content['contents']['twoColumnBrowseResultsRenderer']['tabs']
|
||||
for tab in tabs:
|
||||
if tab['title'] != "Videos":
|
||||
continue
|
||||
else:
|
||||
return tab
|
||||
|
||||
def get_video_items_from_tab(tab):
|
||||
items = []
|
||||
for item in tab:
|
||||
try:
|
||||
if item['gridVideoRenderer']:
|
||||
items.append(item)
|
||||
else:
|
||||
continue
|
||||
except KeyError:
|
||||
continue
|
||||
return items
|
||||
|
||||
def get_info_grid_video_item(item, channel=None):
|
||||
item = item['gridVideoRenderer']
|
||||
thumbnailOverlays = item['thumbnailOverlays']
|
||||
published = ""
|
||||
views = ""
|
||||
isLive = False
|
||||
isUpcoming = False
|
||||
try:
|
||||
if 'UPCOMING' in str(thumbnailOverlays):
|
||||
start_time = item['upcomingEventData']['startTime']
|
||||
isUpcoming = True
|
||||
views = "-"
|
||||
published = "Scheduled"
|
||||
except KeyError:
|
||||
isUpcoming = False
|
||||
|
||||
try:
|
||||
if 'LIVE' in str(thumbnailOverlays):
|
||||
isLive = True
|
||||
try:
|
||||
views = item['viewCountText']['simpleText']
|
||||
except:
|
||||
views = "Live"
|
||||
try:
|
||||
duration = item['lengthText']['simpleText']
|
||||
except:
|
||||
duration = "-"
|
||||
if published != "Scheduled":
|
||||
try:
|
||||
published = item['publishedTimeText']['simpleText']
|
||||
except KeyError:
|
||||
published = "None"
|
||||
except KeyError:
|
||||
isUpcoming = False
|
||||
isLive = False
|
||||
|
||||
if not isUpcoming and not isLive:
|
||||
views = item['viewCountText']['simpleText']
|
||||
published = item['publishedTimeText']['simpleText']
|
||||
try:
|
||||
duration = item['lengthText']['simpleText']
|
||||
except:
|
||||
duration = "?"
|
||||
|
||||
video = {
|
||||
'videoTitle':item['title']['runs'][0]['text'],
|
||||
'description':"",
|
||||
'views':views,
|
||||
'timeStamp':published,
|
||||
'duration':duration,
|
||||
'channelName':channel['username'],
|
||||
'authorUrl':"/channel/{}".format(channel['channelId']),
|
||||
'channelId':channel['channelId'],
|
||||
'id':item['videoId'],
|
||||
'videoUrl':"/watch?v={}".format(item['videoId']),
|
||||
'isLive':isLive,
|
||||
'isUpcoming':isUpcoming,
|
||||
'videoThumb':item['thumbnail']['thumbnails'][0]['url']
|
||||
}
|
||||
return video
|
||||
|
||||
def get_author_info_from_channel(content):
|
||||
hmd = content['metadata']['channelMetadataRenderer']
|
||||
cmd = content['header']['c4TabbedHeaderRenderer']
|
||||
description = mk(hmd['description'])
|
||||
channel = {
|
||||
"channelId": cmd['channelId'],
|
||||
"username": cmd['title'],
|
||||
"thumbnail": "https:{}".format(cmd['avatar']['thumbnails'][0]['url'].replace("/", "~")),
|
||||
"description":description,
|
||||
"suscribers": cmd['subscriberCountText']['runs'][0]['text'].split(" ")[0],
|
||||
"banner": cmd['banner']['thumbnails'][0]['url']
|
||||
}
|
||||
return channel
|
||||
|
||||
def get_channel_info(channelId, videos=True, page=1, sort=3):
|
||||
if id_or_username(channelId) == "channel":
|
||||
videos = []
|
||||
ciUrl = "https://www.youtube.com/channel/{}".format(channelId)
|
||||
mainUrl = "https://www.youtube.com/browse_ajax?ctoken={}".format(channel_ctoken_desktop(channelId, page, sort, "videos"))
|
||||
content = json.loads(requests.get(mainUrl, headers=headers).text)
|
||||
req = requests.get(ciUrl, headers=headers).text
|
||||
|
||||
start = (
|
||||
req.index('window["ytInitialData"]')
|
||||
+ len('window["ytInitialData"]')
|
||||
+ 3
|
||||
)
|
||||
|
||||
end = req.index("};", start) + 1
|
||||
jsonIni = req[start:end]
|
||||
data = json.loads(jsonIni)
|
||||
|
||||
#videosTab = get_channel_videos_tab(content)
|
||||
authorInfo = get_author_info_from_channel(data)
|
||||
if videos:
|
||||
gridVideoItemList = get_video_items_from_tab(content[1]['response']['continuationContents']['gridContinuation']['items'])
|
||||
for video in gridVideoItemList:
|
||||
vid = get_info_grid_video_item(video, authorInfo)
|
||||
videos.append(vid)
|
||||
print({"channel":authorInfo, "videos":videos})
|
||||
return {"channel":authorInfo, "videos":videos}
|
||||
else:
|
||||
return {"channel":authorInfo}
|
||||
|
||||
else:
|
||||
baseUrl = "https://www.youtube.com/user/{}".format(channelId)
|
@ -1,130 +0,0 @@
|
||||
from math import ceil
|
||||
import base64
|
||||
import io
|
||||
|
||||
# FROM https://github.com/user234683/youtube-local/blob/master/youtube/proto.py
|
||||
|
||||
def byte(n):
|
||||
return bytes((n,))
|
||||
|
||||
|
||||
def varint_encode(offset):
|
||||
'''In this encoding system, for each 8-bit byte, the first bit is 1 if there are more bytes, and 0 is this is the last one.
|
||||
The next 7 bits are data. These 7-bit sections represent the data in Little endian order. For example, suppose the data is
|
||||
aaaaaaabbbbbbbccccccc (each of these sections is 7 bits). It will be encoded as:
|
||||
1ccccccc 1bbbbbbb 0aaaaaaa
|
||||
|
||||
This encoding is used in youtube parameters to encode offsets and to encode the length for length-prefixed data.
|
||||
See https://developers.google.com/protocol-buffers/docs/encoding#varints for more info.'''
|
||||
needed_bytes = ceil(offset.bit_length()/7) or 1 # (0).bit_length() returns 0, but we need 1 in that case.
|
||||
encoded_bytes = bytearray(needed_bytes)
|
||||
for i in range(0, needed_bytes - 1):
|
||||
encoded_bytes[i] = (offset & 127) | 128 # 7 least significant bits
|
||||
offset = offset >> 7
|
||||
encoded_bytes[-1] = offset & 127 # leave first bit as zero for last byte
|
||||
|
||||
return bytes(encoded_bytes)
|
||||
|
||||
|
||||
def varint_decode(encoded):
|
||||
decoded = 0
|
||||
for i, byte in enumerate(encoded):
|
||||
decoded |= (byte & 127) << 7*i
|
||||
|
||||
if not (byte & 128):
|
||||
break
|
||||
return decoded
|
||||
|
||||
|
||||
def string(field_number, data):
|
||||
data = as_bytes(data)
|
||||
return _proto_field(2, field_number, varint_encode(len(data)) + data)
|
||||
nested = string
|
||||
|
||||
def uint(field_number, value):
|
||||
return _proto_field(0, field_number, varint_encode(value))
|
||||
|
||||
|
||||
|
||||
|
||||
def _proto_field(wire_type, field_number, data):
|
||||
''' See https://developers.google.com/protocol-buffers/docs/encoding#structure '''
|
||||
return varint_encode( (field_number << 3) | wire_type) + data
|
||||
|
||||
|
||||
|
||||
def percent_b64encode(data):
|
||||
return base64.urlsafe_b64encode(data).replace(b'=', b'%3D')
|
||||
|
||||
|
||||
def unpadded_b64encode(data):
|
||||
return base64.urlsafe_b64encode(data).replace(b'=', b'')
|
||||
|
||||
def as_bytes(value):
|
||||
if isinstance(value, str):
|
||||
return value.encode('utf-8')
|
||||
return value
|
||||
|
||||
|
||||
def read_varint(data):
|
||||
result = 0
|
||||
i = 0
|
||||
while True:
|
||||
try:
|
||||
byte = data.read(1)[0]
|
||||
except IndexError:
|
||||
if i == 0:
|
||||
raise EOFError()
|
||||
raise Exception('Unterminated varint starting at ' + str(data.tell() - i))
|
||||
result |= (byte & 127) << 7*i
|
||||
if not byte & 128:
|
||||
break
|
||||
|
||||
i += 1
|
||||
return result
|
||||
|
||||
|
||||
def read_group(data, end_sequence):
|
||||
start = data.tell()
|
||||
index = data.original.find(end_sequence, start)
|
||||
if index == -1:
|
||||
raise Exception('Unterminated group')
|
||||
data.seek(index + len(end_sequence))
|
||||
return data.original[start:index]
|
||||
|
||||
def read_protobuf(data):
|
||||
data_original = data
|
||||
data = io.BytesIO(data)
|
||||
data.original = data_original
|
||||
while True:
|
||||
try:
|
||||
tag = read_varint(data)
|
||||
except EOFError:
|
||||
break
|
||||
wire_type = tag & 7
|
||||
field_number = tag >> 3
|
||||
|
||||
if wire_type == 0:
|
||||
value = read_varint(data)
|
||||
elif wire_type == 1:
|
||||
value = data.read(8)
|
||||
elif wire_type == 2:
|
||||
length = read_varint(data)
|
||||
value = data.read(length)
|
||||
elif wire_type == 3:
|
||||
end_bytes = encode_varint((field_number << 3) | 4)
|
||||
value = read_group(data, end_bytes)
|
||||
elif wire_type == 5:
|
||||
value = data.read(4)
|
||||
else:
|
||||
raise Exception("Unknown wire type: " + str(wire_type) + ", Tag: " + bytes_to_hex(succinct_encode(tag)) + ", at position " + str(data.tell()))
|
||||
yield (wire_type, field_number, value)
|
||||
|
||||
def parse(data):
|
||||
return {field_number: value for _, field_number, value in read_protobuf(data)}
|
||||
|
||||
def b64_to_bytes(data):
|
||||
if isinstance(data, bytes):
|
||||
data = data.decode('ascii')
|
||||
data = data.replace("%3D", "=")
|
||||
return base64.urlsafe_b64decode(data + "="*((4 - len(data)%4)%4) )
|
@ -1,158 +0,0 @@
|
||||
from youtube_data import proto
|
||||
from youtube import utils
|
||||
from flask import Markup
|
||||
import urllib.parse
|
||||
import requests
|
||||
import base64
|
||||
import json
|
||||
|
||||
def page_number_to_sp_parameter(page, autocorrect, sort, filters):
|
||||
offset = (int(page) - 1)*20 # 20 results per page
|
||||
autocorrect = proto.nested(8, proto.uint(1, 1 - int(autocorrect) ))
|
||||
filters_enc = proto.nested(2, proto.uint(1, filters['time']) + proto.uint(2, filters['type']) + proto.uint(3, filters['duration']))
|
||||
result = proto.uint(1, sort) + filters_enc + autocorrect + proto.uint(9, offset) + proto.string(61, b'')
|
||||
return base64.urlsafe_b64encode(result).decode('ascii')
|
||||
|
||||
def search_by_terms(search_terms, page, autocorrect, sort, filters):
|
||||
url = "https://www.youtube.com/results?search_query=" + urllib.parse.quote_plus(search_terms)
|
||||
headers = {
|
||||
'Host': 'www.youtube.com',
|
||||
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64)',
|
||||
'Accept': '*/*',
|
||||
'Accept-Language': 'en-US,en;q=0.5',
|
||||
'X-YouTube-Client-Name': '1',
|
||||
'X-YouTube-Client-Version': '2.20180418',
|
||||
}
|
||||
url += "&pbj=1&sp=" + page_number_to_sp_parameter(page, autocorrect, sort, filters).replace("=", "%3D")
|
||||
content = requests.get(url, headers=headers).text
|
||||
|
||||
info = json.loads(content)
|
||||
videos = get_videos_from_search(info)
|
||||
channels = get_channels_from_search(info)
|
||||
|
||||
results = {
|
||||
"videos": videos,
|
||||
"channels": channels
|
||||
}
|
||||
return results
|
||||
|
||||
def get_channels_from_search(search):
|
||||
results = []
|
||||
search = search[1]['response']
|
||||
primaryContents = search['contents']['twoColumnSearchResultsRenderer']['primaryContents']
|
||||
items = primaryContents['sectionListRenderer']['contents'][0]['itemSectionRenderer']['contents']
|
||||
|
||||
for item in items:
|
||||
try:
|
||||
item['channelRenderer']
|
||||
channel = get_channel_renderer_item_info(item['channelRenderer'])
|
||||
results.append(channel)
|
||||
except KeyError:
|
||||
continue
|
||||
return results
|
||||
|
||||
def get_channel_renderer_item_info(item):
|
||||
try:
|
||||
suscribers = item['subscriberCountText']['simpleText'].split(" ")[0]
|
||||
except:
|
||||
suscribers = "?"
|
||||
|
||||
try:
|
||||
description = utils.get_description_snippet_text(item['descriptionSnippet']['runs'])
|
||||
except KeyError:
|
||||
description = ""
|
||||
|
||||
try:
|
||||
channel = {
|
||||
"channelId": item['channelId'],
|
||||
"username": item['title']['simpleText'],
|
||||
"thumbnail": "https:{}".format(item['thumbnail']['thumbnails'][0]['url'].replace("/", "~")),
|
||||
"description": Markup(str(description)),
|
||||
"suscribers": suscribers,
|
||||
"videos": item['videoCountText']['runs'][0]['text']
|
||||
}
|
||||
except KeyError:
|
||||
channel = {
|
||||
"channelId": item['channelId'],
|
||||
"username": item['title']['simpleText'],
|
||||
"avatar": item['thumbnail']['thumbnails'][0]['url'],
|
||||
"suscribers": suscribers
|
||||
}
|
||||
return channel
|
||||
|
||||
def get_videos_from_search(search):
|
||||
latest = []
|
||||
results = []
|
||||
search = search[1]['response']
|
||||
primaryContents = search['contents']['twoColumnSearchResultsRenderer']['primaryContents']
|
||||
items = primaryContents['sectionListRenderer']['contents'][0]['itemSectionRenderer']['contents']
|
||||
|
||||
for item in items:
|
||||
try:
|
||||
item['videoRenderer']
|
||||
video = get_video_renderer_item_info(item['videoRenderer'])
|
||||
results.append(video)
|
||||
except KeyError:
|
||||
continue
|
||||
|
||||
# Sometimes Youtube will return an empty query. Try again.
|
||||
return results
|
||||
|
||||
def get_video_renderer_item_info(item):
|
||||
published = ""
|
||||
views = ""
|
||||
isLive = False
|
||||
isUpcoming = False
|
||||
|
||||
thumbnailOverlays = item['thumbnailOverlays']
|
||||
try:
|
||||
if 'UPCOMING' in str(thumbnailOverlays):
|
||||
start_time = item['upcomingEventData']['startTime']
|
||||
isUpcoming = True
|
||||
views = "-"
|
||||
published = "Scheduled"
|
||||
except KeyError:
|
||||
isUpcoming = False
|
||||
|
||||
try:
|
||||
if 'LIVE' in str(thumbnailOverlays):
|
||||
isLive = True
|
||||
try:
|
||||
views = item['viewCountText']['simpleText']
|
||||
except:
|
||||
views = "Live"
|
||||
try:
|
||||
duration = item['lengthText']['simpleText']
|
||||
except:
|
||||
duration = "-"
|
||||
if published != "Scheduled":
|
||||
try:
|
||||
published = item['publishedTimeText']['simpleText']
|
||||
except KeyError:
|
||||
published = "None"
|
||||
except:
|
||||
isUpcoming = False
|
||||
isLive = False
|
||||
|
||||
if not isUpcoming and not isLive:
|
||||
views = item['viewCountText']['simpleText']
|
||||
published = item['publishedTimeText']['simpleText']
|
||||
duration = item['lengthText']['simpleText']
|
||||
|
||||
video = {
|
||||
'videoTitle':item['title']['runs'][0]['text'],
|
||||
'description':Markup(str(utils.get_description_snippet_text(item['descriptionSnippet']['runs']))),
|
||||
'views':views,
|
||||
'timeStamp':published,
|
||||
'duration':duration,
|
||||
'channelName':item['ownerText']['runs'][0]['text'],
|
||||
'authorUrl':"/channel/{}".format(item['ownerText']['runs'][0]['navigationEndpoint']['browseEndpoint']['browseId']),
|
||||
'channelId':item['ownerText']['runs'][0]['navigationEndpoint']['browseEndpoint']['browseId'],
|
||||
'id':item['videoId'],
|
||||
'videoUrl':"/watch?v={}".format(item['videoId']),
|
||||
'isLive':isLive,
|
||||
'isUpcoming':isUpcoming,
|
||||
'videoThumb':item['thumbnail']['thumbnails'][0]['url']
|
||||
}
|
||||
return video
|
||||
|
Reference in New Issue
Block a user