# twitter-api-client

* **Version:** 0.10.21
* **Summary:** Implementation of X/Twitter v1, v2, and GraphQL APIs.
* **Homepage:** https://github.com/trevorhobenshield/twitter-api-client
* **Author:** Trevor Hobenshield
* **License:** MIT
* **Requires Python:** >=3.10.10
* **Keywords:** twitter, api, client, async, search, automation, bot, scrape
* **Uploaded:** 2024-04-14 20:24:28

## Implementation of X/Twitter v1, v2, and GraphQL APIs.


## Table of Contents

* [Installation](#installation)
* [Automation](#automation)
* [Scraping](#scraping)
  * [Get all user/tweet data](#get-all-usertweet-data)
  * [Resume Pagination](#resume-pagination)
  * [Search](#search)
* [Spaces](#spaces)
  * [Live Audio Capture](#live-audio-capture)
  * [Live Transcript Capture](#live-transcript-capture)
  * [Search and Metadata](#search-and-metadata)
* [Automated Solvers](#automated-solvers)
* [Example API Responses](#example-api-responses)

### Installation

```bash
pip install twitter-api-client
```
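Note: the project metadata pins `requires_python>=3.10.10`, so Python 3.10.10 or newer is required.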

### Automation

```python
from twitter.account import Account

## sign-in with credentials
email, username, password = ..., ..., ...
account = Account(email, username, password)

## or, resume session using cookies
# account = Account(cookies={"ct0": ..., "auth_token": ...})

## or, resume session using cookies (JSON file)
# account = Account(cookies='twitter.cookies')


account.tweet('test 123')
account.untweet(123456)
account.retweet(123456)
account.unretweet(123456)
account.reply('foo', tweet_id=123456)
account.quote('bar', tweet_id=123456)
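# scheduled times accept either a unix timestamp or a 'YYYY-MM-DD HH:MM' string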
account.schedule_tweet('schedule foo', 1681851240)
account.unschedule_tweet(123456)

account.tweet('hello world', media=[
    {'media': 'test.jpg', 'alt': 'some alt text', 'tagged_users': [123]},
    {'media': 'test.jpeg', 'alt': 'some alt text', 'tagged_users': [123]},
    {'media': 'test.png', 'alt': 'some alt text', 'tagged_users': [123]},
    {'media': 'test.jfif', 'alt': 'some alt text', 'tagged_users': [123]},
])

account.schedule_tweet('foo bar', '2023-04-18 15:42', media=[
    {'media': 'test.gif', 'alt': 'some alt text'},
])

account.schedule_reply('hello world', '2023-04-19 15:42', tweet_id=123456, media=[
    {'media': 'test.gif', 'alt': 'some alt text'},
])

account.dm('my message', [1234], media='test.jpg')

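# the last argument to create_poll is the poll duration in minutes (10080 = 7 days)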
account.create_poll('test poll 123', ['hello', 'world', 'foo', 'bar'], 10080)

# tweets
account.like(123456)
account.unlike(123456)
account.bookmark(123456)
account.unbookmark(123456)
account.pin(123456)
account.unpin(123456)

# users
account.follow(1234)
account.unfollow(1234)
account.mute(1234)
account.unmute(1234)
account.enable_notifications(1234)
account.disable_notifications(1234)
account.block(1234)
account.unblock(1234)

# user profile
account.update_profile_image('test.jpg')
account.update_profile_banner('test.png')
account.update_profile_info(name='Foo Bar', description='test 123', location='Victoria, BC')

# topics
account.follow_topic(111)
account.unfollow_topic(111)

# lists
account.create_list('My List', 'description of my list', private=False)
account.update_list(222, 'My Updated List', 'some updated description', private=False)
account.update_list_banner(222, 'test.png')
account.delete_list_banner(222)
account.add_list_member(222, 1234)
account.remove_list_member(222, 1234)
account.delete_list(222)
account.pin_list(222)
account.unpin_list(222)

# refresh all pinned lists in this order
account.update_pinned_lists([222, 111, 333])

# unpin all lists
account.update_pinned_lists([])

# get timelines
timeline = account.home_timeline()
latest_timeline = account.home_latest_timeline(limit=500)

# get bookmarks
bookmarks = account.bookmarks()

# get DM inbox metadata
inbox = account.dm_inbox()

# get DMs from all conversations
dms = account.dm_history()

# get DMs from specific conversations
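# (IDs like '123456-789012' pair the two participants' user IDs)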
dms = account.dm_history(['123456-789012', '345678-901234'])

# search DMs by keyword
dms = account.dm_search('test123')

# delete entire conversation
account.dm_delete(conversation_id='123456-789012')

# delete (hide) specific DM
account.dm_delete(message_id='123456')

# get all scheduled tweets
scheduled_tweets = account.scheduled_tweets()

# delete a scheduled tweet
account.delete_scheduled_tweet(12345678)

# get all draft tweets
draft_tweets = account.draft_tweets()

# delete a draft tweet
account.delete_draft_tweet(12345678)

# delete all scheduled tweets
account.clear_scheduled_tweets()

# delete all draft tweets
account.clear_draft_tweets()

# example configuration
account.update_settings({
    "address_book_live_sync_enabled": False,
    "allow_ads_personalization": False,
    "allow_authenticated_periscope_requests": True,
    "allow_dm_groups_from": "following",
    "allow_dms_from": "following",
    "allow_location_history_personalization": False,
    "allow_logged_out_device_personalization": False,
    "allow_media_tagging": "none",
    "allow_sharing_data_for_third_party_personalization": False,
    "alt_text_compose_enabled": None,
    "always_use_https": True,
    "autoplay_disabled": False,
    "country_code": "us",
    "discoverable_by_email": False,
    "discoverable_by_mobile_phone": False,
    "display_sensitive_media": False,
    "dm_quality_filter": "enabled",
    "dm_receipt_setting": "all_disabled",
    "geo_enabled": False,
    "include_alt_text_compose": True,
    "include_mention_filter": True,
    "include_nsfw_admin_flag": True,
    "include_nsfw_user_flag": True,
    "include_ranked_timeline": True,
    "language": "en",
    "mention_filter": "unfiltered",
    "nsfw_admin": False,
    "nsfw_user": False,
    "personalized_trends": True,
    "protected": False,
    "ranked_timeline_eligible": None,
    "ranked_timeline_setting": None,
    "require_password_login": False,
    "requires_login_verification": False,
    "sleep_time": {
        "enabled": False,
        "end_time": None,
        "start_time": None
    },
    "translator_type": "none",
    "universal_quality_filtering_enabled": "enabled",
    "use_cookie_personalization": False,
})

# example configuration
account.update_search_settings({
    "optInFiltering": True,  # filter nsfw content
    "optInBlocking": True,  # filter blocked accounts
})

notifications = account.notifications()

account.change_password('old pwd', 'new pwd')

```
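After one successful credential login you can persist the session cookies and skip the login flow next time. A minimal sketch, assuming `Account` exposes its underlying HTTP session's cookie jar on `account.session` and that the cookies file is a flat JSON object of cookie names to values:

```python
import json

# assumption: the client keeps an HTTP session with a cookie jar on `account.session`
cookies = account.session.cookies
with open('twitter.cookies', 'w') as f:
    json.dump({'ct0': cookies.get('ct0'), 'auth_token': cookies.get('auth_token')}, f)

# later: resume the session without re-authenticating
from twitter.account import Account
account = Account(cookies='twitter.cookies')
```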

### Scraping

#### Get all user/tweet data

Two special batch queries, `scraper.tweets_by_ids` and `scraper.users_by_ids`, should be preferred when applicable. These endpoints are much more efficient and offer far higher effective throughput than their unbatched counterparts, since each request can carry roughly 220 IDs. See the table below for a comparison.

| Endpoint      | Batch Size     | Rate Limit    |
|---------------|----------------|---------------|
| tweets_by_ids | ~220           | 500 / 15 mins |
| tweets_by_id  | 1              | 50 / 15 mins  |
| users_by_ids  | ~220           | 100 / 15 mins |
| users_by_id   | 1              | 500 / 15 mins |

*As of Fall 2023, username/password login is unstable; resuming a session with cookies is recommended instead.*

```python
from twitter.scraper import Scraper

## sign-in with credentials
email, username, password = ..., ..., ...
scraper = Scraper(email, username, password)

## or, resume session using cookies
# scraper = Scraper(cookies={"ct0": ..., "auth_token": ...})

## or, resume session using cookies (JSON file)
# scraper = Scraper(cookies='twitter.cookies')

## or, initialize guest session (limited endpoints)
# from twitter.util import init_session
# scraper = Scraper(session=init_session())

# user data
users = scraper.users(['foo', 'bar', 'hello', 'world'])
users = scraper.users_by_ids([123, 234, 345]) # preferred
users = scraper.users_by_id([123, 234, 345])
tweets = scraper.tweets([123, 234, 345])
likes = scraper.likes([123, 234, 345])
tweets_and_replies = scraper.tweets_and_replies([123, 234, 345])
media = scraper.media([123, 234, 345])
following = scraper.following([123, 234, 345])
followers = scraper.followers([123, 234, 345])
scraper.tweet_stats([111111, 222222, 333333])

# get recommended users based on user
scraper.recommended_users()
scraper.recommended_users([123])

# tweet data
tweets = scraper.tweets_by_ids([987, 876, 754]) # preferred
tweets = scraper.tweets_by_id([987, 876, 754])
tweet_details = scraper.tweets_details([987, 876, 754])
retweeters = scraper.retweeters([987, 876, 754])
favoriters = scraper.favoriters([987, 876, 754])

scraper.download_media([
    111111,
    222222,
    333333,
    444444,
])

# trends
scraper.trends()
```
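Because the batch endpoints accept only ~220 IDs per request, a large ID list needs to be split into chunks. A minimal sketch (the 200-IDs-per-chunk figure is a conservative assumption based on the table above):

```python
from twitter.scraper import Scraper

def chunked(ids: list[int], size: int = 200):
    """Yield successive chunks of at most `size` IDs."""
    for i in range(0, len(ids), size):
        yield ids[i:i + size]

scraper = Scraper(cookies='twitter.cookies')

all_tweet_ids = [...]  # your full list of tweet IDs
results = []
for batch in chunked(all_tweet_ids):
    # one batched request per chunk, well inside the 500 / 15 min limit
    results.append(scraper.tweets_by_ids(batch))
```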

#### Resume Pagination
**Pagination is handled automatically by default.** However, there are circumstances where you may need to resume pagination from a specific cursor. For example, the `Followers` endpoint allows only 50 requests every 15 minutes; when you hit that limit, you can resume from where you left off by supplying the last cursor value.
```python
from twitter.scraper import Scraper

email, username, password = ..., ..., ...
scraper = Scraper(email, username, password)

user_id = 44196397
cursor = '1767341853908517597|1663601806447476672'  # example cursor
limit = 100  # arbitrary limit for demonstration
follower_subset, last_cursor = scraper.followers([user_id], limit=limit, cursor=cursor)

# use last_cursor to resume pagination
```
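To page until the endpoint is exhausted, feed each returned cursor back in. A sketch, assuming the same `(subset, cursor)` return shape on every call and that a falsy cursor signals the last page:

```python
followers, cursor = scraper.followers([user_id], limit=limit)
while cursor:  # assumption: an empty/None cursor means no further pages
    subset, cursor = scraper.followers([user_id], limit=limit, cursor=cursor)
    followers.extend(subset)
```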

#### Search

```python
from twitter.search import Search

email, username, password = ..., ..., ...
# default output directory is `data/search_results` if save=True
search = Search(email, username, password, save=True, debug=1)

res = search.run(
    limit=37,
    retries=5,
    queries=[
        {
            'category': 'Top',
            'query': 'paperswithcode -tensorflow -tf'
        },
        {
            'category': 'Latest',
            'query': 'test'
        },
        {
            'category': 'People',
            'query': 'brasil portugal -argentina'
        },
        {
            'category': 'Photos',
            'query': 'greece'
        },
        {
            'category': 'Videos',
            'query': 'italy'
        },
    ],
)
```

**Search Operators Reference**

https://developer.twitter.com/en/docs/twitter-api/v1/rules-and-filtering/search-operators

https://developer.twitter.com/en/docs/twitter-api/tweets/search/integrate/build-a-query
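For example, standard operators from those references can be combined directly in the `query` strings above (the query values here are hypothetical):

```python
queries = [
    {'category': 'Latest', 'query': 'from:jack "exact phrase" -filter:replies'},
    {'category': 'Top', 'query': '(pytorch OR jax) lang:en since:2023-01-01'},
    {'category': 'Photos', 'query': 'aurora min_faves:100 filter:images'},
]
```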

### Spaces

#### Live Audio Capture

Capture live audio for up to 500 streams per IP.

```python
from twitter.scraper import Scraper
from twitter.util import init_session

session = init_session() # initialize guest session, no login required
scraper = Scraper(session=session)

rooms = [...]
scraper.spaces_live(rooms=rooms) # capture live audio from list of rooms
```

#### Live Transcript Capture

**Raw transcript chunks**

```python
from twitter.scraper import Scraper
from twitter.util import init_session

session = init_session() # initialize guest session, no login required
scraper = Scraper(session=session)

# room must be live, i.e. in "Running" state
scraper.space_live_transcript('1zqKVPlQNApJB', frequency=2)  # word-level live transcript (dirty, on-the-fly transcription before post-processing)
```

**Processed (final) transcript chunks**

```python
from twitter.scraper import Scraper
from twitter.util import init_session

session = init_session() # initialize guest session, no login required
scraper = Scraper(session=session)

# room must be live, i.e. in "Running" state
scraper.space_live_transcript('1zqKVPlQNApJB', frequency=1)  # finalized live transcript (clean)
```

#### Search and Metadata
```python
from twitter.scraper import Scraper
from twitter.util import init_session
from twitter.constants import SpaceCategory

session = init_session() # initialize guest session, no login required
scraper = Scraper(session=session)

# download audio and chat-log from space
spaces = scraper.spaces(rooms=['1eaJbrAPnBVJX', '1eaJbrAlZjjJX'], audio=True, chat=True)

# pull metadata only
spaces = scraper.spaces(rooms=['1eaJbrAPnBVJX', '1eaJbrAlZjjJX'])

# search for spaces in "Upcoming", "Top" and "Live" categories
spaces = scraper.spaces(search=[
    {
        'filter': SpaceCategory.Upcoming,
        'query': 'hello'
    },
    {
        'filter': SpaceCategory.Top,
        'query': 'world'
    },
    {
        'filter': SpaceCategory.Live,
        'query': 'foo bar'
    }
])
```

### Automated Solvers

> This requires installation of the [proton-api-client](https://pypi.org/project/proton-api-client) package

To set up automated email confirmation/verification solvers, add your Proton Mail credentials as shown below.
This removes the need to manually solve email challenges via the web app. The same credentials can be passed
to the `Scraper`, `Account`, and `Search` constructors.

E.g.

```python
from twitter.account import Account
from twitter.util import get_code
from proton.client import ProtonMail

proton_username, proton_password = ..., ...
proton = lambda: get_code(ProtonMail(proton_username, proton_password))

email, username, password = ..., ..., ...
account = Account(email, username, password, proton=proton)
```
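The same callable can be passed to the other constructors, assuming they accept the same `proton` keyword:

```python
from twitter.scraper import Scraper
from twitter.search import Search

scraper = Scraper(email, username, password, proton=proton)
search = Search(email, username, password, proton=proton)
```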


            
