Install the required dependencies first:

pip install requests tweepy pandas beautifulsoup4 nltk
import requests
from bs4 import BeautifulSoup
import tweepy
import pandas as pd
import time
import re
import nltk
from collections import Counter
from nltk.corpus import stopwords

# Download stopwords for text analysis
nltk.download('stopwords')
# Set API keys and tokens (placeholders; replace with your own credentials)
api_key = "YOUR_TWITTER_API_KEY"
api_secret = "YOUR_TWITTER_API_SECRET"
access_token = "YOUR_TWITTER_ACCESS_TOKEN"
access_token_secret = "YOUR_TWITTER_ACCESS_TOKEN_SECRET"
TWEETSCOUT_API_KEY = 'YOUR_TWEETSCOUT_API_KEY'
RUGCHECK_API_KEY = 'YOUR_RUGCHECK_API_KEY'
TELEGRAM_BOT_TOKEN = 'YOUR_TELEGRAM_BOT_TOKEN'
TELEGRAM_CHAT_ID = 'YOUR_CHAT_ID'

# Authenticate with the Twitter API (OAuth 1.0a user context)
auth = tweepy.OAuthHandler(api_key, api_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
# Step 1: Analyze information from the Twitter list
def analyze_twitter_list(list_owner, list_slug):
    tweets = []
    try:
        for tweet in tweepy.Cursor(api.list_timeline, owner_screen_name=list_owner, slug=list_slug).items(200):
            tweets.append(tweet.text)
    except tweepy.TweepyException as e:  # tweepy.TweepError in tweepy < 4.0
        print(f"Error: {e}")
        return []

    # Clean and tokenize tweets
    def clean_text(text):
        text = text.lower()
        text = re.sub(r'http\S+', '', text)      # Remove URLs
        text = re.sub(r'[^a-zA-Z\s]', '', text)  # Remove special characters
        return text

    cleaned_tweets = [clean_text(tweet) for tweet in tweets]
    all_tokens = []
    stop_words = set(stopwords.words('english'))
    for tweet in cleaned_tweets:
        tokens = [word for word in tweet.split() if word not in stop_words]
        all_tokens.extend(tokens)

    # Get the most common tokens
    counter = Counter(all_tokens)
    top_tokens = counter.most_common(20)
    return pd.DataFrame(top_tokens, columns=['Token', 'Count'])
# Step 2: Scrape the Pump.fun board for new tokens
def scrape_pump_fun_board():
    url = 'https://pump.fun/board'
    headers = {
        'User-Agent': 'Mozilla/5.0'
    }
    response = requests.get(url, headers=headers)
    if response.status_code != 200:
        print(f"Failed to retrieve the page. Status code: {response.status_code}")
        return pd.DataFrame()

    soup = BeautifulSoup(response.text, 'html.parser')
    tokens = []
    # Note: pump.fun is largely rendered client-side, so the class names below
    # may not appear in the static HTML; adjust the selectors to the live markup.
    token_cards = soup.find_all('div', class_='token-card')
    for card in token_cards:
        try:
            token_name = card.find('h2', class_='token-name').text.strip()
            token_symbol = card.find('span', class_='token-symbol').text.strip()
            token_mint = card.find('a', class_='token-mint')['href'].strip()
            social_links = [a['href'] for a in card.find_all('a', class_='social-link')]
            tokens.append({
                'Name': token_name,
                'Symbol': token_symbol,
                'Mint': token_mint,
                'Social Links': social_links
            })
        except Exception as e:
            print(f"Error processing a token card: {e}")
    return pd.DataFrame(tokens)
# Step 3: Get RugCheck data
def get_rugcheck_data(contract_address):
    api_url = f"https://api.rugcheck.xyz/v1/tokens/{contract_address}"
    headers = {
        'Authorization': f'Bearer {RUGCHECK_API_KEY}',
        'Content-Type': 'application/json'
    }
    try:
        response = requests.get(api_url, headers=headers)
        response.raise_for_status()
        data = response.json()
        return data
    except requests.exceptions.HTTPError as http_err:
        print(f"HTTP error occurred for {contract_address}: {http_err}")
    except Exception as err:
        print(f"An error occurred for {contract_address}: {err}")
    return None
# Step 4: Get TweetScout score
def get_tweetscout_score(twitter_handle):
    api_url = f"https://api.tweetscout.io/score/{twitter_handle}"
    headers = {
        'Authorization': f'Bearer {TWEETSCOUT_API_KEY}',
        'Content-Type': 'application/json'
    }
    try:
        response = requests.get(api_url, headers=headers)
        response.raise_for_status()
        data = response.json()
        return data.get('score', 0)
    except requests.exceptions.HTTPError as http_err:
        print(f"HTTP error occurred for {twitter_handle}: {http_err}")
    except Exception as err:
        print(f"An error occurred for {twitter_handle}: {err}")
    return 0
# Step 5: Send an alert to Telegram
def send_to_telegram(message):
    api_url = f"https://api.telegram.org/bot{TELEGRAM_BOT_TOKEN}/sendMessage"
    params = {
        'chat_id': TELEGRAM_CHAT_ID,
        'text': message
    }
    try:
        response = requests.post(api_url, params=params)
        response.raise_for_status()
        print(f"Message sent to Telegram: {message}")
    except requests.exceptions.HTTPError as http_err:
        print(f"HTTP error occurred: {http_err}")
    except Exception as err:
        print(f"An error occurred while sending message: {err}")
# Main routine to combine everything
def main():
    # Step 1: Analyze Twitter list
    twitter_data = analyze_twitter_list('list_owner_username', 'list-slug')
    print(twitter_data)

    # Step 2: Scrape Pump.fun
    token_data = scrape_pump_fun_board()

    # Steps 3 & 4: Analyze tokens via RugCheck and TweetScout
    for index, row in token_data.iterrows():
        contract_address = row['Mint']
        # Assume the first social link is the token's Twitter/X profile URL and
        # reduce it to the bare handle that the TweetScout endpoint expects.
        if row['Social Links']:
            twitter_handle = row['Social Links'][0].rstrip('/').split('/')[-1]
        else:
            twitter_handle = None
        if twitter_handle:
            tweetscout_score = get_tweetscout_score(twitter_handle)
        else:
            tweetscout_score = 0
        rugcheck_data = get_rugcheck_data(contract_address)
        if rugcheck_data and rugcheck_data.get('contract_status', '').lower() == 'good' and tweetscout_score > 20:
            message = (
                f"Token Alert 🚨\n"
                f"Name: {row['Name']}\n"
                f"Symbol: {row['Symbol']}\n"
                f"Contract Address: {contract_address}\n"
                f"TweetScout Score: {tweetscout_score}\n"
                f"Meets all criteria for analysis. 🟢"
            )
            send_to_telegram(message)
        time.sleep(1)  # Respect API rate limits

if __name__ == "__main__":
    main()
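The script above runs the pipeline once and exits. If you want it to monitor continuously, one option is to swap the final if __name__ == "__main__" block for a simple polling loop. This is a minimal sketch, assuming the imports and main() defined above; the 15-minute interval is an arbitrary choice, not something from the original script, so tune it to the rate limits of the Twitter, RugCheck and TweetScout APIs:

if __name__ == "__main__":
    POLL_INTERVAL_SECONDS = 15 * 60  # arbitrary interval, adjust as needed
    while True:
        main()  # re-run the full scrape / score / alert pipeline
        time.sleep(POLL_INTERVAL_SECONDS)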