From ba51d74777ea001a0238ed96b21dc21ead5d0586 Mon Sep 17 00:00:00 2001
From: Piotr Orzechowski <3194302+plusz@users.noreply.github.com>
Date: Sat, 24 Jan 2026 22:37:09 +0100
Subject: [PATCH 1/3] add rate limiting, retry logic, and progress tracking
 for tweet deletion

- Track deleted tweets in deleted_tweets.txt to support resuming after an interruption
- Add a 20s delay between deletions to stay within the 50-requests-per-15-minutes limit
- Implement retry logic with exponential backoff for connection errors and timeouts (a standalone sketch of this pattern follows the patch)
- Handle 429 rate limit responses by automatically waiting until the reset time
- Display rate limit status and a progress counter (current/total)
- Add an extra delay when few rate-limit requests remain
---
 de-x.py | 104 ++++++++++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 94 insertions(+), 10 deletions(-)

diff --git a/de-x.py b/de-x.py
index 821e2db..6872eb7 100644
--- a/de-x.py
+++ b/de-x.py
@@ -10,6 +10,8 @@
 import sys
 import json
 import requests
+import time
+import os
 
 
 def get_tweet_ids(json_data):
@@ -21,6 +23,17 @@ def get_tweet_ids(json_data):
     return result
 
 
+def load_deleted_tweets(deleted_file):
+    if not os.path.exists(deleted_file):
+        return set()
+
+    with open(deleted_file, 'r') as f:
+        return set(line.strip() for line in f if line.strip())
+
+def save_deleted_tweet(deleted_file, tweet_id):
+    with open(deleted_file, 'a') as f:
+        f.write(f"{tweet_id}\n")
+
 def parse_req_headers(request_file):
 
     sess = {}
@@ -56,24 +69,95 @@ def main(ac, av):
 
     session = parse_req_headers(av[2])
 
-    for i in ids:
-        delete_tweet(session, i)
-        # maybe add some random sleep here to prevent future rate-limiting
+    deleted_file = 'deleted_tweets.txt'
+    deleted_tweets = load_deleted_tweets(deleted_file)
+
+    ids_to_delete = [tid for tid in ids if tid not in deleted_tweets]
+
+    if len(deleted_tweets) > 0:
+        print(f"[+] Loaded {len(deleted_tweets)} already-deleted tweets")
+        print(f"[+] Skipping {len(ids) - len(ids_to_delete)} tweets")
+
+    total = len(ids_to_delete)
+    if total == 0:
+        print("[+] All tweets already deleted!")
+        return
+
+    print(f"[+] {total} tweets remaining to delete\n")
+
+    for idx, i in enumerate(ids_to_delete, 1):
+        success = delete_tweet(session, i, idx, total)
+        if success:
+            save_deleted_tweet(deleted_file, i)
+        # delay to stay within 50 requests per 15 minutes limit
+        if idx < total:
+            time.sleep(20)
 
 
-def delete_tweet(session, tweet_id):
+def delete_tweet(session, tweet_id, index, total):
 
-    print(f"[*] delete tweet-id {tweet_id}")
+    print(f"[*] [{index}/{total}] delete tweet-id {tweet_id}")
 
     delete_url = "https://twitter.com/i/api/graphql/VaenaVgh5q5ih7kvyVjgtg/DeleteTweet"
     data = {"variables":{"tweet_id":tweet_id,"dark_request":False},"queryId":"VaenaVgh5q5ih7kvyVjgtg"}
 
     # set or re-set correct content-type header
     session["content-type"] = 'application/json'
 
-    r = requests.post(delete_url, data=json.dumps(data), headers=session)
-    print(r.status_code, r.reason)
-    print(r.text[:500] + '...')
-
-    return
+    max_retries = 5
+    retry_delay = 5
+
+    for attempt in range(max_retries):
+        try:
+            r = requests.post(delete_url, data=json.dumps(data), headers=session, timeout=30)
+            print(r.status_code, r.reason)
+
+            rate_limit = r.headers.get('x-rate-limit-limit')
+            rate_remaining = r.headers.get('x-rate-limit-remaining')
+            rate_reset = r.headers.get('x-rate-limit-reset')
+
+            if rate_limit and rate_remaining:
+                print(f"[i] Rate limit: {rate_remaining}/{rate_limit} remaining")
+
+            if r.status_code == 429:
+                if rate_reset:
+                    reset_time = int(rate_reset)
+                    current_time = int(time.time())
+                    wait_time = max(reset_time - current_time + 5, 60)
+                    print(f"[!] Rate limit exceeded. Waiting {wait_time}s until reset (at {time.strftime('%H:%M:%S', time.localtime(reset_time))})")
+                else:
+                    wait_time = 60 * (2 ** attempt)
+                    print(f"[!] Rate limit hit. Waiting {wait_time}s before retry... (attempt {attempt + 1}/{max_retries})")
+
+                if attempt < max_retries - 1:
+                    time.sleep(wait_time)
+                    continue
+                else:
+                    print(f"[!] Rate limit persists after {max_retries} attempts.")
+                    print(f"[!] Stopping execution. Run script again later to continue.")
+                    sys.exit(1)
+
+            print(r.text[:500] + '...')
+
+            if r.status_code == 200:
+                if rate_remaining and int(rate_remaining) < 5:
+                    print(f"[!] Low rate limit remaining ({rate_remaining}). Adding extra 5s delay...")
+                    time.sleep(5)
+                return True
+            else:
+                print(f"[!] Unexpected status code. Marking as failed.")
+                return False
+
+        except (requests.exceptions.ConnectionError, requests.exceptions.Timeout) as e:
+            if attempt < max_retries - 1:
+                wait_time = retry_delay * (2 ** attempt)
+                print(f"[!] Connection error: {type(e).__name__}. Retrying in {wait_time}s... (attempt {attempt + 1}/{max_retries})")
+                time.sleep(wait_time)
+            else:
+                print(f"[!] Failed after {max_retries} attempts. Error: {type(e).__name__}")
+                print(f"[!] Skipping tweet-id {tweet_id} and continuing...")
+                return False
+
+    return False
 
 
 if __name__ == '__main__':
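For reference, a minimal standalone sketch of the retry/backoff pattern the patch above wires into delete_tweet(). The helper name post_with_retries and its keyword arguments are illustrative only and are not part of de-x.py:

    import time
    import requests

    def post_with_retries(url, max_retries=5, retry_delay=5, **kwargs):
        # Hypothetical helper mirroring the patch's behavior in isolation.
        for attempt in range(max_retries):
            try:
                r = requests.post(url, timeout=30, **kwargs)
            except (requests.exceptions.ConnectionError,
                    requests.exceptions.Timeout):
                if attempt == max_retries - 1:
                    raise                                 # caller may skip this id
                time.sleep(retry_delay * (2 ** attempt))  # 5s, 10s, 20s, 40s
                continue
            if r.status_code == 429:
                reset = r.headers.get('x-rate-limit-reset')
                if reset:
                    # wait until the advertised reset time (plus slack), at least 60s
                    wait = max(int(reset) - int(time.time()) + 5, 60)
                else:
                    wait = 60 * (2 ** attempt)            # no reset header: back off
                time.sleep(wait)
                continue
            return r
        return None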
From ad2c8d04af95c0524f5aa99dbf3c6ef66e175e45 Mon Sep 17 00:00:00 2001
From: Piotr Orzechowski <3194302+plusz@users.noreply.github.com>
Date: Sat, 24 Jan 2026 23:03:56 +0100
Subject: [PATCH 2/3] reduce delay between deletions from 20s to 2s, rely on
 429 rate limit handling

---
 de-x.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/de-x.py b/de-x.py
index 6872eb7..9f66110 100644
--- a/de-x.py
+++ b/de-x.py
@@ -89,9 +89,9 @@ def main(ac, av):
         success = delete_tweet(session, i, idx, total)
         if success:
             save_deleted_tweet(deleted_file, i)
-        # delay to stay within 50 requests per 15 minutes limit
+        # small delay between requests, will auto-wait on 429
        if idx < total:
-            time.sleep(20)
+            time.sleep(2)
 
 
 def delete_tweet(session, tweet_id, index, total):
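A quick back-of-envelope check on the new pacing (illustrative only; the 50-per-15-minutes figure comes from the comment this patch replaces):

    # With 2s pacing the quota is burned in a short burst, after which the
    # 429 handler sleeps until x-rate-limit-reset, so long-run throughput
    # is still capped at ~50 deletions per 15-minute window.
    reqs_per_window = 50    # assumed x-rate-limit-limit for DeleteTweet
    window_s = 15 * 60      # 900s rate-limit window
    delay_s = 2             # per-request delay from this patch

    burst_s = reqs_per_window * delay_s   # 100s to exhaust the quota
    idle_s = window_s - burst_s           # ~800s spent waiting on 429
    print(f"burst {burst_s}s, then ~{idle_s}s until reset")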
From 2ad83726f168fb0edd363ead8f027607aae27461 Mon Sep 17 00:00:00 2001
From: Piotr Orzechowski <3194302+plusz@users.noreply.github.com>
Date: Sun, 8 Feb 2026 22:29:27 +0100
Subject: [PATCH 3/3] support fetch-style header files, switch endpoint to
 x.com, add request/response debug logging

- Parse the request file either as a fetch-style JSON object with a "headers" key or as the existing "key: value" format, normalizing header names to Title-Case
- Point the DeleteTweet endpoint at x.com and fill in User-Agent, Origin, and Referer headers when missing
- Log outgoing request and response details, and hint at stale auth tokens on 403 responses
---
 de-x.py | 69 ++++++++++++++++++++++++++++++++++++++++++++-------------
 1 file changed, 53 insertions(+), 16 deletions(-)

diff --git a/de-x.py b/de-x.py
index 9f66110..0226c6b 100644
--- a/de-x.py
+++ b/de-x.py
@@ -37,19 +37,40 @@ def save_deleted_tweet(deleted_file, tweet_id):
 def parse_req_headers(request_file):
 
     sess = {}
-    
+
     with open(request_file) as f:
-        line = f.readline()
-        while line:
-            try:
-                k,v = line.split(':', 1)
-                val = v.lstrip().rstrip()
-                sess[k] = val
-            except:
-                # ignore empty lines
-                pass
-
-            line = f.readline()
+        content = f.read()
+
+    # Try to parse as fetch format (JSON with headers object)
+    if '"headers"' in content or "'headers'" in content:
+        try:
+            # Extract the fetch call object
+            start = content.find('{')
+            end = content.rfind('}') + 1
+            if start >= 0 and end > start:
+                fetch_obj = json.loads(content[start:end])
+                if 'headers' in fetch_obj:
+                    for k, v in fetch_obj['headers'].items():
+                        # Normalize header names to Title-Case
+                        k = '-'.join(word.capitalize() for word in k.split('-'))
+                        sess[k] = v
+                    return sess
+        except:
+            pass
+
+    # Fallback to simple key: value format
+    for line in content.split('\n'):
+        line = line.strip()
+        if not line or line.startswith('#'):
+            continue
+        try:
+            k, v = line.split(':', 1)
+            val = v.lstrip().rstrip()
+            # Normalize header names to Title-Case
+            k = '-'.join(word.capitalize() for word in k.strip().split('-'))
+            sess[k] = val
+        except:
+            pass
 
     return sess
@@ -97,11 +118,24 @@ def main(ac, av):
 def delete_tweet(session, tweet_id, index, total):
 
     print(f"[*] [{index}/{total}] delete tweet-id {tweet_id}")
-    delete_url = "https://twitter.com/i/api/graphql/VaenaVgh5q5ih7kvyVjgtg/DeleteTweet"
+    delete_url = "https://x.com/i/api/graphql/VaenaVgh5q5ih7kvyVjgtg/DeleteTweet"
     data = {"variables":{"tweet_id":tweet_id,"dark_request":False},"queryId":"VaenaVgh5q5ih7kvyVjgtg"}
 
     # set or re-set correct content-type header
-    session["content-type"] = 'application/json'
+    session["Content-Type"] = 'application/json'
+
+    # Ensure critical headers are present
+    if "User-Agent" not in session:
+        session["User-Agent"] = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/143.0.0.0 Safari/537.36"
+    if "Origin" not in session:
+        session["Origin"] = "https://x.com"
+    if "Referer" not in session:
+        session["Referer"] = "https://x.com/home"
+
+    print(f"[*] API endpoint: {delete_url}")
+    print(f"[*] Request data: {json.dumps(data)}")
+    print(f"[*] Has Cookie header: {'Cookie' in session}")
+    print(f"[*] Headers sent: {list(session.keys())}")
 
     max_retries = 5
     retry_delay = 5
@@ -109,7 +143,8 @@ def delete_tweet(session, tweet_id, index, total):
     for attempt in range(max_retries):
         try:
             r = requests.post(delete_url, data=json.dumps(data), headers=session, timeout=30)
-            print(r.status_code, r.reason)
+            print(f"[*] Response status: {r.status_code} {r.reason}")
+            print(f"[*] Response headers: {dict(r.headers)}")
 
             rate_limit = r.headers.get('x-rate-limit-limit')
             rate_remaining = r.headers.get('x-rate-limit-remaining')
             rate_reset = r.headers.get('x-rate-limit-reset')
@@ -144,7 +179,9 @@ def delete_tweet(session, tweet_id, index, total):
                     time.sleep(5)
                 return True
             else:
-                print(f"[!] Unexpected status code. Marking as failed.")
+                print(f"[!] Unexpected status code {r.status_code}. Response body: {r.text[:1000]}")
+                if r.status_code == 403:
+                    print(f"[!] 403 Forbidden - check if headers (especially authorization tokens) are still valid")
                 return False
 
         except (requests.exceptions.ConnectionError, requests.exceptions.Timeout) as e:
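To close the series, a small sketch (not part of any patch) exercising the two request-file formats parse_req_headers() accepts after this change. The file names and token values below are made up, and de-x.py is loaded via importlib because its file name is not a valid module name:

    import importlib.util
    import json

    spec = importlib.util.spec_from_file_location("de_x", "de-x.py")
    de_x = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(de_x)   # requires `requests` to be installed

    # Fetch-style format: a JSON object carrying a "headers" key.
    with open("req_fetch.txt", "w") as f:
        json.dump({"headers": {"authorization": "Bearer XXXX",
                               "x-csrf-token": "YYYY",
                               "cookie": "auth_token=ZZZZ"}}, f)

    # Fallback format: one "Header-Name: value" pair per line.
    with open("req_plain.txt", "w") as f:
        f.write("authorization: Bearer XXXX\n"
                "x-csrf-token: YYYY\n"
                "cookie: auth_token=ZZZZ\n")

    # Both yield the same Title-Cased header dict:
    print(de_x.parse_req_headers("req_fetch.txt"))
    print(de_x.parse_req_headers("req_plain.txt"))
    # {'Authorization': 'Bearer XXXX', 'X-Csrf-Token': 'YYYY', 'Cookie': 'auth_token=ZZZZ'}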