├── requirements.txt
├── Proxies and scrapers GitHub bonus banner.png
├── LinkedIn Images
│   ├── linkedin-scraper-bright-data-screenshot-linkedin-jobs.png
│   ├── linkedin-scraper-bright-data-screenshot-web-scraper-api.png
│   ├── linkedin-scraper-bright-data-screenshot-linkedin-posts-by-url.png
│   ├── linkedin-scraper-bright-data-screenshot-linkedin_jobs_by_url.png
│   ├── linkedin-scraper-bright-data-screenshot-linkedin_jobs_by_keyword.png
│   ├── linkedin-scraper-bright-data-screenshot-linkedin_jobs_by_search_url.png
│   ├── linkedin-scraper-bright-data-screenshot-linkedin_posts_by_company_url.png
│   ├── linkedin-scraper-bright-data-screenshot-linkedin_posts_by_profile_url.png
│   ├── linkedin-scraper-bright-data-screenshot-linkedin-people-profiles-by-name.png
│   ├── linkedin-scraper-bright-data-screenshot-linkedin-people-profiles-by-url.png
│   ├── linkedin-scraper-bright-data-screenshot-linkedin-posts-discover-by-url.png
│   └── linkedin-scraper-bright-data-screenshot-linkedin-company-information-by-url.png
├── linkedin_scraper_api_codes
│   ├── linkedin_profile_by_name.py
│   ├── linkedin_posts_by_url.py
│   ├── linkedin_profile_by_url.py
│   ├── linkedin_company_info_by_url.py
│   ├── linkedin_jobs_by_keyword.py
│   ├── linkedin_posts_by_profile_url.py
│   ├── linkedin_posts_discover_by_url.py
│   ├── linkedin_posts_by_company_url.py
│   ├── linkedin_jobs_by_search_url.py
│   └── linkedin_jobs_by_url.py
├── free_scraper
│   ├── profile_checker.py
│   └── jobs_scraper.py
├── linkedin_scraper_api_data
│   ├── linkedin_jobs_url.json
│   └── linkedin_company_info.json
└── README.md

/requirements.txt:
--------------------------------------------------------------------------------
1 | requests==2.32.3
2 | beautifulsoup4==4.12.3
3 | tenacity==9.0.0
--------------------------------------------------------------------------------
/Proxies and scrapers GitHub bonus banner.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luminati-io/LinkedIn-Scraper/HEAD/Proxies and scrapers GitHub bonus banner.png
--------------------------------------------------------------------------------
/LinkedIn Images/linkedin-scraper-bright-data-screenshot-linkedin-jobs.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luminati-io/LinkedIn-Scraper/HEAD/LinkedIn Images/linkedin-scraper-bright-data-screenshot-linkedin-jobs.png
--------------------------------------------------------------------------------
/LinkedIn Images/linkedin-scraper-bright-data-screenshot-web-scraper-api.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luminati-io/LinkedIn-Scraper/HEAD/LinkedIn Images/linkedin-scraper-bright-data-screenshot-web-scraper-api.png
--------------------------------------------------------------------------------
/LinkedIn Images/linkedin-scraper-bright-data-screenshot-linkedin-posts-by-url.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luminati-io/LinkedIn-Scraper/HEAD/LinkedIn Images/linkedin-scraper-bright-data-screenshot-linkedin-posts-by-url.png
--------------------------------------------------------------------------------
/LinkedIn Images/linkedin-scraper-bright-data-screenshot-linkedin_jobs_by_url.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luminati-io/LinkedIn-Scraper/HEAD/LinkedIn Images/linkedin-scraper-bright-data-screenshot-linkedin_jobs_by_url.png
--------------------------------------------------------------------------------
/LinkedIn Images/linkedin-scraper-bright-data-screenshot-linkedin_jobs_by_keyword.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luminati-io/LinkedIn-Scraper/HEAD/LinkedIn Images/linkedin-scraper-bright-data-screenshot-linkedin_jobs_by_keyword.png
--------------------------------------------------------------------------------
/LinkedIn Images/linkedin-scraper-bright-data-screenshot-linkedin_jobs_by_search_url.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luminati-io/LinkedIn-Scraper/HEAD/LinkedIn Images/linkedin-scraper-bright-data-screenshot-linkedin_jobs_by_search_url.png
--------------------------------------------------------------------------------
/LinkedIn Images/linkedin-scraper-bright-data-screenshot-linkedin_posts_by_company_url.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luminati-io/LinkedIn-Scraper/HEAD/LinkedIn Images/linkedin-scraper-bright-data-screenshot-linkedin_posts_by_company_url.png
--------------------------------------------------------------------------------
/LinkedIn Images/linkedin-scraper-bright-data-screenshot-linkedin_posts_by_profile_url.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luminati-io/LinkedIn-Scraper/HEAD/LinkedIn Images/linkedin-scraper-bright-data-screenshot-linkedin_posts_by_profile_url.png
--------------------------------------------------------------------------------
/LinkedIn Images/linkedin-scraper-bright-data-screenshot-linkedin-people-profiles-by-name.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luminati-io/LinkedIn-Scraper/HEAD/LinkedIn Images/linkedin-scraper-bright-data-screenshot-linkedin-people-profiles-by-name.png
--------------------------------------------------------------------------------
/LinkedIn Images/linkedin-scraper-bright-data-screenshot-linkedin-people-profiles-by-url.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luminati-io/LinkedIn-Scraper/HEAD/LinkedIn Images/linkedin-scraper-bright-data-screenshot-linkedin-people-profiles-by-url.png
--------------------------------------------------------------------------------
/LinkedIn Images/linkedin-scraper-bright-data-screenshot-linkedin-posts-discover-by-url.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luminati-io/LinkedIn-Scraper/HEAD/LinkedIn Images/linkedin-scraper-bright-data-screenshot-linkedin-posts-discover-by-url.png
--------------------------------------------------------------------------------
/LinkedIn Images/linkedin-scraper-bright-data-screenshot-linkedin-company-information-by-url.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luminati-io/LinkedIn-Scraper/HEAD/LinkedIn Images/linkedin-scraper-bright-data-screenshot-linkedin-company-information-by-url.png
--------------------------------------------------------------------------------
/linkedin_scraper_api_codes/linkedin_profile_by_name.py:
-------------------------------------------------------------------------------- 1 | import requests 2 | import json 3 | import time 4 | from datetime import datetime 5 | from typing import List, Dict, Optional, Any 6 | 7 | DATASET_ID = "gd_l1viktl72bvl7bjuj0" 8 | API_URL = "https://api.brightdata.com/datasets/v3" 9 | 10 | 11 | class LinkedInProfileDiscovery: 12 | def __init__(self, api_token: str): 13 | self.api_token = api_token 14 | self.headers = { 15 | "Authorization": f"Bearer {api_token}", 16 | "Content-Type": "application/json", 17 | } 18 | 19 | def discover_profiles(self, people: List[Dict[str, str]]) -> Optional[bool]: 20 | start_time = datetime.now() 21 | print( 22 | f"\nStarting discovery for {len(people)} profiles at {start_time.strftime('%H:%M:%S')}" 23 | ) 24 | 25 | collection_response = self._trigger_discovery(people) 26 | if not collection_response or "snapshot_id" not in collection_response: 27 | print("Failed to initiate profile discovery") 28 | return None 29 | snapshot_id = collection_response["snapshot_id"] 30 | print("Discovery initiated") 31 | print("\nCollecting data:") 32 | 33 | while True: 34 | status = self._check_status(snapshot_id) 35 | elapsed = (datetime.now() - start_time).seconds 36 | 37 | print(f"\rStatus: {status} ({elapsed}s elapsed)", end="", flush=True) 38 | 39 | if status == "ready": 40 | print(f"\nDiscovery completed after {elapsed} seconds") 41 | profile_data = self._get_data(snapshot_id) 42 | if profile_data: 43 | self._save_data(profile_data) 44 | return True 45 | break 46 | elif status in ["failed", "error"]: 47 | print(f"\nDiscovery failed with status: {status}") 48 | return None 49 | time.sleep(5) 50 | 51 | def _trigger_discovery( 52 | self, people: List[Dict[str, str]] 53 | ) -> Optional[Dict[str, Any]]: 54 | try: 55 | print("Connecting to API...") 56 | response = requests.post( 57 | f"{API_URL}/trigger", 58 | headers=self.headers, 59 | params={ 60 | "dataset_id": DATASET_ID, 61 | "type": "discover_new", 62 | "discover_by": "name", 63 | }, 64 | json=people, 65 | timeout=30, 66 | ) 67 | response.raise_for_status() 68 | return response.json() 69 | except requests.exceptions.RequestException as e: 70 | print(f"Failed to trigger discovery: {str(e)}") 71 | return None 72 | 73 | def _check_status(self, snapshot_id: str) -> str: 74 | try: 75 | response = requests.get( 76 | f"{API_URL}/progress/{snapshot_id}", 77 | headers=self.headers, 78 | timeout=30, 79 | ) 80 | response.raise_for_status() 81 | return response.json().get("status", "error") 82 | except requests.exceptions.RequestException: 83 | return "error" 84 | 85 | def _get_data(self, snapshot_id: str) -> Optional[Dict[str, Any]]: 86 | try: 87 | response = requests.get( 88 | f"{API_URL}/snapshot/{snapshot_id}", 89 | headers=self.headers, 90 | params={"format": "json"}, 91 | timeout=30, 92 | ) 93 | response.raise_for_status() 94 | return response.json() 95 | except requests.exceptions.RequestException: 96 | return None 97 | 98 | def _save_data( 99 | self, data: Dict[str, Any], filename: str = "profiles_by_name.json" 100 | ) -> None: 101 | try: 102 | with open(filename, "w", encoding="utf-8") as f: 103 | json.dump(data, f, indent=2, ensure_ascii=False) 104 | print(f"✓ Data saved to {filename}") 105 | print(f"✓ Discovered {len(data)} profiles") 106 | except Exception as e: 107 | print(f"Error saving data: {str(e)}") 108 | 109 | 110 | def main() -> None: 111 | api_token = "" 112 | discoverer = LinkedInProfileDiscovery(api_token) 113 | 114 | people = [ 115 | {"first_name": "James", "last_name": 
"Smith"}, 116 | {"first_name": "Bill", "last_name": "Gates"}, 117 | ] 118 | 119 | discoverer.discover_profiles(people) 120 | 121 | 122 | if __name__ == "__main__": 123 | main() -------------------------------------------------------------------------------- /free_scraper/profile_checker.py: -------------------------------------------------------------------------------- 1 | # import requests 2 | # from datetime import datetime 3 | # from tenacity import retry, stop_after_attempt, wait_random, before_sleep_log 4 | # from typing import Optional, NamedTuple 5 | # import logging 6 | # import random 7 | # import time 8 | 9 | # class URLCheckResult(NamedTuple): 10 | # status_code: int 11 | # error: Optional[str] 12 | # is_auth_wall: bool 13 | # timestamp: str 14 | 15 | # class LinkedInUrlChecker: 16 | # AUTH_WALL_PATTERNS = { 17 | # "authwall?trk=", 18 | # 'window.location.href = "https://" + domain + "/authwall?"', 19 | # "sessionRedirect", 20 | # } 21 | 22 | # RETRY_STATUS_CODES = {999, 429, 403} 23 | 24 | # def __init__(self, max_retries: int = 3, timeout: int = 10): 25 | # """ 26 | # Initialize the LinkedInUrlChecker with optional max_retries and timeout. 27 | # """ 28 | # self.timeout = timeout 29 | # self.logger = logging.getLogger(__name__) 30 | # self.logger.setLevel(logging.WARNING) 31 | # if not self.logger.handlers: 32 | # handler = logging.StreamHandler() 33 | # handler.setFormatter(logging.Formatter("%(message)s")) 34 | # self.logger.addHandler(handler) 35 | 36 | # self.session = requests.Session() 37 | 38 | # def __enter__(self): 39 | # return self 40 | 41 | # def __exit__(self, exc_type, exc_val, exc_tb): 42 | # self.session.close() 43 | 44 | # def _is_auth_wall(self, html: str) -> bool: 45 | # """ 46 | # Check if the HTML content contains any authentication wall patterns. 47 | # """ 48 | # return any(pattern in html for pattern in self.AUTH_WALL_PATTERNS) 49 | 50 | # @retry( 51 | # stop=stop_after_attempt(3), 52 | # wait=wait_random(min=3, max=7), 53 | # before_sleep=before_sleep_log(logging.getLogger(), logging.WARNING), 54 | # ) 55 | # def _make_request(self, linkedin_url: str) -> URLCheckResult: 56 | # """ 57 | # Make a request to the LinkedIn URL and return the result. 58 | # """ 59 | # timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") 60 | # try: 61 | # response = self.session.get( 62 | # linkedin_url, timeout=self.timeout, allow_redirects=True 63 | # ) 64 | # is_auth_wall = self._is_auth_wall(response.text) 65 | # if is_auth_wall or response.status_code in self.RETRY_STATUS_CODES: 66 | # error_msg = "Auth wall detected" if is_auth_wall else f"Rate limited (Status: {response.status_code})" 67 | # raise Exception(error_msg) 68 | # return URLCheckResult( 69 | # status_code=response.status_code, 70 | # error=None, 71 | # is_auth_wall=is_auth_wall, 72 | # timestamp=timestamp, 73 | # ) 74 | # except Exception as e: 75 | # error_msg = str(e) if not isinstance(e, requests.RequestException) else "Request failed" 76 | # raise Exception(error_msg) 77 | 78 | # def check_url(self, linkedin_url: str) -> URLCheckResult: 79 | # """ 80 | # Check the LinkedIn URL and return the result. 
81 | # """ 82 | # try: 83 | # return self._make_request(linkedin_url) 84 | # except Exception as e: 85 | # error_msg = str(getattr(e, "last_attempt", e).exception()) if hasattr(e, "last_attempt") else str(e) 86 | # error_msg = error_msg.replace("Exception: ", "") 87 | # return URLCheckResult( 88 | # status_code=0, 89 | # error=error_msg, 90 | # is_auth_wall=False, 91 | # timestamp=datetime.now().strftime("%Y%m%d_%H%M%S"), 92 | # ) 93 | 94 | # def main(): 95 | # """ 96 | # Main function to check a list of LinkedIn URLs. 97 | # """ 98 | # test_urls = [ 99 | # "https://www.linkedin.com/company/bright-data/", 100 | # "https://www.linkedin.com/company/aabbccdd/", 101 | # "https://www.linkedin.com/in/williamhgates", 102 | # "https://www.linkedin.com/in/99887766", 103 | # "https://www.linkedin.com/in/rbranson/", 104 | # ] 105 | 106 | # print("\nChecking LinkedIn URLs...") 107 | # print("-" * 50) 108 | 109 | # with LinkedInUrlChecker() as checker: 110 | # for url in test_urls: 111 | # result = checker.check_url(url) 112 | # status = "\u2713" if result.status_code == 200 else "\u2717" 113 | # print(f"{status} {url} - {'Error: ' + result.error if result.error else 'Status: ' + str(result.status_code)}") 114 | # time.sleep(random.uniform(3, 7)) 115 | # print("-" * 50) 116 | 117 | # if __name__ == "__main__": 118 | # main() 119 | 120 | print("\u2713") -------------------------------------------------------------------------------- /linkedin_scraper_api_codes/linkedin_posts_by_url.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import json 3 | import time 4 | from datetime import datetime 5 | 6 | 7 | class LinkedInPostCollector: 8 | def __init__(self, api_token): 9 | self.api_token = api_token 10 | self.headers = { 11 | "Authorization": f"Bearer {api_token}", 12 | "Content-Type": "application/json", 13 | } 14 | self.dataset_id = "gd_lyy3tktm25m4avu764" 15 | 16 | def collect_posts(self, post_urls): 17 | start_time = datetime.now() 18 | print( 19 | f"\nStarting collection for {len(post_urls)} posts at {start_time.strftime('%H:%M:%S')}" 20 | ) 21 | 22 | collection_response = self._trigger_collection(post_urls) 23 | if not collection_response or "snapshot_id" not in collection_response: 24 | print("Failed to initiate collection") 25 | return None 26 | snapshot_id = collection_response["snapshot_id"] 27 | print("[OK] Collection initiated") 28 | 29 | print("\nCollecting data:") 30 | while True: 31 | status = self._check_status(snapshot_id) 32 | current_time = datetime.now() 33 | elapsed = (current_time - start_time).seconds 34 | 35 | if status == "ready": 36 | print( 37 | f"\n[{current_time.strftime('%H:%M:%S')}] Collection completed after {elapsed} seconds" 38 | ) 39 | post_data = self._get_data(snapshot_id) 40 | if post_data: 41 | self._save_data(post_data) 42 | return True 43 | elif status in ["failed", "error"]: 44 | print(f"Collection failed with status: {status}") 45 | return None 46 | print( 47 | f"[{current_time.strftime('%H:%M:%S')}] Status: {status} ({elapsed}s elapsed)", 48 | end="\r", 49 | ) 50 | time.sleep(5) 51 | 52 | def _trigger_collection(self, post_urls): 53 | print("Connecting to API...") 54 | try: 55 | response = requests.post( 56 | "https://api.brightdata.com/datasets/v3/trigger", 57 | headers=self.headers, 58 | params={"dataset_id": self.dataset_id}, 59 | json=post_urls, 60 | timeout=30, 61 | ) 62 | response.raise_for_status() 63 | return response.json() 64 | except requests.exceptions.RequestException as e: 65 | 
print(f"Failed to trigger collection: {str(e)}") 66 | return None 67 | 68 | def _check_status(self, snapshot_id): 69 | try: 70 | response = requests.get( 71 | f"https://api.brightdata.com/datasets/v3/progress/{snapshot_id}", 72 | headers=self.headers, 73 | timeout=30, 74 | ) 75 | response.raise_for_status() 76 | return response.json().get("status") 77 | except requests.exceptions.RequestException: 78 | return "error" 79 | 80 | def _get_data(self, snapshot_id): 81 | try: 82 | response = requests.get( 83 | f"https://api.brightdata.com/datasets/v3/snapshot/{snapshot_id}", 84 | headers=self.headers, 85 | params={"format": "json"}, 86 | timeout=30, 87 | ) 88 | response.raise_for_status() 89 | return response.json() 90 | except requests.exceptions.RequestException: 91 | return None 92 | 93 | def _save_data(self, data, filename="linkedin_posts_url.json"): 94 | try: 95 | with open(filename, "w", encoding="utf-8") as f: 96 | json.dump(data, f, indent=2, ensure_ascii=False) 97 | except Exception as e: 98 | print(f"Error saving data: {str(e)}") 99 | 100 | 101 | def main(): 102 | api_token = "" 103 | collector = LinkedInPostCollector(api_token) 104 | 105 | posts = [ 106 | { 107 | "url": "https://www.linkedin.com/pulse/ab-test-optimisation-earlier-decisions-new-readout-de-b%C3%A9naz%C3%A9?trk=public_profile_article_view" 108 | }, 109 | { 110 | "url": "https://www.linkedin.com/posts/orlenchner_scrapecon-activity-7180537307521769472-oSYN?trk=public_profile" 111 | }, 112 | { 113 | "url": "https://www.linkedin.com/posts/karin-dodis_web-data-collection-for-businesses-bright-activity-7176601589682434049-Aakz?trk=public_profile" 114 | }, 115 | { 116 | "url": "https://www.linkedin.com/pulse/getting-value-out-sunburst-guillaume-de-b%C3%A9naz%C3%A9?trk=public_profile_article_view" 117 | }, 118 | ] 119 | 120 | collector.collect_posts(posts) 121 | 122 | 123 | if __name__ == "__main__": 124 | main() -------------------------------------------------------------------------------- /linkedin_scraper_api_codes/linkedin_profile_by_url.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import json 3 | import time 4 | from datetime import datetime 5 | from typing import List, Dict, Optional, Any 6 | 7 | 8 | class LinkedInProfileInfo: 9 | def __init__(self, api_token: str, dataset_id: str = "gd_l1viktl72bvl7bjuj0"): 10 | self.api_token = api_token 11 | self.headers = { 12 | "Authorization": f"Bearer {api_token}", 13 | "Content-Type": "application/json", 14 | } 15 | self.dataset_id = dataset_id 16 | 17 | def collect_profile_info( 18 | self, profile_urls: List[Dict[str, str]] 19 | ) -> Optional[bool]: 20 | try: 21 | start_time = datetime.now() 22 | print( 23 | f"\nStarting collection for {len(profile_urls)} profiles at {start_time.strftime('%H:%M:%S')}" 24 | ) 25 | 26 | collection_response = self._trigger_collection(profile_urls) 27 | if not collection_response or "snapshot_id" not in collection_response: 28 | raise ValueError("Failed to initiate data collection") 29 | snapshot_id = collection_response["snapshot_id"] 30 | print("\nCollecting data:") 31 | 32 | while True: 33 | status = self._check_status(snapshot_id) 34 | elapsed = (datetime.now() - start_time).seconds 35 | 36 | print(f"\rStatus: {status} ({elapsed}s elapsed)", end="", flush=True) 37 | 38 | if status == "ready": 39 | print(f"\nCollection completed after {elapsed} seconds") 40 | profile_data = self._get_data(snapshot_id) 41 | if profile_data: 42 | self._save_data(profile_data) 43 | print(f"✓ Collected 
{len(profile_data)} profiles") 44 | return True 45 | break 46 | elif status in ["failed", "error"]: 47 | print(f"\nCollection failed with status: {status}") 48 | return None 49 | time.sleep(5) 50 | except Exception as e: 51 | print(f"\nERROR: {str(e)}") 52 | return None 53 | 54 | def _trigger_collection( 55 | self, profile_urls: List[Dict[str, str]] 56 | ) -> Optional[Dict[str, Any]]: 57 | try: 58 | print("Connecting to API...") 59 | response = requests.post( 60 | "https://api.brightdata.com/datasets/v3/trigger", 61 | headers=self.headers, 62 | params={"dataset_id": self.dataset_id}, 63 | json=profile_urls, 64 | timeout=30, 65 | ) 66 | response.raise_for_status() 67 | return response.json() 68 | except requests.exceptions.RequestException as e: 69 | print(f"Failed to trigger collection: {str(e)}") 70 | return None 71 | 72 | def _check_status(self, snapshot_id: str) -> str: 73 | try: 74 | response = requests.get( 75 | f"https://api.brightdata.com/datasets/v3/progress/{snapshot_id}", 76 | headers=self.headers, 77 | timeout=30, 78 | ) 79 | response.raise_for_status() 80 | return response.json().get("status", "error") 81 | except requests.exceptions.RequestException: 82 | return "error" 83 | 84 | def _get_data(self, snapshot_id: str) -> Optional[Dict[str, Any]]: 85 | try: 86 | response = requests.get( 87 | f"https://api.brightdata.com/datasets/v3/snapshot/{snapshot_id}", 88 | headers=self.headers, 89 | params={"format": "json"}, 90 | timeout=30, 91 | ) 92 | response.raise_for_status() 93 | return response.json() 94 | except requests.exceptions.RequestException: 95 | return None 96 | 97 | def _save_data( 98 | self, data: Dict[str, Any], filename: str = "profiles_by_url.json" 99 | ) -> None: 100 | try: 101 | with open(filename, "w", encoding="utf-8") as f: 102 | json.dump(data, f, indent=2, ensure_ascii=False) 103 | print(f"✓ Data saved to {filename}") 104 | except Exception as e: 105 | print(f"Error saving data: {str(e)}") 106 | 107 | 108 | def main(): 109 | api_token = "" 110 | collector = LinkedInProfileInfo(api_token) 111 | 112 | profiles = [ 113 | {"url": "https://www.linkedin.com/in/williamhgates"}, 114 | {"url": "https://www.linkedin.com/in/rbranson/"}, 115 | {"url": "https://www.linkedin.com/in/justinwelsh/"}, 116 | {"url": "https://www.linkedin.com/in/simonsinek/"}, 117 | ] 118 | 119 | collector.collect_profile_info(profiles) 120 | 121 | 122 | if __name__ == "__main__": 123 | main() -------------------------------------------------------------------------------- /linkedin_scraper_api_codes/linkedin_company_info_by_url.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import json 3 | import time 4 | import logging 5 | from datetime import datetime 6 | from typing import List, Dict, Optional, Any 7 | 8 | 9 | logging.basicConfig( 10 | level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s" 11 | ) 12 | 13 | 14 | class LinkedInCompanyInfo: 15 | API_BASE_URL = "https://api.brightdata.com/datasets/v3" 16 | DATASET_ID = "gd_l1vikfnt1wgvvqz95w" 17 | 18 | def __init__(self, api_token: str): 19 | self.api_token = api_token 20 | self.headers = { 21 | "Authorization": f"Bearer {api_token}", 22 | "Content-Type": "application/json", 23 | } 24 | 25 | def collect_company_info( 26 | self, company_urls: List[Dict[str, str]] 27 | ) -> Optional[bool]: 28 | start_time = datetime.now() 29 | logging.info( 30 | f"Starting collection for {len(company_urls)} companies at {start_time.strftime('%H:%M:%S')}" 31 | ) 32 | 33 | 
collection_response = self._trigger_collection(company_urls) 34 | if not collection_response or "snapshot_id" not in collection_response: 35 | logging.error("Failed to initiate data collection") 36 | return None 37 | snapshot_id = collection_response["snapshot_id"] 38 | logging.info("Collection initiated") 39 | 40 | logging.info("Collecting data:") 41 | while True: 42 | status = self._check_status(snapshot_id) 43 | current_time = datetime.now() 44 | elapsed = (current_time - start_time).seconds 45 | 46 | if status == "ready": 47 | logging.info(f"Collection completed after {elapsed} seconds") 48 | company_data = self._fetch_data(snapshot_id) 49 | if company_data: 50 | break 51 | elif status in ["failed", "error"]: 52 | logging.error(f"Collection failed with status: {status}") 53 | return None 54 | logging.info(f"Status: {status} ({elapsed}s elapsed)") 55 | time.sleep(5) 56 | self._save_data(company_data) 57 | return True 58 | 59 | def _trigger_collection( 60 | self, company_urls: List[Dict[str, str]] 61 | ) -> Optional[Dict[str, Any]]: 62 | try: 63 | logging.info("Connecting to API...") 64 | response = requests.post( 65 | f"{self.API_BASE_URL}/trigger", 66 | headers=self.headers, 67 | params={"dataset_id": self.DATASET_ID}, 68 | json=company_urls, 69 | timeout=30, 70 | ) 71 | response.raise_for_status() 72 | return response.json() 73 | except requests.exceptions.RequestException as e: 74 | logging.error(f"Failed to trigger collection: {str(e)}") 75 | return None 76 | 77 | def _check_status(self, snapshot_id: str) -> Optional[str]: 78 | try: 79 | response = requests.get( 80 | f"{self.API_BASE_URL}/progress/{snapshot_id}", 81 | headers=self.headers, 82 | timeout=30, 83 | ) 84 | response.raise_for_status() 85 | return response.json().get("status") 86 | except requests.exceptions.RequestException: 87 | return "error" 88 | 89 | def _fetch_data(self, snapshot_id: str) -> Optional[List[Dict[str, Any]]]: 90 | try: 91 | response = requests.get( 92 | f"{self.API_BASE_URL}/snapshot/{snapshot_id}", 93 | headers=self.headers, 94 | params={"format": "json"}, 95 | timeout=30, 96 | ) 97 | response.raise_for_status() 98 | return response.json() 99 | except requests.exceptions.RequestException: 100 | return None 101 | 102 | def _save_data( 103 | self, data: List[Dict[str, Any]], filename: str = "linkedin_company_info.json" 104 | ) -> None: 105 | try: 106 | with open(filename, "w", encoding="utf-8") as f: 107 | json.dump(data, f, indent=2, ensure_ascii=False) 108 | except Exception as e: 109 | logging.error(f"Error saving data: {str(e)}") 110 | 111 | 112 | def main() -> None: 113 | api_token = "" 114 | collector = LinkedInCompanyInfo(api_token) 115 | 116 | companies = [ 117 | {"url": "https://il.linkedin.com/company/ibm"}, 118 | {"url": "https://www.linkedin.com/company/stalkit"}, 119 | { 120 | "url": "https://www.linkedin.com/organization-guest/company/the-kraft-heinz-company" 121 | }, 122 | {"url": "https://il.linkedin.com/company/bright-data"}, 123 | ] 124 | 125 | collector.collect_company_info(companies) 126 | 127 | 128 | if __name__ == "__main__": 129 | main() -------------------------------------------------------------------------------- /linkedin_scraper_api_codes/linkedin_jobs_by_keyword.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import json 3 | import time 4 | import logging 5 | from datetime import datetime 6 | 7 | logging.basicConfig( 8 | level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s" 9 | ) 10 | 11 | 12 | 
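# Discovers LinkedIn job postings by keyword via Bright Data's dataset API: trigger a discovery snapshot, poll its progress, then download and save the results.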
class LinkedInJobsDiscovery: 13 | def __init__(self, api_token): 14 | self.api_token = api_token 15 | self.headers = { 16 | "Authorization": f"Bearer {api_token}", 17 | "Content-Type": "application/json", 18 | } 19 | self.dataset_id = "gd_lpfll7v5hcqtkxl6l" 20 | 21 | def discover_jobs(self, search_criteria): 22 | try: 23 | start_time = time.time() 24 | logging.info("Discovering jobs") 25 | 26 | trigger_response = self._trigger_collection(search_criteria) 27 | if not trigger_response or "snapshot_id" not in trigger_response: 28 | raise Exception("Failed to initiate job discovery") 29 | snapshot_id = trigger_response["snapshot_id"] 30 | jobs_data = None 31 | 32 | while True: 33 | status = self._check_status(snapshot_id) 34 | elapsed = int(time.time() - start_time) 35 | 36 | if status == "running": 37 | logging.info(f"Status: {status} ({elapsed}s elapsed)") 38 | time.sleep(5) 39 | continue 40 | elif status == "ready": 41 | if jobs_data is None: 42 | jobs_data = self._get_data(snapshot_id) 43 | if jobs_data: 44 | logging.info(f"Discovery completed after {elapsed} seconds") 45 | self._save_data(jobs_data) 46 | return jobs_data 47 | break 48 | elif status in ["failed", "error"]: 49 | raise Exception(f"Discovery failed with status: {status}") 50 | time.sleep(5) 51 | except Exception as e: 52 | logging.error(f"Error during job discovery: {str(e)}") 53 | return None 54 | 55 | def _trigger_collection(self, search_criteria): 56 | try: 57 | response = requests.post( 58 | "https://api.brightdata.com/datasets/v3/trigger", 59 | headers=self.headers, 60 | params={ 61 | "dataset_id": self.dataset_id, 62 | "type": "discover_new", 63 | "discover_by": "keyword", 64 | "include_errors": "true", 65 | }, 66 | json=search_criteria, 67 | timeout=30, 68 | ) 69 | response.raise_for_status() 70 | return response.json() 71 | except requests.exceptions.RequestException as e: 72 | logging.error(f"Error triggering discovery: {str(e)}") 73 | return None 74 | 75 | def _check_status(self, snapshot_id): 76 | try: 77 | response = requests.get( 78 | f"https://api.brightdata.com/datasets/v3/progress/{snapshot_id}", 79 | headers=self.headers, 80 | timeout=30, 81 | ) 82 | response.raise_for_status() 83 | return response.json().get("status") 84 | except requests.exceptions.RequestException as e: 85 | logging.error(f"Error checking status: {str(e)}") 86 | return "error" 87 | 88 | def _get_data(self, snapshot_id): 89 | try: 90 | response = requests.get( 91 | f"https://api.brightdata.com/datasets/v3/snapshot/{snapshot_id}", 92 | headers=self.headers, 93 | params={"format": "json"}, 94 | timeout=30, 95 | ) 96 | response.raise_for_status() 97 | return response.json() 98 | except requests.exceptions.RequestException as e: 99 | logging.error(f"Error retrieving data: {str(e)}") 100 | return None 101 | 102 | def _save_data(self, data, filename="linkedin_jobs_keyword.json"): 103 | try: 104 | with open(filename, "w", encoding="utf-8") as f: 105 | json.dump(data, f, indent=2, ensure_ascii=False) 106 | logging.info(f"Data saved to {filename}") 107 | logging.info(f"Discovered {len(data)} jobs") 108 | except Exception as e: 109 | logging.error(f"Error saving data: {str(e)}") 110 | 111 | def _get_timestamp(self): 112 | return datetime.now().strftime("%H:%M:%S") 113 | 114 | 115 | def main(): 116 | api_token = "" 117 | discoverer = LinkedInJobsDiscovery(api_token) 118 | 119 | search_criteria = [ 120 | { 121 | "location": "New York", 122 | "keyword": "data analyst", 123 | "country": "US", 124 | "time_range": "Any time", 125 | "job_type": "Part-time", 
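# Assumption: filter values ("Part-time", "Entry level", etc.) must match LinkedIn's facet labels verbatim; an empty string (as with "company" below) appears to leave that filter unset.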
126 | "experience_level": "Entry level", 127 | "remote": "Remote", 128 | "company": "", 129 | }, 130 | ] 131 | 132 | discoverer.discover_jobs(search_criteria) 133 | 134 | 135 | if __name__ == "__main__": 136 | main() 137 | -------------------------------------------------------------------------------- /linkedin_scraper_api_codes/linkedin_posts_by_profile_url.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import json 3 | import time 4 | from datetime import datetime 5 | 6 | 7 | class LinkedInPostDiscovery: 8 | def __init__(self, api_token): 9 | self.api_token = api_token 10 | self.headers = { 11 | "Authorization": f"Bearer {api_token}", 12 | "Content-Type": "application/json", 13 | } 14 | self.dataset_id = "gd_lyy3tktm25m4avu764" 15 | 16 | def discover_posts(self, profile_urls): 17 | try: 18 | start_time = datetime.now() 19 | print( 20 | f"\nStarting discovery for {len(profile_urls)} profiles at {start_time.strftime('%H:%M:%S')}" 21 | ) 22 | 23 | collection_response = self._trigger_discovery(profile_urls) 24 | if not collection_response or "snapshot_id" not in collection_response: 25 | raise Exception("Failed to initiate discovery") 26 | snapshot_id = collection_response["snapshot_id"] 27 | print("[OK] Discovery initiated") 28 | 29 | print("\nCollecting data:") 30 | while True: 31 | status = self._check_status(snapshot_id) 32 | current_time = datetime.now() 33 | elapsed = (current_time - start_time).seconds 34 | 35 | if status == "ready": 36 | print( 37 | f"\n[{current_time.strftime('%H:%M:%S')}] Discovery completed after {elapsed} seconds" 38 | ) 39 | post_data = self._get_data(snapshot_id) 40 | if post_data: 41 | self._save_data(post_data) 42 | break 43 | else: 44 | raise Exception( 45 | "Failed to retrieve data after discovery completion" 46 | ) 47 | elif status in ["failed", "error"]: 48 | raise Exception(f"Discovery failed with status: {status}") 49 | print( 50 | f"[{current_time.strftime('%H:%M:%S')}] Status: {status} ({elapsed}s elapsed)", 51 | end="\r", 52 | ) 53 | time.sleep(5) 54 | return True 55 | except Exception as e: 56 | print(f"\nERROR: {str(e)}") 57 | return None 58 | 59 | def _trigger_discovery(self, profile_urls): 60 | try: 61 | print("Connecting to API...") 62 | response = requests.post( 63 | "https://api.brightdata.com/datasets/v3/trigger", 64 | headers=self.headers, 65 | params={ 66 | "dataset_id": self.dataset_id, 67 | "type": "discover_new", 68 | "discover_by": "profile_url", 69 | }, 70 | json=profile_urls, 71 | timeout=30, 72 | ) 73 | response.raise_for_status() 74 | return response.json() 75 | except requests.exceptions.RequestException as e: 76 | print(f"Failed to trigger discovery: {str(e)}") 77 | return None 78 | 79 | def _check_status(self, snapshot_id): 80 | try: 81 | response = requests.get( 82 | f"https://api.brightdata.com/datasets/v3/progress/{snapshot_id}", 83 | headers=self.headers, 84 | timeout=30, 85 | ) 86 | response.raise_for_status() 87 | return response.json().get("status") 88 | except requests.exceptions.RequestException: 89 | return "error" 90 | 91 | def _get_data(self, snapshot_id): 92 | try: 93 | response = requests.get( 94 | f"https://api.brightdata.com/datasets/v3/snapshot/{snapshot_id}", 95 | headers=self.headers, 96 | params={"format": "json"}, 97 | timeout=30, 98 | ) 99 | response.raise_for_status() 100 | return response.json() 101 | except requests.exceptions.RequestException: 102 | return None 103 | 104 | def _save_data(self, data, filename="posts_by_profile.json"): 105 | 
try: 106 | with open(filename, "w", encoding="utf-8") as f: 107 | json.dump(data, f, indent=2, ensure_ascii=False) 108 | except Exception as e: 109 | print(f"Error saving data: {str(e)}") 110 | 111 | 112 | def main(): 113 | api_token = "" 114 | discoverer = LinkedInPostDiscovery(api_token) 115 | 116 | profiles = [ 117 | { 118 | "url": "https://www.linkedin.com/in/luca-rossi-0aa497bb", 119 | "start_date": "2024-10-01T00:00:00.000Z", 120 | "end_date": "2024-10-09T00:00:00.000Z", 121 | }, 122 | { 123 | "url": "https://www.linkedin.com/in/srijith-gomattam-401059214", 124 | "start_date": "2024-09-01T00:00:00.000Z", 125 | "end_date": "2024-10-01T00:00:00.000Z", 126 | }, 127 | { 128 | "url": "https://www.linkedin.com/in/anna-clarke-0a342513", 129 | "start_date": "2024-10-01T00:00:00.000Z", 130 | }, 131 | ] 132 | 133 | discoverer.discover_posts(profiles) 134 | 135 | 136 | if __name__ == "__main__": 137 | main() -------------------------------------------------------------------------------- /linkedin_scraper_api_codes/linkedin_posts_discover_by_url.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import json 3 | import time 4 | import logging 5 | from datetime import datetime 6 | from typing import List, Dict, Any, Optional 7 | 8 | logging.basicConfig( 9 | level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s" 10 | ) 11 | 12 | API_URL = "https://api.brightdata.com/datasets/v3" 13 | DATASET_ID = "gd_lyy3tktm25m4avu764" 14 | DISCOVER_TYPE = "discover_new" 15 | DISCOVER_BY = "url" 16 | STATUS_READY = "ready" 17 | STATUS_FAILED = "failed" 18 | STATUS_ERROR = "error" 19 | DEFAULT_FILENAME = "discovered_posts_by_url.json" 20 | 21 | 22 | class LinkedInArticleDiscovery: 23 | def __init__(self, api_token: str): 24 | self.api_token = api_token 25 | self.headers = { 26 | "Authorization": f"Bearer {api_token}", 27 | "Content-Type": "application/json", 28 | } 29 | 30 | def discover_articles(self, author_urls: List[Dict[str, Any]]) -> bool: 31 | """ 32 | Discover articles for a list of author URLs. 33 | """ 34 | start_time = datetime.now() 35 | logging.info( 36 | f"Starting discovery for {len(author_urls)} authors at {start_time.strftime('%H:%M:%S')}" 37 | ) 38 | 39 | collection_response = self._trigger_discovery(author_urls) 40 | if not collection_response or "snapshot_id" not in collection_response: 41 | logging.error("Failed to initiate discovery") 42 | return False 43 | snapshot_id = collection_response["snapshot_id"] 44 | logging.info("Discovery initiated") 45 | 46 | logging.info("Collecting data:") 47 | while True: 48 | status = self._check_status(snapshot_id) 49 | current_time = datetime.now() 50 | elapsed = (current_time - start_time).seconds 51 | 52 | if status == STATUS_READY: 53 | logging.info(f"Discovery completed after {elapsed} seconds") 54 | article_data = self._get_data(snapshot_id) 55 | if article_data: 56 | self._save_data(article_data) 57 | return True 58 | elif status in [STATUS_FAILED, STATUS_ERROR]: 59 | logging.error(f"Discovery failed with status: {status}") 60 | return False 61 | print(f"\rStatus: {status} ({elapsed}s elapsed)", end="") 62 | time.sleep(5) 63 | 64 | def _trigger_discovery( 65 | self, author_urls: List[Dict[str, Any]] 66 | ) -> Optional[Dict[str, Any]]: 67 | """ 68 | Trigger the discovery process. 
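Each author entry may include an optional "limit" field (see main below), which appears to cap how many articles are discovered per author.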
69 | """ 70 | try: 71 | logging.info("Connecting to API...") 72 | response = requests.post( 73 | f"{API_URL}/trigger", 74 | headers=self.headers, 75 | params={ 76 | "dataset_id": DATASET_ID, 77 | "type": DISCOVER_TYPE, 78 | "discover_by": DISCOVER_BY, 79 | }, 80 | json=author_urls, 81 | timeout=30, 82 | ) 83 | response.raise_for_status() 84 | return response.json() 85 | except requests.exceptions.RequestException as e: 86 | logging.error(f"Failed to trigger discovery: {str(e)}") 87 | return None 88 | 89 | def _check_status(self, snapshot_id: str) -> str: 90 | """ 91 | Check the status of the discovery process. 92 | """ 93 | try: 94 | response = requests.get( 95 | f"{API_URL}/progress/{snapshot_id}", 96 | headers=self.headers, 97 | timeout=30, 98 | ) 99 | response.raise_for_status() 100 | return response.json().get("status", STATUS_ERROR) 101 | except requests.exceptions.RequestException: 102 | return STATUS_ERROR 103 | 104 | def _get_data(self, snapshot_id: str) -> Optional[Dict[str, Any]]: 105 | """ 106 | Retrieve the discovered data. 107 | """ 108 | try: 109 | response = requests.get( 110 | f"{API_URL}/snapshot/{snapshot_id}", 111 | headers=self.headers, 112 | params={"format": "json"}, 113 | timeout=30, 114 | ) 115 | response.raise_for_status() 116 | return response.json() 117 | except requests.exceptions.RequestException: 118 | return None 119 | 120 | def _save_data( 121 | self, data: Dict[str, Any], filename: str = DEFAULT_FILENAME 122 | ) -> None: 123 | """ 124 | Save the discovered data to a file. 125 | """ 126 | try: 127 | with open(filename, "w", encoding="utf-8") as f: 128 | json.dump(data, f, indent=2, ensure_ascii=False) 129 | logging.info(f"Data saved to {filename}") 130 | except Exception as e: 131 | logging.error(f"Error saving data: {str(e)}") 132 | 133 | 134 | def main() -> None: 135 | api_token = "" 136 | discoverer = LinkedInArticleDiscovery(api_token) 137 | 138 | authors = [ 139 | { 140 | "url": "https://www.linkedin.com/today/author/cristianbrunori?trk=public_post_follow-articles", 141 | "limit": 50, 142 | }, 143 | { 144 | "url": "https://www.linkedin.com/today/author/stevenouri?trk=public_post_follow-articles" 145 | }, 146 | ] 147 | 148 | discoverer.discover_articles(authors) 149 | 150 | 151 | if __name__ == "__main__": 152 | main() -------------------------------------------------------------------------------- /free_scraper/jobs_scraper.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from typing import List, Optional 3 | import requests 4 | from bs4 import BeautifulSoup 5 | import time 6 | import random 7 | import json 8 | from urllib.parse import quote 9 | from requests.adapters import HTTPAdapter 10 | from urllib3.util import Retry 11 | 12 | 13 | @dataclass 14 | class JobData: 15 | title: str 16 | company: str 17 | location: str 18 | job_link: str 19 | posted_date: str 20 | 21 | 22 | class ScraperConfig: 23 | BASE_URL = "https://www.linkedin.com/jobs-guest/jobs/api/seeMoreJobPostings/search" 24 | JOBS_PER_PAGE = 25 25 | MIN_DELAY = 2 26 | MAX_DELAY = 5 27 | RATE_LIMIT_DELAY = 30 28 | RATE_LIMIT_THRESHOLD = 10 29 | 30 | HEADERS = { 31 | "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36", 32 | "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8", 33 | "Accept-Language": "en-US,en;q=0.5", 34 | "Accept-Encoding": "gzip, deflate, br", 35 | "Connection": "keep-alive", 36 | "DNT": 
"1", 37 | "Cache-Control": "no-cache", 38 | } 39 | 40 | 41 | class LinkedInJobsScraper: 42 | def __init__(self): 43 | self.session = self._setup_session() 44 | 45 | def _setup_session(self) -> requests.Session: 46 | session = requests.Session() 47 | retries = Retry( 48 | total=5, backoff_factor=0.5, status_forcelist=[429, 500, 502, 503, 504] 49 | ) 50 | session.mount("https://", HTTPAdapter(max_retries=retries)) 51 | return session 52 | 53 | def _build_search_url(self, keywords: str, location: str, start: int = 0) -> str: 54 | params = { 55 | "keywords": keywords, 56 | "location": location, 57 | "start": start, 58 | } 59 | return f"{ScraperConfig.BASE_URL}?{'&'.join(f'{k}={quote(str(v))}' for k, v in params.items())}" 60 | 61 | def _clean_job_url(self, url: str) -> str: 62 | return url.split("?")[0] if "?" in url else url 63 | 64 | def _extract_job_data(self, job_card: BeautifulSoup) -> Optional[JobData]: 65 | try: 66 | title = job_card.find("h3", class_="base-search-card__title").text.strip() 67 | company = job_card.find( 68 | "h4", class_="base-search-card__subtitle" 69 | ).text.strip() 70 | location = job_card.find( 71 | "span", class_="job-search-card__location" 72 | ).text.strip() 73 | job_link = self._clean_job_url( 74 | job_card.find("a", class_="base-card__full-link")["href"] 75 | ) 76 | posted_date = job_card.find("time", class_="job-search-card__listdate") 77 | posted_date = posted_date.text.strip() if posted_date else "N/A" 78 | 79 | return JobData( 80 | title=title, 81 | company=company, 82 | location=location, 83 | job_link=job_link, 84 | posted_date=posted_date, 85 | ) 86 | except Exception as e: 87 | print(f"Failed to extract job data: {str(e)}") 88 | return None 89 | 90 | def _fetch_job_page(self, url: str) -> BeautifulSoup: 91 | try: 92 | response = self.session.get(url, headers=ScraperConfig.HEADERS) 93 | if response.status_code != 200: 94 | raise RuntimeError( 95 | f"Failed to fetch data: Status code {response.status_code}" 96 | ) 97 | return BeautifulSoup(response.text, "html.parser") 98 | except requests.RequestException as e: 99 | raise RuntimeError(f"Request failed: {str(e)}") 100 | 101 | def scrape_jobs( 102 | self, keywords: str, location: str, max_jobs: int = 100 103 | ) -> List[JobData]: 104 | all_jobs = [] 105 | start = 0 106 | 107 | while len(all_jobs) < max_jobs: 108 | try: 109 | url = self._build_search_url(keywords, location, start) 110 | soup = self._fetch_job_page(url) 111 | job_cards = soup.find_all("div", class_="base-card") 112 | 113 | if not job_cards: 114 | break 115 | for card in job_cards: 116 | job_data = self._extract_job_data(card) 117 | if job_data: 118 | all_jobs.append(job_data) 119 | if len(all_jobs) >= max_jobs: 120 | break 121 | print(f"Scraped {len(all_jobs)} jobs...") 122 | start += ScraperConfig.JOBS_PER_PAGE 123 | time.sleep( 124 | random.uniform(ScraperConfig.MIN_DELAY, ScraperConfig.MAX_DELAY) 125 | ) 126 | except Exception as e: 127 | print(f"Scraping error: {str(e)}") 128 | break 129 | return all_jobs[:max_jobs] 130 | 131 | def save_results( 132 | self, jobs: List[JobData], filename: str = "linkedin_jobs.json" 133 | ) -> None: 134 | if not jobs: 135 | return 136 | with open(filename, "w", encoding="utf-8") as f: 137 | json.dump([vars(job) for job in jobs], f, indent=2, ensure_ascii=False) 138 | print(f"Saved {len(jobs)} jobs to {filename}") 139 | 140 | 141 | def main(): 142 | params = {"keywords": "AI/ML Engineer", "location": "London", "max_jobs": 100} 143 | 144 | scraper = LinkedInJobsScraper() 145 | jobs = 
scraper.scrape_jobs(**params) 146 | scraper.save_results(jobs) 147 | 148 | 149 | if __name__ == "__main__": 150 | main() -------------------------------------------------------------------------------- /linkedin_scraper_api_codes/linkedin_posts_by_company_url.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import json 3 | import time 4 | import logging 5 | import os 6 | from datetime import datetime 7 | from typing import List, Dict, Any, Optional 8 | 9 | # Configure logging 10 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') 11 | 12 | class LinkedInPostsCollector: 13 | def __init__(self, api_token: str, dataset_id: str = "gd_lyy3tktm25m4avu764", sleep_interval: int = 5, timeout: int = 30): 14 | self.api_token = api_token 15 | self.headers = { 16 | "Authorization": f"Bearer {api_token}", 17 | "Content-Type": "application/json", 18 | } 19 | self.dataset_id = dataset_id 20 | self.sleep_interval = sleep_interval 21 | self.timeout = timeout 22 | 23 | def collect_posts(self, company_data: List[Dict[str, Any]]) -> Optional[List[Dict[str, Any]]]: 24 | try: 25 | start_time = time.time() 26 | logging.info("Collecting posts:") 27 | 28 | trigger_response = self._trigger_collection(company_data) 29 | if not trigger_response or 'snapshot_id' not in trigger_response: 30 | raise Exception("Failed to initiate data collection") 31 | 32 | snapshot_id = trigger_response['snapshot_id'] 33 | posts_data = None 34 | 35 | while True: 36 | status = self._check_status(snapshot_id) 37 | elapsed = int(time.time() - start_time) 38 | 39 | if status == "running": 40 | logging.info(f"Status: {status} ({elapsed}s elapsed)") 41 | time.sleep(self.sleep_interval) 42 | continue 43 | 44 | elif status == "ready": 45 | if posts_data is None: 46 | posts_data = self._get_data(snapshot_id) 47 | if posts_data: 48 | logging.info(f"Collection completed after {elapsed} seconds") 49 | self._save_data(posts_data) 50 | return posts_data 51 | break 52 | 53 | elif status in ["failed", "error"]: 54 | raise Exception(f"Collection failed with status: {status}") 55 | 56 | time.sleep(self.sleep_interval) 57 | 58 | except Exception as e: 59 | logging.error(f"Error during collection: {str(e)}") 60 | return None 61 | 62 | def _trigger_collection(self, company_data: List[Dict[str, Any]]) -> Optional[Dict[str, Any]]: 63 | try: 64 | response = requests.post( 65 | "https://api.brightdata.com/datasets/v3/trigger", 66 | headers=self.headers, 67 | params={ 68 | "dataset_id": self.dataset_id, 69 | "type": "discover_new", 70 | "discover_by": "company_url", 71 | "include_errors": "true" 72 | }, 73 | json=company_data, 74 | timeout=self.timeout 75 | ) 76 | response.raise_for_status() 77 | return response.json() 78 | except requests.exceptions.RequestException as e: 79 | logging.error(f"Error triggering collection: {str(e)}") 80 | return None 81 | 82 | def _check_status(self, snapshot_id: str) -> str: 83 | try: 84 | response = requests.get( 85 | f"https://api.brightdata.com/datasets/v3/progress/{snapshot_id}", 86 | headers=self.headers, 87 | timeout=self.timeout 88 | ) 89 | response.raise_for_status() 90 | return response.json().get("status", "error") 91 | except requests.exceptions.RequestException as e: 92 | logging.error(f"Error checking status: {str(e)}") 93 | return "error" 94 | 95 | def _get_data(self, snapshot_id: str) -> Optional[List[Dict[str, Any]]]: 96 | try: 97 | response = requests.get( 98 | 
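# Snapshot endpoint: returns the full collected result set once progress has reported "ready".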
f"https://api.brightdata.com/datasets/v3/snapshot/{snapshot_id}", 99 | headers=self.headers, 100 | params={"format": "json"}, 101 | timeout=self.timeout 102 | ) 103 | response.raise_for_status() 104 | return response.json() 105 | except requests.exceptions.RequestException as e: 106 | logging.error(f"Error retrieving data: {str(e)}") 107 | return None 108 | 109 | def _save_data(self, data: List[Dict[str, Any]], filename: str = "linkedin_posts_company_url.json") -> None: 110 | try: 111 | with open(filename, "w", encoding="utf-8") as f: 112 | json.dump(data, f, indent=2, ensure_ascii=False) 113 | logging.info(f"Data saved to {filename}") 114 | logging.info(f"Collected {len(data)} posts") 115 | except Exception as e: 116 | logging.error(f"Error saving data: {str(e)}") 117 | 118 | def _get_timestamp(self) -> str: 119 | return datetime.now().strftime("%H:%M:%S") 120 | 121 | def main() -> None: 122 | api_token = "" 123 | if not api_token: 124 | logging.error("API token not found. Please set the API_TOKEN environment variable.") 125 | return 126 | 127 | collector = LinkedInPostsCollector(api_token) 128 | 129 | companies = [ 130 | { 131 | "url": "https://www.linkedin.com/company/lanieri", 132 | } 133 | ] 134 | 135 | collector.collect_posts(companies) 136 | 137 | if __name__ == "__main__": 138 | main() 139 | -------------------------------------------------------------------------------- /linkedin_scraper_api_codes/linkedin_jobs_by_search_url.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import json 3 | import time 4 | import logging 5 | from datetime import datetime 6 | from typing import List, Dict, Any, Optional 7 | 8 | # Configure logging 9 | 10 | logging.basicConfig( 11 | level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s" 12 | ) 13 | 14 | BRIGHTDATA_API_URL = "https://api.brightdata.com/datasets/v3" 15 | 16 | 17 | class LinkedInJobsURLDiscovery: 18 | def __init__( 19 | self, 20 | api_token: str, 21 | dataset_id: str = "gd_lpfll7v5hcqtkxl6l", 22 | sleep_interval: int = 5, 23 | timeout: int = 30, 24 | ): 25 | self.api_token = api_token 26 | self.headers = { 27 | "Authorization": f"Bearer {api_token}", 28 | "Content-Type": "application/json", 29 | } 30 | self.dataset_id = dataset_id 31 | self.sleep_interval = sleep_interval 32 | self.timeout = timeout 33 | 34 | def discover_jobs( 35 | self, search_urls: List[Dict[str, str]] 36 | ) -> Optional[List[Dict[str, Any]]]: 37 | try: 38 | start_time = time.time() 39 | logging.info("Discovering jobs:") 40 | 41 | trigger_response = self._trigger_collection(search_urls) 42 | if not trigger_response or "snapshot_id" not in trigger_response: 43 | raise ValueError("Failed to initiate job discovery") 44 | snapshot_id = trigger_response["snapshot_id"] 45 | jobs_data = None 46 | 47 | while True: 48 | status = self._check_status(snapshot_id) 49 | elapsed = int(time.time() - start_time) 50 | 51 | if status == "running": 52 | logging.info(f"Status: {status} ({elapsed}s elapsed)") 53 | time.sleep(self.sleep_interval) 54 | continue 55 | elif status == "ready": 56 | if jobs_data is None: 57 | jobs_data = self._get_data(snapshot_id) 58 | if jobs_data: 59 | logging.info(f"Discovery completed after {elapsed} seconds") 60 | self._save_data(jobs_data) 61 | return jobs_data 62 | break 63 | elif status in ["failed", "error"]: 64 | raise RuntimeError(f"Discovery failed with status: {status}") 65 | time.sleep(self.sleep_interval) 66 | except Exception as e: 67 | logging.error(f"Error during job 
discovery: {e}") 68 | return None 69 | 70 | def _trigger_collection( 71 | self, search_urls: List[Dict[str, str]] 72 | ) -> Optional[Dict[str, Any]]: 73 | try: 74 | response = requests.post( 75 | f"{BRIGHTDATA_API_URL}/trigger", 76 | headers=self.headers, 77 | params={ 78 | "dataset_id": self.dataset_id, 79 | "type": "discover_new", 80 | "discover_by": "url", 81 | "include_errors": "true", 82 | }, 83 | json=search_urls, 84 | timeout=self.timeout, 85 | ) 86 | response.raise_for_status() 87 | return response.json() 88 | except requests.exceptions.RequestException as e: 89 | logging.error(f"Error triggering discovery: {e}") 90 | return None 91 | 92 | def _check_status(self, snapshot_id: str) -> str: 93 | try: 94 | response = requests.get( 95 | f"{BRIGHTDATA_API_URL}/progress/{snapshot_id}", 96 | headers=self.headers, 97 | timeout=self.timeout, 98 | ) 99 | response.raise_for_status() 100 | return response.json().get("status", "error") 101 | except requests.exceptions.RequestException as e: 102 | logging.error(f"Error checking status: {e}") 103 | return "error" 104 | 105 | def _get_data(self, snapshot_id: str) -> Optional[List[Dict[str, Any]]]: 106 | try: 107 | response = requests.get( 108 | f"{BRIGHTDATA_API_URL}/snapshot/{snapshot_id}", 109 | headers=self.headers, 110 | params={"format": "json"}, 111 | timeout=self.timeout, 112 | ) 113 | response.raise_for_status() 114 | return response.json() 115 | except requests.exceptions.RequestException as e: 116 | logging.error(f"Error retrieving data: {e}") 117 | return None 118 | 119 | def _save_data( 120 | self, 121 | data: List[Dict[str, Any]], 122 | filename: str = "linkedin_jobs_search_url.json", 123 | ) -> None: 124 | try: 125 | with open(filename, "w", encoding="utf-8") as f: 126 | json.dump(data, f, indent=2, ensure_ascii=False) 127 | logging.info(f"Data saved to {filename}") 128 | logging.info(f"Discovered {len(data)} jobs") 129 | except IOError as e: 130 | logging.error(f"Error saving data: {e}") 131 | 132 | def _get_timestamp(self) -> str: 133 | return datetime.now().strftime("%H:%M:%S") 134 | 135 | 136 | def main() -> None: 137 | api_token = "" 138 | discoverer = LinkedInJobsURLDiscovery(api_token) 139 | 140 | search_urls = [ 141 | { 142 | "url": "https://www.linkedin.com/jobs/search?keywords=Software&location=Tel%20Aviv-Yafo&geoId=101570771&trk=public_jobs_jobs-search-bar_search-submit&position=1&pageNum=0&f_TPR=r3600" 143 | }, 144 | ] 145 | 146 | discoverer.discover_jobs(search_urls) 147 | 148 | 149 | if __name__ == "__main__": 150 | main() 151 | -------------------------------------------------------------------------------- /linkedin_scraper_api_codes/linkedin_jobs_by_url.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import json 3 | import time 4 | from datetime import datetime 5 | from typing import List, Dict, Optional, Any 6 | 7 | 8 | class LinkedInJobsCollector: 9 | def __init__(self, api_token: str, dataset_id: str): 10 | """ 11 | Initialize the LinkedInJobsCollector with API token and dataset ID. 12 | """ 13 | self.api_token = api_token 14 | self.headers = { 15 | "Authorization": f"Bearer {api_token}", 16 | "Content-Type": "application/json", 17 | } 18 | self.dataset_id = dataset_id 19 | 20 | def collect_jobs( 21 | self, job_urls: List[Dict[str, str]] 22 | ) -> Optional[List[Dict[str, Any]]]: 23 | """ 24 | Collect job data from LinkedIn using the provided job URLs. 
25 | """ 26 | try: 27 | start_time = time.time() 28 | print("\nCollecting data:") 29 | 30 | trigger_response = self._trigger_collection(job_urls) 31 | if not trigger_response or "snapshot_id" not in trigger_response: 32 | raise ValueError("Failed to initiate data collection") 33 | snapshot_id = trigger_response["snapshot_id"] 34 | jobs_data = None 35 | 36 | while True: 37 | status = self._check_status(snapshot_id) 38 | elapsed = int(time.time() - start_time) 39 | 40 | if status == "running": 41 | print( 42 | f"\r[{self._get_timestamp()}] Status: {status} ({elapsed}s elapsed)", 43 | end="", 44 | flush=True, 45 | ) 46 | time.sleep(5) 47 | continue 48 | elif status == "ready": 49 | if jobs_data is None: 50 | jobs_data = self._get_data(snapshot_id) 51 | if jobs_data: 52 | print( 53 | f"\r[{self._get_timestamp()}] Collection completed after {elapsed} seconds\n" 54 | ) 55 | self._save_data(jobs_data) 56 | return jobs_data 57 | break 58 | elif status in ["failed", "error"]: 59 | raise RuntimeError(f"Collection failed with status: {status}") 60 | time.sleep(5) 61 | except (ValueError, RuntimeError, Exception) as e: 62 | print(f"Error during collection: {str(e)}") 63 | return None 64 | 65 | def _trigger_collection( 66 | self, job_urls: List[Dict[str, str]] 67 | ) -> Optional[Dict[str, Any]]: 68 | """ 69 | Trigger the data collection process. 70 | """ 71 | try: 72 | response = requests.post( 73 | "https://api.brightdata.com/datasets/v3/trigger", 74 | headers=self.headers, 75 | params={"dataset_id": self.dataset_id, "include_errors": "true"}, 76 | json=job_urls, 77 | timeout=30, 78 | ) 79 | response.raise_for_status() 80 | return response.json() 81 | except requests.exceptions.RequestException as e: 82 | print(f"Error triggering collection: {str(e)}") 83 | return None 84 | 85 | def _check_status(self, snapshot_id: str) -> str: 86 | """ 87 | Check the status of the data collection process. 88 | """ 89 | try: 90 | response = requests.get( 91 | f"https://api.brightdata.com/datasets/v3/progress/{snapshot_id}", 92 | headers=self.headers, 93 | timeout=30, 94 | ) 95 | response.raise_for_status() 96 | return response.json().get("status", "error") 97 | except requests.exceptions.RequestException: 98 | return "error" 99 | 100 | def _get_data(self, snapshot_id: str) -> Optional[List[Dict[str, Any]]]: 101 | """ 102 | Retrieve the collected data. 103 | """ 104 | try: 105 | response = requests.get( 106 | f"https://api.brightdata.com/datasets/v3/snapshot/{snapshot_id}", 107 | headers=self.headers, 108 | params={"format": "json"}, 109 | timeout=30, 110 | ) 111 | response.raise_for_status() 112 | return response.json() 113 | except requests.exceptions.RequestException as e: 114 | print(f"Error retrieving data: {str(e)}") 115 | return None 116 | 117 | def _save_data( 118 | self, data: List[Dict[str, Any]], filename: str = "linkedin_jobs_url.json" 119 | ) -> None: 120 | """ 121 | Save the collected data to a JSON file. 122 | """ 123 | try: 124 | with open(filename, "w", encoding="utf-8") as f: 125 | json.dump(data, f, indent=2, ensure_ascii=False) 126 | print(f"✓ Data saved to {filename}") 127 | print(f"✓ Collected {len(data)} job listings") 128 | except Exception as e: 129 | print(f"Error saving data: {str(e)}") 130 | 131 | def _get_timestamp(self) -> str: 132 | """ 133 | Get the current timestamp. 
134 | """ 135 | return datetime.now().strftime("%H:%M:%S") 136 | 137 | 138 | def main() -> None: 139 | api_token = "" 140 | dataset_id = "gd_lpfll7v5hcqtkxl6l" 141 | collector = LinkedInJobsCollector(api_token, dataset_id) 142 | 143 | job_searches = [ 144 | {"url": "https://www.linkedin.com/jobs/view/4073552631"}, 145 | {"url": "https://www.linkedin.com/jobs/view/4073729630"}, 146 | ] 147 | 148 | collector.collect_jobs(job_searches) 149 | 150 | 151 | if __name__ == "__main__": 152 | main() 153 | -------------------------------------------------------------------------------- /linkedin_scraper_api_data/linkedin_jobs_url.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "input": { 4 | "url": "https://www.linkedin.com/jobs/view/4073552631" 5 | }, 6 | "url": "https://www.linkedin.com/jobs/view/4073552631?lg=en", 7 | "job_posting_id": "4073552631", 8 | "job_title": "Data Platform Engineer", 9 | "company_name": "Cycode | Complete ASPM", 10 | "company_id": "40789623", 11 | "job_location": "Tel Aviv-Yafo, Tel Aviv District, Israel", 12 | "job_summary": "This is a unique opportunity to join an exciting early-stage startup experiencing hypergrowth in a white-hot segment of the cybersecurity space. Cycode is a fast-growing cybersecurity startup and the creator of the first comprehensive software supply chain security solution. As software supply chain attacks rise sharply, Cycode’s platform delivers complete visibility, security, and integrity across all stages of the software development lifecycle (SDLC), helping companies protect against high-profile threats. Founded in 2019, Cycode is backed by YL Ventures and Insight Partners and has received significant industry recognition, including Cyber Defense Magazine's Top Infosec Innovator (2023) and multiple other awards in recent years. As a Data Engineer at Cycode, you will play a pivotal role in shaping our evolving data culture. You will be responsible for collecting, organizing, and analyzing data to provide valuable insights that drive informed decision-making across the organization. This is an opportunity to join a dynamic team and contribute to the development of data-driven solutions that drive business growth and innovation. If you are passionate about data and thrive in a collaborative environment, we encourage you to apply. Responsibilities Collect and gather data from various sources, both internal and external. Organize and clean datasets to ensure accuracy and reliability. Utilize appropriate tools and software to analyze and visualize data effectively. Collaborate with cross-functional teams to identify data needs and requirements. Develop and implement data collection strategies to support business objectives. Interpret data and provide insights to inform decision-making processes. Create reports and presentations to communicate findings and recommendations. Assist in the development of data-driven solutions to address business challenges. Stay updated on industry trends and best practices in data analysis. Requirements: Bachelor's degree in a relevant field such as Statistics, Mathematics, Computer Science, or Economics. Strong analytical skills with the ability to collect, organize, and interpret large datasets. 
Proven experience in building, deploying, and monitoring of ETLs Proficiency in data analysis tools such as SQL, Python, Pandas, Apache Spark / Beam, etc Good understanding of data modeling principles Familiarity with data visualization tools such as Tableau, Power BI, or Google Data Studio. Excellent communication and collaboration skills. Ability to work independently and prioritize tasks effectively. Problem-solving mindset with a keen attention to detail. Experience in a data-related role is preferred but not required. Eagerness to learn and adapt to new technologies and methodologies. Advantages MongoDB AWS Cloud CICD, Docker Kubernetes Show more Show less", 13 | "job_seniority_level": "Not Applicable", 14 | "job_function": "Engineering and Information Technology", 15 | "job_employment_type": "Full-time", 16 | "job_industries": "Computer and Network Security", 17 | "company_url": "https://www.linkedin.com/company/cycode?trk=public_jobs_topcard-org-name", 18 | "job_posted_time": "1 month ago", 19 | "job_num_applicants": 85, 20 | "discovery_input": { 21 | "time_range": null, 22 | "job_type": null, 23 | "experience_level": null, 24 | "remote": null, 25 | "selective_search": null 26 | }, 27 | "apply_link": "https://www.linkedin.com/jobs/view/externalApply/4073552631?url=https%3A%2F%2Fcycode%2Ecom%2Fcareers%2Fposition%2F%3Fpos_title%3Ddata-platform-engineer%26pos_id%3D53%2ED48%26coref%3D1%2E11%2Ep9D_4217&urlHash=c1hm", 28 | "country_code": null, 29 | "title_id": "6483", 30 | "company_logo": "https://media.licdn.com/dms/image/v2/D4D0BAQFsSsfzqEVWtw/company-logo_100_100/company-logo_100_100/0/1689682315729/cycode_logo?e=2147483647&v=beta&t=h91f6XM-5MGHa5FDhMCVtXy7Me0S8YQIPRAYUc4UVC0", 31 | "job_posted_date": "2024-11-22T09:41:10.107Z", 32 | "job_poster": { 33 | "name": null, 34 | "title": null, 35 | "url": null 36 | }, 37 | "application_availability": true, 38 | "job_description_formatted": "
\n
\n This is a unique opportunity to join an exciting early-stage startup experiencing hypergrowth in a white-hot segment of the cybersecurity space.

Cycode is a fast-growing cybersecurity startup and the creator of the first comprehensive software supply chain security solution. As software supply chain attacks rise sharply, Cycode’s platform delivers complete visibility, security, and integrity across all stages of the software development lifecycle (SDLC), helping companies protect against high-profile threats.

Founded in 2019, Cycode is backed by YL Ventures and Insight Partners and has received significant industry recognition, including Cyber Defense Magazine's Top Infosec Innovator (2023) and multiple other awards in recent years.

As a Data Engineer at Cycode, you will play a pivotal role in shaping our evolving data culture. You will be responsible for collecting, organizing, and analyzing data to provide valuable insights that drive informed decision-making across the organization. This is an opportunity to join a dynamic team and contribute to the development of data-driven solutions that drive business growth and innovation. If you are passionate about data and thrive in a collaborative environment, we encourage you to apply.

Responsibilities

  • Collect and gather data from various sources, both internal and external.
  • Organize and clean datasets to ensure accuracy and reliability.
  • Utilize appropriate tools and software to analyze and visualize data effectively.
  • Collaborate with cross-functional teams to identify data needs and requirements.
  • Develop and implement data collection strategies to support business objectives.
  • Interpret data and provide insights to inform decision-making processes.
  • Create reports and presentations to communicate findings and recommendations.
  • Assist in the development of data-driven solutions to address business challenges.
  • Stay updated on industry trends and best practices in data analysis.

Requirements:

  • Bachelor's degree in a relevant field such as Statistics, Mathematics, Computer Science, or Economics.
  • Strong analytical skills with the ability to collect, organize, and interpret large datasets.
  • Proven experience in building, deploying, and monitoring of ETLs
  • Proficiency in data analysis tools such as SQL, Python, Pandas, Apache Spark / Beam, etc
  • Good understanding of data modeling principles
  • Familiarity with data visualization tools such as Tableau, Power BI, or Google Data Studio.
  • Excellent communication and collaboration skills.
  • Ability to work independently and prioritize tasks effectively.
  • Problem-solving mindset with a keen attention to detail.
  • Experience in a data-related role is preferred but not required.
  • Eagerness to learn and adapt to new technologies and methodologies.

Advantages

  • MongoDB
  • AWS Cloud
  • CICD, Docker Kubernetes
", 39 | "base_salary": { 40 | "min_amount": null, 41 | "max_amount": null, 42 | "currency": null, 43 | "payment_period": null 44 | }, 45 | "timestamp": "2024-12-22T09:41:10.122Z" 46 | }, 47 | { 48 | "input": { 49 | "url": "https://www.linkedin.com/jobs/view/4073729630" 50 | }, 51 | "url": "https://www.linkedin.com/jobs/view/4073729630?lg=en", 52 | "job_posting_id": "4073729630", 53 | "job_title": "Data Engineer", 54 | "company_name": "Meta", 55 | "company_id": "10667", 56 | "job_location": "Tel Aviv-Yafo, Tel Aviv District, Israel", 57 | "job_summary": "As a Data Engineer at Meta, you will shape the future of people-facing and business-facing products we build across our entire family of applications (Facebook, Instagram, Messenger, WhatsApp, Reality Labs, Threads). Your technical skills and analytical mindset will be utilized designing and building some of the world's most extensive data sets, helping to craft experiences for billions of people and hundreds of millions of businesses worldwide.In this role, you will collaborate with software engineering, data science, and product management teams to design/build scalable data solutions across Meta to optimize growth, strategy, and user experience for our 3 billion plus users, as well as our internal employee community.You will be at the forefront of identifying and solving some of the most interesting data challenges at a scale few companies can match. By joining Meta, you will become part of a vibrant community dedicated to skill development and career growth in data engineering and beyond.Data Engineering: You will guide teams by building optimal data artifacts (including datasets and visualizations) to address key questions. You will refine our systems, design logging solutions, and create scalable data models. Ensuring data security and quality, and with a strong focus on efficiency, you will suggest architecture and development approaches and data management standards to address complex analytical problems.Product leadership: You will use data to shape product development, identify new opportunities, and tackle upcoming challenges. You'll ensure our products add value for users and businesses, by prioritizing projects, and driving innovative solutions to respond to challenges or opportunities.Communication and influence: You won't simply present data, but tell data-driven stories. You will convince and influence your partners using clear insights and recommendations. You will build credibility through structure and clarity, and be a trusted strategic partner. 
Data Engineer Responsibilities: Conceptualize and own the data architecture for multiple large-scale projects, while evaluating design and operational cost-benefit tradeoffs within systems Create and contribute to frameworks that improve the efficacy of logging data, while working with data infrastructure to triage issues and resolve Collaborate with engineers, product managers, and data scientists to understand data needs, representing key data insights in a meaningful way Define and manage Service Level Agreements for all data sets in allocated areas of ownership Determine and implement the security model based on privacy requirements, confirm safeguards are followed, address data quality issues, and evolve governance processes within allocated areas of ownership Design, build, and launch collections of sophisticated data models and visualizations that support multiple use cases across different products or domains Solve our most challenging data integration problems, utilizing optimal Extract, Transform, Load (ETL) patterns, frameworks, query techniques, sourcing from structured and unstructured data sources Assist in owning existing processes running in production, optimizing complex code through advanced algorithmic concepts Optimize pipelines, dashboards, frameworks, and systems to facilitate easier development of data artifacts Influence product and cross-functional teams to identify data opportunities to drive impact Mentor team members by giving/receiving actionable feedback Minimum Qualifications: Bachelor's degree in Computer Science, Engineering, relevant technical field, or equivalent 4+ years of experience where the primary responsibility involves working with data. This could include roles such as data analyst, data scientist, data engineer, or similar positions 4+ years of experience (or a minimum of 2+ years with a Ph.D) with SQL, ETL, data modeling, and at least one programming language (e.g., Python, C++, C#, Scala, etc.) About Meta: Meta builds technologies that help people connect, find communities, and grow businesses. When Facebook launched in 2004, it changed the way people connect. Apps like Messenger, Instagram and WhatsApp further empowered billions around the world. Now, Meta is moving beyond 2D screens toward immersive experiences like augmented and virtual reality to help build the next evolution in social technology. People who choose to build their careers by building with us at Meta help shape a future that will take us beyond what digital connection makes possible today—beyond the constraints of screens, the limits of distance, and even the rules of physics. Individual compensation is determined by skills, qualifications, experience, and location. Compensation details listed in this posting reflect the base hourly rate, monthly rate, or annual salary only, and do not include bonus, equity or sales incentives, if applicable. In addition to base compensation, Meta offers benefits. Learn more about benefits at Meta. 
Show more Show less", 58 | "job_seniority_level": "Not Applicable", 59 | "job_function": "Information Technology", 60 | "job_employment_type": "Full-time", 61 | "job_industries": "Technology, Information and Internet", 62 | "company_url": "https://www.linkedin.com/company/meta?trk=public_jobs_topcard-org-name", 63 | "job_posted_time": "2 weeks ago", 64 | "job_num_applicants": 200, 65 | "discovery_input": { 66 | "time_range": null, 67 | "job_type": null, 68 | "experience_level": null, 69 | "remote": null, 70 | "selective_search": null 71 | }, 72 | "apply_link": "https://www.linkedin.com/jobs/view/externalApply/4073729630?url=https%3A%2F%2Fjsv3%2Erecruitics%2Ecom%2Fredirect%3Frx_cid%3D3239%26rx_jobId%3Da1KDp00000E2LWtMAN%26rx_url%3Dhttps%253A%252F%252Fwww%2Emetacareers%2Ecom%252Fjobs%252F1243470300203522%252F%253Frx_campaign%253DLinkedin1%2526rx_ch%253Dconnector%2526rx_group%253D126320%2526rx_job%253Da1KDp00000E2LWtMAN%2526rx_medium%253Dpost%2526rx_r%253Dnone%2526rx_source%253DLinkedin%2526rx_ts%253D20241222T001201Z%2526rx_vp%253Dslots%2526utm_campaign%253DJob%25252Bboard%2526utm_medium%253Djobs%2526utm_source%253DLIpaid&urlHash=ZsAf", 73 | "country_code": null, 74 | "title_id": "2732", 75 | "company_logo": "https://media.licdn.com/dms/image/v2/C4E0BAQFdNatYGiBelg/company-logo_100_100/company-logo_100_100/0/1636138754252/facebook_logo?e=2147483647&v=beta&t=yQdLB8_bCnLl5CaiAOcOMFm39fvGboZVNBYTqgRVKWg", 76 | "job_posted_date": "2024-12-08T09:41:14.309Z", 77 | "job_poster": { 78 | "name": null, 79 | "title": null, 80 | "url": null 81 | }, 82 | "application_availability": true, 83 | "job_description_formatted": "
\n
\n As a Data Engineer at Meta, you will shape the future of people-facing and business-facing products we build across our entire family of applications (Facebook, Instagram, Messenger, WhatsApp, Reality Labs, Threads). Your technical skills and analytical mindset will be utilized designing and building some of the world's most extensive data sets, helping to craft experiences for billions of people and hundreds of millions of businesses worldwide.In this role, you will collaborate with software engineering, data science, and product management teams to design/build scalable data solutions across Meta to optimize growth, strategy, and user experience for our 3 billion plus users, as well as our internal employee community.You will be at the forefront of identifying and solving some of the most interesting data challenges at a scale few companies can match. By joining Meta, you will become part of a vibrant community dedicated to skill development and career growth in data engineering and beyond.Data Engineering: You will guide teams by building optimal data artifacts (including datasets and visualizations) to address key questions. You will refine our systems, design logging solutions, and create scalable data models. Ensuring data security and quality, and with a strong focus on efficiency, you will suggest architecture and development approaches and data management standards to address complex analytical problems.Product leadership: You will use data to shape product development, identify new opportunities, and tackle upcoming challenges. You'll ensure our products add value for users and businesses, by prioritizing projects, and driving innovative solutions to respond to challenges or opportunities.Communication and influence: You won't simply present data, but tell data-driven stories. You will convince and influence your partners using clear insights and recommendations. You will build credibility through structure and clarity, and be a trusted strategic partner.

Data Engineer Responsibilities:

  • Conceptualize and own the data architecture for multiple large-scale projects, while evaluating design and operational cost-benefit tradeoffs within systems
  • Create and contribute to frameworks that improve the efficacy of logging data, while working with data infrastructure to triage issues and resolve
  • Collaborate with engineers, product managers, and data scientists to understand data needs, representing key data insights in a meaningful way
  • Define and manage Service Level Agreements for all data sets in allocated areas of ownership
  • Determine and implement the security model based on privacy requirements, confirm safeguards are followed, address data quality issues, and evolve governance processes within allocated areas of ownership
  • Design, build, and launch collections of sophisticated data models and visualizations that support multiple use cases across different products or domains
  • Solve our most challenging data integration problems, utilizing optimal Extract, Transform, Load (ETL) patterns, frameworks, query techniques, sourcing from structured and unstructured data sources
  • Assist in owning existing processes running in production, optimizing complex code through advanced algorithmic concepts
  • Optimize pipelines, dashboards, frameworks, and systems to facilitate easier development of data artifacts
  • Influence product and cross-functional teams to identify data opportunities to drive impact
  • Mentor team members by giving/receiving actionable feedback

Minimum Qualifications:

  • Bachelor's degree in Computer Science, Engineering, relevant technical field, or equivalent
  • 4+ years of experience where the primary responsibility involves working with data. This could include roles such as data analyst, data scientist, data engineer, or similar positions
  • 4+ years of experience (or a minimum of 2+ years with a Ph.D) with SQL, ETL, data modeling, and at least one programming language (e.g., Python, C++, C#, Scala, etc.)

About Meta:

Meta builds technologies that help people connect, find communities, and grow businesses. When Facebook launched in 2004, it changed the way people connect. Apps like Messenger, Instagram and WhatsApp further empowered billions around the world. Now, Meta is moving beyond 2D screens toward immersive experiences like augmented and virtual reality to help build the next evolution in social technology. People who choose to build their careers by building with us at Meta help shape a future that will take us beyond what digital connection makes possible today—beyond the constraints of screens, the limits of distance, and even the rules of physics.

Individual compensation is determined by skills, qualifications, experience, and location. Compensation details listed in this posting reflect the base hourly rate, monthly rate, or annual salary only, and do not include bonus, equity or sales incentives, if applicable. In addition to base compensation, Meta offers benefits. Learn more about benefits at Meta.\n
", 84 | "base_salary": { 85 | "min_amount": null, 86 | "max_amount": null, 87 | "currency": null, 88 | "payment_period": null 89 | }, 90 | "timestamp": "2024-12-22T09:41:14.335Z" 91 | } 92 | ] -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Linkedin Scraper 2 | 3 | [![Promo](https://github.com/luminati-io/LinkedIn-Scraper/blob/main/Proxies%20and%20scrapers%20GitHub%20bonus%20banner.png)](https://brightdata.com/products/web-scraper/linkedin) 4 | 5 | This repository provides two methods for collecting data from LinkedIn: 6 | 1. **Free**: A great option for small-scale projects, experiments, and learning purposes. 7 | 2. **LinkedIn Scraper API**: Designed for large-scale, reliable, and real-time data extraction. 8 | 9 | Want to skip scraping? Purchase the full [LinkedIn dataset](https://brightdata.com/products/datasets/linkedin). 10 | 11 | ## Table of Contents 12 | - [Method 1: Free LinkedIn Scraper](#method-1-free-linkedin-scraper) 13 | - [Jobs Scraper](#1-jobs-scraper) 14 | - [Profile Checker](#2-profile-checker) 15 | - [Quick Start](#quick-start) 16 | - [Usage Examples](#usage-examples) 17 | - [Common Scraping Challenges with Free Method](#common-scraping-challenges-with-free-method) 18 | - [Method 2: Bright Data LinkedIn Scraper API](#method-2-bright-data-linkedin-scraper-api) 19 | - [Key Benefits](#key-benefits) 20 | - [Getting Started with the LinkedIn Scraper API](#getting-started-with-the-linkedin-scraper-api) 21 | - [1. Company Information Scraper](#1-company-information-scraper) 22 | - [2. Profile by URL](#2-profile-by-url) 23 | - [3. Profile Discovery](#3-profile-discovery) 24 | - [4. Posts by URL](#4-posts-by-url) 25 | - [5. Posts Discovery by URL](#5-posts-discovery-by-url) 26 | - [6. Posts Discovery by Profile](#6-posts-discovery-by-profile) 27 | - [7. Posts Discovery by Company](#7-posts-discovery-by-company) 28 | - [8. Job Listings Collection by URL](#8-job-listings-collection-by-url) 29 | - [9. Job Listings Discovery by Keyword](#9-job-listings-discovery-by-keyword) 30 | - [10. Job Listings Discovery by URL](#10-job-listings-discovery-by-url) 31 | - (More info) [Data Collection Approaches](#data-collection-approaches) 32 | 33 | ## Method 1: Free LinkedIn Scraper 34 | This free tool provides two primary functionalities: 35 | 1. **LinkedIn Jobs Scraper**: Collection of job listings with comprehensive metadata 36 | 2. **LinkedIn Profile Validator**: Verification of LinkedIn profile and company URLs 37 | 38 | linkedin-scraper-bright-data-screenshot-linkedin-jobs 39 | 40 | ### 1. Jobs Scraper 41 | Collects job listings from LinkedIn's job search. 42 | 43 | **Key features**: 44 | - Scrapes detailed job listings (title, company, location, URL, posting date) 45 | - Built-in rate limiting & error handling 46 | - Clean JSON output 47 | 48 | ### 2. Profile Checker 49 | Verify whether LinkedIn profiles or company pages exist. 
50 | 51 | **Key features**: 52 | - Checks profile/company URLs 53 | - Retries failed requests automatically 54 | - Shows detailed status for each URL 55 | - Can check multiple URLs at once 56 | 57 | ### Quick Start 58 | Let's get you up and running in minutes: 59 | 60 | #### Prerequisites 61 | - Python 3.9 or newer 62 | - Required packages listed in [requirements.txt](https://github.com/luminati-io/LinkedIn-Scraper/blob/main/requirements.txt) 63 | 64 | #### Installation 65 | Three simple steps to get started: 66 | ```bash 67 | git clone https://github.com/luminati-io/LinkedIn-Scraper.git 68 | cd LinkedIn-Scraper 69 | pip install -r requirements.txt 70 | ``` 71 | ### Usage Examples 72 | Here's how to put the scrapers to work: 73 | 74 | #### 1. Jobs Scraper 75 | Configure search parameters: 76 | ```python 77 | # In jobs_scraper.py 78 | params = { 79 | "keywords": "AI/ML Engineer", # Job title/keywords to search 80 | "location": "London", # Location to search in 81 | "max_jobs": 100 # Maximum number of jobs to collect 82 | } 83 | 84 | # Run: python jobs_scraper.py 85 | ``` 86 | 87 | The scraper creates a JSON file with job details: 88 | ```json 89 | { 90 | "title": "Research Engineer, AI/Machine Learning", 91 | "company": "Google", 92 | "location": "London, England, United Kingdom", 93 | "job_link": "https://uk.linkedin.com/jobs/view/research-engineer-ai-machine-learning-at-google-4086259724", 94 | "posted_date": "3 weeks ago" 95 | } 96 | ``` 97 | 98 | #### 2. Profile Checker 99 | Configure URLs for validation: 100 | ```python 101 | # In profile_checker.py 102 | test_urls = [ 103 | "https://www.linkedin.com/company/bright-data/", 104 | "https://www.linkedin.com/company/aabbccdd/" 105 | ] 106 | 107 | # Run: python profile_checker.py 108 | ``` 109 | 110 | You'll get clear status indicators for each URL: 111 | ```bash 112 | ✓ linkedin.com/company/bright-data - Status: 200 113 | ✗ linkedin.com/company/aabbccdd - Status: 400 114 | ``` 115 | 116 | ## Common Scraping Challenges with Free Method 117 | When collecting data from LinkedIn, you'll encounter various anti-scraping measures. Here's what you need to know: 118 | 1. **Rate Limiting**: LinkedIn strictly monitors request frequency per IP address. Exceeding these limits leads to temporary or permanent IP blocks. 119 | 2. **CAPTCHA Detection**: LinkedIn presents CAPTCHA challenges when it detects unusual browsing patterns, blocking automated access. 120 | 3. **Authentication Barriers**: Most valuable LinkedIn data requires authentication. The platform easily detects and blocks automated login attempts. 121 | 4. **Technical Challenges**: Additional barriers include handling pagination, dynamic content loading, incomplete data points, and navigating through LinkedIn ads. 122 | 123 | While manual web scraping works for small projects, it becomes increasingly challenging at scale. For reliable, efficient, and scalable LinkedIn data collection, **Bright Data** provides a superior solution that saves time and resources while delivering higher-quality results. 124 | 125 | [![Promo](https://github.com/luminati-io/LinkedIn-Scraper/blob/main/Proxies%20and%20scrapers%20GitHub%20bonus%20banner.png)](https://brightdata.com/products/web-scraper/linkedin) 126 | 127 | ## Method 2: Bright Data LinkedIn Scraper API 128 | For a robust and scalable LinkedIn scraping solution, consider the [Bright Data LinkedIn Scraper API](https://brightdata.com/products/web-scraper/linkedin).
Here's why it's worth considering: 129 | 130 | ### Key Benefits 131 | - **No Infrastructure Setup:** Handles proxies, CAPTCHAs, and throttling automatically. 132 | - **Scalable and Reliable:** Optimized for high-volume and real-time data extraction. 133 | - **Comprehensive Coverage:** Extract data from profiles, jobs, companies, and posts. 134 | - **Global Access:** Supports all regions and languages. 135 | - **Privacy Compliance:** Fully adheres to GDPR and CCPA standards. 136 | - **Pay-as-You-Go:** Only pay for successful responses. 137 | - **Free Trial:** Includes 20 free API calls to get started. 138 | 139 | ## Getting Started with the LinkedIn Scraper API 140 | The Bright Data LinkedIn Scraper API allows developers to programmatically extract public data from LinkedIn profiles, companies, job listings, and posts. This enterprise-grade solution handles complex infrastructure requirements including proxy management, request throttling, and data parsing. 141 | 142 | Before getting started, you'll need: 143 | - Bright Data Account 144 | - [Start a free trial](https://brightdata.com/) and log in. 145 | - Activate your account by adding a payment method under the **Billing** page. 146 | - API Token 147 | - [Follow this guide](https://docs.brightdata.com/general/account/api-token) to obtain your API token. 148 | 149 | ### 1. Company Information Scraper 150 | Extract detailed data about companies using their LinkedIn URLs. 151 | 152 | linkedin-scraper-bright-data-screenshot-linkedin-company-information-by-url 153 | 154 | 155 | #### Input Parameters 156 | | Field | Type | Required | Description | 157 | |----------|--------|----------|----------------------------------| 158 | | `url` | string | Yes | LinkedIn company URL to extract information from | 159 | 160 | #### Sample Response 161 | ```json 162 | { 163 | "name": "Kraft Heinz", 164 | "about": "The Kraft Heinz Company is one of the largest food and beverage companies in the world, with eight $1 billion+ brands and global sales of approximately $25 billion. We're a globally trusted producer of high-quality, great-tasting, and nutritious foods for over 150 years.", 165 | "key_info": { 166 | "headquarters": "Chicago, IL", 167 | "founded": 2015, 168 | "company_size": "10,001+ employees", 169 | "organization_type": "Public Company", 170 | "industries": "Food and Beverage Services", 171 | "website": "https://www.careers.kraftheinz.com/", 172 | }, 173 | "metrics": {"linkedin_followers": 1557451, "linkedin_employees": 25254}, 174 | "stock_info": { 175 | "ticker": "KHC", 176 | "exchange": "NASDAQ", 177 | "price": "$30.52", 178 | "last_updated": "December 21, 2024", 179 | }, 180 | "specialties": "Food, Fast Moving Consumer Packaged Goods, CPG, and Consumer Packaged Goods", 181 | "locations": ["200 E. Randolph St. Suite 7600 Chicago, IL 60601, US"], 182 | "slogan": "Let's make life delicious!", 183 | } 184 | ``` 185 | 186 | 👉 Only key fields are shown here. For the full dataset, refer to the [JSON response sample](https://github.com/luminati-io/LinkedIn-Scraper/blob/main/linkedin_scraper_api_data/linkedin_company_info.json). 
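💡 All ten scrapers in this method share the same three-step request flow, which the linked Python scripts implement: trigger a collection, poll its progress, then download the finished snapshot. Here is a minimal, self-contained sketch of that flow. The `DATASET_ID` value is a placeholder (each endpoint uses its own dataset ID; see the full script linked under each section), and the input URL is just an example:

```python
import time

import requests

API_TOKEN = ""  # your Bright Data API token
DATASET_ID = "gd_xxxxxxxxxxxxx"  # placeholder: every endpoint has its own dataset ID
BASE_URL = "https://api.brightdata.com/datasets/v3"
HEADERS = {"Authorization": f"Bearer {API_TOKEN}", "Content-Type": "application/json"}

# 1. Trigger a collection for one or more inputs
trigger = requests.post(
    f"{BASE_URL}/trigger",
    headers=HEADERS,
    params={"dataset_id": DATASET_ID, "include_errors": "true"},
    json=[{"url": "https://il.linkedin.com/company/bright-data"}],
    timeout=30,
)
trigger.raise_for_status()
snapshot_id = trigger.json()["snapshot_id"]

# 2. Poll every 5 seconds until the snapshot is no longer running
while True:
    progress = requests.get(
        f"{BASE_URL}/progress/{snapshot_id}", headers=HEADERS, timeout=30
    )
    progress.raise_for_status()
    if progress.json().get("status") != "running":
        break
    time.sleep(5)

# 3. Download the collected records as JSON
snapshot = requests.get(
    f"{BASE_URL}/snapshot/{snapshot_id}",
    headers=HEADERS,
    params={"format": "json"},
    timeout=30,
)
snapshot.raise_for_status()
print(f"Collected {len(snapshot.json())} records")
```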
187 | 188 | #### Code Example 189 | Modify the company URLs in the list to extract data: 190 | ```python 191 | companies = [ 192 | {"url": "https://il.linkedin.com/company/ibm"}, 193 | {"url": "https://www.linkedin.com/company/stalkit"}, 194 | { 195 | "url": "https://www.linkedin.com/organization-guest/company/the-kraft-heinz-company" 196 | }, 197 | {"url": "https://il.linkedin.com/company/bright-data"}, 198 | ] 199 | ``` 200 | 201 | 👉 View [Full Python Code](https://github.com/luminati-io/LinkedIn-Scraper/blob/main/linkedin_scraper_api_codes/linkedin_company_info_by_url.py) 202 | 203 | ### 2. Profile by URL 204 | Retrieves detailed information from individual LinkedIn profiles. 205 | 206 | linkedin-scraper-bright-data-screenshot-linkedin-people-profiles-by-url 207 | 208 | #### Input Parameters 209 | | Parameter | Type | Required | Description | 210 | |-------------|--------|----------|---------------------------------------| 211 | | `url` | string | Yes | LinkedIn profile URL to extract data from| 212 | 213 | #### Sample Response 214 | ```json 215 | { 216 | "name": "Richard Branson", 217 | "profile_info": { 218 | "position": "Founder at Virgin Group", 219 | "followers": 18730516, 220 | "connections": 2, 221 | "avatar": "https://media.licdn.com/dms/image/v2/C4D03AQHh6_Wth5f3rQ/profile-displayphoto-shrink_200_200/profile-displayphoto-shrink_200_200/0/1625181963183?e=2147483647&v=beta&t=oiGK2oBQ3r3COkRR0z62i7CbnqXKw_1ujZ9X4-SKheo", 222 | }, 223 | "experience": [ 224 | { 225 | "title": "Founder", 226 | "company": "Virgin Group", 227 | "duration": "Jan 1968 - Present (57 years)", 228 | "description": "Tie-loathing adventurer and thrill seeker, who believes in turning ideas into reality. Otherwise known as Dr Yes at Virgin!", 229 | } 230 | ], 231 | "current_company": {"name": "Virgin Group", "title": "Founder at Virgin Group"}, 232 | "url": "https://www.linkedin.com/in/rbranson/", 233 | } 234 | ``` 235 | 236 | 👉 Only key fields are shown here. For the full dataset, refer to the [JSON response sample](https://github.com/luminati-io/LinkedIn-Scraper/blob/main/linkedin_scraper_api_data/profiles_by_url.json). 237 | 238 | #### Code Example 239 | Replace the URLs with the LinkedIn profiles you wish to analyze. 240 | ```python 241 | profiles = [ 242 | {"url": "https://www.linkedin.com/in/williamhgates"}, 243 | {"url": "https://www.linkedin.com/in/rbranson/"}, 244 | {"url": "https://www.linkedin.com/in/justinwelsh/"}, 245 | {"url": "https://www.linkedin.com/in/simonsinek/"}, 246 | ] 247 | ``` 248 | 249 | 👉 View [Full Python Code](https://github.com/luminati-io/LinkedIn-Scraper/blob/main/linkedin_scraper_api_codes/linkedin_profile_by_url.py) 250 | 251 | ### 3. Profile Discovery 252 | Searches for LinkedIn profiles using name-based queries. 253 | 254 | linkedin-scraper-bright-data-screenshot-linkedin-people-profiles-by-name 255 | 256 | #### Input Parameters 257 | | Parameter | Type | Required | Description | 258 | |---------------|--------|----------|-----------------------------------------------------| 259 | | `first_name` | string | Yes | Person's first name | 260 | | `last_name` | string | Yes | Person's last name | 261 | 262 | #### Sample Response 263 | ```json 264 | { 265 | "profile_info": { 266 | "id": "richard-branson-8a38866", 267 | "name": "Richard Branson", 268 | "location": {"city": "Cincinnati", "state": "Ohio", "country": "US"}, 269 | "about": "Respiratory therapist with 40 years of experience. 
Over 300 peer-reviewed publications...", 270 | "metrics": {"followers": 868, "connections": 500, "recommendations": 1}, 271 | }, 272 | "professional": { 273 | "current_position": { 274 | "company": "University of Cincinnati", 275 | "company_link": "https://www.linkedin.com/school/university-of-cincinnati", 276 | }, 277 | "education": { 278 | "school": "The George Washington University School of Medicine and Health Sciences", 279 | "years": "2001-2003", 280 | }, 281 | }, 282 | "recommendations": [ 283 | "Tracy OConnell Well known pro active valuable assett to the professon of respiratory care." 284 | ], 285 | "similar_professionals": [ 286 | { 287 | "name": "Walter J. Jones, PhD, MHSA", 288 | "title": "Professor at Medical University of South Carolina", 289 | "location": "Mount Pleasant, SC", 290 | }, 291 | { 292 | "name": "Vincent Arlet", 293 | "title": "Professor of Orthopaedic Surgery", 294 | "location": "Philadelphia, PA", 295 | }, 296 | ], 297 | "url": "https://www.linkedin.com/in/richard-branson-8a38866", 298 | } 299 | ``` 300 | 👉 View [Full JSON Response Sample](https://github.com/luminati-io/LinkedIn-Scraper/blob/main/linkedin_scraper_api_data/profiles_by_name.json) 301 | 302 | #### Code Example 303 | Modify the first and last name fields to find profiles. 304 | ```python 305 | people = [ 306 | {"first_name": "Richard", "last_name": "Branson"}, 307 | {"first_name": "Bill", "last_name": "Gates"}, 308 | ] 309 | ``` 310 | 👉 View [Full Python Code](https://github.com/luminati-io/LinkedIn-Scraper/blob/main/linkedin_scraper_api_codes/linkedin_profile_by_name.py) 311 | 312 | ### 4. Posts by URL 313 | Collects detailed information about specific LinkedIn posts. 314 | 315 | linkedin-scraper-bright-data-screenshot-linkedin-posts-by-url 316 | 317 | #### Input Parameters 318 | | Parameter | Type | Required | Description | 319 | |-----------|--------|----------|--------------------------| 320 | | `url` | string | Yes | LinkedIn post URL | 321 | 322 | #### Sample Response 323 | ```json 324 | { 325 | "post_info": { 326 | "id": "7176601589682434049", 327 | "url": "https://www.linkedin.com/posts/karin-dodis_web-data-collection-for-businesses-bright-activity-7176601589682434049-Aakz", 328 | "date_posted": "2024-03-21T15:32:33.770Z", 329 | "post_type": "post", 330 | "engagement": {"num_likes": 12, "num_comments": 4}, 331 | }, 332 | "content": { 333 | "title": "Karin Dodis on LinkedIn: Web data collection for Businesses. Bright Data", 334 | "text": "Hey data enthusiasts, Bright Data has an awesome collection of free datasets waiting for you to dive into. Whether you're a seasoned analyst or just starting out, these datasets are a goldmine of potential for your projects. From Wikipedia to ESPN and beyond, there's something here for everyone. Use them to fuel your next big idea, hone your skills, and add some serious value to your resume", 335 | }, 336 | "author": { 337 | "user_id": "karin-dodis", 338 | "profile_url": "https://il.linkedin.com/in/karin-dodis", 339 | "followers": 4131, 340 | "total_posts": 28, 341 | }, 342 | "repost_info": { 343 | "original_author": "Or Lenchner", 344 | "original_author_id": "orlenchner", 345 | "original_text": "Free Datasets! Not just samples, but complete datasets with millions of records. Before investing in acquiring specific large-scale data to train your LLM, start with free datasets. Wikipedia dataset, ESPN dataset, Goodreads, IMDB, and more.. 
Check it out -->", 346 | "original_date": "2024-03-27T15:39:54.497Z", 347 | "original_post_id": "7176470998987214848", 348 | }, 349 | } 350 | ``` 351 | 352 | 👉 View [Full JSON Response Sample](https://github.com/luminati-io/LinkedIn-Scraper/blob/main/linkedin_scraper_api_data/linkedin_posts_url.json) 353 | 354 | #### Code Example 355 | Replace the URLs with the LinkedIn post links you want to analyze. 356 | 357 | ```python 358 | posts = [ 359 | { 360 | "url": "https://www.linkedin.com/pulse/ab-test-optimisation-earlier-decisions-new-readout-de-b%C3%A9naz%C3%A9?trk=public_profile_article_view" 361 | }, 362 | { 363 | "url": "https://www.linkedin.com/posts/orlenchner_scrapecon-activity-7180537307521769472-oSYN?trk=public_profile" 364 | }, 365 | { 366 | "url": "https://www.linkedin.com/posts/karin-dodis_web-data-collection-for-businesses-bright-activity-7176601589682434049-Aakz?trk=public_profile" 367 | }, 368 | { 369 | "url": "https://www.linkedin.com/pulse/getting-value-out-sunburst-guillaume-de-b%C3%A9naz%C3%A9?trk=public_profile_article_view" 370 | }, 371 | ] 372 | ``` 373 | 👉 View [Full Python Code](https://github.com/luminati-io/LinkedIn-Scraper/blob/main/linkedin_scraper_api_codes/linkedin_posts_by_url.py) 374 | 375 | ### 5. Posts Discovery by URL 376 | Find detailed data on LinkedIn articles authored or interacted with by users. 377 | 378 | linkedin-scraper-bright-data-screenshot-linkedin-posts-discover-by-url 379 | 380 | #### Input Parameters 381 | | Parameter | Type | Required | Description | 382 | |-----------|--------|----------|---------------------------------| 383 | | `url` | string | Yes | LinkedIn author/article URL | 384 | | `limit` | number | No | Maximum number of articles to retrieve | 385 | 386 | #### Sample Response 387 | ```json 388 | { 389 | "article_info": { 390 | "id": "fare-business-con-la-propria-identità-cristian-brunori", 391 | "url": "https://it.linkedin.com/pulse/fare-business-con-la-propria-identità-cristian-brunori", 392 | "title": "Fare Business con la propria Identità", 393 | "date_posted": "2017-03-01T17:27:26.000Z", 394 | "post_type": "article", 395 | "engagement": {"num_likes": 18, "num_comments": 0}, 396 | }, 397 | "author": { 398 | "user_id": "cristianbrunori", 399 | "profile_url": "https://it.linkedin.com/in/cristianbrunori", 400 | "followers": 5205, 401 | }, 402 | "content": { 403 | "headline": "Quali sono i fattori che permettono ad un prodotto, ad un servizio e ad un'azienda di distinguersi nei nuovi scenari di mercato dove quasi tutto è tecnicamente e facilmente riproducibile? Mai come in questo momento storico, l'identità di Marca è un valore imprescindibile per tutelare il proprio lavo", 404 | "text": "Quali sono i fattori che permettono ad un prodotto, ad un servizio e ad un'azienda di distinguersi nei nuovi scenari di mercato dove quasi tutto è tecnicamente e facilmente riproducibile? 
Mai come in questo momento storico, l' identità di Marca è un valore imprescindibile per tutelare il proprio lavoro e per aprire nuovi scenari economici ideali per la propria attività...", 405 | }, 406 | "related_articles": [ 407 | { 408 | "headline": "La differenza tra Marketing e Branding", 409 | "date_posted": "2017-06-29T00:00:00.000Z", 410 | }, 411 | { 412 | "headline": "Ecco perché un contenuto diventa virale", 413 | "date_posted": "2017-03-24T00:00:00.000Z", 414 | }, 415 | ], 416 | } 417 | ``` 418 | 👉 View [Full JSON Response Sample](https://github.com/luminati-io/LinkedIn-Scraper/blob/main/linkedin_scraper_api_data/discovered_posts_by_url.json) 419 | 420 | #### Code Example 421 | Update the `url` and `limit` fields to retrieve articles from specific LinkedIn profiles. 422 | ```python 423 | authors = [ 424 | { 425 | "url": "https://www.linkedin.com/today/author/cristianbrunori?trk=public_post_follow-articles", 426 | "limit": 50, 427 | }, 428 | { 429 | "url": "https://www.linkedin.com/today/author/stevenouri?trk=public_post_follow-articles" 430 | }, 431 | ] 432 | ``` 433 | 👉 View [Full Python Code](https://github.com/luminati-io/LinkedIn-Scraper/blob/main/linkedin_scraper_api_codes/linkedin_posts_discover_by_url.py) 434 | 435 | 436 | ### 6. Posts Discovery by Profile 437 | Discover all posts authored or interacted with by a specific LinkedIn profile. 438 | 439 | linkedin-scraper-bright-data-screenshot-linkedin_posts_by_profile_url 440 | 441 | #### Input Parameters 442 | | Parameter | Type | Required | Description | 443 | |--------------|--------|----------|-------------------------------------------------------------------------| 444 | | `url` | string | Yes | LinkedIn profile URL | 445 | | `start_date` | date | No | Start date to filter posts (ISO 8601 format) | 446 | | `end_date` | date | No | End date to filter posts (ISO 8601 format) | 447 | 448 | #### Sample Response 449 | ```json 450 | { 451 | "article_info": { 452 | "id": "fare-business-con-la-propria-identità-cristian-brunori", 453 | "url": "https://it.linkedin.com/pulse/fare-business-con-la-propria-identità-cristian-brunori", 454 | "title": "Fare Business con la propria Identità", 455 | "date_posted": "2017-03-01T17:27:26.000Z", 456 | "post_type": "article", 457 | "engagement": {"num_likes": 18, "num_comments": 0}, 458 | }, 459 | "author": { 460 | "user_id": "cristianbrunori", 461 | "profile_url": "https://it.linkedin.com/in/cristianbrunori", 462 | "followers": 5205, 463 | }, 464 | "content": { 465 | "headline": "Quali sono i fattori che permettono ad un prodotto, ad un servizio e ad un'azienda di distinguersi nei nuovi scenari di mercato dove quasi tutto è tecnicamente e facilmente riproducibile? Mai come in questo momento storico, l'identità di Marca è un valore imprescindibile per tutelare il proprio lavo", 466 | "text": "Quali sono i fattori che permettono ad un prodotto, ad un servizio e ad un'azienda di distinguersi nei nuovi scenari di mercato dove quasi tutto è tecnicamente e facilmente riproducibile? 
Mai come in questo momento storico, l' identità di Marca è un valore imprescindibile per tutelare il proprio lavoro e per aprire nuovi scenari economici ideali per la propria attività...", 467 | }, 468 | "related_articles": [ 469 | { 470 | "headline": "La differenza tra Marketing e Branding", 471 | "date_posted": "2017-06-29T00:00:00.000Z", 472 | }, 473 | { 474 | "headline": "Ecco perché un contenuto diventa virale", 475 | "date_posted": "2017-03-24T00:00:00.000Z", 476 | }, 477 | ], 478 | } 479 | ``` 480 | 👉 View [Full JSON Response Sample](https://github.com/luminati-io/LinkedIn-Scraper/blob/main/linkedin_scraper_api_data/posts_by_profile.json) 481 | 482 | #### Code Example 483 | Modify the profile URLs and date ranges to collect posts from specific LinkedIn profiles. 484 | ```python 485 | profiles = [ 486 | { 487 | "url": "https://www.linkedin.com/in/luca-rossi-0aa497bb", 488 | "start_date": "2024-10-01T00:00:00.000Z", 489 | "end_date": "2024-10-09T00:00:00.000Z", 490 | }, 491 | { 492 | "url": "https://www.linkedin.com/in/srijith-gomattam-401059214", 493 | "start_date": "2024-09-01T00:00:00.000Z", 494 | "end_date": "2024-10-01T00:00:00.000Z", 495 | }, 496 | { 497 | "url": "https://www.linkedin.com/in/anna-clarke-0a342513", 498 | "start_date": "2024-10-01T00:00:00.000Z", 499 | }, 500 | ] 501 | ``` 502 | 👉 View [Full Python Code](https://github.com/luminati-io/LinkedIn-Scraper/blob/main/linkedin_scraper_api_codes/linkedin_posts_by_profile_url.py) 503 | 504 | ### 7. Posts Discovery by Company 505 | Collect posts and updates from company pages. 506 | 507 | linkedin-scraper-bright-data-screenshot-linkedin_posts_by_company_url 508 | 509 | #### Input Parameters 510 | | Parameter | Type | Required | Description | 511 | |--------------|--------|----------|-------------------------------------------------------------------------| 512 | | `url` | string | Yes | LinkedIn company URL | 513 | | `start_date` | date | No | Start date to filter posts (ISO 8601 format) | 514 | | `end_date` | date | No | End date to filter posts (ISO 8601 format) | 515 | 516 | #### Sample Response 517 | ```json 518 | { 519 | "post_info": { 520 | "id": "7254476883906482179", 521 | "url": "https://it.linkedin.com/posts/lanieri_lanieri-torna-in-lussemburgo-siamo-lieti-activity-7254476883906482179-8dW8", 522 | "date_posted": "2024-10-22T13:01:10.754Z", 523 | "post_type": "post", 524 | }, 525 | "content": { 526 | "title": "Lanieri on LinkedIn: Lanieri torna in Lussemburgo. Siamo lieti di annunciare che dal 7 al 9…", 527 | "text": "Lanieri torna in Lussemburgo. Siamo lieti di annunciare che dal 7 al 9 novembre il nostro Trunk Show Su Misura fa tappa in Lussemburgo. Crea il tuo pezzo unico insieme ai nostri Style Advisor: scegli il tessuto, i dettagli e la vestibilità del tuo capo: noi lo realizzeremo per te in sole quattro settimane. Ci vediamo all'Hotel Le Royal, Boulevard Royal 12. 
Prenota il tuo appuntamento qui https://bit.ly/4hgYgyk", 528 | "images": [ 529 | "https://media.licdn.com/dms/image/v2/D4D22AQHbmc9Vn-NP5Q/feedshare-shrink_2048_1536/feedshare-shrink_2048_1536/0/1729602070140?e=2147483647&v=beta&t=gt-rNjUJR_ZMVDjNfwmtx3mwBpR3UjCdtVjoj2ZsAv0" 530 | ], 531 | }, 532 | "engagement": {"likes": 12, "comments": 0}, 533 | "company_info": { 534 | "name": "Lanieri", 535 | "followers": 5768, 536 | "account_type": "Organization", 537 | "profile_url": "https://it.linkedin.com/company/lanieri", 538 | }, 539 | } 540 | ``` 541 | 542 | 👉 View [Full JSON Response Sample](https://github.com/luminati-io/LinkedIn-Scraper/blob/main/linkedin_scraper_api_data/linkedin_posts_company_url.json) 543 | 544 | #### Code Example 545 | Customize the company URLs and date ranges to retrieve posts from specific company pages. 546 | ```python 547 | companies = [ 548 | {"url": "https://www.linkedin.com/company/green-philly"}, 549 | {"url": "https://www.linkedin.com/company/lanieri"}, 550 | {"url": "https://www.linkedin.com/company/effortel"}, 551 | ] 552 | ``` 553 | 554 | 👉 View [Full Python Code](https://github.com/luminati-io/LinkedIn-Scraper/blob/main/linkedin_scraper_api_codes/linkedin_posts_by_company_url.py) 555 | 556 | ### 8. Job Listings Collection by URL 557 | Extract complete information about specific job listings using their URLs. 558 | 559 | linkedin-scraper-bright-data-screenshot-linkedin_jobs_by_url 560 | 561 | #### Input Parameters 562 | | Parameter | Type | Required | Description | 563 | |-----------|--------|----------|------------------------------| 564 | | `url` | string | Yes | LinkedIn job listing URL | 565 | 566 | #### Sample Response 567 | ```json 568 | { 569 | "job_info": { 570 | "id": "4073552631", 571 | "title": "Data Platform Engineer", 572 | "location": "Tel Aviv-Yafo, Tel Aviv District, Israel", 573 | "posted_date": "2024-11-22T09:41:10.107Z", 574 | "posted_time": "1 month ago", 575 | "employment_type": "Full-time", 576 | "function": "Engineering and Information Technology", 577 | "seniority_level": "Not Applicable", 578 | "industries": "Computer and Network Security", 579 | "applicants": 85, 580 | "apply_link": "https://www.linkedin.com/jobs/view/externalApply/4073552631?url=https%3A%2F%2Fcycode%2Ecom%2Fcareers%2Fposition%2F%3Fpos_title%3Ddata-platform-engineer%26pos_id%3D53%2ED48%26coref%3D1%2E11%2Ep9D_4217&urlHash=c1hm", 581 | }, 582 | "company": { 583 | "name": "Cycode | Complete ASPM", 584 | "id": "40789623", 585 | "logo": "https://media.licdn.com/dms/image/v2/D4D0BAQFsSsfzqEVWtw/company-logo_100_100/company-logo_100_100/0/1689682315729/cycode_logo?e=2147483647&v=beta&t=h91f6XM-5MGHa5FDhMCVtXy7Me0S8YQIPRAYUc4UVC0", 586 | "url": "https://www.linkedin.com/company/cycode", 587 | }, 588 | "description": { 589 | "summary": "This is a unique opportunity to join an exciting early-stage startup experiencing hypergrowth in a white-hot segment of the cybersecurity space. 
Cycode is a fast-growing cybersecurity startup and the creator of the first comprehensive software supply chain security solution...", 590 | "requirements": [ 591 | "Bachelor's degree in a relevant field such as Statistics, Mathematics, Computer Science, or Economics", 592 | "Proven experience in building, deploying, and monitoring of ETLs", 593 | "Proficiency in data analysis tools such as SQL, Python, Pandas, Apache Spark / Beam", 594 | "Good understanding of data modeling principles", 595 | "Familiarity with data visualization tools", 596 | ], 597 | "advantages": ["MongoDB", "AWS Cloud", "CICD, Docker Kubernetes"], 598 | }, 599 | } 600 | ``` 601 | 602 | 👉 View [Full JSON Response Sample](https://github.com/luminati-io/LinkedIn-Scraper/blob/main/linkedin_scraper_api_data/linkedin_jobs_url.json) 603 | 604 | #### Code Example 605 | Update the job URLs to collect information about specific job listings. 606 | ```python 607 | job_searches = [ 608 | {"url": "https://www.linkedin.com/jobs/view/4073552631"}, 609 | {"url": "https://www.linkedin.com/jobs/view/4073729630"}, 610 | ] 611 | ``` 612 | 613 | 👉 View [Full Python Code](https://github.com/luminati-io/LinkedIn-Scraper/blob/main/linkedin_scraper_api_codes/linkedin_jobs_by_url.py) 614 | 615 | 616 | ### 9. Job Listings Discovery by Keyword 617 | Extract job listings using advanced search criteria and filters to find relevant opportunities. 618 | 619 | linkedin-scraper-bright-data-screenshot-linkedin_jobs_by_keyword 620 | 621 | #### Input Parameters 622 | | Parameter | Type | Required | Description | 623 | |--------------------|---------|----------|--------------------------------------------------------------------------------------------------| 624 | | `location` | string | Yes | Collect jobs in a specific location | 625 | | `keyword` | string | No | Search for jobs by keyword or title (e.g., "Product Manager"). Use quotation marks for exact matches. 
| 626 | | `country` | string | No | 2-letter country code (e.g., US or FR) | 627 | | `time_range` | string | No | Time range of job posting (e.g., past 24 hours, past week) | 628 | | `job_type` | string | No | Filter by job type (e.g., full-time, part-time, contract) | 629 | | `experience_level` | string | No | Filter by required experience level (e.g., entry, mid, senior) | 630 | | `remote` | string | No | Filter jobs by remote work options | 631 | | `company` | string | No | Search jobs at a specific company | 632 | | `selective_search` | boolean | No | When set to `true`, excludes titles that do not contain the specified keywords | 633 | 634 | 635 | #### Sample Response 636 | ```json 637 | { 638 | "job_info": { 639 | "id": "4096670538", 640 | "title": "Remote Part-Time Focus Group Participants (Up To $750/Week)", 641 | "posted_date": "2024-12-15T09:16:55.932Z", 642 | "posted_time": "1 week ago", 643 | "location": {"city": "Bronx", "state": "NY", "country": "US"}, 644 | "type": { 645 | "employment": "Part-time", 646 | "level": "Entry level", 647 | "function": "Other", 648 | "industry": "Market Research", 649 | "remote": true, 650 | }, 651 | "applicants": 25, 652 | "apply_link": "https://www.linkedin.com/jobs/view/externalApply/4096670538?url=https%3A%2F%2Fwww%2Ecollegerecruiter%2Ecom%2Fjob%2F1447234465%3Fr%3D1%26source%3D101%26ids%3D513&urlHash=Nagt", 653 | }, 654 | "company": { 655 | "name": "Apex Focus Group", 656 | "id": "89885194", 657 | "logo": "https://media.licdn.com/dms/image/v2/C560BAQHmbh3iXrrrEA/company-logo_100_100/company-logo_100_100/0/1670524954585?e=2147483647&v=beta&t=n2mnVpQTNpofk7mrixyy7aBax0fXqhY031fijCPtp14", 658 | "url": "https://www.linkedin.com/company/apex-focus-group", 659 | }, 660 | "compensation": { 661 | "per_session": "$75-$150 (1 hour)", 662 | "multi_session": "$300-$750", 663 | "frequency": "weekly", 664 | }, 665 | "requirements": { 666 | "technical": [ 667 | "Smartphone with working camera or desktop/laptop with webcam", 668 | "High speed internet connection", 669 | ], 670 | "responsibilities": [ 671 | "Show up 10 mins before discussion start time", 672 | "Complete written and oral instructions", 673 | "Complete surveys for each panel", 674 | "Use and discuss provided products/services", 675 | ], 676 | }, 677 | "search_parameters": { 678 | "keyword": "data analyst", 679 | "location": "New York", 680 | "job_type": "Part-time", 681 | "experience": "Entry level", 682 | "remote": "Remote", 683 | "country": "US", 684 | }, 685 | } 686 | ``` 687 | 688 | 👉 View [Full JSON Response Sample](https://github.com/luminati-io/LinkedIn-Scraper/blob/main/linkedin_scraper_api_data/linkedin_jobs_keyword.json) 689 | 690 | #### Code Example 691 | Customize these search criteria to find specific job opportunities across different locations and requirements. 
692 | ```python 693 | search_criteria = [ 694 | { 695 | "location": "New York", 696 | "keyword": "data analyst", 697 | "country": "US", 698 | "time_range": "Any time", 699 | "job_type": "Part-time", 700 | "experience_level": "Entry level", 701 | "remote": "Remote", 702 | "company": "", 703 | }, 704 | { 705 | "location": "paris", 706 | "keyword": "product manager", 707 | "country": "FR", 708 | "time_range": "Past month", 709 | "job_type": "Full-time", 710 | "experience_level": "Internship", 711 | "remote": "On-site", 712 | "company": "", 713 | }, 714 | { 715 | "location": "New York", 716 | "keyword": '"python developer"', 717 | "country": "", 718 | "time_range": "", 719 | "job_type": "", 720 | "experience_level": "", 721 | "remote": "", 722 | "company": "", 723 | }, 724 | ] 725 | ``` 726 | 727 | 👉 View [Full Python Code](https://github.com/luminati-io/LinkedIn-Scraper/blob/main/linkedin_scraper_api_codes/linkedin_jobs_by_keyword.py) 728 | 729 | ### 10. Job Listings Discovery by URL 730 | Extract job listings using direct LinkedIn search URLs 731 | 732 | linkedin-scraper-bright-data-screenshot-linkedin_jobs_by_search_url 733 | 734 | #### Input Parameters 735 | | Parameter | Type | Required | Description | 736 | |--------------------|---------|----------|---------------------------------------------------------------------------------------------------| 737 | | `url` | string | Yes | Direct LinkedIn search URL (e.g., company search or keyword-based search) | 738 | | `selective_search` | boolean | No | When set to `true`, excludes titles that do not contain the specified keywords | 739 | 740 | > **Note:** To implement a time range filter, calculate the desired range in seconds (`hours * 3600`) and update the `&f_TPR` parameter in the LinkedIn search URL. 
741 | > 742 | > - Use `f_TPR=r3600` for past hour 743 | > - Use `f_TPR=r86400` for past 24 hours 744 | > - Use `f_TPR=r604800` for past week 745 | 746 | #### Sample Response 747 | ```json 748 | { 749 | "job_info": { 750 | "id": "4107998267", 751 | "title": "Software Engineer, Professional Services", 752 | "location": "Tel Aviv District, Israel", 753 | "posted": {"date": "2024-12-22T08:39:21.666Z", "time_ago": "1 hour ago"}, 754 | "type": { 755 | "employment": "Full-time", 756 | "level": "Entry level", 757 | "function": "Information Technology", 758 | "industry": "Software Development", 759 | }, 760 | "applicants": 25, 761 | "apply_link": "https://www.linkedin.com/jobs/view/externalApply/4107998267?url=https%3A%2F%2Fwww%2Efireblocks%2Ecom%2Fcareers%2Fcurrent-openings%2F4426623006%3Fgh_jid%3D4426623006", 762 | }, 763 | "company": { 764 | "name": "Fireblocks", 765 | "id": "14824547", 766 | "logo": "https://media.licdn.com/dms/image/v2/C4D0BAQEyT6gpuwTpPg/company-logo_100_100/company-logo_100_100/0/1630561416766/fireblocks_logo?e=2147483647&v=beta&t=MNcf2cPIzbPMdPDbsidFZBlEVWQHcHK-QimzqSaimww", 767 | "url": "https://www.linkedin.com/company/fireblocks", 768 | }, 769 | "requirements": { 770 | "core": [ 771 | "2+ years of software development experience", 772 | "Proficiency in JavaScript, TypeScript, and Python", 773 | "Strong understanding of frontend and backend technologies", 774 | "Experience with SQL and NoSQL databases", 775 | "Familiarity with Docker and Kubernetes", 776 | "Knowledge of blockchain and crypto development", 777 | "Understanding of security protocols", 778 | ], 779 | "nice_to_have": [ 780 | "Experience with Fireblocks or similar crypto platforms", 781 | "Knowledge of cloud platforms (AWS, GCP, Azure)", 782 | ], 783 | }, 784 | "responsibilities": [ 785 | "Collaborate with clients on technical requirements", 786 | "Build custom tools and integrations", 787 | "Work on frontend and backend components", 788 | "Assist with API integration", 789 | "Provide technical training", 790 | "Stay updated on blockchain trends", 791 | ], 792 | } 793 | ``` 794 | 795 | 👉 View [Full JSON Response Sample](https://github.com/luminati-io/LinkedIn-Scraper/blob/main/linkedin_scraper_api_data/linkedin_jobs_search_url.json) 796 | 797 | #### Code Example 798 | Modify these search URLs to collect job listings from specific companies or search results. 
797 | #### Code Example 798 | Modify these search URLs to collect job listings from specific companies or search results. 799 | ```python 800 | search_urls = [ 801 | { 802 | "url": "https://www.linkedin.com/jobs/search?keywords=Software&location=Tel%20Aviv-Yafo&geoId=101570771&trk=public_jobs_jobs-search-bar_search-submit&position=1&pageNum=0&f_TPR=r3600" 803 | }, 804 | {"url": "https://www.linkedin.com/jobs/semrush-jobs?f_C=2821922"}, 805 | {"url": "https://www.linkedin.com/jobs/reddit-inc.-jobs-worldwide?f_C=150573"}, 806 | ] 807 | ``` 808 | 809 | 👉 View [Full Python Code](https://github.com/luminati-io/LinkedIn-Scraper/blob/main/linkedin_scraper_api_codes/linkedin_jobs_by_search_url.py) 810 | 811 | 812 | ## Data Collection Approaches 813 | You can use the following parameters to fine-tune your results: 814 | | **Parameter** | **Type** | **Description** | **Example** | 815 | |---------------------|------------|------------------------------------------------------------|------------------------------| 816 | | `limit` | `integer` | Max results per input | `limit=10` | 817 | | `include_errors` | `boolean` | Get error reports for troubleshooting | `include_errors=true` | 818 | | `notify` | `url` | Webhook URL to notify upon completion | `notify=https://notify-me.com/` | 819 | | `format` | `enum` | Output format (e.g., JSON, NDJSON, JSONL, CSV) | `format=json` | 820 | 821 | 💡 **Pro Tip:** You can also deliver the data to an [external storage](https://docs.brightdata.com/scraping-automation/web-data-apis/web-scraper-api/overview#via-deliver-to-external-storage) or to a [webhook](https://docs.brightdata.com/scraping-automation/web-data-apis/web-scraper-api/overview#via-webhook). 822 | 823 | ---- 824 | 825 | Need more details? Check the [official API docs](https://docs.brightdata.com/scraping-automation/web-data-apis/web-scraper-api/overview). 826 | -------------------------------------------------------------------------------- /linkedin_scraper_api_data/linkedin_company_info.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "input": { 4 | "url": "https://www.linkedin.com/organization-guest/company/the-kraft-heinz-company" 5 | }, 6 | "id": "the-kraft-heinz-company", 7 | "name": "Kraft Heinz", 8 | "country_code": "US", 9 | "locations": [ 10 | "200 E. Randolph St. Suite 7600 Chicago, IL 60601, US" 11 | ], 12 | "followers": 1557451, 13 | "employees_in_linkedin": 25254, 14 | "about": "The Kraft Heinz Company is one of the largest food and beverage companies in the world, with eight $1 billion+ brands and global sales of approximately $25 billion. We’re a globally trusted producer of high-quality, great-tasting, and nutritious foods for over 150 years. While Kraft Heinz is co-headquartered in Chicago and Pittsburgh, our brands are truly global, with products produced and marketed in over 40 countries. These beloved products include condiments and sauces, cheese and dairy, meals, meats, refreshment beverages, coffee, infant and nutrition products, and numerous other grocery products in a portfolio of more than 200 legacy and emerging brands. We spark joy around mealtime with our iconic brands, including Kraft, Oscar Mayer, Heinz, Philadelphia, Lunchables, Velveeta, Maxwell House, Capri Sun, Ore-Ida, Kool-Aid, Jell-O, Primal Kitchen, and Classico, among others. No matter the brand, we’re united under one vision: To sustainably grow by delighting more consumers globally. Bringing this vision to life is our team of 37,000+ food lovers, creative thinkers, and high performers worldwide.
Together, we help provide meals to those in need through our global partnership with Rise Against Hunger. We also stand committed to responsible, sustainable practices that extend to every facet of our business, our consumers, and our communities. Every day, we’re transforming the food industry with bold thinking and unprecedented results. If you share our passion – and are ready to create the future, build a legacy, and lead as a global citizen – there’s only one thing to do: join our table and let’s make life delicious!", 15 | "specialties": "Food, Fast Moving Consumer Packaged Goods, CPG, and Consumer Packaged Goods", 16 | "company_size": "10,001+ employees", 17 | "organization_type": "Public Company", 18 | "industries": "Food and Beverage Services", 19 | "website": "https://www.careers.kraftheinz.com/", 20 | "crunchbase_url": "https://www.crunchbase.com/organization/the-kraft-heinz-company?utm_source=linkedin&utm_medium=referral&utm_campaign=linkedin_companies&utm_content=profile_cta_anon&trk=funding_crunchbase", 21 | "founded": 2015, 22 | "company_id": "164197", 23 | "employees": [ 24 | { 25 | "img": "https://media.licdn.com/dms/image/v2/C4E03AQE92ZKlIvyvzQ/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1516179219523?e=2147483647&v=beta&t=38SnIecPAeE_Q_kwXUmP3SvBIKqulCTWpSmC7mcwEmY", 26 | "link": "https://nl.linkedin.com/in/eder-j-martins?trk=org-employees", 27 | "subtitle": "Head of Global Procurement Finance at The Kraft Heinz Company", 28 | "title": "Eder J. Martins" 29 | }, 30 | { 31 | "img": "https://media.licdn.com/dms/image/v2/C4D03AQENbGTy7v_cSg/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1565662052636?e=2147483647&v=beta&t=7v9ZrtgSyT1I8cL1TCKhlUUAGcrE9sZlD5ccSCXlzfQ", 32 | "link": "https://www.linkedin.com/in/thiago-dedavid-bastos?trk=org-employees", 33 | "title": "Thiago Dedavid Bastos" 34 | }, 35 | { 36 | "img": "https://media.licdn.com/dms/image/v2/D5603AQGxJWwgJ2s3oQ/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1676829641186?e=2147483647&v=beta&t=HwFcha2Lgn9klGu3ga7or9pzPOAxCHsNit74OfSTyy4", 37 | "link": "https://www.linkedin.com/in/brianley?trk=org-employees", 38 | "subtitle": "Global Consumer Insights Executive Leadership | Kraft Heinz", 39 | "title": "Brian Ley" 40 | }, 41 | { 42 | "img": "https://media.licdn.com/dms/image/v2/D4D03AQEhiyZ00P0cBQ/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1713299331512?e=2147483647&v=beta&t=WD4ZhdoKmDP0kKum7TdpNtJTRdG2Ts638U0dlte7NhQ", 43 | "link": "https://nl.linkedin.com/in/willembrandt?trk=org-employees", 44 | "subtitle": "Zone President Europe and Pacific Developed Markets, and Member of the Executive Leadership Team at Kraft Heinz | Advisory Board Member (NED) at Just…", 45 | "title": "Willem Brandt" 46 | } 47 | ], 48 | "headquarters": "Chicago, IL", 49 | "image": "https://media.licdn.com/dms/image/v2/D563DAQG4j2eF_1HAXQ/image-scale_191_1128/image-scale_191_1128/0/1681740656085/the_kraft_heinz_company_cover?e=2147483647&v=beta&t=c41tOgrB-ADsTUxh9n1fcACSrFrmPh6bwlGlVbhIn8M", 50 | "logo": "https://media.licdn.com/dms/image/v2/D4E0BAQHvZJa5VpJ9MQ/company-logo_200_200/company-logo_200_200/0/1722866409756/the_kraft_heinz_company_logo?e=2147483647&v=beta&t=5mvQFNEyhfLzv51OHcigrzgKNAWHYdSCYDdNebkaVbY", 51 | "similar": [ 52 | { 53 | "Links": "https://www.linkedin.com/company/mondelezinternational?trk=similar-pages", 54 | "subtitle": "Food and Beverage Manufacturing", 55 | "title": "Mondelēz International", 56 | "location": 
"Greater Chicago Area, IL" 57 | }, 58 | { 59 | "Links": "https://ch.linkedin.com/company/nestle-s-a-?trk=similar-pages", 60 | "subtitle": "Food and Beverage Services", 61 | "title": "Nestlé" 62 | }, 63 | { 64 | "Links": "https://www.linkedin.com/company/pepsico?trk=similar-pages", 65 | "subtitle": "Food and Beverage Services", 66 | "title": "PepsiCo", 67 | "location": "Purchase, New York" 68 | }, 69 | { 70 | "Links": "https://uk.linkedin.com/company/unilever?trk=similar-pages", 71 | "subtitle": "Manufacturing", 72 | "title": "Unilever", 73 | "location": "Blackfriars, London" 74 | }, 75 | { 76 | "Links": "https://www.linkedin.com/company/mars?trk=similar-pages", 77 | "subtitle": "Manufacturing", 78 | "title": "Mars", 79 | "location": "McLean, Virginia" 80 | }, 81 | { 82 | "Links": "https://www.linkedin.com/company/procter-and-gamble?trk=similar-pages", 83 | "subtitle": "Manufacturing", 84 | "title": "Procter & Gamble", 85 | "location": "Cincinnati, Ohio" 86 | }, 87 | { 88 | "Links": "https://fr.linkedin.com/company/danone?trk=similar-pages", 89 | "subtitle": "Food and Beverage Manufacturing", 90 | "title": "Danone" 91 | }, 92 | { 93 | "Links": "https://www.linkedin.com/company/the-coca-cola-company?trk=similar-pages", 94 | "subtitle": "Food and Beverage Services", 95 | "title": "The Coca-Cola Company", 96 | "location": "Atlanta, GA" 97 | }, 98 | { 99 | "Links": "https://fr.linkedin.com/company/lor%C3%A9al?trk=similar-pages", 100 | "subtitle": "Personal Care Product Manufacturing", 101 | "title": "L'Oréal" 102 | }, 103 | { 104 | "Links": "https://www.linkedin.com/company/general-mills?trk=similar-pages", 105 | "subtitle": "Manufacturing", 106 | "title": "General Mills", 107 | "location": "Minneapolis, Minnesota" 108 | } 109 | ], 110 | "url": "https://www.linkedin.com/company/the-kraft-heinz-company", 111 | "updates": [ 112 | { 113 | "likes_count": 199, 114 | "text": "After 16 years, Paris continues to show what's possible when hard work meets opportunity 💪 From starting on the production floor to stepping into leadership positions, Paris has charted an inspiring path of growth. Her story reflects how, here at Kraft Heinz, we're committed to empowering our people and helping them build lasting careers 🌱 If you're ready to begin building your own legacy, head to our careers site: https://bit.ly/3ZBFutB #HereAtKraftHeinz #LetsMakeLifeDelicious", 115 | "time": "2d", 116 | "title": "Kraft Heinz", 117 | "comments_count": 28, 118 | "post_url": "https://www.linkedin.com/posts/the-kraft-heinz-company_paris-journey-at-kraft-heinz-activity-7275544184424464384-s1sZ", 119 | "post_id": "7275544184424464384", 120 | "date": "2024-12-20T06:43:58.330Z" 121 | }, 122 | { 123 | "likes_count": 340, 124 | "text": "2024 was a memorable year for Kraft Heinz! 👏 We introduced our new company Dream and 10 Year Strategy, brought home our first-ever Grand Prix at Cannes and earned a spot on Fast Company’s Most Innovative Companies list. None of this would have been possible without the dedication and creativity of our incredible employees. Their sense of ownership, passion and hard work made 2024 unforgettable. 🎥 Join us in celebrating the moments that defined 2024. 
Here’s to an even brighter 2025!", 125 | "time": "4d", 126 | "title": "Kraft Heinz", 127 | "comments_count": 11, 128 | "videos": [ 129 | "https://dms.licdn.com/playlist/vid/v2/D4E10AQGSn_xIHEV6tw/mp4-640p-30fp-crf28/mp4-640p-30fp-crf28/0/1734436522853?e=2147483647&v=beta&t=7rIajPNyBAHbnSb3FFx5eXdjqK5ywofwwkaVwQY_wPA", 130 | "https://dms.licdn.com/playlist/vid/v2/D4E10AQGSn_xIHEV6tw/mp4-360p-30fp-crf28/mp4-360p-30fp-crf28/0/1734436517602?e=2147483647&v=beta&t=QDGNg5R7KEGx1o9rVO_RiJWTpt9I5ttu-ima9r3ZjtI", 131 | "https://dms.licdn.com/playlist/vid/v2/D4E10AQGSn_xIHEV6tw/mp4-720p-30fp-crf28/mp4-720p-30fp-crf28/0/1734436524454?e=2147483647&v=beta&t=szAm5QAZPczYpPmrxZ_CUf-yRA3KwHmby3RW4RUi40U" 132 | ], 133 | "post_url": "https://www.linkedin.com/posts/the-kraft-heinz-company_2024-was-a-memorable-year-for-kraft-heinz-activity-7274754216022228992-RjhQ", 134 | "post_id": "7274754216022228992", 135 | "date": "2024-12-18T06:43:58.332Z" 136 | }, 137 | { 138 | "likes_count": 309, 139 | "text": "Shaping the leaders of tomorrow starts today 🫶 We're proud to be honored as one of TIME 's Best Companies for Future Leaders. This recognition reflects our unwavering commitment to helping every team member build a career filled with growth and purpose. We're dedicated to building a culture that can grow their potential and make an impact that matters 🌱 Begin your journey with us today by visiting our careers site: https://bit.ly/4hCBO2I #HereAtKraftHeinz #TIMEFutureLeaders #LetsMakeLifeDelicious", 140 | "time": "1w", 141 | "title": "Kraft Heinz", 142 | "comments_count": 7, 143 | "videos": [ 144 | "https://dms.licdn.com/playlist/vid/v2/D4E10AQHROUlwv6ltMA/mp4-640p-30fp-crf28/mp4-640p-30fp-crf28/0/1733943620521?e=2147483647&v=beta&t=TbfX1_cDOSRxUfioPKg_DjkyGPYc4WuOVpbAXbzYmYo", 145 | "https://dms.licdn.com/playlist/vid/v2/D4E10AQHROUlwv6ltMA/mp4-360p-30fp-crf28/mp4-360p-30fp-crf28/0/1733943620331?e=2147483647&v=beta&t=oeeWym4qxGLAgHn49frwF5IgneRxUJmUClk8wn1-kWQ", 146 | "https://dms.licdn.com/playlist/vid/v2/D4E10AQHROUlwv6ltMA/mp4-720p-30fp-crf28/mp4-720p-30fp-crf28/0/1733943620653?e=2147483647&v=beta&t=_WS-OoxFkYaO7I4gtLLBqOHwAtYYmhcFUNXlYcgdZtQ" 147 | ], 148 | "post_url": "https://www.linkedin.com/posts/the-kraft-heinz-company_hereatkraftheinz-timefutureleaders-letsmakelifedelicious-activity-7272686720599396352-1xZD", 149 | "post_id": "7272686720599396352", 150 | "date": "2024-12-15T06:43:58.334Z" 151 | }, 152 | { 153 | "likes_count": 234, 154 | "text": "The holiday season just got tastier! 🎄🍴 Watch our latest “In the Kitchen” episode featuring Chef Eric Pilgram and Caroline Boulos, President of Hydration & Desserts in North America, as they whip up three unforgettable recipes using Kraft Heinz favorites: ✨ A Caesar salad with air-fried Ore-Ida Tater Tot croutons & Kraft Caesar Dressing 🍫 A hot cocoa dip with Jet-Puffed, Philly Cream Cheese, JELL-O, and Baker’s Chocolate 💧 A mio beverage cart for mix-and-match sparkling drinks Bring bold flavors to your holiday table with these fun and unique recipes. 
Watch now and to get inspired, and snag the recipes in the description here: https://bit.ly/3ZIizxL 🍫", 155 | "time": "1w", 156 | "title": "Kraft Heinz", 157 | "comments_count": 8, 158 | "videos": [ 159 | "https://dms.licdn.com/playlist/vid/v2/D4E10AQGjw2AwePRGTw/mp4-640p-30fp-crf28/mp4-640p-30fp-crf28/0/1733862309336?e=2147483647&v=beta&t=RiDT1TJGFCE6ncprux2XSLfO0YRJMwrkpXvtZHTADlQ", 160 | "https://dms.licdn.com/playlist/vid/v2/D4E10AQGjw2AwePRGTw/mp4-360p-30fp-crf28/mp4-360p-30fp-crf28/0/1733862309300?e=2147483647&v=beta&t=aDgQzJL6eYH4nsiMpXfgBE6Lp4bWN2gkOc7RjeSgLes", 161 | "https://dms.licdn.com/playlist/vid/v2/D4E10AQGjw2AwePRGTw/mp4-720p-30fp-crf28/mp4-720p-30fp-crf28/0/1733862321828?e=2147483647&v=beta&t=-c2hBeV0pxZ_a8ohbat0qZsXZ_Ktbe_Rct52_9haebc" 162 | ], 163 | "post_url": "https://www.linkedin.com/posts/the-kraft-heinz-company_the-holiday-season-just-got-tastier-activity-7272345807243153408-fZFs", 164 | "post_id": "7272345807243153408", 165 | "date": "2024-12-15T06:43:58.335Z" 166 | }, 167 | { 168 | "likes_count": 304, 169 | "text": "At Kraft Heinz, we are committed to nourishing the world and leading the future of food. Our 2024 ESG Report highlights the partnerships, initiatives and innovations that are helping us achieve this ambition. Thanks to our partnerships, we provided approximately 368 million meals to people in need in 2023 – putting us 90% of the way to our 2025 goal to provide 1.5 billion meals to people in need. While there is more work to do, we remain committed to our journey toward a more sustainable future. Learn more in our 2024 ESG Report: https://lnkd.in/gsz-GzVu", 170 | "time": "2w", 171 | "title": "Kraft Heinz", 172 | "comments_count": 8, 173 | "images": [ 174 | "https://media.licdn.com/dms/image/v2/D5622AQH7MtfKOoO1bw/feedshare-shrink_800/feedshare-shrink_800/0/1733503454979?e=2147483647&v=beta&t=jiDGdVYYQEpj1rXYuplx_imDqmxiHE73UvPOd62lYAA" 175 | ], 176 | "post_url": "https://www.linkedin.com/posts/the-kraft-heinz-company_at-kraft-heinz-we-are-committed-to-nourishing-activity-7270840481893249024-IvVO", 177 | "post_id": "7270840481893249024", 178 | "date": "2024-12-08T06:43:58.336Z" 179 | }, 180 | { 181 | "likes_count": 81, 182 | "text": "On World Soil Day, we’re celebrating our roots—quite literally. As an agricultural company at heart, we’re committed to leading the future of food through sustainable and regenerative farming practices. One of our longest-standing partnerships is with Conesa Group in Spain, where we’ve been working together for 20 years to enhance soil health, protect biodiversity and secure the future of food. From cover cropping to reducing water use and waste, we’re investing in sustainable practices so we can provide the food you love for generations to come. #WorldSoilDay", 183 | "time": "2w", 184 | "title": "Kraft Heinz", 185 | "comments_count": 1, 186 | "post_url": "https://www.linkedin.com/posts/the-kraft-heinz-company_world-soil-day-activity-7270500456206868482-oOe_", 187 | "post_id": "7270500456206868482", 188 | "date": "2024-12-08T06:43:58.338Z" 189 | }, 190 | { 191 | "likes_count": 714, 192 | "text": "It’s official – we’ve certified as a Great Place to Work in 22 countries! ❤️ These certifications highlight our commitment to creating an inclusive, supportive environment where everyone can thrive. Every day, our teams raise the bar together—whether it’s driving innovation, solving complex challenges, or simply lending a helping hand to a colleague. 
Thank you to our incredible people for making Kraft Heinz a truly great place to work 🌟 Want to help us grow greatness? Head to our careers site to learn more: https://bit.ly/3ZzaiML #HereAtKraftHeinz #GreatPlaceToWork #LetsMakeLifeDelicious", 193 | "time": "2w", 194 | "title": "Kraft Heinz", 195 | "comments_count": 18, 196 | "videos": [ 197 | "https://dms.licdn.com/playlist/vid/v2/D5610AQHa3elWJGyhnw/mp4-640p-30fp-crf28/mp4-640p-30fp-crf28/0/1733328102472?e=2147483647&v=beta&t=kY9OQgHkC8WVa2cD6AsU42o9FLbr95ZxKnICalg8CNw", 198 | "https://dms.licdn.com/playlist/vid/v2/D5610AQHa3elWJGyhnw/mp4-360p-30fp-crf28/mp4-360p-30fp-crf28/0/1733328061911?e=2147483647&v=beta&t=40bh0huO3W2KZfcc3dpuuV07vP7ZKg_nkEuKr7vWt1o", 199 | "https://dms.licdn.com/playlist/vid/v2/D5610AQHa3elWJGyhnw/mp4-720p-30fp-crf28/mp4-720p-30fp-crf28/0/1733328056398?e=2147483647&v=beta&t=gubFY2ygc7sr0JJFyJRFwRfhwoE7_bpTb_bOqQt5F6k" 200 | ], 201 | "post_url": "https://www.linkedin.com/posts/the-kraft-heinz-company_hereatkraftheinz-greatplacetowork-letsmakelifedelicious-activity-7270105332289282050-3Xnp", 202 | "post_id": "7270105332289282050", 203 | "date": "2024-12-08T06:43:58.339Z" 204 | }, 205 | { 206 | "likes_count": 135, 207 | "text": "In honor of International Day of Persons with Disabilities, our team members, Elijah and Kassie, are sharing their own experiences, and highlighting some ways we can all support, uplift and empower this community inside and outside of Kraft Heinz. We believe that our unique perspectives make us a stronger and more innovative Company. The more we listen to the perspectives of those with different lived experiences than our own, the more we can unlock untapped potential. 🌟 #IDPWD #HereAtKraftHeinz", 208 | "time": "2w", 209 | "title": "Kraft Heinz", 210 | "comments_count": 1, 211 | "videos": [ 212 | "https://dms.licdn.com/playlist/vid/v2/D4E05AQFDZr4AXKbYxw/mp4-640p-30fp-crf28/mp4-640p-30fp-crf28/0/1733261677822?e=2147483647&v=beta&t=aA92yVMovsv3z7VhXEf3JbHrQhMqFLOLO_5lfkpxQ_Y", 213 | "https://dms.licdn.com/playlist/vid/v2/D4E05AQFDZr4AXKbYxw/mp4-720p-30fp-crf28/mp4-720p-30fp-crf28/0/1733261677533?e=2147483647&v=beta&t=spywrlKON7sxYa5EJXw0zPR468JWFL-BZvRxD1Xvbf0" 214 | ], 215 | "post_url": "https://www.linkedin.com/posts/the-kraft-heinz-company_celebrating-international-day-of-persons-activity-7269826408212742145-px7k", 216 | "post_id": "7269826408212742145", 217 | "date": "2024-12-08T06:43:58.341Z" 218 | }, 219 | { 220 | "likes_count": 217, 221 | "text": "🎶 My bologna has a first name 🎶 50 years ago, Oscar Mayer aired “The Bologna song” for the first time, teaching millions how to spell “B-O-L-O-G-N-A\" and creating one of the most beloved and enduring songs in advertising history. Now, half a century later, we’re bringing back the nostalgia by reintroducing the world to the iconic ad! To celebrate, our employees across the country came together to sing along to the tune that has stood the test of time. Here’s to 50 years of fun, creativity and the joy that Oscar Mayer and its delicious meats have brought to tables everywhere! 
🍽️ Check out how Oscar Mayer is rewarding fans for singing along: https://bit.ly/3B9ji1z", 222 | "time": "2w", 223 | "title": "Kraft Heinz", 224 | "comments_count": 5, 225 | "videos": [ 226 | "https://dms.licdn.com/playlist/vid/v2/D4E10AQFDqHAj2S7zfw/mp4-640p-30fp-crf28/mp4-640p-30fp-crf28/0/1733182063680?e=2147483647&v=beta&t=i2o7U4ASJ2cnQGUY3F546lbH2VMY-AqUNjSObykwbt0", 227 | "https://dms.licdn.com/playlist/vid/v2/D4E10AQFDqHAj2S7zfw/mp4-360p-30fp-crf28/mp4-360p-30fp-crf28/0/1733182062699?e=2147483647&v=beta&t=YUiaLWBNkBI1DgEuOfWKXoJjNlcRgBcnYSBhmWR3YyI", 228 | "https://dms.licdn.com/playlist/vid/v2/D4E10AQFDqHAj2S7zfw/mp4-720p-30fp-crf28/mp4-720p-30fp-crf28/0/1733182063380?e=2147483647&v=beta&t=SNToyjHgPRUt4LALi4Zww5-GAwX9WRiQ02D9qynv3hA" 229 | ], 230 | "post_url": "https://www.linkedin.com/posts/the-kraft-heinz-company_my-bologna-has-a-first-name-50-years-activity-7269492485045772288-zqG0", 231 | "post_id": "7269492485045772288", 232 | "date": "2024-12-08T06:43:58.342Z" 233 | }, 234 | { 235 | "likes_count": 180, 236 | "text": "'Tis the season for gratitude and giving! 🍁 From celebrating talent and togetherness in India to honoring the Veterans who help shape our communities, November was packed with stories that highlight how we're all growing greatness, together. Join us in making life delicious - head to our careers site to learn more: https://bit.ly/3ZzaiML #HereAtKraftHeinz #LetsMakeLifeDelicious", 237 | "time": "2w", 238 | "title": "Kraft Heinz", 239 | "comments_count": 1, 240 | "post_url": "https://www.linkedin.com/posts/the-kraft-heinz-company_ketchup-with-our-month-november-activity-7269409620840833024-xcAv", 241 | "post_id": "7269409620840833024", 242 | "date": "2024-12-08T06:43:58.344Z" 243 | } 244 | ], 245 | "slogan": "Let's make life delicious!", 246 | "affiliated": [ 247 | { 248 | "title": "PT Heinz ABC Indonesia", 249 | "subtitle": "Food and Beverage Manufacturing", 250 | "location": "Central jakarta, Jakarta", 251 | "Links": "https://www.linkedin.com/company/pt-heinz-abc-indonesia?trk=affiliated-pages" 252 | }, 253 | { 254 | "title": "Wattie's (Kraft Heinz)", 255 | "subtitle": "Food and Beverage Services", 256 | "Links": "https://nz.linkedin.com/company/heinz-wattie's---an-hj-heinz-company?trk=affiliated-pages" 257 | }, 258 | { 259 | "title": "HEINZ Away From Home", 260 | "subtitle": "Food and Beverage Services", 261 | "Links": "https://www.linkedin.com/showcase/heinz-usa-awayfromhome/?trk=affiliated-pages" 262 | }, 263 | { 264 | "title": "Kraft Heinz Ingredients", 265 | "subtitle": "Food and Beverage Manufacturing", 266 | "location": "Glenview, IL", 267 | "Links": "https://www.linkedin.com/showcase/kraft-heinz-ingredients/?trk=affiliated-pages" 268 | }, 269 | { 270 | "title": "Heinz", 271 | "Links": "https://www.linkedin.com/showcase/heinz/?trk=affiliated-pages" 272 | }, 273 | { 274 | "title": "PHILADELPHIA Away From Home", 275 | "subtitle": "Food and Beverage Services", 276 | "Links": "https://www.linkedin.com/showcase/philadelphia-usa-awayfromhome/?trk=affiliated-pages" 277 | } 278 | ], 279 | "funding": { 280 | "last_round_date": "2019-07-10T00:00:00.000Z", 281 | "last_round_type": "Post IPO equity", 282 | "rounds": 1, 283 | "last_round_raised": "US$ 20.0M" 284 | }, 285 | "formatted_locations": [ 286 | "200 E. 
Randolph St., Suite 7600, Chicago, IL 60601, US" 287 | ], 288 | "stock_info": { 289 | "id": "KHC", 290 | "datetime": "December 21, 2024", 291 | "stock_exchange": "NASDAQ", 292 | "stock_ticker": "KHC", 293 | "stock_price": "$30.52", 294 | "stock_price_change": "0.42 (1.395%)", 295 | "stock_provider": "Data from Refinitiv" 296 | }, 297 | "get_directions_url": [ 298 | { 299 | "directions_url": "https://www.bing.com/maps?where=200+E.+Randolph+St.+Suite+7600+Chicago+60601+IL+US&trk=org-locations_url" 300 | } 301 | ], 302 | "description": "Kraft Heinz | 1,557,451 followers on LinkedIn. Let's make life delicious! | The Kraft Heinz Company is one of the largest food and beverage companies in the world, with eight $1 billion+ brands and global sales of approximately $25 billion. We’re a globally trusted producer of high-quality, great-tasting, and nutritious foods for over 150 years. While Kraft Heinz is co-headquartered in Chicago and Pittsburgh, our brands are truly global, with products produced and marketed in over 40 countries.", 303 | "additional_information": "Additional jobs info: Kraft Heinz (453 open jobs). Analyst (694,057 open jobs). Manager (1,880,925 open jobs). Director (1,220,357 open jobs). Intern (71,196 open jobs). Engineer (555,845 open jobs). Project Manager (253,048 open jobs). Marketing Manager (106,879 open jobs). Specialist (768,666 open jobs). Human Resources Manager (31,339 open jobs). Sales Specialist (129,703 open jobs). Supervisor (1,264,191 open jobs). Account Manager (121,519 open jobs). Graduate (361,130 open jobs). Associate (1,091,945 open jobs). Assistant (711,811 open jobs). Coordinator (545,033 open jobs). Sales Executive (122,628 open jobs). Scientist (48,969 open jobs). Human Resources Specialist (34,340 open jobs)", 304 | "country_codes_array": [ 305 | "US" 306 | ], 307 | "alumni": null, 308 | "alumni_information": null, 309 | "timestamp": "2024-12-22T06:43:58.438Z" 310 | }, 311 | { 312 | "input": { 313 | "url": "https://il.linkedin.com/company/ibm" 314 | }, 315 | "id": "ibm", 316 | "name": "IBM", 317 | "country_code": "US,ZA,UY,TH,ID,AU,CZ,SG,IT,DE,MY,FI,CO,RU,AE,FR,SK,KW,IN,BR,GR,MX,ES,AR,RO,EG", 318 | "locations": [ 319 | "International Business Machines Corp. New Orchard Road Armonk, New York, NY 10504, US", 320 | "590 Madison Ave New York, NY 10022, US", 321 | "90 Grayston Dr Sandton, Gauteng 2196, ZA", 322 | "Plaza Independencia 721 Montevideo, 11000, UY", 323 | "388 Phahon Yothin Road Phaya Thai, Bangkok City 10400, TH", 324 | "Jalan Prof. Dr. Latumenten Jakarta Barat, Jakarta 11330, ID", 325 | "30 S 17th St Philadelphia, PA 19103, US", 326 | "60 City Rd Melbourne, VIC 3006, AU", 327 | "V Parku 2294/4 Prague, Prague 148 00, CZ", 328 | "9 Changi Business Park Central 1 Singapore, Singapore 486048, SG", 329 | "Via Sciangai Rome, Laz. 00144, IT", 330 | "Nahmitzer Damm 12 Berlin, BE 12277, DE", 331 | "3031 N Rocky Point Dr W Tampa, FL 33607, US", 332 | "First Avenue Petaling Jaya, Selangor 47800, MY", 333 | "Laajalahdentie 23 Helsinki, Southern Finland 00330, FI", 334 | "Carrera 53 100-25 Bogota, Bogota, D.C. 
111111, CO", 335 | "Presnenskaya naberezhnaya 10 Moscow, Central Federal District 123112, RU", 336 | "3 Road Dubai, Dubai, AE", 337 | "71 S Wacker Dr Chicago, IL 60606, US", 338 | "50 Rue de Picpus Paris, IdF 75012, FR", 339 | "Mlynske nivy 16688/49 Bratislava, Bratislava 821 09, SK", 340 | "Shuhada'A Street Kuwait City, Kuwait City, KW", 341 | "Vasant Kunj Road Delhi, Delhi 110070, IN", 342 | "Avenida Pasteur, 138 Rio de Janeiro, RJ 22290-240, BR", 343 | "284 Leoforos Kifisias Chalandri, Attica 152 32, GR", 344 | "14212 Cochran Rd SW Huntsville, AL 35824, US", 345 | "Carretera al Castillo El Salto, JAL 45680, MX", 346 | "Calle de Corazon de Maria, 44 Madrid, Community of Madrid 28002, ES", 347 | "Technicka 2995/21 Brno, South Moravia 612 00, CZ", 348 | "150 Kettletown Rd Southbury, CT 06488, US", 349 | "601 Pacific Hwy Sydney, NSW 2065, AU", 350 | "505 Howard St San Francisco, CA 94105, US", 351 | "600 14th St NW Washington, DC 20005, US", 352 | "7100 Highlands Pkwy SE Smyrna, GA 30082, US", 353 | "1000 Belleview St Dallas, TX 75215, US", 354 | "3039 E Cornwallis Rd Durham, NC 27709, US", 355 | "Avenida Hipolito Yrigoyen 2149 Martinez, Buenos Aires 1640, AR", 356 | "Soseaua Bucuresti-Ploiesti 1A Bucharest, Bucharest, RO", 357 | "Rodovia Jorn. Francisco Aguirre Proenca Hortolandia, SP 13186-624, BR", 358 | "B-19 Noida, Uttar Pradesh 201307, IN", 359 | "Cairo Alexandria Desert Road Sixth of October, Al Jizah, EG" 360 | ], 361 | "followers": 17685425, 362 | "employees_in_linkedin": 316215, 363 | "about": "At IBM, we do more than work. We create. We create as technologists, developers, and engineers. We create with our partners. We create with our competitors. If you're searching for ways to make the world work better through technology and infrastructure, software and consulting, then we want to work with you. We're here to help every creator turn their \"what if\" into what is. 
Let's create something that will change everything.", 364 | "specialties": "Cloud, Mobile, Cognitive, Security, Research, Watson, Analytics, Consulting, Commerce, Experience Design, Internet of Things, Technology support, Industry solutions, Systems services, Resiliency services, Financing, and IT infrastructure", 365 | "company_size": "10,001+ employees", 366 | "organization_type": "Public Company", 367 | "industries": "IT Services and IT Consulting", 368 | "website": "https://www.ibm.com/", 369 | "company_id": "1009", 370 | "employees": [ 371 | { 372 | "img": "https://media.licdn.com/dms/image/v2/D5603AQG8BrSbmTF3KQ/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1698961770751?e=2147483647&v=beta&t=NIr8LzS3QQoK-FqwJ8V1ZzW9hFdKDjqxi_x5cMxMSS4", 373 | "link": "https://www.linkedin.com/in/dnielsen?trk=org-employees", 374 | "subtitle": "Developer Relations Professional with a Passion for Community", 375 | "title": "Dave Nielsen" 376 | }, 377 | { 378 | "img": "https://media.licdn.com/dms/image/v2/C4E03AQEdXwXV3KRnIg/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1561756249105?e=2147483647&v=beta&t=1nJndA8UwSc8_jr3tPTjqXrv-YFZ8ZiiLMcowKJignk", 379 | "link": "https://www.linkedin.com/in/tmarkiewicz?trk=org-employees", 380 | "subtitle": "Developer Relations - IBM Watson", 381 | "title": "Tom Markiewicz" 382 | }, 383 | { 384 | "img": "https://media.licdn.com/dms/image/v2/C4E03AQHxBEPuzK0GXw/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1516156195924?e=2147483647&v=beta&t=eHopBiWQVyb_Efgn5jANXdarYJP-vcAvoEkjEZhyv_k", 385 | "link": "https://www.linkedin.com/in/blohr?trk=org-employees", 386 | "subtitle": "Americas Software Sales Leader, IBM- Kyndryl Strategic Partnership", 387 | "title": "Bill Lohr" 388 | }, 389 | { 390 | "img": "https://media.licdn.com/dms/image/v2/C4E03AQGMC2DFp4qlkQ/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1648313945137?e=2147483647&v=beta&t=wN-6vpSp6gvcY8A_jmeSh9vaTEAEVieHcC_kD-S7zyY", 391 | "link": "https://ca.linkedin.com/in/nancy-robertson-1565?trk=org-employees", 392 | "title": "Nancy Robertson" 393 | } 394 | ], 395 | "headquarters": "Armonk, New York, NY", 396 | "image": "https://media.licdn.com/dms/image/v2/D4E3DAQFzwuYKMiB5Cg/image-scale_191_1128/image-scale_191_1128/0/1731689757964/ibm_cover?e=2147483647&v=beta&t=44QF8chAGaPlhQgEZZfTEruSWXpQEUcM9NWOG0RXqJQ", 397 | "logo": "https://media.licdn.com/dms/image/v2/D560BAQGiz5ecgpCtkA/company-logo_200_200/company-logo_200_200/0/1688684715866/ibm_logo?e=2147483647&v=beta&t=yWxQj1oew7nR92bDw8r80j2EiCwx29aNxLZktJYrsWw", 398 | "similar": [ 399 | { 400 | "Links": "https://www.linkedin.com/company/deloitte?trk=similar-pages", 401 | "subtitle": "Business Consulting and Services", 402 | "title": "Deloitte" 403 | }, 404 | { 405 | "Links": "https://www.linkedin.com/company/google?trk=similar-pages", 406 | "subtitle": "Software Development", 407 | "title": "Google", 408 | "location": "Mountain View, CA" 409 | }, 410 | { 411 | "Links": "https://www.linkedin.com/company/microsoft?trk=similar-pages", 412 | "subtitle": "Software Development", 413 | "title": "Microsoft", 414 | "location": "Redmond, Washington" 415 | }, 416 | { 417 | "Links": "https://ie.linkedin.com/company/accenture?trk=similar-pages", 418 | "subtitle": "Business Consulting and Services", 419 | "title": "Accenture" 420 | }, 421 | { 422 | "Links": "https://www.linkedin.com/company/amazon?trk=similar-pages", 423 | "subtitle": "Software Development", 424 | "title": 
"Amazon", 425 | "location": "Seattle, WA" 426 | }, 427 | { 428 | "Links": "https://in.linkedin.com/company/tata-consultancy-services?trk=similar-pages", 429 | "subtitle": "IT Services and IT Consulting", 430 | "title": "Tata Consultancy Services", 431 | "location": "Mumbai, Maharashtra" 432 | }, 433 | { 434 | "Links": "https://in.linkedin.com/company/infosys?trk=similar-pages", 435 | "subtitle": "IT Services and IT Consulting", 436 | "title": "Infosys", 437 | "location": "Bangalore, Karnataka" 438 | }, 439 | { 440 | "Links": "https://www.linkedin.com/company/cognizant?trk=similar-pages", 441 | "subtitle": "IT Services and IT Consulting", 442 | "title": "Cognizant", 443 | "location": "Teaneck, New Jersey" 444 | }, 445 | { 446 | "Links": "https://in.linkedin.com/company/wipro?trk=similar-pages", 447 | "subtitle": "IT Services and IT Consulting", 448 | "title": "Wipro", 449 | "location": "Bangalore, Karnataka" 450 | }, 451 | { 452 | "Links": "https://www.linkedin.com/company/oracle?trk=similar-pages", 453 | "subtitle": "IT Services and IT Consulting", 454 | "title": "Oracle", 455 | "location": "Austin, Texas" 456 | } 457 | ], 458 | "url": "https://www.linkedin.com/company/ibm", 459 | "updates": [ 460 | { 461 | "likes_count": 753, 462 | "text": "IBM Granite is getting stronger and shipping faster. 🔥 Today, we’re releasing IBM Granite 3.1 – the latest update to our Granite series of open and performant language models, delivering new features and significant performance improvements. What's new? We upped the context windows to 128K for all models. We added new function calling hallucination detection capabilities so users can have more control over agentic #AI workflows. Plus, we’re introducing 4 new Granite embedding models that offer multilingual support across 12 different languages. Dive into the full details here: https://ibm.biz/BdGXBh", 463 | "time": "3d", 464 | "title": "IBM", 465 | "comments_count": 44, 466 | "images": [ 467 | "https://media.licdn.com/dms/image/v2/D4E22AQHgl9Phbmnaww/feedshare-shrink_2048_1536/B4EZPaYjc.HsAo-/0/1734535691508?e=2147483647&v=beta&t=PElVtV1HANZZPGQdwdCvpf_bdPaJd1pIyMWHv4he940", 468 | "https://media.licdn.com/dms/image/v2/D4E22AQFmyMtczCZTXQ/feedshare-shrink_1280/B4EZPaYje1GcAo-/0/1734535702572?e=2147483647&v=beta&t=tJ5xa3rkJJ3UPOHnzXQzF5_wGotlueC7cOvB8FMdaYY", 469 | "https://media.licdn.com/dms/image/v2/D4E22AQEkC0V6TN1d0w/feedshare-shrink_1280/B4EZPaYjdMHAAo-/0/1734535691463?e=2147483647&v=beta&t=66z0w-AGMoH56mJWAiLMRhQMrUkz6KINDe7p-X06L34", 470 | "https://media.licdn.com/dms/image/v2/D4E22AQGHBBYEP3uf-A/feedshare-shrink_1280/B4EZPaYjeqHwAo-/0/1734535713221?e=2147483647&v=beta&t=481ihJVkynQjvEpuOmcNcq8AvIDbhFlMQkA3BMqWXZQ" 471 | ], 472 | "post_url": "https://www.linkedin.com/posts/ibm_ai-activity-7275170101132234753-PBeS", 473 | "post_id": "7275170101132234753", 474 | "date": "2024-12-19T06:44:05.703Z" 475 | }, 476 | { 477 | "likes_count": 247, 478 | "text": "The #AI skills gap is widening. Employers need skilled workers, and workers need guidance on which skills to acquire. To help close this disconnect, The AI Alliance created a comprehensive guide and framework with the support of IBM and Meta . 
Download a copy below and learn how you can better prepare for the AI-driven future: https://ibm.co/4flxMcI", 479 | "time": "2d Edited", 480 | "title": "IBM", 481 | "comments_count": 18, 482 | "images": [ 483 | "https://media.licdn.com/dms/image/v2/D5610AQFTM25xluXc_Q/image-shrink_800/image-shrink_800/0/1734624203772?e=2147483647&v=beta&t=NDIRxut1AGorFYhr5oqDDs7FzBQB2I13r_ZdUgi4meg", 484 | "https://static.licdn.com/aero-v1/sc/h/dur0ryw0e9uscxa9b6zqvgvfs" 485 | ], 486 | "post_url": "https://www.linkedin.com/posts/ibm_ai-activity-7275541259035795456-DjPC", 487 | "post_id": "7275541259035795456", 488 | "date": "2024-12-20T06:44:05.705Z" 489 | }, 490 | { 491 | "likes_count": 368, 492 | "text": "You asked about AI agents, we answered. From complex problem-solving to seamless IT automation, #AI agents will help reshape how people complete tasks in the workplace. Read more about the potential of agentic AI and the ways it'll impact businesses in 2025: https://ibm.biz/BdGERg", 493 | "time": "4d", 494 | "title": "IBM", 495 | "comments_count": 19, 496 | "post_url": "https://www.linkedin.com/posts/ibm_ai-agents-qa-activity-7274798300736753664-XDAG", 497 | "post_id": "7274798300736753664", 498 | "date": "2024-12-18T06:44:05.706Z" 499 | }, 500 | { 501 | "likes_count": 188, 502 | "text": "Explore our latest strides in technology, from exciting optics innovations to expanding quantum computing access and #AI -powered global solutions. Let’s dive in. 👇 ⚡️ Learn how we’re bringing the speed of light to #GenAI with our new co-packaged optics innovation: https://ibm.co/4iCyhlq 💡 Explore the winning solutions from the 2024 Call for Code Global Challenge that use IBM’s AI technologies to address societal issues: https://ibm.co/3BvmKDV 🌐 Read about the University of Missouri-Columbia joining the IBM Quantum Network, providing researchers and other institutions access to quantum computing via cloud: https://ibm.co/4guL2g7", 503 | "time": "1w Edited", 504 | "title": "IBM", 505 | "comments_count": 8, 506 | "images": [ 507 | "https://media.licdn.com/dms/image/v2/D4E10AQHEemgQxPB3lw/image-shrink_1280/image-shrink_1280/0/1734108238329?e=2147483647&v=beta&t=4ZSkVsVW5gr3yRPvUY2jyg0Jdo4Xa8EJqzF_2P-VATo" 508 | ], 509 | "post_url": "https://www.linkedin.com/posts/ibm_ai-genai-activity-7273377145589006336-QXj0", 510 | "post_id": "7273377145589006336", 511 | "date": "2024-12-15T06:44:05.707Z" 512 | }, 513 | { 514 | "likes_count": 208, 515 | "text": "Time for a pop quiz. How well do you know our family of IBM Granite foundation models? 🤔 Follow along as IBMers put their knowledge to the test at this year’s TechXchange conference in Las Vegas. You might even learn something new. 
😉", 516 | "time": "1w", 517 | "title": "IBM", 518 | "comments_count": 8, 519 | "videos": [ 520 | "https://dms.licdn.com/playlist/vid/v2/D5610AQGfdRUzBCRGUA/mp4-640p-30fp-crf28/mp4-640p-30fp-crf28/0/1733952272090?e=2147483647&v=beta&t=jlu2bpCh8dqEFsTh_tEAFx3O-JRQJiVQWi1gikmqjf8", 521 | "https://dms.licdn.com/playlist/vid/v2/D5610AQGfdRUzBCRGUA/mp4-360p-30fp-crf28/mp4-360p-30fp-crf28/0/1733952267676?e=2147483647&v=beta&t=ZU2nw35gEddQwClqxAGRpFkA-JpXyK4yNfQobDFfi_s", 522 | "https://dms.licdn.com/playlist/vid/v2/D5610AQGfdRUzBCRGUA/mp4-720p-30fp-crf28/mp4-720p-30fp-crf28/0/1733952269517?e=2147483647&v=beta&t=4DnRC1rOvsCT0H9K_G62ybrPsZUqvFGBsxvoTKOm7q0" 523 | ], 524 | "post_url": "https://www.linkedin.com/posts/ibm_time-for-a-pop-quiz-how-well-do-you-know-activity-7272722970286911488-lsUQ", 525 | "post_id": "7272722970286911488", 526 | "date": "2024-12-15T06:44:05.709Z" 527 | }, 528 | { 529 | "likes_count": 2399, 530 | "text": "AI in Action | 2024 Edition 28: IBM at AWS re:Invent Missed the latest from AWS re:Invent last week in Las Vegas? Find out what's new with our Amazon Web Services (AWS) partnership in today's newsletter. Read now and subscribe ⤵", 531 | "time": "1w", 532 | "title": "IBM", 533 | "comments_count": 85, 534 | "external_link": "https://www.linkedin.com/pulse/ibm-aws-reinvent-ibm-vwple?trk=organization_guest_main-feed-card_feed-article-content", 535 | "images": [ 536 | "https://media.licdn.com/dms/image/v2/D4E12AQENRrDk6xOCDw/article-cover_image-shrink_720_1280/article-cover_image-shrink_720_1280/0/1733928777148?e=2147483647&v=beta&t=LkQ1k0bV9gObviXXNFeh89bYOgpRGiCDcjCfbeMNDdk" 537 | ], 538 | "post_url": "https://www.linkedin.com/posts/ibm_ibm-at-aws-reinvent-activity-7272656405147389952-J_T9", 539 | "post_id": "7272656405147389952", 540 | "date": "2024-12-15T06:44:05.710Z" 541 | }, 542 | { 543 | "likes_count": 1079, 544 | "text": "We are excited to announce IBM watsonx Code Assistant is now available! This enterprise-grade code assistant is designed to accelerate the software development lifecycle by offering real-time code assistance. It supports Python, Java, C, C++, Go, JavaScript, Typescript and many more programming languages. Start your 30-day free trial today: https://ibm.co/3OIJ2Fc", 545 | "time": "2w", 546 | "title": "IBM watsonx", 547 | "comments_count": 27, 548 | "videos": [ 549 | "https://dms.licdn.com/playlist/vid/v2/D5610AQG19XHodLiNKg/mp4-640p-30fp-crf28/mp4-640p-30fp-crf28/0/1733407265261?e=2147483647&v=beta&t=CEY4UagY-wak1A3k_x0hxkmZdHsWCCIftF9CJbwT6I0", 550 | "https://dms.licdn.com/playlist/vid/v2/D5610AQG19XHodLiNKg/mp4-360p-30fp-crf28/mp4-360p-30fp-crf28/0/1733407267181?e=2147483647&v=beta&t=Vn_LV_QiZrZp6kIIV6ElPvsIXiM9nJlcNWhrk1krqTc", 551 | "https://dms.licdn.com/playlist/vid/v2/D5610AQG19XHodLiNKg/mp4-720p-30fp-crf28/mp4-720p-30fp-crf28/0/1733407265246?e=2147483647&v=beta&t=JIlitDkYrb2TWHBP-HPo5qjaOelecMymB6YoR7bG5A8" 552 | ], 553 | "post_url": "https://www.linkedin.com/posts/ibm_we-are-excited-to-announce-ibm-watsonx-code-activity-7270549995307634688-4XOD", 554 | "post_id": "7270549995307634688", 555 | "date": "2024-12-08T06:44:05.711Z" 556 | }, 557 | { 558 | "likes_count": 389, 559 | "text": "IBM's commitment to a better world goes beyond business. This International Volunteer Day, we're proud to launch a new series showcasing how our people are using their time and tech expertise to make a difference in their communities. 
Dive into stories of #IBMImpact : https://ibm.biz/BdGc3D", 560 | "time": "2w", 561 | "title": "IBM", 562 | "comments_count": 10, 563 | "videos": [ 564 | "https://dms.licdn.com/playlist/vid/v2/D4E05AQEGdIK77vA1kA/mp4-640p-30fp-crf28/mp4-640p-30fp-crf28/0/1733422350990?e=2147483647&v=beta&t=ULTDQxn2v8oaENmldVo2kfyx91U4o4WmQejj5z4IcEw", 565 | "https://dms.licdn.com/playlist/vid/v2/D4E05AQEGdIK77vA1kA/mp4-720p-30fp-crf28/mp4-720p-30fp-crf28/0/1733422352499?e=2147483647&v=beta&t=TRVOUMlj8HRpErSmQra-ohQg3e4p11eyi7Ebjr0iwh0" 566 | ], 567 | "post_url": "https://www.linkedin.com/posts/ibm_ibmimpact-activity-7270500355451359233-_7RJ", 568 | "post_id": "7270500355451359233", 569 | "date": "2024-12-08T06:44:05.713Z" 570 | }, 571 | { 572 | "likes_count": 416, 573 | "text": "Small models are set to make a huge impact. With IBM Granite foundation models and Instruct Lab, you can build smaller, more efficient models that leverage open-source technology and enterprise data. The result? High-quality #AI solutions at a fraction of the cost and energy. Learn about our approach to open and customizable AI in this branded article via WIRED : https://ibm.co/4fEexMm", 574 | "time": "1mo Edited", 575 | "title": "IBM", 576 | "comments_count": 40, 577 | "images": [ 578 | "https://media.licdn.com/dms/image/v2/D5610AQHFAnASyXaWcQ/image-shrink_800/image-shrink_800/0/1732202162237?e=2147483647&v=beta&t=JmSN79izG24_cyrD8mdNbwWsXfXitkylJrG9RsibHKo" 579 | ], 580 | "post_url": "https://www.linkedin.com/posts/ibm_ai-activity-7265382481019699200-azaC", 581 | "post_id": "7265382481019699200", 582 | "date": "2024-11-22T06:44:05.714Z" 583 | }, 584 | { 585 | "likes_count": 1870, 586 | "text": "The first IBM Quantum System One in the Republic of Korea is now online. Housed at Yonsei University in Seoul, South Korea, the 127-qubit system provides researchers, students, and organizations dedicated access to a utility-scale quantum computer that will help solve problems in chemistry, physics, materials, and other fields. 
Dive deeper into the announcement: https://ibm.co/493n610", 587 | "time": "1mo Edited", 588 | "title": "IBM", 589 | "comments_count": 96, 590 | "images": [ 591 | "https://media.licdn.com/dms/image/v2/D5610AQGArBrPC8zoKQ/image-shrink_1280/image-shrink_1280/0/1732127416968?e=2147483647&v=beta&t=ftdmQGF6o0oKKX45o41S08oMb0h0bAeFl0-ANjnVFe4" 592 | ], 593 | "post_url": "https://www.linkedin.com/posts/ibm_the-first-ibm-quantum-system-one-in-the-republic-activity-7265068977213587458-BXQk", 594 | "post_id": "7265068977213587458", 595 | "date": "2024-11-22T06:44:05.715Z" 596 | } 597 | ], 598 | "affiliated": [ 599 | { 600 | "title": "IBM Consulting", 601 | "subtitle": "IT Services and IT Consulting", 602 | "Links": "https://www.linkedin.com/showcase/ibmconsulting/?trk=affiliated-pages" 603 | }, 604 | { 605 | "title": "IBM Hybrid Cloud and Infrastructure", 606 | "subtitle": "IT Services and IT Consulting", 607 | "location": "Armonk, New York", 608 | "Links": "https://www.linkedin.com/showcase/ibm-cloud/?trk=affiliated-pages" 609 | }, 610 | { 611 | "title": "IBM Data, AI & Automation", 612 | "subtitle": "IT Services and IT Consulting", 613 | "location": "Armonk, New York", 614 | "Links": "https://www.linkedin.com/showcase/ibmdata/?trk=affiliated-pages" 615 | }, 616 | { 617 | "title": "IBM Security", 618 | "subtitle": "Computer and Network Security", 619 | "Links": "https://www.linkedin.com/showcase/ibm-security/?trk=affiliated-pages" 620 | }, 621 | { 622 | "title": "IBM watsonx", 623 | "subtitle": "IT Services and IT Consulting", 624 | "location": "New York, NY", 625 | "Links": "https://www.linkedin.com/showcase/ibm-watsonx/?trk=affiliated-pages" 626 | }, 627 | { 628 | "title": "IBM Research", 629 | "subtitle": "Research Services", 630 | "location": "Yorktown Heights, New York", 631 | "Links": "https://www.linkedin.com/showcase/ibm-research/?trk=affiliated-pages" 632 | }, 633 | { 634 | "title": "IBM Quantum", 635 | "subtitle": "IT Services and IT Consulting", 636 | "location": "Yorktown Heights, New York", 637 | "Links": "https://www.linkedin.com/showcase/ibm-quantum/?trk=affiliated-pages" 638 | }, 639 | { 640 | "title": "IBM Developer", 641 | "subtitle": "IT Services and IT Consulting", 642 | "location": "New York, NY", 643 | "Links": "https://www.linkedin.com/showcase/ibmdeveloper/?trk=affiliated-pages" 644 | }, 645 | { 646 | "title": "IBM Alumni", 647 | "subtitle": "IT Services and IT Consulting", 648 | "location": "Armonk, New York", 649 | "Links": "https://www.linkedin.com/showcase/ibmalumni/?trk=affiliated-pages" 650 | }, 651 | { 652 | "title": "IBM Institute for Business Value", 653 | "subtitle": "Think Tanks", 654 | "Links": "https://www.linkedin.com/showcase/ibm-institute-for-business-value/?trk=affiliated-pages" 655 | }, 656 | { 657 | "title": "IBM Industry Insider", 658 | "subtitle": "Technology, Information and Internet", 659 | "location": "Armonk, New York", 660 | "Links": "https://www.linkedin.com/showcase/ibm-industry-insider/?trk=affiliated-pages" 661 | }, 662 | { 663 | "title": "IBM Partner Plus", 664 | "subtitle": "Information Technology & Services", 665 | "Links": "https://www.linkedin.com/showcase/ibm-partner-plus/?trk=affiliated-pages" 666 | } 667 | ], 668 | "formatted_locations": [ 669 | "International Business Machines Corp., New Orchard Road, Armonk, New York, NY 10504, US", 670 | "590 Madison Ave, New York, NY 10022, US", 671 | "90 Grayston Dr, Sandton, Gauteng 2196, ZA", 672 | "Plaza Independencia 721, Montevideo, 11000, UY", 673 | "388 Phahon Yothin Road, Phaya Thai, Bangkok 
City 10400, TH", 674 | "Jalan Prof. Dr. Latumenten, Jakarta Barat, Jakarta 11330, ID", 675 | "30 S 17th St, Philadelphia, PA 19103, US", 676 | "60 City Rd, Melbourne, VIC 3006, AU", 677 | "V Parku 2294/4, Prague, Prague 148 00, CZ", 678 | "9 Changi Business Park Central 1, Singapore, Singapore 486048, SG", 679 | "Via Sciangai, Rome, Laz. 00144, IT", 680 | "Nahmitzer Damm 12, Berlin, BE 12277, DE", 681 | "3031 N Rocky Point Dr W, Tampa, FL 33607, US", 682 | "First Avenue, Petaling Jaya, Selangor 47800, MY", 683 | "Laajalahdentie 23, Helsinki, Southern Finland 00330, FI", 684 | "Carrera 53 100-25, Bogota, Bogota, D.C. 111111, CO", 685 | "Presnenskaya naberezhnaya 10, Moscow, Central Federal District 123112, RU", 686 | "3 Road, Dubai, Dubai, AE", 687 | "71 S Wacker Dr, Chicago, IL 60606, US", 688 | "50 Rue de Picpus, Paris, IdF 75012, FR", 689 | "Mlynske nivy 16688/49, Bratislava, Bratislava 821 09, SK", 690 | "Shuhada'A Street, Kuwait City, Kuwait City, KW", 691 | "Vasant Kunj Road, Delhi, Delhi 110070, IN", 692 | "Avenida Pasteur, 138, Rio de Janeiro, RJ 22290-240, BR", 693 | "284 Leoforos Kifisias, Chalandri, Attica 152 32, GR", 694 | "14212 Cochran Rd SW, Huntsville, AL 35824, US", 695 | "Carretera al Castillo, El Salto, JAL 45680, MX", 696 | "Calle de Corazon de Maria, 44, Madrid, Community of Madrid 28002, ES", 697 | "Technicka 2995/21, Brno, South Moravia 612 00, CZ", 698 | "150 Kettletown Rd, Southbury, CT 06488, US", 699 | "601 Pacific Hwy, Sydney, NSW 2065, AU", 700 | "505 Howard St, San Francisco, CA 94105, US", 701 | "600 14th St NW, Washington, DC 20005, US", 702 | "7100 Highlands Pkwy SE, Smyrna, GA 30082, US", 703 | "1000 Belleview St, Dallas, TX 75215, US", 704 | "3039 E Cornwallis Rd, Durham, NC 27709, US", 705 | "Avenida Hipolito Yrigoyen 2149, Martinez, Buenos Aires 1640, AR", 706 | "Soseaua Bucuresti-Ploiesti 1A, Bucharest, Bucharest, RO", 707 | "Rodovia Jorn. 
Francisco Aguirre Proenca, Hortolandia, SP 13186-624, BR", 708 | "B-19, Noida, Uttar Pradesh 201307, IN", 709 | "Cairo Alexandria Desert Road, Sixth of October, Al Jizah, EG" 710 | ], 711 | "stock_info": { 712 | "id": "IBM", 713 | "datetime": "December 21, 2024", 714 | "stock_exchange": "NYSE", 715 | "stock_ticker": "IBM", 716 | "stock_price": "$223.38", 717 | "stock_price_change": "-0.54 (-0.241%)", 718 | "stock_provider": "Data from Refinitiv" 719 | }, 720 | "get_directions_url": [ 721 | { 722 | "directions_url": "https://www.bing.com/maps?where=International+Business+Machines+Corp.+New+Orchard+Road+Armonk%2C+New+York+10504+NY+US&trk=org-locations_url" 723 | }, 724 | { 725 | "directions_url": "https://www.bing.com/maps?where=590+Madison+Ave+New+York+10022+NY+US&trk=org-locations_url" 726 | }, 727 | { 728 | "directions_url": "https://www.bing.com/maps?where=90+Grayston+Dr+Sandton+2196+Gauteng+ZA&trk=org-locations_url" 729 | }, 730 | { 731 | "directions_url": "https://www.bing.com/maps?where=Plaza+Independencia+721+Montevideo+11000+UY&trk=org-locations_url" 732 | }, 733 | { 734 | "directions_url": "https://www.bing.com/maps?where=388+Phahon+Yothin+Road+Phaya+Thai+10400+Bangkok+City+TH&trk=org-locations_url" 735 | }, 736 | { 737 | "directions_url": "https://www.bing.com/maps?where=Jalan+Prof.+Dr.+Latumenten+Jakarta+Barat+11330+Jakarta+ID&trk=org-locations_url" 738 | }, 739 | { 740 | "directions_url": "https://www.bing.com/maps?where=30+S+17th+St+Philadelphia+19103+PA+US&trk=org-locations_url" 741 | }, 742 | { 743 | "directions_url": "https://www.bing.com/maps?where=60+City+Rd+Melbourne+3006+VIC+AU&trk=org-locations_url" 744 | }, 745 | { 746 | "directions_url": "https://www.bing.com/maps?where=V+Parku+2294%2F4+Prague+148+00+Prague+CZ&trk=org-locations_url" 747 | }, 748 | { 749 | "directions_url": "https://www.bing.com/maps?where=9+Changi+Business+Park+Central+1+Singapore+486048+Singapore+SG&trk=org-locations_url" 750 | }, 751 | { 752 | "directions_url": "https://www.bing.com/maps?where=Via+Sciangai+Rome+00144+Laz.+IT&trk=org-locations_url" 753 | }, 754 | { 755 | "directions_url": "https://www.bing.com/maps?where=Nahmitzer+Damm+12+Berlin+12277+BE+DE&trk=org-locations_url" 756 | }, 757 | { 758 | "directions_url": "https://www.bing.com/maps?where=3031+N+Rocky+Point+Dr+W+Tampa+33607+FL+US&trk=org-locations_url" 759 | }, 760 | { 761 | "directions_url": "https://www.bing.com/maps?where=First+Avenue+Petaling+Jaya+47800+Selangor+MY&trk=org-locations_url" 762 | }, 763 | { 764 | "directions_url": "https://www.bing.com/maps?where=Laajalahdentie+23+Helsinki+00330+Southern+Finland+FI&trk=org-locations_url" 765 | }, 766 | { 767 | "directions_url": "https://www.bing.com/maps?where=Carrera+53+100-25+Bogota+111111+Bogota%2C+D.C.+CO&trk=org-locations_url" 768 | }, 769 | { 770 | "directions_url": "https://www.bing.com/maps?where=Presnenskaya+naberezhnaya+10+Moscow+123112+Central+Federal+District+RU&trk=org-locations_url" 771 | }, 772 | { 773 | "directions_url": "https://www.bing.com/maps?where=3+Road+Dubai+Dubai+AE&trk=org-locations_url" 774 | }, 775 | { 776 | "directions_url": "https://www.bing.com/maps?where=71+S+Wacker+Dr+Chicago+60606+IL+US&trk=org-locations_url" 777 | }, 778 | { 779 | "directions_url": "https://www.bing.com/maps?where=50+Rue+de+Picpus+Paris+75012+IdF+FR&trk=org-locations_url" 780 | }, 781 | { 782 | "directions_url": "https://www.bing.com/maps?where=Mlynske+nivy+16688%2F49+Bratislava+821+09+Bratislava+SK&trk=org-locations_url" 783 | }, 784 | { 785 | "directions_url": 
"https://www.bing.com/maps?where=Shuhada%27A+Street+Kuwait+City+Kuwait+City+KW&trk=org-locations_url" 786 | }, 787 | { 788 | "directions_url": "https://www.bing.com/maps?where=Vasant+Kunj+Road+Delhi+110070+Delhi+IN&trk=org-locations_url" 789 | }, 790 | { 791 | "directions_url": "https://www.bing.com/maps?where=Avenida+Pasteur%2C+138+Rio+de+Janeiro+22290-240+RJ+BR&trk=org-locations_url" 792 | }, 793 | { 794 | "directions_url": "https://www.bing.com/maps?where=284+Leoforos+Kifisias+Chalandri+152+32+Attica+GR&trk=org-locations_url" 795 | }, 796 | { 797 | "directions_url": "https://www.bing.com/maps?where=14212+Cochran+Rd+SW+Huntsville+35824+AL+US&trk=org-locations_url" 798 | }, 799 | { 800 | "directions_url": "https://www.bing.com/maps?where=Carretera+al+Castillo+El+Salto+45680+JAL+MX&trk=org-locations_url" 801 | }, 802 | { 803 | "directions_url": "https://www.bing.com/maps?where=Calle+de+Corazon+de+Maria%2C+44+Madrid+28002+Community+of+Madrid+ES&trk=org-locations_url" 804 | }, 805 | { 806 | "directions_url": "https://www.bing.com/maps?where=Technicka+2995%2F21+Brno+612+00+South+Moravia+CZ&trk=org-locations_url" 807 | }, 808 | { 809 | "directions_url": "https://www.bing.com/maps?where=150+Kettletown+Rd+Southbury+06488+CT+US&trk=org-locations_url" 810 | }, 811 | { 812 | "directions_url": "https://www.bing.com/maps?where=601+Pacific+Hwy+Sydney+2065+NSW+AU&trk=org-locations_url" 813 | }, 814 | { 815 | "directions_url": "https://www.bing.com/maps?where=505+Howard+St+San+Francisco+94105+CA+US&trk=org-locations_url" 816 | }, 817 | { 818 | "directions_url": "https://www.bing.com/maps?where=600+14th+St+NW+Washington+20005+DC+US&trk=org-locations_url" 819 | }, 820 | { 821 | "directions_url": "https://www.bing.com/maps?where=7100+Highlands+Pkwy+SE+Smyrna+30082+GA+US&trk=org-locations_url" 822 | }, 823 | { 824 | "directions_url": "https://www.bing.com/maps?where=1000+Belleview+St+Dallas+75215+TX+US&trk=org-locations_url" 825 | }, 826 | { 827 | "directions_url": "https://www.bing.com/maps?where=3039+E+Cornwallis+Rd+Durham+27709+NC+US&trk=org-locations_url" 828 | }, 829 | { 830 | "directions_url": "https://www.bing.com/maps?where=Avenida+Hipolito+Yrigoyen+2149+Martinez+1640+Buenos+Aires+AR&trk=org-locations_url" 831 | }, 832 | { 833 | "directions_url": "https://www.bing.com/maps?where=Soseaua+Bucuresti-Ploiesti+1A+Bucharest+Bucharest+RO&trk=org-locations_url" 834 | }, 835 | { 836 | "directions_url": "https://www.bing.com/maps?where=Rodovia+Jorn.+Francisco+Aguirre+Proenca+Hortolandia+13186-624+SP+BR&trk=org-locations_url" 837 | }, 838 | { 839 | "directions_url": "https://www.bing.com/maps?where=B-19+Noida+201307+Uttar+Pradesh+IN&trk=org-locations_url" 840 | }, 841 | { 842 | "directions_url": "https://www.bing.com/maps?where=Cairo+Alexandria+Desert+Road+Sixth+of+October+Al+Jizah+EG&trk=org-locations_url" 843 | } 844 | ], 845 | "description": "IBM | 17,685,425 followers on LinkedIn. At IBM, we do more than work. We create. We create as technologists, developers, and engineers.", 846 | "additional_information": "Additional jobs info: IBM (43,671 open jobs). Engineer (555,845 open jobs). Analyst (694,057 open jobs). Developer (258,935 open jobs). Intern (71,196 open jobs). Software Engineer (300,699 open jobs). Manager (1,880,925 open jobs). Associate (1,091,945 open jobs). Scientist (48,969 open jobs). Project Manager (253,048 open jobs). Accountant (91,739 open jobs). Specialist (768,666 open jobs). Consultant (760,907 open jobs). Graduate (361,130 open jobs). Python Developer (46,642 open jobs). 
Customer Service Representative (185,647 open jobs). Executive (690,514 open jobs). User Experience Designer (13,659 open jobs). Data Analyst (329,009 open jobs). Recruiter (243,016 open jobs)", 847 | "country_codes_array": [ 848 | "US", 849 | "ZA", 850 | "UY", 851 | "TH", 852 | "ID", 853 | "AU", 854 | "CZ", 855 | "SG", 856 | "IT", 857 | "DE", 858 | "MY", 859 | "FI", 860 | "CO", 861 | "RU", 862 | "AE", 863 | "FR", 864 | "SK", 865 | "KW", 866 | "IN", 867 | "BR", 868 | "GR", 869 | "MX", 870 | "ES", 871 | "AR", 872 | "RO", 873 | "EG" 874 | ], 875 | "alumni": null, 876 | "alumni_information": null, 877 | "timestamp": "2024-12-22T06:44:05.826Z" 878 | }, 879 | { 880 | "input": { 881 | "url": "https://il.linkedin.com/company/bright-data" 882 | }, 883 | "id": "bright-data", 884 | "name": "Bright Data", 885 | "country_code": "IL", 886 | "locations": [ 887 | "Greater Tel Aviv, Israel 42507, IL" 888 | ], 889 | "followers": 27387, 890 | "employees_in_linkedin": 1154, 891 | "about": "Bright Data is the world’s largest data collection platform dedicated to helping all businesses view the Internet just like their consumers and potential consumers do each and every day. We help global brands gather publicly available web data in an ethical manner and transform unstructured data (e.g., HTML) into structured data (e.g., xl). Working with over 15,000 customers, including market-leaders from the Fortune 500 space, the company’s first-of-its-kind data collection platform enables clients to view the internet in complete transparency, no matter where they are based in the world. By scouring the web through the eyes of the consumer, organizations can collect data at scale and gain a real and accurate perspective of how their customers are interacting with their brand and with their competitors – without being blocked or served misleading information. 
This means organizations can now make better, faster, and more informed business decisions based on real-time competitive data - leveraging real net transparency.", 892 | "specialties": "price intelligence, web data collection, brand monitoring, brand protection, ad verification, data collection, data, market research, web research, social account management, ecommerce tools, data centers, and alternative data", 893 | "company_size": "201-500 employees", 894 | "organization_type": "Privately Held", 895 | "industries": "Software Development", 896 | "website": "https://brdta.com/3go77R5", 897 | "company_id": "4000016", 898 | "employees": [ 899 | { 900 | "img": "https://media.licdn.com/dms/image/v2/D5603AQG0eN935v_ESg/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1676746180944?e=2147483647&v=beta&t=vfBpl2YfhzeOaWE6GzksrFQF7x8JsdsZZLLnM3P7O14", 901 | "link": "https://www.linkedin.com/in/gunja-gargeshwari-5bb37?trk=org-employees", 902 | "subtitle": "Chief Revenue Officer (CRO) Bright Data", 903 | "title": "Gunja Gargeshwari" 904 | }, 905 | { 906 | "img": "https://media.licdn.com/dms/image/v2/C4E03AQFdec115n9IjQ/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1542199103342?e=2147483647&v=beta&t=9zE8i0saYsHVr8Vy1N3IRumI2VzWcJZEJyAz5aIetRA", 907 | "link": "https://br.linkedin.com/in/wagnerporcelli?trk=org-employees", 908 | "subtitle": "Enterprise Senior Account Executive at Bright Data", 909 | "title": "Wagner Porcelli" 910 | }, 911 | { 912 | "img": "https://media.licdn.com/dms/image/v2/D4E03AQGKfYR5z3Xadg/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1708538179108?e=2147483647&v=beta&t=RV3yZK3MPaCzin6ejJcq8e55GA8eYI0sHKo6cgZ70Bc", 913 | "link": "https://uk.linkedin.com/in/boaztal?trk=org-employees", 914 | "subtitle": "Hands-on sales leader who is fearless in rolling up my sleeves. Go To Market Strategy. 
Selling advanced technologies.", 915 | "title": "Boaz Tal" 916 | }, 917 | { 918 | "img": "https://media.licdn.com/dms/image/v2/C4D03AQGPa6DeO_72BQ/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1625732403686?e=2147483647&v=beta&t=rssGKo1NMZ3Vr4LBgoA6QpMA6ZG_DP5bxkJpj0priU8", 919 | "link": "https://il.linkedin.com/in/yuval-yifrach-4b9285?trk=org-employees", 920 | "title": "Yuval Yifrach" 921 | } 922 | ], 923 | "headquarters": "Greater Tel Aviv, Israel", 924 | "image": "https://media.licdn.com/dms/image/v2/D4D3DAQGsVDROVj3A6w/image-scale_191_1128/image-scale_191_1128/0/1724666138117/bright_data_cover?e=2147483647&v=beta&t=AKjWcG-7sGgCoD_pmMz2mNJ35EVj9KDBhuc7mxjBNkY", 925 | "logo": "https://media.licdn.com/dms/image/v2/C4D0BAQF81Zde5QzE2Q/company-logo_200_200/company-logo_200_200/0/1630474308804/bright_data_logo?e=2147483647&v=beta&t=sp1t4wYc7qfHBduC4pt19jFASldMU9jetfx0XI_E160", 926 | "similar": [ 927 | { 928 | "Links": "https://ie.linkedin.com/company/brightdata?trk=similar-pages", 929 | "subtitle": "IT Services and IT Consulting", 930 | "title": "BrightData.ie" 931 | }, 932 | { 933 | "Links": "https://il.linkedin.com/company/hola-?trk=similar-pages", 934 | "subtitle": "Entertainment Providers", 935 | "title": "Hola", 936 | "location": "Netanya, Netanya" 937 | }, 938 | { 939 | "Links": "https://il.linkedin.com/company/bright-inisights?trk=similar-pages", 940 | "subtitle": "Software Development", 941 | "title": "Bright Insights", 942 | "location": "Netanya, Center District" 943 | }, 944 | { 945 | "Links": "https://lt.linkedin.com/company/oxylabs-io?trk=similar-pages", 946 | "subtitle": "IT Services and IT Consulting", 947 | "title": "Oxylabs.io", 948 | "location": "Vilnius, Lithuania" 949 | }, 950 | { 951 | "Links": "https://www.linkedin.com/company/revuze?trk=similar-pages", 952 | "subtitle": "Software Development", 953 | "title": "Revuze", 954 | "location": "Palo Alto , CA" 955 | }, 956 | { 957 | "Links": "https://il.linkedin.com/company/the-bright-initiative-by-bright-data?trk=similar-pages", 958 | "subtitle": "Non-profit Organizations", 959 | "title": "The Bright Initiative by Bright Data" 960 | }, 961 | { 962 | "Links": "https://il.linkedin.com/company/netnut?trk=similar-pages", 963 | "subtitle": "Information Technology & Services", 964 | "title": "NetNut.io", 965 | "location": "Tel Aviv, Israel" 966 | }, 967 | { 968 | "Links": "https://il.linkedin.com/company/mondaydotcom?trk=similar-pages", 969 | "subtitle": "Software Development", 970 | "title": "monday.com", 971 | "location": "Tel Aviv, Israel" 972 | }, 973 | { 974 | "Links": "https://www.linkedin.com/company/nimbledata?trk=similar-pages", 975 | "subtitle": "Software Development", 976 | "title": "Nimble", 977 | "location": "New York, NY" 978 | }, 979 | { 980 | "Links": "https://il.linkedin.com/company/wix-com?trk=similar-pages", 981 | "subtitle": "Software Development", 982 | "title": "Wix" 983 | } 984 | ], 985 | "url": "https://www.linkedin.com/company/bright-data", 986 | "updates": [ 987 | { 988 | "likes_count": 31, 989 | "text": "This 45-minute webinar will equip you with strategies to handle high-volume data demands in the travel industry and keep your dynamic pricing algorithms efficient. 
In This Live Webinar, You Will Learn How To: - Ensure continuous access for scraping operations - Develop a scalable browser infrastructure robust enough for any demand - Extract data effectively from JavaScript-rendered websites Key Takeaways: - Execute high-volume scraping requests seamlessly - Build robust infrastructures for any load - Successfully extract data from JavaScript-rendered pages Ideal for: - IT Professionals in the Travel Industry - Data Scientists and Analysts - Developers Don't Miss Out: Prepare your systems to handle the surge of data collection demands this holiday season with cutting-edge strategies and technologies. 🔗 Register Now to Reserve Your Spot! https://lnkd.in/d3a6JNKz #DataScraping #DynamicPricing #TravelIndustry #Webinar #LiveEvent #DataExtraction #JavaScript #ScalableSolutions #TechnicalSolutions #HolidaySeason", 990 | "time": "1mo Edited", 991 | "title": "Bright Data", 992 | "external_link": "https://www.linkedin.com/login?session_redirect=https%3A%2F%2Fwww%2Elinkedin%2Ecom%2Fvideo%2Flive%2Furn%3Ali%3AugcPost%3A7257003939022184449&trk=organization_guest_main-feed-card_feed-live-video-content", 993 | "post_url": "https://www.linkedin.com/posts/bright-data_datascraping-dynamicpricing-travelindustry-activity-7257003940188200960-uuXp", 994 | "post_id": "7257003940188200960", 995 | "date": "2024-11-22T06:43:59.705Z" 996 | }, 997 | { 998 | "likes_count": 69, 999 | "text": "Don’t miss Yanay Sela , CMO of Bright Data this Wednesday at #TechWeekSingapore . In his session, “Data Darwinism: Beyond the Mirage of Numbers,” you will learn: • How the top 10% of eCommerce brands are leveraging web data for predictive analytics and dynamic pricing • How to avoid the trap of vanity metrics and focus on what truly drives competitive advantage • Strategies to shape market trends and respond faster than competitors Join us on Oct 9th, 10:25 AM at Marina Bay Sands, eCommerce Center Stage! Gunja Gargeshwari Sharon Laor Yvonne L. Vincent Ong Shinara Kanwar Varna Rajan KT Prasad Hadar Danino Zimring 🎗️ #TechWeekSG #eCommerce #BrightData #DataDrivenLeadership #ECEA #eCommerceExpoAsia #TFMA #TechnologyforMarketingAsia #Singapore", 1000 | "time": "2mo Edited", 1001 | "title": "Bright Data", 1002 | "images": [ 1003 | "https://media.licdn.com/dms/image/v2/D4D22AQHMXrBvWNw0XA/feedshare-shrink_2048_1536/feedshare-shrink_2048_1536/0/1728213765128?e=2147483647&v=beta&t=G83jQdLe1e6k16_PRu2i1zQytiT-Np5cjmzRgzchybw" 1004 | ], 1005 | "post_url": "https://www.linkedin.com/posts/bright-data_techweeksingapore-techweeksg-ecommerce-activity-7248653912679870464-BfW_", 1006 | "post_id": "7248653912679870464", 1007 | "date": "2024-10-23T06:43:59.706Z" 1008 | }, 1009 | { 1010 | "likes_count": 42, 1011 | "text": "Meet Bright Data at Tech Week Singapore on October 9th-10th! Tech Week Singapore is the event for businesses to get ahead in the fast-evolving tech landscape. This is your opportunity to experience firsthand how Bright Data’s innovative solutions can transform your data strategy and drive growth. Talk To Us About: 🔹 Data-Driven Advantage: Discover how real-time, ethical data collection can unlock new opportunities for your business. 🔹 Industry-Specific Solutions: Whether you’re focused on cloud, cybersecurity, big data, AI, or eCommerce, Bright Data has the tools to help you excel. 🔹 Scale with Confidence: Learn how our platform enables you to seamlessly gather and use data from millions of sources globally, helping you stay competitive. Meet us at Booth #Y120 in the e-commerce expo. 
https://lnkd.in/dmNdpjB4 Yanay Sela Gunja Gargeshwari Sharon Laor Yvonne L. Vincent Ong Shinara Kanwar Varna Rajan KT Prasad #TechWeekSingapore #ECEA #eCommerceExpoAsia #TFMA #TechnologyforMarketingAsia #Singapore #Data #DataStrategy #AI #BigData #CloudComputing #CyberSecurity #eCommerce #BusinessGrowth", 1012 | "time": "2mo Edited", 1013 | "title": "Bright Data", 1014 | "images": [ 1015 | "https://media.licdn.com/dms/image/v2/D4D22AQH16AvDoZvbXA/feedshare-shrink_2048_1536/feedshare-shrink_2048_1536/0/1727607011669?e=2147483647&v=beta&t=0SfNgG3ffrIZjqhW-OeSceIPdu_p891XjIZFrLeMJtI" 1016 | ], 1017 | "post_url": "https://www.linkedin.com/posts/bright-data_y120-techweeksingapore-ecea-activity-7246109007206092800-n-kc", 1018 | "post_id": "7246109007206092800", 1019 | "date": "2024-10-23T06:43:59.714Z" 1020 | }, 1021 | { 1022 | "likes_count": 26, 1023 | "text": "🔥 The future of retail intelligence is already here, and it's moving FAST! 🔥 If you're not using web data and AI to optimize your pricing strategy, you're already behind. But don’t worry—we’ve got you covered. Join us TOMORROW for a game-changing keynote session that will give you the tools to lead the market! \"Best Practices for Expanding Market Share & Margins using Web Data & AI\" 📅 17 September, 2:00-2:30 PM 💥 Featuring top industry experts: - Tamir Roter ; Chief Corporate Development Officer, Bright Data - Arthur Mandon ; Strategy Manager ,Vestiaire Collective Learn how to: - Leverage web data for real-time pricing and inventory management - Use AI to make better, data-driven decisions - Apply real-world strategies to grow your business and boost your margins This isn’t just about keeping up—it’s about getting ahead. Retail leaders, this is your moment to get on board with the future of retail to drive market share and revenue. 🚀 See you there: https://lnkd.in/dCwVBgec #ParisRetailWeek #WebData #AI #RetailInnovation #DataDrivenGrowth Bright Data Vestiaire Collective #MarketShare #ecommerce #retail", 1024 | "time": "3mo", 1025 | "title": "Bright Data", 1026 | "images": [ 1027 | "https://media.licdn.com/dms/image/v2/D4D22AQEwz-GN_qBoWQ/feedshare-shrink_2048_1536/feedshare-shrink_2048_1536/0/1726494787169?e=2147483647&v=beta&t=k4yrzKjTa1NhjrMBIbp-W4xbj8msPe674BcG7FkZqtE" 1028 | ], 1029 | "post_url": "https://www.linkedin.com/posts/bright-data_parisretailweek-webdata-ai-activity-7241443998744166400-DOdi", 1030 | "post_id": "7241443998744166400", 1031 | "date": "2024-09-23T06:43:59.716Z" 1032 | }, 1033 | { 1034 | "likes_count": 247, 1035 | "text": "Join us this Tuesday for 45-minute webinar where you will learn how to simplify and scale scraping operations with our powerful serverless architecture. In the demo we'll walk you through the setup where you'll see how to handle dynamic websites, managing proxies, overcome anti-bot protections, and coordinate tasks efficiently. Aditional discussion points: - Orchestrate interaction and parsing workers - Set up multi-stage and parallel scraping tasks - Manage proxies and web unblocking - Debug using live previews and logs - Schedule tasks and handle retries Register now to reserve your spot! 
https://lnkd.in/dydxSqS9 #Serverless #WebScraping #ServerlessArchitecture #DataExtraction #ProxyManagement #DynamicWebsites #WebAutomation #CloudComputing #DataEngineering #ScalableSolutions", 1036 | "time": "3mo Edited", 1037 | "title": "Bright Data", 1038 | "comments_count": 3, 1039 | "post_url": "https://www.linkedin.com/posts/bright-data_serverless-webscraping-serverlessarchitecture-activity-7239629290655371264-j0Jw", 1040 | "post_id": "7239629290655371264", 1041 | "date": "2024-09-23T06:43:59.717Z" 1042 | }, 1043 | { 1044 | "likes_count": 20, 1045 | "text": "The internet is at the heart of every purchasing decision. Customers constantly turn to the web for reviews, price comparisons, and product research before making a choice. In this keynote session, we will dive deep into how businesses can harness web data and AI to optimize every touchpoint of the customer journey—improving pricing strategies, fine-tuning supply chains, enhancing marketing, and increasing revenue margins. Join us on Tuesday, 17 September, 2:00-2:30 PM for a powerful keynote session on \"Best Practices for Expanding Market Share & Margins using Web Data & AI.\" 🚀 Speakers: - Tamir Roter – Chief Corporate Development Officer, Bright Data - Jean-Maxime Pinhas – VP Business & Corporate Development, Vestiaire Collective You will learn: - How to use web data for real-time pricing, inventory management, and market competitiveness - Unlock AI-driven insights for smarter decision-making across product, marketing, and customer engagement - Real-world strategies from Bright Data and Vestiaire Collective that translate data into business growth This session is perfect for retail leaders looking to leverage cutting-edge technology to expand market share and boost margins. Don’t miss out on the future of retail intelligence! See you there! https://lnkd.in/d2wEwPYB #ParisRetailWeek #WebData #AI #RetailInnovation #DataDrivenGrowth #BrightData #VestiaireCollective #MarketShare #ecommerce #retail", 1046 | "time": "3mo", 1047 | "title": "Bright Data", 1048 | "comments_count": 1, 1049 | "images": [ 1050 | "https://media.licdn.com/dms/image/v2/D4D22AQFeZEZhD__93w/feedshare-shrink_2048_1536/feedshare-shrink_2048_1536/0/1725869541661?e=2147483647&v=beta&t=0RIWsmNFvc8QPCmD2gQEVe1lmk9Mw4EFXEwVmeH1Ivw" 1051 | ], 1052 | "post_url": "https://www.linkedin.com/posts/bright-data_parisretailweek-webdata-ai-activity-7238821530279784449-_Cd0", 1053 | "post_id": "7238821530279784449", 1054 | "date": "2024-09-23T06:43:59.719Z" 1055 | }, 1056 | { 1057 | "likes_count": 28, 1058 | "text": "𝗥𝗲𝗮𝗱𝘆 𝗳𝗼𝗿 𝗣𝗮𝗿𝗶𝘀 𝗥𝗲𝘁𝗮𝗶𝗹 𝗪𝗲𝗲𝗸 𝟮𝟬𝟮𝟰 ?! Join us from September 17th to 19th. The future of retail powered by AI-driven insights is already here. Find out how Bright Insights is already transforming retail & eCommerce business strategies with actionable, data-backed intelligence. 𝗧𝗮𝗹𝗸 𝘁𝗼 𝘂𝘀 𝗮𝗯𝗼𝘂𝘁: - 𝗗𝘆𝗻𝗮𝗺𝗶𝗰 𝗣𝗿𝗶𝗰𝗶𝗻𝗴: How real-time web data can keep you ahead of the competition. - 𝗦𝘂𝗽𝗽𝗹𝘆 𝗖𝗵𝗮𝗶𝗻 𝗘𝗳𝗳𝗶𝗰𝗶𝗲𝗻𝗰𝘆: Using AI to predict demand, reduce risks, and keep your products in stock. - 𝗧𝗮𝗿𝗴𝗲𝘁𝗲𝗱 𝗠𝗮𝗿𝗸𝗲𝘁𝗶𝗻𝗴: Leverage web data to craft personalized marketing campaigns. - 𝗖𝗼𝗻𝘀𝘂𝗺𝗲𝗿 𝗦𝗲𝗻𝘁𝗶𝗺𝗲𝗻𝘁: How AI uncovers customer preferences and market trends. Find us at: 𝙱̲𝚘̲𝚘̲𝚝̲𝚑̲ ̲𝙼̲𝟶̲𝟸̲𝟽̲ Learn how your business can leverage Bright Data for data-driven growth in 2025 and beyond. Book a 1:1 meeting! 
https://lnkd.in/d_maPNvu Tamir Roter David El Kaim Leigh Wood , Boaz Grinvald , Haim Treistman Hadar Danino Zimring 🎗️ #ParisRetailWeek #eCommerce #AI #RetailInnovation #BrightInsights #DataDriven #Omnichannel #FutureOfRetail #Networking #Events2024", 1059 | "time": "3mo", 1060 | "title": "Bright Data", 1061 | "images": [ 1062 | "https://media.licdn.com/dms/image/v2/D4D22AQFpEoU3WkCAAg/feedshare-shrink_2048_1536/feedshare-shrink_2048_1536/0/1725869093979?e=2147483647&v=beta&t=CfygtzmDi82AanDF_K7DeIxa6Mzo33B4EI66vT_j85c" 1063 | ], 1064 | "post_url": "https://www.linkedin.com/posts/bright-data_parisretailweek-ecommerce-ai-activity-7238819647913906178-i7k7", 1065 | "post_id": "7238819647913906178", 1066 | "date": "2024-09-23T06:43:59.721Z" 1067 | }, 1068 | { 1069 | "likes_count": 40, 1070 | "text": "Join Us at Ai4 2024! We are excited to attend Ai4 - Artificial Intelligence Conferences 2024, North America's largest AI industry event. Join us at Booth #126 to talk about how web data is driving the future of AI and LLMs. Join Rafael Levi , Solutions Architect, for hourly coding sessions covering both fundamental and advanced web scraping techniques specifically for AI data. Learn how our data solutions can support your AI business and help you stay ahead of the competition. 🚀 And if that’s not enough, stop by to pick up some great swag! See you there! https://lnkd.in/da3Su3Au Bill Paulsen , Sarah (Welch) Esser , Omer Primor , Rafael Levi , Zechariah Lopatin , Hadar Danino Zimring #AI4 #AIConference #MachineLearning #DataScience #TechEvent #Vegas #BrightData #LiveCoding #Workshop #SWAG #Innovation #Technology #BigData #ArtificialIntelligence #Networking", 1071 | "time": "4mo", 1072 | "title": "Bright Data", 1073 | "comments_count": 1, 1074 | "images": [ 1075 | "https://media.licdn.com/dms/image/v2/D4D22AQGZz_wjq2qsWA/feedshare-shrink_2048_1536/feedshare-shrink_2048_1536/0/1723151320817?e=2147483647&v=beta&t=yUVt_pyq4xfn7gz01HbAHqs8h39_pkQ_KtuAXzxopKo" 1076 | ], 1077 | "post_url": "https://www.linkedin.com/posts/bright-data_ai4-aiconference-machinelearning-activity-7227420484030595072-Xpwi", 1078 | "post_id": "7227420484030595072", 1079 | "date": "2024-08-24T06:43:59.722Z" 1080 | }, 1081 | { 1082 | "likes_count": 59, 1083 | "text": "Join us at WeAreDevelopers Berlin for an exclusive live coding event lead by Tim Ruscica (Tech with Tim). Tim will run 20-minute live coding sessions on the hour, every hour! This hands-on coding session will empower you with both fundamental and advanced web scraping techniques, helping you harness the power of web data to thrive in a data-driven industry. What You Will Learn: - Scraping static and dynamic websites - Web automation with JavaScript and Playwright - Scaling web automation with promises and async/await - Bypassing CAPTCHAs and handling IP bans/rate limits - Building web scraping APIs - Optimizing headless browsers for performance - Scaling operations in the cloud with ScrapeOps - Developing complete data extraction and processing pipelines - Setting up real-time data extraction and notification systems See the full agenda: https://lnkd.in/dihUGxAz If you are attending WeAreDevelopers let us know so we can meet, talk and code together! Alon Shany 阿龙 Gil Tashema Hadar Danino Zimring Neil B. Nir Aharoni Noah Kalson Rafael Levi Yair Ida Tim Ruscica Haven't signed up to WeAreDevelopers? - Visit: https://lnkd.in/dKcw8kDD ... - Use the promo code 'WWC_Brightdata15' at checkout to get 15% off your ticket. 
Special thanks to; Christian Heilmann 👀 , Gerry Schneider, akad. BM. and the WeAreDevelopers team. See you there! #WeAreDevelopers #LiveCoding #WebScraping #WebAutomation #JavaScript #Playwright #Puppeteer #Coding #WebDevelopment", 1084 | "time": "5mo", 1085 | "title": "Bright Data", 1086 | "post_url": "https://www.linkedin.com/posts/bright-data_wearedevelopers-livecoding-webscraping-activity-7216698350245470208-fpzh", 1087 | "post_id": "7216698350245470208", 1088 | "date": "2024-07-25T06:43:59.724Z" 1089 | }, 1090 | { 1091 | "likes_count": 222, 1092 | "text": "Join our expert panel for a deep dive into the complex world of browser automation and web scraping. You will learn how to handle dynamic content, avoid bot detection, and scale your scraping projects. This 45-minute session will change how you approach complex scraping tasks! Featured Speakers: - Darío Kondratiuk : Web Developer & Microsoft MVP, specializing in Puppeteer. - Diego Molina : Technical Lead at Sauce Labs, Project Lead for Selenium. - Greg Gorlen: Full Stack Software Engineer & Top Playwright Contributor on StackOverflow. Discover how to tackle the toughest challenges in browser automation and web scraping: - Rate Limiting & IP Blocking - Handling Dynamic Selectors - Navigating Single-Page Applications (SPAs) - Managing Asynchronous Data Loads - Simulating User Interactions - Accessing Shadow DOM - Capturing Full-Page Screenshots of Dynamic Sites - Scaling Browser Infrastructure for Extensive Data Collection Secure your spot now and transform your web scraping expertise! #webinar #webscraping #browserautomation #playwright #puppeteer #selenium", 1093 | "time": "6mo Edited", 1094 | "title": "Bright Data", 1095 | "post_url": "https://www.linkedin.com/posts/bright-data_webinar-webscraping-browserautomation-activity-7208374573744984064-rFIk", 1096 | "post_id": "7208374573744984064", 1097 | "date": "2024-06-25T06:43:59.726Z" 1098 | } 1099 | ], 1100 | "slogan": "The world’s #1 web data platform.", 1101 | "affiliated": [ 1102 | { 1103 | "title": "Hola", 1104 | "subtitle": "Entertainment Providers", 1105 | "location": "Netanya, Netanya", 1106 | "Links": "https://il.linkedin.com/company/hola-?trk=affiliated-pages" 1107 | }, 1108 | { 1109 | "title": "Bright Insights", 1110 | "subtitle": "Software Development", 1111 | "location": "Netanya, Center District", 1112 | "Links": "https://il.linkedin.com/company/bright-inisights?trk=affiliated-pages" 1113 | }, 1114 | { 1115 | "title": "The Bright Initiative by Bright Data", 1116 | "subtitle": "Non-profit Organizations", 1117 | "Links": "https://il.linkedin.com/company/the-bright-initiative-by-bright-data?trk=affiliated-pages" 1118 | }, 1119 | { 1120 | "title": "Bright SDK", 1121 | "subtitle": "Software Development", 1122 | "Links": "https://il.linkedin.com/showcase/bright-sdk/?trk=affiliated-pages" 1123 | } 1124 | ], 1125 | "formatted_locations": [ 1126 | "Greater Tel Aviv, Israel 42507, IL" 1127 | ], 1128 | "get_directions_url": [ 1129 | { 1130 | "directions_url": "https://www.bing.com/maps?where=Greater+Tel+Aviv+42507+Israel+IL&trk=org-locations_url" 1131 | } 1132 | ], 1133 | "description": "Bright Data | 27,387 followers on LinkedIn. The world’s #1 web data platform. | Bright Data is the world’s largest data collection platform dedicated to helping all businesses view the Internet just like their consumers and potential consumers do each and every day. 
We help global brands gather publicly available web data in an ethical manner and transform unstructured data (e.g., HTML) into structured data (e.g., xl).\n\nWorking with over 15,000 customers, including market-leaders from the Fortune 500 space, the company’s first-of-its-kind data collection platform enables clients to view the internet in complete transparency, no matter where they are based in the world.", 1134 | "additional_information": "Additional jobs info: Manager (1,880,925 open jobs). Engineer (555,845 open jobs). Enterprise Sales Director (9,426 open jobs). Marketing Manager (106,879 open jobs). Sales Operations Analyst (32,937 open jobs). Business Director (87,061 open jobs). Developer (258,935 open jobs). Java Team Lead (21,234 open jobs). Freelance Developer (3,752 open jobs). Principal Product Manager (10,879 open jobs). Alliances Manager (40,982 open jobs). Investment Analyst (14,999 open jobs). Senior Data Analyst (35,445 open jobs). Java Technical Lead (45,014 open jobs). Senior Product Manager (50,771 open jobs). Java Specialist (224,262 open jobs). Assistant Controller (56,825 open jobs). Planner (42,976 open jobs). Digital Marketing Manager (17,135 open jobs). Software Engineering Manager (59,689 open jobs)", 1135 | "country_codes_array": [ 1136 | "IL" 1137 | ], 1138 | "alumni": null, 1139 | "alumni_information": null, 1140 | "timestamp": "2024-12-22T06:43:59.804Z" 1141 | }, 1142 | { 1143 | "input": { 1144 | "url": "https://www.linkedin.com/company/stalkit" 1145 | }, 1146 | "id": "stalkit", 1147 | "name": "StalkIt", 1148 | "country_code": "IL", 1149 | "locations": [ 1150 | "Nahalat Zvi 37 Petach Tikva, Israel 4942161, IL" 1151 | ], 1152 | "followers": 5, 1153 | "employees_in_linkedin": 1, 1154 | "about": "StalkIt is a mobile platform used to aggregate and track data changes from various sources. The app is based on a web interface used to create Toolkits which are shown in the app’s Toolkits store. The interface control which sources to use, what info to extract from each source, how often to track for new/edited data. The platform supports many types of data sources : websites, web services, APIs and more. The interface has 2 types of trackers: regular and paramed trackers. The regular trackers are used to track pre defined urls, while the paramed trackers give the users the ability to create their own trackers when entering pre defined query parameters in the Toolkit. The interface also allows the creator to define the category which the Toolkit belongs to, and then can choose to either distribute the Toolkit in the app Toolkit’s store or send it to his friends by setting it as a private Toolkit and find it in the store with a personal code. The app is targeted for both B2C and B2B clients. B2C clients can enjoy the variety of Toolkits and help them to find new content like news, stock prices, tickets to events, job opportunities, cars, houses and more and do more complicated tasks like set up a price target for a product they interested in and get notification in case the price is adjusted, check for hotel availability and much more. 
B2B clients can also enjoy the abilities of the platform and use it to follow and analyze their competitor’s price changes, stocks, reviews, ranking, HR recruiting, advertising strategies and more.", 1155 | "specialties": "Online data aggregating and tracking of data changes and Competitive Research", 1156 | "company_size": "1 employee", 1157 | "organization_type": "Self-Owned", 1158 | "industries": "Technology, Information and Internet", 1159 | "website": "https://stalk-it.com/", 1160 | "founded": 2016, 1161 | "company_id": "18049955", 1162 | "employees": [], 1163 | "headquarters": "Petach Tikva, Israel", 1164 | "image": "https://static.licdn.com/aero-v1/sc/h/5q92mjc5c51bjlwaj3rs9aa82", 1165 | "logo": "https://media.licdn.com/dms/image/v2/C560BAQHffqSPJpDumg/company-logo_200_200/company-logo_200_200/0/1631309372028?e=2147483647&v=beta&t=sXG7XmVKP9JF9MtKe7rAkX318tr3f32URGGYlHwqe0E", 1166 | "similar": [ 1167 | { 1168 | "Links": "https://in.linkedin.com/company/the-product-folks-x-bits-pilani?trk=similar-pages", 1169 | "subtitle": "Technology, Information and Internet", 1170 | "title": "The Product Folks X BITS Pilani", 1171 | "location": "Pilani, Rajasthan" 1172 | }, 1173 | { 1174 | "Links": "https://www.linkedin.com/company/superjoin?trk=similar-pages", 1175 | "subtitle": "Technology, Information and Internet", 1176 | "title": "Superjoin", 1177 | "location": "San Francisco, California" 1178 | }, 1179 | { 1180 | "Links": "https://www.linkedin.com/company/theproductfolks?trk=similar-pages", 1181 | "subtitle": "Technology, Information and Internet", 1182 | "title": "The Product Folks" 1183 | } 1184 | ], 1185 | "url": "https://www.linkedin.com/company/stalkit", 1186 | "formatted_locations": [ 1187 | "Nahalat Zvi 37, Petach Tikva, Israel 4942161, IL" 1188 | ], 1189 | "get_directions_url": [ 1190 | { 1191 | "directions_url": "https://www.bing.com/maps?where=Nahalat+Zvi+37+Petach+Tikva+4942161+Israel+IL&trk=org-locations_url" 1192 | } 1193 | ], 1194 | "description": "StalkIt | 5 followers on LinkedIn. StalkIt is a mobile platform used to aggregate and track data changes from various sources. The app is based on a web interface used to create Toolkits which are shown in the app’s Toolkits store. The interface control which sources to use, what info to extract from each source, how often to track for new/edited data.", 1195 | "additional_information": null, 1196 | "country_codes_array": [ 1197 | "IL" 1198 | ], 1199 | "alumni": null, 1200 | "alumni_information": null, 1201 | "timestamp": "2024-12-22T06:43:55.637Z" 1202 | } 1203 | ] --------------------------------------------------------------------------------
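The fixture above shows the shape of the company-information dataset: a top-level JSON array with one record per company, nested arrays for locations, employees, similar pages, and recent updates, and optional fields (for example, the StalkIt record has no "updates" and its "additional_information" is null). As a quick sanity check before feeding the file into downstream analysis, here is a minimal sketch, assuming it is run from the repository root with Python 3; the field names are taken directly from the records above, and optional keys are read defensively since not every company carries every field:

```python
import json

# Load the saved scraper output: a JSON array of company records.
# Path assumes the script is run from the repository root.
with open("linkedin_scraper_api_data/linkedin_company_info.json", encoding="utf-8") as f:
    companies = json.load(f)

for company in companies:
    # Not every record has every field (e.g., "updates" is absent for small
    # companies and "additional_information" can be null), so use .get()
    # with safe defaults instead of direct key access.
    name = company.get("name", "<unknown>")
    followers = company.get("followers") or 0
    employees = company.get("employees_in_linkedin") or 0
    countries = company.get("country_codes_array") or []
    updates = company.get("updates") or []
    print(f"{name}: {followers:,} followers, {employees:,} employees on LinkedIn, "
          f"{len(countries)} country code(s), {len(updates)} recent post(s)")
```

Run against this sample, the loop prints one summary line per company (IBM, Bright Data, StalkIt), which confirms the file parses cleanly and that the optional-field handling matches the records above.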