├── .rspec ├── Gemfile ├── .autotest ├── .gitignore ├── lib ├── tweetlr │ ├── log_aware.rb │ ├── processors │ │ ├── http.rb │ │ ├── tumblr.rb │ │ ├── twitter.rb │ │ └── photo_service.rb │ ├── combinators │ │ └── twitter_tumblr.rb │ └── core.rb └── tweetlr.rb ├── spec ├── processors │ ├── http_spec.rb │ ├── tumblr_processor_spec.rb │ ├── twitter_processor_spec.rb │ └── photo_services_processor_spec.rb ├── core_spec.rb ├── combinators │ └── twitter_tumblr_combinator_spec.rb ├── support │ └── fixtures │ │ └── twitter_search_api_response.json └── spec_helper.rb ├── Rakefile ├── LICENSE ├── .travis.yml ├── tweetlr.gemspec ├── config ├── tweetlr.yml.test └── tweetlr.yml ├── bin └── tweetlr └── README.md /.rspec: -------------------------------------------------------------------------------- 1 | --colour 2 | -------------------------------------------------------------------------------- /Gemfile: -------------------------------------------------------------------------------- 1 | source 'https://rubygems.org' 2 | gemspec 3 | -------------------------------------------------------------------------------- /.autotest: -------------------------------------------------------------------------------- 1 | # Include plugins 2 | require 'autotest/fsevent' 3 | require 'autotest/growl' -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.swp 2 | log/ 3 | *.log 4 | pkg 5 | *.pid 6 | *.output 7 | .ruby-version 8 | Gemfile.lock 9 | tweetlr.tid 10 | tweetlr.yml.dev 11 | tweetlr.yml.orig 12 | coverage/ -------------------------------------------------------------------------------- /lib/tweetlr/log_aware.rb: -------------------------------------------------------------------------------- 1 | #use centralized logging 2 | module Tweetlr 3 | module LogAware 4 | def self.log=(log) 5 | @log = log 6 | end 7 | def self.log() 8 | @log || Logger.new(STDOUT) 9 | end 10 | end 11 | end -------------------------------------------------------------------------------- /lib/tweetlr.rb: -------------------------------------------------------------------------------- 1 | require 'tweetlr/log_aware' 2 | require 'tweetlr/core' 3 | 4 | module Tweetlr 5 | VERSION = '0.1.30' 6 | 7 | API_ENDPOINT_TWITTER = 'http://search.twitter.com/search.json' 8 | API_ENDPOINT_TUMBLR = 'http://www.tumblr.com' 9 | TWITTER_RESULTS_PER_PAGE = 100 10 | TWITTER_RESULTS_TYPE = 'recent' 11 | UPDATE_PERIOD = 600 #10 minutes 12 | end -------------------------------------------------------------------------------- /spec/processors/http_spec.rb: -------------------------------------------------------------------------------- 1 | require 'spec_helper' 2 | 3 | describe Tweetlr::Processors::Http do 4 | it ".http_get copes with errors by retrying, not raising" do 5 | Curl::Easy.any_instance.stub(:perform).and_raise(Curl::Err::CurlError) 6 | Tweetlr::Processors::Http.stub!(:sleep) #releasing the sleep handbrake... 
7 | Tweetlr::Processors::Http.should_receive(:sleep).with(3) 8 | expect { Tweetlr::Processors::Http.http_get('mocky wocky')}.to_not raise_error(Curl::Err::CurlError) 9 | end 10 | end -------------------------------------------------------------------------------- /Rakefile: -------------------------------------------------------------------------------- 1 | require 'bundler' 2 | Bundler::GemHelper.install_tasks 3 | 4 | require 'rdoc/task' 5 | require 'rspec/core/rake_task' 6 | 7 | RDoc::Task.new do |rdoc| 8 | files = ['README.md', 'LICENSE', 'lib/**/*.rb'] 9 | rdoc.rdoc_files.add(files) 10 | rdoc.main = "README.md" # page to start on 11 | rdoc.title = "tweetlr Docs" # <--- enter name manually! 12 | rdoc.rdoc_dir = 'doc/rdoc' # rdoc output folder 13 | rdoc.options << '--line-numbers' 14 | end 15 | 16 | RSpec::Core::RakeTask.new do |t| 17 | t.rspec_opts = %w(-c) 18 | end 19 | 20 | task :default => :spec 21 | task :test => :spec 22 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | == tweetlr 2 | 3 | Copyright (c) 2011 Sven Kraeuter sven.kraeuter@gmail.com 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 22 | 23 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | # Passes arguments to bundle install (http://gembundler.com/man/bundle-install.1.html) 2 | bundler_args: --binstubs 3 | 4 | # Specify which ruby versions you wish to run your tests on, each version will be used 5 | rvm: 6 | - 1.9.2 7 | - 1.9.3 8 | - 2.0.0 9 | #- 1.8.7 # (current default) 10 | 11 | # Define how to run your tests (defaults to `bundle exec rake` or `rake` depending on whether you have a `Gemfile`) 12 | script: "bundle exec rake test" 13 | 14 | # Define tasks to be completed before and after tests run . 
Will allow folding of content on frontend 15 | # before_script: 16 | # - command_1 17 | # - command_2 18 | # 19 | # after_script: 20 | # - command_1 21 | # - command_2 22 | 23 | # Specify an ENV variable to run before: 'bundle install' and 'rake' (or your defined 'script') 24 | env: "RACK_ENV='test' " 25 | 26 | # Specify the recipients for email notification 27 | notifications: 28 | recipients: 29 | - github@svenkraeuter.com 30 | #disabled: true # Disable email notifications 31 | 32 | # Specify branches to build 33 | # You can either specify only or except. If you specify both, except will be ignored. 34 | branches: 35 | only: 36 | - master 37 | # except: 38 | # - legacy -------------------------------------------------------------------------------- /tweetlr.gemspec: -------------------------------------------------------------------------------- 1 | Gem::Specification.new do |s| 2 | s.name = "tweetlr" 3 | s.version = "0.1.30" 4 | s.author = "Sven Kraeuter" 5 | s.email = "sven.kraeuter@gmail.com" 6 | s.homepage = "http://tweetlr.5v3n.com" 7 | s.summary = "tweetlr crawls twitter for a given term, extracts photos out of the collected tweets' short urls and posts the images to tumblr." 8 | s.description = s.summary 9 | 10 | s.rubyforge_project = s.name 11 | s.extra_rdoc_files = %w(README.md LICENSE) 12 | 13 | s.add_dependency "daemons" 14 | s.add_dependency "eventmachine" 15 | s.add_dependency "curb" 16 | s.add_dependency "json", ">= 1.7.7" 17 | s.add_dependency "nokogiri" 18 | s.add_dependency "oauth" 19 | s.add_dependency "twitter" 20 | 21 | s.add_development_dependency "rake" 22 | s.add_development_dependency "rspec" 23 | s.add_development_dependency "rdoc" 24 | s.add_development_dependency "simplecov" 25 | s.add_development_dependency "coveralls" 26 | s.add_development_dependency "fakeweb", ["~> 1.3"] 27 | 28 | s.files = `git ls-files`.split("\n") 29 | s.test_files = `git ls-files -- {test,spec,features}/*`.split("\n") 30 | s.executables = `git ls-files -- bin/*`.split("\n").map{ |f| File.basename(f) } 31 | s.require_paths = ["lib"] 32 | end 33 | -------------------------------------------------------------------------------- /config/tweetlr.yml.test: -------------------------------------------------------------------------------- 1 | results_per_page: 100 2 | result_type: recent 3 | search_term: 'cat+dog+unicorn' #find tweets containing any of these terms 4 | start_at_tweet_id: 61847783463854082 # the tweet id to start searching at 5 | api_endpoint_twitter: 'http://search.twitter.com/search.json' 6 | api_endpoint_tumblr: 'http://www.tumblr.com' 7 | tumblr_oauth_api_key: YOUR APPS TUMBLR API TOKEN 8 | tumblr_oauth_api_secret: YOUR APPS TUMBLR API SECRET 9 | tumblr_oauth_access_token_key: YOUR BLOGS OAUTH ACCESS TOKEN KEY 10 | tumblr_oauth_access_token_secret: YOUR BLOGS OAUTH ACCESS TOKEN SECRE 11 | tumblr_blog_hostname: YOUR BLOGS HOSTNAME #e.g. myblog.tumblr.com 12 | embedly_key: '' #tweetlr uses http://embedly.com for link processing. a free plan containing an api key is available & recommended to use in order to ensure full support 13 | update_period: 300 #check for updates every 300 secs = 5 minutes 14 | shouts: 'says' # will be concatenated after the username, before the message: @mr_x says: awesome things on a photo! 15 | loglevel: 1 # 0: debug, 1: info (default), 2: warn, 3: error, 5: fatal 16 | whitelist: #twitter accounts in that list will have their tweets published immediately. post from others will be saved as drafts. 
blank list will publish all tweets immediately 17 | - whitey_mc_whitelist 18 | - sven_kr -------------------------------------------------------------------------------- /config/tweetlr.yml: -------------------------------------------------------------------------------- 1 | results_per_page: 100 2 | result_type: recent 3 | search_term: 'cat+dog+unicorn' #find tweets containing any of these terms 4 | start_at_tweet_id: 61847783463854082 # the tweet id to start searching at 5 | twitter_app_consumer_key: YOUR APPS TWITTER API KEY 6 | twitter_app_consumer_secret: YOUR APPS TWITTER API SECRET 7 | twitter_oauth_token: YOUR APPS TWITTER API OAUTH TOKEN 8 | twitter_oauth_token_secret: YOUR APPS TWITTER API OAUTH TOKEN SECRET 9 | api_endpoint_twitter: 'http://search.twitter.com/search.json' 10 | api_endpoint_tumblr: 'http://www.tumblr.com' 11 | tumblr_oauth_api_key: YOUR APPS TUMBLR API TOKEN 12 | tumblr_oauth_api_secret: YOUR APPS TUMBLR API SECRET 13 | tumblr_oauth_access_token_key: YOUR BLOGS OAUTH ACCESS TOKEN KEY 14 | tumblr_oauth_access_token_secret: YOUR BLOGS OAUTH ACCESS TOKEN SECRE 15 | tumblr_blog_hostname: YOUR BLOGS HOSTNAME #e.g. myblog.tumblr.com 16 | embedly_key: '' #tweetlr uses http://embedly.com for link processing. a free plan containing an api key is available & recommended to use in order to ensure full support 17 | update_period: 300 #check for updates every 300 secs = 5 minutes 18 | shouts: 'says' # will be concatenated after the username, before the message: @mr_x says: awesome things on a photo! 19 | loglevel: 1 # 0: debug, 1: info (default), 2: warn, 3: error, 5: fatal 20 | whitelist: #twitter accounts in that list will have their tweets published immediately. post from others will be saved as drafts. blank list will publish all tweets immediately 21 | - whitey_mc_whitelist 22 | - sven_kr -------------------------------------------------------------------------------- /spec/processors/tumblr_processor_spec.rb: -------------------------------------------------------------------------------- 1 | require 'spec_helper' 2 | 3 | describe Tweetlr::Processors::Tumblr do 4 | before :all do 5 | config_file = File.join( Dir.pwd, 'config', TWEETLR_CONFIG_FILE) 6 | config = YAML.load_file(config_file) 7 | @twitter_response = {"from_user_id_str"=>"1915714", "profile_image_url"=>"http://a0.twimg.com/profile_images/386000279/2_normal.jpg", "created_at"=>"Sun, 17 Apr 2011 16:48:42 +0000", "from_user"=>"whitey_Mc_whIteLIst", "id_str"=>"59659561224765440", "metadata"=>{"result_type"=>"recent"}, "to_user_id"=>nil, "text"=>"Rigaer #wirsounterwegs #{@first_link} @ Augenarzt Dr. 
Lierow #{@second_link} #{@third_link}", "id"=>59659561224765440, "from_user_id"=>1915714, "geo"=>{"type"=>"Point", "coordinates"=>[52.5182, 13.454]}, "iso_language_code"=>"de", "place"=>{"id"=>"3078869807f9dd36", "type"=>"city", "full_name"=>"Berlin, Berlin"}, "to_user_id_str"=>nil, "source"=>"<a href="http://instagr.am" rel="nofollow">instagram</a>"} 8 | @tweetlr_config = config 9 | end 10 | it "posts to tumblr" do 11 | stub_tumblr 12 | stub_oauth 13 | tumblr_post = Tweetlr::Combinators::TwitterTumblr::generate_photo_post_from_tweet @twitter_response, @tweetlr_config 14 | tumblr_post[:date] = Time.now.to_s 15 | tumblr_post[:source] = 'http://distilleryimage6.instagram.com/db72627effde11e1b3f322000a1e8899_7.jpg' 16 | response = Tweetlr::Processors::Tumblr::post @tweetlr_config.merge(tumblr_post) 17 | response.should be 18 | response.code.should == "201" 19 | end 20 | end 21 | -------------------------------------------------------------------------------- /lib/tweetlr/processors/http.rb: -------------------------------------------------------------------------------- 1 | require 'curb' 2 | require 'json' 3 | require "#{File.dirname(__FILE__)}/../log_aware" 4 | 5 | module Tweetlr::Processors 6 | #utilities for handling http 7 | module Http 8 | include Tweetlr::LogAware 9 | 10 | USER_AGENT = %{Mozilla/5.0 (compatible; tweetlr; +http://tweetlr.5v3n.com)} 11 | 12 | def self.log 13 | Tweetlr::LogAware.log #TODO why doesn't the include make the log method accessible? 14 | end 15 | #convenience method for curl http get calls 16 | def self.http_get(request) 17 | tries = 3 18 | curl = nil 19 | begin 20 | curl = Curl::Easy.new request 21 | curl.useragent = USER_AGENT 22 | curl.perform 23 | rescue Curl::Err::CurlError => err 24 | log.error "Failure in Curl call: #{err}" if log 25 | tries -= 1 26 | sleep 3 27 | if tries > 0 28 | retry 29 | end 30 | end 31 | return curl 32 | end 33 | #convenience method for curl http get calls and parsing them to json. 34 | def self.http_get_json(request) 35 | curl = self.http_get(request) 36 | begin 37 | JSON.parse curl.body_str 38 | rescue JSON::ParserError => err 39 | begin 40 | log.warn "#{err}: Could not parse response for #{request} - this is probably not a json response: #{curl.body_str}" 41 | return nil 42 | rescue Encoding::CompatibilityError => err 43 | log.error "Trying to rescue a JSON::ParserError for '#{request}' we got stuck in a Encoding::CompatibilityError." 
44 | return nil 45 | end 46 | end 47 | end 48 | end 49 | end -------------------------------------------------------------------------------- /spec/core_spec.rb: -------------------------------------------------------------------------------- 1 | require 'spec_helper' 2 | 3 | describe Tweetlr::Core do 4 | 5 | config_file = File.join( Dir.pwd, 'config', TWEETLR_CONFIG_FILE) 6 | config = YAML.load_file(config_file) 7 | TIMESTAMP = config['twitter_timestamp'] 8 | WHITELIST = config['whitelist'] 9 | 10 | before :all do 11 | config_file = File.join( Dir.pwd, 'config', TWEETLR_CONFIG_FILE) 12 | config = YAML.load_file(config_file) 13 | @tweetlr_config = config 14 | end 15 | 16 | let(:stubbed_tumblr_post) do 17 | {:tumblr_blog_hostname=>nil, :type=>"photo", :date=>"Sun, 28 Apr 2013 14:10:43 +0000", :source=>"https://irs0.4sqi.net/img/general/600x600/304170_IfHzPdhxs9mpGlPk8jogxdul8q8KhTBNmeAiP9H5TyY.jpg", :tags=>"sven_kr", :state=>"published", :caption=>"@sven_kr: #coffeediary Wanted to stay at home but had to test the new tweetlr ;-) (@ Mamalicious w/ @snoopsmaus) [pic]: http://t.co/7ilE9BDJxJ"} 18 | end 19 | let(:first_link) { "http://url.com" } 20 | let(:second_link) { "http://instagr.am/p/DzCWn/" } 21 | let(:third_link) { "https://imageurl.com" } 22 | let(:twitter_response) { {"from_user_id_str"=>"1915714", "profile_image_url"=>"http://a0.twimg.com/profile_images/386000279/2_normal.jpg", "created_at"=>"Sun, 17 Apr 2011 16:48:42 +0000", "from_user"=>"whitey_Mc_whIteLIst", "id_str"=>"59659561224765440", "metadata"=>{"result_type"=>"recent"}, "to_user_id"=>nil, "text"=>"Rigaer #wirsounterwegs #{first_link} @ Augenarzt Dr. Lierow #{second_link} #{third_link}", "id"=>59659561224765440, "from_user_id"=>1915714, "geo"=>{"type"=>"Point", "coordinates"=>[52.5182, 13.454]}, "iso_language_code"=>"de", "place"=>{"id"=>"3078869807f9dd36", "type"=>"city", "full_name"=>"Berlin, Berlin"}, "to_user_id_str"=>nil, "source"=>"<a href="http://instagr.am" rel="nofollow">instagram</a>"} } 23 | 24 | describe ".new" do 25 | it "initializes a new instance" do 26 | new_instance = Tweetlr::Core.new @tweetlr_config 27 | new_instance.should be 28 | end 29 | end 30 | describe ".crawl(config)" do 31 | before(:each) do 32 | stub_tumblr 33 | stub_oauth 34 | end 35 | it "crawls twitter and posts to tumblr" do 36 | Tweetlr::Combinators::TwitterTumblr.stub(:generate_photo_post_from_tweet).and_return stubbed_tumblr_post 37 | since_id_before = @tweetlr_config['since_id'] 38 | result = Tweetlr::Core.crawl(@tweetlr_config) 39 | since_id_before.should_not == result[:since_id] 40 | end 41 | end 42 | end 43 | -------------------------------------------------------------------------------- /lib/tweetlr/processors/tumblr.rb: -------------------------------------------------------------------------------- 1 | require "#{File.dirname(__FILE__)}/../log_aware" 2 | require 'oauth' 3 | 4 | module Tweetlr::Processors 5 | #utilities for handling tumblr 6 | module Tumblr 7 | GENERATOR = %{tweetlr - http://tweetlr.5v3n.com} 8 | API_ENDPOINT_TUMBLR = 'http://www.tumblr.com' 9 | include Tweetlr::LogAware 10 | def self.log 11 | Tweetlr::LogAware.log #TODO why doesn't the include make the log method accessible? 12 | end 13 | #post a tumblr photo entry. 
14 | # 15 | #required arguments are :tumblr_blog_hostname, :tumblr_blog_hostname, :tumblr_oauth_api_secret, :tumblr_oauth_access_token_secret, :source, :caption, :state 16 | # 17 | #optional arguments: :tags, :type (default: 'photo') 18 | # 19 | def self.post(options={}) 20 | log.info "posting to #{options['tumblr_blog_hostname'] || options['group']}..." 21 | base_hostname = options['tumblr_blog_hostname'] || options['group'] 22 | tumblr_oauth_api_key= options['tumblr_oauth_api_key'] 23 | tumblr_oauth_api_secret= options['tumblr_oauth_api_secret'] 24 | access_token_key = options['tumblr_oauth_access_token_key'] 25 | access_token_secret = options['tumblr_oauth_access_token_secret'] 26 | type = options['type'] || 'photo' 27 | tags = options['tags'] || '' 28 | post_response = nil 29 | 30 | if base_hostname && access_token_key && access_token_secret 31 | 32 | consumer = OAuth::Consumer.new(tumblr_oauth_api_key, tumblr_oauth_api_secret, 33 | { :site => 'http://www.tumblr.com', 34 | :request_token_path => '/oauth/request_token', 35 | :authorize_path => '/oauth/authorize', 36 | :access_token_path => '/oauth/access_token', 37 | :http_method => :post } ) 38 | 39 | access_token = OAuth::AccessToken.new(consumer, access_token_key, access_token_secret) 40 | 41 | post_response = access_token.post( 42 | "http://api.tumblr.com/v2/blog/#{base_hostname}/post", { 43 | :type => type, 44 | :source => options[:source], 45 | :caption => options[:caption], 46 | :date => options[:date], 47 | :tags => tags, 48 | :state => options[:state], 49 | :generator => GENERATOR 50 | } 51 | ) 52 | end 53 | post_response 54 | end 55 | end 56 | end -------------------------------------------------------------------------------- /spec/processors/twitter_processor_spec.rb: -------------------------------------------------------------------------------- 1 | require 'spec_helper' 2 | 3 | describe Tweetlr::Processors::Twitter do 4 | before :all do 5 | config_file = File.join( Dir.pwd, 'config', TWEETLR_CONFIG_FILE) 6 | @twitter_config = YAML.load_file(config_file) 7 | end 8 | before :each do 9 | @first_link = "http://url.com" 10 | @second_link = "http://instagr.am/p/DzCWn/" 11 | @third_link = "https://imageurl.com" 12 | @twitter_response = {"from_user_id_str"=>"1915714", "profile_image_url"=>"http://a0.twimg.com/profile_images/386000279/2_normal.jpg", "created_at"=>"Sun, 17 Apr 2011 16:48:42 +0000", "from_user"=>"whitey_Mc_whIteLIst", "id_str"=>"59659561224765440", "metadata"=>{"result_type"=>"recent"}, "to_user_id"=>nil, "text"=>"Rigaer #wirsounterwegs #{@first_link} @ Augenarzt Dr. Lierow #{@second_link} #{@third_link}", "id"=>59659561224765440, "from_user_id"=>1915714, "geo"=>{"type"=>"Point", "coordinates"=>[52.5182, 13.454]}, "iso_language_code"=>"de", "place"=>{"id"=>"3078869807f9dd36", "type"=>"city", "full_name"=>"Berlin, Berlin"}, "to_user_id_str"=>nil, "source"=>"<a href="http://instagr.am" rel="nofollow">instagram</a>"} 13 | end 14 | describe "#search(config)" do 15 | it "searches twitter for a given term" do 16 | #stub_twitter 17 | response = Tweetlr::Processors::Twitter::search @twitter_config 18 | tweets = response.statuses 19 | tweets.should be 20 | tweets.should_not be_empty 21 | end 22 | it "copes with errors by retrying, not raising" do 23 | ::Twitter.stub(:search).and_raise(::Twitter::Error::TooManyRequests) 24 | Tweetlr::Processors::Twitter.stub!(:sleep) #releasing the sleep handbrake... 
25 | Tweetlr::Processors::Twitter.should_receive(:sleep) #called in rescue block 26 | expect { Tweetlr::Processors::Twitter.call_twitter_api('mocky wocky',{})}.to_not raise_error(::Twitter::Error::TooManyRequests) 27 | end 28 | end 29 | describe "#lazy_search(config)" do 30 | it "searches twitter for a given term" do 31 | response = Tweetlr::Processors::Twitter::lazy_search @twitter_config 32 | tweets = response['results'] 33 | tweets.should be 34 | tweets.should_not be_empty 35 | end 36 | it "copes with nil as input" do 37 | Tweetlr::Processors::Twitter::lazy_search(nil).should be_nil 38 | end 39 | end 40 | describe "#extract_links()" do 41 | it "extracts links" do 42 | links = Tweetlr::Processors::Twitter::extract_links '' 43 | links.should be_nil 44 | links = Tweetlr::Processors::Twitter::extract_links @twitter_response 45 | links[0].should == @first_link 46 | links[1].should == @second_link 47 | links[2].should == @third_link 48 | end 49 | end 50 | end -------------------------------------------------------------------------------- /lib/tweetlr/combinators/twitter_tumblr.rb: -------------------------------------------------------------------------------- 1 | local_path=File.dirname(__FILE__) 2 | require "#{local_path}/../processors/twitter" 3 | require "#{local_path}/../processors/tumblr" 4 | require "#{local_path}/../processors/photo_service" 5 | require "#{local_path}/../log_aware" 6 | 7 | module Tweetlr::Combinators 8 | module TwitterTumblr 9 | include Tweetlr::LogAware 10 | def self.log 11 | Tweetlr::LogAware.log #TODO why doesn't the include make the log method accessible? 12 | end 13 | #extract a linked image file's url from a tweet. first found image will be used. 14 | def self.extract_image_url(tweet, embedly_key=nil) 15 | links = Tweetlr::Processors::Twitter::extract_links tweet 16 | image_url = nil 17 | if links 18 | links.each do |link| 19 | image_url = Tweetlr::Processors::PhotoService::find_image_url(link, embedly_key) 20 | return image_url if Tweetlr::Processors::PhotoService::photo? image_url 21 | end 22 | end 23 | image_url 24 | end 25 | #generate the data for a tumblr photo entry by parsing a tweet 26 | def self.generate_photo_post_from_tweet(tweet, options = {}) 27 | log.debug "#{self}.generate_photo_post_from_tweet with options: #{options.inspect}" 28 | process_options_and_tweet options, tweet 29 | end 30 | private 31 | def self.process_options_and_tweet(options, tweet) 32 | whitelist = options[:whitelist] 33 | whitelist.each {|entry| entry.downcase!} if (whitelist && whitelist.size != 0) 34 | if !Tweetlr::Processors::Twitter::retweet? tweet['text'] 35 | log.debug "tweet: #{tweet}" 36 | tumblr_post = prepare_tumblr_post options, tweet, whitelist 37 | end 38 | end 39 | def self.prepare_tumblr_post(options, tweet, whitelist) 40 | tumblr_post = {} 41 | tumblr_post[:tumblr_blog_hostname] = options[:tumblr_blog_hostname] || options[:group] 42 | tumblr_post[:type] = 'photo' 43 | tumblr_post[:date] = tweet['created_at'] 44 | tumblr_post[:source] = extract_image_url tweet, options[:embedly_key] 45 | user = tweet['from_user'] 46 | tumblr_post[:tags] = user 47 | tweet_id = tweet['id'] 48 | if !whitelist || whitelist.size == 0 || whitelist.member?(user.downcase) 49 | state = 'published' 50 | else 51 | state = 'draft' 52 | end 53 | tumblr_post[:state] = state 54 | shouts = " #{@shouts}" if @shouts 55 | tumblr_post[:caption] = %?
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
5 |
6 |
7 |
8 |
9 |
10 | tweetlr crawls twitter for a given term, extracts photos out of the collected tweets' short urls and posts the images to tumblr.
11 |
12 | There is a new [tweetlr "as-a-service"](http://tweetlr.5v3n.com) where you can simply create an account, without having to know about or host anything yourself.
13 |
14 | ## Supported image sharing services
15 |
16 | tweetlr supports
17 |
18 | - instagram
19 | - twitter
20 | - photobucket
21 | - twimg
22 | - foursquare
23 | - path.com
24 | - twitpic
25 | - yfrog
26 | - imgly
27 | - eyeem.com
28 | - t.co shortened links to pictures
29 | - every photo service accessible via embed.ly (see [photo providers](http://embed.ly/providers))
30 |
31 |
32 | ## Installation
33 |
34 | Use `gem install tweetlr` if you're using *rubygems* or add the line `gem 'tweetlr'` to your `Gemfile` if you're using *bundler*.
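For the *bundler* route, the relevant `Gemfile` entry looks like this (the `source` line is the same one this repo's own `Gemfile` uses):

```ruby
# Gemfile
source 'https://rubygems.org'

gem 'tweetlr'
```

Running `bundle install` afterwards pulls in tweetlr and its dependencies.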
35 |
36 | ## Configuration
37 |
38 | It's essential that you have a directory called `config` in the directory you start tweetlr from; it has to contain the configuration file `tweetlr.yml`:
39 |
40 | ```yaml
41 | results_per_page: 100
42 | result_type: recent
43 | search_term: 'cat+dog+unicorn' #find tweets containing any of these terms (separate terms with '+')
44 | start_at_tweet_id: 61847783463854082 # the tweet id to start searching at
45 | api_endpoint_tumblr: 'http://www.tumblr.com'
46 | twitter_app_consumer_key: YOUR APPS TWITTER API KEY
47 | twitter_app_consumer_secret: YOUR APPS TWITTER API SECRET
48 | twitter_oauth_token: YOUR APPS TWITTER API OAUTH TOKEN
49 | twitter_oauth_token_secret: YOUR APPS TWITTER API OAUTH TOKEN SECRET
50 | tumblr_oauth_api_key: YOUR APPS TUMBLR API TOKEN
51 | tumblr_oauth_api_secret: YOUR APPS TUMBLR API SECRET
52 | tumblr_oauth_access_token_key: YOUR BLOGS OAUTH ACCESS TOKEN KEY
53 | tumblr_oauth_access_token_secret: YOUR BLOGS OAUTH ACCESS TOKEN SECRET
54 | tumblr_blog_hostname: YOUR BLOGS HOSTNAME #e.g. myblog.tumblr.com
55 | embedly_key: '' #tweetlr uses http://embedly.com for link processing. a free plan containing an api key is available & recommended to use in order to ensure full support
56 | update_period: 300 #check for updates every 300 secs = 5 minutes
57 | shouts: 'says' # will be concatenated after the username, before the message: @mr_x says: awesome things on a photo!
58 | loglevel: 1 # 0: debug, 1: info (default), 2: warn, 3: error, 5: fatal
59 | whitelist: #twitter accounts on this list will have their tweets published immediately. posts from others will be saved as drafts. a blank list will publish all tweets immediately
60 | - whitey_mc_whitelist
61 | - sven_kr
62 | ```
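tweetlr reads this file as a plain, string-keyed YAML hash; the specs in this repo do exactly that and then hand the hash to `Tweetlr::Core`. A minimal sketch (the relative `config/tweetlr.yml` path is an assumption taken from the spec setup):

```ruby
require 'yaml'

# load the configuration the same way the specs do
config = YAML.load_file(File.join(Dir.pwd, 'config', 'tweetlr.yml'))

config['search_term']   #=> "cat+dog+unicorn"
config['update_period'] #=> 300
```

The resulting hash is the same object the specs pass to `Tweetlr::Core.new` and `Tweetlr::Core.crawl`.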
63 |
64 | ## Usage
65 |
66 | Make sure you put the configuration file in its proper place as mentioned above, then:
67 |
68 | start/stop tweetlr using `tweetlr start`/`tweetlr stop`. Run `tweetlr` without arguments for a list of the daemon's options.
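Under the hood the daemon boils down to calling `Tweetlr::Core.crawl` once per `update_period`. A rough foreground sketch of that loop, assuming a plain `loop`/`sleep` in place of whatever scheduling the daemonized `bin/tweetlr` actually sets up via `daemons`/`eventmachine`:

```ruby
require 'yaml'
require 'tweetlr'

config   = YAML.load_file('config/tweetlr.yml')
interval = config['update_period'] || Tweetlr::UPDATE_PERIOD

loop do
  # crawl returns the config hash with an updated :since_id,
  # so each pass only processes tweets newer than the previous one
  config = Tweetlr::Core.crawl(config)
  sleep interval
end
```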
69 |
70 | For an easy-to-modify working example, check out the [tweetlr_demo](http://github.com/5v3n/tweetlr_demo).
71 |
72 | Enjoy!
73 |
74 |
--------------------------------------------------------------------------------
/lib/tweetlr/processors/twitter.rb:
--------------------------------------------------------------------------------
1 | local_path=File.dirname(__FILE__)
2 | require "#{local_path}/http"
3 | require "#{local_path}/../log_aware"
4 | require 'twitter'
5 |
6 | module Tweetlr::Processors
7 | #utilities for dealing with twitter
8 | module Twitter
9 | include Tweetlr::LogAware
10 | def self.log
11 | Tweetlr::LogAware.log #TODO why doesn't the include make the log method accessible?
12 | end
13 |
14 | #checks if the message is a retweet
15 | def self.retweet?(message)
16 | message.index('RT @') || message.index(%{"@}) || message.index("\u201c@") #detect retweets
17 | end
18 |
19 | #extract the links from a given tweet
20 | def self.extract_links(tweet)
21 | if tweet
22 | text = tweet['text']
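      #String#gsub called with a pattern and no block returns an Enumerator of the matches, hence the .to_a below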
23 | text.gsub(/https?:\/\/[\S]+/).to_a if text
24 | end
25 | end
26 |
27 | #fire a new search
28 | def self.search(config)
29 | search_call = "#{config['search_term'].gsub('+', ' OR ')} filter:links"
30 | log.debug "#{self}::search search_call: #{search_call}"
31 | response = self.call_twitter_api(search_call, config)
32 | log.debug "#{self}::call_twitter_api response: #{response.inspect}"
33 | response
34 | end
35 |
36 | # lazy update - search for a term or refresh the search if a response is available already
37 | def self.lazy_search(config)
38 | log.debug "#{self}::lazy_search called with config #{config}"
39 | response = nil
40 | if config
41 | search_call = "#{config['search_term'].gsub('+', ' OR ')} filter:links"
42 | log.info "lazy search using '#{search_call}' (:since_id => #{config['since_id'] || config[:since_id]}, :count => #{config['results_per_page']}, :result_type => #{config['result_type']})"
43 | response = self.call_twitter_api(search_call, config, :lazy)
44 | else
45 | log.error "#{self}.lazy_search: no config given!"
46 | end
47 | response
48 | end
49 | private
50 | def self.call_twitter_api(search_call, config, lazy=false)
51 | apply_twitter_api_configuration config
52 | max_attempts = 3
53 | num_attempts = 0
54 | begin
55 | num_attempts += 1
56 | call_twitter_with search_call, config, lazy
57 | rescue ::Twitter::Error::TooManyRequests => error
58 | if num_attempts <= max_attempts
59 | sleep error.rate_limit.reset_in
60 | retry
61 | else
62 | log.error "Twitter API rate limit exceeded - giving up after #{max_attempts} attempts. (#{error})"
63 | end
64 | end
65 | end
66 | def self.apply_twitter_api_configuration(config)
67 | ::Twitter.configure do |configuration|
68 | configuration.consumer_key = config['twitter_app_consumer_key']
69 | configuration.consumer_secret = config['twitter_app_consumer_secret']
70 | configuration.oauth_token = config['twitter_oauth_token']
71 | configuration.oauth_token_secret = config['twitter_oauth_token_secret']
72 | end
73 | end
74 | def self.call_twitter_with(search_call, config, lazy)
75 | if lazy
76 | response = ::Twitter.search(search_call, :since_id => config['since_id'] || config[:since_id], :count => config['results_per_page'], :result_type => config['result_type'])
77 | else
78 | response = ::Twitter.search(search_call, :count => config['results_per_page'], :result_type => config['result_type'])
79 | end
80 | response
81 | end
82 | end
83 | end
--------------------------------------------------------------------------------
/spec/processors/photo_services_processor_spec.rb:
--------------------------------------------------------------------------------
1 | require 'spec_helper'
2 |
3 | describe Tweetlr::Processors::PhotoService do
4 | before :each do
5 | @links = {
6 | :foursquare => 'http://4sq.com/x4p87N',
7 | :eyeem => 'http://www.eyeem.com/p/326629',
8 | :path => 'http://path.com/p/KQd57',
9 | :instagram => "http://instagr.am/p/DzCWn/",
10 | :twitpic => "http://twitpic.com/449o2x",
11 | :yfrog => "http://yfrog.com/h4vlfp",
12 | :tco => 'http://t.co/MUGNayA',
13 | :embedly => 'http://flic.kr/p/973hTv',
14 | :twitter_pics => 'http://t.co/FmyBGfyY',
15 | :twimg => 'http://twitter.com/KSilbereisen/status/228035435237097472',
16 | :imgly => "http://img.ly/3M1o"
17 | }
18 | end
19 | it "finds a picture's url from the supported services" do
20 | @links.each do |service,link|
21 | send "stub_#{service}"
22 | url = Tweetlr::Processors::PhotoService::find_image_url link
23 | url.should be, "service #{service} not working!"
24 | check_pic_url_extraction service if [:twimg, :instagram,:yfrog,:imgly,:foursqaure,:not_listed].index service
25 | end
26 | end
27 | it "extracts images from eyeem" do
28 | stub_eyeem
29 | link = Tweetlr::Processors::PhotoService::find_image_url @links[:eyeem]
30 | link.should be
31 | link.should == "http://www.eyeem.com/thumb/h/1024/e35db836c5d3f02498ef60fc3d53837fbe621561-1334126483"
32 | end
33 | it "doesn't find images in embedly results that are not explicitly marked as 'Photo' or 'Image' via the response's 'thumbnail_url' attribute" do
34 | stub_embedly_no_photo
35 | link = Tweetlr::Processors::PhotoService::find_image_url 'http://makersand.co/'
36 | link.should be_nil
37 | end
38 | describe "for foursquare" do
39 | it "does find an image that is not the profile pic" do
40 | stub_foursquare
41 | link = Tweetlr::Processors::PhotoService::find_image_url @links[:foursquare]
42 | link.should be
43 | link.index('userpix_thumbs').should_not be
44 | end
45 | it "does not extract symbols from tweeted links that contain no images" do
46 | stub_foursquare_no_photo
47 | link = Tweetlr::Processors::PhotoService::find_image_url @links[:foursquare]
48 | link.should_not be
49 | end
50 | end
51 | it "finds path images for redirected moments as well" do
52 | stub_path_redirected
53 | url = Tweetlr::Processors::PhotoService::find_image_url @links[:path]
54 | url.should == 'https://s3-us-west-1.amazonaws.com/images.path.com/photos2/f90fd831-43c3-48fd-84cb-5c3bae52957a/2x.jpg'
55 | end
56 | it "should not crash if embedly fallback won't find a link" do
57 | stub_bad_request
58 | url = Tweetlr::Processors::PhotoService::find_image_url "http://mopskopf"
59 | end
60 | it "should not crash with an encoding error when response is non-us-ascii" do
61 | stub_utf8_response
62 | url = Tweetlr::Processors::PhotoService::find_image_url "http://api.instagram.com/oembed?url=http://instagr.am/p/Gx%E2%80%946/"
63 | end
64 | it "follows redirects" do
65 | stub_imgly
66 | link = Tweetlr::Processors::PhotoService::link_url_redirect 'im mocked anyways'
67 | link.should == 'http://s3.amazonaws.com/imgly_production/899582/full.jpg'
68 | end
69 | it "copes with redirect errors" do
70 | Curl::Easy.any_instance.stub(:http_get).and_raise(Curl::Err::CurlError)
71 | Tweetlr::Processors::PhotoService.stub!(:sleep) #releasing the sleep handbrake...
72 | Tweetlr::Processors::PhotoService.should_receive(:sleep).with(3)
73 | expect { Tweetlr::Processors::PhotoService::link_url_redirect 'im mocked anyways'}.to_not raise_error(Curl::Err::CurlError)
74 | end
75 | end
--------------------------------------------------------------------------------
/lib/tweetlr/core.rb:
--------------------------------------------------------------------------------
1 | # encoding: UTF-8
2 | local_path=File.dirname(__FILE__)
3 | require "#{local_path}/processors/twitter"
4 | require "#{local_path}/processors/http"
5 | require "#{local_path}/processors/photo_service"
6 | require "#{local_path}/processors/tumblr"
7 | require "#{local_path}/combinators/twitter_tumblr"
8 | require "#{local_path}/log_aware"
9 | require 'uri'
10 |
11 | class Tweetlr::Core
12 | include Tweetlr::LogAware
13 | def self.log
14 | Tweetlr::LogAware.log #TODO why doesn't the include make the log method accessible?
15 | end
16 |
17 | def initialize(args)
18 | initialize_logging(args[:loglevel])
19 | initialize_attributes(args)
20 | Tweetlr::LogAware.log.info "Tweetlr #{Tweetlr::VERSION} initialized. Ready to roll."
21 | end
22 |
23 | def self.crawl(config)
24 | log.debug "#{self}.crawl() using config: #{config.inspect}"
25 | twitter_config = prepare_twitter_config config
26 | tumblr_config = prepare_tumblr_config config
27 | twitter_config[:search_term] = URI::escape(twitter_config[:search_term]) if twitter_config[:search_term]
28 | log.info "starting tweetlr crawl..."
29 | response = {}
30 | response = Tweetlr::Processors::Twitter::lazy_search(twitter_config)
31 | if response
32 | process_response response, config
33 | # store the highest tweet id
34 | config[:since_id] = response['max_id']
35 | else
36 | log.error "twitter search returned no response. hail the failwhale!"
37 | end
38 | log.info "finished tweetlr crawl."
39 | return config
40 | end
41 | private
42 | def initialize_attributes(args)
43 | @email = args[:tumblr_email]
44 | @password = args[:tumblr_password]
45 | @cookie = args[:cookie]
46 | @api_endpoint_twitter = args[:api_endpoint_twitter] || Tweetlr::API_ENDPOINT_TWITTER
47 | @api_endpoint_tumblr = args[:api_endpoint_tumblr] || Tweetlr::API_ENDPOINT_TUMBLR
48 | @whitelist = args[:whitelist]
49 | @shouts = args[:shouts]
50 | @update_period = args[:update_period] || Tweetlr::UPDATE_PERIOD
51 | @whitelist.each {|entry| entry.downcase!} if @whitelist
52 | end
53 | def initialize_logging(loglevel)
54 | log = Tweetlr::LogAware.log || Logger.new(STDOUT)
55 | if (Logger::DEBUG..Logger::UNKNOWN).to_a.index(loglevel)
56 | log.level = loglevel
57 | else
58 | log.level = Logger::INFO
59 | end
60 | log.debug "log level set to #{log.level}"
61 | Tweetlr::LogAware.log=log
62 | end
63 | def self.process_response(response, config)
64 | tweets = response['results']
65 | process_and_post tweets, config if tweets
66 | end
67 | def self.process_and_post(tweets, config)
68 | tweets.each do |tweet|
69 | tumblr_post = Tweetlr::Combinators::TwitterTumblr::generate_photo_post_from_tweet(tweet, {:whitelist => config[:whitelist], :embedly_key => config[:embedly_key], :group => config[:group]})
70 | if tumblr_post.nil? || tumblr_post[:source].nil?
71 | log.warn "could not get image source: tweet: #{tweet} --- tumblr post: #{tumblr_post.inspect}"
72 | else
73 | post_to_tumblr tumblr_post, config
74 | end
75 | end
76 | end
77 | def self.post_to_tumblr(tumblr_post, config)
78 | log.debug "tumblr post: #{tumblr_post}"
79 | res = Tweetlr::Processors::Tumblr.post tumblr_post.merge(config)
80 | if res && res.code == "201"
81 | log.info "tumblr post created (tumblr response header: #{res.header})"
82 | elsif res
83 | log.warn "tumblr response: #{res.header} #{res.body}"
84 | else
85 | log.warn "there was no tumblr post response - most probably due to a missing oauth authorization"
86 | end
87 | end
88 | def self.prepare_twitter_config(config)
89 | config[:since_id] ||= config[:start_at_tweet_id]
90 | config[:terms] ||= config[:search_term]
91 | config[:results_per_page] ||= Tweetlr::TWITTER_RESULTS_PER_PAGE
92 | config[:result_type] ||= Tweetlr::TWITTER_RESULTS_TYPE
93 | config[:api_endpoint_twitter] ||= Tweetlr::API_ENDPOINT_TWITTER
94 | config
95 | end
96 | def self.prepare_tumblr_config(config)
97 | config[:tumblr_blog_hostname] ||= config[:group]
98 | config
99 | end
100 | end
101 |
--------------------------------------------------------------------------------
/spec/combinators/twitter_tumblr_combinator_spec.rb:
--------------------------------------------------------------------------------
1 | require 'spec_helper'
2 |
3 | describe Tweetlr::Combinators::TwitterTumblr do
4 | before :each do
5 | @first_link = "http://url.com"
6 | @second_link = "http://instagr.am/p/DzCWn/"
7 | @third_link = "https://imageurl.com"
8 | @twitter_response = {"from_user_id_str"=>"1915714", "profile_image_url"=>"http://a0.twimg.com/profile_images/386000279/2_normal.jpg", "created_at"=>"Sun, 17 Apr 2011 16:48:42 +0000", "from_user"=>"whitey_Mc_whIteLIst", "id_str"=>"59659561224765440", "metadata"=>{"result_type"=>"recent"}, "to_user_id"=>nil, "text"=>"Rigaer #wirsounterwegs #{@first_link} @ Augenarzt Dr. Lierow #{@second_link} #{@third_link}", "id"=>59659561224765440, "from_user_id"=>1915714, "geo"=>{"type"=>"Point", "coordinates"=>[52.5182, 13.454]}, "iso_language_code"=>"de", "place"=>{"id"=>"3078869807f9dd36", "type"=>"city", "full_name"=>"Berlin, Berlin"}, "to_user_id_str"=>nil, "source"=>"<a href=\"http://instagr.am\" rel=\"nofollow\">instagram</a>"}
9 | @retweet = @twitter_response.merge "text" => "bla bla RT @fgd: tueddelkram"
10 | @new_style_retweet = @twitter_response.merge "text" => "and it scales! \u201c@moeffju: http://t.co/8gUSPKu #hktbl1 #origami success! :)\u201d"
11 | @new_style_retweet_no_addition = @twitter_response.merge "text" => "\u201c@moeffju: http://t.co/8gUSPKu #hktbl1 #origami success! :)\u201d"
12 | @non_whitelist_tweet = @twitter_response.merge 'from_user' => 'nonwhitelist user'
13 | @whitelist = ['whitey_mc_whitelist']
14 | @tweets = {
15 | :instagram => {'text' => "jadda jadda http://instagr.am/p/DzCWn/"},
16 | :twitpic => {'text' => "jadda jadda http://twitpic.com/449o2x"},
17 | :yfrog => {'text' => "jadda jadda http://yfrog.com/h4vlfp"},
18 | :imgly => {'text' => "jadda jadda http://img.ly/3M1o"},
19 | :tco => {'text' => "jadda jadda http://t.co/MUGNayA"},
20 | :embedly => {'text' => "jadda jadda http://flic.kr/p/973hTv"},
21 | :twitter_pics => {'text' => "jadda jadda http://t.co/FmyBGfyY"}
22 | }
23 | @links = {
24 | :instagram => "http://instagr.am/p/DzCWn/",
25 | :twitpic => "http://twitpic.com/449o2x",
26 | :yfrog => "http://yfrog.com/h4vlfp",
27 | :imgly => "http://img.ly/3M1o",
28 | :tco => 'http://t.co/MUGNayA',
29 | :embedly => 'http://flic.kr/p/973hTv',
30 | :twitter_pics => 'http://t.co/FmyBGfyY'
31 | }
32 | end
33 | context "handles pictures in tweets" do
34 | it "extracting their corresponding links" do
35 | @tweets.each do |key,value|
36 | send "stub_#{key}"
37 | url = Tweetlr::Combinators::TwitterTumblr.extract_image_url value
38 | url.should be, "service #{key} not working!"
39 | check_pic_url_extraction key if [:instagram,:picplz,:yfrog,:imgly,:not_listed].index key
40 | end
41 | end
42 | it "using the first image link found in a tweet with multiple links" do
43 | stub_instagram
44 | link = Tweetlr::Combinators::TwitterTumblr.extract_image_url @twitter_response
45 | link.should == 'http://distillery.s3.amazonaws.com/media/2011/05/02/d25df62b9cec4a138967a3ad027d055b_7.jpg'
46 | end
47 | it "not returning links that do not belong to images" do
48 | stub_no_image_link
49 | link = Tweetlr::Combinators::TwitterTumblr.extract_image_url @twitter_response
50 | link.should_not be
51 | end
52 | end
53 | context "given a user whitelist" do
54 | it "should mark whitelist users' tweets as published" do
55 | stub_instagram
56 | post = Tweetlr::Combinators::TwitterTumblr::generate_photo_post_from_tweet @twitter_response, :whitelist => @whitelist
57 | post[:state].should == 'published'
58 | end
59 | it "should mark non whitelist users' tweets as drafts" do
60 | stub_instagram
61 | post = Tweetlr::Combinators::TwitterTumblr::generate_photo_post_from_tweet @non_whitelist_tweet, :whitelist => @whitelist
62 | post[:state].should == 'draft'
63 | end
64 | end
65 | context "without a user whitelist (whitelist nil or empty)" do
66 | it "should mark every users' posts as published" do
67 | stub_instagram
68 | post = Tweetlr::Combinators::TwitterTumblr::generate_photo_post_from_tweet @twitter_response, :whitelist => nil
69 | post[:state].should == 'published'
70 | stub_instagram
71 | post = Tweetlr::Combinators::TwitterTumblr::generate_photo_post_from_tweet @non_whitelist_tweet, :whitelist => nil
72 | post[:state].should == 'published'
73 | post = Tweetlr::Combinators::TwitterTumblr::generate_photo_post_from_tweet @twitter_response, :whitelist => ""
74 | post[:state].should == 'published'
75 | stub_instagram
76 | post = Tweetlr::Combinators::TwitterTumblr::generate_photo_post_from_tweet @non_whitelist_tweet, :whitelist => ""
77 | post[:state].should == 'published'
78 | end
79 | end
80 | it "should not use retweets which would produce double blog posts" do
81 | post = Tweetlr::Combinators::TwitterTumblr::generate_photo_post_from_tweet @retweet, :whitelist => @whitelist
82 | post.should_not be
83 | end
84 | context "should not use new style retweets which would produce double blog posts" do
85 | it "for quotes in context" do
86 | post = Tweetlr::Combinators::TwitterTumblr::generate_photo_post_from_tweet @new_style_retweet, :whitelist => @whitelist
87 | post.should_not be
88 | end
89 | it "for quotes without further text addition" do
90 | post = Tweetlr::Combinators::TwitterTumblr::generate_photo_post_from_tweet @new_style_retweet_no_addition, :whitelist => @whitelist
91 | post.should_not be
92 | end
93 | end
94 | context "copes with different tumblelogs namely" do
95 | it "uses a given blog via group option to post to" do
96 | stub_instagram
97 | desired_group = 'mygroup.tumblr.com'
98 | tumblr_post = Tweetlr::Combinators::TwitterTumblr.generate_photo_post_from_tweet @twitter_response, {:whitelist => @whitelist, :group => desired_group}
99 | tumblr_post[:tumblr_blog_hostname].should eq desired_group
100 | end
101 | it "uses a given blog via tumblr_blog_hostname to post to" do
102 | stub_instagram
103 | desired_group = 'mygroup.tumblr.com'
104 | tumblr_post = Tweetlr::Combinators::TwitterTumblr.generate_photo_post_from_tweet @twitter_response, {:whitelist => @whitelist, :tumblr_blog_hostname => desired_group}
105 | tumblr_post[:tumblr_blog_hostname].should eq desired_group
106 | end
107 | end
108 | end
--------------------------------------------------------------------------------
/lib/tweetlr/processors/photo_service.rb:
--------------------------------------------------------------------------------
1 | local_path=File.dirname(__FILE__)
2 | require "#{local_path}/http"
3 | require "#{local_path}/../log_aware"
4 | require 'nokogiri'
5 |
6 | module Tweetlr::Processors
7 | #utilities for dealing with photo services
8 | module PhotoService
9 |
10 | LOCATION_START_INDICATOR = 'Location: '
11 | LOCATION_STOP_INDICATOR = "\r\n"
12 | PIC_REGEXP = /(.*?)\.(jpg|jpeg|png|gif)/i
13 |
14 | include Tweetlr::LogAware
15 |
16 | def self.log
17 | Tweetlr::LogAware.log #TODO why doesn't the include make the log method accessible?
18 | end
19 |
20 | def self.find_image_url(link, embedly_key=nil)
21 | url = nil
22 | if link && !(photo? link)
23 | url = process_link link, embedly_key
24 | elsif photo? link
25 | url = link
26 | end
27 | url
28 | end
29 |
30 | def self.photo?(link)
31 | link =~ PIC_REGEXP
32 | end
33 | def self.image_url_twimg(link_url)
34 | retrieve_image_url_by_css link_url, '.media img'
35 | end
36 | #extract the image of an eyeem.com pic
37 | def self.image_url_eyeem(link_url)
38 | retrieve_image_url_by_css link_url, '.viewport-pic img'
39 | end
40 | #extract the image of a foursquare.com pic
41 | def self.image_url_foursquare(link_url)
42 | link_url = follow_redirect(link_url)
43 | image_url = retrieve_image_url_by_css link_url, 'meta[property="og:image"]', 'content'
44 | image_url unless image_url && image_url.include?("foursquare.com/img/categories")
45 | end
46 | #extract the image of a path.com pic
47 | def self.image_url_path(link_url)
48 | retrieve_image_url_by_css link_url, 'img.photo-image'
49 | end
50 |
51 | #find the image's url via embed.ly
52 | def self.image_url_embedly(link_url, key)
53 | link_url = follow_redirect(link_url)
54 | log.debug "embedly call: http://api.embed.ly/1/oembed?key=#{key}&url=#{link_url}"
55 | response = Tweetlr::Processors::Http::http_get_json "http://api.embed.ly/1/oembed?key=#{key}&url=#{link_url}"
56 | if response && (response['type'] == 'photo' || response['type'] == 'image')
57 | image_url = response['url']
58 | end
59 | image_url
60 | end
61 | #find the image's url for a twitter-shortened link
62 | def self.image_url_tco(link_url, embedly_key = nil)
63 | service_url = link_url_redirect link_url
64 | find_image_url service_url, embedly_key
65 | end
66 | #find the image's url for an instagram link
67 | def self.image_url_instagram(link_url)
68 | link_url['instagram.com'] = 'instagr.am' if link_url.index 'instagram.com' #instagram's oembed does not work for .com links
69 | response = Tweetlr::Processors::Http::http_get_json "http://api.instagram.com/oembed?url=#{link_url}"
70 | response['url'] if response
71 | end
72 | #find the image's url for a twitpic link
73 | def self.image_url_twitpic(link_url)
74 | image_url_redirect link_url, "http://twitpic.com/show/full/"
75 | end
76 | #find the image's url for a yfrog link
77 | def self.image_url_yfrog(link_url)
78 | retrieve_image_url_by_css link_url, '#input-direct', 'value'
79 | end
80 | #find the image's url for a img.ly link
81 | def self.image_url_imgly(link_url, embedly_key)
82 | retrieve_image_url_by_css link_url, '#the-image'
83 | end
84 |
85 | # extract image url from services like twitpic & img.ly that do not offer oembed interfaces
86 | def self.image_url_redirect(link_url, service_endpoint, stop_indicator = LOCATION_STOP_INDICATOR)
87 | link_url_redirect "#{service_endpoint}#{extract_id link_url}", stop_indicator
88 | end
89 |
90 | def self.link_url_redirect(short_url, stop_indicator = LOCATION_STOP_INDICATOR)
91 | tries = 3
92 | begin
93 | resp = Curl::Easy.http_get(short_url) { |res| res.follow_location = true }
94 | rescue Curl::Err::CurlError => err
95 | log.error "Curl::Easy.http_get failed: #{err}"
96 | tries -= 1
97 | sleep 3
98 | (tries > 0) ? retry : return
99 | end
100 | process_response_header resp, stop_indicator
101 | end
102 |
103 | #extract the pic id from a given link
104 | def self.extract_id(link)
105 | link.split('/').last if link.split('/')
106 | end
107 | #parse html doc for element signature
108 | def self.parse_html_for(element_signature, html_doc, identifier="src")
109 | image_url= nil
110 | if html_doc
111 | photo_container_div = html_doc.css(element_signature)
112 | if photo_container_div && photo_container_div.first && photo_container_div.first.attributes[identifier]
113 | image_url = photo_container_div.first.attributes[identifier].value
114 | end
115 | end
116 | image_url
117 | end
118 | def self.retrieve_image_url_by_css(link_url, css_path, selector='src')
119 | link_url = follow_redirect link_url
120 | response = Tweetlr::Processors::Http::http_get link_url
121 | image_url = parse_html_for css_path, Nokogiri::HTML.parse(response.body_str), selector
122 | return image_url
123 | end
124 | private
125 | def self.process_link(link, embedly_key)
126 | url = nil
127 | url = image_url_eyeem link if link.index 'eyeem.com'
128 | url = image_url_instagram link if (link.index('instagr.am') || link.index('instagram.com'))
129 | url = image_url_twitpic link if link.index 'twitpic'
130 | url = image_url_yfrog link if link.index 'yfrog'
131 | url = image_url_imgly link, embedly_key if link.index 'img.ly'
132 | url = image_url_tco link, embedly_key if link.index 't.co'
133 | url = image_url_twimg link if link.index 'twitter.com'
134 | url = image_url_path link if link.index 'path.com'
135 | url = image_url_foursquare link if (link.index('4sq.com') || link.index('foursquare.com'))
136 | url = image_url_embedly link, embedly_key if url.nil? #just try embed.ly for anything else. could do all image url processing w/ embedly, but there's probably some kind of rate limit involved.
137 | url
138 | end
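  #pull the redirect target out of curl's raw header string: the text between "Location: " and the given stop indicator (CRLF by default)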
139 | def self.process_response_header(resp, stop_indicator)
140 | if(resp && resp.header_str && resp.header_str.index(LOCATION_START_INDICATOR) && resp.header_str.index(stop_indicator))
141 | start = resp.header_str.index(LOCATION_START_INDICATOR) + LOCATION_START_INDICATOR.size
142 | stop = resp.header_str.index(stop_indicator, start)
143 | resp.header_str[start...stop]
144 | else
145 | nil
146 | end
147 | end
148 | def self.follow_redirect(link_url)
149 | service_url = link_url_redirect link_url #follow possible redirects
150 | link_url = service_url if service_url #if there's no redirect, service_url will be nil
151 | link_url
152 | end
153 | end
154 | end
--------------------------------------------------------------------------------
/spec/support/fixtures/twitter_search_api_response.json:
--------------------------------------------------------------------------------
1 | HTTP/1.1 200 OK
2 | cache-control: no-cache, no-store, must-revalidate, pre-check=0, post-check=0
3 | content-length: 32656
4 | content-type: application/json;charset=utf-8
5 | date: Fri, 28 Jun 2013 17:55:47 GMT
6 | expires: Tue, 31 Mar 1981 05:00:00 GMT
7 | last-modified: Fri, 28 Jun 2013 17:55:47 GMT
8 | pragma: no-cache
9 | server: tfe
10 | set-cookie: lang=de
11 | set-cookie: guest_id=v1%3A137244214779516322; Domain=.twitter.com; Path=/; Expires=Sun, 28-Jun-2015 17:55:47 UTC
12 | status: 200 OK
13 | strict-transport-security: max-age=631138519
14 | x-access-level: read
15 | x-frame-options: SAMEORIGIN
16 | x-rate-limit-limit: 180
17 | x-rate-limit-remaining: 179
18 | x-rate-limit-reset: 1372443047
19 | x-transaction: d71db59d56b39732
20 | x-xss-protection: 1; mode=block
21 |
22 | {"statuses":[{"metadata":{"result_type":"recent","iso_language_code":"en"},"created_at":"Fri Jun 28 04:22:55 +0000 2013","id":350469335690645504,"id_str":"350469335690645504","text":"#kaffee1.0 #coffee #pics #tasskaff #coffeediary #smkh #dailycoffee http:\/\/t.co\/4cu5M83yG5","source":"\u003ca href=\"http:\/\/instagram.com\" rel=\"nofollow\"\u003eInstagram\u003c\/a\u003e","truncated":false,"in_reply_to_status_id":null,"in_reply_to_status_id_str":null,"in_reply_to_user_id":null,"in_reply_to_user_id_str":null,"in_reply_to_screen_name":null,"user":{"id":5402272,"id_str":"5402272","name":"oliver kreimer","screen_name":"011i","location":"Hameln, Germany","description":"#coffee #kids #computerscience #photography #biking","url":"http:\/\/t.co\/BXWfd1Nj9h","entities":{"url":{"urls":[{"url":"http:\/\/t.co\/BXWfd1Nj9h","expanded_url":"http:\/\/nullenundeinsenschubser.de","display_url":"nullenundeinsenschubser.de","indices":[0,22]}]},"description":{"urls":[]}},"protected":false,"followers_count":491,"friends_count":788,"listed_count":38,"created_at":"Sun Apr 22 14:31:06 +0000 2007","favourites_count":6289,"utc_offset":3600,"time_zone":"Berlin","geo_enabled":true,"verified":false,"statuses_count":15542,"lang":"de","contributors_enabled":false,"is_translator":false,"profile_background_color":"6E6868","profile_background_image_url":"http:\/\/a0.twimg.com\/profile_background_images\/18162319\/013d2.jpg","profile_background_image_url_https":"https:\/\/si0.twimg.com\/profile_background_images\/18162319\/013d2.jpg","profile_background_tile":true,"profile_image_url":"http:\/\/a0.twimg.com\/profile_images\/3639187245\/004dd88961b270f1b93956217c0dc1c2_normal.jpeg","profile_image_url_https":"https:\/\/si0.twimg.com\/profile_images\/3639187245\/004dd88961b270f1b93956217c0dc1c2_normal.jpeg","profile_banner_url":"https:\/\/pbs.twimg.com\/profile_banners\/5402272\/1355478803","profile_link_color":"243964","profile_sidebar_border_color":"948D7B","profile_sidebar_fill_color":"5888EB","profile_text_color":"000000","profile_use_background_image":true,"default_profile":false,"default_profile_image":false,"following":false,"follow_request_sent":false,"notifications":false},"geo":null,"coordinates":null,"place":null,"contributors":null,"retweet_count":0,"favorite_count":0,"entities":{"hashtags":[{"text":"kaffee1","indices":[0,8]},{"text":"coffee","indices":[11,18]},{"text":"pics","indices":[19,24]},{"text":"tasskaff","indices":[25,34]},{"text":"coffeediary","indices":[35,47]},{"text":"smkh","indices":[48,53]},{"text":"dailycoffee","indices":[54,66]}],"symbols":[],"urls":[{"url":"http:\/\/t.co\/4cu5M83yG5","expanded_url":"http:\/\/instagram.com\/p\/bFs2o7SfBx\/","display_url":"instagram.com\/p\/bFs2o7SfBx\/","indices":[67,89]}],"user_mentions":[]},"favorited":false,"retweeted":false,"possibly_sensitive":false,"lang":"en"},{"metadata":{"result_type":"recent","iso_language_code":"ja"},"created_at":"Thu Jun 27 11:55:31 +0000 2013","id":350220846079553537,"id_str":"350220846079553537","text":"You'll see. 
#coffeediary #Hipstamatic #Tinto1884 #Dylan @ \u30d5\u30ec\u30c3\u30b7\u30e5\u30cd\u30b9\u30d0\u30fc\u30ac\u30fc \u7d4c\u5802\u5e97 http:\/\/t.co\/SbEHLk86Md","source":"\u003ca href=\"http:\/\/instagram.com\" rel=\"nofollow\"\u003eInstagram\u003c\/a\u003e","truncated":false,"in_reply_to_status_id":null,"in_reply_to_status_id_str":null,"in_reply_to_user_id":null,"in_reply_to_user_id_str":null,"in_reply_to_screen_name":null,"user":{"id":28969581,"id_str":"28969581","name":"Saiko Minomusi","screen_name":"saikom","location":"under your bed..","description":"Love music\/noise\/festivals\/f1\/\uf8ff\/art and science\/paprika and bambie\/etc. ''I thought what I'd do was, I'd pretend I was one of those deaf-mutes or should I?''","url":null,"entities":{"description":{"urls":[]}},"protected":false,"followers_count":955,"friends_count":783,"listed_count":52,"created_at":"Sun Apr 05 10:34:04 +0000 2009","favourites_count":2300,"utc_offset":32400,"time_zone":"Tokyo","geo_enabled":false,"verified":false,"statuses_count":16327,"lang":"en","contributors_enabled":false,"is_translator":false,"profile_background_color":"2E2916","profile_background_image_url":"http:\/\/a0.twimg.com\/profile_background_images\/450080932\/kinokonoko.jpg","profile_background_image_url_https":"https:\/\/si0.twimg.com\/profile_background_images\/450080932\/kinokonoko.jpg","profile_background_tile":true,"profile_image_url":"http:\/\/a0.twimg.com\/profile_images\/1156733233\/photo6_normal.jpg","profile_image_url_https":"https:\/\/si0.twimg.com\/profile_images\/1156733233\/photo6_normal.jpg","profile_banner_url":"https:\/\/pbs.twimg.com\/profile_banners\/28969581\/1361186037","profile_link_color":"FA4605","profile_sidebar_border_color":"8A2BE2","profile_sidebar_fill_color":"333333","profile_text_color":"3BCBFF","profile_use_background_image":true,"default_profile":false,"default_profile_image":false,"following":false,"follow_request_sent":false,"notifications":false},"geo":{"type":"Point","coordinates":[35.64910858,139.63653356]},"coordinates":{"type":"Point","coordinates":[139.63653356,35.64910858]},"place":null,"contributors":null,"retweet_count":0,"favorite_count":0,"entities":{"hashtags":[{"text":"coffeediary","indices":[12,24]},{"text":"Hipstamatic","indices":[26,38]},{"text":"Tinto1884","indices":[39,49]},{"text":"Dylan","indices":[50,56]}],"symbols":[],"urls":[{"url":"http:\/\/t.co\/SbEHLk86Md","expanded_url":"http:\/\/instagram.com\/p\/bD7q8Jg89I\/","display_url":"instagram.com\/p\/bD7q8Jg89I\/","indices":[75,97]}],"user_mentions":[]},"favorited":false,"retweeted":false,"possibly_sensitive":false,"lang":"ja"},{"metadata":{"result_type":"recent","iso_language_code":"de"},"created_at":"Thu Jun 27 09:35:52 +0000 2013","id":350185703415623684,"id_str":"350185703415623684","text":"Kleine St\u00e4rkung :) #coffee #coffeediary @ Impala Coffee http:\/\/t.co\/bm1UvT9dcR","source":"\u003ca href=\"http:\/\/instagram.com\" rel=\"nofollow\"\u003eInstagram\u003c\/a\u003e","truncated":false,"in_reply_to_status_id":null,"in_reply_to_status_id_str":null,"in_reply_to_user_id":null,"in_reply_to_user_id_str":null,"in_reply_to_screen_name":null,"user":{"id":280928169,"id_str":"280928169","name":"N. 
White","screen_name":"citoyenberlin","location":"Berlin","description":"Liest B\u00fccher, sammelt Operngesamtaufnahmen, trinkt gerne Tee und mag meist nicht abwarten.\r\nBooks, opera recordings, tea; sometimes impatient.","url":null,"entities":{"description":{"urls":[]}},"protected":false,"followers_count":191,"friends_count":281,"listed_count":10,"created_at":"Tue Apr 12 09:04:47 +0000 2011","favourites_count":2499,"utc_offset":3600,"time_zone":"Berlin","geo_enabled":true,"verified":false,"statuses_count":7378,"lang":"de","contributors_enabled":false,"is_translator":false,"profile_background_color":"C0DEED","profile_background_image_url":"http:\/\/a0.twimg.com\/profile_background_images\/821228029\/3d42a951fb39ce38eef572ad0794357e.jpeg","profile_background_image_url_https":"https:\/\/si0.twimg.com\/profile_background_images\/821228029\/3d42a951fb39ce38eef572ad0794357e.jpeg","profile_background_tile":true,"profile_image_url":"http:\/\/a0.twimg.com\/profile_images\/1308803654\/neu_normal.jpg","profile_image_url_https":"https:\/\/si0.twimg.com\/profile_images\/1308803654\/neu_normal.jpg","profile_link_color":"0084B4","profile_sidebar_border_color":"FFFFFF","profile_sidebar_fill_color":"DDEEF6","profile_text_color":"333333","profile_use_background_image":true,"default_profile":false,"default_profile_image":false,"following":false,"follow_request_sent":false,"notifications":false},"geo":{"type":"Point","coordinates":[52.49834597,13.354063]},"coordinates":{"type":"Point","coordinates":[13.354063,52.49834597]},"place":{"id":"3078869807f9dd36","url":"http:\/\/api.twitter.com\/1\/geo\/id\/3078869807f9dd36.json","place_type":"city","name":"Berlin","full_name":"Berlin, Berlin","country_code":"DE","country":"Germany","bounding_box":{"type":"Polygon","coordinates":[[[13.088303999999999,52.338079],[13.760909,52.338079],[13.760909,52.675323],[13.088303999999999,52.675323]]]},"attributes":{}},"contributors":null,"retweet_count":0,"favorite_count":0,"entities":{"hashtags":[{"text":"coffee","indices":[19,26]},{"text":"coffeediary","indices":[27,39]}],"symbols":[],"urls":[{"url":"http:\/\/t.co\/bm1UvT9dcR","expanded_url":"http:\/\/instagram.com\/p\/bDr4hbpc4f\/","display_url":"instagram.com\/p\/bDr4hbpc4f\/","indices":[56,78]}],"user_mentions":[]},"favorited":false,"retweeted":false,"possibly_sensitive":false,"lang":"de"},{"metadata":{"result_type":"recent","iso_language_code":"en"},"created_at":"Thu Jun 27 04:05:04 +0000 2013","id":350102453888356352,"id_str":"350102453888356352","text":"#kaffee1.0 #coffee #pics #tasskaff #coffeediary #smkh #dailycoffee http:\/\/t.co\/q6xOREZRQE","source":"\u003ca href=\"http:\/\/instagram.com\" rel=\"nofollow\"\u003eInstagram\u003c\/a\u003e","truncated":false,"in_reply_to_status_id":null,"in_reply_to_status_id_str":null,"in_reply_to_user_id":null,"in_reply_to_user_id_str":null,"in_reply_to_screen_name":null,"user":{"id":5402272,"id_str":"5402272","name":"oliver kreimer","screen_name":"011i","location":"Hameln, Germany","description":"#coffee #kids #computerscience #photography #biking","url":"http:\/\/t.co\/BXWfd1Nj9h","entities":{"url":{"urls":[{"url":"http:\/\/t.co\/BXWfd1Nj9h","expanded_url":"http:\/\/nullenundeinsenschubser.de","display_url":"nullenundeinsenschubser.de","indices":[0,22]}]},"description":{"urls":[]}},"protected":false,"followers_count":491,"friends_count":788,"listed_count":38,"created_at":"Sun Apr 22 14:31:06 +0000 
2007","favourites_count":6289,"utc_offset":3600,"time_zone":"Berlin","geo_enabled":true,"verified":false,"statuses_count":15542,"lang":"de","contributors_enabled":false,"is_translator":false,"profile_background_color":"6E6868","profile_background_image_url":"http:\/\/a0.twimg.com\/profile_background_images\/18162319\/013d2.jpg","profile_background_image_url_https":"https:\/\/si0.twimg.com\/profile_background_images\/18162319\/013d2.jpg","profile_background_tile":true,"profile_image_url":"http:\/\/a0.twimg.com\/profile_images\/3639187245\/004dd88961b270f1b93956217c0dc1c2_normal.jpeg","profile_image_url_https":"https:\/\/si0.twimg.com\/profile_images\/3639187245\/004dd88961b270f1b93956217c0dc1c2_normal.jpeg","profile_banner_url":"https:\/\/pbs.twimg.com\/profile_banners\/5402272\/1355478803","profile_link_color":"243964","profile_sidebar_border_color":"948D7B","profile_sidebar_fill_color":"5888EB","profile_text_color":"000000","profile_use_background_image":true,"default_profile":false,"default_profile_image":false,"following":false,"follow_request_sent":false,"notifications":false},"geo":null,"coordinates":null,"place":null,"contributors":null,"retweet_count":0,"favorite_count":0,"entities":{"hashtags":[{"text":"kaffee1","indices":[0,8]},{"text":"coffee","indices":[11,18]},{"text":"pics","indices":[19,24]},{"text":"tasskaff","indices":[25,34]},{"text":"coffeediary","indices":[35,47]},{"text":"smkh","indices":[48,53]},{"text":"dailycoffee","indices":[54,66]}],"symbols":[],"urls":[{"url":"http:\/\/t.co\/q6xOREZRQE","expanded_url":"http:\/\/instagram.com\/p\/bDGBAASfOD\/","display_url":"instagram.com\/p\/bDGBAASfOD\/","indices":[67,89]}],"user_mentions":[]},"favorited":false,"retweeted":false,"possibly_sensitive":false,"lang":"en"},{"metadata":{"result_type":"recent","iso_language_code":"en"},"created_at":"Wed Jun 26 04:46:18 +0000 2013","id":349750442579861506,"id_str":"349750442579861506","text":"#kaffee1.0 #coffee #pics #tasskaff #coffeediary #smkh #dailycoffee http:\/\/t.co\/ks8i92krK1","source":"\u003ca href=\"http:\/\/instagram.com\" rel=\"nofollow\"\u003eInstagram\u003c\/a\u003e","truncated":false,"in_reply_to_status_id":null,"in_reply_to_status_id_str":null,"in_reply_to_user_id":null,"in_reply_to_user_id_str":null,"in_reply_to_screen_name":null,"user":{"id":5402272,"id_str":"5402272","name":"oliver kreimer","screen_name":"011i","location":"Hameln, Germany","description":"#coffee #kids #computerscience #photography #biking","url":"http:\/\/t.co\/BXWfd1Nj9h","entities":{"url":{"urls":[{"url":"http:\/\/t.co\/BXWfd1Nj9h","expanded_url":"http:\/\/nullenundeinsenschubser.de","display_url":"nullenundeinsenschubser.de","indices":[0,22]}]},"description":{"urls":[]}},"protected":false,"followers_count":491,"friends_count":788,"listed_count":38,"created_at":"Sun Apr 22 14:31:06 +0000 
2007","favourites_count":6289,"utc_offset":3600,"time_zone":"Berlin","geo_enabled":true,"verified":false,"statuses_count":15542,"lang":"de","contributors_enabled":false,"is_translator":false,"profile_background_color":"6E6868","profile_background_image_url":"http:\/\/a0.twimg.com\/profile_background_images\/18162319\/013d2.jpg","profile_background_image_url_https":"https:\/\/si0.twimg.com\/profile_background_images\/18162319\/013d2.jpg","profile_background_tile":true,"profile_image_url":"http:\/\/a0.twimg.com\/profile_images\/3639187245\/004dd88961b270f1b93956217c0dc1c2_normal.jpeg","profile_image_url_https":"https:\/\/si0.twimg.com\/profile_images\/3639187245\/004dd88961b270f1b93956217c0dc1c2_normal.jpeg","profile_banner_url":"https:\/\/pbs.twimg.com\/profile_banners\/5402272\/1355478803","profile_link_color":"243964","profile_sidebar_border_color":"948D7B","profile_sidebar_fill_color":"5888EB","profile_text_color":"000000","profile_use_background_image":true,"default_profile":false,"default_profile_image":false,"following":false,"follow_request_sent":false,"notifications":false},"geo":null,"coordinates":null,"place":null,"contributors":null,"retweet_count":0,"favorite_count":0,"entities":{"hashtags":[{"text":"kaffee1","indices":[0,8]},{"text":"coffee","indices":[11,18]},{"text":"pics","indices":[19,24]},{"text":"tasskaff","indices":[25,34]},{"text":"coffeediary","indices":[35,47]},{"text":"smkh","indices":[48,53]},{"text":"dailycoffee","indices":[54,66]}],"symbols":[],"urls":[{"url":"http:\/\/t.co\/ks8i92krK1","expanded_url":"http:\/\/instagram.com\/p\/bAl8P3yfHW\/","display_url":"instagram.com\/p\/bAl8P3yfHW\/","indices":[67,89]}],"user_mentions":[]},"favorited":false,"retweeted":false,"possibly_sensitive":false,"lang":"en"},{"metadata":{"result_type":"recent","iso_language_code":"de"},"created_at":"Tue Jun 25 13:57:51 +0000 2013","id":349526857881882625,"id_str":"349526857881882625","text":"Earlier today: Flat White with @ailine #coffeediary @ R\u00f6stst\u00e4tte http:\/\/t.co\/xQTAXL4qFU","source":"\u003ca href=\"http:\/\/instagram.com\" rel=\"nofollow\"\u003eInstagram\u003c\/a\u003e","truncated":false,"in_reply_to_status_id":null,"in_reply_to_status_id_str":null,"in_reply_to_user_id":null,"in_reply_to_user_id_str":null,"in_reply_to_screen_name":null,"user":{"id":24479611,"id_str":"24479611","name":"Sebastian Waters","screen_name":"sebastianwaters","location":"Berlin","description":"User Experience Designer \/ Information Architect, Co-Founder of supernov.ae, Contributor to @DMIG & @siteswelike","url":"http:\/\/t.co\/uvpIVrAgxk","entities":{"url":{"urls":[{"url":"http:\/\/t.co\/uvpIVrAgxk","expanded_url":"http:\/\/www.sebastianwaters.com","display_url":"sebastianwaters.com","indices":[0,22]}]},"description":{"urls":[]}},"protected":false,"followers_count":1351,"friends_count":391,"listed_count":54,"created_at":"Sun Mar 15 03:24:57 +0000 
2009","favourites_count":5384,"utc_offset":3600,"time_zone":"Berlin","geo_enabled":true,"verified":false,"statuses_count":22155,"lang":"en","contributors_enabled":false,"is_translator":false,"profile_background_color":"022330","profile_background_image_url":"http:\/\/a0.twimg.com\/images\/themes\/theme15\/bg.png","profile_background_image_url_https":"https:\/\/si0.twimg.com\/images\/themes\/theme15\/bg.png","profile_background_tile":false,"profile_image_url":"http:\/\/a0.twimg.com\/profile_images\/3529955269\/0a49735e70e4d804ddaa814ef4ae17b3_normal.jpeg","profile_image_url_https":"https:\/\/si0.twimg.com\/profile_images\/3529955269\/0a49735e70e4d804ddaa814ef4ae17b3_normal.jpeg","profile_banner_url":"https:\/\/pbs.twimg.com\/profile_banners\/24479611\/1366105227","profile_link_color":"0084B4","profile_sidebar_border_color":"FFFFFF","profile_sidebar_fill_color":"FFF7CC","profile_text_color":"0C3E53","profile_use_background_image":false,"default_profile":false,"default_profile_image":false,"following":false,"follow_request_sent":false,"notifications":false},"geo":{"type":"Point","coordinates":[52.529113,13.397711]},"coordinates":{"type":"Point","coordinates":[13.397711,52.529113]},"place":{"id":"3078869807f9dd36","url":"http:\/\/api.twitter.com\/1\/geo\/id\/3078869807f9dd36.json","place_type":"city","name":"Berlin","full_name":"Berlin, Berlin","country_code":"DE","country":"Germany","bounding_box":{"type":"Polygon","coordinates":[[[13.088303999999999,52.338079],[13.760909,52.338079],[13.760909,52.675323],[13.088303999999999,52.675323]]]},"attributes":{}},"contributors":null,"retweet_count":0,"favorite_count":1,"entities":{"hashtags":[{"text":"coffeediary","indices":[39,51]}],"symbols":[],"urls":[{"url":"http:\/\/t.co\/xQTAXL4qFU","expanded_url":"http:\/\/instagram.com\/p\/a-xYSMxOlP\/","display_url":"instagram.com\/p\/a-xYSMxOlP\/","indices":[65,87]}],"user_mentions":[{"screen_name":"ailine","name":"Ailine Liefeld","id":15747756,"id_str":"15747756","indices":[31,38]}]},"favorited":false,"retweeted":false,"possibly_sensitive":false,"lang":"de"},{"metadata":{"result_type":"recent","iso_language_code":"de"},"created_at":"Tue Jun 25 04:50:07 +0000 2013","id":349389018473050114,"id_str":"349389018473050114","text":"#kaffee1.0 #coffee #pics #tasskaff #coffeediary #smkh #dailycoffee @ b\u00e4ckerei deiterding http:\/\/t.co\/3SR5tliTNP","source":"\u003ca href=\"http:\/\/instagram.com\" rel=\"nofollow\"\u003eInstagram\u003c\/a\u003e","truncated":false,"in_reply_to_status_id":null,"in_reply_to_status_id_str":null,"in_reply_to_user_id":null,"in_reply_to_user_id_str":null,"in_reply_to_screen_name":null,"user":{"id":5402272,"id_str":"5402272","name":"oliver kreimer","screen_name":"011i","location":"Hameln, Germany","description":"#coffee #kids #computerscience #photography #biking","url":"http:\/\/t.co\/BXWfd1Nj9h","entities":{"url":{"urls":[{"url":"http:\/\/t.co\/BXWfd1Nj9h","expanded_url":"http:\/\/nullenundeinsenschubser.de","display_url":"nullenundeinsenschubser.de","indices":[0,22]}]},"description":{"urls":[]}},"protected":false,"followers_count":491,"friends_count":788,"listed_count":38,"created_at":"Sun Apr 22 14:31:06 +0000 
2007","favourites_count":6289,"utc_offset":3600,"time_zone":"Berlin","geo_enabled":true,"verified":false,"statuses_count":15542,"lang":"de","contributors_enabled":false,"is_translator":false,"profile_background_color":"6E6868","profile_background_image_url":"http:\/\/a0.twimg.com\/profile_background_images\/18162319\/013d2.jpg","profile_background_image_url_https":"https:\/\/si0.twimg.com\/profile_background_images\/18162319\/013d2.jpg","profile_background_tile":true,"profile_image_url":"http:\/\/a0.twimg.com\/profile_images\/3639187245\/004dd88961b270f1b93956217c0dc1c2_normal.jpeg","profile_image_url_https":"https:\/\/si0.twimg.com\/profile_images\/3639187245\/004dd88961b270f1b93956217c0dc1c2_normal.jpeg","profile_banner_url":"https:\/\/pbs.twimg.com\/profile_banners\/5402272\/1355478803","profile_link_color":"243964","profile_sidebar_border_color":"948D7B","profile_sidebar_fill_color":"5888EB","profile_text_color":"000000","profile_use_background_image":true,"default_profile":false,"default_profile_image":false,"following":false,"follow_request_sent":false,"notifications":false},"geo":{"type":"Point","coordinates":[52.13038741,9.24720903]},"coordinates":{"type":"Point","coordinates":[9.24720903,52.13038741]},"place":{"id":"c7c87475904f3037","url":"http:\/\/api.twitter.com\/1\/geo\/id\/c7c87475904f3037.json","place_type":"city","name":"Hessisch Oldendorf","full_name":"Hessisch Oldendorf, Hameln-Pyrmont","country_code":"DE","country":"Germany","bounding_box":{"type":"Polygon","coordinates":[[[9.156,52.094006],[9.387540999999999,52.094006],[9.387540999999999,52.220873999999995],[9.156,52.220873999999995]]]},"attributes":{}},"contributors":null,"retweet_count":0,"favorite_count":1,"entities":{"hashtags":[{"text":"kaffee1","indices":[0,8]},{"text":"coffee","indices":[11,18]},{"text":"pics","indices":[19,24]},{"text":"tasskaff","indices":[25,34]},{"text":"coffeediary","indices":[35,47]},{"text":"smkh","indices":[48,53]},{"text":"dailycoffee","indices":[54,66]}],"symbols":[],"urls":[{"url":"http:\/\/t.co\/3SR5tliTNP","expanded_url":"http:\/\/instagram.com\/p\/a-BlbTyfK5\/","display_url":"instagram.com\/p\/a-BlbTyfK5\/","indices":[89,111]}],"user_mentions":[]},"favorited":false,"retweeted":false,"possibly_sensitive":false,"lang":"de"},{"metadata":{"result_type":"recent","iso_language_code":"en"},"created_at":"Mon Jun 24 04:54:32 +0000 2013","id":349027739510116353,"id_str":"349027739510116353","text":"RT @011i: #kaffee1.0 #coffee #pics #tasskaff #coffeediary #smkh #dailycoffee http:\/\/t.co\/fNrWe0V5NS","source":"\u003ca href=\"https:\/\/twitter.com\/coffeers\" rel=\"nofollow\"\u003ecoffeers\u003c\/a\u003e","truncated":false,"in_reply_to_status_id":null,"in_reply_to_status_id_str":null,"in_reply_to_user_id":null,"in_reply_to_user_id_str":null,"in_reply_to_screen_name":null,"user":{"id":1262960174,"id_str":"1262960174","name":"coffee lovers","screen_name":"coffeers","location":"","description":"","url":null,"entities":{"description":{"urls":[]}},"protected":false,"followers_count":3121,"friends_count":0,"listed_count":42,"created_at":"Tue Mar 12 21:55:18 +0000 
2013","favourites_count":0,"utc_offset":null,"time_zone":null,"geo_enabled":false,"verified":false,"statuses_count":98216,"lang":"en","contributors_enabled":false,"is_translator":false,"profile_background_color":"C0DEED","profile_background_image_url":"http:\/\/a0.twimg.com\/images\/themes\/theme1\/bg.png","profile_background_image_url_https":"https:\/\/si0.twimg.com\/images\/themes\/theme1\/bg.png","profile_background_tile":false,"profile_image_url":"http:\/\/a0.twimg.com\/profile_images\/3386217165\/314887eafa9cf0cf2d4ba229156992b3_normal.jpeg","profile_image_url_https":"https:\/\/si0.twimg.com\/profile_images\/3386217165\/314887eafa9cf0cf2d4ba229156992b3_normal.jpeg","profile_link_color":"0084B4","profile_sidebar_border_color":"C0DEED","profile_sidebar_fill_color":"DDEEF6","profile_text_color":"333333","profile_use_background_image":true,"default_profile":true,"default_profile_image":false,"following":false,"follow_request_sent":false,"notifications":false},"geo":null,"coordinates":null,"place":null,"contributors":null,"retweeted_status":{"metadata":{"result_type":"recent","iso_language_code":"en"},"created_at":"Mon Jun 24 04:38:15 +0000 2013","id":349023641448628224,"id_str":"349023641448628224","text":"#kaffee1.0 #coffee #pics #tasskaff #coffeediary #smkh #dailycoffee http:\/\/t.co\/fNrWe0V5NS","source":"\u003ca href=\"http:\/\/instagram.com\" rel=\"nofollow\"\u003eInstagram\u003c\/a\u003e","truncated":false,"in_reply_to_status_id":null,"in_reply_to_status_id_str":null,"in_reply_to_user_id":null,"in_reply_to_user_id_str":null,"in_reply_to_screen_name":null,"user":{"id":5402272,"id_str":"5402272","name":"oliver kreimer","screen_name":"011i","location":"Hameln, Germany","description":"#coffee #kids #computerscience #photography #biking","url":"http:\/\/t.co\/BXWfd1Nj9h","entities":{"url":{"urls":[{"url":"http:\/\/t.co\/BXWfd1Nj9h","expanded_url":"http:\/\/nullenundeinsenschubser.de","display_url":"nullenundeinsenschubser.de","indices":[0,22]}]},"description":{"urls":[]}},"protected":false,"followers_count":491,"friends_count":788,"listed_count":38,"created_at":"Sun Apr 22 14:31:06 +0000 
2007","favourites_count":6289,"utc_offset":3600,"time_zone":"Berlin","geo_enabled":true,"verified":false,"statuses_count":15542,"lang":"de","contributors_enabled":false,"is_translator":false,"profile_background_color":"6E6868","profile_background_image_url":"http:\/\/a0.twimg.com\/profile_background_images\/18162319\/013d2.jpg","profile_background_image_url_https":"https:\/\/si0.twimg.com\/profile_background_images\/18162319\/013d2.jpg","profile_background_tile":true,"profile_image_url":"http:\/\/a0.twimg.com\/profile_images\/3639187245\/004dd88961b270f1b93956217c0dc1c2_normal.jpeg","profile_image_url_https":"https:\/\/si0.twimg.com\/profile_images\/3639187245\/004dd88961b270f1b93956217c0dc1c2_normal.jpeg","profile_banner_url":"https:\/\/pbs.twimg.com\/profile_banners\/5402272\/1355478803","profile_link_color":"243964","profile_sidebar_border_color":"948D7B","profile_sidebar_fill_color":"5888EB","profile_text_color":"000000","profile_use_background_image":true,"default_profile":false,"default_profile_image":false,"following":false,"follow_request_sent":false,"notifications":false},"geo":null,"coordinates":null,"place":null,"contributors":null,"retweet_count":1,"favorite_count":0,"entities":{"hashtags":[{"text":"kaffee1","indices":[0,8]},{"text":"coffee","indices":[11,18]},{"text":"pics","indices":[19,24]},{"text":"tasskaff","indices":[25,34]},{"text":"coffeediary","indices":[35,47]},{"text":"smkh","indices":[48,53]},{"text":"dailycoffee","indices":[54,66]}],"symbols":[],"urls":[{"url":"http:\/\/t.co\/fNrWe0V5NS","expanded_url":"http:\/\/instagram.com\/p\/a7bbLDyfLq\/","display_url":"instagram.com\/p\/a7bbLDyfLq\/","indices":[67,89]}],"user_mentions":[]},"favorited":false,"retweeted":false,"possibly_sensitive":false,"lang":"en"},"retweet_count":1,"favorite_count":0,"entities":{"hashtags":[{"text":"kaffee1","indices":[10,18]},{"text":"coffee","indices":[21,28]},{"text":"pics","indices":[29,34]},{"text":"tasskaff","indices":[35,44]},{"text":"coffeediary","indices":[45,57]},{"text":"smkh","indices":[58,63]},{"text":"dailycoffee","indices":[64,76]}],"symbols":[],"urls":[{"url":"http:\/\/t.co\/fNrWe0V5NS","expanded_url":"http:\/\/instagram.com\/p\/a7bbLDyfLq\/","display_url":"instagram.com\/p\/a7bbLDyfLq\/","indices":[77,99]}],"user_mentions":[{"screen_name":"011i","name":"oliver kreimer","id":5402272,"id_str":"5402272","indices":[3,8]}]},"favorited":false,"retweeted":false,"possibly_sensitive":false,"lang":"en"},{"metadata":{"result_type":"recent","iso_language_code":"en"},"created_at":"Mon Jun 24 04:38:15 +0000 2013","id":349023641448628224,"id_str":"349023641448628224","text":"#kaffee1.0 #coffee #pics #tasskaff #coffeediary #smkh #dailycoffee http:\/\/t.co\/fNrWe0V5NS","source":"\u003ca href=\"http:\/\/instagram.com\" rel=\"nofollow\"\u003eInstagram\u003c\/a\u003e","truncated":false,"in_reply_to_status_id":null,"in_reply_to_status_id_str":null,"in_reply_to_user_id":null,"in_reply_to_user_id_str":null,"in_reply_to_screen_name":null,"user":{"id":5402272,"id_str":"5402272","name":"oliver kreimer","screen_name":"011i","location":"Hameln, Germany","description":"#coffee #kids #computerscience #photography #biking","url":"http:\/\/t.co\/BXWfd1Nj9h","entities":{"url":{"urls":[{"url":"http:\/\/t.co\/BXWfd1Nj9h","expanded_url":"http:\/\/nullenundeinsenschubser.de","display_url":"nullenundeinsenschubser.de","indices":[0,22]}]},"description":{"urls":[]}},"protected":false,"followers_count":491,"friends_count":788,"listed_count":38,"created_at":"Sun Apr 22 14:31:06 +0000 
2007","favourites_count":6289,"utc_offset":3600,"time_zone":"Berlin","geo_enabled":true,"verified":false,"statuses_count":15542,"lang":"de","contributors_enabled":false,"is_translator":false,"profile_background_color":"6E6868","profile_background_image_url":"http:\/\/a0.twimg.com\/profile_background_images\/18162319\/013d2.jpg","profile_background_image_url_https":"https:\/\/si0.twimg.com\/profile_background_images\/18162319\/013d2.jpg","profile_background_tile":true,"profile_image_url":"http:\/\/a0.twimg.com\/profile_images\/3639187245\/004dd88961b270f1b93956217c0dc1c2_normal.jpeg","profile_image_url_https":"https:\/\/si0.twimg.com\/profile_images\/3639187245\/004dd88961b270f1b93956217c0dc1c2_normal.jpeg","profile_banner_url":"https:\/\/pbs.twimg.com\/profile_banners\/5402272\/1355478803","profile_link_color":"243964","profile_sidebar_border_color":"948D7B","profile_sidebar_fill_color":"5888EB","profile_text_color":"000000","profile_use_background_image":true,"default_profile":false,"default_profile_image":false,"following":false,"follow_request_sent":false,"notifications":false},"geo":null,"coordinates":null,"place":null,"contributors":null,"retweet_count":1,"favorite_count":0,"entities":{"hashtags":[{"text":"kaffee1","indices":[0,8]},{"text":"coffee","indices":[11,18]},{"text":"pics","indices":[19,24]},{"text":"tasskaff","indices":[25,34]},{"text":"coffeediary","indices":[35,47]},{"text":"smkh","indices":[48,53]},{"text":"dailycoffee","indices":[54,66]}],"symbols":[],"urls":[{"url":"http:\/\/t.co\/fNrWe0V5NS","expanded_url":"http:\/\/instagram.com\/p\/a7bbLDyfLq\/","display_url":"instagram.com\/p\/a7bbLDyfLq\/","indices":[67,89]}],"user_mentions":[]},"favorited":false,"retweeted":false,"possibly_sensitive":false,"lang":"en"},{"metadata":{"result_type":"recent","iso_language_code":"en"},"created_at":"Mon Jun 24 04:00:20 +0000 2013","id":349014100828889088,"id_str":"349014100828889088","text":"Photo: coffeediary: #vscocam #vscofeature #coffeethogo #coffeediary #coffee #flatwhite #hamburg #welovehh... http:\/\/t.co\/lNYQwhEg1k","source":"\u003ca href=\"http:\/\/www.tumblr.com\/\" rel=\"nofollow\"\u003eTumblr\u003c\/a\u003e","truncated":false,"in_reply_to_status_id":null,"in_reply_to_status_id_str":null,"in_reply_to_user_id":null,"in_reply_to_user_id_str":null,"in_reply_to_screen_name":null,"user":{"id":7164732,"id_str":"7164732","name":"Jandy Jean","screen_name":"JandyJean","location":"Singapore","description":"I spazz Super Junior and Beast! 
http:\/\/t.co\/l62yuSbZ5D http:\/\/t.co\/5FjOytZuMD","url":"http:\/\/t.co\/3mMeERavC4","entities":{"url":{"urls":[{"url":"http:\/\/t.co\/3mMeERavC4","expanded_url":"http:\/\/jandyjean.tumblr.com","display_url":"jandyjean.tumblr.com","indices":[0,22]}]},"description":{"urls":[{"url":"http:\/\/t.co\/l62yuSbZ5D","expanded_url":"http:\/\/jandyjean.blogspot.sg","display_url":"jandyjean.blogspot.sg","indices":[32,54]},{"url":"http:\/\/t.co\/5FjOytZuMD","expanded_url":"http:\/\/about.me\/jandy.jean","display_url":"about.me\/jandy.jean","indices":[55,77]}]}},"protected":false,"followers_count":345,"friends_count":92,"listed_count":17,"created_at":"Sat Jun 30 02:28:02 +0000 2007","favourites_count":4,"utc_offset":28800,"time_zone":"Singapore","geo_enabled":false,"verified":false,"statuses_count":33865,"lang":"en","contributors_enabled":false,"is_translator":false,"profile_background_color":"FFFFFF","profile_background_image_url":"http:\/\/a0.twimg.com\/profile_background_images\/155409525\/twitter_bg_new.png","profile_background_image_url_https":"https:\/\/si0.twimg.com\/profile_background_images\/155409525\/twitter_bg_new.png","profile_background_tile":true,"profile_image_url":"http:\/\/a0.twimg.com\/profile_images\/1415178815\/twitterdp_normal.JPG","profile_image_url_https":"https:\/\/si0.twimg.com\/profile_images\/1415178815\/twitterdp_normal.JPG","profile_banner_url":"https:\/\/pbs.twimg.com\/profile_banners\/7164732\/1347985384","profile_link_color":"E3B10D","profile_sidebar_border_color":"E3B10D","profile_sidebar_fill_color":"000000","profile_text_color":"F57A08","profile_use_background_image":true,"default_profile":false,"default_profile_image":false,"following":false,"follow_request_sent":false,"notifications":false},"geo":null,"coordinates":null,"place":null,"contributors":null,"retweet_count":0,"favorite_count":0,"entities":{"hashtags":[{"text":"vscocam","indices":[20,28]},{"text":"vscofeature","indices":[29,41]},{"text":"coffeethogo","indices":[42,54]},{"text":"coffeediary","indices":[55,67]},{"text":"coffee","indices":[68,75]},{"text":"flatwhite","indices":[76,86]},{"text":"hamburg","indices":[87,95]},{"text":"welovehh","indices":[96,105]}],"symbols":[],"urls":[{"url":"http:\/\/t.co\/lNYQwhEg1k","expanded_url":"http:\/\/tmblr.co\/ZNaDAyo2dFPa","display_url":"tmblr.co\/ZNaDAyo2dFPa","indices":[109,131]}],"user_mentions":[]},"favorited":false,"retweeted":false,"possibly_sensitive":false,"lang":"en"}],"search_metadata":{"completed_in":0.027,"max_id":350469335690645504,"max_id_str":"350469335690645504","next_results":"?max_id=349014100828889087&q=%25coffeediary%20filter%3Alinks&count=10&include_entities=1&result_type=recent","query":"%25coffeediary+filter%3Alinks","refresh_url":"?since_id=350469335690645504&q=%25coffeediary%20filter%3Alinks&result_type=recent&include_entities=1","count":10,"since_id":0,"since_id_str":"0"}}
--------------------------------------------------------------------------------
/spec/spec_helper.rb:
--------------------------------------------------------------------------------
1 | #encoding: utf-8
2 |
3 | if ENV['RACK_ENV']=='test'
4 | require 'coveralls' # upload coverage results to Coveralls when running on CI
5 | Coveralls.wear!
6 | elsif ENV['RACK_ENV']=='development'
7 | require 'simplecov' # local runs just write a coverage report to coverage/
8 | SimpleCov.start
9 | end
10 |
11 | require "bundler"
12 | require "logger"
13 | require "yaml"
14 | require 'fakeweb'
15 | require "#{File.dirname(__FILE__)}/../lib/tweetlr"
16 |
17 |
18 | Bundler.require :default, :development, :test
19 |
20 | logger = Logger.new('/dev/null') # discard log output so it does not clutter the spec run
21 | Tweetlr::LogAware.log = logger
22 |
23 | TWEETLR_CONFIG_FILE = 'tweetlr.yml'
24 |
25 | FakeWeb.allow_net_connect = false # no real HTTP requests during specs...
26 | FakeWeb.allow_net_connect = %r[^https?://coveralls.io/api/v1/jobs] # ...except coverage uploads: assigning a regexp whitelists matching URIs and blocks everything else
27 | twitter_search_api_response = File.open("#{File.dirname(__FILE__)}/support/fixtures/twitter_search_api_response.json", 'rb') { |file| file.read }
28 | FakeWeb.register_uri(:get, %r|https://api.twitter.com/1.1/search/tweets.json|, :response => twitter_search_api_response) # every hit on the Twitter search endpoint gets the recorded fixture response
29 |
30 | def check_pic_url_extraction(service) # shared assertion: the link stored for the given photo service must yield an extractable picture URL
31 | image_url = Tweetlr::Processors::PhotoService.find_image_url @links[service]
32 | (image_url =~ Tweetlr::Processors::PhotoService::PIC_REGEXP).should be, "service #{service} not working, no picture extracted!"
33 | end
34 |
35 | def stub_oauth # OAuth-signed posts return a canned 201 Created without touching the network
36 | OAuth::AccessToken.any_instance.stub(:post).and_return(Net::HTTPCreated.new("1.1", "201", "Created")) # Net::HTTPResponse.new(httpv, code, msg)
37 | end
38 |
39 | def stub_tumblr # fake a successful Tumblr post: HTTP 201 with a numeric post id as response body
40 | Curl::Easy.any_instance.stub(:response_code).and_return 201
41 | Curl::Easy.any_instance.stub(:header_str).and_return %|HTTP/1.1 201 Created
42 | Date: Sun, 13 Nov 2011 16:56:02 GMT
43 | Server: Apache
44 | P3P: CP="ALL ADM DEV PSAi COM OUR OTRo STP IND ONL"
45 | Vary: Accept-Encoding
46 | X-Tumblr-Usec: D=2600406
47 | Content-Length: 11
48 | Connection: close
49 | Content-Type: text/plain; charset=utf-8
50 |
51 | |
52 | Curl::Easy.any_instance.stub(:body_str).and_return %|12742797055|
53 | Curl::Easy.stub!(:http_post).and_return Curl::Easy.new
54 | stub_instagram
55 | end
56 |
57 | def stub_twimg # stub the fetched response body for the twimg photo service specs
58 | Curl::Easy.any_instance.stub(:body_str).and_return %|
67 |
68 |
181 |
182 |