Below is a fairly simple Ruby script that scans files uploaded to an Amazon S3 bucket and inserts each file's metadata into a MySQL database.

require 'rubygems'
require 'aws/s3'
require 'mysql'
require 'lockfile'
require 'time'    # needed for Time.parse below

# Scrape an S3 prefix and record each object's metadata in the `files` table,
# skipping objects already present (matched on "FileName - FileModified").
# A lock file prevents two scraper instances from running concurrently.
begin
  # :retries => 0 makes the lock fail fast (raising MaxTriesLockError)
  # instead of waiting when another run holds it.
  Lockfile.new('/tmp/scraper.lock', :retries => 0) do
    # Open the MySQL connection (host, user, password, database).
    my = Mysql::new('localhost', 'nfssupport', 'password', 'upload_files')

    begin
      # Build an in-memory list of "FileName - FileModified" keys already
      # stored, so we can cheaply test membership per S3 object.
      db = []
      my.query('SELECT FileName, FileModified FROM files').each_hash do |f|
        db << "#{f['FileName']} - #{f['FileModified']}"
      end

      # Connect to S3. Replace the placeholders with real credentials
      # (ideally read from the environment rather than hard-coded).
      AWS::S3::Base.establish_connection!(
        :access_key_id     => 'access_key_id',
        :secret_access_key => 'secret_access_key'
      )

      # List the objects under the upload prefix.
      # NOTE(review): aws-s3's Bucket.objects takes the bucket name as its
      # first argument; it appears to have been lost in this paste — confirm
      # and restore it, e.g. AWS::S3::Bucket.objects('bucket-name', ...).
      files = AWS::S3::Bucket.objects(
        :prefix => 'uploads/user@domain.tld/uuid'
      )

      files.each do |file|
        # Derive a YYYYMMDD date string from the Last-Modified header.
        t = Time.parse(file.about['last-modified'])
        file_date = t.strftime('%Y%m%d')

        # Key layout is prefix/…/NNN-filename: take the last path segment
        # and strip the leading numeric upload id.
        file_name = file.key.split('/', 4).last.gsub(/^(\d+\-)/, '')

        # Last token of the HTTP date, e.g. the "GMT" zone suffix is kept
        # with the time portion used as the stored modification marker.
        file_modified = file.about['last-modified'].split(' ', 5).last

        # Skip objects we have already recorded.
        next if db.include?("#{file_name} - #{file_modified}")

        # Parameterized insert — values are bound, never interpolated,
        # so filenames with quotes cannot break the statement.
        st = my.prepare(
          'INSERT INTO files ' \
          '(FileName, FileDate, FileModified, FileLink, FileDescription, FileSize) ' \
          'VALUES (?, ?, ?, ?, ?, ?)'
        )
        begin
          st.execute(
            file_name,
            file_date,
            file_modified,
            file.key.split('/', 3).last,              # link path relative to the prefix root
            file.metadata['x-amz-meta-description'],  # user-supplied description, may be nil
            file.about['content-length']              # size in bytes (string from the header)
          )
        ensure
          st.close # release the server-side prepared statement
        end
      end
    ensure
      my.close # always release the MySQL connection
    end
  end
rescue Lockfile::MaxTriesLockError => e
  # Another instance holds /tmp/scraper.lock — bail out quietly.
  puts 'Scraper is already running!'
end