
##  Copyright (c) 2007-2009 Facebook
##
##  Licensed under the Apache License, Version 2.0 (the "License");
##  you may not use this file except in compliance with the License.
##  You may obtain a copy of the License at
##
##      http://www.apache.org/licenses/LICENSE-2.0
##
##  Unless required by applicable law or agreed to in writing, software
##  distributed under the License is distributed on an "AS IS" BASIS,
##  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
##  See the License for the specific language governing permissions and
##  limitations under the License.
##
## See accompanying file LICENSE or visit the Scribe site at:
## http://developers.facebook.com/scribe/


##
## Sample Scribe configuration
##

# This file configures Scribe to first attempt to write to a Hadoop instance.
# If this fails, Scribe will then attempt to write to a backup Hadoop
# instance.  If this fails, Scribe will buffer messages to local disk.  This
# is accomplished by nesting a buffer store inside another buffer store.

# Note that since replay_buffer=no in the inner buffer store, messages written
# to the backup Hadoop instance will remain on the backup Hadoop instance even
# if the primary Hadoop instance comes back online.  But since replay_buffer
# is not turned off in the outer buffer store, messages logged to /tmp will
# eventually get logged to Hadoop when either the primary or backup Hadoop
# instance comes back online.
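
# Concretely, in the store below: the outer buffer's <primary> is itself a
# buffer store whose <primary> writes to hdfs://hadoopserver:9000/scribedata
# and whose <secondary> writes to hdfs://backuphadoopserver:9000/scribedata;
# the outer buffer's <secondary> spools to files under /tmp until one of the
# HDFS targets is reachable again.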

# Global server settings: Thrift listen port, cap on incoming messages per
# second, how often (in seconds) stores are checked and flushed, maximum
# bytes held in the incoming queue before new messages are refused, and the
# number of Thrift server threads.
port=1463
max_msg_per_second=1000000
check_interval=1
max_queue_size=100000000
num_thrift_server_threads=3

# DEFAULT: catch-all store for any category that has no <store> block of its own
<store>
  category=default
  type=buffer

  # Flush once ~20 KB have accumulated or one second has passed; retry a
  # failed primary on a randomized interval of roughly two minutes, sending
  # at most 5 buffered files back per check_interval.
  target_write_size=20480
  max_write_interval=1
  retry_interval=120
  retry_interval_range=60
  buffer_send_rate=5

  <primary>
    type=buffer

    target_write_size=20480
    max_write_interval=1
    retry_interval=600
    retry_interval_range=60
    replay_buffer=no

    <primary>
      type=file
      fs_type=hdfs
      file_path=hdfs://hadoopserver:9000/scribedata
      create_symlink=no
      use_hostname_sub_directory=yes
      base_filename=thisisoverwritten
      max_size=1000000000
      rotate_period=daily
      rotate_hour=0
      rotate_minute=5
      add_newlines=1
    </primary>

    <secondary>
      type=file
      fs_type=hdfs
      file_path=hdfs://backuphadoopserver:9000/scribedata
      create_symlink=no
      use_hostname_sub_directory=yes
      base_filename=thisisoverwritten
      max_size=1000000000
      rotate_period=daily
      rotate_hour=0
      rotate_minute=5
      add_newlines=1
    </secondary>
  </primary>

  <secondary>
    type=file
    fs_type=std
    file_path=/tmp
    base_filename=thisisoverwritten
    max_size=4000000
  </secondary>
</store>
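
# As an illustrative sketch only (the category name and hostname below are
# placeholders, not part of this deployment): additional <store> blocks can
# sit alongside DEFAULT to give specific categories their own handling, for
# example forwarding one category to a central Scribe aggregator over the
# network instead of writing it to HDFS.
#
# <store>
#   category=audit
#   type=network
#   remote_host=scribe-aggregator.example.com
#   remote_port=1463
# </store>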