## Copyright (c) 2007-2009 Facebook
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
## See accompanying file LICENSE or visit the Scribe site at:
## http://developers.facebook.com/scribe/

##
## Sample Scribe configuration
##

# This file configures Scribe to first attempt to write to a hadoop instance.
# If this fails, scribe will then attempt to write to a backup hadoop
# instance. If this fails, scribe will buffer files to local disk. This is
# accomplished by nesting a buffer store inside another buffer store.

# Note that since replay_buffer=no in the inner buffer store, messages written
# to the backup hadoop instance will remain on the backup hadoop instance even
# if the primary hadoop instance comes back online. But since replay_buffer is
# not turned off in the outer buffer store, messages logged to /tmp will
# eventually get logged to hadoop when either the primary or backup hadoop
# instance comes back online.

port=1463
max_msg_per_second=1000000
check_interval=1
max_queue_size=100000000
num_thrift_server_threads=3

# DEFAULT
<store>
category=default
type=buffer

target_write_size=20480
max_write_interval=1
retry_interval=120
retry_interval_range=60
buffer_send_rate=5

<primary>
type=buffer

target_write_size=20480
max_write_interval=1
retry_interval=600
retry_interval_range=60
replay_buffer=no

<primary>
type=file
fs_type=hdfs
file_path=hdfs://hadoopserver:9000/scribedata
create_symlink=no
use_hostname_sub_directory=yes
base_filename=thisisoverwritten
max_size=1000000000
rotate_period=daily
rotate_hour=0
rotate_minute=5
add_newlines=1
</primary>

<secondary>
type=file
fs_type=hdfs
file_path=hdfs://backuphadoopserver:9000/scribedata
create_symlink=no
use_hostname_sub_directory=yes
base_filename=thisisoverwritten
max_size=1000000000
rotate_period=daily
rotate_hour=0
rotate_minute=5
add_newlines=1
</secondary>
</primary>

<secondary>
type=file
fs_type=std
file_path=/tmp
base_filename=thisisoverwritten
max_size=4000000
</secondary>
</store>