Cluster hosts:
master: 192.168.1.3
slaves: 192.168.1.101, 192.168.1.102, 192.168.1.103, 192.168.1.104
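For reference, in 0.20-era Hadoop this layout is usually wired up through the conf/masters and conf/slaves files read by the stock start scripts (start-dfs.sh, start-mapred.sh). A minimal sketch, assuming the secondary namenode is meant to run on the master host:
conf/slaves — one worker (datanode/tasktracker) host per line:
192.168.1.101
192.168.1.102
192.168.1.103
192.168.1.104
conf/masters — despite the name, this lists the secondary namenode host(s); the namenode and jobtracker themselves run on whichever host the start scripts are launched from:
192.168.1.3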
core-site.xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Do not modify this file directly. Instead, copy entries that you -->
<!-- wish to modify from this file into core-site.xml and change them -->
<!-- there. If core-site.xml does not already exist, create it. -->
<configuration>
<!-- global properties -->
<property>
<name>hadoop.tmp.dir</name>
<value>/tmp/hadoop-${user.name}</value>
<description>A base for other temporary directories.</description>
</property>
<property>
<name>hadoop.native.lib</name>
<value>true</value>
<description>Should native hadoop libraries, if present, be used.</description>
</property>
<property>
<name>hadoop.http.filter.initializers</name>
<value></value>
<description>A comma separated list of class names. Each class in the list
must extend org.apache.hadoop.http.FilterInitializer. The corresponding
Filter will be initialized. Then, the Filter will be applied to all user
facing jsp and servlet web pages. The ordering of the list defines the
ordering of the filters.</description>
</property>
<property>
<name>hadoop.security.authorization</name>
<value>false</value>
<description>Is service-level authorization enabled?</description>
</property>
<!-- logging properties -->
<property>
<name>hadoop.logfile.size</name>
<value>10000000</value>
<description>The max size of each log file</description>
</property>
<property>
<name>hadoop.logfile.count</name>
<value>10</value>
<description>The max number of log files</description>
</property>
<!-- i/o properties -->
<property>
<name>io.file.buffer.size</name>
<value>4096</value>
<description>The size of buffer for use in sequence files.
The size of this buffer should probably be a multiple of hardware
page size (4096 on Intel x86), and it determines how much data is
buffered during read and write operations.</description>
</property>
<property>
<name>io.bytes.per.checksum</name>
<value>512</value>
<description>The number of bytes per checksum. Must not be larger than
io.file.buffer.size.</description>
</property>
<property>
<name>io.skip.checksum.errors</name>
<value>false</value>
<description>If true, when a checksum error is encountered while
reading a sequence file, entries are skipped, instead of throwing an
exception.</description>
</property>
<property>
<name>io.compression.codecs</name>
<value>org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.BZip2Codec</value>
<description>A list of the compression codec classes that can be used
for compression/decompression.</description>
</property>
<property>
<name>io.serializations</name>
<value>org.apache.hadoop.io.serializer.WritableSerialization</value>
<description>A list of serialization classes that can be used for
obtaining serializers and deserializers.</description>
</property>
<!-- file system properties -->
<property>
<name>fs.default.name</name>
<value>hdfs://192.168.1.3:9000</value>
<description>The name of the default file system. A URI whose
scheme and authority determine the FileSystem implementation. The
uri's scheme determines the config property (fs.SCHEME.impl) naming
the FileSystem implementation class. The uri's authority is used to
determine the host, port, etc. for a filesystem.</description>
</property>
<property>
<name>fs.trash.interval</name>
<value>0</value>
<description>Number of minutes between trash checkpoints.
If zero, the trash feature is disabled.
</description>
</property>
<property>
<name>fs.file.impl</name>
<value>org.apache.hadoop.fs.LocalFileSystem</value>
<description>The FileSystem for file: uris.</description>
</property>
<property>
<name>fs.hdfs.impl</name>
<value>org.apache.hadoop.hdfs.DistributedFileSystem</value>
<description>The FileSystem for hdfs: uris.</description>
</property>
<property>
<name>fs.s3.impl</name>
<value>org.apache.hadoop.fs.s3.S3FileSystem</value>
<description>The FileSystem for s3: uris.</description>
</property>
<property>
<name>fs.s3n.impl</name>
<value>org.apache.hadoop.fs.s3native.NativeS3FileSystem</value>
<description>The FileSystem for s3n: (Native S3) uris.</description>
</property>
<property>
<name>fs.kfs.impl</name>
<value>org.apache.hadoop.fs.kfs.KosmosFileSystem</value>
<description>The FileSystem for kfs: uris.</description>
</property>
<property>
<name>fs.hftp.impl</name>
<value>org.apache.hadoop.hdfs.HftpFileSystem</value>
</property>
<property>
<name>fs.hsftp.impl</name>
<value>org.apache.hadoop.hdfs.HsftpFileSystem</value>
</property>
<property>
<name>fs.ftp.impl</name>
<value>org.apache.hadoop.fs.ftp.FTPFileSystem</value>
<description>The FileSystem for ftp: uris.</description>
</property>
<property>
<name>fs.ramfs.impl</name>
<value>org.apache.hadoop.fs.InMemoryFileSystem</value>
<description>The FileSystem for ramfs: uris.</description>
</property>
<property>
<name>fs.har.impl</name>
<value>org.apache.hadoop.fs.HarFileSystem</value>
<description>The filesystem for Hadoop archives. </description>
</property>
<property>
<name>fs.har.impl.disable.cache</name>
<value>true</value>
<description>Don't cache 'har' filesystem instances.</description>
</property>
<property>
<name>fs.checkpoint.dir</name>
<value>${hadoop.tmp.dir}/dfs/namesecondary</value>
<description>Determines where on the local filesystem the DFS secondary
name node should store the temporary images to merge.
If this is a comma-delimited list of directories then the image is
replicated in all of the directories for redundancy.
</description>
</property>
<property>
<name>fs.checkpoint.edits.dir</name>
<value>${fs.checkpoint.dir}</value>
<description>Determines where on the local filesystem the DFS secondary
name node should store the temporary edits to merge.
If this is a comma-delimited list of directories then the edits are
replicated in all of the directories for redundancy.
Default value is same as fs.checkpoint.dir
</description>
</property>
<property>
<name>fs.checkpoint.period</name>
<value>3600</value>
<description>The number of seconds between two periodic checkpoints.
</description>
</property>
<property>
<name>fs.checkpoint.size</name>
<value>67108864</value>
<description>The size of the current edit log (in bytes) that triggers
a periodic checkpoint even if the fs.checkpoint.period hasn't expired.
</description>
</property>
<property>
<name>fs.s3.block.size</name>
<value>67108864</value>
<description>Block size to use when writing files to S3.</description>
</property>
<property>
<name>fs.s3.buffer.dir</name>
<value>${hadoop.tmp.dir}/s3</value>
<description>Determines where on the local filesystem the S3 filesystem
should store files before sending them to S3
(or after retrieving them from S3).
</description>
</property>
<property>
<name>fs.s3.maxRetries</name>
<value>4</value>
<description>The maximum number of retries for reading or writing files to S3,
before we signal failure to the application.
</description>
</property>
<property>
<name>fs.s3.sleepTimeSeconds</name>
<value>10</value>
<description>The number of seconds to sleep between each S3 retry.
</description>
</property>
<property>
<name>local.cache.size</name>
<value>10737418240</value>
<description>The limit on the size of cache you want to keep, set by default
to 10GB. This will act as a soft limit on the cache directory for out of band data.
</description>
</property>
<property>
<name>io.seqfile.compress.blocksize</name>
<value>1000000</value>
<description>The minimum block size for compression in block compressed
SequenceFiles.
</description>
</property>
<property>
<name>io.seqfile.lazydecompress</name>
<value>true</value>
<description>Should values of block-compressed SequenceFiles be decompressed
only when necessary.
</description>
</property>
<property>
<name>io.seqfile.sorter.recordlimit</name>
<value>1000000</value>
<description>The limit on the number of records to be kept in memory in a spill
in SequenceFile.Sorter.
</description>
</property>
<property>
<name>io.mapfile.bloom.size</name>
<value>1048576</value>
<description>The size of BloomFilter-s used in BloomMapFile. Each time this many
keys is appended the next BloomFilter will be created (inside a DynamicBloomFilter).
Larger values minimize the number of filters, which slightly increases the performance,
but may waste too much space if the total number of keys is usually much smaller
than this number.
</description>
</property>
<property>
<name>io.mapfile.bloom.error.rate</name>
<value>0.005</value>
<description>The rate of false positives in BloomFilter-s used in BloomMapFile.
As this value decreases, the size of BloomFilter-s increases exponentially. This
value is the probability of encountering false positives (default is 0.5%).
</description>
</property>
<property>
<name>hadoop.util.hash.type</name>
<value>murmur</value>
<description>The default implementation of Hash. Currently this can take one of the
two values: 'murmur' to select MurmurHash and 'jenkins' to select JenkinsHash.
</description>
</property>
<!-- ipc properties -->
<property>
<name>ipc.client.idlethreshold</name>
<value>4000</value>
<description>Defines the threshold number of connections after which
connections will be inspected for idleness.
</description>
</property>
<property>
<name>ipc.client.kill.max</name>
<value>10</value>
<description>Defines the maximum number of clients to disconnect in one go.
</description>
</property>
<property>
<name>ipc.client.connection.maxidletime</name>
<value>10000</value>
<description>The maximum time in msec after which a client will bring down the
connection to the server.
</description>
</property>
<property>
<name>ipc.client.connect.max.retries</name>
<value>10</value>
<description>Indicates the number of retries a client will make to establish
a server connection.
</description>
</property>
<property>
<name>ipc.server.listen.queue.size</name>
<value>128</value>
<description>Indicates the length of the listen queue for servers accepting
client connections.
</description>
</property>
<property>
<name>ipc.server.tcpnodelay</name>
<value>false</value>
<description>Turn on/off Nagle's algorithm for the TCP socket connection on
the server. Setting to true disables the algorithm and may decrease latency
with a cost of more/smaller packets.
</description>
</property>
<property>
<name>ipc.client.tcpnodelay</name>
<value>false</value>
<description>Turn on/off Nagle's algorithm for the TCP socket connection on
the client. Setting to true disables the algorithm and may decrease latency
with a cost of more/smaller packets.
</description>
</property>
<!-- Web Interface Configuration -->
<property>
<name>webinterface.private.actions</name>
<value>false</value>
<description> If set to true, the web interfaces of JT and NN may contain
actions, such as kill job, delete file, etc., that should
not be exposed to public. Enable this option if the interfaces
are only reachable by those who have the right authorization.
</description>
</property>
<!-- Proxy Configuration -->
<property>
<name>hadoop.rpc.socket.factory.class.default</name>
<value>org.apache.hadoop.net.StandardSocketFactory</value>
<description> Default SocketFactory to use. This parameter is expected to be
formatted as "package.FactoryClassName".
</description>
</property>
<property>
<name>hadoop.rpc.socket.factory.class.ClientProtocol</name>
<value></value>
<description> SocketFactory to use to connect to a DFS. If null or empty, use
hadoop.rpc.socket.class.default. This socket factory is also used by
DFSClient to create sockets to DataNodes.
</description>
</property>
<property>
<name>hadoop.socks.server</name>
<value></value>
<description> Address (host:port) of the SOCKS server to be used by the
SocksSocketFactory.
</description>
</property>
<!-- Rack Configuration -->
<property>
<name>topology.node.switch.mapping.impl</name>
<value>org.apache.hadoop.net.ScriptBasedMapping</value>
<description> The default implementation of the DNSToSwitchMapping. It
invokes a script specified in topology.script.file.name to resolve
node names. If the value for topology.script.file.name is not set, the
default value of DEFAULT_RACK is returned for all node names.
</description>
</property>
<property>
<name>topology.script.file.name</name>
<value></value>
<description> The script name that should be invoked to resolve DNS names to
NetworkTopology names. Example: the script would take host.foo.bar as an
argument, and return /rack1 as the output.
</description>
</property>
<property>
<name>topology.script.number.args</name>
<value>100</value>
<description> The max number of args that the script configured with
topology.script.file.name should be run with. Each arg is an
IP address.
</description>
</property>
</configuration>
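The file above is essentially the shipped default file with a handful of site-specific edits (notably fs.default.name pointing at the master). Per its own header comment, only the overrides need to live in core-site.xml. A minimal sketch of such a file — the fs.default.name value is taken from the dump above, while the hadoop.tmp.dir path is a hypothetical example:
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<!-- Namenode RPC endpoint; must match the master address clients use. -->
<property>
<name>fs.default.name</name>
<value>hdfs://192.168.1.3:9000</value>
</property>
<!-- Hypothetical base directory; keeps HDFS data out of /tmp, which the
default /tmp/hadoop-${user.name} does not, and /tmp is often cleared
on reboot. -->
<property>
<name>hadoop.tmp.dir</name>
<value>/home/hadoop/tmp</value>
</property>
</configuration>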
hdfs-site.xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Do not modify this file directly. Instead, copy entries that you -->
<!-- wish to modify from this file into hdfs-site.xml and change them -->
<!-- there. If hdfs-site.xml does not already exist, create it. -->
<configuration>
<property>
<name>dfs.namenode.logging.level</name>
<value>info</value>
<description>The logging level for dfs namenode. Other values are "dir" (trace
namespace mutations), "block" (trace block under/over replications and block
creations/deletions), or "all".</description>
</property>
<property>
<name>dfs.secondary.http.address</name>
<value>0.0.0.0:50090</value>
<description>
The secondary namenode http server address and port.
If the port is 0 then the server will start on a free port.
</description>
</property>
<property>
<name>dfs.datanode.address</name>
<value>0.0.0.0:50010</value>
<description>
The address where the datanode server will listen to.
If the port is 0 then the server will start on a free port.
</description>
</property>
<property>
<name>dfs.datanode.http.address</name>
<value>0.0.0.0:50075</value>
<description>
The datanode http server address and port.
If the port is 0 then the server will start on a free port.
</description>
</property>
<property>
<name>dfs.datanode.ipc.address</name>
<value>0.0.0.0:50020</value>
<description>
The datanode ipc server address and port.
If the port is 0 then the server will start on a free port.
</description>
</property>
<property>
<name>dfs.datanode.handler.count</name>
<value>3</value>
<description>The number of server threads for the datanode.</description>
</property>
<property>
<name>dfs.http.address</name>
<value>0.0.0.0:50070</value>
<description>
The address and the base port where the dfs namenode web ui will listen on.
If the port is 0 then the server will start on a free port.
</description>
</property>
<property>
<name>dfs.https.enable</name>
<value>false</value>
<description>Decide if HTTPS(SSL) is supported on HDFS
</description>
</property>
<property>
<name>dfs.https.need.client.auth</name>
<value>false</value>
<description>Whether SSL client certificate authentication is required
</description>
</property>
<property>
<name>dfs.https.server.keystore.resource</name>
<value>ssl-server.xml</value>
<description>Resource file from which ssl server keystore
information will be extracted
</description>
</property>
<property>
<name>dfs.https.client.keystore.resource</name>
<value>ssl-client.xml</value>
<description>Resource file from which ssl client keystore
information will be extracted
</description>
</property>
<property>
<name>dfs.datanode.https.address</name>
<value>0.0.0.0:50475</value>
</property>
<property>
<name>dfs.https.address</name>
<value>0.0.0.0:50470</value>
</property>
<property>
<name>dfs.datanode.dns.interface</name>
<value>default</value>
<description>The name of the Network Interface from which a data node should
report its IP address.
</description>
</property>
<property>
<name>dfs.datanode.dns.nameserver</name>
<value>default</value>
<description>The host name or IP address of the name server (DNS)
which a DataNode should use to determine the host name used by the
NameNode for communication and display purposes.
</description>
</property>
<property>
<name>dfs.replication.considerLoad</name>
<value>true</value>
<description>Decide if chooseTarget considers the target's load or not
</description>
</property>
<property>
<name>dfs.default.chunk.view.size</name>
<value>32768</value>
<description>The number of bytes to view for a file on the browser.
</description>
</property>
<property>
<name>dfs.datanode.du.reserved</name>
<value>0</value>
<description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
</description>
</property>
<property>
<name>dfs.name.dir</name>
<value>/home/hadoop/data/dfs.name.dir</value>
<description>Determines where on the local filesystem the DFS name node
should store the name table(fsimage). If this is a comma-delimited list
of directories then the name table is replicated in all of the
directories, for redundancy. </description>
</property>
<property>
<name>dfs.name.edits.dir</name>
<value>${dfs.name.dir}</value>
<description>Determines where on the local filesystem the DFS name node
should store the transaction (edits) file. If this is a comma-delimited list
of directories then the transaction file is replicated in all of the
directories, for redundancy. Default value is same as dfs.name.dir
</description>
</property>
<property>
<name>dfs.web.ugi</name>
<value>webuser,webgroup</value>
<description>The user account used by the web interface.
Syntax: USERNAME,GROUP1,GROUP2, ...
</description>
</property>
<property>
<name>dfs.permissions</name>
<value>false</value>
<description>
If "true", enable permission checking in HDFS.
If "false", permission checking is turned off,
but all other behavior is unchanged.
Switching from one parameter value to the other does not change the mode,
owner or group of files or directories.
</description>
</property>
<property>
<name>dfs.permissions.supergroup</name>
<value>supergroup</value>
<description>The name of the group of super-users.</description>
</property>
<property>
<name>dfs.data.dir</name>
<value>/home/hadoop/data/dfs.data.dir</value>
<description>Determines where on the local filesystem a DFS data node
should store its blocks. If this is a comma-delimited
list of directories, then data will be stored in all named
directories, typically on different devices.
Directories that do not exist are ignored.
</description>
</property>
<property>
<name>dfs.replication</name>
<value>3</value>
<description>Default block replication.
The actual number of replications can be specified when the file is created.
The default is used if replication is not specified at create time.
</description>
</property>
<property>
<name>dfs.replication.max</name>
<value>512</value>
<description>Maximal block replication.
</description>
</property>
<property>
<name>dfs.replication.min</name>
<value>1</value>
<description>Minimal block replication.
</description>
</property>
<property>
<name>dfs.block.size</name>
<value>67108864</value>
<description>The default block size for new files.</description>
</property>
<property>
<name>dfs.df.interval</name>
<value>60000</value>
<description>Disk usage statistics refresh interval in msec.</description>
</property>
<property>
<name>dfs.client.block.write.retries</name>
<value>3</value>
<description>The number of retries for writing blocks to the data nodes,
before we signal failure to the application.
</description>
</property>
<property>
<name>dfs.blockreport.intervalMsec</name>
<value>3600000</value>
<description>Determines block reporting interval in milliseconds.</description>
</property>
<property>
<name>dfs.blockreport.initialDelay</name>
<value>0</value>
<description>Delay for first block report in seconds.</description>
</property>
<property>
<name>dfs.heartbeat.interval</name>
<value>3</value>
<description>Determines datanode heartbeat interval in seconds.</description>
</property>
<property>
<name>dfs.namenode.handler.count</name>
<value>10</value>
<description>The number of server threads for the namenode.</description>
</property>
<property>
<name>dfs.safemode.threshold.pct</name>
<value>0.999f</value>
<description>
Specifies the percentage of blocks that should satisfy
the minimal replication requirement defined by dfs.replication.min.
Values less than or equal to 0 mean not to start in safe mode.
Values greater than 1 will make safe mode permanent.
</description>
</property>
<property>
<name>dfs.safemode.extension</name>
<value>30000</value>
<description>
Determines extension of safe mode in milliseconds
after the threshold level is reached.
</description>
</property>
<property>
<name>dfs.balance.bandwidthPerSec</name>
<value>1048576</value>
<description>
Specifies the maximum amount of bandwidth that each datanode
can utilize for the balancing purpose in term of
the number of bytes per second.
</description>
</property>
<property>
<name>dfs.hosts</name>
<value></value>
<description>Names a file that contains a list of hosts that are
permitted to connect to the namenode. The full pathname of the file
must be specified. If the value is empty, all hosts are
permitted.</description>
</property>
<property>
<name>dfs.hosts.exclude</name>
<value></value>
<description>Names a file that contains a list of hosts that are
not permitted to connect to the namenode. The full pathname of the
file must be specified. If the value is empty, no hosts are
excluded.</description>
</property>
<property>
<name>dfs.max.objects</name>
<value>0</value>
<description>The maximum number of files, directories and blocks
dfs supports. A value of zero indicates no limit to the number
of objects that dfs supports.
</description>
</property>
<property>
<name>dfs.namenode.decommission.interval</name>
<value>30</value>
<description>Namenode periodicity in seconds to check if decommission is
complete.</description>
</property>
<property>
<name>dfs.namenode.decommission.nodes.per.interval</name>
<value>5</value>
<description>The number of nodes namenode checks if decommission is complete
in each dfs.namenode.decommission.interval.</description>
</property>
<property>
<name>dfs.replication.interval</name>
<value>3</value>
<description>The periodicity in seconds with which the namenode computes
replication work for datanodes.</description>
</property>
<property>
<name>dfs.access.time.precision</name>
<value>3600000</value>
<description>The access time for an HDFS file is precise up to this value.
The default value is 1 hour. Setting a value of 0 disables
access times for HDFS.
</description>
</property>
<property>
<name>dfs.support.append</name>
<value>false</value>
<description>Does HDFS allow appends to files?
This is currently set to false because there are bugs in the
"append code" and it is not supported in any production cluster.
</description>
</property>
</configuration>
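As with core-site.xml, only the values that differ from the defaults need to go into hdfs-site.xml. Judging by the dump, this cluster overrides the metadata and block directories and disables permission checking; a minimal equivalent sketch, using those values:
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<!-- Where the namenode keeps the fsimage; a comma-delimited list of
directories replicates it for redundancy. -->
<property>
<name>dfs.name.dir</name>
<value>/home/hadoop/data/dfs.name.dir</value>
</property>
<!-- Where each datanode stores blocks; list multiple devices to spread i/o. -->
<property>
<name>dfs.data.dir</name>
<value>/home/hadoop/data/dfs.data.dir</value>
</property>
<!-- Permission checking disabled, as in the dump above; leave it at the
default (true) on shared clusters. -->
<property>
<name>dfs.permissions</name>
<value>false</value>
</property>
</configuration>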
mapred-site.xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Do not modify this file directly. Instead, copy entries that you -->
<!-- wish to modify from this file into mapred-site.xml and change them -->
<!-- there. If mapred-site.xml does not already exist, create it. -->
<configuration>
<property>
<name>hadoop.job.history.location</name>
<value></value>
<description> If the job tracker is static, the history files are stored
in this single well-known place. If no value is set here, by default
they are kept in the local file system at ${hadoop.log.dir}/history.
</description>
</property>
<property>
<name>hadoop.job.history.user.location</name>
<value></value>
<description> User can specify a location to store the history files of
a particular job. If nothing is specified, the logs are stored in the
output directory. The files are stored in "_logs/history/" in the directory.
User can stop logging by giving the value "none".
</description>
</property>
<!-- i/o properties -->
<property>
<name>io.sort.factor</name>
<value>10</value>
<description>The number of streams to merge at once while sorting
files. This determines the number of open file handles.</description>
</property>
<property>
<name>io.sort.mb</name>
<value>100</value>
<description>The total amount of buffer memory to use while sorting
files, in megabytes. By default, gives each merge stream 1MB, which
should minimize seeks.</description>
</property>
<property>
<name>io.sort.record.percent</name>
<value>0.05</value>
<description>The percentage of io.sort.mb dedicated to tracking record
boundaries. Let this value be r, io.sort.mb be x. The maximum number
of records collected before the collection thread must block is equal
to (r * x) / 4</description>
</property>
<property>
<name>io.sort.spill.percent</name>
<value>0.80</value>
<description>The soft limit in either the buffer or record collection
buffers. Once reached, a thread will begin to spill the contents to disk
in the background. Note that this does not imply any chunking of data to
the spill. A value less than 0.5 is not recommended.</description>
</property>
<property>
<name>io.map.index.skip</name>
<value>0</value>
<description>Number of index entries to skip between each entry.
Zero by default. Setting this to values larger than zero can
facilitate opening large map files using less memory.</description>
</property>
<property>
<name>mapred.job.tracker</name>
<value>192.168.1.3:9001</value>
<description>The host and port that the MapReduce job tracker runs
at. If "local", then jobs are run in-process as a single map
and reduce task.
</description>
</property>
<property>
<name>mapred.job.tracker.http.address</name>
<value>0.0.0.0:50030</value>
<description>
The job tracker http server address and port the server will listen on.
If the port is 0 then the server will start on a free port.
</description>
</property>
<property>
<name>mapred.job.tracker.handler.count</name>
<value>10</value>
<description>
The number of server threads for the JobTracker. This should be roughly
4% of the number of tasktracker nodes.
</description>
</property>
<property>
<name>mapred.task.tracker.report.address</name>
<value>127.0.0.1:0</value>
<description>The interface and port that task tracker server listens on.
Since it is only connected to by the tasks, it uses the local interface.
EXPERT ONLY. Should only be changed if your host does not have the loopback
interface.</description>
</property>
<property>
<name>mapred.local.dir</name>
<value>/home/hadoop/mapred/mapred.local.dir</value>
<description>The local directory where MapReduce stores intermediate
data files. May be a comma-separated list of
directories on different devices in order to spread disk i/o.
Directories that do not exist are ignored.
</description>
</property>
<property>
<name>mapred.system.dir</name>
<value>/home/hadoop/mapred/system</value>
<description>The shared directory where MapReduce stores control files.
</description>
</property>
<property>
<name>mapred.temp.dir</name>
<value>${hadoop.tmp.dir}/mapred/temp</value>
<description>A shared directory for temporary files.
</description>
</property>
<property>
<name>mapred.local.dir.minspacestart</name>
<value>0</value>
<description>If the space in mapred.local.dir drops under this,
do not ask for more tasks.
Value in bytes.
</description>
</property>
<property>
<name>mapred.local.dir.minspacekill</name>
<value>0</value>
<description>If the space in mapred.local.dir drops under this,
do not ask more tasks until all the current ones have finished and
cleaned up. Also, to save the rest of the tasks we have running,
kill one of them, to clean up some space. Start with the reduce tasks,
then go with the ones that have finished the least.
Value in bytes.
</description>
</property>
<property>
<name>mapred.tasktracker.expiry.interval</name>
<value>600000</value>
<description>Expert: The time-interval, in milliseconds, after which
a tasktracker is declared 'lost' if it doesn't send heartbeats.
</description>
</property>
<property>
<name>mapred.tasktracker.instrumentation</name>
<value>org.apache.hadoop.mapred.TaskTrackerMetricsInst</value>
<description>Expert: The instrumentation class to associate with each TaskTracker.
</description>
</property>
<property>
<name>mapred.tasktracker.memory_calculator_plugin</name>
<value></value>
<description>
Name of the class whose instance will be used to query memory information
on the tasktracker.
The class must be an instance of
org.apache.hadoop.util.MemoryCalculatorPlugin. If the value is null, the
tasktracker attempts to use a class appropriate to the platform.
Currently, the only platform supported is Linux.
</description>
</property>
<property>
<name>mapred.tasktracker.taskmemorymanager.monitoring-interval</name>
<value>5000</value>
<description>The interval, in milliseconds, for which the tasktracker waits
between two cycles of monitoring its tasks' memory usage. Used only if
tasks' memory management is enabled via mapred.tasktracker.tasks.maxmemory.
</description>
</property>
<property>
<name>mapred.tasktracker.procfsbasedprocesstree.sleeptime-before-sigkill</name>
<value>5000</value>
<description>The time, in milliseconds, the tasktracker waits for sending a
SIGKILL to a process that has overrun memory limits, after it has been sent
a SIGTERM. Used only if tasks' memory management is enabled via
mapred.tasktracker.tasks.maxmemory.</description>
</property>
<property>
<name>mapred.map.tasks</name>
<value>2</value>
<description>The default number of map tasks per job.
Ignored when mapred.job.tracker is "local".
</description>
</property>
<property>
<name>mapred.reduce.tasks</name>
<value>1</value>
<description>The default number of reduce tasks per job. Typically set to 99%
of the cluster's reduce capacity, so that if a node fails the reduces can
still be executed in a single wave.
Ignored when mapred.job.tracker is "local".
</description>
</property>
<property>
<name>mapred.jobtracker.restart.recover</name>
<value>false</value>
<description>"true" to enable (job) recovery upon restart,
"false" to start afresh
</description>
</property>
<property>
<name>mapred.jobtracker.job.history.block.size</name>
<value>3145728</value>
<description>The block size of the job history file. Since the job recovery
uses job history, it's important to dump job history to disk as
soon as possible. Note that this is an expert level parameter.
The default value is set to 3 MB.
</description>
</property>
<property>
<name>mapred.jobtracker.taskScheduler</name>
<value>org.apache.hadoop.mapred.JobQueueTaskScheduler</value>
<description>The class responsible for scheduling the tasks.</description>
</property>
<property>
<name>mapred.jobtracker.taskScheduler.maxRunningTasksPerJob</name>
<value></value>
<description>The maximum number of running tasks for a job before
it gets preempted. No limits if undefined.
</description>
</property>
<property>
<name>mapred.map.max.attempts</name>
<value>4</value>
<description>Expert: The maximum number of attempts per map task.
In other words, the framework will try to execute a map task this many
times before giving up on it.
</description>
</property>
<property>
<name>mapred.reduce.max.attempts</name>
<value>4</value>
<description>Expert: The maximum number of attempts per reduce task.
In other words, the framework will try to execute a reduce task this many
times before giving up on it.
</description>
</property>
<property>
<name>mapred.reduce.parallel.copies</name>
<value>5</value>
<description>The default number of parallel transfers run by reduce
during the copy(shuffle) phase.
</description>
</property>
<property>
<name>mapred.reduce.copy.backoff</name>
<value>300</value>
<description>The maximum amount of time (in seconds) a reducer spends on
fetching one map output before declaring it as failed.
</description>
</property>
<property>
<name>mapred.task.timeout</name>
<value>600000</value>
<description>The number of milliseconds before a task will be
terminated if it neither reads an input, writes an output, nor
updates its status string.
</description>
</property>
<property>
<name>mapred.tasktracker.map.tasks.maximum</name>
<value>2</value>
<description>The maximum number of map tasks that will be run
simultaneously by a task tracker.
</description>
</property>
<property>
<name>mapred.tasktracker.reduce.tasks.maximum</name>
<value>2</value>
<description>The maximum number of reduce tasks that will be run
simultaneously by a task tracker.
</description>
</property>
<property>
<name>mapred.jobtracker.completeuserjobs.maximum</name>
<value>100</value>
<description>The maximum number of complete jobs per user to keep around
before delegating them to the job history.</description>
</property>
<property>
<name>mapred.jobtracker.instrumentation</name>
<value>org.apache.hadoop.mapred.JobTrackerMetricsInst</value>
<description>Expert: The instrumentation class to associate with each JobTracker.
</description>
</property>
<property>
<name>mapred.child.java.opts</name>
<value>-Xmx200m</value>
<description>Java opts for the task tracker child processes.
The following symbol, if present, will be interpolated: @taskid@ is replaced
by current TaskID. Any other occurrences of '@' will go unchanged.
For example, to enable verbose gc logging to a file named for the taskid in
/tmp and to set the heap maximum to be a gigabyte, pass a 'value' of:
-Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc
The configuration variable mapred.child.ulimit can be used to control the
maximum virtual memory of the child processes.
</description>
</property>
<property>
<name>mapred.child.ulimit</name>
<value></value>
<description>The maximum virtual memory, in KB, of a process launched by the
Map-Reduce framework. This can be used to control both the Mapper/Reducer
tasks and applications using Hadoop Pipes, Hadoop Streaming etc.
By default it is left unspecified to let cluster admins control it via
limits.conf and other such relevant mechanisms.
Note: mapred.child.ulimit must be greater than or equal to the -Xmx passed to
JavaVM, else the VM might not start.
</description>
</property>
<property>
<name>mapred.child.tmp</name>
<value>./tmp</value>
<description> To set the value of tmp directory for map and reduce tasks.
If the value is an absolute path, it is directly assigned. Otherwise, it is
prepended with task's working directory. The java tasks are executed with
option -Djava.io.tmpdir='the absolute path of the tmp dir'. Pipes and
streaming are set with environment variable,
TMPDIR='the absolute path of the tmp dir'
</description>
</property>
<property>
<name>mapred.inmem.merge.threshold</name>
<value>1000</value>
<description>The threshold, in terms of the number of files
for the in-memory merge process. When we accumulate threshold number of files
we initiate the in-memory merge and spill to disk. A value of 0 or less
means there is no threshold, and the merge instead depends only on the
ramfs's memory consumption.
</description>
</property>
<property>
<name>mapred.job.shuffle.merge.percent</name>
<value>0.66</value>
<description>The usage threshold at which an in-memory merge will be
initiated, expressed as a percentage of the total memory allocated to
storing in-memory map outputs, as defined by
mapred.job.shuffle.input.buffer.percent.
</description>
</property>
<property>
<name>mapred.job.shuffle.input.buffer.percent</name>
<value>0.70</value>
<description>The percentage of memory to be allocated from the maximum heap
size to storing map outputs during the shuffle.
</description>
</property>
<property>
<name>mapred.job.reduce.input.buffer.percent</name>
<value>0.0</value>
<description>The percentage of memory, relative to the maximum heap size, to
retain map outputs during the reduce. When the shuffle is concluded, any
remaining map outputs in memory must consume less than this threshold before
the reduce can begin.
</description>
</property>
<property>
<name>mapred.map.tasks.speculative.execution</name>
<value>true</value>
<description>If true, then multiple instances of some map tasks
may be executed in parallel.</description>
</property>
<property>
<name>mapred.reduce.tasks.speculative.execution</name>
<value>true</value>
<description>If true, then multiple instances of some reduce tasks
may be executed in parallel.</description>
</property>
<property>
<name>mapred.job.reuse.jvm.num.tasks</name>
<value>1</value>
<description>How many tasks to run per jvm. If set to -1, there is
no limit.
</description>
</property>
<property>
<name>mapred.min.split.size</name>
<value>0</value>
<description>The minimum size chunk that map input should be split
into. Note that some file formats may have minimum split sizes that
take priority over this setting.</description>
</property>
<property>
<name>mapred.jobtracker.maxtasks.per.job</name>
<value>-1</value>
<description>The maximum number of tasks for a single job.
A value of -1 indicates that there is no maximum. </description>
</property>
<property>
<name>mapred.submit.replication</name>
<value>10</value>
<description>The replication level for submitted job files. This
should be around the square root of the number of nodes.
</description>
</property>
<property>
<name>mapred.tasktracker.dns.interface</name>
<value>default</value>
<description>The name of the Network Interface from which a task
tracker should report its IP address.
</description>
</property>
<property>
<name>mapred.tasktracker.dns.nameserver</name>
<value>default</value>
<description>The host name or IP address of the name server (DNS)
which a TaskTracker should use to determine the host name used by
the JobTracker for communication and display purposes.
</description>
</property>
<property>
<name>tasktracker.http.threads</name>
<value>40</value>
<description>The number of worker threads for the http server. This is
used for map output fetching.
</description>
</property>
<property>
<name>mapred.task.tracker.http.address</name>
<value>0.0.0.0:50060</value>
<description>
The task tracker http server address and port.
If the port is 0 then the server will start on a free port.
</description>
</property>
<property>
<name>keep.failed.task.files</name>
<value>false</value>
<description>Should the files for failed tasks be kept. This should only be
used on jobs that are failing, because the storage is never
reclaimed. It also prevents the map outputs from being erased
from the reduce directory as they are consumed.</description>
</property>
<!--
<property>
<name>keep.task.files.pattern</name>
<value>.*_m_123456_0</value>
<description>Keep all files from tasks whose task names match the given
regular expression. Defaults to none.</description>
</property>
-->
<property>
<name>mapred.output.compress</name>
<value>false</value>
<description>Should the job outputs be compressed?
</description>
</property>
<property>
<name>mapred.output.compression.type</name>
<value>RECORD</value>
<description>If the job outputs are to be compressed as SequenceFiles, how should
they be compressed? Should be one of NONE, RECORD or BLOCK.
</description>
</property>
<property>
<name>mapred.output.compression.codec</name>
<value>org.apache.hadoop.io.compress.DefaultCodec</value>
<description>If the job outputs are compressed, how should they be compressed?
</description>
</property>
<property>
<name>mapred.compress.map.output</name>
<value>false</value>
<description>Should the outputs of the maps be compressed before being
sent across the network. Uses SequenceFile compression.
</description>
</property>
<property>
<name>mapred.map.output.compression.codec</name>
<value>org.apache.hadoop.io.compress.DefaultCodec</value>
<description>If the map outputs are compressed, how should they be
compressed?
</description>
</property>
<property>
<name>map.sort.class</name>
<value>org.apache.hadoop.util.QuickSort</value>
<description>The default sort class for sorting keys.
</description>
</property>
<property>
<name>mapred.userlog.limit.kb</name>
<value>0</value>
<description>The maximum size of user-logs of each task in KB. 0 disables the cap.
</description>
</property>
<property>
<name>mapred.userlog.retain.hours</name>
<value>24</value>
<description>The maximum time, in hours, for which the user-logs are to be
retained.
</description>
</property>
<property>
<name>mapred.hosts</name>
<value></value>
<description>Names a file that contains the list of nodes that may
connect to the jobtracker. If the value is empty, all hosts are
permitted.</description>
</property>
<property>
<name>mapred.hosts.exclude</name>
<value></value>
<description>Names a file that contains the list of hosts that
should be excluded by the jobtracker. If the value is empty, no
hosts are excluded.</description>
</property>
<property>
<name>mapred.max.tracker.blacklists</name>
<value>4</value>
<description>The number of blacklistings of a tasktracker by individual jobs
after which the tasktracker may be blacklisted across all jobs. The
tracker will be given tasks again later (after a day), and becomes a
healthy tracker again after a restart.
</description>
</property>
<property>
<name>mapred.max.tracker.failures</name>
<value>4</value>
<description>The number of task-failures on a tasktracker of a given job
after which new tasks of that job aren't assigned to it.
</description>
</property>
<property>
<name>jobclient.output.filter</name>
<value>FAILED</value>
<description>The filter for controlling the output of the task's userlogs sent
to the console of the JobClient.
The permissible options are: NONE, KILLED, FAILED, SUCCEEDED and
ALL.
</description>
</property>
<property>
<name>mapred.job.tracker.persist.jobstatus.active</name>
<value>false</value>
<description>Indicates if persistency of job status information is
active or not.
</description>
</property>
<property>
<name>mapred.job.tracker.persist.jobstatus.hours</name>
<value>0</value>
<description>The number of hours job status information is persisted in DFS.
The job status information will be available after it drops off the memory
queue and between jobtracker restarts. With a zero value the job status
information is not persisted at all in DFS.
</description>
</property>
<property>
<name>mapred.job.tracker.persist.jobstatus.dir</name>
<value>/jobtracker/jobsInfo</value>
<description>The directory where the job status information is persisted
in a file system to be available after it drops off the memory queue and
between jobtracker restarts.
</description>
</property>
<property>
<name>mapred.task.profile</name>
<value>false</value>
<description>Sets whether the system should collect profiler
information for some of the tasks in this job. The information is stored
in the user log directory. The value is "true" if task profiling
is enabled.</description>
</property>
<property>
<name>mapred.task.profile.maps</name>
<value>0-2</value>
<description> To set the ranges of map tasks to profile.
mapred.task.profile has to be set to true for the value to be accounted.
</description>
</property>
<property>
<name>mapred.task.profile.reduces</name>
<value>0-2</value>
<description> To set the ranges of reduce tasks to profile.
mapred.task.profile has to be set to true for the value to be accounted.
</description>
</property>
<property>
<name>mapred.line.input.format.linespermap</name>
<value>1</value>
<description> Number of lines per split in NLineInputFormat.
</description>
</property>
<property>
<name>mapred.skip.attempts.to.start.skipping</name>
<value>2</value>
<description> The number of task attempts AFTER which skip mode
will be kicked off. When skip mode is kicked off, the task reports
to the TaskTracker the range of records it will process next, so
that on failures the TaskTracker knows which records are possibly
bad. On further executions, those are skipped.
</description>
</property>
<property>
<name>mapred.skip.map.auto.incr.proc.count</name>
<value>true</value>
<description> The flag which if set to true,
SkipBadRecords.COUNTER_MAP_PROCESSED_RECORDS is incremented
by MapRunner after invoking the map function. This value must be set to
false for applications which process the records asynchronously
or buffer the input records. For example streaming.
In such cases applications should increment this counter on their own.
</description>
</property>
<property>
<name>mapred.skip.reduce.auto.incr.proc.count</name>
<value>true</value>
<description> The flag which if set to true,
SkipBadRecords.COUNTER_REDUCE_PROCESSED_GROUPS is incremented
by framework after invoking the reduce function. This value must be set to
false for applications which process the records asynchronously
or buffer the input records. For example streaming.
In such cases applications should increment this counter on their own.
</description>
</property>
<property>
<name>mapred.skip.out.dir</name>
<value></value>
<description> If no value is specified here, the skipped records are
written to the output directory at _logs/skip.
User can stop writing skipped records by giving the value "none".
</description>
</property>
<property>
<name>mapred.skip.map.max.skip.records</name>
<value>0</value>
<description> The number of acceptable skip records surrounding the bad
record PER bad record in mapper. The number includes the bad record as well.
To turn the feature of detection/skipping of bad records off, set the
value to 0.
The framework tries to narrow down the skipped range by retrying
until this threshold is met OR all attempts get exhausted for this task.
Set the value to Long.MAX_VALUE to indicate that the framework need not
try to narrow down. Whatever records (depending on the application) get
skipped are acceptable.
</description>
</property>
<property>
<name>mapred.skip.reduce.max.skip.groups</name>
<value>0</value>
<description> The number of acceptable skip groups surrounding the bad
group PER bad group in reducer. The number includes the bad group as well.
To turn the feature of detection/skipping of bad groups off, set the
value to 0.
The framework tries to narrow down the skipped range by retrying
until this threshold is met OR all attempts get exhausted for this task.
Set the value to Long.MAX_VALUE to indicate that the framework need not
try to narrow down. Whatever groups (depending on the application) get
skipped are acceptable.
</description>
</property>
<!-- Job Notification Configuration -->
<!--
<property>
<name>job.end.notification.url</name>
<value>http://localhost:8080/jobstatus.php?jobId=$jobId&jobStatus=$jobStatus</value>
<description>Indicates url which will be called on completion of job to inform
end status of job.
User can give at most 2 variables with URI : $jobId and $jobStatus.
If they are present in URI, then they will be replaced by their
respective values.
</description>
</property>
-->
<property>
<name>job.end.retry.attempts</name>
<value>0</value>
<description>Indicates how many times hadoop should attempt to contact the
notification URL.</description>
</property>
<property>
<name>job.end.retry.interval</name>
<value>30000</value>
<description>Indicates time in milliseconds between notification URL retry
calls</description>
</property>
<!-- Proxy Configuration -->
<property>
<name>hadoop.rpc.socket.factory.class.JobSubmissionProtocol</name>
<value></value>
<description> SocketFactory to use to connect to a Map/Reduce master
(JobTracker). If null or empty, then use hadoop.rpc.socket.class.default.
</description>
</property>
<property>
<name>mapred.task.cache.levels</name>
<value>2</value>
<description> This is the max level of the task cache. For example, if
the level is 2, the tasks cached are at the host level and at the rack
level.
</description>
</property>
<property>
<name>mapred.queue.names</name>
<value>default</value>
<description> Comma separated list of queues configured for this jobtracker.
Jobs are added to queues and schedulers can configure different
scheduling properties for the various queues. To configure a property
for a queue, the name of the queue must match the name specified in this
value. Queue properties that are common to all schedulers are configured
here with the naming convention mapred.queue.$QUEUE-NAME.$PROPERTY-NAME,
e.g. mapred.queue.default.acl-submit-job.
The number of queues configured in this parameter could depend on the
type of scheduler being used, as specified in
mapred.jobtracker.taskScheduler. For example, the JobQueueTaskScheduler
supports only a single queue, which is the default configured here.
Before adding more queues, ensure that the scheduler you've configured
supports multiple queues.
</description>
</property>
<property>
<name>mapred.acls.enabled</name>
<value>false</value>
<description> Specifies whether ACLs are enabled, and should be checked
for various operations.
</description>
</property>
<property>
<name>mapred.queue.default.acl-submit-job</name>
<value>*</value>
<description> Comma separated list of user and group names that are allowed
to submit jobs to the 'default' queue. The user list and the group list
are separated by a blank. For e.g. alice,bob group1,group2.
If set to the special value '*', it means all users are allowed to
submit jobs.
</description>
</property>
<property>
<name>mapred.queue.default.acl-administer-jobs</name>
<value>*</value>
<description> Comma separated list of user and group names that are allowed
to delete jobs or modify job's priority for jobs not owned by the current
user in the 'default' queue. The user list and the group list
are separated by a blank. For e.g. alice,bob group1,group2.
If set to the special value '*', it means all users are allowed to do
this operation.
</description>
</property>
<property>
<name>mapred.job.queue.name</name>
<value>default</value>
<description> Queue to which a job is submitted. This must match one of the
queues defined in mapred.queue.names for the system. Also, the ACL setup
for the queue must allow the current user to submit a job to the queue.
Before specifying a queue, ensure that the system is configured with
the queue, and access is allowed for submitting jobs to the queue.
</description>
</property>
<property>
<name>mapred.tasktracker.indexcache.mb</name>
<value>10</value>
<description> The maximum memory that a task tracker allows for the
index cache that is used when serving map outputs to reducers.
</description>
</property>
<property>
<name>mapred.merge.recordsBeforeProgress</name>
<value>10000</value>
<description> The number of records to process during merge before
sending a progress notification to the TaskTracker.
</description>
</property>
<property>
<name>mapred.reduce.slowstart.completed.maps</name>
<value>0.05</value>
<description>Fraction of the number of maps in the job which should be
complete before reduces are scheduled for the job.
</description>
</property>
</configuration>
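Likewise for mapred-site.xml: the essential override is mapred.job.tracker, which defaults to "local" and would otherwise run jobs in-process. A minimal sketch using the values from the dump above:
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<!-- JobTracker RPC endpoint; "local" would run jobs in-process instead. -->
<property>
<name>mapred.job.tracker</name>
<value>192.168.1.3:9001</value>
</property>
<!-- Local scratch space for intermediate map output; a comma-separated
list of directories on different devices spreads disk i/o. -->
<property>
<name>mapred.local.dir</name>
<value>/home/hadoop/mapred/mapred.local.dir</value>
</property>
<!-- Shared directory where MapReduce keeps job control files. -->
<property>
<name>mapred.system.dir</name>
<value>/home/hadoop/mapred/system</value>
</property>
</configuration>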