推荐文章

生成抖音中的像素视频(视频转字符动画)

html

生成抖音中的像素视频(视频转字符动画)以下还需要需要ffmpeg视频处理工具合成音频 安装ffmpeg |ffmpeg基本用法引用类pip3.7 install numpy opencv-python Pillow代码实现# -*- coding:utf-8 -*-import os, cv2, subprocess, argparsefrom PIL import Image, ImageFont, ImageDraw# pip3.7 install numpy opencv-python Pillow# 命令行输入参数处理parser = argparse.ArgumentParser()parser.add_argument('file')parser.add_argument('-o', '--output')parser.add_argument('-f', '--fps', type=float, default=24) # 帧parser.add_argument('-s', '--save', type=bool, nargs='?', default=False, const=True) # 是否保留Cache文件,默认不保存# 获取参数args = parser.parse_args()INPUT = args.fileOUTPUT = args.outputFPS = args.fpsSAVE = args.save <!--more--># 像素对应ascii码ascii_char = list("$@B%8&WM#*oahkbdpqwmZO0QLCJUYXzcvunxrjft/\|()1{}[]?-_+~<>i!lI;:oa+>!:+. ")# ascii_char = list("MNHQ$OC67+>!:-. ")# ascii_char = list("MNHQ$OC67)oa+>!:+. ")# 将像素转换为ascii码def get_char(r, g, b, alpha=256): if alpha == 0: return '' length = len(ascii_char) gray = int(0.2126 * r + 0.7152 * g + 0.0722 * b) unit = (256.0 + 1) / length return ascii_char[int(gray / unit)]# 将视频拆分成图片def videoSplitJpg(file_name): vc = cv2.VideoCapture(file_name) c = 1 if vc.isOpened(): r, frame = vc.read() if not os.path.exists('Cache'): os.mkdir('Cache') os.chdir('Cache') else: r = False while r: cv2.imwrite(str(c) + '.jpg', frame) # 将txt转换为图片 txtToImage(str(c) + '.jpg') r, frame = vc.read() c += 1 os.chdir('..') return vc# 将txt转换为图片(换为ascii图)def txtToImage(file_name): im = Image.open(file_name).convert('RGB') # gif拆分后的图像,需要转换,否则报错,由于gif分割后保存的是索引颜色 raw_width = im.width raw_height = im.height width = int(raw_width / 6) height = int(raw_height / 15) im = im.resize((width, height), Image.NEAREST) txt = "" colors = [] for i in range(height): for j in range(width): pixel = im.getpixel((j, i)) colors.append((pixel[0], pixel[1], pixel[2])) if (len(pixel) == 4): txt += get_char(pixel[0], pixel[1], pixel[2], pixel[3]) else: txt += get_char(pixel[0], pixel[1], pixel[2]) txt += '\n' colors.append((255, 255, 255)) im_txt = Image.new("RGB", 
(raw_width, raw_height), (255, 255, 255)) dr = ImageDraw.Draw(im_txt) # font = ImageFont.truetype(os.path.join("fonts","汉仪楷体简.ttf"),18) font = ImageFont.load_default().font x = y = 0 # 获取字体的宽高 font_w, font_h = font.getsize(txt[1]) or (6, 11) font_h *= 1.37 # 调整后更佳 # ImageDraw为每个ascii码进行上色 for i in range(len(txt)): if (txt[i] == '\n'): x += font_h y = -font_w dr.text((y, x), txt[i], fill=colors[i]) y += font_w name = file_name print(name + ' changed') im_txt.save(name)# 将图片合成视频def jpgToVideo(outfile_name, fps): fourcc = cv2.VideoWriter_fourcc(*"MJPG") images = os.listdir('Cache') im = Image.open('Cache/' + images[0]) vw = cv2.VideoWriter(outfile_name + '.avi', fourcc, fps, im.size) os.chdir('Cache') for image in range(len(images)): # Image.open(str(image)+'.jpg').convert("RGB").save(str(image)+'.jpg') frame = cv2.imread(str(image + 1) + '.jpg') vw.write(frame) print(str(image + 1) + '.jpg' + ' finished') os.chdir('..') vw.release()# 调用ffmpeg获取mp3音频文件def videoGetVoice(file_name, voice_file): # ffmpeg -i 'file_name' -vn -y -acodec copy 'voice_file' 获取视频中的音频文件 subprocess.call('ffmpeg -i ' + file_name + ' -vn -y -acodec copy ' + voice_file, shell=True)# 合成音频和视频文件def video_add_mp3(file_name, voice_file): outfile_name = file_name.split('.')[0] + '-txt.mp4' # ffmpeg -i 'file_name' -i 'voice_file' -strict -2 -f mp4 'outfile_name' 合并avi和aac音频生成mp4文件 subprocess.call('ffmpeg -i ' + file_name + ' -i ' + voice_file + ' -strict -2 -f mp4 ' + outfile_name, shell=True)# 递归删除目录def remove_dir(path): if os.path.exists(path): if os.path.isdir(path): dirs = os.listdir(path) for d in dirs: if os.path.isdir(path + '/' + d): remove_dir(path + '/' + d) elif os.path.isfile(path + '/' + d): os.remove(path + '/' + d) os.rmdir(path) return elif os.path.isfile(path): os.remove(path) returnif __name__ == '__main__': # 将视频拆分成图片 print('--------将视频拆分成图片------------------') vc = videoSplitJpg(INPUT) FPS = vc.get(cv2.CAP_PROP_FPS) # 获取帧率 vc.release() # 将图片合成视频 
print('--------将图片合成视频------------------') jpgToVideo(INPUT.split('.')[0], FPS) # 音频文件名 voice_file = INPUT.split('.')[0] + '.aac' # 调用ffmpeg获取mp3音频文件 print('--------调用ffmpeg获取mp3音频文件------------------') videoGetVoice(INPUT, voice_file) # 合成音频和视频文件 print('--------合成音频和视频文件------------------') video_add_mp3(INPUT.split('.')[0] + '.avi', voice_file) if (not SAVE): print('--------删除生成的音频视频文件,和cache文件------------------') remove_dir("Cache") os.remove(voice_file) os.remove(INPUT.split('.')[0] + '.avi') print('--------删除成功------------------')

01

2018/12

Solr的schema.xml配置文件

html

1.需求一个solr库存储三种类型数据 (视频数据、项目数据、资讯数据 注:不同类型数据id可能重复)1.配置将主键id改为了_id,为了避免与数据库id冲突<field name="id" type="string" indexed="true" stored="true" required="true" multiValued="false"/><uniqueKey>id</uniqueKey>改为:<field name="_id" type="string" indexed="true" stored="true" required="true" multiValued="false"/><uniqueKey>_id</uniqueKey>2.将主键id设置成自增添加uuid字段类型,修改字段id的类型。<field name="_id" type="uuid" indexed="true" stored="true" required="true" multiValued="false" /> <!--添加这行配置--><fieldType name="uuid" class="solr.UUIDField" indexed="true"/>2.1修改配置solrconfig.xml文件添加更新策略配置,调用Solr中的UUIDUpdateProcessorFactory生成全局唯一的UUID。<updateRequestProcessorChain name="uuid"> <processor class="solr.UUIDUpdateProcessorFactory"> <str name="fieldName">_id</str> </processor> <processor class="solr.LogUpdateProcessorFactory" /> <processor class="solr.DistributedUpdateProcessorFactory" /> <processor class="solr.RunUpdateProcessorFactory" /></updateRequestProcessorChain>配置requestHandler,保证dataimport和update操作都可以自动生成UUID。添加这一行<str name="update.chain">uuid</str><initParams path="/update/**,/query,/select,/tvrh,/elevate,/spell"><lst name="defaults"> <!--添加这一行 start --> <str name="update.chain">uuid</str> <!--添加这一行 end --> <str name="df">text</str></lst></initParams>3.搜索时同时搜索title和content两个字段<!-- 设置搜索虚拟字段 searchText 的搜索分词器 --><field name="searchText" type="text_ik" indexed="true" stored="false" multiValued="true" /><!-- 复制title内容和content内容到searchText虚拟字段 --><copyField source="title" dest="searchText" /><copyField source="content" dest="searchText" />4.Field中indexed、stored、multiValued属性indexed="true" 可以被搜索stored="true" 结果集中有这个字段multiValued="true" 可以有多个值,适用于搜索时,两个字段copy出来的虚拟字段注: 修改配置文件需要重启服务 <!--more-->完整schema.xml配置文件<?xml version="1.0" encoding="UTF-8" ?><!-- Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. 
The ASF licenses this file to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.--><!-- This is the Solr schema file. This file should be named "schema.xml" and should be in the conf directory under the solr home (i.e. ./solr/conf/schema.xml by default) or located where the classloader for the Solr webapp can find it. This example schema is the recommended starting point for users. It should be kept correct and concise, usable out-of-the-box. For more information, on how to customize this file, please see http://wiki.apache.org/solr/SchemaXml--><schema name="example" version="1.5"> <!-- attribute "name" is the name of this schema and is only used for display purposes. version="x.y" is Solr's version number for the schema syntax and semantics. It should not normally be changed by applications. 1.0: multiValued attribute did not exist, all fields are multiValued by nature 1.1: multiValued attribute introduced, false by default 1.2: omitTermFreqAndPositions attribute introduced, true by default except for text fields. 1.3: removed optional field compress feature 1.4: autoGeneratePhraseQueries attribute introduced to drive QueryParser behavior when a single string produces multiple tokens. Defaults to off for version >= 1.4 1.5: omitNorms defaults to true for primitive field types (int, float, boolean, string...) 
--> <!-- Valid attributes for fields: name: mandatory - the name for the field type: mandatory - the name of a field type from the <types> fieldType section indexed: true if this field should be indexed (searchable or sortable) stored: true if this field should be retrievable docValues: true if this field should have doc values. Doc values are useful for faceting, grouping, sorting and function queries. Although not required, doc values will make the index faster to load, more NRT-friendly and more memory-efficient. They however come with some limitations: they are currently only supported by StrField, UUIDField and all Trie*Fields, and depending on the field type, they might require the field to be single-valued, be required or have a default value (check the documentation of the field type you're interested in for more information) multiValued: true if this field may contain multiple values per document omitNorms: (expert) set to true to omit the norms associated with this field (this disables length normalization and index-time boosting for the field, and saves some memory). Only full-text fields or fields that need an index-time boost need norms. Norms are omitted for primitive (non-analyzed) types by default. termVectors: [false] set to true to store the term vector for a given field. When using MoreLikeThis, fields used for similarity should be stored for best performance. termPositions: Store position information with the term vector. This will increase storage costs. termOffsets: Store offset information with the term vector. This will increase storage costs. required: The field is required. It will throw an error if the value does not exist default: a value that should be used if no value is specified when adding a document. --> <!-- field names should consist of alphanumeric or underscore characters only and not start with a digit. 
This is not currently strictly enforced, but other field names will not have first class support from all components and back compatibility is not guaranteed. Names with both leading and trailing underscores (e.g. _version_) are reserved. --> <!-- If you remove this field, you must _also_ disable the update log in solrconfig.xml or Solr won't start. _version_ and update log are required for SolrCloud --> <field name="_version_" type="long" indexed="true" stored="true"/> <!-- points to the root document of a block of nested documents. Required for nested document support, may be removed otherwise --> <field name="_root_" type="string" indexed="true" stored="false"/> <!-- Only remove the "id" field if you have a very good reason to. While not strictly required, it is highly recommended. A <uniqueKey> is present in almost all Solr installations. See the <uniqueKey> declaration below where <uniqueKey> is set to "id". Do NOT change the type and apply index-time analysis to the <uniqueKey> as it will likely make routing in SolrCloud and document replacement in general fail. Limited _query_ time analysis is possible as long as the indexing process is guaranteed to index the term in a compatible way. 
Any analysis applied to the <uniqueKey> should _not_ produce multiple tokens --> <!-- 这是主键id,修改成了_id --> <field name="_id" type="uuid" indexed="true" stored="true" required="true" multiValued="false"/> <!-- 将主键id设置成自增 --> <fieldType name="uuid" class="solr.UUIDField" indexed="true"/> <!-- 搜索字段 copy的title and item_title --> <field name="searchText" type="text_ik" indexed="true" stored="false" multiValued="true" /> <!-- index 库存储字段 --> <field name="id" type="int" default="0" indexed="true" stored="true"/> <field name="type" type="int" default="0" indexed="true" stored="true"/> <field name="title" type="text_ik" default="" indexed="true" stored="true"/> <field name="inputtime" type="int" default="0" indexed="true" stored="true"/> <field name="updatetime" type="int" default="0" indexed="true" stored="true"/> <field name="status" type="int" default="0" indexed="true" stored="true"/> <field name="share_url" type="string" default="" indexed="true" stored="true"/> <!-- 视频字段 --> <field name="user_id" type="int" indexed="true" stored="true"/> <field name="project_id" type="int" indexed="true" stored="true"/> <field name="slogan" type="string" indexed="true" stored="true"/> <field name="type_id" type="int" indexed="true" stored="true"/> <field name="cate_id" type="int" indexed="true" stored="true"/> <field name="views" type="int" indexed="true" stored="true"/> <field name="describe" type="string" indexed="true" stored="true"/> <field name="is_hot" type="int" indexed="true" stored="true"/> <field name="type_name" type="string" indexed="true" stored="true"/> <field name="cate_name" type="string" indexed="true" stored="true"/> <field name="cover_url" type="string" indexed="true" stored="true"/> <field name="video_url" type="string" indexed="true" stored="true"/> <field name="play_time_format" type="string" indexed="true" stored="true"/> <field name="file_size" type="string" indexed="true" stored="true"/> <field name="item_logo" type="string" indexed="true" stored="true"/> <!--项目 
--> <field name="catid" type="int" indexed="true" stored="true"/> <field name="ispay" type="int" indexed="true" stored="true"/> <field name="item_title" type="text_ik" indexed="true" stored="true"/> <field name="item_brandword" type="string" indexed="true" stored="true"/> <field name="brand" type="string" indexed="true" stored="true"/> <field name="brandword" type="string" indexed="true" stored="true"/> <field name="logo" type="string" indexed="true" stored="true"/> <field name="funds" type="string" indexed="true" stored="true"/> <field name="franchisee" type="string" indexed="true" stored="true"/> <field name="3gurl" type="string" indexed="true" stored="true"/> <field name="mobile_url" type="string" indexed="true" stored="true"/> <field name="companyId" type="string" indexed="true" stored="true"/> <field name="app_url" type="string" indexed="true" stored="true"/> <field name="gbooks" type="string" indexed="true" stored="true"/> <field name="dpa_item_logo" type="string" indexed="true" stored="true"/> <field name="dpa_img_dan1" type="string" indexed="true" stored="true"/> <field name="dpa_img_dan2" type="string" indexed="true" stored="true"/> <!--资讯 --> <field name="img" type="string" indexed="true" stored="true"/> <field name="itemid" type="string" indexed="true" stored="true"/> <field name="create_time" type="string" indexed="true" stored="true"/> <field name="announcer" type="string" indexed="true" stored="true"/> <field name="tag" type="string" indexed="true" stored="true"/> <field name="flag" type="int" indexed="true" stored="true"/> <field name="uid" type="int" indexed="true" stored="true"/> <field name="touid" type="int" indexed="true" stored="true"/> <field name="username" type="string" indexed="true" stored="true"/> <field name="content" type="string" indexed="true" stored="true"/> <field name="answercount" type="string" indexed="true" stored="true"/> <field name="anonymity" type="string" indexed="true" stored="true"/> <field name="praise" type="string" 
indexed="true" stored="true"/> <field name="resource" type="string" indexed="true" stored="true"/> <!-- Dynamic field definitions allow using convention over configuration for fields via the specification of patterns to match field names. EXAMPLE: name="*_i" will match any field ending in _i (like myid_i, z_i) RESTRICTION: the glob-like pattern in the name attribute must have a "*" only at the start or the end. --> <dynamicField name="*_i" type="int" indexed="true" stored="true"/> <dynamicField name="*_is" type="int" indexed="true" stored="true" multiValued="true"/> <dynamicField name="*_s" type="string" indexed="true" stored="true"/> <dynamicField name="*_ss" type="string" indexed="true" stored="true" multiValued="true"/> <dynamicField name="*_l" type="long" indexed="true" stored="true"/> <dynamicField name="*_ls" type="long" indexed="true" stored="true" multiValued="true"/> <dynamicField name="*_t" type="text_general" indexed="true" stored="true"/> <dynamicField name="*_txt" type="text_general" indexed="true" stored="true" multiValued="true"/> <dynamicField name="*_en" type="text_en" indexed="true" stored="true" multiValued="true"/> <dynamicField name="*_b" type="boolean" indexed="true" stored="true"/> <dynamicField name="*_bs" type="boolean" indexed="true" stored="true" multiValued="true"/> <dynamicField name="*_f" type="float" indexed="true" stored="true"/> <dynamicField name="*_fs" type="float" indexed="true" stored="true" multiValued="true"/> <dynamicField name="*_d" type="double" indexed="true" stored="true"/> <dynamicField name="*_ds" type="double" indexed="true" stored="true" multiValued="true"/> <!-- Type used to index the lat and lon components for the "location" FieldType --> <dynamicField name="*_coordinate" type="tdouble" indexed="true" stored="false"/> <dynamicField name="*_dt" type="date" indexed="true" stored="true"/> <dynamicField name="*_dts" type="date" indexed="true" stored="true" multiValued="true"/> <dynamicField name="*_p" type="location" 
indexed="true" stored="true"/> <!-- some trie-coded dynamic fields for faster range queries --> <dynamicField name="*_ti" type="tint" indexed="true" stored="true"/> <dynamicField name="*_tl" type="tlong" indexed="true" stored="true"/> <dynamicField name="*_tf" type="tfloat" indexed="true" stored="true"/> <dynamicField name="*_td" type="tdouble" indexed="true" stored="true"/> <dynamicField name="*_tdt" type="tdate" indexed="true" stored="true"/> <dynamicField name="*_c" type="currency" indexed="true" stored="true"/> <dynamicField name="ignored_*" type="ignored" multiValued="true"/> <dynamicField name="attr_*" type="text_general" indexed="true" stored="true" multiValued="true"/> <dynamicField name="random_*" type="random"/> <!-- uncomment the following to ignore any fields that don't already match an existing field name or dynamic field, rather than reporting them as an error. alternately, change the type="ignored" to some other type e.g. "text" if you want unknown fields indexed and/or stored by default --> <!--dynamicField name="*" type="ignored" multiValued="true" /--> <!-- Field to use to determine and enforce document uniqueness. Unless this field is marked with required="false", it will be a required field --> <!-- 这是主键唯一id,修改成了_id --> <uniqueKey>_id</uniqueKey> <!-- copyField commands copy one field to another at the time a document is added to the index. It's used either to index the same field differently, or to add multiple fields to the same field for easier/faster searching. --> <copyField source="title" dest="searchText" /> <copyField source="item_title" dest="searchText" /> <!-- <copyField source="title" dest="text"/> <copyField source="body" dest="text"/> --> <!-- field type definitions. The "name" attribute is just a label to be used by field definitions. The "class" attribute and any other attributes determine the real behavior of the fieldType. 
Class names starting with "solr" refer to java classes in a standard package such as org.apache.solr.analysis --> <!-- The StrField type is not analyzed, but indexed/stored verbatim. It supports doc values but in that case the field needs to be single-valued and either required or have a default value. --> <fieldType name="string" class="solr.StrField" sortMissingLast="true"/> <!-- boolean type: "true" or "false" --> <fieldType name="boolean" class="solr.BoolField" sortMissingLast="true"/> <!-- sortMissingLast and sortMissingFirst attributes are optional attributes are currently supported on types that are sorted internally as strings and on numeric types. This includes "string","boolean", and, as of 3.5 (and 4.x), int, float, long, date, double, including the "Trie" variants. - If sortMissingLast="true", then a sort on this field will cause documents without the field to come after documents with the field, regardless of the requested sort order (asc or desc). - If sortMissingFirst="true", then a sort on this field will cause documents without the field to come before documents with the field, regardless of the requested sort order. - If sortMissingLast="false" and sortMissingFirst="false" (the default), then default lucene sorting will be used which places docs without the field first in an ascending sort and last in a descending sort. --> <!-- Default numeric field types. For faster range queries, consider the tint/tfloat/tlong/tdouble types. These fields support doc values, but they require the field to be single-valued and either be required or have a default value. 
--> <fieldType name="int" class="solr.TrieIntField" precisionStep="0" positionIncrementGap="0"/> <fieldType name="float" class="solr.TrieFloatField" precisionStep="0" positionIncrementGap="0"/> <fieldType name="long" class="solr.TrieLongField" precisionStep="0" positionIncrementGap="0"/> <fieldType name="double" class="solr.TrieDoubleField" precisionStep="0" positionIncrementGap="0"/> <!-- Numeric field types that index each value at various levels of precision to accelerate range queries when the number of values between the range endpoints is large. See the javadoc for NumericRangeQuery for internal implementation details. Smaller precisionStep values (specified in bits) will lead to more tokens indexed per value, slightly larger index size, and faster range queries. A precisionStep of 0 disables indexing at different precision levels. --> <fieldType name="tint" class="solr.TrieIntField" precisionStep="8" positionIncrementGap="0"/> <fieldType name="tfloat" class="solr.TrieFloatField" precisionStep="8" positionIncrementGap="0"/> <fieldType name="tlong" class="solr.TrieLongField" precisionStep="8" positionIncrementGap="0"/> <fieldType name="tdouble" class="solr.TrieDoubleField" precisionStep="8" positionIncrementGap="0"/> <!-- The format for this date field is of the form 1995-12-31T23:59:59Z, and is a more restricted form of the canonical representation of dateTime http://www.w3.org/TR/xmlschema-2/#dateTime The trailing "Z" designates UTC time and is mandatory. Optional fractional seconds are allowed: 1995-12-31T23:59:59.999Z All other components are mandatory. Expressions can also be used to denote calculations that should be performed relative to "NOW" to determine the value, ie... NOW/HOUR ... Round to the start of the current hour NOW-1DAY ... Exactly 1 day prior to now NOW/DAY+6MONTHS+3DAYS ... 6 months and 3 days in the future from the start of the current day Consult the TrieDateField javadocs for more information. 
Note: For faster range queries, consider the tdate type --> <fieldType name="date" class="solr.TrieDateField" precisionStep="0" positionIncrementGap="0"/> <!-- A Trie based date field for faster date range queries and date faceting. --> <fieldType name="tdate" class="solr.TrieDateField" precisionStep="6" positionIncrementGap="0"/> <!--Binary data type. The data should be sent/retrieved in as Base64 encoded Strings --> <fieldType name="binary" class="solr.BinaryField"/> <!-- The "RandomSortField" is not used to store or search any data. You can declare fields of this type it in your schema to generate pseudo-random orderings of your docs for sorting or function purposes. The ordering is generated based on the field name and the version of the index. As long as the index version remains unchanged, and the same field name is reused, the ordering of the docs will be consistent. If you want different psuedo-random orderings of documents, for the same version of the index, use a dynamicField and change the field name in the request. --> <fieldType name="random" class="solr.RandomSortField" indexed="true"/> <!-- solr.TextField allows the specification of custom text analyzers specified as a tokenizer and a list of token filters. Different analyzers may be specified for indexing and querying. The optional positionIncrementGap puts space between multiple fields of this type on the same document, with the purpose of preventing false phrase matching across fields. For more info on customizing your analyzer chain, please see http://wiki.apache.org/solr/AnalyzersTokenizersTokenFilters --> <!-- One can also specify an existing Analyzer class that has a default constructor via the class attribute on the analyzer element. 
Example: <fieldType name="text_greek" class="solr.TextField"> <analyzer class="org.apache.lucene.analysis.el.GreekAnalyzer"/> </fieldType> --> <!-- A text field that only splits on whitespace for exact matching of words --> <fieldType name="text_ws" class="solr.TextField" positionIncrementGap="100"> <analyzer> <tokenizer class="solr.WhitespaceTokenizerFactory"/> </analyzer> </fieldType> <!-- A general text field that has reasonable, generic cross-language defaults: it tokenizes with StandardTokenizer, removes stop words from case-insensitive "stopwords.txt" (empty by default), and down cases. At query time only, it also applies synonyms. --> <fieldType name="text_general" class="solr.TextField" positionIncrementGap="100"> <analyzer type="index"> <tokenizer class="solr.StandardTokenizerFactory"/> <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/> <!-- in this example, we will only use synonyms at query time <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/> --> <filter class="solr.LowerCaseFilterFactory"/> </analyzer> <analyzer type="query"> <tokenizer class="solr.StandardTokenizerFactory"/> <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/> <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/> <filter class="solr.LowerCaseFilterFactory"/> </analyzer> </fieldType> <!-- A text field with defaults appropriate for English: it tokenizes with StandardTokenizer, removes English stop words (lang/stopwords_en.txt), down cases, protects words from protwords.txt, and finally applies Porter's stemming. The query time analyzer also applies synonyms from synonyms.txt. 
--> <fieldType name="text_en" class="solr.TextField" positionIncrementGap="100"> <analyzer type="index"> <tokenizer class="solr.StandardTokenizerFactory"/> <!-- in this example, we will only use synonyms at query time <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/> --> <!-- Case insensitive stop word removal. --> <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_en.txt" /> <filter class="solr.LowerCaseFilterFactory"/> <filter class="solr.EnglishPossessiveFilterFactory"/> <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/> <!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory: <filter class="solr.EnglishMinimalStemFilterFactory"/> --> <filter class="solr.PorterStemFilterFactory"/> </analyzer> <analyzer type="query"> <tokenizer class="solr.StandardTokenizerFactory"/> <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/> <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_en.txt" /> <filter class="solr.LowerCaseFilterFactory"/> <filter class="solr.EnglishPossessiveFilterFactory"/> <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/> <!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory: <filter class="solr.EnglishMinimalStemFilterFactory"/> --> <filter class="solr.PorterStemFilterFactory"/> </analyzer> </fieldType> <!-- A text field with defaults appropriate for English, plus aggressive word-splitting and autophrase features enabled. This field is just like text_en, except it adds WordDelimiterFilter to enable splitting and matching of words on case-change, alpha numeric boundaries, and non-alphanumeric chars. This means certain compound word cases will work, for example query "wi fi" will match document "WiFi" or "wi-fi". 
--> <fieldType name="text_en_splitting" class="solr.TextField" positionIncrementGap="100" autoGeneratePhraseQueries="true"> <analyzer type="index"> <tokenizer class="solr.WhitespaceTokenizerFactory"/> <!-- in this example, we will only use synonyms at query time <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/> --> <!-- Case insensitive stop word removal. --> <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_en.txt" /> <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/> <filter class="solr.LowerCaseFilterFactory"/> <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/> <filter class="solr.PorterStemFilterFactory"/> </analyzer> <analyzer type="query"> <tokenizer class="solr.WhitespaceTokenizerFactory"/> <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/> <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_en.txt" /> <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/> <filter class="solr.LowerCaseFilterFactory"/> <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/> <filter class="solr.PorterStemFilterFactory"/> </analyzer> </fieldType> <!-- Less flexible matching, but less false matches. Probably not ideal for product names, but may be good for SKUs. Can insert dashes in the wrong place and still match. 
--> <fieldType name="text_en_splitting_tight" class="solr.TextField" positionIncrementGap="100" autoGeneratePhraseQueries="true"> <analyzer> <tokenizer class="solr.WhitespaceTokenizerFactory"/> <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/> <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_en.txt"/> <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/> <filter class="solr.LowerCaseFilterFactory"/> <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/> <filter class="solr.EnglishMinimalStemFilterFactory"/> <!-- this filter can remove any duplicate tokens that appear at the same position - sometimes possible with WordDelimiterFilter in conjuncton with stemming. --> <filter class="solr.RemoveDuplicatesTokenFilterFactory"/> </analyzer> </fieldType> <!-- Just like text_general except it reverses the characters of each token, to enable more efficient leading wildcard queries. 
--> <fieldType name="text_general_rev" class="solr.TextField" positionIncrementGap="100"> <analyzer type="index"> <tokenizer class="solr.StandardTokenizerFactory"/> <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/> <filter class="solr.LowerCaseFilterFactory"/> <filter class="solr.ReversedWildcardFilterFactory" withOriginal="true" maxPosAsterisk="3" maxPosQuestion="2" maxFractionAsterisk="0.33"/> </analyzer> <analyzer type="query"> <tokenizer class="solr.StandardTokenizerFactory"/> <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/> <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/> <filter class="solr.LowerCaseFilterFactory"/> </analyzer> </fieldType> <!-- This is an example of using the KeywordTokenizer along With various TokenFilterFactories to produce a sortable field that does not include some properties of the source text --> <fieldType name="alphaOnlySort" class="solr.TextField" sortMissingLast="true" omitNorms="true"> <analyzer> <!-- KeywordTokenizer does no actual tokenizing, so the entire input string is preserved as a single token --> <tokenizer class="solr.KeywordTokenizerFactory"/> <!-- The LowerCase TokenFilter does what you expect, which can be when you want your sorting to be case insensitive --> <filter class="solr.LowerCaseFilterFactory"/> <!-- The TrimFilter removes any leading or trailing whitespace --> <filter class="solr.TrimFilterFactory"/> <!-- The PatternReplaceFilter gives you the flexibility to use Java Regular expression to replace any sequence of characters matching a pattern with an arbitrary replacement string, which may include back references to portions of the original string matched by the pattern. See the Java Regular Expression documentation for more information on pattern and replacement string syntax. 
http://docs.oracle.com/javase/7/docs/api/java/util/regex/package-summary.html --> <filter class="solr.PatternReplaceFilterFactory" pattern="([^a-z])" replacement="" replace="all" /> </analyzer> </fieldType> <!-- lowercases the entire field value, keeping it as a single token. --> <fieldType name="lowercase" class="solr.TextField" positionIncrementGap="100"> <analyzer> <tokenizer class="solr.KeywordTokenizerFactory"/> <filter class="solr.LowerCaseFilterFactory"/> </analyzer> </fieldType> <!-- since fields of this type are by default not stored or indexed, any data added to them will be ignored outright. --> <fieldType name="ignored" stored="false" indexed="false" multiValued="true" class="solr.StrField"/> <!-- This point type indexes the coordinates as separate fields (subFields) If subFieldType is defined, it references a type, and a dynamic field definition is created matching *___<typename>. Alternately, if subFieldSuffix is defined, that is used to create the subFields. Example: if subFieldType="double", then the coordinates would be indexed in fields myloc_0___double,myloc_1___double. Example: if subFieldSuffix="_d" then the coordinates would be indexed in fields myloc_0_d,myloc_1_d The subFields are an implementation detail of the fieldType, and end users normally should not need to know about them. --> <fieldType name="point" class="solr.PointType" dimension="2" subFieldSuffix="_d"/> <!-- A specialized field for geospatial search. If indexed, this fieldType must not be multivalued. --> <fieldType name="location" class="solr.LatLonType" subFieldSuffix="_coordinate"/> <!-- An alternative geospatial field type new to Solr 4. It supports multiValued and polygon shapes. 
For more information about this and other Spatial fields new to Solr 4, see: http://wiki.apache.org/solr/SolrAdaptersForLuceneSpatial4 --> <fieldType name="location_rpt" class="solr.SpatialRecursivePrefixTreeFieldType" geo="true" distErrPct="0.025" maxDistErr="0.001" distanceUnits="kilometers"/> <!-- Spatial rectangle (bounding box) field. It supports most spatial predicates, and has special relevancy modes: score=overlapRatio|area|area2D (local-param to the query). DocValues is recommended for relevancy. --> <fieldType name="bbox" class="solr.BBoxField" geo="true" distanceUnits="kilometers" numberType="_bbox_coord"/> <fieldType name="_bbox_coord" class="solr.TrieDoubleField" precisionStep="8" docValues="true" stored="false"/> <!-- Money/currency field type. See http://wiki.apache.org/solr/MoneyFieldType Parameters: defaultCurrency: Specifies the default currency if none specified. Defaults to "USD" precisionStep: Specifies the precisionStep for the TrieLong field used for the amount providerClass: Lets you plug in other exchange provider backend: solr.FileExchangeRateProvider is the default and takes one parameter: currencyConfig: name of an xml file holding exchange rates solr.OpenExchangeRatesOrgProvider uses rates from openexchangerates.org: ratesFileLocation: URL or path to rates JSON file (default latest.json on the web) refreshInterval: Number of minutes between each rates fetch (default: 1440, min: 60) --> <fieldType name="currency" class="solr.CurrencyField" precisionStep="8" defaultCurrency="USD" currencyConfig="currency.xml"/> <fieldType name="text_ik" class="solr.TextField"> <analyzer type="index"> <tokenizer class="org.wltea.analyzer.lucene.IKTokenizerFactory" useSmart="false" conf="ik.conf"/> <filter class="solr.LowerCaseFilterFactory"/> </analyzer> <analyzer type="query"> <tokenizer class="org.wltea.analyzer.lucene.IKTokenizerFactory" useSmart="true" conf="ik.conf"/> <filter class="solr.LowerCaseFilterFactory"/> </analyzer> </fieldType></schema>

05

2018/11

Linux 磁盘扩容踩坑总结

html

Liunx 磁盘扩容踩坑总结查看当前磁盘使用[root@iZ2zebzrd1fwcb8kj2g8xiZ ~]# df -hFilesystem Size Used Avail Use% Mounted on/dev/xvda1 40G 36G 2.8G 93% /tmpfs 1003M 0 1003M 0% /dev/shm/dev/xvdb1 20G 16G 2.8G 86% /home/www/dev/xvdc 15G 15G 0 100% /home/svn我们在阿里云扩容/dev/xvdc 这个盘之后查看磁盘详解[root@iZ2zebzrd1fwcb8kj2g8xiZ ~]# fdisk -lDisk /dev/xvda: 42.9 GB, 42949672960 bytes255 heads, 63 sectors/track, 5221 cylindersUnits = cylinders of 16065 * 512 = 8225280 bytesSector size (logical/physical): 512 bytes / 512 bytesI/O size (minimum/optimal): 512 bytes / 512 bytesDisk identifier: 0x00071419 Device Boot Start End Blocks Id System/dev/xvda1 * 1 5222 41940992 83 LinuxDisk /dev/xvdb: 64.4 GB, 64424509440 bytes255 heads, 63 sectors/track, 7832 cylindersUnits = cylinders of 16065 * 512 = 8225280 bytesSector size (logical/physical): 512 bytes / 512 bytesI/O size (minimum/optimal): 512 bytes / 512 bytesDisk identifier: 0x453417b4 Device Boot Start End Blocks Id System/dev/xvdb1 1 2610 20964793+ 83 LinuxDisk /dev/xvdc: 32.2 GB, 32212254720 bytes255 heads, 63 sectors/track, 3916 cylindersUnits = cylinders of 16065 * 512 = 8225280 bytesSector size (logical/physical): 512 bytes / 512 bytesI/O size (minimum/optimal): 512 bytes / 512 bytesDisk identifier: 0x00000000Disk /dev/xvdc doesn't contain a valid partition table强制检查磁盘使用情况[root@iZ2zebzrd1fwcb8kj2g8xiZ ~]# e2fsck -f /dev/xvdce2fsck 1.41.12 (17-May-2010)Pass 1: Checking inodes, blocks, and sizesPass 2: Checking directory structurePass 3: Checking directory connectivity/lost+found not found. Create<y>? 
yesPass 4: Checking reference countsPass 5: Checking group summary information/dev/xvdc: ***** FILE SYSTEM WAS MODIFIED *****/dev/xvdc: 343096/983040 files (3.1% non-contiguous), 3788579/3932160 blocks重定义磁盘大小[root@iZ2zebzrd1fwcb8kj2g8xiZ ~]# resize2fs /dev/xvdcresize2fs 1.41.12 (17-May-2010)Resizing the filesystem on /dev/xvdc to 7864320 (4k) blocks.The filesystem on /dev/xvdc is now 7864320 blocks long.重新挂载光驱[root@iZ2zebzrd1fwcb8kj2g8xiZ ~]# mount /dev/xvdc /home/svn查询磁盘使用情况[root@iZ2zebzrd1fwcb8kj2g8xiZ ~]# df -hFilesystem Size Used Avail Use% Mounted on/dev/xvda1 40G 36G 2.8G 93% /tmpfs 1003M 0 1003M 0% /dev/shm/dev/xvdb1 20G 16G 2.8G 86% /home/www/dev/xvdc 30G 15G 14G 51% /home/svn注:磁盘格式一定要用ext4ext3文件系统一级子目录的个数默认为32000个,去掉.目录(代表当前目录)和..目录(代表上级目录),实际只能建31998个,ext3文件系统下单个目录里的最大文件数无特别的限制,是受限于所在文件系统的inode数

21

2018/09

yii2 console MySQL server

html

yii2 console MySQL server has gone away经过我的不屑努力和研究终于得到解决方案<?phpnamespace common\lib;class Command extends \yii\db\Command{ public function execute() { try { return parent::execute(); } catch (\yii\db\Exception $e) { if ($e->errorInfo[1] == 2006 || $e->errorInfo[1] == 2013) { $this->db->close(); $this->db->open(); $this->pdoStatement = null ; return parent::execute(); }else{ throw $e; } } } protected function queryInternal($method, $fetchMode = null){ try { return parent::queryInternal($method, $fetchMode); } catch (\yii\db\Exception $e) { if ($e->errorInfo[1] == 2006 || $e->errorInfo[1] == 2013) { $this->db->close(); $this->db->open(); $this->pdoStatement = null ; return parent::queryInternal($method, $fetchMode); }else{ throw $e; } } }}'db' => [ 'class' => 'yii\db\Connection', 'dsn' => 'mysql:host='.$db_config['host'].';dbname='.$db_config['dbname'], 'username' => $db_config['username'], 'password' => $db_config['password'], 'charset' => 'utf8', 'commandClass'=>"\\common\\lib\\Command", //<==注意这里要替换],<!--more-->

18

2018/09

Yii2 ElasticSearch的使用

html

Yii2 ElasticSearch的使用Yii2 elasticSearch – 配置配置部分如下:'elasticsearch' => [ 'class' => 'yii\elasticsearch\Connection', 'nodes' => [ ['http_address' => '192.168.0.199:9200'], ['http_address' => '192.168.0.210:9200'], ], ],您配置了es的集群,那么需要在http_address中把每一个节点的ip都要配置上,我只有两个节点,那么,我只写了两个IP。这样就完成了在Yii2中es的配置。 <!--more-->yii2 elasticSearch - model<?phpnamespace flow\models\elasticsearch;use \yii\elasticsearch\ActiveRecord;class IndexElasticSearch extends ActiveRecord{ public static $indexIndex; public $_dbName; public static function getDb() { return \Yii::$app->get('elasticsearch'); } /** * Description: 定义字段映射的方法 * Author: JiaMeng <666@majiameng.com> * Updater: * @param $data */ public function map($data){ foreach($data as $k=>$v){ if(in_array($k,$this->attributes())){ $this->$k = $v; } } } //db public static function index() { return 'index'; } //table public static function type() { return 'index'; } // 属性 public function attributes() { $mapConfig = self::mapConfig(); return array_keys($mapConfig['properties']); } // mapping配置 public static function mapConfig(){ return [ 'properties' => [ 'id' => ['type' => 'integer', "index" => true], 'title' => ['type' => 'text', "index" => true,"analyzer"=>'ik_max_word','search_analyzer'=>'ik_max_word'],//ik中文分词 'type' => ['type' => 'integer', "index" => true], 'inputtime' => ['type' => 'integer', "index" => true],//文章创建时间 'updatetime' => ['type' => 'integer', "index" => true],//文章更新时间 'content' => ['type' => 'text',"index" => true,"analyzer"=>'ik_max_word','search_analyzer'=>'ik_max_word'],//文章内容 'other' => ['type' => 'text',"index" => false],//'"index" => false' 不进行分词 ] ]; } public static function mapping() { return [ static::type() => self::mapConfig(), ]; } /** * Set (update) mappings for this model */ public static function updateMapping(){ $db = self::getDb(); $command = $db->createCommand(); if(!$command->indexExists(self::index())){ $command->createIndex(self::index()); } $command->setMapping(self::index(), self::type(), 
self::mapping()); } public static function getMapping(){ $db = self::getDb(); $command = $db->createCommand(); return $command->getMapping(); } /** * Description: 保存数据 * Author: JiaMeng <666@majiameng.com> * Updater: * @param $params * @return self */ static public function edit($params){ /** 查询当前id是否被被使用 */ $id = $params['type'].'_'.$params['id']; $query = [ "match" => [ '_id' => $id ] ]; $elastic = self::find()->query($query)->one(); if(empty($elastic)){ /** 添加数据 */ $elastic = new self(); $elastic->primaryKey = $id; } $elastic->map($params); if(!$elastic->save()){ echo array_values($askimg->firstErrors)[0]; } return $elastic; }}yii2 elasticSearch - search搜索 $must = []; //根据keyword搜索关键词 if(!empty($keyword)){ $must[] = [ "multi_match" => [//分词多字段搜索 'query' => $keyword, 'fields' => ['title','comments'],//搜索的字段 ], ]; } //根据type精确搜索 if(!empty($type)){ $must[] = [ "term" => [ 'type' => $type ] ] } //根据多条id精确搜索(类似于mysql的in) $ids = [1,2,3,4]; if(!empty($ids)){ $must[] = [ "terms" => [ 'id' => $ids ] ] } $query = [ 'bool'=>[ 'must'=>$must ], ]; $this->page = 1; $this->pageSize = 10; $searchModel = IndexElasticSearch::find() ->query($query); $elastic = $searchModel ->orderBy('id desc') ->offset(($this->page-1)*$this->pageSize) ->limit($this->page*$this->pageSize) ->asArray()->all(); return $elastic;1.清除ElasticSearch所有数据curl -v -X DELETE http://127.0.0.1:9200/_all

17

2018/08

ElasticSearch的安装

html

ElasticSearch 是一款优秀的搜索引擎,用java编写,restful接口的方式进行对接。1.安装java环境java环境的安装<!--more-->2.1安装ElasticSearchcd /tools wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-6.1.3.zip unzip elasticsearch-6.1.3.zip mv ./elasticsearch-6.1.3 /usr/local/elasticsearch cd /usr/local groupadd elasticsearch useradd -g elasticsearch elasticsearch chown elasticsearch:elasticsearch -R elasticsearch上面安装的是es6.2.2 设置开机启动: 我是本地,直接把iptables 关掉了vim /etc/rc.d/rc.local service iptables stop su elasticsearch -c "/usr/local/elasticsearch/bin/elasticsearch -d"一定要注意,elasticSearch不能用root账户启动,elasticSearch不能用root账户启动,elasticSearch不能用root账户启动,重要的说三遍,我这里用的是我新建的elasticsearch账户开机启动报错:$./bin/elasticsearchJava HotSpot(TM) 64-Bit Server VM warning: INFO: os::commit_memory(0x0000000085330000, 2060255232, 0) failed; error='Cannot allocate memory' (errno=12)## There is insufficient memory for the Java Runtime Environment to continue.# Native memory allocation (mmap) failed to map 2060255232 bytes for committing reserved memory.# An error report file with more information is saved as:# /data/elasticsearch-5.2.2/hs_err_pid26945.log解决方案:调小启动内存# vi /usr/local/elasticsearch/config/jvm.options#-Xms2g#-Xmx2g-Xms256m-Xmx64m上面设置的分配的内存的最大值为256MB和最小值64mb,您可以根据自己的机器情况设置内存大小。重新启动即可。2.3 配置vim /usr/local/elasticsearch/config/elasticsearch.yml修改如下:cluster.name: TA-application node.name: node-210 network.host: 192.168.0.210其中cluster.name 是集群名称,这个不要使用默认的,要修改,去掉注释,如果有多个机器,加入同一个集群,那么这个值必须一样noide.name 是集群里面每个节点的值,也就是当前机器的节点的值,这个值,每个节点要不一样。network host 改成当前的内网ip下面的部分是elasticsearch 2 部分的插件,在es6中已经不可用,es6的可视化GUI,请查看:http://www.fecshop.com/topic/6682.4 查看:http://192.168.0.210:9200/2.5 集群设置如果想要建立一个elasticSearch集群,可以按照下面的步骤,非常的简单,首先,想说明的是:对于elasticSearch,他隐藏了分布式的复杂性,分片和复制集,都是他自动完成,你只需要配置好ip就可以了,下面是配置的步骤:我有两台机器 192.169.0.210 192.168.0.199我的两台机器都按照上面的步骤配置完成,下面配置集群首先是192.168.0.210vim /usr/local/elasticsearch/config/elasticsearch.yml找到行 , 修改如下:discovery.zen.ping.unicast.hosts: 
["192.168.0.199"]上面的ip就是其他的节点的ip,如果我有5台机器,那么,这里需要把其他四台机器的ip写上。同理,对于其他的节点,需要把其他的节点协商,用逗号隔开elasticSearch会找到对应的节点,自动分片和做复制集。2.6 安装ik插件(中文分词使用)报错原因我使用的Elasticsearch是6.2.2版本,按照学校文档创建字段时,使用了{“type”:”string”,”index”:”not_analyzed”}。原因分析检查拼写无误之后,我决定去查Elasticsearch新版本特性,因为之前也踩过head插件的安装方法的坑,就是因为版本问题。果不其然,Elasticsearch从5.X就引入了text和keyword,其中keyword适用于不分词字段,搜索时只能完全匹配,这时string还保留着。到了6.X就彻底移除string了。另外,”index”的值只能是boolean变量了。解决方法{“type”:”text”,”index”:false}

15

2018/08

ffmpeg生成视频缩略图

html

php使用ffmpeg生成视频缩略图/** * Name: ffmpeg.php. * Author: JiaMeng <666@majiameng.com> * Date: 2018/7/12 14:39 * Description: ffmpeg.php. */class ffmpeg{ const FFMPEG_COVER_COMMAND = '/usr/local/ffmpeg/bin/ffmpeg -i "%s" -y -f mjpeg -ss %s -t 0.001 -s "%s" "%s"';//获取视频封面 /** * Description: 获取视频封面 * Author: JiaMeng <666@majiameng.com> * @param string $file 视频文件路径 * @param string $videoCoverName 生成的封面文件路径 * @param string $resolution 分辨率 * @param int $time 截取封面图的开始时间 * @return bool */ static public function getVideoCover($file,$videoCoverName,$resolution='320x240',$time=1){ $command = sprintf(self::FFMPEG_COVER_COMMAND, $file,$time,$resolution,$videoCoverName); exec($command.' 2>&1 ',$output, $status); if($status == 0){ return true;//生成成功 } echo '生成视频封面:请检查文件上传目录权限,error:'.implode(',',$output));//输出异常 }<!--more-->

13

2018/07

ffmpeg获取视频信息

html

php使用ffmpeg获取视频信息可以获取到视频的分辨率、文件大小、播放时长、编码格式、视频格式、音频编码、音频采样频率、等…..废话不多说,直接上代码…<!--more-->/** * Name: ffmpeg.php. * Author: JiaMeng <666@majiameng.com> * Date: 2018/7/12 14:39 * Description: ffmpeg.php. */class ffmpeg{ const FFMPEG_COMMAND = '/usr/local/ffmpeg/bin/ffmpeg -i %s 2>&1';//操作ffmpeg命令 /** * Description: 获取视频信息 * Author: JiaMeng <666@majiameng.com> * @param string $file 视频文件路径 * @return array */ static function video_info($file) { /** 通过使用输出缓冲,获取到ffmpeg所有输出的内容 */ ob_start(); passthru(sprintf(self::FFMPEG_COMMAND, $file)); $info = ob_get_contents(); ob_end_clean(); $result = array(); // Duration: 01:24:12.73, start: 0.000000, bitrate: 456 kb/s if (preg_match("/Duration: (.*?), start: (.*?), bitrate: (\d*) kb\/s/", $info, $match)) { $result['duration'] = $match[1]; // 提取出播放时间 $da = explode(':', $match[1]); $result['seconds'] = $da[0] * 3600 + $da[1] * 60 + $da[2]; // 转换为秒 $result['start'] = $match[2]; // 开始时间 $result['bitrate'] = $match[3]; // bitrate 码率 单位 kb } // Stream #0.1: Video: rv40, yuv420p, 512x384, 355 kb/s, 12.05 fps, 12 tbr, 1k tbn, 12 tbc if (preg_match("/Video: (.*?), (.*?), (.*?), (.*?)[,\s]/", $info, $match)) { $result['vcodec'] = $match[1]; // 编码格式 $result['vformat'] = $match[2]; // 视频格式 $result['resolution'] = $match[3]; // 分辨率 if(strpos($result['resolution'],'x') === false){ // Stream #0:0(und): Video: h264 (Constrained Baseline) (avc1 / 0x31637661), yuv420p(tv, smpte170m/smpte170m/bt709), 320x240, 80 kb/s, 29.65 fps, 29.97 tbr, 90k tbn, 59.31 tbc (default) $result['resolution'] = $match[4]; // 分辨率 } $a = explode('x', $result['resolution']); $result['width'] = $a[0]; $result['height'] = $a[1]; } // Stream #0.0: Audio: cook, 44100 Hz, stereo, s16, 96 kb/s if (preg_match("/Audio: (\w*), (\d*) Hz/", $info, $match)) { $result['acodec'] = $match[1]; // 音频编码 $result['asamplerate'] = $match[2]; // 音频采样频率 } if (isset($result['seconds']) && isset($result['start'])) { $result['play_time'] = $result['seconds'] + $result['start']; // 实际播放时间 } 
$result['size'] = filesize($file); // 文件大小 return $result; }}怎么用这个类就不用我说了把

12

2018/07

在Linux上安装Java环境JDK

html

java Jdk的安装安装Java环境首先检测是否安装javajava -version echo $JAVA_HOME如果java的版本过低,建议安装高版本,下面安装的是java 1.8cd /opt/ wget --no-cookies --no-check-certificate --header "Cookie: gpw_e24=http%3A%2F%2Fwww.oracle.com%2F; oraclelicense=accept-securebackup-cookie" "http://download.oracle.com/otn-pub/java/jdk/8u66-b17/jdk-8u66-linux-x64.tar.gz" tar xzf jdk-8u66-linux-x64.tar.gzcd /opt/jdk1.8.0_66/ alternatives --install /usr/bin/java java /opt/jdk1.8.0_66/bin/java 2 alternatives --config java运行了上面的,会初选一个选择的地方,我的机器显示:There are 3 programs which provide 'java'. Selection Command ----------------------------------------------- 1 /opt/jdk1.7.0_71/bin/java +2 /opt/jdk1.8.0_45/bin/java 3 /opt/jdk1.8.0_51/bin/java 4 /opt/jdk1.8.0_66/bin/java Enter to keep the current selection[+], or type selection number: 4我们安装的是jdk1.8.0.66 所以,我选择的是4,这个看具体情况,jdk1.8.0.66 是第几个,就选择那个数字。alternatives --install /usr/bin/jar jar /opt/jdk1.8.0_66/bin/jar 2 alternatives --install /usr/bin/javac javac /opt/jdk1.8.0_66/bin/javac 2 alternatives --set jar /opt/jdk1.8.0_66/bin/jar alternatives --set javac /opt/jdk1.8.0_66/bin/javac安装完成,检查版本java -version java version "1.8.0_66" Java(TM) SE Runtime Environment (build 1.8.0_66-b17) Java HotSpot(TM) 64-Bit Server VM (build 25.66-b17, mixed mode)保存到文件 /etc/environment中,当服务器重启的时候加载:vi /etc/profile export JAVA_HOME=/opt/jdk1.8.0_66 export JRE_HOME=/opt/jdk1.8.0_66/jre export PATH=$PATH:/opt/jdk1.8.0_66/bin:/opt/jdk1.8.0_66/jre/bin重启linuxreboot查看是否安装成功java -version echo $JAVA_HOME

09

2018/05

solr的安装

html

安装solr1. 将 solr 压缩包解压,并将solr-5.3.0\server\solr-webap文件夹下有webapp文件夹,将之复制到Tomcat\webapps\目录下,并改成solrcp -r solr-5.3.0/server/solr-webap/webapp/ /usr/local/tomcat/webapps/solr2. 将 solr 压缩包中 solr-5.3.0\server\lib\ext 中的 jar 全部复制到 Tomcat\ webapps\solr\WEB-INF\lib 目录中cp solr-5.3.0/server/lib/ext/* /usr/local/tomcat/webapps/solr/WEB-INF/lib3. 将 solr 压缩包中 solr-5.3.0/server/resources/log4j.properties 复制到Tomcat\ webapps\solr\WEB-INF\lib 目录中cp solr-5.3.0/server/resources/log4j.properties /usr/local/tomcat/webapps/solr/WEB-INF/lib4. 创建一个<code> /usr/local/solr/solrhome </code> 的目录,并将 solr 压缩包中<code> solr-5.3.0/server/solr/ </code>目录所遇文件复制<code> /usr/local/solr/solrhome </code>目录下cp -r solr-5.3.0/server/solr/* /usr/local/solr/solrhome5. 打开/usr/local/tomcat/webapps/solr/WEB-INF下的web.xml,增加如下配置内容(初始状态下该内容是被注释掉的,第40多行): <env-entry> <env-entry-name>solr/home</env-entry-name> <env-entry-value>/usr/local/solr/solrhome</env-entry-value> <env-entry-type>java.lang.String</env-entry-type> </env-entry>6.访问页面 <code> http://localhost:8080/solr </code>, 查看core Admin 手动加一个mycore, ‘mycore1’ 就显示出来了7. 创建mycore库在/usr/local/solr/solrhome/ 下创建 mycore 文件夹将/usr/local/solr/solrhome/configsets/basic_configs/comf 复制到 mycore 文件夹里面8.Solr IK 中文分词的配置下载solr ik分词jar包移动到solr/WEB-INF/lib目录cp ik-analyzer-solr7-7.x.jar /usr/local/tomcat/webapps/solr/WEB-INF/lib修改配置solrhome/mycore/conf/schema.xml文件添加<fieldType name="text_ik" class="solr.TextField"> <analyzer type="index"> <tokenizer class="org.wltea.analyzer.lucene.IKTokenizerFactory" useSmart="false" conf="ik.conf"/> <filter class="solr.LowerCaseFilterFactory"/> </analyzer> <analyzer type="query"> <tokenizer class="org.wltea.analyzer.lucene.IKTokenizerFactory" useSmart="true" conf="ik.conf"/> <filter class="solr.LowerCaseFilterFactory"/> </analyzer></fieldType>注: 修改配置文件需要重启服务

09

2018/05