commit 6c18fe434d0140dc33e6b0df93070a609f77d78d
Author: Meng Sen
Date:   Wed Jul 17 16:30:13 2024 +0800

    Merge commit

    Signed-off-by: 萌森

diff --git a/.github/renovate.json b/.github/renovate.json
new file mode 100644
index 00000000..0e298060
--- /dev/null
+++ b/.github/renovate.json
@@ -0,0 +1,101 @@
+{
+  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
+  "extends": [
+    "config:base"
+  ],
+  "gitIgnoredAuthors": [
+    "githubaction@githubaction.com"
+  ],
+  "rebaseWhen": "never",
+  "packageRules": [
+    {
+      "packageNames": [
+        "docker-compose"
+      ],
+      "automerge": true
+    },
+    {
+      "matchFileNames": [
+        "apps/postgresql/pg*/*.yml"
+      ],
+      "allowedVersions": "/^pg14*/"
+    },
+    {
+      "matchFileNames": [
+        "apps/postgresql/pg*/*.yml"
+      ],
+      "allowedVersions": "/^pg15*/"
+    },
+    {
+      "matchFileNames": [
+        "apps/postgresql/pg*/*.yml"
+      ],
+      "allowedVersions": "/^pg16*/"
+    },
+    {
+      "matchFileNames": [
+        "apps/redis/6.2.*/*.yml"
+      ],
+      "allowedVersions": "/^6.2.*/"
+    },
+    {
+      "matchFileNames": [
+        "apps/redis/7.2.*/*.yml"
+      ],
+      "allowedVersions": "/^7.2.*/"
+    },
+    {
+      "matchFileNames": [
+        "apps/mysql/5.7.*/*.yml"
+      ],
+      "allowedVersions": "/^5.7.*/"
+    },
+    {
+      "matchFileNames": [
+        "apps/mysql/8.*/*.yml"
+      ],
+      "allowedVersions": "/^8.*/"
+    },
+    {
+      "matchFileNames": [
+        "apps/qbittorrent/4.*.*/*.yml"
+      ],
+      "allowedVersions": "/^4.*.*/"
+    },
+    {
+      "matchFileNames": [
+        "apps/transmission/4.*.*/*.yml"
+      ],
+      "allowedVersions": "/^4.*.*/"
+    },
+    {
+      "matchDatasources": [
+        "docker"
+      ],
+      "matchPackageNames": [
+        "emby/embyserver"
+      ],
+      "allowedVersions": "/^4\\.8\\.\\d+\\.\\d+$/"
+    },
+    {
+      "matchDatasources": [
+        "docker"
+      ],
+      "matchPackageNames": [
+        "ghcr.io/*"
+      ],
+      "versionCompatibility": "^(?<version>.*)-(?<compatibility>.*)$",
+      "versioning": "semver"
+    },
+    {
+      "matchDatasources": [
+        "docker"
+      ],
+      "matchPackageNames": [
+        "ghcr.io/*"
+      ],
+      "versioning": "semver"
+    }
+  ],
+  "prCreation": "immediate"
+}
diff --git a/.github/workflows/renovate-app-version.sh b/.github/workflows/renovate-app-version.sh
new file mode 100644
index 00000000..017b22e7
--- /dev/null
+++ b/.github/workflows/renovate-app-version.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+# This script renames an app's version directory to match the image tag found in its docker-compose.yml.
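+#
+# For example (illustrative values; the workflow below invokes this script with
+# the app name and its current version directory):
+#   .github/workflows/renovate-app-version.sh alist 3.35.0
+# If that app's docker-compose.yml now references image tag v3.36.0, the directory
+# apps/alist/3.35.0 is renamed to apps/alist/3.36.0.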
+ +app_name=$1 +old_version=$2 + +# find all docker-compose files under apps/$app_name (there should be only one) +docker_compose_files=$(find apps/$app_name/$old_version -name docker-compose.yml) + +for docker_compose_file in $docker_compose_files +do + # Assuming that the app version will be from the first docker image + first_service=$(yq '.services | keys | .[0]' $docker_compose_file) + + image=$(yq .services.$first_service.image $docker_compose_file) + + # Only apply changes if the format is : + if [[ "$image" == *":"* ]]; then + version=$(cut -d ":" -f2- <<< "$image") + + # Trim the "v" prefix + trimmed_version=${version/#"v"} + + mv apps/$app_name/$old_version apps/$app_name/$trimmed_version + fi +done diff --git a/.github/workflows/renovate-app-version.yml b/.github/workflows/renovate-app-version.yml new file mode 100644 index 00000000..266eb810 --- /dev/null +++ b/.github/workflows/renovate-app-version.yml @@ -0,0 +1,53 @@ +name: Update app version in Renovate Branches + +on: + push: + branches: [ 'renovate/*' ] + workflow_dispatch: + inputs: + manual-trigger: + description: 'Manually trigger Renovate' + default: '' +jobs: + update-app-version: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 + with: + fetch-depth: 0 + + - name: Configure repo + run: | + git config --local user.email "githubaction@githubaction.com" + git config --local user.name "github-action update-app-version" + + - name: Get list of updated files by the last commit in this branch separated by space + id: updated-files + run: | + echo "::set-output name=files::$(git diff-tree --no-commit-id --name-only -r ${{ github.sha }} | tr '\n' ' ')" + + - name: Run renovate-app-version.sh on updated files + run: | + IFS=' ' read -ra files <<< "${{ steps.updated-files.outputs.files }}" + + for file in "${files[@]}"; do + if [[ $file == *"docker-compose.yml"* ]]; then + app_name=$(echo $file | cut -d'/' -f 2) + old_version=$(echo $file | cut -d'/' -f 3) + chmod +x .github/workflows/renovate-app-version.sh + .github/workflows/renovate-app-version.sh $app_name $old_version + fi + done + + - name: Commit & Push Changes + run: | + IFS=' ' read -ra files <<< "${{ steps.updated-files.outputs.files }}" + + for file in "${files[@]}"; do + if [[ $file == *"docker-compose.yml"* ]]; then + app_name=$(echo $file | cut -d'/' -f 2) + git add "apps/$app_name/*" && git commit -m "Update app version [skip ci]" --no-verify && git push || true + fi + done + diff --git a/.github/workflows/renovate.yml b/.github/workflows/renovate.yml new file mode 100644 index 00000000..5005263a --- /dev/null +++ b/.github/workflows/renovate.yml @@ -0,0 +1,22 @@ +name: Renovate + +on: + schedule: + - cron: "0 0 * * *" + workflow_dispatch: + inputs: + manual-trigger: + description: 'Manually trigger Renovate' + default: '' + +jobs: + renovate: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - name: Run Renovate + uses: renovatebot/github-action@d4cde0ac34e53942ead1619a101748e3ab842937 # v40.2.1 + with: + useSlim: false + token: ${{ secrets.GITHUBTOKEN }} diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..601ca7d5 --- /dev/null +++ b/.gitignore @@ -0,0 +1,8 @@ +# OSX +.DS_Store + +# IntelliJ +.idea + +# VSCode +.vscode diff --git a/Install-Readme.md b/Install-Readme.md new file mode 100644 index 00000000..b86692eb --- /dev/null +++ b/Install-Readme.md @@ -0,0 +1,145 @@ +# 
应用制作说明 + +### 基础目录 + +`/app` + +### 应用格式 + +``` +├── app_name // 应用名称 + ├── logo.png // 应用 logo , 推荐 180 * 180 px + ├── data.yml // 应用声明文件 + ├── README.md // 应用说明文档 + ├── 2.2.0 // 应用版本 + │   ├── data.yml // 表单配置 + | ├── scripts // 脚本目录 + │   └── docker-compose.yml // docker-compose 文件 + └── 2.3.2 // 应用版本 + ├── data.yml + ├── data + └── docker-compose.yml +``` + +#### 应用声明文件 + +```yaml +# 固定参数 +additionalProperties: + # 应用的 key , 仅限英文 + key: app_name + # 应用名称 显示名称 + name: app_show_name + # 应用标签 + tags: + # 参考 tags.yml + - WebSite + # 应用中文描述, 推荐 30 字以内 + shortDescZh: 应用主要概述 + # 应用英文描述 + shortDescEn: Application main description + # 应用类型 + type: website + # 跨大版本升级 + crossVersionUpdate: true + # 安装限制, 0 代表无限制 + limit: 0 + # 官网地址 + website: https://app.com + # 仓库地址 + github: https://github.com/app/app + # 文档地址 + document: https://docs.app.com +``` + +##### 应用类型 + +| type | 说明 | +|---------|--------------------------------------------------------| +| website | website 类型在 1Panel 中支持在网站中一键部署,wordpress halo 都是此 type | +| runtime | mysql openresty redis 等类型的应用 | +| tool | phpMyAdmin redis-commander jenkins 等类型的应用 | + +#### 脚本目录 + ++ `scripts` 安装脚本文件 + + `init.sh` 安装前执行 + + `upgrade.sh` 升级前执行 + + `uninstall.sh` 卸载后执行 + +#### 表单配置 + +```yaml +# 固定参数 +additionalProperties: + formFields: + # 默认值 + - default: "" + # 安装后可修改 + edit: true + # 是否禁用 + disabled: false + # 环境变量 key + envKey: PANEL_DB_HOST + # 依赖应用 key + key: mysql + # 英文标签 + labelEn: Database Service + # 中文标签 + labelZh: 数据库服务 + # 是否必填 + required: true + # 依赖服务 + type: service + # 是否在默认值基础上增加随机字符 + random: true + # 校验规则 + rule: paramCommon + # 下拉选择 + - default: "" + envKey: LOG_LEVEL + labelEn: Log level + labelZh: 日志级别 + required: true + type: select + values: + - label: DEBUG + value: "DEBUG" + - label: INFO + value: "INFO" + - label: WARNING + value: "WARNING" + - label: ERROR + value: "ERROR" + - label: CRITICAL + value: "CRITICAL" + # 端口效验 + - default: 3306 + envKey: PANEL_APP_PORT_DB + labelEn: Database port + labelZh: 数据库端口 + rule: paramPort + type: number +``` + +##### 字段说明 + ++ `type` 字段类型 + + `service` 依赖服务 + + `password` 密码 + + `text` 文本 + + `number` 数字 + + `select` 下拉框 ++ `rule` 校验规则 + + `paramPort` 用于限制端口范围为 1-65535 + + `paramCommon` 英文、数字、.-和_,长度2-30 + + `paramComplexity` 支持英文、数字、.%@$!&~_-,长度6-30,特殊字符不能在首尾 + + `paramExtUrl` 格式为 http(s)://(域名/ip):(端口) ++ `key` 特殊值 + + `PANEL_APP_PORT_HTTP` Web应用端口 + + `PANEL_APP_PORT_HTTPS` Web应用端口 SSL + + `PANEL_APP_PORT` 前缀将认定为端口,并且用于安装前的端口占用校验 + +#### docker-compose.yml 文件 + +通过使用 `${envKey}` 获取`表单配置`中的值 diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..f288702d --- /dev/null +++ b/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. 
+ + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. 
+ + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. 
This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. 
+ + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. 
+ + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. 
+ + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. 
If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. 
Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. diff --git a/README.md b/README.md new file mode 100644 index 00000000..7f53d3a6 --- /dev/null +++ b/README.md @@ -0,0 +1,208 @@ +# 1Panel 第三方应用商店 + +> # 2024年07月17日 公告 +> +> 正式开始维护,新老用户推荐使用自动化脚本完成软件订阅。 +> +> # 2024年07月16日 公告 +> +> 为了解决应用安装问题,我们将在近期发布新的应用安装脚本,敬请期待! +> +> 具体上线应用,可以参考下方应用列表状态标识。 +> +> 通过固定环境变量文件,解决应用安装问题,不再需要手动创建 `.env` 文件。 +> +> 对于 `FIT2CLOUD 飞致云` 旗下开源产品 `1Panel` 经过多次商讨,依旧无法将原有问题解决。迫于无奈,这次我们采用迂回战术,通过固定环境变量文件的方式解决问题。 +> +> 因此提高了维护成本,但相比于用户体验,我们认为这是值得的。简洁的 `docker-compose.yml` 才是我们的目标。 +> +> 后续接入 `Dockge` 应用,将会迎来新的安装方式,敬请期待! 
+> +> 新老用户均可使用新的安装方式,升级或安装自动化脚本,无需卸载原有安装脚本。 +> +> 由于结构变化,我们建议您,完全卸载应用后重新安装,以免出现问题。 +> +> # 2024年06月14日 公告 +> 由于 1Panel 的升级,自版本 `v1.10.10-lts` 开始,所有应用均已停止更新! +> +> 经过与 1Panel 官方商议,由于其认为安装或升级应用或面板出现的问题不属于 `Bug`,对于使用本仓库的用户,我们深感抱歉! +> 截至目前,我们将不再更新任何应用,如果您有任何问题,请放弃使用当前第三方应用。 +> +> 如果您依旧收到应用更新提示,当前唯一解决升级方案为:卸载应用并重新安装。 +> 安装时出现的相关问题,请参考常见问题解决。 +> +> 🥰 祝大家周末愉快~~ +> +> # 2024年06月12日 公告 +> 由于 1Panel 的升级,造成部分应用无法正常使用,我们考虑到用户体验,决定暂时关闭应用商店更新,等待团队商议解决方案。 +> +> ### 拟定解决方案 +> 1. 当您收到提示 /xxx/xxx/.env 文件不存在时,请手动创建该文件,不需要填写任何内容的空文件。 +> 2. 当您升级 1Panel 出现容器不存在时,请查看容器列表是否正常运行,如果没有运行,可卸载应用并重新安装。 +> 3. 当您升级 1Panel 出现应用无法正常使用时,请查看应用是否正常运行,如果没有运行,可卸载应用并重新安装。 +> 4. 关闭期间,您可能会收到部分应用的更新,没有关系,您可以选择性更新,不会影响您的使用。 +> 5. 由于数据均采用持久化方式存储,卸载应用不会删除数据,您可以放心卸载并重新安装。 +> +> # 2024年06月09日 公告 +> 警告 请勿升级 1Panel 到 v1.10.10-lts 版本,否则会导致应用商店无法使用! + +本仓库中的所有应用不会与 1Panel 官方应用商店冲突(存在相同应用并不影响后续的安装与升级,择优选择你需要的版本即可) +,我们会定期更新应用,如果您有任何问题,请联系我们。 +当前仓库由 [`新疆萌森软件开发工作室`](https://lifebus.top/) 维护,我们致力于为 1Panel 用户提供更多的应用程序。 + +## 维护状态 + +| 维护状态 | 应用名称 | 官网 | 描述 | 集合 | +|:----:|:-----------------------:|:-----------------------------------------:|:---------------------------------------------------------------------|:---------:| +| 🟢 | AList | https://alist.nn.ci/ | 一款支持多重存储的文件列表程序 | | +| 🟢 | Bark | https://bark.day.app/ | 一款注重隐私、安全可控的自定义通知推送工具 | | +| 🔴 | Cookie Cloud | https://github.com/easychen/CookieCloud/ | CookieCloud是一个和自架服务器同步浏览器Cookie和LocalStorage的小工具 | | +| 🔴 | DeepLX | https://deeplx.owo.network/ | DeepL免费API(无需TOKEN) | | +| 🟢 | Dockge | https://dockge.kuma.pet/ | 面向堆栈的管理器 | | +| 🔴 | Elasticsearch | https://www.elastic.co/elasticsearch/ | Elasticsearch 是一个分布式、RESTful 风格的搜索和数据分析引擎 | Elastic | +| 🔴 | Kibana | https://www.elastic.co/kibana/ | Kibana 针对大规模数据快速运行数据分析 | Elastic | +| 🟢 | Emby | https://emby.media/ | Emby Server 是一款个人媒体服务器,可在几乎所有设备上运行应用程序 | | +| 🟢 | Emby-lovechen | https://emby.media/ | 【开心版】Emby Server 是一款个人媒体服务器,可在几乎所有设备上运行应用程序 | | +| 🟢 | Gitea | https://gitea.io/ | 私有、快速、可靠的 DevOps 平台 | Gitea | +| 🟢 | Gitea Runner | https://gitea.io/ | 【Runner】私有、快速、可靠的 DevOps 平台 | Gitea | +| 🟢 | Halo | https://halo.run/ | 强大易用的开源建站工具 | | +| 🟢 | Immich | https://immich.app/ | 【完整版本】高性能自托管照片和视频管理解决方案 | Immich | +| 🟢 | Immich-Server | https://immich.app/ | 【主服务模块】高性能自托管照片和视频管理解决方案 | Immich | +| 🟢 | Immich-Machine-Learning | https://immich.app/ | 【机器学习模块】高性能自托管照片和视频管理解决方案 | Immich | +| 🟢 | IYUU Plus | https://doc.iyuu.cn/ | 基于特征码的索引工具 | | +| 🟢 | Jellyfin | https://jellyfin.org/ | 自由软件媒体系统 | | +| 🔴 | LinkDing | https://github.com/sissbruecker/linkding/ | 自托管书签管理器 | | +| 🔴 | MinIO | https://min.io/ | MinIO 是一种高性能、兼容 S3 的对象存储 | | +| 🟢 | MoviePilot | https://github.com/jxxghp/MoviePilot/ | NAS媒体库自动化管理工具 | | +| 🟢 | MySQL | https://www.mysql.com/ | 关系数据库管理系统 | MySQL | +| 🟢 | MariaDB | https://mariadb.org/ | 【MySQL分支】创新的开源数据库 | MySQL | +| 🟢 | Percona | https://www.percona.com/ | 【MySQL分支】关系数据库管理系统 | MySQL | +| 🔴 | Nacos | https://nacos.io/ | 动态服务发现、配置管理和服务管理平台 | | +| 🟢 | NeZha | https://nacos.io/ | 【哪吒监控】开源、轻量、易用的服务器监控、运维工具 | | +| 🟢 | OneDev | https://onedev.io/ | DevOps 平台、带有 CI/CD、看板和软件包的 Git 服务器 | | +| 🟢 | OutLine | https://www.getoutline.com/ | 快速、协作的团队知识库 | | +| 🟢 | PostgreSQL | https://www.postgresql.org/ | 世界上最先进的开源关系数据库 | | +| 🟢 | qBittorrent | https://www.qbittorrent.org/ | qBittorrent 比特流客户端 | | +| 🟢 | QingLong | https://github.com/whyour/qinglong/ | 【青龙】支持 Python3、JavaScript、Shell、Typescript 的定时任务管理平台 | | +| 🟢 | Redis | https://redis.io/ | 从世界上最快的内存数据库创建者那里获取该数据库 | | +| 🔴 | Sentinel | https://sentinelguard.io/ | 面向分布式、多语言异构化服务架构的流量治理组件 | | +| 🟢 
| SiYuan | https://b3log.org/siyuan/ | 【思源笔记】一款隐私优先、自托管、完全开源的个人知识管理软件 | | +| 🔴 | SpeedTest | https://www.speedtest.net/ | 互联网速度测试 | SpeedTest | +| 🔴 | SpeedTest-Tracker | https://docs.speedtest-tracker.dev/ | Speedtest Tracker 是一款自托管互联网性能跟踪应用程序,可针对 Ookla 的 Speedtest 服务运行速度测试检查 | | +| 🔴 | Stream-Rec | https://github.com/hua0512/stream-rec | Stream-rec是一个用于各种流媒体服务的自动流媒体录制工具 | | +| 🟢 | Transmission | https://transmissionbt.com/ | 快速、简单、免费的 Bittorrent 客户端 | | +| 🟢 | Uptime Kuma | https://uptime.kuma.pet/ | 自托管监控工具 | | +| 🟢 | Umami | https://umami.is/ | 为速度和效率而构建的网站分析 | | +| 🔴 | Yarr | https://github.com/nkanaev/yarr | Rss 阅读器 | | +| 🔴 | Ztncui | https://www.zerotier.com/ | 【自建服务端】ZeroTier 可让您构建几乎任何类型的现代化安全多点虚拟化网络 | ZeroTier | + +> 说明: +> +> 以上排序不分先后,按照字母顺序排列。 +> +> 维护状态:🟢 维护中 🔴 未维护 🟡 表示不定期维护 + +## 应用安装 + +> 温馨提示: +> 当您已安装其他第三方库时并且存在应用冲突,安装过程中会主动删除冲突的第三方库应用,如果您不同意,请不要执行脚本。 +> +> 我们建议您在安装之前备份您的数据,或手动安装。 + +### 方案一:自动化安装 + +如果您不想每次都手动执行命令,可以使用一键式安装。我们优先推荐此方式。 + +```shell +curl -sSL https://install.lifebus.top/auto_install.sh | bash +``` + +> 卸载自动化脚本 (不会卸载应用) + +```shell +curl -sSL https://install.lifebus.top/auto_uninstall.sh | bash +``` + +如需卸载应用列表,您可以手动删除以下目录: + ++ 应用目录:`${1panel应用目录}/resource/apps/local` + ++ 应用公共文件目录:`/etc/1panel/envs` + ++ 应用数据目录:`${应用持久化目录}` + +### 方案二:手动安装 + +应用的升级与更新均需要重新执行安装脚本。 + +#### 手动执行模式 + +```shell +curl -sSL https://install.lifebus.top/app_install.sh | bash +``` + +### 方案三:计划任务模式 + +将内容写入Shell计划任务中,设定定期执行。 + +```shell +#!/bin/bash + +script_url="https://install.lifebus.top/app_install.sh" + +echo "Downloading and executing script from $script_url..." +bash <(curl -sL "$script_url") + +echo "Script execution completed." +``` + +#### 配置脚本网络代理 + +```sh +proxy_protocols="http" +proxy_server="server address" +proxy_port="server port" +export http_proxy="$proxy_protocols://$proxy_server:$proxy_port" +export https_proxy="$proxy_protocols://$proxy_server:$proxy_port" +``` + +## 常见问题 + ++ 升级失败 + + 检查网络状况与磁盘空间 + + 配置镜像地址 + + 尝试重新升级 + + 进行卸载重装 + + 多次升级依旧失败,请联系我们 ++ 安装提示 + + `Error: /xxx/xxx/.env file does not exist` + + 请手动创建 `.env` 文件,不需要填写任何内容的空文件 + + 重新点击安装 ++ 前置检查 + + `前置检查` 是利用 `1Panel` 的应用特性,进行安装前的环境检查,如果您的环境不符合要求,将无法安装应用。 + + 关于 `前置检查` 依旧需要填写 `数据库` 相关连接信息,是因为其提供的能力并不是完全可靠,避免后期出现问题,我们采用持久化的方式存储。 + + 如果您的环境符合要求,但是依旧无法安装,请联系我们。 + +## 温馨提示 + +安装应用前请查看应用说明,了解应用的使用方法和注意事项。 +当前第三方库应用均为开源应用,我们不对应用的安全性和稳定性负责。 +如果您在使用过程中遇到问题,请查看应用的官方文档或社区,或者联系我们。 + +## 联系我们 + +[📮邮箱](mailto:qyg2297248353@gmail.com) + +[🌍官网](https://lifebus.top/) + +[🌍博客](https://blog.lifebus.top/) + +[🌍GitHub](https://github.com/qyg2297248353) + +[✈️Telegram](https://t.me/qyg2297248353) + +[🌍Twitter](https://twitter.com/qyg2297248353) + +[🌍Facebook](https://www.facebook.com/qyg2297248353) + +[🌍Instagram](https://www.instagram.com/qyg2297248353) diff --git a/apps/alist-aria2/3.35.0/data.yml b/apps/alist-aria2/3.35.0/data.yml new file mode 100644 index 00000000..0f4fc7a2 --- /dev/null +++ b/apps/alist-aria2/3.35.0/data.yml @@ -0,0 +1,74 @@ +additionalProperties: + formFields: + - default: "/home/alist" + edit: true + envKey: ALIST_ROOT_PATH + labelZh: 数据持久化路径 + labelEn: Data persistence path + required: true + type: text + - default: 5244 + edit: true + envKey: PANEL_APP_PORT_HTTP + labelZh: WebUI 端口 + labelEn: WebUI port + required: true + rule: paramPort + type: number + - default: "" + edit: true + envKey: JWT_SECRET + labelZh: 加密密钥 + labelEn: Encryption key + required: false + type: password + - default: "" + edit: true + envKey: SITE_URL + labelZh: 站点 URL + labelEn: Site URL + required: false + rule: 
paramExtUrl + type: text + - default: 48 + edit: true + envKey: TOKEN_EXPIRES_IN + labelZh: 登录过期时间 (小时) + labelEn: Login expiration time (hours) + required: true + type: number + - default: 0 + edit: true + envKey: DELAYED_START + labelZh: 延时启动 (秒) + labelEn: Delayed start (seconds) + required: true + type: number + - default: 0 + edit: true + envKey: max_connections + labelZh: 最大连接数 + labelEn: Maximum connections + required: true + type: number + - default: "" + edit: true + envKey: CUSTOM_MOUNT_DIRECTORY_1 + labelEn: Custom mount directory 1 + labelZh: 自定义挂载目录 1 + required: false + type: text + - default: "" + edit: true + envKey: CUSTOM_MOUNT_DIRECTORY_2 + labelEn: Custom mount directory 2 + labelZh: 自定义挂载目录 2 + required: false + type: text + - default: "" + edit: true + envKey: CUSTOM_MOUNT_DIRECTORY_3 + labelEn: Custom mount directory 3 + labelZh: 自定义挂载目录 3 + required: false + type: text diff --git a/apps/alist-aria2/3.35.0/docker-compose.yml b/apps/alist-aria2/3.35.0/docker-compose.yml new file mode 100644 index 00000000..f425ec2b --- /dev/null +++ b/apps/alist-aria2/3.35.0/docker-compose.yml @@ -0,0 +1,28 @@ +version: "3.8" +networks: + 1panel-network: + external: true +services: + alist-aria2: + image: xhofe/alist-aria2:v3.35.0 + container_name: ${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + networks: + - 1panel-network + ports: + - ${PANEL_APP_PORT_HTTP}:5244 + env_file: + - /etc/1panel/envs/global.env + - ${ENV_FILE:-/etc/1panel/envs/default.env} + volumes: + - ${ALIST_ROOT_PATH}/data:/opt/alist/data + - ${CUSTOM_MOUNT_DIRECTORY_1:-./default_mount_1}:${CUSTOM_MOUNT_DIRECTORY_1:-/default_mount_1} + - ${CUSTOM_MOUNT_DIRECTORY_2:-./default_mount_2}:${CUSTOM_MOUNT_DIRECTORY_2:-/default_mount_2} + - ${CUSTOM_MOUNT_DIRECTORY_3:-./default_mount_3}:${CUSTOM_MOUNT_DIRECTORY_3:-/default_mount_3} + environment: + - PUID=0 + - PGID=0 + - UMASK=022 + - FORCE=false diff --git a/apps/alist-aria2/3.35.0/scripts/init.sh b/apps/alist-aria2/3.35.0/scripts/init.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/alist-aria2/3.35.0/scripts/init.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/alist-aria2/3.35.0/scripts/uninstall.sh b/apps/alist-aria2/3.35.0/scripts/uninstall.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/alist-aria2/3.35.0/scripts/uninstall.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/alist-aria2/3.35.0/scripts/upgrade.sh b/apps/alist-aria2/3.35.0/scripts/upgrade.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/alist-aria2/3.35.0/scripts/upgrade.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found." 
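+  # Without a .env file no ENV_FILE value is recorded, so the docker-compose.yml
+  # above falls back to ${ENV_FILE:-/etc/1panel/envs/default.env} for its env_file entry.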
+fi diff --git a/apps/alist-aria2/README.md b/apps/alist-aria2/README.md new file mode 100644 index 00000000..83767946 --- /dev/null +++ b/apps/alist-aria2/README.md @@ -0,0 +1,84 @@ +# AList + +一个支持多种存储的文件列表程序 + +![AList](https://file.lifebus.top/imgs/alist_cover.png) + +## 特性 + ++ 使用简单 + + AList 从一开始就设计为易于安装,并且可以在所有平台上使用。 + ++ 多种存储 + + AList 支持多个存储提供商,包括本地存储、阿里云盘、OneDrive、Google Drive 等,且易于拓展。 + ++ 支持 WebDAV + + AList 支持所有 WebDAV 存储,这是一种用于访问文件的标准。 + ++ 黑暗模式 + + 自由切换明暗模式 + ++ 受保护的路由 + + 为特定路径添加密码保护和身份验证 + ++ 文件预览 + + 支持视频、音频、文档、PDF、图片预览等,甚至支持 ipa 安装 + ++ 打包下载/批量下载 + + 使用浏览器的 stream api 支持打包下载,无需使用服务器 / 使用Aria2进行批量下载支持文件夹 + ++ 单点登录 + + 使用单点登录快速登录AList + ++ 自动注册AList帐号 + + 使用单点登录自动注册为AList帐号快速注册 + ++ 离线下载 + + 将种子内容离线下载到指定的目录內,需要苛刻的网络环境 + ++ 保险箱加密/解密 文件 + + 任何人都可以安全地将加密数据存储在远程存储提供商上。数据存储在保险箱中,提供商只能看到保险箱,看不到您的数据。 + ++ 更多新功能 + + 包括文本编辑器、README/HTML 渲染、文件永久链接、Cloudflare Workers 代理等 + +## 安装说明 + +当前版本预装 `aria2` 用于下载 + +> 默认用户名:`admin` +> +> 默认密码:首次启动,可通过日志查询,或重置密码 + +### 重置密码 + ++ 随机生成一个密码 + +```shell +alist admin random +``` + ++ 设置指定密码 + +```shell +alist admin set {PASSWORD} +``` + +### 反向代理 + +> Nginx 配置示例 + +```conf +location / { + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header Host $host:$server_port; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header Range $http_range; + proxy_set_header If-Range $http_if_range; + proxy_redirect off; + proxy_pass http://127.0.0.1:5244; + # 文件最大上传大小 20GB + client_max_body_size 20000m; +} +``` diff --git a/apps/alist-aria2/data.yml b/apps/alist-aria2/data.yml new file mode 100644 index 00000000..65b5fc75 --- /dev/null +++ b/apps/alist-aria2/data.yml @@ -0,0 +1,18 @@ +name: Alist +title: 文件列表程序 +description: 一个支持多种存储的文件列表程序 +additionalProperties: + key: alist + name: Alist + tags: + - WebSite + - Storage + - Local + shortDescZh: 一个支持多种存储的文件列表程序 + shortDescEn: A file list program that supports multiple storage methods + type: website + crossVersionUpdate: true + limit: 0 + website: https://alist.nn.ci/ + github: https://github.com/alist-org/alist/ + document: https://alist.nn.ci/guide/ diff --git a/apps/alist-aria2/logo.png b/apps/alist-aria2/logo.png new file mode 100644 index 00000000..443d69ee Binary files /dev/null and b/apps/alist-aria2/logo.png differ diff --git a/apps/alist-ffmpeg/3.35.0/data.yml b/apps/alist-ffmpeg/3.35.0/data.yml new file mode 100644 index 00000000..0f4fc7a2 --- /dev/null +++ b/apps/alist-ffmpeg/3.35.0/data.yml @@ -0,0 +1,74 @@ +additionalProperties: + formFields: + - default: "/home/alist" + edit: true + envKey: ALIST_ROOT_PATH + labelZh: 数据持久化路径 + labelEn: Data persistence path + required: true + type: text + - default: 5244 + edit: true + envKey: PANEL_APP_PORT_HTTP + labelZh: WebUI 端口 + labelEn: WebUI port + required: true + rule: paramPort + type: number + - default: "" + edit: true + envKey: JWT_SECRET + labelZh: 加密密钥 + labelEn: Encryption key + required: false + type: password + - default: "" + edit: true + envKey: SITE_URL + labelZh: 站点 URL + labelEn: Site URL + required: false + rule: paramExtUrl + type: text + - default: 48 + edit: true + envKey: TOKEN_EXPIRES_IN + labelZh: 登录过期时间 (小时) + labelEn: Login expiration time (hours) + required: true + type: number + - default: 0 + edit: true + envKey: DELAYED_START + labelZh: 延时启动 (秒) + labelEn: Delayed start (seconds) + required: true + type: number + - default: 0 + edit: true + envKey: max_connections + labelZh: 最大连接数 + labelEn: Maximum connections + required: true + type: number + - 
default: "" + edit: true + envKey: CUSTOM_MOUNT_DIRECTORY_1 + labelEn: Custom mount directory 1 + labelZh: 自定义挂载目录 1 + required: false + type: text + - default: "" + edit: true + envKey: CUSTOM_MOUNT_DIRECTORY_2 + labelEn: Custom mount directory 2 + labelZh: 自定义挂载目录 2 + required: false + type: text + - default: "" + edit: true + envKey: CUSTOM_MOUNT_DIRECTORY_3 + labelEn: Custom mount directory 3 + labelZh: 自定义挂载目录 3 + required: false + type: text diff --git a/apps/alist-ffmpeg/3.35.0/docker-compose.yml b/apps/alist-ffmpeg/3.35.0/docker-compose.yml new file mode 100644 index 00000000..37d68645 --- /dev/null +++ b/apps/alist-ffmpeg/3.35.0/docker-compose.yml @@ -0,0 +1,28 @@ +version: "3.8" +networks: + 1panel-network: + external: true +services: + alist: + image: xhofe/alist:v3.35.0-ffmpeg + container_name: ${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + networks: + - 1panel-network + ports: + - ${PANEL_APP_PORT_HTTP}:5244 + env_file: + - /etc/1panel/envs/global.env + - ${ENV_FILE:-/etc/1panel/envs/default.env} + volumes: + - ${ALIST_ROOT_PATH}/data:/opt/alist/data + - ${CUSTOM_MOUNT_DIRECTORY_1:-./default_mount_1}:${CUSTOM_MOUNT_DIRECTORY_1:-/default_mount_1} + - ${CUSTOM_MOUNT_DIRECTORY_2:-./default_mount_2}:${CUSTOM_MOUNT_DIRECTORY_2:-/default_mount_2} + - ${CUSTOM_MOUNT_DIRECTORY_3:-./default_mount_3}:${CUSTOM_MOUNT_DIRECTORY_3:-/default_mount_3} + environment: + - PUID=0 + - PGID=0 + - UMASK=022 + - FORCE=false diff --git a/apps/alist-ffmpeg/3.35.0/scripts/init.sh b/apps/alist-ffmpeg/3.35.0/scripts/init.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/alist-ffmpeg/3.35.0/scripts/init.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/alist-ffmpeg/3.35.0/scripts/uninstall.sh b/apps/alist-ffmpeg/3.35.0/scripts/uninstall.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/alist-ffmpeg/3.35.0/scripts/uninstall.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/alist-ffmpeg/3.35.0/scripts/upgrade.sh b/apps/alist-ffmpeg/3.35.0/scripts/upgrade.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/alist-ffmpeg/3.35.0/scripts/upgrade.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found." 
+fi diff --git a/apps/alist-ffmpeg/README.md b/apps/alist-ffmpeg/README.md new file mode 100644 index 00000000..ca610df5 --- /dev/null +++ b/apps/alist-ffmpeg/README.md @@ -0,0 +1,84 @@ +# AList + +一个支持多种存储的文件列表程序 + +![AList](https://file.lifebus.top/imgs/alist_cover.png) + +## 特性 + ++ 使用简单 + + AList 从一开始就设计为易于安装,并且可以在所有平台上使用。 + ++ 多种存储 + + AList 支持多个存储提供商,包括本地存储、阿里云盘、OneDrive、Google Drive 等,且易于拓展。 + ++ 支持 WebDAV + + AList 支持所有 WebDAV 存储,这是一种用于访问文件的标准。 + ++ 黑暗模式 + + 自由切换明暗模式 + ++ 受保护的路由 + + 为特定路径添加密码保护和身份验证 + ++ 文件预览 + + 支持视频、音频、文档、PDF、图片预览等,甚至支持 ipa 安装 + ++ 打包下载/批量下载 + + 使用浏览器的 stream api 支持打包下载,无需使用服务器 / 使用Aria2进行批量下载支持文件夹 + ++ 单点登录 + + 使用单点登录快速登录AList + ++ 自动注册AList帐号 + + 使用单点登录自动注册为AList帐号快速注册 + ++ 离线下载 + + 将种子内容离线下载到指定的目录內,需要苛刻的网络环境 + ++ 保险箱加密/解密 文件 + + 任何人都可以安全地将加密数据存储在远程存储提供商上。数据存储在保险箱中,提供商只能看到保险箱,看不到您的数据。 + ++ 更多新功能 + + 包括文本编辑器、README/HTML 渲染、文件永久链接、Cloudflare Workers 代理等 + +## 安装说明 + +当前版本预装 `ffmpeg` 用于封面截取 + +> 默认用户名:`admin` +> +> 默认密码:首次启动,可通过日志查询,或重置密码 + +### 重置密码 + ++ 随机生成一个密码 + +```shell +alist admin random +``` + ++ 设置指定密码 + +```shell +alist admin set {PASSWORD} +``` + +### 反向代理 + +> Nginx 配置示例 + +```conf +location / { + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header Host $host:$server_port; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header Range $http_range; + proxy_set_header If-Range $http_if_range; + proxy_redirect off; + proxy_pass http://127.0.0.1:5244; + # 文件最大上传大小 20GB + client_max_body_size 20000m; +} +``` diff --git a/apps/alist-ffmpeg/data.yml b/apps/alist-ffmpeg/data.yml new file mode 100644 index 00000000..65b5fc75 --- /dev/null +++ b/apps/alist-ffmpeg/data.yml @@ -0,0 +1,18 @@ +name: Alist +title: 文件列表程序 +description: 一个支持多种存储的文件列表程序 +additionalProperties: + key: alist + name: Alist + tags: + - WebSite + - Storage + - Local + shortDescZh: 一个支持多种存储的文件列表程序 + shortDescEn: A file list program that supports multiple storage methods + type: website + crossVersionUpdate: true + limit: 0 + website: https://alist.nn.ci/ + github: https://github.com/alist-org/alist/ + document: https://alist.nn.ci/guide/ diff --git a/apps/alist-ffmpeg/logo.png b/apps/alist-ffmpeg/logo.png new file mode 100644 index 00000000..443d69ee Binary files /dev/null and b/apps/alist-ffmpeg/logo.png differ diff --git a/apps/alist/3.35.0/data.yml b/apps/alist/3.35.0/data.yml new file mode 100644 index 00000000..0f4fc7a2 --- /dev/null +++ b/apps/alist/3.35.0/data.yml @@ -0,0 +1,74 @@ +additionalProperties: + formFields: + - default: "/home/alist" + edit: true + envKey: ALIST_ROOT_PATH + labelZh: 数据持久化路径 + labelEn: Data persistence path + required: true + type: text + - default: 5244 + edit: true + envKey: PANEL_APP_PORT_HTTP + labelZh: WebUI 端口 + labelEn: WebUI port + required: true + rule: paramPort + type: number + - default: "" + edit: true + envKey: JWT_SECRET + labelZh: 加密密钥 + labelEn: Encryption key + required: false + type: password + - default: "" + edit: true + envKey: SITE_URL + labelZh: 站点 URL + labelEn: Site URL + required: false + rule: paramExtUrl + type: text + - default: 48 + edit: true + envKey: TOKEN_EXPIRES_IN + labelZh: 登录过期时间 (小时) + labelEn: Login expiration time (hours) + required: true + type: number + - default: 0 + edit: true + envKey: DELAYED_START + labelZh: 延时启动 (秒) + labelEn: Delayed start (seconds) + required: true + type: number + - default: 0 + edit: true + envKey: max_connections + labelZh: 最大连接数 + labelEn: Maximum connections + required: true + type: number + - default: "" + 
edit: true + envKey: CUSTOM_MOUNT_DIRECTORY_1 + labelEn: Custom mount directory 1 + labelZh: 自定义挂载目录 1 + required: false + type: text + - default: "" + edit: true + envKey: CUSTOM_MOUNT_DIRECTORY_2 + labelEn: Custom mount directory 2 + labelZh: 自定义挂载目录 2 + required: false + type: text + - default: "" + edit: true + envKey: CUSTOM_MOUNT_DIRECTORY_3 + labelEn: Custom mount directory 3 + labelZh: 自定义挂载目录 3 + required: false + type: text diff --git a/apps/alist/3.35.0/docker-compose.yml b/apps/alist/3.35.0/docker-compose.yml new file mode 100644 index 00000000..db0e1545 --- /dev/null +++ b/apps/alist/3.35.0/docker-compose.yml @@ -0,0 +1,28 @@ +version: "3.8" +networks: + 1panel-network: + external: true +services: + alist: + image: xhofe/alist:v3.35.0 + container_name: ${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + networks: + - 1panel-network + ports: + - ${PANEL_APP_PORT_HTTP}:5244 + env_file: + - /etc/1panel/envs/global.env + - ${ENV_FILE:-/etc/1panel/envs/default.env} + volumes: + - ${ALIST_ROOT_PATH}/data:/opt/alist/data + - ${CUSTOM_MOUNT_DIRECTORY_1:-./default_mount_1}:${CUSTOM_MOUNT_DIRECTORY_1:-/default_mount_1} + - ${CUSTOM_MOUNT_DIRECTORY_2:-./default_mount_2}:${CUSTOM_MOUNT_DIRECTORY_2:-/default_mount_2} + - ${CUSTOM_MOUNT_DIRECTORY_3:-./default_mount_3}:${CUSTOM_MOUNT_DIRECTORY_3:-/default_mount_3} + environment: + - PUID=0 + - PGID=0 + - UMASK=022 + - FORCE=false diff --git a/apps/alist/3.35.0/scripts/init.sh b/apps/alist/3.35.0/scripts/init.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/alist/3.35.0/scripts/init.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/alist/3.35.0/scripts/uninstall.sh b/apps/alist/3.35.0/scripts/uninstall.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/alist/3.35.0/scripts/uninstall.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/alist/3.35.0/scripts/upgrade.sh b/apps/alist/3.35.0/scripts/upgrade.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/alist/3.35.0/scripts/upgrade.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found." 
+fi diff --git a/apps/alist/README.md b/apps/alist/README.md new file mode 100644 index 00000000..d04f1a19 --- /dev/null +++ b/apps/alist/README.md @@ -0,0 +1,82 @@ +# AList + +一个支持多种存储的文件列表程序 + +![AList](https://file.lifebus.top/imgs/alist_cover.png) + +## 特性 + ++ 使用简单 + + AList 从一开始就设计为易于安装,并且可以在所有平台上使用。 + ++ 多种存储 + + AList 支持多个存储提供商,包括本地存储、阿里云盘、OneDrive、Google Drive 等,且易于拓展。 + ++ 支持 WebDAV + + AList 支持所有 WebDAV 存储,这是一种用于访问文件的标准。 + ++ 黑暗模式 + + 自由切换明暗模式 + ++ 受保护的路由 + + 为特定路径添加密码保护和身份验证 + ++ 文件预览 + + 支持视频、音频、文档、PDF、图片预览等,甚至支持 ipa 安装 + ++ 打包下载/批量下载 + + 使用浏览器的 stream api 支持打包下载,无需使用服务器 / 使用Aria2进行批量下载支持文件夹 + ++ 单点登录 + + 使用单点登录快速登录AList + ++ 自动注册AList帐号 + + 使用单点登录自动注册为AList帐号快速注册 + ++ 离线下载 + + 将种子内容离线下载到指定的目录內,需要苛刻的网络环境 + ++ 保险箱加密/解密 文件 + + 任何人都可以安全地将加密数据存储在远程存储提供商上。数据存储在保险箱中,提供商只能看到保险箱,看不到您的数据。 + ++ 更多新功能 + + 包括文本编辑器、README/HTML 渲染、文件永久链接、Cloudflare Workers 代理等 + +## 安装说明 + +> 默认用户名:`admin` +> +> 默认密码:首次启动,可通过日志查询,或重置密码 + +### 重置密码 + ++ 随机生成一个密码 + +```shell +alist admin random +``` + ++ 设置指定密码 + +```shell +alist admin set {PASSWORD} +``` + +### 反向代理 + +> Nginx 配置示例 + +```conf +location / { + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header Host $host:$server_port; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header Range $http_range; + proxy_set_header If-Range $http_if_range; + proxy_redirect off; + proxy_pass http://127.0.0.1:5244; + # 文件最大上传大小 20GB + client_max_body_size 20000m; +} +``` diff --git a/apps/alist/data.yml b/apps/alist/data.yml new file mode 100644 index 00000000..65b5fc75 --- /dev/null +++ b/apps/alist/data.yml @@ -0,0 +1,18 @@ +name: Alist +title: 文件列表程序 +description: 一个支持多种存储的文件列表程序 +additionalProperties: + key: alist + name: Alist + tags: + - WebSite + - Storage + - Local + shortDescZh: 一个支持多种存储的文件列表程序 + shortDescEn: A file list program that supports multiple storage methods + type: website + crossVersionUpdate: true + limit: 0 + website: https://alist.nn.ci/ + github: https://github.com/alist-org/alist/ + document: https://alist.nn.ci/guide/ diff --git a/apps/alist/logo.png b/apps/alist/logo.png new file mode 100644 index 00000000..443d69ee Binary files /dev/null and b/apps/alist/logo.png differ diff --git a/apps/bark/2.1.5/data.yml b/apps/bark/2.1.5/data.yml new file mode 100644 index 00000000..bd610e8f --- /dev/null +++ b/apps/bark/2.1.5/data.yml @@ -0,0 +1,38 @@ +additionalProperties: + formFields: + - default: "/home/bark" + edit: true + envKey: BARK_ROOT_PATH + labelZh: 数据持久化路径 + labelEn: Data persistence path + required: true + type: text + - default: 8080 + edit: true + envKey: PANEL_APP_PORT_HTTP + labelZh: WebUI 端口 + labelEn: WebUI port + required: true + rule: paramPort + type: number + - default: "" + edit: true + envKey: BARK_SERVER_DSN + labelEn: Database URL + labelZh: 数据库链接 + required: false + type: text + - default: "" + edit: true + envKey: BARK_SERVER_BASIC_AUTH_USER + labelEn: Basic Auth User + labelZh: 用户名 (服务基础验证) + required: false + type: text + - default: "" + edit: true + envKey: BARK_SERVER_BASIC_AUTH_PASSWORD + labelEn: Basic Auth Password + labelZh: 密码 (服务基础验证) + required: false + type: text diff --git a/apps/bark/2.1.5/docker-compose.yml b/apps/bark/2.1.5/docker-compose.yml new file mode 100644 index 00000000..df29f819 --- /dev/null +++ b/apps/bark/2.1.5/docker-compose.yml @@ -0,0 +1,27 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +services: + bark: + image: finab/bark-server:v2.1.5 + container_name: ${CONTAINER_NAME} + labels: + 
createdBy: "Apps" + restart: always + networks: + - 1panel-network + ports: + - ${PANEL_APP_PORT_HTTP}:8080 + env_file: + - /etc/1panel/envs/global.env + - ${ENV_FILE:-/etc/1panel/envs/default.env} + volumes: + - ${BARK_ROOT_PATH}/data:/data + environment: + - BARK_SERVER_ADDRESS=0.0.0.0:8080 + - BARK_SERVER_URL_PREFIX=/ + - BARK_SERVER_DATA_DIR=/data + - BARK_SERVER_SERVERLESS=false diff --git a/apps/bark/2.1.5/scripts/init.sh b/apps/bark/2.1.5/scripts/init.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/bark/2.1.5/scripts/init.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/bark/2.1.5/scripts/uninstall.sh b/apps/bark/2.1.5/scripts/uninstall.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/bark/2.1.5/scripts/uninstall.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/bark/2.1.5/scripts/upgrade.sh b/apps/bark/2.1.5/scripts/upgrade.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/bark/2.1.5/scripts/upgrade.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/bark/README.md b/apps/bark/README.md new file mode 100644 index 00000000..1eb4eb41 --- /dev/null +++ b/apps/bark/README.md @@ -0,0 +1,119 @@ +# Bark + +一款注重隐私、安全可控的自定义通知推送工具。 + +![Bark](https://file.lifebus.top/imgs/bark_logo.png) + +## 特性 + ++ 免费、轻量!简单调用接口即可给自己的iPhone发送推送。 ++ 依赖苹果APNs,及时、稳定、可靠 ++ 不会消耗设备的电量, 基于系统推送服务与推送扩展,APP本体并不需要运行。 ++ 隐私安全,可以通过一些方式确保包含作者本人在内的所有人都无法窃取你的隐私。 + +## 安装说明 + +### `数据库链接` 配置 + +MySQL DSN 数据库链接 + +格式: + +```shell +user:pass@tcp(host)/dbname + +# 例如 +root:123456@tcp(127.0.0.1:3306)/bark +``` + +#### 参数说明 + +| 参数 | 说明 | +|--------|--------| +| user | 数据库用户名 | +| pass | 数据库密码 | +| host | 数据库地址 | +| dbname | 数据库名称 | + +使用其他端口时 `host` 格式为 `host:port` + +## [URL格式](https://bark.day.app/#/tutorial?id=url格式) + +URL由推送key、参数 title、参数 body 组成。有下面两种组合方式 + +#### 服务基础验证 + +可选项,验证方式为 `Basic`,具体格式为 `Basic base64(username:password)`,其中 `base64(username:password)` +为 `username:password` 的 `base64` 编码结果。 + ++ 用户名 (服务基础验证) ++ 密码 (服务基础验证) + +``` +/:key/:body +/:key/:title/:body +``` + +## [请求方式](https://bark.day.app/#/tutorial?id=请求方式) + +##### [GET 请求参数拼接在 URL 后面,例如:](https://bark.day.app/#/tutorial?id=get-请求参数拼接在-url-后面,例如:) + +```sh +curl https://api.day.app/your_key/推送内容?group=分组©=复制 +``` + +*手动拼接参数到URL上时,请注意URL编码问题,可以参考阅读[常见问题:URL编码](https://bark.day.app/#/faq?id=推送特殊字符导致推送失败,比如-推送内容包含链接,或推送异常-比如-变成空格)* + +##### [POST 请求参数放在请求体中,例如:](https://bark.day.app/#/tutorial?id=post-请求参数放在请求体中,例如:) + +```sh +curl -X POST https://api.day.app/your_key \ + -d'body=推送内容&group=分组©=复制' +``` + +##### [POST 请求支持JSON,例如:](https://bark.day.app/#/tutorial?id=post-请求支持json,例如:) + +```sh +curl -X "POST" "https://api.day.app/your_key" \ + -H 'Content-Type: application/json; charset=utf-8' \ + -d $'{ + "body": "Test Bark Server", + "title": "Test Title", + "badge": 1, + "category": "myNotificationCategory", + "sound": "minuet.caf", + "icon": "https://day.app/assets/images/avatar.jpg", + "group": "test", + "url": "https://mritd.com" +}' +``` + 
+##### [JSON 请求 key 可以放进请求体中,URL 路径须为 /push,例如](https://bark.day.app/#/tutorial?id=json-请求-key-可以放进请求体中url-路径须为-push,例如) + +```sh +curl -X "POST" "https://api.day.app/push" \ + -H 'Content-Type: application/json; charset=utf-8' \ + -d $'{ + "body": "Test Bark Server", + "title": "Test Title", + "device_key": "your_key" +}' +``` + +## [请求参数](https://bark.day.app/#/tutorial?id=请求参数) + +支持的参数列表,具体效果可在APP内预览。 + +| 参数 | 说明 | +|-----------|---------------------------------------------------------------------------------------------| +| title | 推送标题 | +| body | 推送内容 | +| level | 推送中断级别。 active:默认值,系统会立即亮屏显示通知 timeSensitive:时效性通知,可在专注状态下显示通知。 passive:仅将通知添加到通知列表,不会亮屏提醒。 | +| badge | 推送角标,可以是任意数字 | +| autoCopy | iOS14.5以下自动复制推送内容,iOS14.5以上需手动长按推送或下拉推送 | +| copy | 复制推送时,指定复制的内容,不传此参数将复制整个推送内容。 | +| sound | 可以为推送设置不同的铃声 | +| icon | 为推送设置自定义图标,设置的图标将替换默认Bark图标。 图标会自动缓存在本机,相同的图标 URL 仅下载一次。 | +| group | 对消息进行分组,推送将按group分组显示在通知中心中。 也可在历史消息列表中选择查看不同的群组。 | +| isArchive | 传 1 保存推送,传其他的不保存推送,不传按APP内设置来决定是否保存。 | +| url | 点击推送时,跳转的URL ,支持URL Scheme 和 Universal Link | diff --git a/apps/bark/data.yml b/apps/bark/data.yml new file mode 100644 index 00000000..8a45def9 --- /dev/null +++ b/apps/bark/data.yml @@ -0,0 +1,19 @@ +name: Bark +title: 通知推送工具 +description: 通知推送工具 +additionalProperties: + key: bark + name: Bark + tags: + - WebSite + - Middleware + - Tool + - Local + shortDescZh: 通知推送工具 + shortDescEn: Notification push tool + type: website + crossVersionUpdate: true + limit: 0 + website: https://bark.day.app/ + github: https://github.com/finb/bark/ + document: https://bark.day.app/ diff --git a/apps/bark/logo.png b/apps/bark/logo.png new file mode 100644 index 00000000..eba752a8 Binary files /dev/null and b/apps/bark/logo.png differ diff --git a/apps/cookie-cloud/README.md b/apps/cookie-cloud/README.md new file mode 100644 index 00000000..ef0fa387 --- /dev/null +++ b/apps/cookie-cloud/README.md @@ -0,0 +1,19 @@ +# CookieCloud + +CookieCloud 是一个用于将 cookie 与您的自托管服务器同步的小工具,允许您将浏览器 cookie +和本地存储同步到您的手机和云端。它具有内置的端到端加密功能,并允许您设置同步间隔。 + +![CookieCloud](https://github.com/easychen/CookieCloud/blob/master/images/20230121092535.png) + +## 浏览器插件 + ++ [Edge Store](https://microsoftedge.microsoft.com/addons/detail/cookiecloud/bffenpfpjikaeocaihdonmgnjjdpjkeo) + ++ [Chrome Store](https://chrome.google.com/webstore/detail/cookiecloud/ffjiejobkoibkjlhjnlgmcnnigeelbdl) + +## FAQ 常问问题 + ++ 目前,同步只是单向的,这意味着一个浏览器可以上传,而另一个浏览器可以下载。 ++ 该浏览器扩展正式支持 Chrome 和 Edge。其他基于 Chromium 的浏览器可能可以工作,但尚未经过测试。使用源代码 cd extension && + pnpm build --target=firefox-mv2 自行编译Firefox版本。 ++ 请注意,Firefox 的 cookie 格式与 Chrome 的不同,并且它们不能混合。 diff --git a/apps/cookie-cloud/data.yml b/apps/cookie-cloud/data.yml new file mode 100644 index 00000000..0fdc4cd9 --- /dev/null +++ b/apps/cookie-cloud/data.yml @@ -0,0 +1,19 @@ +name: CookieCloud +title: Cookie 同步 +description: 自架服务器同步Cookie的小工具 +additionalProperties: + key: cookie-cloud + name: CookieCloud + tags: + - WebSite + - Tool + - Runtime + - Local + shortDescZh: 自架服务器同步Cookie的小工具 + shortDescEn: A small tool to synchronize cookies on self-built servers + type: website + crossVersionUpdate: true + limit: 0 + website: https://github.com/easychen/CookieCloud/ + github: https://github.com/easychen/CookieCloud/ + document: https://github.com/easychen/CookieCloud/ diff --git a/apps/cookie-cloud/latest/data.yml b/apps/cookie-cloud/latest/data.yml new file mode 100644 index 00000000..3e1af1d1 --- /dev/null +++ b/apps/cookie-cloud/latest/data.yml @@ -0,0 +1,24 @@ +additionalProperties: + formFields: + - default: 8088 + edit: 
true + envKey: PANEL_APP_PORT_HTTP + labelEn: Port + labelZh: 端口 + required: true + rule: paramPort + type: number + - default: "/home/cookie-cloud" + edit: true + envKey: COOKIECLOUD_ROOT_PATH + labelZh: 数据持久化路径 + labelEn: Data persistence path + required: true + type: text + - default: "/" + edit: true + envKey: API_ROOT + labelZh: 网站路径 + labelEn: Web path + required: true + type: text diff --git a/apps/cookie-cloud/latest/docker-compose.yml b/apps/cookie-cloud/latest/docker-compose.yml new file mode 100644 index 00000000..aa678ed6 --- /dev/null +++ b/apps/cookie-cloud/latest/docker-compose.yml @@ -0,0 +1,21 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +services: + cookie-cloud: + image: easychen/cookiecloud:latest + container_name: ${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + networks: + - 1panel-network + ports: + - ${PANEL_APP_PORT_HTTP}:8088 + volumes: + - ${COOKIECLOUD_ROOT_PATH}/data:/data/api/data + env_file: + - .env diff --git a/apps/cookie-cloud/latest/scripts/init.sh b/apps/cookie-cloud/latest/scripts/init.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/cookie-cloud/latest/scripts/init.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/cookie-cloud/latest/scripts/uninstall.sh b/apps/cookie-cloud/latest/scripts/uninstall.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/cookie-cloud/latest/scripts/uninstall.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/cookie-cloud/latest/scripts/upgrade.sh b/apps/cookie-cloud/latest/scripts/upgrade.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/cookie-cloud/latest/scripts/upgrade.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." 
+fi diff --git a/apps/cookie-cloud/logo.png b/apps/cookie-cloud/logo.png new file mode 100644 index 00000000..00c7408c Binary files /dev/null and b/apps/cookie-cloud/logo.png differ diff --git a/apps/deeplx/0.9.5.1/data.yml b/apps/deeplx/0.9.5.1/data.yml new file mode 100644 index 00000000..da742d36 --- /dev/null +++ b/apps/deeplx/0.9.5.1/data.yml @@ -0,0 +1,39 @@ +additionalProperties: + formFields: + - default: 1188 + edit: true + envKey: PANEL_APP_PORT_HTTP + labelZh: API 端口 + labelEn: API Port + required: true + rule: paramPort + type: number + - default: "" + edit: true + envKey: TOKEN + labelZh: 访问令牌以保护您的 API + labelEn: Access Token to protect your API + required: false + type: text + - default: "" + edit: true + envKey: AUTHKEY + labelZh: DeepL官方提供的API Auth Key + labelEn: Official API Auth Key provided by DeepL + required: false + type: text + - default: "" + edit: true + envKey: DL_SESSION + labelZh: DeepL Pro 帐户 dl_session cookie + labelEn: DeepL Pro account dl_session cookie + required: false + type: text + - default: "" + edit: true + envKey: PROXY + labelZh: http代理服务器地址 + labelEn: http proxy server address + required: false + rule: paramExtUrl + type: text diff --git a/apps/deeplx/0.9.5.1/docker-compose.yml b/apps/deeplx/0.9.5.1/docker-compose.yml new file mode 100644 index 00000000..18812d58 --- /dev/null +++ b/apps/deeplx/0.9.5.1/docker-compose.yml @@ -0,0 +1,21 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +services: + deeplx: + image: qyg2297248353/deeplx:v0.9.5.1 + container_name: ${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + networks: + - 1panel-network + ports: + - ${PANEL_APP_PORT_HTTP}:${PANEL_APP_PORT_HTTP:-1188} + environment: + - PORT=${PANEL_APP_PORT_HTTP:-1188} + env_file: + - .env diff --git a/apps/deeplx/0.9.5.1/scripts/init.sh b/apps/deeplx/0.9.5.1/scripts/init.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/deeplx/0.9.5.1/scripts/init.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/deeplx/0.9.5.1/scripts/uninstall.sh b/apps/deeplx/0.9.5.1/scripts/uninstall.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/deeplx/0.9.5.1/scripts/uninstall.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/deeplx/0.9.5.1/scripts/upgrade.sh b/apps/deeplx/0.9.5.1/scripts/upgrade.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/deeplx/0.9.5.1/scripts/upgrade.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." 
+fi diff --git a/apps/deeplx/README.md b/apps/deeplx/README.md new file mode 100644 index 00000000..19a6f228 --- /dev/null +++ b/apps/deeplx/README.md @@ -0,0 +1,115 @@ +# DeepLX + +强大的 DeepL 翻译 API + +![DeepLX](https://static.deepl.com/img/appDownload/macos_app_download.svg) + +## 简介 + +DeepLX 最初由 zu1k 命名并发布,尽管 zu1k 此后已停止维护它并删除了相关存储库。 + +感谢zu1k的贡献。 + +这是zu1k设计的DeepLX Docker镜像的备份,没有任何修改。 + +## 安装说明 + +### http代理服务器地址 + +> 基本格式 + +```shell +# 无鉴权代理 +http://127.0.0.1:7890 +# 有鉴权代理 +http://:@127.0.0.1:7890 +``` + +### 访问令牌以保护您的 API + +> 请注意,如果您不使用访问令牌,您的 API 可能会被滥用。 + +接口调用时,您可以使用 `X-Access-Token` 请求头来传递访问令牌。 + +### DeepL官方提供的API Auth Key + +开通DeepL API后,您将获得一个API Auth Key,您可以使用它来访问DeepL API。 + +### DeepL Pro 帐户 dl_session cookie + +如果您有DeepL Pro帐户,您可以使用 `dl_session` cookie 来访问DeepL API。 + +## 接口简介 + +### 免费端点 + +模拟DeepL iOS客户端发起翻译请求。无限制,但在一定时间内频繁请求会导致429错误。 + +接口:`/translate` +请求方式:POST +请求头: + +| Header | Description | Value | +|:----------------|:--------------------------------------|:---------------------------| +| `Content-Type` | The content type of the request body. | `application/json` | +| `Authorization` | The access token to protect your API. | `Bearer your_access_token` | + +> 如果无法使用 `Authorization` 请求头,您可以使用 `URL Params` 传递 `Authorization`。 +> +> 例如:`/translate?token=your_access_token` + +请求体: + +| 参数名 | 字段类型 | 描述 | 是否必须 | +|:--------------|:---------|:--------|:-------| +| `text` | `string` | 待翻译字段。 | `true` | +| `source_lang` | `string` | 源语言代码 | `true` | +| `target_lang` | `string` | 目标语言代码。 | `true` | + +请求体 JSON 示例: + +```json +{ + "text": "你听说过这个吗?", + "source_lang": "ZH", + "target_lang": "EN" +} +``` + +响应体: + +```json +{ + "alternatives": [ + "Did you hear about this?", + "You've heard about this?", + "You've heard of this?" + ], + "code": 200, + "data": "Have you heard about this?", + "id": 8356681003, + "method": "Free", + "source_lang": "ZH", + "target_lang": "EN" +} +``` + +### 专业端点 + +模拟 DeepL 专业版账户发起翻译请求。无限制,可有效避免 429 问题,但需要提供专业账户的 dl_session 参数,否则无法使用。 + +接口:`/v1/translate` +请求方式:POST + +> 其他参数与免费端点相同。 + +### 官方端点 + +模拟 DeepL 官方 API 发起翻译请求。无限制,但在一定时间内频繁请求将导致 429 错误。 + +接口:`/v2/translate` +请求方式:POST + +> 其他参数与免费端点相同。 +> +> 更多官方API配置请参考 [DeepL 官方文档](https://developers.deepl.com/docs/api-reference/translate) diff --git a/apps/deeplx/data.yml b/apps/deeplx/data.yml new file mode 100644 index 00000000..71525533 --- /dev/null +++ b/apps/deeplx/data.yml @@ -0,0 +1,19 @@ +name: DeepLX +title: DeepL 免费API +type: 实用工具 +description: DeepL 免费API +additionalProperties: + key: deeplx + name: DeepLX + tags: + - WebSite + - Middleware + - Local + shortDescZh: DeepL 免费API + shortDescEn: DeepL Free API + type: tool + crossVersionUpdate: true + limit: 0 + website: https://www.deepl.com/ + github: https://github.com/OwO-Network/DeepLX + document: https://deeplx.owo.network/ diff --git a/apps/deeplx/logo.png b/apps/deeplx/logo.png new file mode 100644 index 00000000..3314de2b Binary files /dev/null and b/apps/deeplx/logo.png differ diff --git a/apps/dockge/1.4.2/data.yml b/apps/dockge/1.4.2/data.yml new file mode 100644 index 00000000..368664a9 --- /dev/null +++ b/apps/dockge/1.4.2/data.yml @@ -0,0 +1,17 @@ +additionalProperties: + formFields: + - default: "/home/dockge" + edit: true + envKey: DOCKGE_ROOT_PATH + labelZh: 数据持久化路径 + labelEn: Data persistence path + required: true + type: text + - default: 5001 + edit: true + envKey: PANEL_APP_PORT_HTTP + labelZh: WebUI 端口 + labelEn: WebUI port + required: true + rule: paramPort + type: number diff --git 
a/apps/dockge/1.4.2/docker-compose.yml b/apps/dockge/1.4.2/docker-compose.yml new file mode 100644 index 00000000..92d107f3 --- /dev/null +++ b/apps/dockge/1.4.2/docker-compose.yml @@ -0,0 +1,23 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +services: + dockge: + image: louislam/dockge:1.4.2 + container_name: ${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + networks: + - 1panel-network + ports: + - ${PANEL_APP_PORT_HTTP}:5001 + volumes: + - ${DOCKGE_ROOT_PATH}/data:/app/data + - ${DOCKGE_ROOT_PATH}/stacks:/opt/dockge/stacks + - /var/run/docker.sock:/var/run/docker.sock + environment: + - DOCKGE_STACKS_DIR=/opt/dockge/stacks diff --git a/apps/dockge/1.4.2/scripts/init.sh b/apps/dockge/1.4.2/scripts/init.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/dockge/1.4.2/scripts/init.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/dockge/1.4.2/scripts/uninstall.sh b/apps/dockge/1.4.2/scripts/uninstall.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/dockge/1.4.2/scripts/uninstall.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/dockge/1.4.2/scripts/upgrade.sh b/apps/dockge/1.4.2/scripts/upgrade.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/dockge/1.4.2/scripts/upgrade.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/dockge/README.md b/apps/dockge/README.md new file mode 100644 index 00000000..19431342 --- /dev/null +++ b/apps/dockge/README.md @@ -0,0 +1,19 @@ +# Dockge + +一个花哨、易用、反应灵敏的自托管 docker compose.yaml 堆栈型管理器。 + +![Dockge](https://file.lifebus.top/imgs/dockge_cover.png) + +## 特性 + ++ 🧑‍💼 管理您的 compose.yaml 文件 + + 创建/编辑/启动/停止/重新启动/删除 + + 更新 Docker 镜像 ++ ⌨️ compose.yaml 的交互式编辑器 ++ 🦦 交互式网络终端 ++ 🕷️ (1.4.0 🆕) 多代理支持 - 您可以在一个界面中管理来自不同 Docker 主机的多个堆栈 ++ 🏪 将 docker run ... 
命令转换为 compose.yaml ++ 📙 基于文件的结构 - Dockge 不会劫持您的撰写文件,它们会像往常一样存储在您的驱动器上。您可以使用普通的 docker compose + 命令与它们交互 ++ 🚄 反应式 - 一切都是响应式的。进度(Pull/Up/Down)和终端输出是实时的 ++ 🐣 易于使用且精美的 UI - 如果您喜欢 Uptime Kuma 的 UI/UX,您也会喜欢这个 diff --git a/apps/dockge/data.yml b/apps/dockge/data.yml new file mode 100644 index 00000000..09a5cac9 --- /dev/null +++ b/apps/dockge/data.yml @@ -0,0 +1,19 @@ +name: Dockge +title: 面向堆栈的管理器 +description: 面向堆栈的管理器 +additionalProperties: + key: dockge + name: Dockge + tags: + - WebSite + - Middleware + - Tool + - Local + shortDescZh: 面向堆栈的管理器 + shortDescEn: Stack-oriented manager + type: website + crossVersionUpdate: true + limit: 0 + website: https://dockge.kuma.pet/ + github: https://github.com/louislam/dockge/ + document: https://github.com/louislam/dockge/wiki/ diff --git a/apps/dockge/logo.png b/apps/dockge/logo.png new file mode 100644 index 00000000..a58944c2 Binary files /dev/null and b/apps/dockge/logo.png differ diff --git a/apps/elastic/8.12.0-cluster/data.yml b/apps/elastic/8.12.0-cluster/data.yml new file mode 100644 index 00000000..9b7510e3 --- /dev/null +++ b/apps/elastic/8.12.0-cluster/data.yml @@ -0,0 +1,94 @@ +additionalProperties: + formFields: + - default: "docker-cluster" + edit: true + envKey: CLUSTER_NAME + labelEn: cluster name + labelZh: 集群名称 + required: true + type: text + - default: "elastic-net" + edit: true + envKey: CLUSTER_NETWORK + labelEn: cluster network + labelZh: 集群网络 + required: true + type: text + - default: "" + edit: true + envKey: ELASTIC_PASSWORD + labelEn: Password for the 'elastic' user, Numbers and letters + labelZh: elastic 用户的密码 数字与字母组合 + required: true + random: true + type: password + - default: "" + edit: true + envKey: KIBANA_PASSWORD + labelEn: Password for the 'kibana_system' user, Numbers and letters + labelZh: kibana_system 用户的密码 数字与字母组合 + required: true + random: true + type: password + - default: "/home/elastic/cluster" + edit: true + envKey: ES_ROOT_PATH + labelEn: data persistence root path + labelZh: 数据持久化根路径 + required: true + type: text + - default: 1073741824 + edit: true + envKey: MEM_LIMIT + labelEn: Increase or decrease based on the available host memory (in bytes) + labelZh: 根据可用主机内存增加或减少(以字节为单位) + required: true + type: number + - default: "9200" + edit: true + envKey: PANEL_APP_PORT_HTTPS + labelEn: Port to expose Elasticsearch HTTP API to the host + labelZh: 开放API的端口 + required: false + type: text + - default: 5601 + edit: true + envKey: PANEL_APP_PORT_HTTP + labelEn: Port to expose Kibana to the host + labelZh: Kibana面板端口 + required: true + rule: paramPort + type: number + - default: "9300" + edit: true + envKey: ES_COMMUNICATION_PORT + labelEn: Port to expose Elasticsearch communication to the host + labelZh: Elasticsearch通讯端口 9300 + required: false + type: text + - default: "512m" + edit: true + envKey: ES_JAVA_OPTS_XMS + labelEn: JVM memory allocation pool + labelZh: JVM内存分配池 初始化内存 + required: true + type: text + - default: "512m" + edit: true + envKey: ES_JAVA_OPTS_XMX + labelEn: JVM memory allocation pool + labelZh: JVM内存分配池 运行内存 + required: true + type: text + - default: "true" + edit: true + envKey: ES_XPACK_SECURITY_ENABLED + labelEn: Enable security verification (recommended) + labelZh: 证书安全验证(推荐开启) + required: true + type: select + values: + - label: "开启" + value: "true" + - label: "关闭" + value: "false" diff --git a/apps/elastic/8.12.0-cluster/docker-compose.yml b/apps/elastic/8.12.0-cluster/docker-compose.yml new file mode 100644 index 00000000..559c4ba4 --- /dev/null +++ 
b/apps/elastic/8.12.0-cluster/docker-compose.yml @@ -0,0 +1,289 @@ +version: "3.8" + +networks: + ${DOCKER_NET}: + external: true + +services: + elastic-init: + container_name: elastic-init + image: docker.elastic.co/elasticsearch/elasticsearch:8.12.0 + networks: + - ${CLUSTER_NETWORK} + volumes: + - ${ES_ROOT_PATH}/certs:/usr/share/elasticsearch/config/certs + user: "0" + command: > + bash -c ' + if [ x${ELASTIC_PASSWORD} == x ]; then + echo "Set the ELASTIC_PASSWORD environment variable in the .env file"; + exit 1; + elif [ x${KIBANA_PASSWORD} == x ]; then + echo "Set the KIBANA_PASSWORD environment variable in the .env file"; + exit 1; + fi; + if [ ! -f config/certs/ca.zip ]; then + echo "Creating CA"; + bin/elasticsearch-certutil ca --silent --pem -out config/certs/ca.zip; + unzip config/certs/ca.zip -d config/certs; + fi; + if [ ! -f config/certs/certs.zip ]; then + echo "Creating certs"; + echo -ne \ + "instances:\n"\ + " - name: es01\n"\ + " dns:\n"\ + " - es01\n"\ + " - localhost\n"\ + " ip:\n"\ + " - 127.0.0.1\n"\ + " - name: es02\n"\ + " dns:\n"\ + " - es02\n"\ + " - localhost\n"\ + " ip:\n"\ + " - 127.0.0.1\n"\ + " - name: es03\n"\ + " dns:\n"\ + " - es03\n"\ + " - localhost\n"\ + " ip:\n"\ + " - 127.0.0.1\n"\ + > config/certs/instances.yml; + bin/elasticsearch-certutil cert --silent --pem -out config/certs/certs.zip --in config/certs/instances.yml --ca-cert config/certs/ca/ca.crt --ca-key config/certs/ca/ca.key; + unzip config/certs/certs.zip -d config/certs; + fi; + echo "Setting file permissions" + chown -R root:root config/certs; + find . -type d -exec chmod 750 \{\} \;; + find . -type f -exec chmod 640 \{\} \;; + echo "Waiting for Elasticsearch availability"; + until curl -s --cacert config/certs/ca/ca.crt https://es01:9200 | grep -q "missing authentication credentials"; do sleep 30; done; + echo "Setting kibana_system password"; + until curl -s -X POST --cacert config/certs/ca/ca.crt -u "elastic:${ELASTIC_PASSWORD}" -H "Content-Type: application/json" https://es01:9200/_security/user/kibana_system/_password -d "{\"password\":\"${KIBANA_PASSWORD}\"}" | grep -q "^{}"; do sleep 10; done; + echo "All done!"; + ' + healthcheck: + test: [ "CMD-SHELL", "[ -f config/certs/es01/es01.crt ]" ] + interval: 1s + timeout: 5s + retries: 120 + logging: + driver: "json-file" + options: + max-size: "10m" + max-file: "3" + + + es01: + depends_on: + elastic-init: + condition: service_healthy + container_name: es01 + restart: always + image: docker.elastic.co/elasticsearch/elasticsearch:8.12.0 + volumes: + - ${ES_ROOT_PATH}/certs:/usr/share/elasticsearch/config/certs + - ${ES_ROOT_PATH}/es01/data:/usr/share/elasticsearch/data + - ${ES_ROOT_PATH}/es01/logs:/usr/share/elasticsearch/logs + - ${ES_ROOT_PATH}/es01/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml + - ${ES_ROOT_PATH}/es01/plugins:/usr/share/elasticsearch/plugins + ports: + - "${PANEL_APP_PORT_HTTPS}:9200" + - "${ES_COMMUNICATION_PORT}:9300" + networks: + - ${CLUSTER_NETWORK} + environment: + - node.name=es01 + - cluster.name=${CLUSTER_NAME} + - cluster.initial_master_nodes=es01,es02,es03 + - discovery.seed_hosts=es02,es03 + - ELASTIC_PASSWORD=${ELASTIC_PASSWORD} + - bootstrap.memory_lock=true + - xpack.security.enabled=${ES_XPACK_SECURITY_ENABLED} + - xpack.security.http.ssl.enabled=${ES_XPACK_SECURITY_ENABLED} + - xpack.security.http.ssl.key=certs/es01/es01.key + - xpack.security.http.ssl.certificate=certs/es01/es01.crt + - xpack.security.http.ssl.certificate_authorities=certs/ca/ca.crt + - 
xpack.security.transport.ssl.enabled=${ES_XPACK_SECURITY_ENABLED} + - xpack.security.transport.ssl.key=certs/es01/es01.key + - xpack.security.transport.ssl.certificate=certs/es01/es01.crt + - xpack.security.transport.ssl.certificate_authorities=certs/ca/ca.crt + - xpack.security.transport.ssl.verification_mode=certificate + - xpack.license.self_generated.type=basic + - ES_JAVA_OPTS=-Xms${ES_JAVA_OPTS_XMS} -Xmx${ES_JAVA_OPTS_XMX} + mem_limit: ${MEM_LIMIT} + ulimits: + memlock: + soft: -1 + hard: -1 + nofile: + soft: 65536 + hard: 65536 + healthcheck: + test: + [ + "CMD-SHELL", + "curl -s --cacert config/certs/ca/ca.crt https://localhost:9200 | grep -q 'missing authentication credentials'", + ] + interval: 10s + timeout: 10s + retries: 120 + logging: + driver: "json-file" + options: + max-size: "10m" + max-file: "3" + + es02: + depends_on: + - es01 + container_name: es02 + restart: always + image: docker.elastic.co/elasticsearch/elasticsearch:8.12.0 + volumes: + - ${ES_ROOT_PATH}/certs:/usr/share/elasticsearch/config/certs + - ${ES_ROOT_PATH}/es02/data:/usr/share/elasticsearch/data + - ${ES_ROOT_PATH}/es02/logs:/usr/share/elasticsearch/logs + - ${ES_ROOT_PATH}/es02/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml + - ${ES_ROOT_PATH}/es02/plugins:/usr/share/elasticsearch/plugins + networks: + - ${CLUSTER_NETWORK} + environment: + - node.name=es02 + - cluster.name=${CLUSTER_NAME} + - cluster.initial_master_nodes=es01,es02,es03 + - discovery.seed_hosts=es01,es03 + - ELASTIC_PASSWORD=${ELASTIC_PASSWORD} + - bootstrap.memory_lock=true + - xpack.security.enabled=${ES_XPACK_SECURITY_ENABLED} + - xpack.security.http.ssl.enabled=${ES_XPACK_SECURITY_ENABLED} + - xpack.security.http.ssl.key=certs/es02/es02.key + - xpack.security.http.ssl.certificate=certs/es02/es02.crt + - xpack.security.http.ssl.certificate_authorities=certs/ca/ca.crt + - xpack.security.transport.ssl.enabled=${ES_XPACK_SECURITY_ENABLED} + - xpack.security.transport.ssl.key=certs/es02/es02.key + - xpack.security.transport.ssl.certificate=certs/es02/es02.crt + - xpack.security.transport.ssl.certificate_authorities=certs/ca/ca.crt + - xpack.security.transport.ssl.verification_mode=certificate + - xpack.license.self_generated.type=basic + - ES_JAVA_OPTS=-Xms${ES_JAVA_OPTS_XMS} -Xmx${ES_JAVA_OPTS_XMX} + mem_limit: ${MEM_LIMIT} + ulimits: + memlock: + soft: -1 + hard: -1 + nofile: + soft: 65536 + hard: 65536 + healthcheck: + test: + [ + "CMD-SHELL", + "curl -s --cacert config/certs/ca/ca.crt https://localhost:9200 | grep -q 'missing authentication credentials'", + ] + interval: 10s + timeout: 10s + retries: 120 + logging: + driver: "json-file" + options: + max-size: "10m" + max-file: "3" + + es03: + depends_on: + - es02 + container_name: es03 + restart: always + image: docker.elastic.co/elasticsearch/elasticsearch:8.12.0 + volumes: + - ${ES_ROOT_PATH}/certs:/usr/share/elasticsearch/config/certs + - ${ES_ROOT_PATH}/es03/data:/usr/share/elasticsearch/data + - ${ES_ROOT_PATH}/es03/logs:/usr/share/elasticsearch/logs + - ${ES_ROOT_PATH}/es03/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml + - ${ES_ROOT_PATH}/es03/plugins:/usr/share/elasticsearch/plugins + networks: + - ${CLUSTER_NETWORK} + environment: + - node.name=es03 + - cluster.name=${CLUSTER_NAME} + - cluster.initial_master_nodes=es01,es02,es03 + - discovery.seed_hosts=es01,es02 + - ELASTIC_PASSWORD=${ELASTIC_PASSWORD} + - bootstrap.memory_lock=true + - xpack.security.enabled=${ES_XPACK_SECURITY_ENABLED} + - 
xpack.security.http.ssl.enabled=${ES_XPACK_SECURITY_ENABLED} + - xpack.security.http.ssl.key=certs/es03/es03.key + - xpack.security.http.ssl.certificate=certs/es03/es03.crt + - xpack.security.http.ssl.certificate_authorities=certs/ca/ca.crt + - xpack.security.transport.ssl.enabled=${ES_XPACK_SECURITY_ENABLED} + - xpack.security.transport.ssl.key=certs/es03/es03.key + - xpack.security.transport.ssl.certificate=certs/es03/es03.crt + - xpack.security.transport.ssl.certificate_authorities=certs/ca/ca.crt + - xpack.security.transport.ssl.verification_mode=certificate + - xpack.license.self_generated.type=basic + - ES_JAVA_OPTS=-Xms${ES_JAVA_OPTS_XMS} -Xmx${ES_JAVA_OPTS_XMX} + mem_limit: ${MEM_LIMIT} + ulimits: + memlock: + soft: -1 + hard: -1 + nofile: + soft: 65536 + hard: 65536 + healthcheck: + test: + [ + "CMD-SHELL", + "curl -s --cacert config/certs/ca/ca.crt https://localhost:9200 | grep -q 'missing authentication credentials'", + ] + interval: 10s + timeout: 10s + retries: 120 + logging: + driver: "json-file" + options: + max-size: "10m" + max-file: "3" + + kibana: + depends_on: + es01: + condition: service_healthy + es02: + condition: service_healthy + es03: + condition: service_healthy + container_name: kibana-${CONTAINER_NAME} + restart: always + image: docker.elastic.co/kibana/kibana:8.12.0 + volumes: + - ${ES_ROOT_PATH}/certs:/usr/share/kibana/config/certs + - ${ES_ROOT_PATH}/kibana/data:/usr/share/kibana/data + - ${ES_ROOT_PATH}/kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml + ports: + - "${PANEL_APP_PORT_HTTP}:5601" + networks: + - ${CLUSTER_NETWORK} + environment: + - SERVERNAME=kibana + - ELASTICSEARCH_HOSTS=https://es01:9200 + - ELASTICSEARCH_USERNAME=kibana_system + - ELASTICSEARCH_PASSWORD=${KIBANA_PASSWORD} + - ELASTICSEARCH_SSL_CERTIFICATEAUTHORITIES=config/certs/ca/ca.crt + mem_limit: ${MEM_LIMIT} + healthcheck: + test: + [ + "CMD-SHELL", + "curl -s -I http://localhost:5601 | grep -q 'HTTP/1.1 302 Found'", + ] + interval: 10s + timeout: 10s + retries: 120 + logging: + driver: "json-file" + options: + max-size: "10m" + max-file: "3" diff --git a/apps/elastic/8.12.0-cluster/scripts/elasticsearch.yml b/apps/elastic/8.12.0-cluster/scripts/elasticsearch.yml new file mode 100644 index 00000000..7b3ac5ed --- /dev/null +++ b/apps/elastic/8.12.0-cluster/scripts/elasticsearch.yml @@ -0,0 +1,2 @@ +cluster.name: "docker-cluster" +network.host: 0.0.0.0 diff --git a/apps/elastic/8.12.0-cluster/scripts/init.sh b/apps/elastic/8.12.0-cluster/scripts/init.sh new file mode 100644 index 00000000..d33d402f --- /dev/null +++ b/apps/elastic/8.12.0-cluster/scripts/init.sh @@ -0,0 +1,85 @@ +#!/bin/bash + +# 检查 .env 文件是否存在 +if [ -f .env ]; then + # 导入 .env 文件中的变量 + source .env + + # 检查模板是否启用 + if [ "$MS_TEMPLATE_ENABLED" = "true" ]; then + # 检查模板文件是否存在 + if [ -e "docker-compose-template.yml" ]; then + # 读取模板文件的内容 + template_content=$( docker-compose.yml + # 将模板内容写入目标文件 + echo "$template_content" > docker-compose.yml + + echo "docker-compose.yml updated successfully." + else + echo "Error: docker-compose-template.yml not found." 
+ exit 1 + fi + fi + + # 替换 docker-compose.yml 中的网络变量 + sed -i "s/\${DOCKER_NET}/$CLUSTER_NETWORK/" docker-compose.yml + + # 创建目录 + mkdir -p "$ES_ROOT_PATH" + + mkdir -p "$ES_ROOT_PATH/certs" + + mkdir -p "$ES_ROOT_PATH/es01/data" + mkdir -p "$ES_ROOT_PATH/es02/data" + mkdir -p "$ES_ROOT_PATH/es03/data" + + mkdir -p "$ES_ROOT_PATH/es01/logs" + mkdir -p "$ES_ROOT_PATH/es02/logs" + mkdir -p "$ES_ROOT_PATH/es03/logs" + + mkdir -p "$ES_ROOT_PATH/es01/config" + mkdir -p "$ES_ROOT_PATH/es02/config" + mkdir -p "$ES_ROOT_PATH/es03/config" + + mkdir -p "$ES_ROOT_PATH/es01/plugins" + mkdir -p "$ES_ROOT_PATH/es02/plugins" + mkdir -p "$ES_ROOT_PATH/es03/plugins" + + mkdir -p "$ES_ROOT_PATH/kibana/data" + mkdir -p "$ES_ROOT_PATH/kibana/config" + + # 生成 elasticsearch.yml 文件 + elasticsearch_config="cluster.name: \"$CLUSTER_NAME\"\nnetwork.host: 0.0.0.0" + echo -e "$elasticsearch_config" > elasticsearch.yml + cp elasticsearch.yml "$ES_ROOT_PATH/es01/config/elasticsearch.yml" + cp elasticsearch.yml "$ES_ROOT_PATH/es02/config/elasticsearch.yml" + cp elasticsearch.yml "$ES_ROOT_PATH/es03/config/elasticsearch.yml" + + # 生成 kibana.yml 文件 + kibana_config="server.host: \"0.0.0.0\"\nserver.shutdownTimeout: \"5s\"\nelasticsearch.hosts: [ \"https://es01:9200\", \"https://es02:9200\", \"https://es03:9200\" ]\nmonitoring.ui.container.elasticsearch.enabled: true" + echo -e "$kibana_config" > kibana.yml + cp kibana.yml "$ES_ROOT_PATH/kibana/config/kibana.yml" + + # 清理中间文件 + rm elasticsearch.yml kibana.yml + + # 设置权限 + chmod -R 777 "$ES_ROOT_PATH" + + # 创建网络 + docker network create "$CLUSTER_NETWORK" + # 检查创建是否成功 + if [ $? -eq 0 ]; then + echo "Network $CLUSTER_NETWORK created successfully." + else + echo "Failed to create network $CLUSTER_NETWORK." + fi + + echo "Directories and permissions set successfully." + +else + echo "Error: .env file not found." + exit 1 +fi diff --git a/apps/elastic/8.12.0-cluster/scripts/kibana.yml b/apps/elastic/8.12.0-cluster/scripts/kibana.yml new file mode 100644 index 00000000..fd51e7c4 --- /dev/null +++ b/apps/elastic/8.12.0-cluster/scripts/kibana.yml @@ -0,0 +1,4 @@ +server.host: "0.0.0.0" +server.shutdownTimeout: "5s" +elasticsearch.hosts: [ "http://localhost:9200" ] +monitoring.ui.container.elasticsearch.enabled: true diff --git a/apps/elastic/8.12.0-cluster/scripts/uninstall.sh b/apps/elastic/8.12.0-cluster/scripts/uninstall.sh new file mode 100644 index 00000000..04769b43 --- /dev/null +++ b/apps/elastic/8.12.0-cluster/scripts/uninstall.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +# 检查 .env 文件是否存在 +if [ -f .env ]; then + # 导入 .env 文件中的变量 + source .env + + # 使用 docker network rm 命令删除网络 + docker network rm $CLUSTER_NETWORK + + # 检查删除是否成功 + if [ $? -eq 0 ]; then + echo "Network $CLUSTER_NETWORK deleted successfully." + else + echo "Failed to delete network $CLUSTER_NETWORK." + fi + +else + echo "Error: .env file not found." 
+ exit 1 +fi diff --git a/apps/elastic/8.12.0-node/data.yml b/apps/elastic/8.12.0-node/data.yml new file mode 100644 index 00000000..a8c3a8d6 --- /dev/null +++ b/apps/elastic/8.12.0-node/data.yml @@ -0,0 +1,99 @@ +additionalProperties: + formFields: + - default: "docker-cluster" + edit: true + envKey: CLUSTER_NAME + labelEn: cluster name + labelZh: 集群名称 与现有集群名称一致 + required: true + type: text + - default: "elastic-net" + edit: true + envKey: CLUSTER_NETWORK + labelEn: cluster network + labelZh: 集群网络 与现有集群网络一致 + required: true + type: text + - default: "es04" + edit: true + envKey: ES_NODE_NAME + labelEn: node name + labelZh: 节点名称 与现有节点名称不一致 + required: true + type: text + - default: "" + edit: true + envKey: ELASTIC_PASSWORD + labelEn: Password for the 'elastic' user, Numbers and letters + labelZh: elastic 用户的密码 数字与字母组合 + required: true + random: true + type: password + - default: "es01,es02,es03" + edit: true + envKey: ES_SEED_HOSTS + labelEn: Seed hosts + labelZh: 其他节点的名称 节点发现 + required: true + type: text + - default: "es01,es02,es03,es04" + edit: true + envKey: ES_INITIAL_MASTER_NODES + labelEn: Initial master nodes + labelZh: 主节点选举 es04为当前节点 + required: true + type: text + - default: "/home/elastic/cluster" + edit: true + envKey: ES_ROOT_PATH + labelEn: data persistence root path + labelZh: 集群根路径 与现有集群根路径一致 + required: true + type: text + - default: 1073741824 + edit: true + envKey: MEM_LIMIT + labelEn: Increase or decrease based on the available host memory (in bytes) + labelZh: 根据可用主机内存增加或减少(以字节为单位) + required: true + type: number + - default: "9200" + edit: true + envKey: PANEL_APP_PORT_HTTPS + labelEn: Port to expose Elasticsearch HTTP API to the host + labelZh: 开放API的端口 + required: false + type: text + - default: "9300" + edit: true + envKey: ES_COMMUNICATION_PORT + labelEn: Port to expose Elasticsearch communication to the host + labelZh: Elasticsearch通讯端口 9300 + required: false + type: text + - default: "512m" + edit: true + envKey: ES_JAVA_OPTS_XMS + labelEn: JVM memory allocation pool + labelZh: JVM内存分配池 初始化内存 + required: true + type: text + - default: "512m" + edit: true + envKey: ES_JAVA_OPTS_XMX + labelEn: JVM memory allocation pool + labelZh: JVM内存分配池 运行内存 + required: true + type: text + - default: "true" + edit: true + envKey: ES_XPACK_SECURITY_ENABLED + labelEn: Enable security verification (recommended) + labelZh: 证书安全验证(推荐开启) + required: true + type: select + values: + - label: "开启" + value: "true" + - label: "关闭" + value: "false" diff --git a/apps/elastic/8.12.0-node/docker-compose.yml b/apps/elastic/8.12.0-node/docker-compose.yml new file mode 100644 index 00000000..6a03e1a2 --- /dev/null +++ b/apps/elastic/8.12.0-node/docker-compose.yml @@ -0,0 +1,84 @@ +version: "3.8" + +networks: + ${DOCKER_NET}: + external: true + +services: + es-node: + container_name: ${CONTAINER_NAME}-${ES_NODE_NAME} + restart: always + image: docker.elastic.co/elasticsearch/elasticsearch:8.12.0 + volumes: + - ${ES_ROOT_PATH}/certs:/usr/share/elasticsearch/config/certs + - ${ES_ROOT_PATH}/${ES_NODE_NAME}/data:/usr/share/elasticsearch/data + - ${ES_ROOT_PATH}/${ES_NODE_NAME}/logs:/usr/share/elasticsearch/logs + - ${ES_ROOT_PATH}/${ES_NODE_NAME}/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml + - ${ES_ROOT_PATH}/${ES_NODE_NAME}/plugins:/usr/share/elasticsearch/plugins + ports: + - "${PANEL_APP_PORT_HTTPS}:9200" + - "${ES_COMMUNICATION_PORT}:9300" + networks: + - ${CLUSTER_NETWORK} + command: > + bash -c ' + echo "start es-node"; + if [ ! 
-f config/certs/${ES_NODE_NAME}.zip ]; then + echo "Creating certs"; + echo -ne \ + "instances:\n"\ + " - name: ${ES_NODE_NAME}\n"\ + " dns:\n"\ + " - ${ES_NODE_NAME}\n"\ + " - localhost\n"\ + " ip:\n"\ + " - 127.0.0.1\n"\ + > config/certs/${ES_NODE_NAME}.yml; + bin/elasticsearch-certutil cert --silent --pem -out config/certs/${ES_NODE_NAME}.zip --in config/certs/${ES_NODE_NAME}.yml --ca-cert config/certs/ca/ca.crt --ca-key config/certs/ca/ca.key; + unzip config/certs/${ES_NODE_NAME}.zip -d config/certs; + fi; + echo "Setting file permissions" + chown -R root:root config/certs; + exec /usr/local/bin/docker-entrypoint.sh elasticsearch + ' + environment: + - node.name=${ES_NODE_NAME} + - cluster.name=${CLUSTER_NAME} + - cluster.initial_master_nodes=${ES_INITIAL_MASTER_NODES} + - discovery.seed_hosts=${ES_SEED_HOSTS} + - ELASTIC_PASSWORD=${ELASTIC_PASSWORD} + - bootstrap.memory_lock=true + - xpack.security.enabled=${ES_XPACK_SECURITY_ENABLED} + - xpack.security.http.ssl.enabled=${ES_XPACK_SECURITY_ENABLED} + - xpack.security.http.ssl.key=certs/${ES_NODE_NAME}/${ES_NODE_NAME}.key + - xpack.security.http.ssl.certificate=certs/${ES_NODE_NAME}/${ES_NODE_NAME}.crt + - xpack.security.http.ssl.certificate_authorities=certs/ca/ca.crt + - xpack.security.transport.ssl.enabled=${ES_XPACK_SECURITY_ENABLED} + - xpack.security.transport.ssl.key=certs/${ES_NODE_NAME}/${ES_NODE_NAME}.key + - xpack.security.transport.ssl.certificate=certs/${ES_NODE_NAME}/${ES_NODE_NAME}.crt + - xpack.security.transport.ssl.certificate_authorities=certs/ca/ca.crt + - xpack.security.transport.ssl.verification_mode=certificate + - xpack.license.self_generated.type=basic + - ES_JAVA_OPTS=-Xms${ES_JAVA_OPTS_XMS} -Xmx${ES_JAVA_OPTS_XMX} + mem_limit: ${MEM_LIMIT} + ulimits: + memlock: + soft: -1 + hard: -1 + nofile: + soft: 65536 + hard: 65536 + healthcheck: + test: + [ + "CMD-SHELL", + "curl -s --cacert config/certs/ca/ca.crt https://localhost:9200 | grep -q 'missing authentication credentials'", + ] + interval: 10s + timeout: 10s + retries: 120 + logging: + driver: "json-file" + options: + max-size: "10m" + max-file: "3" diff --git a/apps/elastic/8.12.0-node/scripts/init.sh b/apps/elastic/8.12.0-node/scripts/init.sh new file mode 100644 index 00000000..bd41991c --- /dev/null +++ b/apps/elastic/8.12.0-node/scripts/init.sh @@ -0,0 +1,31 @@ +#!/bin/bash + +# 检查 .env 文件是否存在 +if [ -f .env ]; then + # 导入 .env 文件中的变量 + source .env + + # 替换 docker-compose.yml 中的网络变量 + sed -i "s/\${DOCKER_NET}/$CLUSTER_NETWORK/" docker-compose.yml + + # 创建并设置权限 + mkdir -p "$ES_ROOT_PATH" + + mkdir -p "$ES_ROOT_PATH/$ES_NODE_NAME/data" + mkdir -p "$ES_ROOT_PATH/$ES_NODE_NAME/logs" + mkdir -p "$ES_ROOT_PATH/$ES_NODE_NAME/config" + mkdir -p "$ES_ROOT_PATH/$ES_NODE_NAME/plugins" + + # 生成 elasticsearch.yml 文件 + elasticsearch_config="cluster.name: \"$CLUSTER_NAME\"\nnetwork.host: 0.0.0.0" + echo -e "$elasticsearch_config" > elasticsearch.yml + cp elasticsearch.yml "$ES_ROOT_PATH/$ES_NODE_NAME/config/elasticsearch.yml" + + chmod -R 777 "$ES_ROOT_PATH" + + echo "Directories and permissions set successfully." + +else + echo "Error: .env file not found." 
+ exit 1 +fi diff --git a/apps/elastic/8.12.0-single/data.yml b/apps/elastic/8.12.0-single/data.yml new file mode 100644 index 00000000..d1b267e5 --- /dev/null +++ b/apps/elastic/8.12.0-single/data.yml @@ -0,0 +1,89 @@ +additionalProperties: + formFields: + - default: "" + edit: true + envKey: ELASTIC_PASSWORD + labelEn: Password for the 'elastic' user, Numbers and letters + labelZh: elastic 用户的密码 数字与字母组合 + required: true + random: true + type: password + - default: "" + edit: true + envKey: KIBANA_PASSWORD + labelEn: Password for the 'kibana_system' user, Numbers and letters + labelZh: kibana_system 用户的密码 数字与字母组合 + required: true + random: true + type: password + - default: "/home/elastic/single" + edit: true + envKey: ES_ROOT_PATH + labelEn: data persistence root path + labelZh: 数据持久化根路径 + required: true + type: text + - default: 1073741824 + edit: true + envKey: MEM_LIMIT + labelEn: Increase or decrease based on the available host memory (in bytes) + labelZh: 根据可用主机内存增加或减少(以字节为单位) + required: true + type: number + - default: "9200" + edit: true + envKey: ES_HOST + labelEn: Access host restriction 127.0.0.1:9200 + labelZh: 主机限定 127.0.0.1:9200 + required: true + type: text + - default: 9200 + edit: true + envKey: PANEL_APP_PORT_HTTPS + labelEn: Port to expose Elasticsearch HTTP API to the host + labelZh: 开放API的端口 必须与主机限定端口一致 + required: true + rule: paramPort + type: number + - default: 5601 + edit: true + envKey: PANEL_APP_PORT_HTTP + labelEn: Port to expose Kibana to the host + labelZh: Kibana面板端口 + required: true + rule: paramPort + type: number + - default: 9300 + edit: true + envKey: ES_COMMUNICATION_PORT + labelEn: Port to expose Elasticsearch communication to the host + labelZh: Elasticsearch通讯端口 9300 + required: true + rule: paramPort + type: number + - default: "512m" + edit: true + envKey: ES_JAVA_OPTS_XMS + labelEn: JVM memory allocation pool + labelZh: JVM内存分配池 初始化内存 + required: true + type: text + - default: "512m" + edit: true + envKey: ES_JAVA_OPTS_XMX + labelEn: JVM memory allocation pool + labelZh: JVM内存分配池 运行内存 + required: true + type: text + - default: "true" + edit: true + envKey: ES_XPACK_SECURITY_ENABLED + labelEn: Enable security verification (recommended) + labelZh: 证书安全验证(推荐开启) + required: true + type: select + values: + - label: "开启" + value: "true" + - label: "关闭" + value: "false" diff --git a/apps/elastic/8.12.0-single/docker-compose.yml b/apps/elastic/8.12.0-single/docker-compose.yml new file mode 100644 index 00000000..780e9acd --- /dev/null +++ b/apps/elastic/8.12.0-single/docker-compose.yml @@ -0,0 +1,159 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +services: + es-single-init: + container_name: elastic-init + image: docker.elastic.co/elasticsearch/elasticsearch:8.12.0 + networks: + - 1panel-network + volumes: + - ${ES_ROOT_PATH}/certs:/usr/share/elasticsearch/config/certs + user: "0" + command: > + bash -c ' + if [ x${ELASTIC_PASSWORD} == x ]; then + echo "Set the ELASTIC_PASSWORD environment variable in the .env file"; + exit 1; + elif [ x${KIBANA_PASSWORD} == x ]; then + echo "Set the KIBANA_PASSWORD environment variable in the .env file"; + exit 1; + fi; + if [ ! -f config/certs/ca.zip ]; then + echo "Creating CA"; + bin/elasticsearch-certutil ca --silent --pem -out config/certs/ca.zip; + unzip config/certs/ca.zip -d config/certs; + fi; + if [ ! 
-f config/certs/certs.zip ]; then + echo "Creating certs"; + echo -ne \ + "instances:\n"\ + " - name: es-single-es01\n"\ + " dns:\n"\ + " - es-single-es01\n"\ + " - localhost\n"\ + " ip:\n"\ + " - 127.0.0.1\n"\ + > config/certs/instances.yml; + bin/elasticsearch-certutil cert --silent --pem -out config/certs/certs.zip --in config/certs/instances.yml --ca-cert config/certs/ca/ca.crt --ca-key config/certs/ca/ca.key; + unzip config/certs/certs.zip -d config/certs; + fi; + echo "Setting file permissions" + chown -R root:root config/certs; + find . -type d -exec chmod 750 \{\} \;; + find . -type f -exec chmod 640 \{\} \;; + echo "Waiting for Elasticsearch availability"; + until curl -s --cacert config/certs/ca/ca.crt https://es-single-es01:9200 | grep -q "missing authentication credentials"; do sleep 30; done; + echo "Setting kibana_system password"; + until curl -s -X POST --cacert config/certs/ca/ca.crt -u "elastic:${ELASTIC_PASSWORD}" -H "Content-Type: application/json" https://es-single-es01:9200/_security/user/kibana_system/_password -d "{\"password\":\"${KIBANA_PASSWORD}\"}" | grep -q "^{}"; do sleep 10; done; + echo "All done!"; + ' + healthcheck: + test: [ "CMD-SHELL", "[ -f config/certs/es-single-es01/es-single-es01.crt ]" ] + interval: 1s + timeout: 5s + retries: 120 + logging: + driver: "json-file" + options: + max-size: "10m" + max-file: "3" + + + es-single-es01: + depends_on: + es-single-init: + condition: service_healthy + container_name: es-single-es01 + restart: always + image: docker.elastic.co/elasticsearch/elasticsearch:8.12.0 + volumes: + - ${ES_ROOT_PATH}/certs:/usr/share/elasticsearch/config/certs + - ${ES_ROOT_PATH}/es01/data:/usr/share/elasticsearch/data + - ${ES_ROOT_PATH}/es01/logs:/usr/share/elasticsearch/logs + - ${ES_ROOT_PATH}/es01/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml + - ${ES_ROOT_PATH}/es01/plugins:/usr/share/elasticsearch/plugins + ports: + - "${PANEL_APP_PORT_HTTPS}:9200" + - "${ES_COMMUNICATION_PORT}:9300" + networks: + - 1panel-network + environment: + - discovery.type=single-node + - node.name=es-single-es01 + - ELASTIC_PASSWORD=${ELASTIC_PASSWORD} + - bootstrap.memory_lock=true + - xpack.security.enabled=${ES_XPACK_SECURITY_ENABLED} + - xpack.security.http.ssl.enabled=${ES_XPACK_SECURITY_ENABLED} + - xpack.security.http.ssl.key=certs/es-single-es01/es-single-es01.key + - xpack.security.http.ssl.certificate=certs/es-single-es01/es-single-es01.crt + - xpack.security.http.ssl.certificate_authorities=certs/ca/ca.crt + - xpack.security.transport.ssl.enabled=${ES_XPACK_SECURITY_ENABLED} + - xpack.security.transport.ssl.key=certs/es-single-es01/es-single-es01.key + - xpack.security.transport.ssl.certificate=certs/es-single-es01/es-single-es01.crt + - xpack.security.transport.ssl.certificate_authorities=certs/ca/ca.crt + - xpack.security.transport.ssl.verification_mode=certificate + - xpack.license.self_generated.type=basic + - ES_JAVA_OPTS=-Xms${ES_JAVA_OPTS_XMS} -Xmx${ES_JAVA_OPTS_XMX} + mem_limit: ${MEM_LIMIT} + ulimits: + memlock: + soft: -1 + hard: -1 + nofile: + soft: 65536 + hard: 65536 + healthcheck: + test: + [ + "CMD-SHELL", + "curl -s --cacert config/certs/ca/ca.crt https://localhost:9200 | grep -q 'missing authentication credentials'", + ] + interval: 10s + timeout: 10s + retries: 120 + logging: + driver: "json-file" + options: + max-size: "10m" + max-file: "3" + + es-single-kibana: + depends_on: + es-single-es01: + condition: service_healthy + container_name: kibana-${CONTAINER_NAME} + restart: always + image: 
docker.elastic.co/kibana/kibana:8.12.0
+    volumes:
+      - ${ES_ROOT_PATH}/certs:/usr/share/kibana/config/certs
+      - ${ES_ROOT_PATH}/kibana/data:/usr/share/kibana/data
+      - ${ES_ROOT_PATH}/kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml
+    ports:
+      - "${PANEL_APP_PORT_HTTP}:5601"
+    networks:
+      - 1panel-network
+    environment:
+      - SERVERNAME=kibana
+      - ELASTICSEARCH_HOSTS=https://es-single-es01:9200
+      - ELASTICSEARCH_USERNAME=kibana_system
+      - ELASTICSEARCH_PASSWORD=${KIBANA_PASSWORD}
+      - ELASTICSEARCH_SSL_CERTIFICATEAUTHORITIES=config/certs/ca/ca.crt
+    mem_limit: ${MEM_LIMIT}
+    healthcheck:
+      test:
+        [
+          "CMD-SHELL",
+          "curl -s -I http://localhost:5601 | grep -q 'HTTP/1.1 302 Found'",
+        ]
+      interval: 10s
+      timeout: 10s
+      retries: 120
+    logging:
+      driver: "json-file"
+      options:
+        max-size: "10m"
+        max-file: "3"
diff --git a/apps/elastic/8.12.0-single/scripts/elasticsearch.yml b/apps/elastic/8.12.0-single/scripts/elasticsearch.yml
new file mode 100644
index 00000000..7b3ac5ed
--- /dev/null
+++ b/apps/elastic/8.12.0-single/scripts/elasticsearch.yml
@@ -0,0 +1,2 @@
+cluster.name: "docker-cluster"
+network.host: 0.0.0.0
diff --git a/apps/elastic/8.12.0-single/scripts/init.sh b/apps/elastic/8.12.0-single/scripts/init.sh
new file mode 100644
index 00000000..d2099f29
--- /dev/null
+++ b/apps/elastic/8.12.0-single/scripts/init.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+
+# 检查 .env 文件是否存在
+if [ -f .env ]; then
+    # 导入 .env 文件中的变量
+    source .env
+
+    # 检查模板是否启用
+    if [ "$MS_TEMPLATE_ENABLED" = "true" ]; then
+        # 检查模板文件是否存在
+        if [ -e "docker-compose-template.yml" ]; then
+            # 读取模板文件的内容
+            template_content=$(cat docker-compose-template.yml)
+            # 将模板内容写入目标文件
+            echo "$template_content" > docker-compose.yml
+
+            echo "docker-compose.yml updated successfully."
+        else
+            echo "Error: docker-compose-template.yml not found."
+            exit 1
+        fi
+    fi
+
+    # 创建目录
+    mkdir -p "$ES_ROOT_PATH"
+
+    mkdir -p "$ES_ROOT_PATH/certs"
+
+    mkdir -p "$ES_ROOT_PATH/es01/data"
+
+    mkdir -p "$ES_ROOT_PATH/es01/logs"
+
+    mkdir -p "$ES_ROOT_PATH/es01/config"
+
+    mkdir -p "$ES_ROOT_PATH/es01/plugins"
+
+    mkdir -p "$ES_ROOT_PATH/kibana/data"
+    mkdir -p "$ES_ROOT_PATH/kibana/config"
+
+    # 生成 elasticsearch.yml 文件
+    elasticsearch_config="cluster.name: \"$CLUSTER_NAME\"\nnetwork.host: 0.0.0.0"
+    echo -e "$elasticsearch_config" > elasticsearch.yml
+    cp elasticsearch.yml "$ES_ROOT_PATH/es01/config/elasticsearch.yml"
+
+    # 生成 kibana.yml 文件
+    kibana_config="server.host: \"0.0.0.0\"\nserver.shutdownTimeout: \"5s\"\nelasticsearch.hosts: [ \"https://es01:9200\" ]\nmonitoring.ui.container.elasticsearch.enabled: true"
+    echo -e "$kibana_config" > kibana.yml
+    cp kibana.yml "$ES_ROOT_PATH/kibana/config/kibana.yml"
+
+    # 清理中间文件
+    rm elasticsearch.yml kibana.yml
+
+    # 设置权限
+    chmod -R 777 "$ES_ROOT_PATH"
+
+    echo "Directories and permissions set successfully."
+
+else
+    echo "Error: .env file not found."
+ exit 1 +fi diff --git a/apps/elastic/8.12.0-single/scripts/kibana.yml b/apps/elastic/8.12.0-single/scripts/kibana.yml new file mode 100644 index 00000000..fd51e7c4 --- /dev/null +++ b/apps/elastic/8.12.0-single/scripts/kibana.yml @@ -0,0 +1,4 @@ +server.host: "0.0.0.0" +server.shutdownTimeout: "5s" +elasticsearch.hosts: [ "http://localhost:9200" ] +monitoring.ui.container.elasticsearch.enabled: true diff --git a/apps/elastic/8.12.0-single/scripts/uninstall.sh b/apps/elastic/8.12.0-single/scripts/uninstall.sh new file mode 100644 index 00000000..04769b43 --- /dev/null +++ b/apps/elastic/8.12.0-single/scripts/uninstall.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +# 检查 .env 文件是否存在 +if [ -f .env ]; then + # 导入 .env 文件中的变量 + source .env + + # 使用 docker network rm 命令删除网络 + docker network rm $CLUSTER_NETWORK + + # 检查删除是否成功 + if [ $? -eq 0 ]; then + echo "Network $CLUSTER_NETWORK deleted successfully." + else + echo "Failed to delete network $CLUSTER_NETWORK." + fi + +else + echo "Error: .env file not found." + exit 1 +fi diff --git a/apps/elastic/README.md b/apps/elastic/README.md new file mode 100644 index 00000000..2c86f6c9 --- /dev/null +++ b/apps/elastic/README.md @@ -0,0 +1,151 @@ +# Elastic + +Elastic NV是一家美籍荷兰公司,成立于2012年,位于荷兰阿姆斯特丹,以前称为Elasticsearch。这是一家搜索公司,它构建用于搜索,日志记录,安全性,可观察性和分析用例的自我管理和软件即服务产品。 + +Elastic NV is an American-Dutch company that was founded in 2012 in Amsterdam, the Netherlands, and was previously known +as Elasticsearch. + +## 参考资料 + +Docker@Elastic: [https://www.docker.elastic.co/](https://www.docker.elastic.co/) + +DockerFiles: [https://github.com/elastic/dockerfiles](https://github.com/elastic/dockerfiles) + +GitHub Elastic: [https://github.com/elastic](https://github.com/elastic) + +官方网站: [https://www.elastic.co/](https://www.elastic.co/) + +官方文档: [https://www.elastic.co/guide/index.html](https://www.elastic.co/guide/index.html) + +## Elastic Stack + +了解可帮助您构建搜索体验、解决问题并取得成功的搜索平台 + +核心产品包括 Elasticsearch、Kibana、Beats 和 Logstash(也称为 ELK Stack)等等。能够安全可靠地从任何来源获取任何格式的数据,然后对数据进行搜索、分析和可视化。 + +### ELASTICSEARCH + KIBANA + INTEGRATIONS + +集搜索驱动型产品和功能于一身 + +Elasticsearch 和 Kibana 都是在免费开放的基础上构建而成,适用于各种各样的用例,从日志开始,到您能想到的任何项目,无一不能胜任。Elastic +具备极有价值的功能组合,如 Machine Learning、安全和 Reporting,这些功能专为 Elastic 而生,让我们独树一帜。查看 Elastic Stack +功能的完整列表。 + +#### Elasticsearch + +GitHub: [https://github.com/elastic/elasticsearch](https://github.com/elastic/elasticsearch) + +Elasticsearch 是一个基于 JSON 的分布式搜索和分析引擎。 +无论您正在查找来自特定 IP 地址的活动,还是正在分析交易请求数量为何突然飙升,或者正在方圆一公里内搜寻美食店,我们尝试解决的这些问题归根结底都是搜索问题。通过 +Elasticsearch,您可以快速存储、搜索和分析大量数据。 + +#### Kibana + +GitHub: [https://github.com/elastic/kibana](https://github.com/elastic/kibana) + +Kibana 是一个可扩展的用户界面,您可以借助它对数据进行可视化分析。 +在 Kibana 中通过炫酷的可视化来探索您的数据,从华夫饼图到热点图,再到时序数据分析,应有尽有。针对多样化数据源使用预配置仪表板,创建实时演示文稿以突出显示 +KPI,并使用单一 UI 来管理您的部署。 + +#### Integrations + +通过 Integrations,您可以使用 Elastic Stack 收集并关联数据。 +在收集、存储、搜索和分析数据时,发掘有价值的见解。使用 Elastic 代理、Beats +或网络爬虫等功能,从应用程序、基础架构和公共内容源中采集数据,在大量开箱即用型集成功能的加持下,分分钟即可开始工作。 + +## 版本介绍 + +### 集群模式 + +> 8.12.0-cluster + ++ Elasticsearch 8.12.0 x3 ++ Kibana 8.12.0 + +> 8.12.0-node + +新增节点,需要填写集群信息 + ++ Elasticsearch 8.12.0 + +### 单机模式 + +> 8.12.0-single + ++ Elasticsearch 8.12.0 ++ Kibana 8.12.0 + +> 8.12.0-elasticsearch + ++ Elasticsearch 8.12.0 + +> 8.12.0-kibana + ++ Kibana 8.12.0 + +## 安装事项 + +### 将 vm.max_map_count 设置为至少 262144 + +vm.max_map_count 内核设置必须至少设置为 262144 才能用于生产。 + +> Linux +> +> To view the current value for the vm.max_map_count setting, run: +> ```shell +> grep vm.max_map_count /etc/sysctl.conf +> ``` +> 显示值大于或等于 262144。即可,如果显示的值小于 
262144,请执行以下步骤: + +临时设置 vm.max_map_count + +```shell +sudo sysctl -w vm.max_map_count=262144 +``` + +永久设置 vm.max_map_count + +```shell +sudo vi /etc/sysctl.conf +# 文件末尾添加 +vm.max_map_count=262144 +# 生效 +sudo sysctl -p +``` + +### 增加 nofile 和 nproc 的 ulimit 值 最小值 65535 + +> Linux +> +> root 用户 与 普通用户 请注意区别很大 +> +> To view the current value for the ulimit setting, run: +> ```shell +> ulimit -n +> ``` +> 显示值大于或等于 65535。即可,如果显示的值小于 65535,请执行以下步骤: + +临时设置 ulimit + +```shell +ulimit -n 65535 +``` + +永久设置 ulimit + +**涉及服务器重启** + +```shell +sudo vi /etc/security/limits.conf +# 文件末尾添加 +root soft nofile unlimited +root hard nofile unlimited +* soft nofile 65535 +* hard nofile 65535 +# 生效 重启(重启服务器后生效!!!) +sudo reboot +``` + +## 日志配置 + +当前采用 `JSON File logging driver` 记录日志 diff --git a/apps/elastic/data.yml b/apps/elastic/data.yml new file mode 100644 index 00000000..dc828a4e --- /dev/null +++ b/apps/elastic/data.yml @@ -0,0 +1,20 @@ +name: Elastic +tags: + - 中间件 +title: Elastic +type: 中间件 +description: 分布式、RESTful 风格的搜索和数据分析引擎 +additionalProperties: + key: elastic + name: Elastic + tags: + - Middleware + shortDescZh: 分布式、RESTful 风格的搜索和数据分析引擎 + shortDescEn: Distributed, RESTful search and data analytics engine + type: runtime + crossVersionUpdate: true + limit: 0 + recommend: 0 + website: https://www.elastic.co/ + github: https://github.com/elastic + document: https://www.elastic.co/guide/index.html diff --git a/apps/elastic/logo.png b/apps/elastic/logo.png new file mode 100644 index 00000000..ca3de1eb Binary files /dev/null and b/apps/elastic/logo.png differ diff --git a/apps/emby-lovechen/4.8.0.21/data.yml b/apps/emby-lovechen/4.8.0.21/data.yml new file mode 100644 index 00000000..6c2bfb89 --- /dev/null +++ b/apps/emby-lovechen/4.8.0.21/data.yml @@ -0,0 +1,61 @@ +additionalProperties: + formFields: + - default: "host" + edit: true + envKey: NETWORK_MODE + labelEn: Drive path + labelZh: 网络模式 + required: true + type: select + values: + - label: 主机模式 + value: "host" + - label: 桥接模式 + value: "bridge" + - label: 无网络 + value: "none" + - label: 1panel-network + value: "1panel-network" + - default: 8096 + edit: true + envKey: PANEL_APP_PORT_HTTP + labelEn: WebUI Port + labelZh: 网页端口 HTTP + required: true + rule: paramPort + type: number + - default: "/home/emby" + edit: true + envKey: EMBY_ROOT_PATH + labelZh: 数据持久化路径 + labelEn: Data persistence path + required: true + type: text + - default: "" + edit: true + envKey: HTTP_SSL_PROXY + labelZh: HTTP(s) 网络代理 + labelEn: HTTP(s) Proxy + required: false + type: text + - default: "" + edit: true + envKey: CUSTOM_MOUNT_DIRECTORY_1 + labelEn: Custom mount directory 1 + labelZh: 自定义挂载目录 1 + required: false + type: text + - default: "" + edit: true + envKey: CUSTOM_MOUNT_DIRECTORY_2 + labelEn: Custom mount directory 2 + labelZh: 自定义挂载目录 2 + required: false + type: text + - default: "" + edit: true + envKey: CUSTOM_MOUNT_DIRECTORY_3 + labelEn: Custom mount directory 3 + labelZh: 自定义挂载目录 3 + required: false + type: text diff --git a/apps/emby-lovechen/4.8.0.21/docker-compose.yml b/apps/emby-lovechen/4.8.0.21/docker-compose.yml new file mode 100644 index 00000000..f442aad3 --- /dev/null +++ b/apps/emby-lovechen/4.8.0.21/docker-compose.yml @@ -0,0 +1,34 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +services: + emby: + image: lovechen/embyserver:4.8.0.21 + container_name: ${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + network_mode: ${NETWORK_MODE} + ports: + - ${PANEL_APP_PORT_HTTP}:8096 + devices: + - /dev/dri:/dev/dri + 
volumes: + - /etc/timezone:/etc/timezone + - /etc/localtime:/etc/localtime + - ${EMBY_ROOT_PATH}/config:/config + - ${EMBY_ROOT_PATH}/mnt:/mnt + - ${CUSTOM_MOUNT_DIRECTORY_1:-./default_mount_1}:${CUSTOM_MOUNT_DIRECTORY_1:-/default_mount_1} + - ${CUSTOM_MOUNT_DIRECTORY_2:-./default_mount_2}:${CUSTOM_MOUNT_DIRECTORY_2:-/default_mount_2} + - ${CUSTOM_MOUNT_DIRECTORY_3:-./default_mount_3}:${CUSTOM_MOUNT_DIRECTORY_3:-/default_mount_3} + environment: + - UID=0 + - GID=0 + - GIDLIST=0 + - NVIDIA_VISIBLE_DEVICES=all + - HTTP_PROXY=${HTTP_PROXY:-} + - HTTPS_PROXY=${HTTP_PROXY:-} + - NO_PROXY=localhost,127.0.0.1,::1 diff --git a/apps/emby-lovechen/4.8.0.21/scripts/init.sh b/apps/emby-lovechen/4.8.0.21/scripts/init.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/emby-lovechen/4.8.0.21/scripts/init.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/emby-lovechen/4.8.0.21/scripts/uninstall.sh b/apps/emby-lovechen/4.8.0.21/scripts/uninstall.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/emby-lovechen/4.8.0.21/scripts/uninstall.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/emby-lovechen/4.8.0.21/scripts/upgrade.sh b/apps/emby-lovechen/4.8.0.21/scripts/upgrade.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/emby-lovechen/4.8.0.21/scripts/upgrade.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found." 
+fi diff --git a/apps/emby-lovechen/README.md b/apps/emby-lovechen/README.md new file mode 100644 index 00000000..5e5b7b09 --- /dev/null +++ b/apps/emby-lovechen/README.md @@ -0,0 +1,36 @@ +# Emby + +_Emby Media Server 特别版 | AMD64/ARM32/ARM64_ + +**愿你生而自由。** + +Emby是一个主从式架构的媒体服务器软件,可以用来整理服务器上的视频和音频,并将音频和视频流式传输到客户端设备。 + +![Emby](https://file.lifebus.top/imgs/emby_cover.png) + +## 简介 + +Emby(原名Media Browser)是一个主从式架构的媒体服务器软件,可以用来整理服务器上的视频和音频,并将音频和视频流式传输到客户端设备。 + +Emby服务器端支持Microsoft Windows、Linux、MacOS、FreeBSD,客户端支持HTML5网页,Android和IOS等移动操作系统,Roku、Amazon Fire +TV、Chromecast和Apple TV等流媒体设备,LG智能电视和三星智能电视等智能电视,以及PlayStation3、PlayStation4、Xbox 360和Xbox +One等游戏机。 + +Emby原本是大部分源代码是开源的,带有部分闭源工具,但是自从3.5.3版本开始变为闭源软件,Jellyfin为Emby开源分支基础上发展来的。 + +## 安装说明 + ++ 开启 `投屏服务(DLNA)` 与 `网络唤醒服务(WOL)` 功能 + +开启后,可以在局域网内的设备上投屏观看视频。 需要选择主机网络(host)模式。 + +## 特别版说明 + +额外修改: + ++ 完全离线不需要服务器验证 ++ 搜索相关 ++ 支持单字搜索 ++ 支持模糊搜索 ++ 可直接搜索剧季标题 ++ 去除自动更新 diff --git a/apps/emby-lovechen/data.yml b/apps/emby-lovechen/data.yml new file mode 100644 index 00000000..3cd01205 --- /dev/null +++ b/apps/emby-lovechen/data.yml @@ -0,0 +1,19 @@ +name: Emby 开心版 +tags: + - 多媒体 +title: 媒体服务器 +description: 主从式架构的媒体服务器软件 +additionalProperties: + key: emby-lovechen + name: Emby 开心版 + tags: + - Media + - Local + shortDescZh: 主从式架构的媒体服务器软件 + shortDescEn: A media server software with master-slave architecture + type: website + crossVersionUpdate: true + limit: 0 + website: https://hub.docker.com/r/lovechen/embyserver + github: https://hub.docker.com/r/lovechen/embyserver + document: https://hub.docker.com/r/lovechen/embyserver diff --git a/apps/emby-lovechen/logo.png b/apps/emby-lovechen/logo.png new file mode 100644 index 00000000..a9d49678 Binary files /dev/null and b/apps/emby-lovechen/logo.png differ diff --git a/apps/emby/4.8.8.0/data.yml b/apps/emby/4.8.8.0/data.yml new file mode 100644 index 00000000..ff8670d9 --- /dev/null +++ b/apps/emby/4.8.8.0/data.yml @@ -0,0 +1,61 @@ +additionalProperties: + formFields: + - default: "host" + edit: true + envKey: NETWORK_MODE + labelZh: 网络模式 + labelEn: Drive path + required: true + type: select + values: + - label: 主机模式 + value: "host" + - label: 桥接模式 + value: "bridge" + - label: 无网络 + value: "none" + - label: 1panel-network + value: "1panel-network" + - default: 8096 + edit: true + envKey: PANEL_APP_PORT_HTTP + labelZh: 网页端口 + labelEn: WebUI Port + required: true + rule: paramPort + type: number + - default: "/home/emby" + edit: true + envKey: EMBY_ROOT_PATH + labelZh: 数据持久化路径 + labelEn: Data persistence path + required: true + type: text + - default: "" + edit: true + envKey: HTTP_SSL_PROXY + labelZh: HTTP(s) 网络代理 + labelEn: HTTP(s) Proxy + required: false + type: text + - default: "" + edit: true + envKey: CUSTOM_MOUNT_DIRECTORY_1 + labelEn: Custom mount directory 1 + labelZh: 自定义挂载目录 1 + required: false + type: text + - default: "" + edit: true + envKey: CUSTOM_MOUNT_DIRECTORY_2 + labelEn: Custom mount directory 2 + labelZh: 自定义挂载目录 2 + required: false + type: text + - default: "" + edit: true + envKey: CUSTOM_MOUNT_DIRECTORY_3 + labelEn: Custom mount directory 3 + labelZh: 自定义挂载目录 3 + required: false + type: text diff --git a/apps/emby/4.8.8.0/docker-compose.yml b/apps/emby/4.8.8.0/docker-compose.yml new file mode 100644 index 00000000..9f58f545 --- /dev/null +++ b/apps/emby/4.8.8.0/docker-compose.yml @@ -0,0 +1,33 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +services: + emby: + image: emby/embyserver:4.8.8.0 + container_name: ${CONTAINER_NAME} + labels: + createdBy: "Apps" + 
restart: always + network_mode: ${NETWORK_MODE} + ports: + - ${PANEL_APP_PORT_HTTP}:8096 + devices: + - /dev/dri:/dev/dri + volumes: + - /etc/timezone:/etc/timezone + - /etc/localtime:/etc/localtime + - ${EMBY_ROOT_PATH}/config:/config + - ${EMBY_ROOT_PATH}/mnt:/mnt + - ${CUSTOM_MOUNT_DIRECTORY_1:-./default_mount_1}:${CUSTOM_MOUNT_DIRECTORY_1:-/default_mount_1} + - ${CUSTOM_MOUNT_DIRECTORY_2:-./default_mount_2}:${CUSTOM_MOUNT_DIRECTORY_2:-/default_mount_2} + - ${CUSTOM_MOUNT_DIRECTORY_3:-./default_mount_3}:${CUSTOM_MOUNT_DIRECTORY_3:-/default_mount_3} + environment: + - UID=0 + - GID=0 + - GIDLIST=0 + - HTTP_PROXY=${HTTP_PROXY:-} + - HTTPS_PROXY=${HTTP_PROXY:-} + - NO_PROXY=localhost,127.0.0.1,::1 diff --git a/apps/emby/4.8.8.0/scripts/init.sh b/apps/emby/4.8.8.0/scripts/init.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/emby/4.8.8.0/scripts/init.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/emby/4.8.8.0/scripts/uninstall.sh b/apps/emby/4.8.8.0/scripts/uninstall.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/emby/4.8.8.0/scripts/uninstall.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/emby/4.8.8.0/scripts/upgrade.sh b/apps/emby/4.8.8.0/scripts/upgrade.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/emby/4.8.8.0/scripts/upgrade.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found." 
+fi diff --git a/apps/emby/README.md b/apps/emby/README.md new file mode 100644 index 00000000..b9bb03d8 --- /dev/null +++ b/apps/emby/README.md @@ -0,0 +1,21 @@ +# Emby + +Emby是一个主从式架构的媒体服务器软件,可以用来整理服务器上的视频和音频,并将音频和视频流式传输到客户端设备。 + +![Emby](https://file.lifebus.top/imgs/emby_cover.png) + +## 简介 + +Emby(原名Media Browser)是一个主从式架构的媒体服务器软件,可以用来整理服务器上的视频和音频,并将音频和视频流式传输到客户端设备。 + +Emby服务器端支持Microsoft Windows、Linux、MacOS、FreeBSD,客户端支持HTML5网页,Android和IOS等移动操作系统,Roku、Amazon Fire +TV、Chromecast和Apple TV等流媒体设备,LG智能电视和三星智能电视等智能电视,以及PlayStation3、PlayStation4、Xbox 360和Xbox +One等游戏机。 + +Emby原本是大部分源代码是开源的,带有部分闭源工具,但是自从3.5.3版本开始变为闭源软件,Jellyfin为Emby开源分支基础上发展来的。 + +## 安装说明 + ++ 开启 `投屏服务(DLNA)` 与 `网络唤醒服务(WOL)` 功能 + +开启后,可以在局域网内的设备上投屏观看视频。 需要选择主机网络(host)模式。 diff --git a/apps/emby/data.yml b/apps/emby/data.yml new file mode 100644 index 00000000..56109f43 --- /dev/null +++ b/apps/emby/data.yml @@ -0,0 +1,18 @@ +name: Emby +title: 媒体服务器 +description: 主从式架构的媒体服务器软件 +additionalProperties: + key: emby + name: Emby + tags: + - WebSite + - Media + - Local + shortDescZh: 主从式架构的媒体服务器软件 + shortDescEn: A media server software with master-slave architecture + type: website + crossVersionUpdate: true + limit: 0 + website: https://emby.media/ + github: https://github.com/MediaBrowser/Emby/ + document: https://emby.media/blog/ diff --git a/apps/emby/logo.png b/apps/emby/logo.png new file mode 100644 index 00000000..a9d49678 Binary files /dev/null and b/apps/emby/logo.png differ diff --git a/apps/gitea-runner/0.2.10/data.yml b/apps/gitea-runner/0.2.10/data.yml new file mode 100644 index 00000000..bea368a1 --- /dev/null +++ b/apps/gitea-runner/0.2.10/data.yml @@ -0,0 +1,37 @@ +additionalProperties: + formFields: + - default: "/home/gitea" + edit: true + envKey: GITEA_RUNNER_ROOT_PATH + labelZh: 数据持久化路径 + labelEn: Data persistence path + required: true + type: text + - default: "" + edit: true + envKey: GITEA_INSTANCE_URL + labelZh: Gitea 实例 URL + labelEn: Gitea instance URL + required: true + type: text + - default: "" + edit: true + envKey: GITEA_RUNNER_REGISTRATION_TOKEN + labelZh: 注册令牌 + labelEn: Registration token + required: true + type: text + - default: "" + edit: true + envKey: GITEA_RUNNER_NAME + labelZh: Runner 名称 + labelEn: Runner name + required: true + type: text + - default: "" + edit: true + envKey: GITEA_RUNNER_LABELS + labelZh: Runner 标签 + labelEn: Runner labels + required: true + type: text diff --git a/apps/gitea-runner/0.2.10/docker-compose.yml b/apps/gitea-runner/0.2.10/docker-compose.yml new file mode 100644 index 00000000..30685387 --- /dev/null +++ b/apps/gitea-runner/0.2.10/docker-compose.yml @@ -0,0 +1,24 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +services: + gitea: + image: gitea/act_runner:0.2.10 + container_name: ${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + networks: + - 1panel-network + privileged: true + env_file: + - /etc/1panel/envs/global.env + - ${ENV_FILE:-/etc/1panel/envs/default.env} + volumes: + - ${GITEA_RUNNER_ROOT_PATH}/data:/data + - /etc/timezone:/etc/timezone:ro + - /etc/localtime:/etc/localtime:ro + - /var/run/docker.sock:/var/run/docker.sock diff --git a/apps/gitea-runner/0.2.10/scripts/init.sh b/apps/gitea-runner/0.2.10/scripts/init.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/gitea-runner/0.2.10/scripts/init.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." 
+fi diff --git a/apps/gitea-runner/0.2.10/scripts/uninstall.sh b/apps/gitea-runner/0.2.10/scripts/uninstall.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/gitea-runner/0.2.10/scripts/uninstall.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/gitea-runner/0.2.10/scripts/upgrade.sh b/apps/gitea-runner/0.2.10/scripts/upgrade.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/gitea-runner/0.2.10/scripts/upgrade.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/gitea-runner/README.md b/apps/gitea-runner/README.md new file mode 100644 index 00000000..b604ed37 --- /dev/null +++ b/apps/gitea-runner/README.md @@ -0,0 +1,71 @@ +# Gitea + +Gitea 是一个轻量级的 DevOps 平台软件。 + +![Gitea](https://about.gitea.com/img/home-screenshot.png) + +Gitea 是一个轻量级的 DevOps 平台软件。从开发计划到产品成型的整个软件生命周期,他都能够高效而轻松的帮助团队和开发者。包括 +Git 托管、代码审查、团队协作、软件包注册和 CI/CD。它与 GitHub、Bitbucket 和 GitLab 等比较类似。 Gitea 最初是从 Gogs +分支而来,几乎所有代码都已更改。 + +## 特性 + ++ 代码托管 + +Gitea⽀持创建和管理仓库、浏览提交历史和代码⽂件、审查和合并代码提交、管理协作者、管理分⽀等。它还⽀持许多常见的Git特性,⽐如标签、Cherry-pick、hook、集成协作⼯具等。 + ++ 轻量级和快速 + +Gitea 的设计目标之一就是轻量级和快速响应。它不像一些大型的代码托管平台那样臃肿,因此在性能方面表现出色,适用于资源有限的服务器环境。由于其轻量级设计,Gitea +在资源消耗方面相对较低,可以在资源有限的环境下运行良好。 + ++ 易于部署和维护 + +轻松地部署在各种服务器上,不需要复杂的配置和依赖。这使得个人开发者或小团队可以方便地设置和管理自己的 Git 服务。 + ++ 安全性 + +Gitea 注重安全性,提供了用户权限管理、访问控制列表等功能,可以确保代码和数据的安全性。 + ++ 代码评审 + +代码评审同时支持 Pull Request workflow 和 AGit workflow。评审⼈可以在线浏览代码,并提交评审意见或问题。 提交者可以接收到评审意见,并在线回 +复或修改代码。代码评审可以帮助用户和企业提⾼代码质量。 + ++ CI/CD + +Gitea Actions⽀持 CI/CD 功能,该功能兼容 GitHub Actions,⽤⼾可以采用熟悉的YAML格式编写workflows,也可以重⽤⼤量的已有的 Actions +插件。Actions 插件支持从任意的 Git 网站中下载。 + +项目管理:Gitea 通过看板和⼯单来跟踪⼀个项⽬的需求,功能和bug。⼯单⽀持分支,标签、⾥程碑、 指派、时间跟踪、到期时间、依赖关系等功能。 + ++ 制品库 + +Gitea支持超过 20 种不同种类的公有或私有软件包管理,包括:Cargo, Chef, Composer, Conan, Conda, Container, Helm, Maven, npm, +NuGet, Pub, PyPI, RubyGems, Vagrant等 + ++ 开源社区支持 + +Gitea 是一个基于 MIT 许可证的开源项目,Gitea 拥有一个活跃的开源社区,能够持续地进行开发和改进,同时也积极接受社区贡献,保持了平台的更新和创新。 + ++ 多语言支持 + +Gitea 提供多种语言界面,适应全球范围内的用户,促进了国际化和本地化。 + +## 反向代理 + +> Nginx + +```nginx +location / { + proxy_pass http://localhost:3000; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; +} +``` + +## 安装说明 + +这是Gitea Actions的Runner,用于在Gitea Actions中执行任务。 diff --git a/apps/gitea-runner/data.yml b/apps/gitea-runner/data.yml new file mode 100644 index 00000000..bed5596b --- /dev/null +++ b/apps/gitea-runner/data.yml @@ -0,0 +1,19 @@ +name: Gitea Runner +title: 新一代的代码托管平台 +description: 新一代的代码托管平台 +additionalProperties: + key: gitea-runner + name: Gitea Runner + tags: + - WebSite + - DevOps + - Storage + - Local + shortDescZh: 新一代的代码托管平台 + shortDescEn: The next generation of code hosting platform + type: website + crossVersionUpdate: true + limit: 0 + website: https://gitea.io/ + github: https://github.com/go-gitea/gitea/ + document: https://docs.gitea.io/ diff --git a/apps/gitea-runner/logo.png b/apps/gitea-runner/logo.png new file mode 100644 index 00000000..82732ec6 Binary files /dev/null and b/apps/gitea-runner/logo.png differ diff --git a/apps/gitea/1.22.1/config/app.example.ini b/apps/gitea/1.22.1/config/app.example.ini new file mode 100644 index 00000000..c29d2e5b --- /dev/null +++ b/apps/gitea/1.22.1/config/app.example.ini @@ -0,0 +1,2706 @@ +; This file 
lists the default values used by Gitea +;; Copy required sections to your own app.ini (default is custom/conf/app.ini) +;; and modify as needed. +;; Do not copy the whole file as-is, as it contains some invalid sections for illustrative purposes. +;; If you don't know what a setting is you should not set it. +;; +;; see https://docs.gitea.com/administration/config-cheat-sheet for additional documentation. + + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Default Configuration (non-`app.ini` configuration) +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; These values are environment-dependent but form the basis of a lot of values. They will be +;; reported as part of the default configuration when running `gitea help` or on start-up. The order they are emitted there is slightly different but we will list them here in the order they are set-up. +;; +;; - _`AppPath`_: This is the absolute path of the running gitea binary. +;; - _`AppWorkPath`_: This refers to "working path" of the `gitea` binary. It is determined by using the first set thing in the following hierarchy: +;; - The "WORK_PATH" option in "app.ini" file +;; - The `--work-path` flag passed to the binary +;; - The environment variable `$GITEA_WORK_DIR` +;; - A built-in value set at build time (see building from source) +;; - Otherwise it defaults to the directory of the _`AppPath`_ +;; - If any of the above are relative paths then they are made absolute against the directory of the _`AppPath`_ +;; - _`CustomPath`_: This is the base directory for custom templates and other options. It is determined by using the first set thing in the following hierarchy: +;; - The `--custom-path` flag passed to the binary +;; - The environment variable `$GITEA_CUSTOM` +;; - A built-in value set at build time (see building from source) +;; - Otherwise it defaults to _`AppWorkPath`_`/custom` +;; - If any of the above are relative paths then they are made absolute against the directory of the _`AppWorkPath`_ +;; - _`CustomConf`_: This is the path to the `app.ini` file. +;; - The `--config` flag passed to the binary +;; - A built-in value set at build time (see building from source) +;; - Otherwise it defaults to _`CustomPath`_`/conf/app.ini` +;; - If any of the above are relative paths then they are made absolute against the directory of the _`CustomPath`_ +;; +;; In addition there is _`StaticRootPath`_ which can be set as a built-in at build time, but will otherwise default to _`AppWorkPath`_ + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; General Settings +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; App name that shows in every page title +APP_NAME = ; Gitea: Git with a cup of tea +;; +;; RUN_USER will automatically detect the current user - but you can set it here change it if you run locally +RUN_USER = ; git +;; +;; Application run mode, affects performance and debugging: "dev" or "prod", default is "prod" +;; Mode "dev" makes Gitea easier to develop and debug, values other than "dev" are treated as "prod" which is for production use. +;RUN_MODE = prod +;; +;; The working directory, see the comment of AppWorkPath above +;WORK_PATH = + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +[server] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; The protocol the server listens on. One of 'http', 'https', 'http+unix', 'fcgi' or 'fcgi+unix'. 
Defaults to 'http' +;; Note: Value must be lowercase. +;PROTOCOL = http +;; +;; Expect PROXY protocol headers on connections +;USE_PROXY_PROTOCOL = false +;; +;; Use PROXY protocol in TLS Bridging mode +;PROXY_PROTOCOL_TLS_BRIDGING = false +;; +; Timeout to wait for PROXY protocol header (set to 0 to have no timeout) +;PROXY_PROTOCOL_HEADER_TIMEOUT=5s +;; +; Accept PROXY protocol headers with UNKNOWN type +;PROXY_PROTOCOL_ACCEPT_UNKNOWN=false +;; +;; Set the domain for the server +;DOMAIN = localhost +;; +;; Overwrite the automatically generated public URL. Necessary for proxies and docker. +;ROOT_URL = %(PROTOCOL)s://%(DOMAIN)s:%(HTTP_PORT)s/ +;; +;; For development purpose only. It makes Gitea handle sub-path ("/sub-path/owner/repo/...") directly when debugging without a reverse proxy. +;; DO NOT USE IT IN PRODUCTION!!! +;USE_SUB_URL_PATH = false +;; +;; when STATIC_URL_PREFIX is empty it will follow ROOT_URL +;STATIC_URL_PREFIX = +;; +;; The address to listen on. Either a IPv4/IPv6 address or the path to a unix socket. +;; If PROTOCOL is set to `http+unix` or `fcgi+unix`, this should be the name of the Unix socket file to use. +;; Relative paths will be made absolute against the _`AppWorkPath`_. +;HTTP_ADDR = 0.0.0.0 +;; +;; The port to listen on. Leave empty when using a unix socket. +;HTTP_PORT = 3000 +;; +;; If REDIRECT_OTHER_PORT is true, and PROTOCOL is set to https an http server +;; will be started on PORT_TO_REDIRECT and it will redirect plain, non-secure http requests to the main +;; ROOT_URL. Defaults are false for REDIRECT_OTHER_PORT and 80 for +;; PORT_TO_REDIRECT. +;REDIRECT_OTHER_PORT = false +;PORT_TO_REDIRECT = 80 +;; +;; expect PROXY protocol header on connections to https redirector. +;REDIRECTOR_USE_PROXY_PROTOCOL = %(USE_PROXY_PROTOCOL)s +;; Minimum and maximum supported TLS versions +;SSL_MIN_VERSION=TLSv1.2 +;SSL_MAX_VERSION= +;; +;; SSL Curve Preferences +;SSL_CURVE_PREFERENCES=X25519,P256 +;; +;; SSL Cipher Suites +;SSL_CIPHER_SUITES=; Will default to "ecdhe_ecdsa_with_aes_256_gcm_sha384,ecdhe_rsa_with_aes_256_gcm_sha384,ecdhe_ecdsa_with_aes_128_gcm_sha256,ecdhe_rsa_with_aes_128_gcm_sha256,ecdhe_ecdsa_with_chacha20_poly1305,ecdhe_rsa_with_chacha20_poly1305" if aes is supported by hardware, otherwise chacha will be first. +;; +;; Timeout for any write to the connection. (Set to -1 to disable all timeouts.) +;PER_WRITE_TIMEOUT = 30s +;; +;; Timeout per Kb written to connections. +;PER_WRITE_PER_KB_TIMEOUT = 30s +;; +;; Permission for unix socket +;UNIX_SOCKET_PERMISSION = 666 +;; +;; Local (DMZ) URL for Gitea workers (such as SSH update) accessing web service. In +;; most cases you do not need to change the default value. Alter it only if +;; your SSH server node is not the same as HTTP node. For different protocol, the default +;; values are different. If `PROTOCOL` is `http+unix`, the default value is `http://unix/`. +;; If `PROTOCOL` is `fcgi` or `fcgi+unix`, the default value is `%(PROTOCOL)s://%(HTTP_ADDR)s:%(HTTP_PORT)s/`. +;; If listen on `0.0.0.0`, the default value is `%(PROTOCOL)s://localhost:%(HTTP_PORT)s/`, Otherwise the default +;; value is `%(PROTOCOL)s://%(HTTP_ADDR)s:%(HTTP_PORT)s/`. +;LOCAL_ROOT_URL = %(PROTOCOL)s://%(HTTP_ADDR)s:%(HTTP_PORT)s/ +;; +;; When making local connections pass the PROXY protocol header. +;LOCAL_USE_PROXY_PROTOCOL = %(USE_PROXY_PROTOCOL)s +;; +;; Disable SSH feature when not available +;DISABLE_SSH = false +;; +;; Whether to use the builtin SSH server or not. 
+;START_SSH_SERVER = false +;; +;; Expect PROXY protocol header on connections to the built-in SSH server +;SSH_SERVER_USE_PROXY_PROTOCOL = false +;; +;; Username to use for the builtin SSH server. If blank, then it is the value of RUN_USER. +;BUILTIN_SSH_SERVER_USER = %(RUN_USER)s +;; +;; Domain name to be exposed in clone URL +;SSH_DOMAIN = %(DOMAIN)s +;; +;; SSH username displayed in clone URLs. +;SSH_USER = %(BUILTIN_SSH_SERVER_USER)s +;; +;; The network interface the builtin SSH server should listen on +;SSH_LISTEN_HOST = +;; +;; Port number to be exposed in clone URL +;SSH_PORT = 22 +;; +;; The port number the builtin SSH server should listen on +;SSH_LISTEN_PORT = %(SSH_PORT)s +;; +;; Root path of SSH directory, default is '~/.ssh', but you have to use '/home/git/.ssh'. +;SSH_ROOT_PATH = +;; +;; Gitea will create a authorized_keys file by default when it is not using the internal ssh server +;; If you intend to use the AuthorizedKeysCommand functionality then you should turn this off. +;SSH_CREATE_AUTHORIZED_KEYS_FILE = true +;; +;; Gitea will create a authorized_principals file by default when it is not using the internal ssh server +;; If you intend to use the AuthorizedPrincipalsCommand functionality then you should turn this off. +;SSH_CREATE_AUTHORIZED_PRINCIPALS_FILE = true +;; +;; For the built-in SSH server, choose the ciphers to support for SSH connections, +;; for system SSH this setting has no effect +;SSH_SERVER_CIPHERS = chacha20-poly1305@openssh.com, aes128-ctr, aes192-ctr, aes256-ctr, aes128-gcm@openssh.com, aes256-gcm@openssh.com +;; +;; For the built-in SSH server, choose the key exchange algorithms to support for SSH connections, +;; for system SSH this setting has no effect +;SSH_SERVER_KEY_EXCHANGES = curve25519-sha256, ecdh-sha2-nistp256, ecdh-sha2-nistp384, ecdh-sha2-nistp521, diffie-hellman-group14-sha256, diffie-hellman-group14-sha1 +;; +;; For the built-in SSH server, choose the MACs to support for SSH connections, +;; for system SSH this setting has no effect +;SSH_SERVER_MACS = hmac-sha2-256-etm@openssh.com, hmac-sha2-256, hmac-sha1 +;; +;; For the built-in SSH server, choose the keypair to offer as the host key +;; The private key should be at SSH_SERVER_HOST_KEY and the public SSH_SERVER_HOST_KEY.pub +;; relative paths are made absolute relative to the %(APP_DATA_PATH)s +;SSH_SERVER_HOST_KEYS=ssh/gitea.rsa, ssh/gogs.rsa +;; +;; Directory to create temporary files in when testing public keys using ssh-keygen, +;; default is the system temporary directory. +;SSH_KEY_TEST_PATH = +;; +;; Use `ssh-keygen` to parse public SSH keys. The value is passed to the shell. By default, Gitea does the parsing itself. +;SSH_KEYGEN_PATH = +;; +;; Enable SSH Authorized Key Backup when rewriting all keys, default is false +;SSH_AUTHORIZED_KEYS_BACKUP = false +;; +;; Determines which principals to allow +;; - empty: if SSH_TRUSTED_USER_CA_KEYS is empty this will default to off, otherwise will default to email, username. +;; - off: Do not allow authorized principals +;; - email: the principal must match the user's email +;; - username: the principal must match the user's username +;; - anything: there will be no checking on the content of the principal +;SSH_AUTHORIZED_PRINCIPALS_ALLOW = email, username +;; +;; Enable SSH Authorized Principals Backup when rewriting all keys, default is true +;SSH_AUTHORIZED_PRINCIPALS_BACKUP = true +;; +;; Specifies the public keys of certificate authorities that are trusted to sign user certificates for authentication. 
+;; Multiple keys should be comma separated. +;; E.g."ssh- ". or "ssh- , ssh- ". +;; For more information see "TrustedUserCAKeys" in the sshd config manpages. +;SSH_TRUSTED_USER_CA_KEYS = +;; Absolute path of the `TrustedUserCaKeys` file gitea will manage. +;; Default this `RUN_USER`/.ssh/gitea-trusted-user-ca-keys.pem +;; If you're running your own ssh server and you want to use the gitea managed file you'll also need to modify your +;; sshd_config to point to this file. The official docker image will automatically work without further configuration. +;SSH_TRUSTED_USER_CA_KEYS_FILENAME = +;; +;; Enable exposure of SSH clone URL to anonymous visitors, default is false +;SSH_EXPOSE_ANONYMOUS = false +;; +;; Timeout for any write to ssh connections. (Set to -1 to disable all timeouts.) +;; Will default to the PER_WRITE_TIMEOUT. +;SSH_PER_WRITE_TIMEOUT = 30s +;; +;; Timeout per Kb written to ssh connections. +;; Will default to the PER_WRITE_PER_KB_TIMEOUT. +;SSH_PER_WRITE_PER_KB_TIMEOUT = 30s +;; +;; Indicate whether to check minimum key size with corresponding type +;MINIMUM_KEY_SIZE_CHECK = false +;; +;; Disable CDN even in "prod" mode +;OFFLINE_MODE = true +;; +;; TLS Settings: Either ACME or manual +;; (Other common TLS configuration are found before) +;ENABLE_ACME = false +;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; ACME automatic TLS settings +;; +;; ACME directory URL (e.g. LetsEncrypt's staging/testing URL: https://acme-staging-v02.api.letsencrypt.org/directory) +;; Leave empty to default to LetsEncrypt's (production) URL +;ACME_URL = +;; +;; Explicitly accept the ACME's TOS. The specific TOS cannot be retrieved at the moment. +;ACME_ACCEPTTOS = false +;; +;; If the ACME CA is not in your system's CA trust chain, it can be manually added here +;ACME_CA_ROOT = +;; +;; Email used for the ACME registration service +;; Can be left blank to initialize at first run and use the cached value +;ACME_EMAIL = +;; +;; ACME live directory (not to be confused with ACME directory URL: ACME_URL) +;; (Refer to caddy's ACME manager https://github.com/caddyserver/certmagic) +;ACME_DIRECTORY = https +;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; Manual TLS settings: (Only applicable if ENABLE_ACME=false) +;; +;; Generate steps: +;; $ ./gitea cert -ca=true -duration=8760h0m0s -host=myhost.example.com +;; +;; Or from a .pfx file exported from the Windows certificate store (do +;; not forget to export the private key): +;; $ openssl pkcs12 -in cert.pfx -out cert.pem -nokeys +;; $ openssl pkcs12 -in cert.pfx -out key.pem -nocerts -nodes +;; Paths are relative to CUSTOM_PATH +;CERT_FILE = https/cert.pem +;KEY_FILE = https/key.pem +;; +;; Root directory containing templates and static files. 
+;; default is the path where Gitea is executed +;STATIC_ROOT_PATH = ; Will default to the built-in value _`StaticRootPath`_ +;; +;; Default path for App data +;APP_DATA_PATH = data ; relative paths will be made absolute with _`AppWorkPath`_ +;; +;; Enable gzip compression for runtime-generated content, static resources excluded +;ENABLE_GZIP = false +;; +;; Application profiling (memory and cpu) +;; For "web" command it listens on localhost:6060 +;; For "serve" command it dumps to disk at PPROF_DATA_PATH as (cpuprofile|memprofile)__ +;ENABLE_PPROF = false +;; +;; PPROF_DATA_PATH, use an absolute path when you start gitea as service +;PPROF_DATA_PATH = data/tmp/pprof ; Path is relative to _`AppWorkPath`_ +;; +;; Landing page, can be "home", "explore", "organizations", "login", or any URL such as "/org/repo" or even "https://anotherwebsite.com" +;; The "login" choice is not a security measure but just a UI flow change, use REQUIRE_SIGNIN_VIEW to force users to log in. +;LANDING_PAGE = home +;; +;; Enables git-lfs support. true or false, default is false. +;LFS_START_SERVER = false +;; +;; +;; LFS authentication secret, change this yourself +;LFS_JWT_SECRET = +;; +;; Alternative location to specify LFS authentication secret. You cannot specify both this and LFS_JWT_SECRET, and must pick one +;LFS_JWT_SECRET_URI = file:/etc/gitea/lfs_jwt_secret +;; +;; LFS authentication validity period (in time.Duration), pushes taking longer than this may fail. +;LFS_HTTP_AUTH_EXPIRY = 24h +;; +;; Maximum allowed LFS file size in bytes (Set to 0 for no limit). +;LFS_MAX_FILE_SIZE = 0 +;; +;; Maximum number of locks returned per page +;LFS_LOCKS_PAGING_NUM = 50 +;; +;; Allow graceful restarts using SIGHUP to fork +;ALLOW_GRACEFUL_RESTARTS = true +;; +;; After a restart the parent will finish ongoing requests before +;; shutting down. Force shutdown if this process takes longer than this delay. +;; set to a negative value to disable +;GRACEFUL_HAMMER_TIME = 60s +;; +;; Allows the setting of a startup timeout and waithint for Windows as SVC service +;; 0 disables this. +;STARTUP_TIMEOUT = 0 +;; +;; Static resources, includes resources on custom/, public/ and all uploaded avatars web browser cache time. Note that this cache is disabled when RUN_MODE is "dev". Default is 6h +;STATIC_CACHE_TIME = 6h + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +[database] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; Database to use. Either "mysql", "postgres", "mssql" or "sqlite3". +;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; MySQL Configuration +;; +DB_TYPE = mysql +HOST = 127.0.0.1:3306 ; can use socket e.g. /var/run/mysqld/mysqld.sock +NAME = gitea +USER = root +;PASSWD = ;Use PASSWD = `your password` for quoting if you use special characters in the password. +;SSL_MODE = false ; either "false" (default), "true", or "skip-verify" +;CHARSET_COLLATION = ; Empty as default, Gitea will try to find a case-sensitive collation. Don't change it unless you clearly know what you need. +;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; Postgres Configuration +;; +;DB_TYPE = postgres +;HOST = 127.0.0.1:5432 ; can use socket e.g. 
/var/run/postgresql/ +;NAME = gitea +;USER = root +;PASSWD = +;SCHEMA = +;SSL_MODE=disable ;either "disable" (default), "require", or "verify-full" +;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; SQLite Configuration +;; +;DB_TYPE = sqlite3 +;PATH= ; defaults to data/gitea.db +;SQLITE_TIMEOUT = ; Query timeout defaults to: 500 +;SQLITE_JOURNAL_MODE = ; defaults to sqlite database default (often DELETE), can be used to enable WAL mode. https://www.sqlite.org/pragma.html#pragma_journal_mode +;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; MSSQL Configuration +;; +;DB_TYPE = mssql +;HOST = 172.17.0.2:1433 +;NAME = gitea +;USER = SA +;PASSWD = MwantsaSecurePassword1 +;CHARSET_COLLATION = ; Empty as default, Gitea will try to find a case-sensitive collation. Don't change it unless you clearly know what you need. +;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; Other settings +;; +;; For iterate buffer, default is 50 +;ITERATE_BUFFER_SIZE = 50 +;; +;; Show the database generated SQL +;LOG_SQL = false +;; +;; Maximum number of DB Connect retries +;DB_RETRIES = 10 +;; +;; Backoff time per DB retry (time.Duration) +;DB_RETRY_BACKOFF = 3s +;; +;; Max idle database connections on connection pool, default is 2 +;MAX_IDLE_CONNS = 2 +;; +;; Database connection max life time, default is 0 or 3s mysql (See #6804 & #7071 for reasoning) +;CONN_MAX_LIFETIME = 3s +;; +;; Database maximum number of open connections, default is 0 meaning no maximum +;MAX_OPEN_CONNS = 0 +;; +;; Whether execute database models migrations automatically +;AUTO_MIGRATION = true +;; +;; Threshold value (in seconds) beyond which query execution time is logged as a warning in the xorm logger +;; +;SLOW_QUERY_THRESHOLD = 5s + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +[security] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; Whether the installer is disabled (set to true to disable the installer) +INSTALL_LOCK = false +;; +;; Global secret key that will be used +;; This key is VERY IMPORTANT. If you lose it, the data encrypted by it (like 2FA secret) can't be decrypted anymore. +SECRET_KEY = +;; +;; Alternative location to specify secret key, instead of this file; you cannot specify both this and SECRET_KEY, and must pick one +;; This key is VERY IMPORTANT. If you lose it, the data encrypted by it (like 2FA secret) can't be decrypted anymore. +;SECRET_KEY_URI = file:/etc/gitea/secret_key +;; +;; Secret used to validate communication within Gitea binary. +INTERNAL_TOKEN = +;; +;; Alternative location to specify internal token, instead of this file; you cannot specify both this and INTERNAL_TOKEN, and must pick one +;INTERNAL_TOKEN_URI = file:/etc/gitea/internal_token +;; +;; How long to remember that a user is logged in before requiring relogin (in days) +;LOGIN_REMEMBER_DAYS = 31 +;; +;; Name of the cookie used to store the current username. +;COOKIE_USERNAME = gitea_awesome +;; +;; Name of cookie used to store authentication information. 
+;COOKIE_REMEMBER_NAME = gitea_incredible +;; +;; Reverse proxy authentication header name of user name, email, and full name +;REVERSE_PROXY_AUTHENTICATION_USER = X-WEBAUTH-USER +;REVERSE_PROXY_AUTHENTICATION_EMAIL = X-WEBAUTH-EMAIL +;REVERSE_PROXY_AUTHENTICATION_FULL_NAME = X-WEBAUTH-FULLNAME +;; +;; Interpret X-Forwarded-For header or the X-Real-IP header and set this as the remote IP for the request +;REVERSE_PROXY_LIMIT = 1 +;; +;; List of IP addresses and networks separated by comma of trusted proxy servers. Use `*` to trust all. +;REVERSE_PROXY_TRUSTED_PROXIES = 127.0.0.0/8,::1/128 +;; +;; The minimum password length for new Users +;MIN_PASSWORD_LENGTH = 8 +;; +;; Set to true to allow users to import local server paths +;IMPORT_LOCAL_PATHS = false +;; +;; Set to false to allow users with git hook privileges to create custom git hooks. +;; Custom git hooks can be used to perform arbitrary code execution on the host operating system. +;; This enables the users to access and modify this config file and the Gitea database and interrupt the Gitea service. +;; By modifying the Gitea database, users can gain Gitea administrator privileges. +;; It also enables them to access other resources available to the user on the operating system that is running the Gitea instance and perform arbitrary actions in the name of the Gitea OS user. +;; WARNING: This maybe harmful to you website or your operating system. +;; WARNING: Setting this to true does not change existing hooks in git repos; adjust it before if necessary. +;DISABLE_GIT_HOOKS = true +;; +;; Set to true to disable webhooks feature. +;DISABLE_WEBHOOKS = false +;; +;; Set to false to allow pushes to gitea repositories despite having an incomplete environment - NOT RECOMMENDED +;ONLY_ALLOW_PUSH_IF_GITEA_ENVIRONMENT_SET = true +;; +;;Comma separated list of character classes required to pass minimum complexity. +;;If left empty or no valid values are specified, the default is off (no checking) +;;Classes include "lower,upper,digit,spec" +;PASSWORD_COMPLEXITY = off +;; +;; Password Hash algorithm, either "argon2", "pbkdf2", "scrypt" or "bcrypt" +;PASSWORD_HASH_ALGO = pbkdf2 +;; +;; Set false to allow JavaScript to read CSRF cookie +;CSRF_COOKIE_HTTP_ONLY = true +;; +;; Validate against https://haveibeenpwned.com/Passwords to see if a password has been exposed +;PASSWORD_CHECK_PWN = false +;; +;; Cache successful token hashes. API tokens are stored in the DB as pbkdf2 hashes however, this means that there is a potentially significant hashing load when there are multiple API operations. +;; This cache will store the successfully hashed tokens in a LRU cache as a balance between performance and security. +;SUCCESSFUL_TOKENS_CACHE_SIZE = 20 +;; +;; Reject API tokens sent in URL query string (Accept Header-based API tokens only). This avoids security vulnerabilities +;; stemming from cached/logged plain-text API tokens. +;; In future releases, this will become the default behavior +;DISABLE_QUERY_AUTH_TOKEN = false + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +[camo] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; At the moment we only support images +;; +;; if the camo is enabled +;ENABLED = false +;; url to a camo image proxy, it **is required** if camo is enabled. +;SERVER_URL = +;; HMAC to encode urls with, it **is required** if camo is enabled. 
+;HMAC_KEY = +;; Set to true to use camo for https too lese only non https urls are proxyed +;ALLWAYS = false + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +[oauth2] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; Enables OAuth2 provider +ENABLED = true +;; +;; Algorithm used to sign OAuth2 tokens. Valid values: HS256, HS384, HS512, RS256, RS384, RS512, ES256, ES384, ES512, EdDSA +;JWT_SIGNING_ALGORITHM = RS256 +;; +;; Private key file path used to sign OAuth2 tokens. The path is relative to APP_DATA_PATH. +;; This setting is only needed if JWT_SIGNING_ALGORITHM is set to RS256, RS384, RS512, ES256, ES384 or ES512. +;; The file must contain a RSA or ECDSA private key in the PKCS8 format. If no key exists a 4096 bit key will be created for you. +;JWT_SIGNING_PRIVATE_KEY_FILE = jwt/private.pem +;; +;; OAuth2 authentication secret for access and refresh tokens, change this yourself to a unique string. CLI generate option is helpful in this case. https://docs.gitea.io/en-us/command-line/#generate +;; This setting is only needed if JWT_SIGNING_ALGORITHM is set to HS256, HS384 or HS512. +;JWT_SECRET = +;; +;; Alternative location to specify OAuth2 authentication secret. You cannot specify both this and JWT_SECRET, and must pick one +;JWT_SECRET_URI = file:/etc/gitea/oauth2_jwt_secret +;; +;; Lifetime of an OAuth2 access token in seconds +;ACCESS_TOKEN_EXPIRATION_TIME = 3600 +;; +;; Lifetime of an OAuth2 refresh token in hours +;REFRESH_TOKEN_EXPIRATION_TIME = 730 +;; +;; Check if refresh token got already used +;INVALIDATE_REFRESH_TOKENS = false +;; +;; Maximum length of oauth2 token/cookie stored on server +;MAX_TOKEN_LENGTH = 32767 +;; +;; Pre-register OAuth2 applications for some universally useful services +;; * https://github.com/hickford/git-credential-oauth +;; * https://github.com/git-ecosystem/git-credential-manager +;; * https://gitea.com/gitea/tea +;DEFAULT_APPLICATIONS = git-credential-oauth, git-credential-manager, tea + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +[log] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Root path for the log files - defaults to %(GITEA_WORK_DIR)/log +;ROOT_PATH = +;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Main Logger +;; +;; Either "console", "file" or "conn", default is "console" +;; Use comma to separate multiple modes, e.g. "console, file" +MODE = console +;; +;; Either "Trace", "Debug", "Info", "Warn", "Error" or "None", default is "Info" +LEVEL = Info +;; +;; Print Stacktrace with logs (rarely helpful, do not set) Either "Trace", "Debug", "Info", "Warn", "Error", default is "None" +;STACKTRACE_LEVEL = None +;; +;; Buffer length of the channel, keep it as it is if you don't know what it is. +;BUFFER_LEN = 10000 +;; +;; Sub logger modes, a single comma means use default MODE above, empty means disable it +;logger.access.MODE= +;logger.router.MODE=, +;logger.xorm.MODE=, +;; +;; Collect SSH logs (Creates log from ssh git request) +;; +;ENABLE_SSH_LOG = false +;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; Access Logger (Creates log in NCSA common log format) +;; +;; Print request id which parsed from request headers in access log, when access log is enabled. 
+;; * E.g: +;; * In request Header: X-Request-ID: test-id-123 +;; * Configuration in app.ini: REQUEST_ID_HEADERS = X-Request-ID +;; * Print in log: 127.0.0.1:58384 - - [14/Feb/2023:16:33:51 +0800] "test-id-123" +;; +;; If you configure more than one in the .ini file, it will match in the order of configuration, +;; and the first match will be finally printed in the log. +;; * E.g: +;; * In request Header: X-Trace-ID: trace-id-1q2w3e4r +;; * Configuration in app.ini: REQUEST_ID_HEADERS = X-Request-ID, X-Trace-ID, X-Req-ID +;; * Print in log: 127.0.0.1:58384 - - [14/Feb/2023:16:33:51 +0800] "trace-id-1q2w3e4r" +;; +;REQUEST_ID_HEADERS = +;; +;; Sets the template used to create the access log. +;ACCESS_LOG_TEMPLATE = {{.Ctx.RemoteHost}} - {{.Identity}} {{.Start.Format "[02/Jan/2006:15:04:05 -0700]" }} "{{.Ctx.Req.Method}} {{.Ctx.Req.URL.RequestURI}} {{.Ctx.Req.Proto}}" {{.ResponseWriter.Status}} {{.ResponseWriter.Size}} "{{.Ctx.Req.Referer}}" "{{.Ctx.Req.UserAgent}}" + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; Log modes (aka log writers) +;; +;[log.%(WriterMode)] +;MODE=console/file/conn/... +;LEVEL= +;FLAGS = stdflags +;EXPRESSION = +;PREFIX = +;COLORIZE = false +;; +;[log.console] +;STDERR = false +;; +;[log.file] +;; Set the file_name for the logger. If this is a relative path this will be relative to ROOT_PATH +;FILE_NAME = +;; This enables automated log rotate(switch of following options), default is true +;LOG_ROTATE = true +;; Max size shift of a single file, default is 28 means 1 << 28, 256MB +;MAX_SIZE_SHIFT = 28 +;; Segment log daily, default is true +;DAILY_ROTATE = true +;; delete the log file after n days, default is 7 +;MAX_DAYS = 7 +;; compress logs with gzip +;COMPRESS = true +;; compression level see godoc for compress/gzip +;COMPRESSION_LEVEL = -1 +;; +;[log.conn] +;; Reconnect host for every single message, default is false +;RECONNECT_ON_MSG = false +;; Try to reconnect when connection is lost, default is false +;RECONNECT = false +;; Either "tcp", "unix" or "udp", default is "tcp" +;PROTOCOL = tcp +;; Host address +;ADDR = + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +[git] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; The path of git executable. If empty, Gitea searches through the PATH environment. +;PATH = +;; +;; The HOME directory for Git +;HOME_PATH = %(APP_DATA_PATH)s/home +;; +;; Disables highlight of added and removed changes +;DISABLE_DIFF_HIGHLIGHT = false +;; +;; Max number of lines allowed in a single file in diff view +;MAX_GIT_DIFF_LINES = 1000 +;; +;; Max number of allowed characters in a line in diff view +;MAX_GIT_DIFF_LINE_CHARACTERS = 5000 +;; +;; Max number of files shown in diff view +;MAX_GIT_DIFF_FILES = 100 +;; +;; Set the default commits range size +;COMMITS_RANGE_SIZE = 50 +;; +;; Set the default branches range size +;BRANCHES_RANGE_SIZE = 20 +;; +;; Arguments for command 'git gc', e.g. "--aggressive --auto" +;; see more on http://git-scm.com/docs/git-gc/ +;GC_ARGS = +;; +;; If use git wire protocol version 2 when git version >= 2.18, default is true, set to false when you always want git wire protocol version 1 +;; To enable this for Git over SSH when using a OpenSSH server, add `AcceptEnv GIT_PROTOCOL` to your sshd_config file. 
+;ENABLE_AUTO_GIT_WIRE_PROTOCOL = true +;; +;; Respond to pushes to a non-default branch with a URL for creating a Pull Request (if the repository has them enabled) +;PULL_REQUEST_PUSH_MESSAGE = true +;; +;; (Go-Git only) Don't cache objects greater than this in memory. (Set to 0 to disable.) +;LARGE_OBJECT_THRESHOLD = 1048576 +;; Set to true to forcibly set core.protectNTFS=false +;DISABLE_CORE_PROTECT_NTFS=false +;; Disable the usage of using partial clones for git. +;DISABLE_PARTIAL_CLONE = false + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Git Operation timeout in seconds +;[git.timeout] +;DEFAULT = 360 +;MIGRATE = 600 +;MIRROR = 300 +;CLONE = 300 +;PULL = 300 +;GC = 60 + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Git config options +;; This section only does "set" config, a removed config key from this section won't be removed from git config automatically. The format is `some.configKey = value`. +;[git.config] +;diff.algorithm = histogram +;core.logAllRefUpdates = true +;gc.reflogExpire = 90 + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +[service] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; Time limit to confirm account/email registration +;ACTIVE_CODE_LIVE_MINUTES = 180 +;; +;; Time limit to perform the reset of a forgotten password +;RESET_PASSWD_CODE_LIVE_MINUTES = 180 +;; +;; Whether a new user needs to confirm their email when registering. +;REGISTER_EMAIL_CONFIRM = false +;; +;; Whether a new user needs to be confirmed manually after registration. (Requires `REGISTER_EMAIL_CONFIRM` to be disabled.) +;REGISTER_MANUAL_CONFIRM = false +;; +;; List of domain names that are allowed to be used to register on a Gitea instance, wildcard is supported +;; eg: gitea.io,example.com,*.mydomain.com +;EMAIL_DOMAIN_ALLOWLIST = +;; +;; Comma-separated list of domain names that are not allowed to be used to register on a Gitea instance, wildcard is supported +;EMAIL_DOMAIN_BLOCKLIST = +;; +;; Disallow registration, only allow admins to create accounts. +;DISABLE_REGISTRATION = false +;; +;; Allow registration only using gitea itself, it works only when DISABLE_REGISTRATION is false +;ALLOW_ONLY_INTERNAL_REGISTRATION = false +;; +;; Allow registration only using third-party services, it works only when DISABLE_REGISTRATION is false +;ALLOW_ONLY_EXTERNAL_REGISTRATION = false +;; +;; User must sign in to view anything. +;REQUIRE_SIGNIN_VIEW = false +;; +;; Mail notification +;ENABLE_NOTIFY_MAIL = false +;; +;; This setting enables gitea to be signed in with HTTP BASIC Authentication using the user's password +;; If you set this to false you will not be able to access the tokens endpoints on the API with your password +;; Please note that setting this to false will not disable OAuth Basic or Basic authentication using a token +;ENABLE_BASIC_AUTHENTICATION = true +;; +;; More detail: https://github.com/gogits/gogs/issues/165 +;ENABLE_REVERSE_PROXY_AUTHENTICATION = false +; Enable this to allow reverse proxy authentication for API requests, the reverse proxy is responsible for ensuring that no CSRF is possible. 
+;ENABLE_REVERSE_PROXY_AUTHENTICATION_API = false +;ENABLE_REVERSE_PROXY_AUTO_REGISTRATION = false +;ENABLE_REVERSE_PROXY_EMAIL = false +;ENABLE_REVERSE_PROXY_FULL_NAME = false +;; +;; Enable captcha validation for registration +;ENABLE_CAPTCHA = false +;; +;; Enable this to require captcha validation for login +;REQUIRE_CAPTCHA_FOR_LOGIN = false +;; +;; Type of captcha you want to use. Options: image, recaptcha, hcaptcha, mcaptcha, cfturnstile. +;CAPTCHA_TYPE = image +;; +;; Change this to use recaptcha.net or other recaptcha service +;RECAPTCHA_URL = https://www.google.com/recaptcha/ +;; Enable recaptcha to use Google's recaptcha service +;; Go to https://www.google.com/recaptcha/admin to sign up for a key +;RECAPTCHA_SECRET = +;RECAPTCHA_SITEKEY = +;; +;; For hCaptcha, create an account at https://accounts.hcaptcha.com/login to get your keys +;HCAPTCHA_SECRET = +;HCAPTCHA_SITEKEY = +;; +;; Change this to use demo.mcaptcha.org or your self-hosted mcaptcha.org instance. +;MCAPTCHA_URL = https://demo.mcaptcha.org +;; +;; Go to your configured mCaptcha instance and register a sitekey +;; and use your account's secret. +;MCAPTCHA_SECRET = +;MCAPTCHA_SITEKEY = +;; +;; Go to https://dash.cloudflare.com/?to=/:account/turnstile to sign up for a key +;CF_TURNSTILE_SITEKEY = +;CF_TURNSTILE_SECRET = +;; +;; Default value for KeepEmailPrivate +;; Each new user will get the value of this setting copied into their profile +;DEFAULT_KEEP_EMAIL_PRIVATE = false +;; +;; Default value for AllowCreateOrganization +;; Every new user will have rights set to create organizations depending on this setting +;DEFAULT_ALLOW_CREATE_ORGANIZATION = true +;; Default value for IsRestricted +;; Every new user will have restricted permissions depending on this setting +;DEFAULT_USER_IS_RESTRICTED = false +;; +;; Either "public", "limited" or "private", default is "public" +;; Limited is for users visible only to signed users +;; Private is for users visible only to members of their organizations +;; Public is for users visible for everyone +;DEFAULT_USER_VISIBILITY = public +;; +;; Set which visibility modes a user can have +;ALLOWED_USER_VISIBILITY_MODES = public,limited,private +;; +;; Either "public", "limited" or "private", default is "public" +;; Limited is for organizations visible only to signed users +;; Private is for organizations visible only to members of the organization +;; Public is for organizations visible to everyone +;DEFAULT_ORG_VISIBILITY = public +;; +;; Default value for DefaultOrgMemberVisible +;; True will make the membership of the users visible when added to the organisation +;DEFAULT_ORG_MEMBER_VISIBLE = false +;; +;; Default value for EnableDependencies +;; Repositories will use dependencies by default depending on this setting +;DEFAULT_ENABLE_DEPENDENCIES = true +;; +;; Dependencies can be added from any repository where the user is granted access or only from the current repository depending on this setting. +;ALLOW_CROSS_REPOSITORY_DEPENDENCIES = true +;; +;; Default map service. No external API support has been included. A service has to allow +;; searching using URL parameters, the location will be appended to the URL as escaped query parameter. +;; Disabled by default, some example values are: +;; - OpenStreetMap: https://www.openstreetmap.org/search?query= +;; - Google Maps: https://www.google.com/maps/place/ +;; - MapQuest: https://www.mapquest.com/search/ +;; - Bing Maps: https://www.bing.com/maps?where1= +; USER_LOCATION_MAP_URL = +;; +;; Enable heatmap on users profiles. 
+;ENABLE_USER_HEATMAP = true +;; +;; Enable Timetracking +;ENABLE_TIMETRACKING = true +;; +;; Default value for EnableTimetracking +;; Repositories will use timetracking by default depending on this setting +;DEFAULT_ENABLE_TIMETRACKING = true +;; +;; Default value for AllowOnlyContributorsToTrackTime +;; Only users with write permissions can track time if this is true +;DEFAULT_ALLOW_ONLY_CONTRIBUTORS_TO_TRACK_TIME = true +;; +;; Value for the domain part of the user's email address in the git log if user +;; has set KeepEmailPrivate to true. The user's email will be replaced with a +;; concatenation of the user name in lower case, "@" and NO_REPLY_ADDRESS. Default +;; value is "noreply." + DOMAIN, where DOMAIN resolves to the value from server.DOMAIN +;; Note: do not use the notation below +;NO_REPLY_ADDRESS = ; noreply. +;; +;; Show Registration button +;SHOW_REGISTRATION_BUTTON = true +;; +;; Show milestones dashboard page - a view of all the user's milestones +;SHOW_MILESTONES_DASHBOARD_PAGE = true +;; +;; Default value for AutoWatchNewRepos +;; When adding a repo to a team or creating a new repo all team members will watch the +;; repo automatically if enabled +;AUTO_WATCH_NEW_REPOS = true +;; +;; Default value for AutoWatchOnChanges +;; Make the user watch a repository When they commit for the first time +;AUTO_WATCH_ON_CHANGES = false +;; +;; Minimum amount of time a user must exist before comments are kept when the user is deleted. +;USER_DELETE_WITH_COMMENTS_MAX_TIME = 0 +;; Valid site url schemes for user profiles +;VALID_SITE_URL_SCHEMES=http,https + + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Other Settings +;; +;; Uncomment the [section.header] if you wish to +;; set the below settings. +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[repository] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Root path for storing all repository data. By default, it is set to %(APP_DATA_PATH)s/gitea-repositories. +;; A relative path is interpreted as _`AppWorkPath`_/%(ROOT)s +;ROOT = +;; +;; The script type this server supports. Usually this is `bash`, but some users report that only `sh` is available. +;SCRIPT_TYPE = bash +;; +;; DETECTED_CHARSETS_ORDER tie-break order for detected charsets. +;; If the charsets have equal confidence, tie-breaking will be done by order in this list +;; with charsets earlier in the list chosen in preference to those later. +;; Adding "defaults" will place the unused charsets at that position. +;DETECTED_CHARSETS_ORDER = UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, UTF-32LE, ISO-8859, windows-1252, ISO-8859, windows-1250, ISO-8859, ISO-8859, ISO-8859, windows-1253, ISO-8859, windows-1255, ISO-8859, windows-1251, windows-1256, KOI8-R, ISO-8859, windows-1254, Shift_JIS, GB18030, EUC-JP, EUC-KR, Big5, ISO-2022, ISO-2022, ISO-2022, IBM424_rtl, IBM424_ltr, IBM420_rtl, IBM420_ltr +;; +;; Default ANSI charset to override non-UTF-8 charsets to +;ANSI_CHARSET = +;; +;; Force every new repository to be private +;FORCE_PRIVATE = false +;; +;; Default privacy setting when creating a new repository, allowed values: last, private, public. Default is last which means the last setting used. +;DEFAULT_PRIVATE = last +;; +;; Default private when using push-to-create +;DEFAULT_PUSH_CREATE_PRIVATE = true +;; +;; Global limit of repositories per user, applied at creation time. 
-1 means no limit +;MAX_CREATION_LIMIT = -1 +;; +;; Preferred Licenses to place at the top of the List +;; The name here must match the filename in options/license or custom/options/license +;PREFERRED_LICENSES = Apache License 2.0,MIT License +;; +;; Disable the ability to interact with repositories using the HTTP protocol +;DISABLE_HTTP_GIT = false +;; +;; Value for Access-Control-Allow-Origin header, default is not to present +;; WARNING: This may be harmful to your website if you do not give it a right value. +;ACCESS_CONTROL_ALLOW_ORIGIN = +;; +;; Force ssh:// clone url instead of scp-style uri when default SSH port is used +;USE_COMPAT_SSH_URI = false +;; +;; Value for the "go get" request returns the repository url as https or ssh, default is https +;GO_GET_CLONE_URL_PROTOCOL = https +;; +;; Close issues as long as a commit on any branch marks it as fixed +;DEFAULT_CLOSE_ISSUES_VIA_COMMITS_IN_ANY_BRANCH = false +;; +;; Allow users to push local repositories to Gitea and have them automatically created for a user or an org +;ENABLE_PUSH_CREATE_USER = false +;ENABLE_PUSH_CREATE_ORG = false +;; +;; Comma separated list of globally disabled repo units. Allowed values: repo.issues, repo.ext_issues, repo.pulls, repo.wiki, repo.ext_wiki, repo.projects, repo.packages, repo.actions. +;DISABLED_REPO_UNITS = +;; +;; Comma separated list of default new repo units. Allowed values: repo.code, repo.releases, repo.issues, repo.pulls, repo.wiki, repo.projects, repo.packages, repo.actions. +;; Note: Code and Releases can currently not be deactivated. If you specify default repo units you should still list them for future compatibility. +;; External wiki and issue tracker can't be enabled by default as it requires additional settings. +;; Disabled repo units will not be added to new repositories regardless if it is in the default list. +;DEFAULT_REPO_UNITS = repo.code,repo.releases,repo.issues,repo.pulls,repo.wiki,repo.projects,repo.packages,repo.actions +;; +;; Comma separated list of default forked repo units. +;; The set of allowed values and rules are the same as DEFAULT_REPO_UNITS. +;DEFAULT_FORK_REPO_UNITS = repo.code,repo.pulls +;; +;; Prefix archive files by placing them in a directory named after the repository +;PREFIX_ARCHIVE_FILES = true +;; +;; Disable migrating feature. +;DISABLE_MIGRATIONS = false +;; +;; Disable stars feature. +;DISABLE_STARS = false +;; +;; The default branch name of new repositories +;DEFAULT_BRANCH = main +;; +;; Allow adoption of unadopted repositories +;ALLOW_ADOPTION_OF_UNADOPTED_REPOSITORIES = false +;; +;; Allow deletion of unadopted repositories +;ALLOW_DELETION_OF_UNADOPTED_REPOSITORIES = false + +;; Don't allow download source archive files from UI +;DISABLE_DOWNLOAD_SOURCE_ARCHIVES = false + +;; Allow fork repositories without maximum number limit +;ALLOW_FORK_WITHOUT_MAXIMUM_LIMIT = true + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[repository.editor] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; List of file extensions for which lines should be wrapped in the Monaco editor +;; Separate extensions with a comma. 
To line wrap files without an extension, just put a comma +;LINE_WRAP_EXTENSIONS = .txt,.md,.markdown,.mdown,.mkd,.livemd, + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[repository.local] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; Path for local repository copy. Defaults to `tmp/local-repo` (content gets deleted on gitea restart) +;LOCAL_COPY_PATH = tmp/local-repo + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[repository.upload] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; Whether repository file uploads are enabled. Defaults to `true` +;ENABLED = true +;; +;; Path for uploads. Defaults to `data/tmp/uploads` (content gets deleted on gitea restart) +;TEMP_PATH = data/tmp/uploads +;; +;; Comma-separated list of allowed file extensions (`.zip`), mime types (`text/plain`) or wildcard type (`image/*`, `audio/*`, `video/*`). Empty value or `*/*` allows all types. +;ALLOWED_TYPES = +;; +;; Max size of each file in megabytes. Defaults to 50MB +;FILE_MAX_SIZE = 50 +;; +;; Max number of files per upload. Defaults to 5 +;MAX_FILES = 5 + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[repository.pull-request] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; List of prefixes used in Pull Request title to mark them as Work In Progress (matched in a case-insensitive manner) +;WORK_IN_PROGRESS_PREFIXES = WIP:,[WIP] +;; +;; List of keywords used in Pull Request comments to automatically close a related issue +;CLOSE_KEYWORDS = close,closes,closed,fix,fixes,fixed,resolve,resolves,resolved +;; +;; List of keywords used in Pull Request comments to automatically reopen a related issue +;REOPEN_KEYWORDS = reopen,reopens,reopened +;; +;; Set default merge style for repository creating, valid options: merge, rebase, rebase-merge, squash, fast-forward-only +;DEFAULT_MERGE_STYLE = merge +;; +;; In the default merge message for squash commits include at most this many commits +;DEFAULT_MERGE_MESSAGE_COMMITS_LIMIT = 50 +;; +;; In the default merge message for squash commits limit the size of the commit messages to this +;DEFAULT_MERGE_MESSAGE_SIZE = 5120 +;; +;; In the default merge message for squash commits walk all commits to include all authors in the Co-authored-by otherwise just use those in the limited list +;DEFAULT_MERGE_MESSAGE_ALL_AUTHORS = false +;; +;; In default merge messages limit the number of approvers listed as Reviewed-by: to this many +;DEFAULT_MERGE_MESSAGE_MAX_APPROVERS = 10 +;; +;; In default merge messages only include approvers who are official +;DEFAULT_MERGE_MESSAGE_OFFICIAL_APPROVERS_ONLY = true +;; +;; Add co-authored-by and co-committed-by trailers if committer does not match author +;ADD_CO_COMMITTER_TRAILERS = true +;; +;; In addition to testing patches using the three-way merge method, re-test conflicting patches with git apply +;TEST_CONFLICTING_PATCHES_WITH_GIT_APPLY = false +;; +;; Retarget child pull requests to the parent pull request branch target on merge of parent pull request. It only works on merged PRs where the head and base branch target the same repo. 
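+;; Illustrative example: if PR #2 targets the head branch of PR #1, merging PR #1 retargets PR #2 onto PR #1's base branch.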
+;RETARGET_CHILDREN_ON_MERGE = true + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[repository.issue] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; List of reasons why a Pull Request or Issue can be locked +;LOCK_REASONS = Too heated,Off-topic,Resolved,Spam +;; Maximum number of pinned Issues per repo +;; Set to 0 to disable pinning Issues +;MAX_PINNED = 3 + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[repository.release] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Comma-separated list of allowed file extensions (`.zip`), mime types (`text/plain`) or wildcard type (`image/*`, `audio/*`, `video/*`). Empty value or `*/*` allows all types. +;ALLOWED_TYPES = +;DEFAULT_PAGING_NUM = 10 + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[repository.signing] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; GPG key to use to sign commits, Defaults to the default - that is the value of git config --get user.signingkey +;; run in the context of the RUN_USER +;; Switch to none to stop signing completely +;SIGNING_KEY = default +;; +;; If a SIGNING_KEY ID is provided and is not set to default, use the provided Name and Email address as the signer. +;; These should match a publicized name and email address for the key. (When SIGNING_KEY is default these are set to +;; the results of git config --get user.name and git config --get user.email respectively and can only be overridden +;; by setting the SIGNING_KEY ID to the correct ID.) +;SIGNING_NAME = +;SIGNING_EMAIL = +;; +;; Sets the default trust model for repositories. Options are: collaborator, committer, collaboratorcommitter +;DEFAULT_TRUST_MODEL = collaborator +;; +;; Determines when gitea should sign the initial commit when creating a repository +;; Either: +;; - never +;; - pubkey: only sign if the user has a pubkey +;; - twofa: only sign if the user has logged in with twofa +;; - always +;; options other than none and always can be combined as comma separated list +;INITIAL_COMMIT = always +;; +;; Determines when to sign for CRUD actions +;; - as above +;; - parentsigned: requires that the parent commit is signed. +;CRUD_ACTIONS = pubkey, twofa, parentsigned +;; Determines when to sign Wiki commits +;; - as above +;WIKI = never +;; +;; Determines when to sign on merges +;; - basesigned: require that the parent of commit on the base repo is signed. +;; - commitssigned: require that all the commits in the head branch are signed. 
+;; - approved: only sign when merging an approved pr to a protected branch +;MERGES = pubkey, twofa, basesigned, commitssigned + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[repository.mimetype_mapping] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; Custom MIME type mapping for downloadable files +;.apk=application/vnd.android.package-archive + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[project] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Default templates for project boards +;PROJECT_BOARD_BASIC_KANBAN_TYPE = To Do, In Progress, Done +;PROJECT_BOARD_BUG_TRIAGE_TYPE = Needs Triage, High Priority, Low Priority, Closed + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[cors] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; More information about CORS can be found here: https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS#The_HTTP_response_headers +;; enable cors headers (disabled by default) +;ENABLED = false +;; +;; list of requesting origins that are allowed, eg: "https://*.example.com" +;ALLOW_DOMAIN = * +;; +;; list of methods allowed to request +;METHODS = GET,HEAD,POST,PUT,PATCH,DELETE,OPTIONS +;; +;; max time to cache response +;MAX_AGE = 10m +;; +;; allow request with credentials +;ALLOW_CREDENTIALS = false +;; +;; headers to permit +;HEADERS = Content-Type,User-Agent +;; +;; set X-FRAME-OPTIONS header +;X_FRAME_OPTIONS = SAMEORIGIN + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[ui] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; Number of repositories that are displayed on one explore page +;EXPLORE_PAGING_NUM = 20 +;; +;; Number of issues that are displayed on one page +;ISSUE_PAGING_NUM = 20 +;; +;; Number of maximum commits displayed in one activity feed +;FEED_MAX_COMMIT_NUM = 5 +;; +;; Number of items that are displayed in home feed +;FEED_PAGING_NUM = 20 +;; +;; Number of items that are displayed in a single subsitemap +;SITEMAP_PAGING_NUM = 20 +;; +;; Number of maximum commits displayed in commit graph. +;GRAPH_MAX_COMMIT_NUM = 100 +;; +;; Number of line of codes shown for a code comment +;CODE_COMMENT_LINES = 4 +;; +;; Max size of files to be displayed (default is 8MiB) +;MAX_DISPLAY_FILE_SIZE = 8388608 +;; +;; Detect ambiguous unicode characters in file contents and show warnings on the UI +;AMBIGUOUS_UNICODE_DETECTION = true +;; +;; Whether the email of the user should be shown in the Explore Users page +;SHOW_USER_EMAIL = true +;; +;; Set the default theme for the Gitea install +;DEFAULT_THEME = gitea-auto +;; +;; All available themes. Allow users select personalized themes regardless of the value of `DEFAULT_THEME`. +;; Leave it empty to allow users to select any theme from "{CustomPath}/public/assets/css/theme-*.css" +;THEMES = +;; +;; All available reactions users can choose on issues/prs and comments. +;; Values can be emoji alias (:smile:) or a unicode emoji. 
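+;; e.g. a hypothetical value such as REACTIONS = +1, laugh, 🎉 mixes emoji aliases with a literal unicode emoji.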
+;; For custom reactions, add a tightly cropped square image to public/assets/img/emoji/reaction_name.png +;REACTIONS = +1, -1, laugh, hooray, confused, heart, rocket, eyes +;; +;; Change the number of users that are displayed in reactions tooltip (triggered by mouse hover). +;REACTION_MAX_USER_NUM = 10 +;; +;; Additional Emojis not defined in the utf8 standard +;; By default we support gitea (:gitea:), to add more copy them to public/assets/img/emoji/emoji_name.png and add it to this config. +;; Dont mistake it for Reactions. +;CUSTOM_EMOJIS = gitea, codeberg, gitlab, git, github, gogs +;; +;; Whether the full name of the users should be shown where possible. If the full name isn't set, the username will be used. +;DEFAULT_SHOW_FULL_NAME = false +;; +;; Whether to search within description at repository search on explore page. +;SEARCH_REPO_DESCRIPTION = true +;; +;; Whether to only show relevant repos on the explore page when no keyword is specified and default sorting is used. +;; A repo is considered irrelevant if it's a fork or if it has no metadata (no description, no icon, no topic). +;ONLY_SHOW_RELEVANT_REPOS = false +;; +;; Change the sort type of the explore pages. +;; Default is "recentupdate", but you also have "alphabetically", "reverselastlogin", "newest", "oldest". +;EXPLORE_PAGING_DEFAULT_SORT = recentupdate +;; +;; The tense all timestamps should be rendered in. Possible values are `absolute` time (i.e. 1970-01-01, 11:59) and `mixed`. +;; `mixed` means most timestamps are rendered in relative time (i.e. 2 days ago). +;PREFERRED_TIMESTAMP_TENSE = mixed + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[ui.admin] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; Number of users that are displayed on one page +;USER_PAGING_NUM = 50 +;; +;; Number of repos that are displayed on one page +;REPO_PAGING_NUM = 50 +;; +;; Number of notices that are displayed on one page +;NOTICE_PAGING_NUM = 25 +;; +;; Number of organizations that are displayed on one page +;ORG_PAGING_NUM = 50 + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[ui.user] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Number of repos that are displayed on one page +;REPO_PAGING_NUM = 15 + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[ui.meta] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;AUTHOR = Gitea - Git with a cup of tea +;DESCRIPTION = Gitea (Git with a cup of tea) is a painless self-hosted Git service written in Go +;KEYWORDS = go,git,self-hosted,gitea + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[ui.notification] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; Control how often the notification endpoint is polled to update the notification +;; The timeout will increase to MAX_TIMEOUT in TIMEOUT_STEPs if the notification count is unchanged +;; Set MIN_TIMEOUT to -1 to turn off +;MIN_TIMEOUT = 10s +;MAX_TIMEOUT = 60s +;TIMEOUT_STEP = 10s +;; +;; This setting determines how often the db is queried to get the latest notification counts. 
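+;; e.g. a hypothetical EVENT_SOURCE_UPDATE_TIME = 30s would query the database at most once every 30 seconds.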
+;; If the browser client supports EventSource and SharedWorker, a SharedWorker will be used in preference to polling notification. Set to -1 to disable the EventSource +;EVENT_SOURCE_UPDATE_TIME = 10s + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[ui.svg] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; Whether to render SVG files as images. If SVG rendering is disabled, SVG files are displayed as text and cannot be embedded in markdown files as images. +;ENABLE_RENDER = true + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[ui.csv] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; Maximum allowed file size in bytes to render CSV files as table. (Set to 0 for no limit). +;MAX_FILE_SIZE = 524288 +;; +;; Maximum allowed rows to render CSV files. (Set to 0 for no limit) +;MAX_ROWS = 2500 + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[markdown] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; Render soft line breaks as hard line breaks, which means a single newline character between +;; paragraphs will cause a line break and adding trailing whitespace to paragraphs is not +;; necessary to force a line break. +;; Render soft line breaks as hard line breaks for comments +;ENABLE_HARD_LINE_BREAK_IN_COMMENTS = true +;; +;; Render soft line breaks as hard line breaks for markdown documents +;ENABLE_HARD_LINE_BREAK_IN_DOCUMENTS = false +;; +;; Comma separated list of custom URL-Schemes that are allowed as links when rendering Markdown +;; for example git,magnet,ftp (more at https://en.wikipedia.org/wiki/List_of_URI_schemes) +;; URLs starting with http and https are always displayed, whatever is put in this entry. +;; If this entry is empty, all URL schemes are allowed. +;CUSTOM_URL_SCHEMES = +;; +;; List of file extensions that should be rendered/edited as Markdown +;; Separate the extensions with a comma. To render files without any extension as markdown, just put a comma +;FILE_EXTENSIONS = .md,.markdown,.mdown,.mkd,.livemd +;; +;; Enables math inline and block detection +;ENABLE_MATH = true + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[ssh.minimum_key_sizes] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; Define allowed algorithms and their minimum key length (use -1 to disable a type) +;ED25519 = 256 +;ECDSA = 256 +;RSA = 3071 ; we allow 3071 here because an otherwise valid 3072 bit RSA key can be reported as having 3071 bit length +;DSA = -1 ; set to 1024 to switch on + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[indexer] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; Issue Indexer settings +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; Issue indexer type, currently support: bleve, db, elasticsearch or meilisearch default is bleve +;ISSUE_INDEXER_TYPE = bleve +;; +;; Issue indexer storage path, available when ISSUE_INDEXER_TYPE is bleve +;ISSUE_INDEXER_PATH = indexers/issues.bleve ; Relative paths will be made absolute against _`AppWorkPath`_. 
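+;;
+;; A minimal sketch (hypothetical host and credentials) of switching the issue indexer to Elasticsearch:
+;;   ISSUE_INDEXER_TYPE = elasticsearch
+;;   ISSUE_INDEXER_CONN_STR = http://elastic:password@localhost:9200
+;;   ISSUE_INDEXER_NAME = gitea_issues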
+;; +;; Issue indexer connection string, available when ISSUE_INDEXER_TYPE is elasticsearch (e.g. http://elastic:password@localhost:9200) or meilisearch (e.g. http://:apikey@localhost:7700) +;ISSUE_INDEXER_CONN_STR = +;; +;; Issue indexer name, available when ISSUE_INDEXER_TYPE is elasticsearch or meilisearch. +;ISSUE_INDEXER_NAME = gitea_issues +;; +;; Timeout the indexer if it takes longer than this to start. +;; Set to -1 to disable timeout. +;STARTUP_TIMEOUT = 30s +;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Repository Indexer settings +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; repo indexer by default disabled, since it uses a lot of disk space +;REPO_INDEXER_ENABLED = false +;; +;; repo indexer units, the items to index, could be `sources`, `forks`, `mirrors`, `templates` or any combination of them separated by a comma. +;; If empty then it defaults to `sources` only, as if you'd like to disable fully please see REPO_INDEXER_ENABLED. +;REPO_INDEXER_REPO_TYPES = sources,forks,mirrors,templates +;; +;; Code search engine type, could be `bleve` or `elasticsearch`. +;REPO_INDEXER_TYPE = bleve +;; +;; Index file used for code search. available when `REPO_INDEXER_TYPE` is bleve +;REPO_INDEXER_PATH = indexers/repos.bleve +;; +;; Code indexer connection string, available when `REPO_INDEXER_TYPE` is elasticsearch. i.e. http://elastic:changeme@localhost:9200 +;REPO_INDEXER_CONN_STR = +;; +;; Code indexer name, available when `REPO_INDEXER_TYPE` is elasticsearch +;REPO_INDEXER_NAME = gitea_codes +;; +;; A comma separated list of glob patterns (see https://github.com/gobwas/glob) to include +;; in the index; default is empty +;REPO_INDEXER_INCLUDE = +;; +;; A comma separated list of glob patterns to exclude from the index; ; default is empty +;REPO_INDEXER_EXCLUDE = +;; +;MAX_FILE_SIZE = 1048576 + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[queue] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; Specific queues can be individually configured with [queue.name]. [queue] provides defaults +;; ([queue.issue_indexer] is special due to the old configuration described above) +;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; General queue queue type, currently support: persistable-channel, channel, level, redis, dummy +;; default to persistable-channel +;TYPE = persistable-channel +;; +;; data-dir for storing persistable queues and level queues, individual queues will default to `queues/common` meaning the queue is shared. +;DATADIR = queues/ ; Relative paths will be made absolute against `%(APP_DATA_PATH)s`. +;; +;; Default queue length before a channel queue will block +;LENGTH = 100000 +;; +;; Batch size to send for batched queues +;BATCH_LENGTH = 20 +;; +;; Connection string for redis queues this will store the redis (or Redis cluster) connection string. +;; When `TYPE` is `persistable-channel`, this provides a directory for the underlying leveldb +;; or additional options of the form `leveldb://path/to/db?option=value&....`, and will override `DATADIR`. +;CONN_STR = "redis://127.0.0.1:6379/0" +;; +;; Provides the suffix of the default redis/disk queue name - specific queues can be overridden within in their [queue.name] sections. +;QUEUE_NAME = "_queue" +;; +;; Provides the suffix of the default redis/disk unique queue set name - specific queues can be overridden within in their [queue.name] sections. 
+;SET_NAME = "_unique" +;; +;; Maximum number of worker go-routines for the queue. Default value is "CpuNum/2" clipped to between 1 and 10. +;MAX_WORKERS = ; (dynamic) + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[admin] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; Disallow regular (non-admin) users from creating organizations. +;DISABLE_REGULAR_ORG_CREATION = false +;; +;; Default configuration for email notifications for users (user configurable). Options: enabled, onmention, disabled +;DEFAULT_EMAIL_NOTIFICATIONS = enabled +;; Disabled features for users could be "deletion", "manage_ssh_keys", "manage_gpg_keys", "manage_mfa", "manage_credentials" more features can be disabled in future +;; - deletion: a user cannot delete their own account +;; - manage_ssh_keys: a user cannot configure ssh keys +;; - manage_gpg_keys: a user cannot configure gpg keys +;; - manage_mfa: a user cannot configure mfa devices +;; - manage_credentials: a user cannot configure emails, passwords, or openid +;USER_DISABLED_FEATURES = +;; Comma separated list of disabled features ONLY if the user has an external login type (eg. LDAP, Oauth, etc.), could be "deletion", "manage_ssh_keys", "manage_gpg_keys", "manage_mfa", "manage_credentials". This setting is independent from `USER_DISABLED_FEATURES` and supplements its behavior. +;; - deletion: a user cannot delete their own account +;; - manage_ssh_keys: a user cannot configure ssh keys +;; - manage_gpg_keys: a user cannot configure gpg keys +;; - manage_mfa: a user cannot configure mfa devices +;; - manage_credentials: a user cannot configure emails, passwords, or openid +;;EXTERNAL_USER_DISABLE_FEATURES = + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[openid] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; OpenID is an open, standard and decentralized authentication protocol. +;; Your identity is the address of a webpage you provide, which describes +;; how to prove you are in control of that page. +;; +;; For more info: https://en.wikipedia.org/wiki/OpenID +;; +;; Current implementation supports OpenID-2.0 +;; +;; Tested to work providers at the time of writing: +;; - Any GNUSocial node (your.hostname.tld/username) +;; - Any SimpleID provider (http://simpleid.koinic.net) +;; - http://openid.org.cn/ +;; - openid.stackexchange.com +;; - login.launchpad.net +;; - .livejournal.com +;; +;; Whether to allow signin in via OpenID +;ENABLE_OPENID_SIGNIN = true +;; +;; Whether to allow registering via OpenID +;; Do not include to rely on rhw DISABLE_REGISTRATION setting +;;ENABLE_OPENID_SIGNUP = true +;; +;; Allowed URI patterns (POSIX regexp). +;; Space separated. +;; Only these would be allowed if non-blank. +;; Example value: trusted.domain.org trusted.domain.net +;WHITELISTED_URIS = +;; +;; Forbidden URI patterns (POSIX regexp). +;; Space separated. +;; Only used if WHITELISTED_URIS is blank. +;; Example value: loadaverage.org/badguy stackexchange.com/.*spammer +;BLACKLISTED_URIS = + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[oauth2_client] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; Whether a new auto registered oauth2 user needs to confirm their email. 
+;; Do not include to use the REGISTER_EMAIL_CONFIRM setting from the `[service]` section. +;REGISTER_EMAIL_CONFIRM = +;; +;; Scopes for the openid connect oauth2 provider (separated by space, the openid scope is implicitly added). +;; Typical values are profile and email. +;; For more information about the possible values see https://openid.net/specs/openid-connect-core-1_0.html#ScopeClaims +;OPENID_CONNECT_SCOPES = +;; +;; Automatically create user accounts for new oauth2 users. +;ENABLE_AUTO_REGISTRATION = false +;; +;; The source of the username for new oauth2 accounts: +;; userid = use the userid / sub attribute +;; nickname = use the nickname attribute +;; preferred_username = use the preferred_username attribute +;; email = use the username part of the email attribute +;; Note: `nickname`, `preferred_username` and `email` options will normalize input strings using the following criteria: +;; - diacritics are removed +;; - the characters in the set ['´`] are removed +;; - the characters in the set [\s~+] are replaced with "-" +;USERNAME = nickname +;; +;; Update avatar if available from oauth2 provider. +;; Update will be performed on each login. +;UPDATE_AVATAR = false +;; +;; How to handle if an account / email already exists: +;; disabled = show an error +;; login = show an account linking login +;; auto = link directly with the account +;ACCOUNT_LINKING = login + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[webhook] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; Hook task queue length, increase if webhook shooting starts hanging +;QUEUE_LENGTH = 1000 +;; +;; Deliver timeout in seconds +;DELIVER_TIMEOUT = 5 +;; +;; Webhook can only call allowed hosts for security reasons. Comma separated list, eg: external, 192.168.1.0/24, *.mydomain.com +;; Built-in: loopback (for localhost), private (for LAN/intranet), external (for public hosts on internet), * (for all hosts) +;; CIDR list: 1.2.3.0/8, 2001:db8::/32 +;; Wildcard hosts: *.mydomain.com, 192.168.100.* +;; Since 1.15.7. Default to * for 1.15.x, external for 1.16 and later +;ALLOWED_HOST_LIST = external +;; +;; Allow insecure certification +;SKIP_TLS_VERIFY = false +;; +;; Number of history information in each page +;PAGING_NUM = 10 +;; +;; Proxy server URL, support http://, https//, socks://, blank will follow environment http_proxy/https_proxy +;PROXY_URL = +;; +;; Comma separated list of host names requiring proxy. Glob patterns (*) are accepted; use ** to match all hosts. +;PROXY_HOSTS = + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[mailer] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; NOTICE: this section is for Gitea 1.18 and later. If you are using Gitea 1.17 or older, +;; please refer to +;; https://github.com/go-gitea/gitea/blob/release/v1.17/custom/conf/app.example.ini +;; https://github.com/go-gitea/gitea/blob/release/v1.17/docs/content/doc/advanced/config-cheat-sheet.en-us.md +;; +;ENABLED = false +;; +;; Buffer length of channel, keep it as it is if you don't know what it is. +;SEND_BUFFER_LEN = 100 +;; +;; Prefix displayed before subject in mail +;SUBJECT_PREFIX = +;; +;; Mail server protocol. One of "smtp", "smtps", "smtp+starttls", "smtp+unix", "sendmail", "dummy". +;; - sendmail: use the operating system's `sendmail` command instead of SMTP. 
This is common on Linux systems. +;; - dummy: send email messages to the log as a testing phase. +;; If your provider does not explicitly say which protocol it uses but does provide a port, +;; you can set SMTP_PORT instead and this will be inferred. +;; (Before 1.18, see the notice, this was controlled via MAILER_TYPE and IS_TLS_ENABLED.) +;PROTOCOL = +;; +;; Mail server address, e.g. smtp.gmail.com. +;; For smtp+unix, this should be a path to a unix socket instead. +;; (Before 1.18, see the notice, this was combined with SMTP_PORT as HOST.) +;SMTP_ADDR = +;; +;; Mail server port. Common ports are: +;; 25: insecure SMTP +;; 465: SMTP Secure +;; 587: StartTLS +;; If no protocol is specified, it will be inferred by this setting. +;; (Before 1.18, this was combined with SMTP_ADDR as HOST.) +;SMTP_PORT = +;; +;; Enable HELO operation. Defaults to true. +;ENABLE_HELO = true +;; +;; Custom hostname for HELO operation. +;; If no value is provided, one is retrieved from system. +;HELO_HOSTNAME = +;; +;; If set to `true`, completely ignores server certificate validation errors. +;; This option is unsafe. Consider adding the certificate to the system trust store instead. +;FORCE_TRUST_SERVER_CERT = false +;; +;; Use client certificate in connection. +;USE_CLIENT_CERT = false +;CLIENT_CERT_FILE = custom/mailer/cert.pem +;CLIENT_KEY_FILE = custom/mailer/key.pem +;; +;; Mail from address, RFC 5322. This can be just an email address, or the `"Name" ` format +;FROM = +;; +;; Sometimes it is helpful to use a different address on the envelope. Set this to use ENVELOPE_FROM as the from on the envelope. Set to `<>` to send an empty address. +;ENVELOPE_FROM = +;; +;; If gitea sends mails on behave of users, it will just use the name also displayed in the WebUI. If you want e.g. `Mister X (by CodeIt) `, +;; set it to `{{ .DisplayName }} (by {{ .AppName }})`. Available Variables: `.DisplayName`, `.AppName` and `.Domain`. +;FROM_DISPLAY_NAME_FORMAT = {{ .DisplayName }} +;; +;; Mailer user name and password, if required by provider. +;USER = +;; +;; Use PASSWD = `your password` for quoting if you use special characters in the password. +;PASSWD = +;; +;; Send mails only in plain text, without HTML alternative +;SEND_AS_PLAIN_TEXT = false +;; +;; Specify an alternative sendmail binary +;SENDMAIL_PATH = sendmail +;; +;; Specify any extra sendmail arguments +;; WARNING: if your sendmail program interprets options you should set this to "--" or terminate these args with "--" +;SENDMAIL_ARGS = +;; +;; Timeout for Sendmail +;SENDMAIL_TIMEOUT = 5m +;; +;; convert \r\n to \n for Sendmail +;SENDMAIL_CONVERT_CRLF = true + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[mailer.override_header] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; This is empty by default, use it only if you know what you need it for. +;Reply-To = test@example.com, test2@example.com +;Content-Type = text/html; charset=utf-8 +;In-Reply-To = + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[email.incoming] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; Enable handling of incoming emails. +;ENABLED = false +;; +;; The email address including the %{token} placeholder that will be replaced per user/action. 
+;; Example: incoming+%{token}@example.com +;; The placeholder must appear in the user part of the address (before the @). +;REPLY_TO_ADDRESS = +;; +;; IMAP server host +;HOST = +;; +;; IMAP server port +;PORT = +;; +;; Username of the receiving account +;USERNAME = +;; +;; Password of the receiving account +;PASSWORD = +;; +;; Whether the IMAP server uses TLS. +;USE_TLS = false +;; +;; If set to true, completely ignores server certificate validation errors. This option is unsafe. +;SKIP_TLS_VERIFY = true +;; +;; The mailbox name where incoming mail will end up. +;MAILBOX = INBOX +;; +;; Whether handled messages should be deleted from the mailbox. +;DELETE_HANDLED_MESSAGE = true +;; +;; Maximum size of a message to handle. Bigger messages are ignored. Set to 0 to allow every size. +;MAXIMUM_MESSAGE_SIZE = 10485760 + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[cache] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; Either "memory", "redis", "memcache", or "twoqueue". default is "memory" +;ADAPTER = memory +;; +;; For "memory" only, GC interval in seconds, default is 60 +;INTERVAL = 60 +;; +;; For "redis" and "memcache", connection host address +;; redis: `redis://127.0.0.1:6379/0?pool_size=100&idle_timeout=180s` (or `redis+cluster://127.0.0.1:6379/0?pool_size=100&idle_timeout=180s` for a Redis cluster) +;; memcache: `127.0.0.1:11211` +;; twoqueue: `{"size":50000,"recent_ratio":0.25,"ghost_ratio":0.5}` or `50000` +;HOST = +;; +;; Time to keep items in cache if not used, default is 16 hours. +;; Setting it to -1 disables caching +;ITEM_TTL = 16h + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Last commit cache +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[cache.last_commit] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; Time to keep items in cache if not used, default is 8760 hours. +;; Setting it to -1 disables caching +;ITEM_TTL = 8760h +;; +;; Only enable the cache when repository's commits count great than +;COMMITS_COUNT = 1000 + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[session] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; Either "memory", "file", "redis", "db", "mysql", "couchbase", "memcache" or "postgres" +;; Default is "memory". "db" will reuse the configuration in [database] +;PROVIDER = memory +;; +;; Provider config options +;; memory: doesn't have any config yet +;; file: session file path, e.g. `data/sessions` +;; redis: `redis://127.0.0.1:6379/0?pool_size=100&idle_timeout=180s` (or `redis+cluster://127.0.0.1:6379/0?pool_size=100&idle_timeout=180s` for a Redis cluster) +;; mysql: go-sql-driver/mysql dsn config string, e.g. `root:password@/session_table` +;PROVIDER_CONFIG = data/sessions ; Relative paths will be made absolute against _`AppWorkPath`_. +;; +;; Session cookie name +;COOKIE_NAME = i_like_gitea +;; +;; If you use session in https only: true or false. If not set, it defaults to `true` if the ROOT_URL is an HTTPS URL. +;COOKIE_SECURE = +;; +;; Session GC time interval in seconds, default is 86400 (1 day) +;GC_INTERVAL_TIME = 86400 +;; +;; Session life time in seconds, default is 86400 (1 day) +;SESSION_LIFE_TIME = 86400 +;; +;; SameSite settings. 
Either "none", "lax", or "strict" +;SAME_SITE=lax + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[picture] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;AVATAR_UPLOAD_PATH = data/avatars +;REPOSITORY_AVATAR_UPLOAD_PATH = data/repo-avatars +;; +;; How Gitea deals with missing repository avatars +;; none = no avatar will be displayed; random = random avatar will be displayed; image = default image will be used +;REPOSITORY_AVATAR_FALLBACK = none +;REPOSITORY_AVATAR_FALLBACK_IMAGE = /img/repo_default.png +;; +;; Max Width and Height of uploaded avatars. +;; This is to limit the amount of RAM used when resizing the image. +;AVATAR_MAX_WIDTH = 4096 +;AVATAR_MAX_HEIGHT = 4096 +;; +;; The multiplication factor for rendered avatar images. +;; Larger values result in finer rendering on HiDPI devices. +;AVATAR_RENDERED_SIZE_FACTOR = 2 +;; +;; Maximum allowed file size for uploaded avatars. +;; This is to limit the amount of RAM used when resizing the image. +;AVATAR_MAX_FILE_SIZE = 1048576 +;; +;; If the uploaded file is not larger than this byte size, the image will be used as is, without resizing/converting. +;AVATAR_MAX_ORIGIN_SIZE = 262144 +;; +;; Chinese users can choose "duoshuo" +;; or a custom avatar source, like: http://cn.gravatar.com/avatar/ +;GRAVATAR_SOURCE = gravatar +;; +;; This value will always be true in offline mode. +;DISABLE_GRAVATAR = false +;; +;; Federated avatar lookup uses DNS to discover avatar associated +;; with emails, see https://www.libravatar.org +;; This value will always be false in offline mode or when Gravatar is disabled. +;ENABLE_FEDERATED_AVATAR = false + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[attachment] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; Whether issue and pull request attachments are enabled. Defaults to `true` +;ENABLED = true +;; +;; Comma-separated list of allowed file extensions (`.zip`), mime types (`text/plain`) or wildcard type (`image/*`, `audio/*`, `video/*`). Empty value or `*/*` allows all types. +;ALLOWED_TYPES = .csv,.docx,.fodg,.fodp,.fods,.fodt,.gif,.gz,.jpeg,.jpg,.log,.md,.mov,.mp4,.odf,.odg,.odp,.ods,.odt,.patch,.pdf,.png,.pptx,.svg,.tgz,.txt,.webm,.xls,.xlsx,.zip +;; +;; Max size of each file. Defaults to 2048MB +;MAX_SIZE = 2048 +;; +;; Max number of files per upload. Defaults to 5 +;MAX_FILES = 5 +;; +;; Storage type for attachments, `local` for local disk or `minio` for s3 compatible +;; object storage service, default is `local`. +;STORAGE_TYPE = local +;; +;; Allows the storage driver to redirect to authenticated URLs to serve files directly +;; Currently, only `minio` and `azureblob` is supported. +;SERVE_DIRECT = false +;; +;; Path for attachments. Defaults to `attachments`. Only available when STORAGE_TYPE is `local` +;; Relative paths will be resolved to `${AppDataPath}/${attachment.PATH}` +;PATH = attachments +;; +;; Minio endpoint to connect only available when STORAGE_TYPE is `minio` +;MINIO_ENDPOINT = localhost:9000 +;; +;; Minio accessKeyID to connect only available when STORAGE_TYPE is `minio`. +;; If not provided and STORAGE_TYPE is `minio`, will search for credentials in known +;; environment variables (MINIO_ACCESS_KEY_ID, AWS_ACCESS_KEY_ID), credentials files +;; (~/.mc/config.json, ~/.aws/credentials), and EC2 instance metadata. 
+;MINIO_ACCESS_KEY_ID = +;; +;; Minio secretAccessKey to connect only available when STORAGE_TYPE is `minio` +;MINIO_SECRET_ACCESS_KEY = +;; +;; Minio bucket to store the attachments only available when STORAGE_TYPE is `minio` +;MINIO_BUCKET = gitea +;; +;; Minio location to create bucket only available when STORAGE_TYPE is `minio` +;MINIO_LOCATION = us-east-1 +;; +;; Minio base path on the bucket only available when STORAGE_TYPE is `minio` +;MINIO_BASE_PATH = attachments/ +;; +;; Minio enabled ssl only available when STORAGE_TYPE is `minio` +;MINIO_USE_SSL = false +;; +;; Minio skip SSL verification available when STORAGE_TYPE is `minio` +;MINIO_INSECURE_SKIP_VERIFY = false +;; +;; Minio checksum algorithm: default (for MinIO or AWS S3) or md5 (for Cloudflare or Backblaze) +;MINIO_CHECKSUM_ALGORITHM = default +;; +;; Minio bucket lookup method defaults to auto mode; set it to `dns` for virtual host style or `path` for path style, only available when STORAGE_TYPE is `minio` +;MINIO_BUCKET_LOOKUP_TYPE = auto +;; Azure Blob endpoint to connect only available when STORAGE_TYPE is `azureblob`, +;; e.g. https://accountname.blob.core.windows.net or http://127.0.0.1:10000/devstoreaccount1 +;AZURE_BLOB_ENDPOINT = +;; +;; Azure Blob account name to connect only available when STORAGE_TYPE is `azureblob` +;AZURE_BLOB_ACCOUNT_NAME = +;; +;; Azure Blob account key to connect only available when STORAGE_TYPE is `azureblob` +;AZURE_BLOB_ACCOUNT_KEY = +;; +;; Azure Blob container to store the attachments only available when STORAGE_TYPE is `azureblob` +;AZURE_BLOB_CONTAINER = gitea +;; +;; override the azure blob base path if storage type is azureblob +;AZURE_BLOB_BASE_PATH = attachments/ + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[time] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; Location the UI time display i.e. Asia/Shanghai +;; Empty means server's location setting +;DEFAULT_UI_LOCATION = + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[cron] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; Common settings +;; +;; Setting this to true will enable all cron tasks periodically with default settings. +;ENABLED = false +;; Setting this to true will run all enabled cron tasks when Gitea starts. +;RUN_AT_START = false +;; +;; Note: ``SCHEDULE`` accept formats +;; - Full crontab specs, e.g. "* * * * * ?" +;; - Descriptors, e.g. 
"@midnight", "@every 1h30m" +;; See more: https://pkg.go.dev/github.com/gogs/cron@v0.0.0-20171120032916-9f6c956d3e14 + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Basic cron tasks - enabled by default +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Clean up old repository archives +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[cron.archive_cleanup] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Whether to enable the job +;ENABLED = true +;; Whether to always run at least once at start up time (if ENABLED) +;RUN_AT_START = true +;; Whether to emit notice on successful execution too +;NOTICE_ON_SUCCESS = false +;; Time interval for job to run +;SCHEDULE = @midnight +;; Archives created more than OLDER_THAN ago are subject to deletion +;OLDER_THAN = 24h + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Update mirrors +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[cron.update_mirrors] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;SCHEDULE = @every 10m +;; Enable running Update mirrors task periodically. +;ENABLED = true +;; Run Update mirrors task when Gitea starts. +;RUN_AT_START = false +;; Notice if not success +;NOTICE_ON_SUCCESS = false +;; Limit the number of mirrors added to the queue to this number +;; (negative values mean no limit, 0 will result in no result in no mirrors being queued effectively disabling pull mirror updating.) +;PULL_LIMIT=50 +;; Limit the number of mirrors added to the queue to this number +;; (negative values mean no limit, 0 will result in no mirrors being queued effectively disabling push mirror updating) +;PUSH_LIMIT=50 + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Repository health check +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[cron.repo_health_check] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;SCHEDULE = @midnight +;; Enable running Repository health check task periodically. +;ENABLED = true +;; Run Repository health check task when Gitea starts. +;RUN_AT_START = false +;; Notice if not success +;NOTICE_ON_SUCCESS = false +;TIMEOUT = 60s +;; Arguments for command 'git fsck', e.g. "--unreachable --tags" +;; see more on http://git-scm.com/docs/git-fsck +;ARGS = + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Check repository statistics +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[cron.check_repo_stats] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Enable running check repository statistics task periodically. +;ENABLED = true +;; Run check repository statistics task when Gitea starts. +;RUN_AT_START = true +;; Notice if not success +;NOTICE_ON_SUCCESS = false +;SCHEDULE = @midnight + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[cron.update_migration_poster_id] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +; Update migrated repositories' issues and comments' posterid, it will always attempt synchronization when the instance starts. 
+;ENABLED = true +;; Update migrated repositories' issues and comments' posterid when starting server (default true) +;RUN_AT_START = true +;; Notice if not success +;NOTICE_ON_SUCCESS = false +;; Interval as a duration between each synchronization. (default every 24h) +;SCHEDULE = @midnight + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Synchronize external user data (only LDAP user synchronization is supported) +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[cron.sync_external_users] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;ENABLED = true +;; Synchronize external user data when starting server (default false) +;RUN_AT_START = false +;; Notice if not success +;NOTICE_ON_SUCCESS = false +;; Interval as a duration between each synchronization (default every 24h) +;SCHEDULE = @midnight +;; Create new users, update existing user data and disable users that are not in external source anymore (default) +;; or only create new users if UPDATE_EXISTING is set to false +;UPDATE_EXISTING = true + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Cleanup expired actions assets +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[cron.cleanup_actions] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;ENABLED = true +;RUN_AT_START = true +;SCHEDULE = @midnight + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Clean-up deleted branches +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[cron.deleted_branches_cleanup] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;ENABLED = true +;; Clean-up deleted branches when starting server (default true) +;RUN_AT_START = true +;; Notice if not success +;NOTICE_ON_SUCCESS = false +;; Interval as a duration between each synchronization (default every 24h) +;SCHEDULE = @midnight +;; deleted branches than OLDER_THAN ago are subject to deletion +;OLDER_THAN = 24h + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Cleanup hook_task table +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[cron.cleanup_hook_task_table] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Whether to enable the job +;ENABLED = true +;; Whether to always run at start up time (if ENABLED) +;RUN_AT_START = false +;; Time interval for job to run +;SCHEDULE = @midnight +;; OlderThan or PerWebhook. How the records are removed, either by age (i.e. how long ago hook_task record was delivered) or by the number to keep per webhook (i.e. keep most recent x deliveries per webhook). +;CLEANUP_TYPE = OlderThan +;; If CLEANUP_TYPE is set to OlderThan, then any delivered hook_task records older than this expression will be deleted. +;OLDER_THAN = 168h +;; If CLEANUP_TYPE is set to PerWebhook, this is number of hook_task records to keep for a webhook (i.e. keep the most recent x deliveries). 
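+;; e.g. (illustrative) CLEANUP_TYPE = PerWebhook with NUMBER_TO_KEEP = 10 keeps only the ten most recent deliveries per webhook.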
+;NUMBER_TO_KEEP = 10 + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Cleanup expired packages +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[cron.cleanup_packages] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Whether to enable the job +;ENABLED = true +;; Whether to always run at least once at start up time (if ENABLED) +;RUN_AT_START = true +;; Whether to emit notice on successful execution too +;NOTICE_ON_SUCCESS = false +;; Time interval for job to run +;SCHEDULE = @midnight +;; Unreferenced blobs created more than OLDER_THAN ago are subject to deletion +;OLDER_THAN = 24h + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +; Extended cron task - not enabled by default +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Delete all unactivated accounts +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[cron.delete_inactive_accounts] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;ENABLED = false +;RUN_AT_START = false +;NOTICE_ON_SUCCESS = false +;SCHEDULE = @annually +;OLDER_THAN = 168h + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Delete all repository archives +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[cron.delete_repo_archives] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;ENABLED = false +;RUN_AT_START = false +;NOTICE_ON_SUCCESS = false +;SCHEDULE = @annually; + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Garbage collect all repositories +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[cron.git_gc_repos] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;ENABLED = false +;RUN_AT_START = false +;NOTICE_ON_SUCCESS = false +;SCHEDULE = @every 72h +;TIMEOUT = 60s +;; Arguments for command 'git gc' +;; The default value is same with [git] -> GC_ARGS +;ARGS = + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Update the '.ssh/authorized_keys' file with Gitea SSH keys +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[cron.resync_all_sshkeys] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;ENABLED = false +;RUN_AT_START = false +;NOTICE_ON_SUCCESS = false +;SCHEDULE = @every 72h + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Resynchronize pre-receive, update and post-receive hooks of all repositories. 
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[cron.resync_all_hooks] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;ENABLED = false +;RUN_AT_START = false +;NOTICE_ON_SUCCESS = false +;SCHEDULE = @every 72h + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Reinitialize all missing Git repositories for which records exist +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[cron.reinit_missing_repos] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;ENABLED = false +;RUN_AT_START = false +;NOTICE_ON_SUCCESS = false +;SCHEDULE = @every 72h + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Delete all repositories missing their Git files +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[cron.delete_missing_repos] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;ENABLED = false +;RUN_AT_START = false +;NOTICE_ON_SUCCESS = false +;SCHEDULE = @every 72h + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Delete generated repository avatars +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[cron.delete_generated_repository_avatars] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;ENABLED = false +;RUN_AT_START = false +;NOTICE_ON_SUCCESS = false +;SCHEDULE = @every 72h + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Delete all old actions from database +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[cron.delete_old_actions] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;ENABLED = false +;RUN_AT_START = false +;NOTICE_ON_SUCCESS = false +;SCHEDULE = @every 168h +;OLDER_THAN = 8760h + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Check for new Gitea versions +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[cron.update_checker] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;ENABLED = true +;RUN_AT_START = false +;ENABLE_SUCCESS_NOTICE = false +;SCHEDULE = @every 168h +;HTTP_ENDPOINT = https://dl.gitea.com/gitea/version.json + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Delete all old system notices from database +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[cron.delete_old_system_notices] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;ENABLED = false +;RUN_AT_START = false +;NO_SUCCESS_NOTICE = false +;SCHEDULE = @every 168h +;OLDER_THAN = 8760h + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Garbage collect LFS pointers in repositories +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[cron.gc_lfs] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;ENABLED = false +;; Garbage collect LFS pointers in repositories (default false) +;RUN_AT_START = false +;; Interval as a duration between each gc run (default every 24h) +;SCHEDULE = @every 24h +;; 
Only attempt to garbage collect LFSMetaObjects older than this (default 7 days) +;OLDER_THAN = 168h +;; Only attempt to garbage collect LFSMetaObjects that have not been attempted to be garbage collected for this long (default 3 days) +;LAST_UPDATED_MORE_THAN_AGO = 72h +; Minimum number of stale LFSMetaObjects to check per repo. Set to `0` to always check all. +;NUMBER_TO_CHECK_PER_REPO = 100 +;Check at least this proportion of LFSMetaObjects per repo. (This may cause all stale LFSMetaObjects to be checked.) +;PROPORTION_TO_CHECK_PER_REPO = 0.6 + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[mirror] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Enables the mirror functionality. Set to **false** to disable all mirrors. Pre-existing mirrors remain valid but won't be updated; may be converted to regular repo. +;ENABLED = true +;; Disable the creation of **new** pull mirrors. Pre-existing mirrors remain valid. Will be ignored if `mirror.ENABLED` is `false`. +;DISABLE_NEW_PULL = false +;; Disable the creation of **new** push mirrors. Pre-existing mirrors remain valid. Will be ignored if `mirror.ENABLED` is `false`. +;DISABLE_NEW_PUSH = false +;; Default interval as a duration between each check +;DEFAULT_INTERVAL = 8h +;; Min interval as a duration must be > 1m +;MIN_INTERVAL = 10m + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[api] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Enables the API documentation endpoints (/api/swagger, /api/v1/swagger, …). True or false. +;ENABLE_SWAGGER = true +;; Max number of items in a page +;MAX_RESPONSE_ITEMS = 50 +;; Default paging number of api +;DEFAULT_PAGING_NUM = 30 +;; Default and maximum number of items per page for git trees api +;DEFAULT_GIT_TREES_PER_PAGE = 1000 +;; Default max size of a blob returned by the blobs API (default is 10MiB) +;DEFAULT_MAX_BLOB_SIZE = 10485760 + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[i18n] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; The first locale will be used as the default if user browser's language doesn't match any locale in the list. +;LANGS = en-US,zh-CN,zh-HK,zh-TW,de-DE,fr-FR,nl-NL,lv-LV,ru-RU,uk-UA,ja-JP,es-ES,pt-BR,pt-PT,pl-PL,bg-BG,it-IT,fi-FI,tr-TR,cs-CZ,sv-SE,ko-KR,el-GR,fa-IR,hu-HU,id-ID,ml-IN +;NAMES = English,简体中文,繁體中文(香港),繁體中文(台灣),Deutsch,Français,Nederlands,Latviešu,Русский,Українська,日本語,Español,Português do Brasil,Português de Portugal,Polski,Български,Italiano,Suomi,Türkçe,Čeština,Српски,Svenska,한국어,Ελληνικά,فارسی,Magyar nyelv,Bahasa Indonesia,മലയാളം + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[highlight.mapping] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Extension mapping to highlight class +;; e.g. 
.toml=ini + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[other] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Show version information about Gitea and Go in the footer +;SHOW_FOOTER_VERSION = true +;; Show template execution time in the footer +;SHOW_FOOTER_TEMPLATE_LOAD_TIME = true +;; Show the "powered by" text in the footer +;SHOW_FOOTER_POWERED_BY = true +;; Generate sitemap. Defaults to `true`. +;ENABLE_SITEMAP = true +;; Enable/Disable RSS/Atom feed +;ENABLE_FEED = true + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[markup] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Set the maximum number of characters in a mermaid source. (Set to -1 to disable limits) +;MERMAID_MAX_SOURCE_CHARACTERS = 5000 + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[markup.sanitizer.1] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; The following keys can appear once to define a sanitation policy rule. +;; This section can appear multiple times by adding a unique alphanumeric suffix to define multiple rules. +;; e.g., [markup.sanitizer.1] -> [markup.sanitizer.2] -> [markup.sanitizer.TeX] +;ELEMENT = span +;ALLOW_ATTR = class +;REGEXP = ^(info|warning|error)$ +;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Other markup formats e.g. asciidoc +;; +;; uncomment and enable the below section. +;; (You can add other markup formats by copying the section and adjusting +;; the section name suffix "asciidoc" to something else.) +;[markup.asciidoc] +;ENABLED = false +;; List of file extensions that should be rendered by an external command +;FILE_EXTENSIONS = .adoc,.asciidoc +;; External command to render all matching extensions +;RENDER_COMMAND = "asciidoc --out-file=- -" +;; Don't pass the file on STDIN, pass the filename as argument instead. +;IS_INPUT_FILE = false +;; How the content will be rendered. +;; * sanitized: Sanitize the content and render it inside current page, default to only allow a few HTML tags and attributes. Customized sanitizer rules can be defined in [markup.sanitizer.*] . +;; * no-sanitizer: Disable the sanitizer and render the content inside current page. It's **insecure** and may lead to XSS attack if the content contains malicious code. +;; * iframe: Render the content in a separate standalone page and embed it into current page by iframe. The iframe is in sandbox mode with same-origin disabled, and the JS code are safely isolated from parent page. +;RENDER_CONTENT_MODE=sanitized + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[metrics] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Enables metrics endpoint. True or false; default is false. 
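+;; The metrics are exposed at /metrics in the Prometheus exposition format. As a rough
+;; illustration only (not part of the upstream sample), a scrape job could look like the
+;; following, assuming the instance is reachable at gitea.example.com and a TOKEN is
+;; configured below:
+;;   scrape_configs:
+;;     - job_name: 'gitea'
+;;       authorization:
+;;         credentials: your-token-here
+;;       static_configs:
+;;         - targets: ['gitea.example.com']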
+;ENABLED = false +;; If you want to add authorization, specify a token here +;TOKEN = +;; Enable issue by label metrics; default is false +;ENABLED_ISSUE_BY_LABEL = false +;; Enable issue by repository metrics; default is false +;ENABLED_ISSUE_BY_REPOSITORY = false + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[migrations] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; Max attempts per http/https request on migrations. +;MAX_ATTEMPTS = 3 +;; +;; Backoff time per http/https request retry (seconds) +;RETRY_BACKOFF = 3 +;; +;; Allowed domains for migrating, default is blank. Blank means everything will be allowed. +;; Multiple domains could be separated by commas. +;; Wildcard is supported: "github.com, *.github.com" +;ALLOWED_DOMAINS = +;; +;; Blocklist for migrating, default is blank. Multiple domains could be separated by commas. +;; When ALLOWED_DOMAINS is not blank, this option has a higher priority to deny domains. +;; Wildcard is supported. +;BLOCKED_DOMAINS = +;; +;; Allow private addresses defined by RFC 1918, RFC 1122, RFC 4632 and RFC 4291 (false by default) +;; If a domain is allowed by ALLOWED_DOMAINS, this option will be ignored. +;ALLOW_LOCALNETWORKS = false + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[federation] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; Enable/Disable federation capabilities +;ENABLED = false +;; +;; Enable/Disable user statistics for nodeinfo if federation is enabled +;SHARE_USER_STATISTICS = true +;; +;; Maximum federation request and response size (MB) +;MAX_SIZE = 4 +;; +;; WARNING: Changing the settings below can break federation. +;; +;; HTTP signature algorithms +;ALGORITHMS = rsa-sha256, rsa-sha512, ed25519 +;; +;; HTTP signature digest algorithm +;DIGEST_ALGORITHM = SHA-256 +;; +;; GET headers for federation requests +;GET_HEADERS = (request-target), Date +;; +;; POST headers for federation requests +;POST_HEADERS = (request-target), Date, Digest + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[packages] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; Enable/Disable package registry capabilities +;ENABLED = true +;; +;STORAGE_TYPE = local +;; override the minio base path if storage type is minio +;MINIO_BASE_PATH = packages/ +;; override the azure blob base path if storage type is azureblob +;AZURE_BLOB_BASE_PATH = packages/ +;; Allows the storage driver to redirect to authenticated URLs to serve files directly +;; Currently, only `minio` and `azureblob` is supported. +;SERVE_DIRECT = false +;; +;; Path for chunked uploads. 
Defaults to APP_DATA_PATH + `tmp/package-upload` +;CHUNKED_UPLOAD_PATH = tmp/package-upload +;; +;; Maximum count of package versions a single owner can have (`-1` means no limits) +;LIMIT_TOTAL_OWNER_COUNT = -1 +;; Maximum size of packages a single owner can use (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`) +;LIMIT_TOTAL_OWNER_SIZE = -1 +;; Maximum size of an Alpine upload (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`) +;LIMIT_SIZE_ALPINE = -1 +;; Maximum size of a Cargo upload (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`) +;LIMIT_SIZE_CARGO = -1 +;; Maximum size of a Chef upload (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`) +;LIMIT_SIZE_CHEF = -1 +;; Maximum size of a Composer upload (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`) +;LIMIT_SIZE_COMPOSER = -1 +;; Maximum size of a Conan upload (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`) +;LIMIT_SIZE_CONAN = -1 +;; Maximum size of a Conda upload (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`) +;LIMIT_SIZE_CONDA = -1 +;; Maximum size of a Container upload (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`) +;LIMIT_SIZE_CONTAINER = -1 +;; Maximum size of a CRAN upload (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`) +;LIMIT_SIZE_CRAN = -1 +;; Maximum size of a Debian upload (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`) +;LIMIT_SIZE_DEBIAN = -1 +;; Maximum size of a Generic upload (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`) +;LIMIT_SIZE_GENERIC = -1 +;; Maximum size of a Go upload (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`) +;LIMIT_SIZE_GO = -1 +;; Maximum size of a Helm upload (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`) +;LIMIT_SIZE_HELM = -1 +;; Maximum size of a Maven upload (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`) +;LIMIT_SIZE_MAVEN = -1 +;; Maximum size of a npm upload (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`) +;LIMIT_SIZE_NPM = -1 +;; Maximum size of a NuGet upload (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`) +;LIMIT_SIZE_NUGET = -1 +;; Maximum size of a Pub upload (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`) +;LIMIT_SIZE_PUB = -1 +;; Maximum size of a PyPI upload (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`) +;LIMIT_SIZE_PYPI = -1 +;; Maximum size of a RPM upload (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`) +;LIMIT_SIZE_RPM = -1 +;; Maximum size of a RubyGems upload (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`) +;LIMIT_SIZE_RUBYGEMS = -1 +;; Maximum size of a Swift upload (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`) +;LIMIT_SIZE_SWIFT = -1 +;; Maximum size of a Vagrant upload (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`) +;LIMIT_SIZE_VAGRANT = -1 + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; default storage for attachments, lfs and avatars +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[storage] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; storage type +;STORAGE_TYPE = local + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; repo-archive storage will override storage +;; +;[repo-archive] +;STORAGE_TYPE = local +;; +;; Where your lfs files reside, default is data/lfs. 
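+;; (The comment above appears to be copied from the [lfs] section; for [repo-archive]
+;; the setting below stores repository archives and defaults to data/repo-archive.)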
+;PATH = data/repo-archive +;; +;; override the minio base path if storage type is minio +;MINIO_BASE_PATH = repo-archive/ +;; override the azure blob base path if storage type is azureblob +;AZURE_BLOB_BASE_PATH = repo-archive/ + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; settings for repository archives, will override storage setting +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[storage.repo-archive] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; storage type +;STORAGE_TYPE = local + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; lfs storage will override storage +;; +;[lfs] +;STORAGE_TYPE = local +;; +;; Where your lfs files reside, default is data/lfs. +;PATH = data/lfs +;; +;; Allows the storage driver to redirect to authenticated URLs to serve files directly +;; Currently, only `minio` and `azureblob` is supported. +;SERVE_DIRECT = false +;; +;; override the minio base path if storage type is minio +;MINIO_BASE_PATH = lfs/ +;; +;; override the azure blob base path if storage type is azureblob +;AZURE_BLOB_BASE_PATH = lfs/ + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; settings for packages, will override storage setting +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[storage.packages] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; storage type +;STORAGE_TYPE = local + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; customize storage +;[storage.minio] +;STORAGE_TYPE = minio +;; +;; Minio endpoint to connect only available when STORAGE_TYPE is `minio` +;MINIO_ENDPOINT = localhost:9000 +;; +;; Minio accessKeyID to connect only available when STORAGE_TYPE is `minio`. +;; If not provided and STORAGE_TYPE is `minio`, will search for credentials in known +;; environment variables (MINIO_ACCESS_KEY_ID, AWS_ACCESS_KEY_ID), credentials files +;; (~/.mc/config.json, ~/.aws/credentials), and EC2 instance metadata. +;MINIO_ACCESS_KEY_ID = +;; +;; Minio secretAccessKey to connect only available when STORAGE_TYPE is `minio` +;MINIO_SECRET_ACCESS_KEY = +;; +;; Minio bucket to store the attachments only available when STORAGE_TYPE is `minio` +;MINIO_BUCKET = gitea +;; +;; Minio location to create bucket only available when STORAGE_TYPE is `minio` +;MINIO_LOCATION = us-east-1 +;; +;; Minio enabled ssl only available when STORAGE_TYPE is `minio` +;MINIO_USE_SSL = false +;; +;; Minio skip SSL verification available when STORAGE_TYPE is `minio` +;MINIO_INSECURE_SKIP_VERIFY = false +;; +;; Minio bucket lookup method defaults to auto mode; set it to `dns` for virtual host style or `path` for path style, only available when STORAGE_TYPE is `minio` +;MINIO_BUCKET_LOOKUP_TYPE = auto + +;[storage.azureblob] +;STORAGE_TYPE = azureblob +;; +;; Azure Blob endpoint to connect only available when STORAGE_TYPE is `azureblob`, +;; e.g. 
https://accountname.blob.core.windows.net or http://127.0.0.1:10000/devstoreaccount1 +;AZURE_BLOB_ENDPOINT = +;; +;; Azure Blob account name to connect only available when STORAGE_TYPE is `azureblob` +;AZURE_BLOB_ACCOUNT_NAME = +;; +;; Azure Blob account key to connect only available when STORAGE_TYPE is `azureblob` +;AZURE_BLOB_ACCOUNT_KEY = +;; +;; Azure Blob container to store the attachments only available when STORAGE_TYPE is `azureblob` +;AZURE_BLOB_CONTAINER = gitea + +;[proxy] +;; Enable the proxy, all requests to external via HTTP will be affected +;PROXY_ENABLED = false +;; Proxy server URL, support http://, https//, socks://, blank will follow environment http_proxy/https_proxy/no_proxy +;PROXY_URL = +;; Comma separated list of host names requiring proxy. Glob patterns (*) are accepted; use ** to match all hosts. +;PROXY_HOSTS = + +; [actions] +;; Enable/Disable actions capabilities +;ENABLED = true +;; +;; Default platform to get action plugins, `github` for `https://github.com`, `self` for the current Gitea instance. +;DEFAULT_ACTIONS_URL = github +;; Default artifact retention time in days. Artifacts could have their own retention periods by setting the `retention-days` option in `actions/upload-artifact` step. +;ARTIFACT_RETENTION_DAYS = 90 +;; Timeout to stop the task which have running status, but haven't been updated for a long time +;ZOMBIE_TASK_TIMEOUT = 10m +;; Timeout to stop the tasks which have running status and continuous updates, but don't end for a long time +;ENDLESS_TASK_TIMEOUT = 3h +;; Timeout to cancel the jobs which have waiting status, but haven't been picked by a runner for a long time +;ABANDONED_JOB_TIMEOUT = 24h +;; Strings committers can place inside a commit message or PR title to skip executing the corresponding actions workflow +;SKIP_WORKFLOW_STRINGS = [skip ci],[ci skip],[no ci],[skip actions],[actions skip] + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; settings for action logs, will override storage setting +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;[storage.actions_log] +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; storage type +;STORAGE_TYPE = local diff --git a/apps/gitea/1.22.1/data.yml b/apps/gitea/1.22.1/data.yml new file mode 100644 index 00000000..7a71d3fc --- /dev/null +++ b/apps/gitea/1.22.1/data.yml @@ -0,0 +1,212 @@ +additionalProperties: + formFields: + - child: + default: "" + envKey: PANEL_DB_HOST + required: true + type: service + default: postgresql + edit: true + envKey: PANEL_DB_TYPE + labelZh: 数据库 服务 (前置检查) + labelEn: Database Service (Pre-check) + required: true + type: apps + values: + - label: PostgreSQL + value: postgresql + - label: MySQL + value: mysql + - label: MariaDB + value: mariadb + - label: Percona + value: percona + - default: "/home/gitea" + edit: true + envKey: GITEA_ROOT_PATH + labelZh: 数据持久化路径 + labelEn: Data persistence path + required: true + type: text + - default: 3000 + envKey: PANEL_APP_PORT_HTTP + labelZh: WebUI 端口 + labelEn: WebUI port + required: true + rule: paramPort + type: number + - default: 222 + envKey: PANEL_APP_PORT_SSH + labelZh: SSH 端口 + labelEn: SSH Port + required: true + rule: paramPort + type: number + - default: postgresql + edit: true + envKey: DB_TYPE + labelZh: 数据库 类型 + labelEn: Database Type + required: true + type: select + values: + - label: PostgreSQL + value: postgresql + - label: MySQL (MariaDB, Percona) + value: mysql + - default: 
"127.0.0.1" + edit: true + envKey: DB_HOSTNAME + labelZh: 数据库 主机地址 + labelEn: Database Host + required: true + type: text + - default: 5432 + edit: true + envKey: DB_PORT + labelZh: 数据库 端口 + labelEn: Database Port + required: true + rule: paramPort + type: number + - default: "gitea" + edit: true + envKey: DB_USER + labelZh: 数据库 用户名 + labelEn: Database User + required: true + type: text + - default: "" + edit: true + envKey: DB_PASSWD + labelEn: Database Password + labelZh: 数据库 密码 + random: true + required: true + rule: paramComplexity + type: password + - default: "gitea" + edit: true + envKey: DB_NAME + labelZh: 数据库 名称 + labelEn: Database Name + required: true + type: text + - default: "Gitea: Git with a cup of tea" + edit: true + envKey: APP_NAME + labelZh: 应用名称 + labelEn: Application Name + required: true + type: text + - default: "127.0.0.1" + edit: true + envKey: DOMAIN + labelZh: HTTP 克隆域名 + labelEn: HTTP Clone Domain + required: true + type: text + - default: "127.0.0.1" + edit: true + envKey: SSH_DOMAIN + labelZh: SSH 克隆域名 + labelEn: SSH Clone Domain + required: true + type: text + - default: "222" + edit: true + envKey: SSH_PORT + labelZh: SSH 克隆端口 + labelEn: SSH Clone Port + required: true + type: text + - default: "" + edit: true + envKey: ROOT_URL + labelZh: 公共 URL (覆盖级) + labelEn: Public URL (Override) + required: false + type: text + - default: "false" + edit: true + envKey: LFS_START_SERVER + labelZh: 启用 Git LFS 支持 + labelEn: Enable Git LFS Support + required: true + type: select + values: + - label: 开启 + value: "true" + - label: 关闭 + value: "false" + - default: "false" + edit: true + envKey: DISABLE_REGISTRATION + labelZh: 禁用注册 + labelEn: Disable Registration + required: true + type: select + values: + - label: 开启 + value: "true" + - label: 关闭 + value: "false" + - default: "false" + edit: true + envKey: REQUIRE_SIGNIN_VIEW + labelZh: 强制登录 + labelEn: Require Signin + required: true + type: select + values: + - label: 开启 + value: "true" + - label: 关闭 + value: "false" + - default: "" + edit: true + envKey: INSTALL_LOCK + labelZh: 禁止访问安装页面 + labelEn: Disable Access to Install Page + required: true + type: select + values: + - label: 忽略 + value: "" + - label: 开启 + value: "true" + - label: 关闭 + value: "false" + - default: "" + edit: true + envKey: SECRET_KEY + labelZh: 全局密钥 (覆盖级) + labelEn: Global Secret Key (Override) + required: false + type: text + - default: "false" + edit: true + envKey: PROXY_ENABLED + labelZh: 启用代理 + labelEn: Enable Proxy + required: true + type: select + values: + - label: 开启 + value: "true" + - label: 关闭 + value: "false" + - default: "" + edit: true + envKey: PROXY_URL + labelZh: 代理服务器地址 + labelEn: Proxy Server URL + required: false + type: text + - default: "**" + edit: true + envKey: PROXY_HOSTS + labelZh: 代理网址 + labelEn: Proxy Hosts + required: false + type: text diff --git a/apps/gitea/1.22.1/docker-compose.yml b/apps/gitea/1.22.1/docker-compose.yml new file mode 100644 index 00000000..761a103c --- /dev/null +++ b/apps/gitea/1.22.1/docker-compose.yml @@ -0,0 +1,31 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +services: + gitea: + image: gitea/gitea:1.22.1 + container_name: ${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + networks: + - 1panel-network + ports: + - ${PANEL_APP_PORT_HTTP}:3000 + - ${PANEL_APP_PORT_SSH}:22 + env_file: + - /etc/1panel/envs/global.env + - /etc/1panel/envs/gitea/gitea.env + - ${ENV_FILE:-/etc/1panel/envs/default.env} + volumes: + - ${GITEA_ROOT_PATH}/data:/data + - 
/etc/timezone:/etc/timezone:ro + - /etc/localtime:/etc/localtime:ro + environment: + - USER_UID=1000 + - USER_GID=1000 + - ENABLE_SWAGGER=false + - DB_HOST=${DB_HOSTNAME}:${DB_PORT} diff --git a/apps/gitea/1.22.1/scripts/init.sh b/apps/gitea/1.22.1/scripts/init.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/gitea/1.22.1/scripts/init.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/gitea/1.22.1/scripts/uninstall.sh b/apps/gitea/1.22.1/scripts/uninstall.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/gitea/1.22.1/scripts/uninstall.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/gitea/1.22.1/scripts/upgrade.sh b/apps/gitea/1.22.1/scripts/upgrade.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/gitea/1.22.1/scripts/upgrade.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/gitea/README.md b/apps/gitea/README.md new file mode 100644 index 00000000..85e57ebe --- /dev/null +++ b/apps/gitea/README.md @@ -0,0 +1,95 @@ +# Gitea + +Gitea 是一个轻量级的 DevOps 平台软件。 + +![Gitea](https://file.lifebus.top/imgs/gitea_cover_show.png) + +Gitea 是一个轻量级的 DevOps 平台软件。从开发计划到产品成型的整个软件生命周期,他都能够高效而轻松的帮助团队和开发者。包括 +Git 托管、代码审查、团队协作、软件包注册和 CI/CD。它与 GitHub、Bitbucket 和 GitLab 等比较类似。 Gitea 最初是从 Gogs +分支而来,几乎所有代码都已更改。 + +## 特性 + ++ 代码托管 + +Gitea⽀持创建和管理仓库、浏览提交历史和代码⽂件、审查和合并代码提交、管理协作者、管理分⽀等。它还⽀持许多常见的Git特性,⽐如标签、Cherry-pick、hook、集成协作⼯具等。 + ++ 轻量级和快速 + +Gitea 的设计目标之一就是轻量级和快速响应。它不像一些大型的代码托管平台那样臃肿,因此在性能方面表现出色,适用于资源有限的服务器环境。由于其轻量级设计,Gitea +在资源消耗方面相对较低,可以在资源有限的环境下运行良好。 + ++ 易于部署和维护 + +轻松地部署在各种服务器上,不需要复杂的配置和依赖。这使得个人开发者或小团队可以方便地设置和管理自己的 Git 服务。 + ++ 安全性 + +Gitea 注重安全性,提供了用户权限管理、访问控制列表等功能,可以确保代码和数据的安全性。 + ++ 代码评审 + +代码评审同时支持 Pull Request workflow 和 AGit workflow。评审⼈可以在线浏览代码,并提交评审意见或问题。 提交者可以接收到评审意见,并在线回 +复或修改代码。代码评审可以帮助用户和企业提⾼代码质量。 + ++ CI/CD + +Gitea Actions⽀持 CI/CD 功能,该功能兼容 GitHub Actions,⽤⼾可以采用熟悉的YAML格式编写workflows,也可以重⽤⼤量的已有的 Actions +插件。Actions 插件支持从任意的 Git 网站中下载。 + +项目管理:Gitea 通过看板和⼯单来跟踪⼀个项⽬的需求,功能和bug。⼯单⽀持分支,标签、⾥程碑、 指派、时间跟踪、到期时间、依赖关系等功能。 + ++ 制品库 + +Gitea支持超过 20 种不同种类的公有或私有软件包管理,包括:Cargo, Chef, Composer, Conan, Conda, Container, Helm, Maven, npm, +NuGet, Pub, PyPI, RubyGems, Vagrant等 + ++ 开源社区支持 + +Gitea 是一个基于 MIT 许可证的开源项目,Gitea 拥有一个活跃的开源社区,能够持续地进行开发和改进,同时也积极接受社区贡献,保持了平台的更新和创新。 + ++ 多语言支持 + +Gitea 提供多种语言界面,适应全球范围内的用户,促进了国际化和本地化。 + +## 安装说明 + +### 代理配置 + +开启 `启用代理` 配置后 + ++ `代理服务器地址` 填写代理服务器地址 + +代理服务器支持协议:`http://` `https://` `socks://` + ++ `代理网址` 代理匹配规则 + +支持使用 `*` 匹配符号 + +使用 `**` 代表所有网址 + +例如需要代理: `Github` `gitlab` + +```text +*.github.com,*.gitlab.com +``` + +## 反向代理 + +> Nginx + +```nginx +location / { + proxy_pass http://localhost:3000; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; +} +``` + +## 常见问题 + +> 安装后,修改配置文件后,重启服务,但是配置文件没有生效? 
+ +安装后如需修改配置,请修改 持久化目录 下的 `data/gitea/conf/app.ini` 文件,然后重启服务。 diff --git a/apps/gitea/data.yml b/apps/gitea/data.yml new file mode 100644 index 00000000..38e41c7e --- /dev/null +++ b/apps/gitea/data.yml @@ -0,0 +1,19 @@ +name: Gitea +title: 新一代的代码托管平台 +description: 新一代的代码托管平台 +additionalProperties: + key: gitea + name: Gitea + tags: + - WebSite + - DevOps + - Storage + - Local + shortDescZh: 新一代的代码托管平台 + shortDescEn: The next generation of code hosting platform + type: website + crossVersionUpdate: true + limit: 0 + website: https://gitea.io/ + github: https://github.com/go-gitea/gitea/ + document: https://docs.gitea.io/ diff --git a/apps/gitea/logo.png b/apps/gitea/logo.png new file mode 100644 index 00000000..82732ec6 Binary files /dev/null and b/apps/gitea/logo.png differ diff --git a/apps/halo/2.17.2/data.yml b/apps/halo/2.17.2/data.yml new file mode 100644 index 00000000..d6e8d45d --- /dev/null +++ b/apps/halo/2.17.2/data.yml @@ -0,0 +1,96 @@ +additionalProperties: + formFields: + - child: + default: "" + envKey: PANEL_DB_HOST + required: true + type: service + default: postgresql + envKey: PANEL_DB_TYPE + labelZh: 数据库服务 (前置检查) + labelEn: Database Service + required: true + type: apps + values: + - label: PostgreSQL + value: postgresql + - label: MySQL + value: mysql + - label: MariaDB + value: mariadb + - label: Percona + value: percona + - default: "/home/halo" + edit: true + envKey: HALO_ROOT_PATH + labelZh: 数据持久化路径 + labelEn: Data persistence path + required: true + type: text + - default: 8090 + edit: true + envKey: PANEL_APP_PORT_HTTP + labelEn: Port + labelZh: 访问端口 + required: true + rule: paramPort + type: number + - default: http://127.0.0.1:8090 + edit: true + envKey: HALO_EXTERNAL_URL + labelEn: External URL + labelZh: 公网访问地址 + required: true + rule: paramExtUrl + type: text + - default: "127.0.0.1" + edit: true + envKey: DB_HOSTNAME + labelZh: 数据库 主机地址 + labelEn: Database Host + required: true + type: text + - default: "postgresql" + edit: true + envKey: DB_TYPE + labelZh: 数据库 类型 + labelEn: Database Type + required: true + type: select + values: + - label: PostgreSQL + value: postgresql + - label: MySQL (MariaDB, Percona) + value: mysql + - default: 5432 + edit: true + envKey: DB_PORT + labelZh: 数据库 端口 + labelEn: Database Port + required: true + rule: paramPort + type: number + - default: "halo" + edit: true + envKey: DB_NAME + labelZh: 数据库 名称 + labelEn: Database Name + required: true + rule: paramCommon + type: text + - default: "halo" + edit: true + envKey: DB_USER + labelZh: 数据库 用户名 + labelEn: Database Username + required: true + type: text + - default: "" + edit: true + envKey: DB_USER_PASSWORD + labelZh: 数据库 密码 + labelEn: Database Password + random: true + required: true + rule: paramComplexity + type: password diff --git a/apps/halo/2.17.2/docker-compose.yml b/apps/halo/2.17.2/docker-compose.yml new file mode 100644 index 00000000..b6714107 --- /dev/null +++ b/apps/halo/2.17.2/docker-compose.yml @@ -0,0 +1,25 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +services: + halo: + image: halohub/halo:2.17.2 + container_name: ${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + networks: + - 1panel-network + volumes: + - ${HALO_ROOT_PATH}/data:/root/.halo2 + ports: + - ${PANEL_APP_PORT_HTTP}:8090 + command: + - --spring.r2dbc.url=r2dbc:pool:${DB_TYPE}://${DB_HOSTNAME}:${DB_PORT}/${DB_NAME} + - --spring.r2dbc.username=${DB_USER} + - --spring.r2dbc.password=${DB_USER_PASSWORD} + - --spring.sql.init.platform=${DB_TYPE} + - 
--halo.external-url=${HALO_EXTERNAL_URL} diff --git a/apps/halo/2.17.2/scripts/init.sh b/apps/halo/2.17.2/scripts/init.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/halo/2.17.2/scripts/init.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/halo/2.17.2/scripts/uninstall.sh b/apps/halo/2.17.2/scripts/uninstall.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/halo/2.17.2/scripts/uninstall.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/halo/2.17.2/scripts/upgrade.sh b/apps/halo/2.17.2/scripts/upgrade.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/halo/2.17.2/scripts/upgrade.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/halo/README.md b/apps/halo/README.md new file mode 100644 index 00000000..cb9f6320 --- /dev/null +++ b/apps/halo/README.md @@ -0,0 +1,73 @@ +# Halo + +**强大易用的开源建站工具。** + +配合上丰富的模板与插件,帮助你构建你心中的理想站点。 + +![Halo](https://file.lifebus.top/imgs/halo_cover.png) + +## 简介 + +> ### 可插拔架构 + +Halo 采用可插拔架构,功能模块之间耦合度低、灵活性提高。支持用户按需安装、卸载插件,操作便捷。同时提供插件开发接口以确保较高扩展性和可维护性。 + +✅ 支持在运行时安装和卸载插件 + +✅ 更加方便地集成三方平台 + +✅ 统一的可配置设置表单 + +✅ 支持自定义模型,自动生成 RESTful API + + +> ### 功能丰富的主题机制 + +Halo 提供完整的主题模板机制,用于构建前台界面。这意味着用户可以根据自己的喜好选择不同类型的主题模板来定制化自己的站点外观。 + +✅ 动态切换主题模板 + +✅ 支持实时编辑和预览效果 + +✅ 多语言支持 + +✅ 与插件配合实现更多功能 + +> ### 编辑器 + +Halo 的富文本编辑器提供了方便丰富的功能,包括添加标题、段落、引用、列表、代码块等元素,并支持设置样式属性、上传图片、插入视频等功能。这些工具让你的文章创作更加便捷和生动。 + +✅ 完备的富文本格式支持 + +✅ 支持拖拽和粘贴图片上传 + +✅ 支持通过插件扩展编辑器 + +> 更多特性 +> +> 我们将不断探索和追求更好的使用体验,持续迭代出更加优秀的 Halo + ++ 代码开源 + +Halo 的项目代码开源在 GitHub 上且处于积极维护状态,截止目前已经发布了 109 个版本。你也可以在上面提交你的问题或者参与代码贡献。 + ++ 易于部署 + +推荐使用 Docker 的方式部署 Halo,便于升级,同时避免了各种环境依赖的问题。统一管理在工作目录中的应用数据也能方便地进行备份和迁移。 + ++ 插件机制 + +支持在插件运行时为系统添加新功能,同时保持 Halo 自身的简洁轻量。这种灵活的插件机制让用户根据自身需求自由扩展 Halo +的功能,帮助用户实现富有想象力的站点。 + ++ 模板机制 + +支持自定义配置、主题预览、多语言等功能。这种灵活的模板系统让用户可以针对自己的需求进行自定义配置,为网站带来更加个性化的外观和交互体验。 + ++ 附件管理 + +支持多种存储策略,并支持通过插件扩展外部存储位置,可以让用户更加灵活地地上传、查看和管理附件。 + ++ 搜索引擎 + +内置全文搜索引擎,支持关键字搜索文章和页面内容。同时支持通过插件扩展外部搜索引擎,做到让用户按需选择、自由扩展。 diff --git a/apps/halo/data.yml b/apps/halo/data.yml new file mode 100644 index 00000000..56a1cad6 --- /dev/null +++ b/apps/halo/data.yml @@ -0,0 +1,17 @@ +name: Halo +title: 强大易用的开源建站工具 +description: 强大易用的开源建站工具 +additionalProperties: + key: halo + name: Halo + tags: + - WebSite + - Local + shortDescZh: 强大易用的开源建站工具 + shortDescEn: Powerful and easy-to-use open source website builder + type: website + crossVersionUpdate: true + limit: 0 + website: https://halo.run/ + github: https://github.com/halo-dev/halo/ + document: https://docs.halo.run/ diff --git a/apps/halo/logo.png b/apps/halo/logo.png new file mode 100644 index 00000000..044ab596 Binary files /dev/null and b/apps/halo/logo.png differ diff --git a/apps/immich-machine-learning/1.108.0/data.yml b/apps/immich-machine-learning/1.108.0/data.yml new file mode 100644 index 00000000..7321cde6 --- /dev/null +++ b/apps/immich-machine-learning/1.108.0/data.yml @@ -0,0 +1,61 @@ +additionalProperties: + formFields: + - default: "/home/immich-machine-learning" + edit: true + envKey: IMMICH_ROOT_PATH + labelZh: 数据持久化路径 + labelEn: Data persistence path + required: true + 
type: text + - default: 2283 + edit: true + envKey: PANEL_APP_PORT_HTTP + labelZh: API 端口 + labelEn: API port + required: true + rule: paramPort + type: number + - default: "/dev/dri" + edit: true + envKey: IMMICH_DRIVE_PATH + labelZh: 硬件驱动路径 + labelEn: Drive path + required: true + type: text + - default: "Asia/Shanghai" + edit: true + envKey: TZ + labelZh: 时区 + labelEn: Timezone + required: true + type: text + - default: "XLM-Roberta-Large-Vit-B-16Plus" + edit: true + envKey: MACHINE_LEARNING_PRELOAD__CLIP + labelEn: Machine Learning Preload Model + labelZh: 机器学习 预加载模型 + required: false + type: text + - default: "buffalo_l" + edit: true + envKey: MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION + labelEn: Machine Learning Preload Model + labelZh: 人脸识别 预加载模型 + required: false + type: select + values: + - label: antelopev2 + value: "antelopev2" + - label: buffalo_l + value: "buffalo_l" + - label: buffalo_m + value: "buffalo_m" + - label: buffalo_s + value: "buffalo_s" + - default: "https://hf-mirror.com" + edit: true + envKey: HF_ENDPOINT + labelZh: Hugging Face 服务地址 + labelEn: Hugging Face Endpoint + required: false + type: text diff --git a/apps/immich-machine-learning/1.108.0/docker-compose.yml b/apps/immich-machine-learning/1.108.0/docker-compose.yml new file mode 100644 index 00000000..9dc3ebf1 --- /dev/null +++ b/apps/immich-machine-learning/1.108.0/docker-compose.yml @@ -0,0 +1,26 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +name: immich-machine-learning + +services: + immich-machine-learning: + image: ghcr.io/immich-app/immich-machine-learning:v1.108.0 + container_name: immich-machine-learning-${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + networks: + - 1panel-network + devices: + - /dev/dri:/dev/dri + ports: + - ${PANEL_APP_PORT_HTTP}:3003 + env_file: + - /etc/1panel/envs/global.env + - ${ENV_FILE:-/etc/1panel/envs/default.env} + volumes: + - ${IMMICH_ROOT_PATH}/data/cache:/cache diff --git a/apps/immich-machine-learning/1.108.0/scripts/init.sh b/apps/immich-machine-learning/1.108.0/scripts/init.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/immich-machine-learning/1.108.0/scripts/init.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/immich-machine-learning/1.108.0/scripts/uninstall.sh b/apps/immich-machine-learning/1.108.0/scripts/uninstall.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/immich-machine-learning/1.108.0/scripts/uninstall.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/immich-machine-learning/1.108.0/scripts/upgrade.sh b/apps/immich-machine-learning/1.108.0/scripts/upgrade.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/immich-machine-learning/1.108.0/scripts/upgrade.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found." 
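+    # Note: .env is normally generated by 1Panel from the install form fields; if it is
+    # missing there is nothing to patch here, so the script only reports and returns.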
+fi diff --git a/apps/immich-machine-learning/README.md b/apps/immich-machine-learning/README.md new file mode 100644 index 00000000..10e2e3c7 --- /dev/null +++ b/apps/immich-machine-learning/README.md @@ -0,0 +1,82 @@ +# Immich (机器学习模块) + +Immich - 高性能自托管照片和视频备份解决方案 + +![Immich](https://file.lifebus.top/imgs/immich_cover.png) + +## 简介 + +欢迎您 +您好,很高兴您能来到这里。 + +我叫亚历克斯。我在学校时是一名电气工程师,后来因为工作和对解决问题的纯粹热爱而成为了一名软件工程师。 + +我们和新生儿躺在床上,我妻子说:"我们开始积累大量宝宝的照片和视频,我不想再为 App-Which-Must-Not-Be-Name +付费了。你总是想为我建一些东西,为什么不为我建一个能做到这一点的应用程序呢? + +就这样,这个想法开始在我脑海中萌生。之后,我开始在自助托管领域寻找具有类似备份功能和 "非命名应用程序 " +性能水平的现有解决方案。我发现目前的解决方案主要集中在画廊类型的应用程序上。然而,我想要的是一个简单易用的备份工具,并带有一个能高效查看照片和视频的本地移动应用程序。于是,我作为一名如饥似渴的工程师踏上了寻找之旅。 + +另一个促使我执行 "不可名状的应用程序 "替代方案的动机是,我希望能为开源社区做出贡献,多年来我从这个社区中受益匪浅。 + +我很荣幸能与大家分享这一作品,它重视隐私、回忆,以及在易用、友好的界面中回顾这些时刻的喜悦。 + +如果您喜欢这款应用程序,或者它在某些方面对您有帮助,请考虑支持这个项目。这将有助于我继续开发和维护应用程序。 + +## 环境准备 + ++ `Redis` 服务 + +Immich 使用 Redis 作为缓存服务,所以需要安装 Redis 服务。 + +## 升级说明 + ++ **大版本** `v1.106.2` + +`2024/06/12` 上线,升级需要注意: + +1. 移除 `immich-microservices` 服务 +2. 环境变量发生了合并 +3. 底层API发生了变化 + 4. 移动端需要同步更新 + +## 安装参数 + +### 机器学习 预加载模型(CLIP) + +可选模型列表,粘贴时输入 `immich-app/` 之后的内容即可。 + +[immich-app's Collections - CLIP](https://huggingface.co/collections/immich-app/clip-654eaefb077425890874cd07) + +[immich-app's Collections - Multilingual CLIP](https://huggingface.co/collections/immich-app/multilingual-clip-654eb08c2382f591eeb8c2a7) + +默认模型 + +中文支持较好的模型 `XLM-Roberta-Large-Vit-B-16Plus` + +### 人脸识别 预加载模型 + +`buffalo_l`, `buffalo_m`, `buffalo_s`, `antelopev2` + +## 常见问题 + ++ 安装失败 + + 网络问题,可以尝试使用代理 ++ 升级失败 + + 请查看升级说明 + + 请查看日志,查看具体错误信息 + + 记录安装参数,进行卸载重装 + + 升级1Panel后,提示容器找不到 + + 请删除容器,重新安装 + + 提示文件或目录不存在 + + 可手动创建不存在的文件和目录,然后重试 + + 创建的文件可为空文件 + + 每次都升级失败 + + 很抱歉,官方应用不支持编排式应用(一个应用包含多个容器)的安装与升级,您可以尝试手动卸载安装最新版 ++ 无法访问 + + 请检查是否安装了 `Redis` 服务 + + 请检查是否正确配置了 `Redis` 服务 + + 请检查是否正确配置了 `域名` 和 `SSL` + + 请检查是否正确配置了 `端口` + + 请检查是否正确配置了 `防火墙` 并开放了 `端口` + + 请检查是否正确配置了 `Nginx` 服务 diff --git a/apps/immich-machine-learning/data.yml b/apps/immich-machine-learning/data.yml new file mode 100644 index 00000000..36f3b818 --- /dev/null +++ b/apps/immich-machine-learning/data.yml @@ -0,0 +1,19 @@ +name: Immich 机器学习模块 +tags: + - 多媒体 +title: 高性能自托管照片和视频备份解决方案 +description: 高性能自托管照片和视频备份解决方案 +additionalProperties: + key: immich-machine-learning + name: Immich 机器学习模块 + tags: + - AI + - Local + shortDescZh: 高性能自托管照片和视频备份解决方案 + shortDescEn: High performance self-hosted photo and video backup solution + type: tool + crossVersionUpdate: true + limit: 0 + website: https://immich.app/ + github: https://github.com/immich-app/immich + document: https://immich.app/docs/overview/introduction diff --git a/apps/immich-machine-learning/immich.png b/apps/immich-machine-learning/immich.png new file mode 100644 index 00000000..7fa1ecaf Binary files /dev/null and b/apps/immich-machine-learning/immich.png differ diff --git a/apps/immich-machine-learning/logo.png b/apps/immich-machine-learning/logo.png new file mode 100644 index 00000000..081f92e4 Binary files /dev/null and b/apps/immich-machine-learning/logo.png differ diff --git a/apps/immich-server/1.108.0/data.yml b/apps/immich-server/1.108.0/data.yml new file mode 100644 index 00000000..f890c384 --- /dev/null +++ b/apps/immich-server/1.108.0/data.yml @@ -0,0 +1,133 @@ +additionalProperties: + formFields: + - child: + default: "" + envKey: PANEL_REDIS_SERVICE + required: true + type: service + default: redis + envKey: PANEL_REDIS_TYPE + labelZh: Redis 服务 (前置检查) + labelEn: Redis Service (Pre-check) + required: true + type: apps + values: 
+ - label: Redis + value: redis + - child: + default: "" + envKey: PANEL_POSTGRES_SERVICE + required: true + type: service + default: postgresql + envKey: PANEL_POSTGRES_TYPE + labelZh: Postgres 服务 (前置检查) + labelEn: Postgres Service (Pre-check) + required: true + type: apps + values: + - label: PostgreSQL + value: postgresql + - default: "/home/immich-app" + edit: true + envKey: IMMICH_ROOT_PATH + labelZh: 数据持久化路径 + labelEn: Data persistence path + required: true + type: text + - default: 2283 + edit: true + envKey: PANEL_APP_PORT_HTTP + labelZh: WebUI 端口 + labelEn: WebUI port + required: true + rule: paramPort + type: number + - default: "/dev/dri" + edit: true + envKey: IMMICH_DRIVE_PATH + labelZh: 硬件驱动路径 + labelEn: Drive path + required: true + type: text + - default: "Asia/Shanghai" + edit: true + envKey: TZ + labelZh: 时区 + labelEn: Timezone + required: true + type: text + - default: "127.0.0.1" + edit: true + envKey: DB_HOSTNAME + labelZh: 数据库 主机地址 + labelEn: Database Host + required: true + type: text + - default: 5432 + edit: true + envKey: DB_PORT + labelZh: 数据库 端口 + labelEn: Database Port + required: true + rule: paramPort + type: number + - default: "postgres" + edit: true + envKey: DB_USERNAME + labelZh: 数据库 用户名 + labelEn: Database User + required: true + type: text + - default: "" + edit: true + envKey: DB_PASSWORD + labelZh: 数据库 密码 + labelEn: Database Password + random: true + required: true + rule: paramComplexity + type: password + - default: "immich" + edit: true + envKey: DB_DATABASE_NAME + labelZh: 数据库 名称 + labelEn: Database Name + required: true + type: text + - default: "127.0.0.1" + edit: true + envKey: REDIS_HOSTNAME + labelZh: Redis 主机 + labelEn: Redis Host + required: true + type: text + - default: 6379 + edit: true + envKey: REDIS_PORT + labelZh: Redis 端口 + labelEn: Redis Port + required: true + rule: paramPort + type: number + - default: 0 + edit: true + envKey: REDIS_DBINDEX + labelZh: Redis 索引 + labelEn: Redis Index + required: true + type: number + - default: "" + edit: true + envKey: REDIS_USERNAME + labelZh: Redis 用户名 + labelEn: Redis Username + required: false + type: text + - default: "" + edit: true + envKey: REDIS_PASSWORD + labelZh: Redis 密码 + labelEn: Redis Password + required: false + type: password diff --git a/apps/immich-server/1.108.0/docker-compose.yml b/apps/immich-server/1.108.0/docker-compose.yml new file mode 100644 index 00000000..19760df4 --- /dev/null +++ b/apps/immich-server/1.108.0/docker-compose.yml @@ -0,0 +1,27 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +name: immich-server + +services: + immich-server: + image: ghcr.io/immich-app/immich-server:v1.108.0 + container_name: immich-server-${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + networks: + - 1panel-network + devices: + - ${IMMICH_DRIVE_PATH:-/dev/dri}:/dev/dri + ports: + - ${PANEL_APP_PORT_HTTP}:3001 + env_file: + - /etc/1panel/envs/global.env + - ${ENV_FILE:-/etc/1panel/envs/default.env} + volumes: + - ${IMMICH_ROOT_PATH}/data/library:/usr/src/app/upload + - /etc/localtime:/etc/localtime diff --git a/apps/immich-server/1.108.0/scripts/init.sh b/apps/immich-server/1.108.0/scripts/init.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/immich-server/1.108.0/scripts/init.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found." 
+fi diff --git a/apps/immich-server/1.108.0/scripts/uninstall.sh b/apps/immich-server/1.108.0/scripts/uninstall.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/immich-server/1.108.0/scripts/uninstall.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/immich-server/1.108.0/scripts/upgrade.sh b/apps/immich-server/1.108.0/scripts/upgrade.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/immich-server/1.108.0/scripts/upgrade.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/immich-server/README.md b/apps/immich-server/README.md new file mode 100644 index 00000000..515c686d --- /dev/null +++ b/apps/immich-server/README.md @@ -0,0 +1,82 @@ +# Immich + +Immich - 高性能自托管照片和视频备份解决方案 + +![Immich](https://file.lifebus.top/imgs/immich_cover.png) + +## 简介 + +欢迎您 +您好,很高兴您能来到这里。 + +我叫亚历克斯。我在学校时是一名电气工程师,后来因为工作和对解决问题的纯粹热爱而成为了一名软件工程师。 + +我们和新生儿躺在床上,我妻子说:"我们开始积累大量宝宝的照片和视频,我不想再为 App-Which-Must-Not-Be-Name +付费了。你总是想为我建一些东西,为什么不为我建一个能做到这一点的应用程序呢? + +就这样,这个想法开始在我脑海中萌生。之后,我开始在自助托管领域寻找具有类似备份功能和 "非命名应用程序 " +性能水平的现有解决方案。我发现目前的解决方案主要集中在画廊类型的应用程序上。然而,我想要的是一个简单易用的备份工具,并带有一个能高效查看照片和视频的本地移动应用程序。于是,我作为一名如饥似渴的工程师踏上了寻找之旅。 + +另一个促使我执行 "不可名状的应用程序 "替代方案的动机是,我希望能为开源社区做出贡献,多年来我从这个社区中受益匪浅。 + +我很荣幸能与大家分享这一作品,它重视隐私、回忆,以及在易用、友好的界面中回顾这些时刻的喜悦。 + +如果您喜欢这款应用程序,或者它在某些方面对您有帮助,请考虑支持这个项目。这将有助于我继续开发和维护应用程序。 + +## 环境准备 + ++ `Redis` 服务 + +Immich 使用 Redis 作为缓存服务,所以需要安装 Redis 服务。 + +## 升级说明 + ++ **大版本** `v1.106.2` + +`2024/06/12` 上线,升级需要注意: + +1. 移除 `immich-microservices` 服务 +2. 环境变量发生了合并 +3. 底层API发生了变化 + 4. 
移动端需要同步更新 + +## 安装参数 + +### 机器学习 预加载模型(CLIP) + +可选模型列表,粘贴时输入 `immich-app/` 之后的内容即可。 + +[immich-app's Collections - CLIP](https://huggingface.co/collections/immich-app/clip-654eaefb077425890874cd07) + +[immich-app's Collections - Multilingual CLIP](https://huggingface.co/collections/immich-app/multilingual-clip-654eb08c2382f591eeb8c2a7) + +默认模型 + +中文支持较好的模型 `XLM-Roberta-Large-Vit-B-16Plus` + +### 人脸识别 预加载模型 + +`buffalo_l`, `buffalo_m`, `buffalo_s`, `antelopev2` + +## 常见问题 + ++ 安装失败 + + 网络问题,可以尝试使用代理 ++ 升级失败 + + 请查看升级说明 + + 请查看日志,查看具体错误信息 + + 记录安装参数,进行卸载重装 + + 升级1Panel后,提示容器找不到 + + 请删除容器,重新安装 + + 提示文件或目录不存在 + + 可手动创建不存在的文件和目录,然后重试 + + 创建的文件可为空文件 + + 每次都升级失败 + + 很抱歉,官方应用不支持编排式应用(一个应用包含多个容器)的安装与升级,您可以尝试手动卸载安装最新版 ++ 无法访问 + + 请检查是否安装了 `Redis` 服务 + + 请检查是否正确配置了 `Redis` 服务 + + 请检查是否正确配置了 `域名` 和 `SSL` + + 请检查是否正确配置了 `端口` + + 请检查是否正确配置了 `防火墙` 并开放了 `端口` + + 请检查是否正确配置了 `Nginx` 服务 diff --git a/apps/immich-server/data.yml b/apps/immich-server/data.yml new file mode 100644 index 00000000..9fbb1e95 --- /dev/null +++ b/apps/immich-server/data.yml @@ -0,0 +1,20 @@ +name: Immich 服务端 +tags: + - 多媒体 +title: 高性能自托管照片和视频备份解决方案 +description: 高性能自托管照片和视频备份解决方案 +additionalProperties: + key: immich-server + name: Immich 服务端 + tags: + - Media + - Storage + - Local + shortDescZh: 高性能自托管照片和视频备份解决方案 + shortDescEn: High performance self-hosted photo and video backup solution + type: website + crossVersionUpdate: true + limit: 0 + website: https://immich.app/ + github: https://github.com/immich-app/immich + document: https://immich.app/docs/overview/introduction diff --git a/apps/immich-server/immich.png b/apps/immich-server/immich.png new file mode 100644 index 00000000..7fa1ecaf Binary files /dev/null and b/apps/immich-server/immich.png differ diff --git a/apps/immich-server/logo.png b/apps/immich-server/logo.png new file mode 100644 index 00000000..081f92e4 Binary files /dev/null and b/apps/immich-server/logo.png differ diff --git a/apps/immich/1.108.0/data.yml b/apps/immich/1.108.0/data.yml new file mode 100644 index 00000000..6870e25a --- /dev/null +++ b/apps/immich/1.108.0/data.yml @@ -0,0 +1,144 @@ +additionalProperties: + formFields: + - child: + default: "" + envKey: PANEL_REDIS_SERVICE + required: true + type: service + default: redis + envKey: PANEL_REDIS_TYPE + labelZh: Redis 服务 (前置检查) + labelEn: Redis Service (Pre-check) + required: true + type: apps + values: + - label: Redis + value: redis + - default: "/home/immich-app" + edit: true + envKey: IMMICH_ROOT_PATH + labelZh: 数据持久化路径 + labelEn: Data persistence path + required: true + type: text + - default: 2283 + edit: true + envKey: PANEL_APP_PORT_HTTP + labelZh: WebUI 端口 + labelEn: WebUI port + required: true + rule: paramPort + type: number + - default: 3003 + edit: true + envKey: PANEL_APP_PORT_MACHINE_LEARNING + labelZh: 机器学习端口 + labelEn: Machine Learning port + required: true + rule: paramPort + type: number + - default: "" + edit: true + envKey: DB_PASSWORD + labelEn: Database Password + labelZh: 数据库连接密码 + random: true + required: true + rule: paramComplexity + type: password + - default: 5432 + edit: true + envKey: PANEL_APP_PORT_DB + labelZh: 数据库 端口 + labelEn: Database Port + required: true + rule: paramPort + type: number + - default: "immich-pg14-vectors" + disabled: true + envKey: DB_HOSTNAME + labelZh: 数据库 主机地址 + labelEn: Database Host + required: true + type: text + - default: "postgres" + disabled: true + envKey: DB_USERNAME + labelZh: 数据库 用户名 + labelEn: Database User + required: true + type: text + - default: "immich" + disabled: true + envKey: 
DB_DATABASE_NAME + labelZh: 数据库 名称 + labelEn: Database Name + required: true + type: text + - default: "127.0.0.1" + edit: true + envKey: REDIS_HOSTNAME + labelZh: Redis 主机 + labelEn: Redis Host + required: true + type: text + - default: 6379 + edit: true + envKey: REDIS_PORT + labelZh: Redis 端口 + labelEn: Redis Port + required: true + rule: paramPort + type: number + - default: 0 + edit: true + envKey: REDIS_DBINDEX + labelZh: Redis 索引 + labelEn: Redis Index + required: true + type: number + - default: "" + edit: true + envKey: REDIS_USERNAME + labelZh: Redis 用户名 + labelEn: Redis Username + required: false + type: text + - default: "" + edit: true + envKey: REDIS_PASSWORD + labelZh: Redis 密码 + labelEn: Redis Password + required: false + type: password + - default: "XLM-Roberta-Large-Vit-B-16Plus" + edit: true + envKey: MACHINE_LEARNING_PRELOAD__CLIP + labelEn: Machine Learning Preload Model + labelZh: 机器学习 预加载模型 + required: false + type: text + - default: "buffalo_l" + edit: true + envKey: MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION + labelEn: Machine Learning Preload Model + labelZh: 人脸识别 预加载模型 + required: false + type: select + values: + - label: antelopev2 + value: "antelopev2" + - label: buffalo_l + value: "buffalo_l" + - label: buffalo_m + value: "buffalo_m" + - label: buffalo_s + value: "buffalo_s" + - default: "https://hf-mirror.com" + edit: true + envKey: HF_ENDPOINT + labelZh: Hugging Face 服务地址 + labelEn: Hugging Face Endpoint + rule: paramExtUrl + required: false + type: text diff --git a/apps/immich/1.108.0/docker-compose.yml b/apps/immich/1.108.0/docker-compose.yml new file mode 100644 index 00000000..5b568788 --- /dev/null +++ b/apps/immich/1.108.0/docker-compose.yml @@ -0,0 +1,71 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +name: immich + +services: + immich-pg14-vectors: + image: tensorchord/pgvecto-rs:pg14-v0.2.0 + container_name: pg14-vectors-${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + networks: + - 1panel-network + ports: + - ${PANEL_APP_PORT_DB:-5432}:5432 + volumes: + - ${IMMICH_ROOT_PATH}/pg14/data:/var/lib/postgresql/data + healthcheck: + test: pg_isready --dbname='${DB_DATABASE_NAME}' || exit 1; Chksum="$$(psql --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' --tuples-only --no-align --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1 + interval: 5m + start_interval: 30s + start_period: 5m + command: [ "postgres", "-c" ,"shared_preload_libraries=vectors.so", "-c", 'search_path="$$user", public, vectors', "-c", "logging_collector=on", "-c", "max_wal_size=2GB", "-c", "shared_buffers=512MB", "-c", "wal_compression=on" ] + environment: + POSTGRES_PASSWORD: ${DB_PASSWORD} + POSTGRES_USER: ${DB_USERNAME:-postgres} + POSTGRES_DB: ${DB_DATABASE_NAME:-immich} + POSTGRES_INITDB_ARGS: '--data-checksums' + + immich-machine-learning: + image: ghcr.io/immich-app/immich-machine-learning:v1.108.0 + container_name: immich-machine-learning-${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + networks: + - 1panel-network + devices: + - /dev/dri:/dev/dri + ports: + - ${PANEL_APP_PORT_MACHINE_LEARNING}:3003 + env_file: + - /etc/1panel/envs/global.env + - ${ENV_FILE:-/etc/1panel/envs/default.env} + volumes: + - ${IMMICH_ROOT_PATH}/data/cache:/cache + + immich-server: + depends_on: + - immich-pg14-vectors + image: ghcr.io/immich-app/immich-server:v1.108.0 + container_name: immich-server-${CONTAINER_NAME} + labels: 
+ createdBy: "Apps" + restart: always + networks: + - 1panel-network + devices: + - /dev/dri:/dev/dri + ports: + - ${PANEL_APP_PORT_HTTP}:3001 + env_file: + - /etc/1panel/envs/global.env + - ${ENV_FILE:-/etc/1panel/envs/default.env} + volumes: + - ${IMMICH_ROOT_PATH}/data/library:/usr/src/app/upload + - /etc/localtime:/etc/localtime diff --git a/apps/immich/1.108.0/scripts/init.sh b/apps/immich/1.108.0/scripts/init.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/immich/1.108.0/scripts/init.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/immich/1.108.0/scripts/uninstall.sh b/apps/immich/1.108.0/scripts/uninstall.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/immich/1.108.0/scripts/uninstall.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/immich/1.108.0/scripts/upgrade.sh b/apps/immich/1.108.0/scripts/upgrade.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/immich/1.108.0/scripts/upgrade.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/immich/README.md b/apps/immich/README.md new file mode 100644 index 00000000..515c686d --- /dev/null +++ b/apps/immich/README.md @@ -0,0 +1,82 @@ +# Immich + +Immich - 高性能自托管照片和视频备份解决方案 + +![Immich](https://file.lifebus.top/imgs/immich_cover.png) + +## 简介 + +欢迎您 +您好,很高兴您能来到这里。 + +我叫亚历克斯。我在学校时是一名电气工程师,后来因为工作和对解决问题的纯粹热爱而成为了一名软件工程师。 + +我们和新生儿躺在床上,我妻子说:"我们开始积累大量宝宝的照片和视频,我不想再为 App-Which-Must-Not-Be-Name +付费了。你总是想为我建一些东西,为什么不为我建一个能做到这一点的应用程序呢? + +就这样,这个想法开始在我脑海中萌生。之后,我开始在自助托管领域寻找具有类似备份功能和 "非命名应用程序 " +性能水平的现有解决方案。我发现目前的解决方案主要集中在画廊类型的应用程序上。然而,我想要的是一个简单易用的备份工具,并带有一个能高效查看照片和视频的本地移动应用程序。于是,我作为一名如饥似渴的工程师踏上了寻找之旅。 + +另一个促使我执行 "不可名状的应用程序 "替代方案的动机是,我希望能为开源社区做出贡献,多年来我从这个社区中受益匪浅。 + +我很荣幸能与大家分享这一作品,它重视隐私、回忆,以及在易用、友好的界面中回顾这些时刻的喜悦。 + +如果您喜欢这款应用程序,或者它在某些方面对您有帮助,请考虑支持这个项目。这将有助于我继续开发和维护应用程序。 + +## 环境准备 + ++ `Redis` 服务 + +Immich 使用 Redis 作为缓存服务,所以需要安装 Redis 服务。 + +## 升级说明 + ++ **大版本** `v1.106.2` + +`2024/06/12` 上线,升级需要注意: + +1. 移除 `immich-microservices` 服务 +2. 环境变量发生了合并 +3. 底层API发生了变化 + 4. 
移动端需要同步更新 + +## 安装参数 + +### 机器学习 预加载模型(CLIP) + +可选模型列表,粘贴时输入 `immich-app/` 之后的内容即可。 + +[immich-app's Collections - CLIP](https://huggingface.co/collections/immich-app/clip-654eaefb077425890874cd07) + +[immich-app's Collections - Multilingual CLIP](https://huggingface.co/collections/immich-app/multilingual-clip-654eb08c2382f591eeb8c2a7) + +默认模型 + +中文支持较好的模型 `XLM-Roberta-Large-Vit-B-16Plus` + +### 人脸识别 预加载模型 + +`buffalo_l`, `buffalo_m`, `buffalo_s`, `antelopev2` + +## 常见问题 + ++ 安装失败 + + 网络问题,可以尝试使用代理 ++ 升级失败 + + 请查看升级说明 + + 请查看日志,查看具体错误信息 + + 记录安装参数,进行卸载重装 + + 升级1Panel后,提示容器找不到 + + 请删除容器,重新安装 + + 提示文件或目录不存在 + + 可手动创建不存在的文件和目录,然后重试 + + 创建的文件可为空文件 + + 每次都升级失败 + + 很抱歉,官方应用不支持编排式应用(一个应用包含多个容器)的安装与升级,您可以尝试手动卸载安装最新版 ++ 无法访问 + + 请检查是否安装了 `Redis` 服务 + + 请检查是否正确配置了 `Redis` 服务 + + 请检查是否正确配置了 `域名` 和 `SSL` + + 请检查是否正确配置了 `端口` + + 请检查是否正确配置了 `防火墙` 并开放了 `端口` + + 请检查是否正确配置了 `Nginx` 服务 diff --git a/apps/immich/data.yml b/apps/immich/data.yml new file mode 100644 index 00000000..32412fb0 --- /dev/null +++ b/apps/immich/data.yml @@ -0,0 +1,19 @@ +name: Immich +title: 高性能自托管照片和视频备份解决方案 +description: 高性能自托管照片和视频备份解决方案 +additionalProperties: + key: immich + name: Immich + tags: + - WebSite + - Media + - Storage + - Local + shortDescZh: 高性能自托管照片和视频备份解决方案 + shortDescEn: High performance self-hosted photo and video backup solution + type: website + crossVersionUpdate: true + limit: 0 + website: https://immich.app/ + github: https://github.com/immich-app/immich/ + document: https://immich.app/docs/overview/introduction/ diff --git a/apps/immich/immich.png b/apps/immich/immich.png new file mode 100644 index 00000000..7fa1ecaf Binary files /dev/null and b/apps/immich/immich.png differ diff --git a/apps/immich/logo.png b/apps/immich/logo.png new file mode 100644 index 00000000..081f92e4 Binary files /dev/null and b/apps/immich/logo.png differ diff --git a/apps/iyuu-plus/README.md b/apps/iyuu-plus/README.md new file mode 100644 index 00000000..c2d6b397 --- /dev/null +++ b/apps/iyuu-plus/README.md @@ -0,0 +1,85 @@ +# IYUU Plus + +IYUU 是一个基于种子特征码的交叉索引工具 + +![IYUU Plus](https://file.lifebus.top/imgs/iyuuplus_cover.png) + +## 简介 + +使用php语言编写并使用php-cli常驻内存运行,通过计划任务,按用户设定的频率调用transmission、qBittorrent下载软件的API接口,提取正在做种的info_hash提交到IYUU辅种服务器的API接口https: +//api.iyuu.cn(辅种过程和PT站点没有交互,查询辅种压力由IYUU服务器承担),根据IYUU服务器的API接口https: +//api.iyuu.cn返回的数据拼接种子连接,提交给下载器,由下载器主动去站点下载种子、校验、做种,自动辅种各个站点。 + +集成webui界面、辅种、转移、下载、定时访问URL、动态域名ddns等常用功能,提供完善的插件机制。 + +支持下载器集群,支持多盘位,支持多下载目录,支持连接远程下载器等。 + +### 技术栈 + +| 组件 | 版本 | 官网 | +|:------------|:-------|:--------------------------------------------| +| Workerman | 4.1.15 | https://www.workerman.net/doc/workerman/ | +| Webman | 1.5.16 | https://www.workerman.net/doc/webman/ | +| WebmanAdmin | 0.6.24 | https://www.workerman.net/doc/webman-admin/ | +| PHP | 8.3.7 | https://www.php.net/ | +| MYSQL | 5.7.26 | https://www.mysql.com/ | +| Layui | 2.8.12 | https://layui.dev/ | +| Vue | 3.4.21 | https://vuejs.org/ | + +## 安装说明 + +> 爱语飞飞Token: 前往 [爱语飞飞-官网](https://iyuu.cn/) 获取授权Token +> +> 密码:首次登录填写为登录密码 +> +> 站点认证:请提前准备相应站点 + +## 支持的下载器 + ++ [transmission](https://transmissionbt.com/) + +Transmission 是一个种子客户端,可以让您在互联置上下载和共享文件。该应用程序适用于多种操作系统,包括 Ubuntu、Fedora、Arch +Linux、Debian Raspberry Pi 等。您可以使用 terminal 中的 apt 或 yum 命令来安装它。使用 +Transmission,您可以实时监测下载和上传情况,并显示连接一致性图表 + ++ [qBittorrent](https://www.qbittorrent.org/) + +qBittorrent是一款免费的开源种子下载工具,作为µTorrent的替代品。它在所有平台上都提供相同的功能,包括Windows、Linux和macOS。该应用程序还配备了一个可扩展的搜索引擎以及Web +UI遠端,以最大化你的torrent体验。使用qBittorrent,你可以在多个平台上轻松下载你喜爱的内容。 + +## 反向代理 + +> Nginx 配置 + +```nginx 
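+    # Note: a sketch of what the block below does, assuming the default mapping
+    # where IYUU listens on 127.0.0.1:8787 (PANEL_APP_PORT_IYUU); adjust the
+    # upstream address if you remapped that port. The `if (!-f $request_filename)`
+    # test lets nginx serve files that already exist on disk directly and only
+    # proxies everything else to the IYUU backend.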
+ location ^~ / { + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header Host $host; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_http_version 1.1; + proxy_set_header Connection ""; + if (!-f $request_filename){ + proxy_pass http://127.0.0.1:8787; + } + } +``` + +## 绑定合作站点 + +只有绑定了合作站点的才可以完整的使用IYUUPlus + +认证其一即可,绑定爱语飞飞Token并认证, 后续无需重复认证,未认证无法进行辅种 + +| 认证站点 | 别名 | 官网 | +|-----------|--------|----| +| pthome | 铂金家 | 自查 | +| hdhome | 家园 | 自查 | +| ourbits | 我堡 | 自查 | +| chdbits | 新岛/金钱岛 | 自查 | +| hdfans | 红豆饭 | 自查 | +| audiences | 观众/奥迪 | 自查 | +| piggo | 猪猪网 | 自查 | +| zhuque | 朱雀 | 自查 | +| zmpt | 织梦 | 自查 | +| agsvpt | 末日种子库 | 自查 | +| 其他站点 | 未知 | 未知 | diff --git a/apps/iyuu-plus/data.yml b/apps/iyuu-plus/data.yml new file mode 100644 index 00000000..29bf1e1b --- /dev/null +++ b/apps/iyuu-plus/data.yml @@ -0,0 +1,19 @@ +name: IYUU-Plus +title: 自动辅种工具 +description: 自动辅种工具 +additionalProperties: + key: iyuu-plus + name: IYUU-Plus + tags: + - WebSite + - Tool + - Runtime + - Local + shortDescZh: 自动辅种工具 + shortDescEn: Auto Reseed Tool + type: website + crossVersionUpdate: true + limit: 0 + website: https://iyuu.cn/ + github: https://github.com/ledccn/iyuuplus-dev + document: https://doc.iyuu.cn/ diff --git a/apps/iyuu-plus/latest/data.yml b/apps/iyuu-plus/latest/data.yml new file mode 100644 index 00000000..923f53b8 --- /dev/null +++ b/apps/iyuu-plus/latest/data.yml @@ -0,0 +1,54 @@ +additionalProperties: + formFields: + - default: "/home/iyuuplus" + edit: true + envKey: IYUUCN_ROOT_PATH + labelZh: 数据持久化路径 + labelEn: Data persistence path + required: true + type: text + - default: 8780 + edit: true + envKey: PANEL_APP_PORT_HTTP + labelZh: WebUI 端口 (Nginx) + labelEn: WebUI port (Nginx) + required: true + rule: paramPort + type: number + - default: 8787 + edit: true + envKey: PANEL_APP_PORT_IYUU + labelZh: WebUI 端口 (IYUU) + labelEn: WebUI port (IYUU) + required: false + rule: paramPort + type: number + - default: 3131 + edit: true + envKey: PANEL_APP_PORT_WS + labelZh: WebUI 端口 (WS) + labelEn: WebUI port (WS) + required: false + rule: paramPort + type: number + - default: "" + edit: true + envKey: CUSTOM_MOUNT_DIRECTORY_1 + labelEn: Custom mount directory 1 + labelZh: 自定义挂载目录 1 + required: false + type: text + - default: "" + edit: true + envKey: CUSTOM_MOUNT_DIRECTORY_2 + labelEn: Custom mount directory 2 + labelZh: 自定义挂载目录 2 + required: false + type: text + - default: "" + edit: true + envKey: CUSTOM_MOUNT_DIRECTORY_3 + labelEn: Custom mount directory 3 + labelZh: 自定义挂载目录 3 + required: false + type: text diff --git a/apps/iyuu-plus/latest/docker-compose.yml b/apps/iyuu-plus/latest/docker-compose.yml new file mode 100644 index 00000000..1d699b30 --- /dev/null +++ b/apps/iyuu-plus/latest/docker-compose.yml @@ -0,0 +1,27 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +services: + iyuuplus: + image: iyuucn/iyuuplus-dev:latest + container_name: ${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + networks: + - 1panel-network + ports: + - ${PANEL_APP_PORT_HTTP}:8780 + - ${PANEL_APP_PORT_IYUU:-}:8787 + - ${PANEL_APP_PORT_WS:-}:3131 + volumes: + - ${IYUUCN_ROOT_PATH}/iyuu:/iyuu + - ${IYUUCN_ROOT_PATH}/data:/data + - ${IYUUCN_ROOT_PATH}/qBittorrent:/qBittorrent + - ${IYUUCN_ROOT_PATH}/transmission:/transmission + - ${CUSTOM_MOUNT_DIRECTORY_1:-./default_mount_1}:${CUSTOM_MOUNT_DIRECTORY_1:-/default_mount_1} + - ${CUSTOM_MOUNT_DIRECTORY_2:-./default_mount_2}:${CUSTOM_MOUNT_DIRECTORY_2:-/default_mount_2} + - 
${CUSTOM_MOUNT_DIRECTORY_3:-./default_mount_3}:${CUSTOM_MOUNT_DIRECTORY_3:-/default_mount_3} diff --git a/apps/iyuu-plus/latest/scripts/init.sh b/apps/iyuu-plus/latest/scripts/init.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/iyuu-plus/latest/scripts/init.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/iyuu-plus/latest/scripts/uninstall.sh b/apps/iyuu-plus/latest/scripts/uninstall.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/iyuu-plus/latest/scripts/uninstall.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/iyuu-plus/latest/scripts/upgrade.sh b/apps/iyuu-plus/latest/scripts/upgrade.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/iyuu-plus/latest/scripts/upgrade.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/iyuu-plus/logo.png b/apps/iyuu-plus/logo.png new file mode 100644 index 00000000..1af46e75 Binary files /dev/null and b/apps/iyuu-plus/logo.png differ diff --git a/apps/jellyfin/10.9.7/data.yml b/apps/jellyfin/10.9.7/data.yml new file mode 100644 index 00000000..6abe2bdd --- /dev/null +++ b/apps/jellyfin/10.9.7/data.yml @@ -0,0 +1,61 @@ +additionalProperties: + formFields: + - default: "host" + edit: true + envKey: NETWORK_MODE + labelZh: 网络模式 + labelEn: Network mode + required: true + type: select + values: + - label: 主机模式 + value: "host" + - label: 桥接模式 + value: "bridge" + - label: 无网络 + value: "none" + - label: 1panel-network + value: "1panel-network" + - default: "/home/jellyfin" + edit: true + envKey: JELLYFIN_ROOT_PATH + labelZh: 数据持久化路径 + labelEn: Data persistence path + required: true + type: text + - default: 8096 + edit: true + envKey: PANEL_APP_PORT_HTTP + labelZh: WebUI 端口 + labelEn: WebUI Port + required: true + rule: paramPort + type: number + - default: "" + edit: true + envKey: HTTP_SSL_PROXY + labelZh: HTTP(s) 网络代理 + labelEn: HTTP(s) Proxy + required: false + type: text + - default: "" + edit: true + envKey: CUSTOM_MOUNT_DIRECTORY_1 + labelEn: Custom mount directory 1 + labelZh: 自定义挂载目录 1 + required: false + type: text + - default: "" + edit: true + envKey: CUSTOM_MOUNT_DIRECTORY_2 + labelEn: Custom mount directory 2 + labelZh: 自定义挂载目录 2 + required: false + type: text + - default: "" + edit: true + envKey: CUSTOM_MOUNT_DIRECTORY_3 + labelEn: Custom mount directory 3 + labelZh: 自定义挂载目录 3 + required: false + type: text diff --git a/apps/jellyfin/10.9.7/docker-compose.yml b/apps/jellyfin/10.9.7/docker-compose.yml new file mode 100644 index 00000000..644a9060 --- /dev/null +++ b/apps/jellyfin/10.9.7/docker-compose.yml @@ -0,0 +1,34 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +services: + jellyfin: + image: jellyfin/jellyfin:10.9.7 + container_name: ${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + network_mode: ${NETWORK_MODE} + user: 0:0 + ports: + - ${PANEL_APP_PORT_HTTP}:8096 + volumes: + - ${JELLYFIN_ROOT_PATH}/config:/config + - ${JELLYFIN_ROOT_PATH}/cache:/cache + - ${JELLYFIN_ROOT_PATH}/media:/media + - ${JELLYFIN_ROOT_PATH}/config/font:/config/font + - ${JELLYFIN_ROOT_PATH}/config/dejavu:/usr/share/fonts/truetype/dejavu + - 
${CUSTOM_MOUNT_DIRECTORY_1:-./default_mount_1}:${CUSTOM_MOUNT_DIRECTORY_1:-/default_mount_1} + - ${CUSTOM_MOUNT_DIRECTORY_2:-./default_mount_2}:${CUSTOM_MOUNT_DIRECTORY_2:-/default_mount_2} + - ${CUSTOM_MOUNT_DIRECTORY_3:-./default_mount_3}:${CUSTOM_MOUNT_DIRECTORY_3:-/default_mount_3} + devices: + - /dev/dri:/dev/dri + - /dev/kfd:/dev/kfd + environment: + - ROC_ENABLE_PRE_VEGA=1 + - HTTP_PROXY=${HTTP_PROXY:-} + - HTTPS_PROXY=${HTTP_PROXY:-} + - NO_PROXY=localhost,127.0.0.1,::1 diff --git a/apps/jellyfin/10.9.7/scripts/init.sh b/apps/jellyfin/10.9.7/scripts/init.sh new file mode 100644 index 00000000..c066befc --- /dev/null +++ b/apps/jellyfin/10.9.7/scripts/init.sh @@ -0,0 +1,66 @@ +#!/bin/bash + +if [[ -f .env ]]; then + source .env + + BASE_URL="https://f.lifebus.top/public/1panel/appstore/jellyfin/" + + DEJAVU_FILES=( + "DejaVuSans.ttf" + "DejaVuSans-Bold.ttf" + "DejaVuSansMono.ttf" + "DejaVuSansMono-Bold.ttf" + "DejaVuSerif.ttf" + "DejaVuSerif-Bold.ttf" + ) + + FONT_FILES=( + "NotoSansMonoCJKsc-Bold.woff2" + "NotoSansMonoCJKsc-Regular.otf" + "NotoSansCJKsc-Regular.woff" + "NotoSansMonoCJKsc-Regular.woff" + "NotoSansMonoCJKsc-Regular.woff2" + "NotoSansMonoCJKsc-Bold.otf" + "NotoSansMonoCJKsc-Bold.woff" + "NotoSansCJKsc-Medium.otf" + "NotoSansCJKsc-Regular.otf" + "NotoSansCJKsc-Thin.woff2" + "NotoSansCJKsc-Thin.woff" + "NotoSansCJKsc-Regular.woff2" + "NotoSansCJKsc-Thin.otf" + "NotoSansCJKsc-Medium.woff" + "NotoSansCJKsc-Medium.woff2" + "NotoSansCJKsc-Light.woff" + "NotoSansCJKsc-Light.woff2" + "NotoSansCJKsc-Bold.otf" + "NotoSansCJKsc-Black.otf" + "NotoSansCJKsc-Light.otf" + "NotoSansCJKsc-Bold.woff" + "NotoSansCJKsc-DemiLight.otf" + "NotoSansCJKsc-Black.woff" + "NotoSansCJKsc-DemiLight.woff" + "NotoSansCJKsc-Black.woff2" + "NotoSansCJKsc-DemiLight.woff2" + "NotoSansCJKsc-Bold.woff2" + "font.css" + ) + + if [ ! -d "$JELLYFIN_ROOT_PATH/config/font" ]; then + mkdir -p "$JELLYFIN_ROOT_PATH/config/font" + for FILE in "${FONT_FILES[@]}"; do + wget -q "${BASE_URL}font/${FILE}" -P "$JELLYFIN_ROOT_PATH/config/font/" || echo "Failed to download $FILE, continuing..." + done + fi + + if [ ! -d "$JELLYFIN_ROOT_PATH/config/dejavu" ]; then + mkdir -p "$JELLYFIN_ROOT_PATH/config/dejavu" + for FILE in "${DEJAVU_FILES[@]}"; do + wget -q "${BASE_URL}dejavu/${FILE}" -P "$JELLYFIN_ROOT_PATH/config/dejavu/" || echo "Failed to download $FILE, continuing..." + done + fi + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/jellyfin/10.9.7/scripts/uninstall.sh b/apps/jellyfin/10.9.7/scripts/uninstall.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/jellyfin/10.9.7/scripts/uninstall.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." 
+fi diff --git a/apps/jellyfin/10.9.7/scripts/upgrade.sh b/apps/jellyfin/10.9.7/scripts/upgrade.sh new file mode 100644 index 00000000..c066befc --- /dev/null +++ b/apps/jellyfin/10.9.7/scripts/upgrade.sh @@ -0,0 +1,66 @@ +#!/bin/bash + +if [[ -f .env ]]; then + source .env + + BASE_URL="https://f.lifebus.top/public/1panel/appstore/jellyfin/" + + DEJAVU_FILES=( + "DejaVuSans.ttf" + "DejaVuSans-Bold.ttf" + "DejaVuSansMono.ttf" + "DejaVuSansMono-Bold.ttf" + "DejaVuSerif.ttf" + "DejaVuSerif-Bold.ttf" + ) + + FONT_FILES=( + "NotoSansMonoCJKsc-Bold.woff2" + "NotoSansMonoCJKsc-Regular.otf" + "NotoSansCJKsc-Regular.woff" + "NotoSansMonoCJKsc-Regular.woff" + "NotoSansMonoCJKsc-Regular.woff2" + "NotoSansMonoCJKsc-Bold.otf" + "NotoSansMonoCJKsc-Bold.woff" + "NotoSansCJKsc-Medium.otf" + "NotoSansCJKsc-Regular.otf" + "NotoSansCJKsc-Thin.woff2" + "NotoSansCJKsc-Thin.woff" + "NotoSansCJKsc-Regular.woff2" + "NotoSansCJKsc-Thin.otf" + "NotoSansCJKsc-Medium.woff" + "NotoSansCJKsc-Medium.woff2" + "NotoSansCJKsc-Light.woff" + "NotoSansCJKsc-Light.woff2" + "NotoSansCJKsc-Bold.otf" + "NotoSansCJKsc-Black.otf" + "NotoSansCJKsc-Light.otf" + "NotoSansCJKsc-Bold.woff" + "NotoSansCJKsc-DemiLight.otf" + "NotoSansCJKsc-Black.woff" + "NotoSansCJKsc-DemiLight.woff" + "NotoSansCJKsc-Black.woff2" + "NotoSansCJKsc-DemiLight.woff2" + "NotoSansCJKsc-Bold.woff2" + "font.css" + ) + + if [ ! -d "$JELLYFIN_ROOT_PATH/config/font" ]; then + mkdir -p "$JELLYFIN_ROOT_PATH/config/font" + for FILE in "${FONT_FILES[@]}"; do + wget -q "${BASE_URL}font/${FILE}" -P "$JELLYFIN_ROOT_PATH/config/font/" || echo "Failed to download $FILE, continuing..." + done + fi + + if [ ! -d "$JELLYFIN_ROOT_PATH/config/dejavu" ]; then + mkdir -p "$JELLYFIN_ROOT_PATH/config/dejavu" + for FILE in "${DEJAVU_FILES[@]}"; do + wget -q "${BASE_URL}dejavu/${FILE}" -P "$JELLYFIN_ROOT_PATH/config/dejavu/" || echo "Failed to download $FILE, continuing..." + done + fi + + echo "Check Finish." + +else + echo "Error: .env file not found." 
+fi diff --git a/apps/jellyfin/README.md b/apps/jellyfin/README.md new file mode 100644 index 00000000..d0fcddee --- /dev/null +++ b/apps/jellyfin/README.md @@ -0,0 +1,18 @@ +# JellyFin + +**Jellyfin** 是一个免费开源的媒体服务器软件,用于组织、管理和流媒体共享您的音频、视频和图片等媒体内容。 + +![JellyFin](https://file.lifebus.top/imgs/jellyfin_cover.png) + +## 安装说明 + ++ 开启 `投屏服务(DLNA)` 与 `网络唤醒服务(WOL)` 功能 + +开启后,可以在局域网内的设备上投屏观看视频。 需要选择主机网络(host)模式。 + +## 中文字幕支持 (启用备用字体) + +请在设置中添加备用字体路径 +默认路径:`/home/jellyfin/config/font` + +`/home/jellyfin` 为安装根路径,请在应用参数中查询具体参数 diff --git a/apps/jellyfin/data.yml b/apps/jellyfin/data.yml new file mode 100644 index 00000000..b6ef760c --- /dev/null +++ b/apps/jellyfin/data.yml @@ -0,0 +1,18 @@ +name: JellyFin +title: 多媒体应用程序软件套装 +description: 多媒体应用程序软件套装 +additionalProperties: + key: jellyfin + name: JellyFin + tags: + - WebSite + - Media + - Local + shortDescZh: 多媒体应用程序软件套装 + shortDescEn: A multimedia application software suite + type: website + crossVersionUpdate: true + limit: 0 + website: https://jellyfin.org/ + github: https://github.com/jellyfin/jellyfin + document: https://jellyfin.org/docs/ diff --git a/apps/jellyfin/logo.png b/apps/jellyfin/logo.png new file mode 100644 index 00000000..617e9f18 Binary files /dev/null and b/apps/jellyfin/logo.png differ diff --git a/apps/linkding/1.31.0/data.yml b/apps/linkding/1.31.0/data.yml new file mode 100644 index 00000000..a00513c9 --- /dev/null +++ b/apps/linkding/1.31.0/data.yml @@ -0,0 +1,168 @@ +additionalProperties: + formFields: + - child: + default: "" + envKey: PANEL_POSTGRES_SERVICE + required: true + type: service + default: postgresql + envKey: PANEL_POSTGRES_TYPE + labelZh: Postgres 服务 (前置检查) + labelEn: Postgres Service (Pre-check) + required: true + type: apps + values: + - label: PostgreSQL + value: postgresql + - default: "/home/linkding" + edit: true + envKey: LINKDING_ROOT_PATH + labelZh: 数据持久化路径 + labelEn: Data persistence path + required: true + type: text + - default: 9090 + edit: true + envKey: PANEL_APP_PORT_HTTP + labelZh: WebUI 端口 + labelEn: WebUI port + required: true + rule: paramPort + type: number + - default: "" + edit: true + envKey: LD_SUPERUSER_NAME + labelZh: 管理员 用户名 + labelEn: Superuser name + required: true + type: text + - default: "" + edit: true + envKey: LD_SUPERUSER_PASSWORD + labelZh: 管理员 密码 + labelEn: Superuser password + required: true + rule: paramComplexity + type: password + - default: "False" + edit: true + envKey: LD_DISABLE_BACKGROUND_TASKS + labelZh: 禁用后台任务 + labelEn: Disable background tasks + required: true + type: select + values: + - label: 禁用 + value: "False" + - label: 启用 + value: "True" + - default: "False" + edit: true + envKey: LD_DISABLE_URL_VALIDATION + labelZh: 禁用 URL 验证 + labelEn: Disable URL validation + required: true + type: select + values: + - label: 禁用 + value: "False" + - label: 启用 + value: "True" + - default: 60 + edit: true + envKey: LD_REQUEST_TIMEOUT + labelZh: 请求超时时间 (秒) + labelEn: Request timeout (seconds) + required: true + type: number + - default: "" + edit: true + envKey: LD_CONTEXT_PATH + labelZh: 网站路径 + labelEn: Context path + required: false + type: text + - default: "false" + edit: true + envKey: LD_LOG_X_FORWARDED_FOR + labelZh: 记录真实 IP + labelEn: Log real IP + required: true + type: select + values: + - label: 禁用 + value: "false" + - label: 启用 + value: "true" + - default: "https://t1.gstatic.com/faviconV2?client=SOCIAL&type=FAVICON&fallback_opts=TYPE,SIZE,URL&url={url}&size=32" + edit: true + envKey: LD_FAVICON_PROVIDER + labelZh: 网站图标提供商 + labelEn: Favicon provider + 
required: true + type: text + - default: "localhost" + edit: true + envKey: LD_DB_HOST + labelZh: 数据库 主机 + labelEn: Database Host + required: true + type: text + - default: 5432 + edit: true + envKey: LD_DB_PORT + labelEn: Database Port + labelZh: 数据库 端口 + required: true + rule: paramPort + type: number + - default: "linkding" + edit: true + envKey: LD_DB_USER + labelZh: 数据库 用户名 + labelEn: Database User + required: true + type: text + - default: "" + edit: true + envKey: LD_DB_PASSWORD + labelZh: 数据库 密码 + labelEn: Database Password + required: true + type: password + - default: "linkding" + edit: true + envKey: LD_DB_DATABASE + labelZh: 数据库名称 + labelEn: Database Name + required: true + type: text + - default: "{}" + edit: true + envKey: LD_DB_OPTIONS + labelZh: 数据库选项 (JSON) + labelEn: Database Options (JSON) + required: true + type: text + - default: "" + edit: true + envKey: LD_CSRF_TRUSTED_ORIGINS + labelZh: CSRF 可信来源 + labelEn: CSRF trusted origins + required: false + type: text + - default: "" + edit: true + envKey: ALL_PROXY + labelZh: 代理地址 + labelEn: Proxy address + required: false + rule: paramExtUrl + type: text + - default: "localhost,127.0.0.1,::1" + edit: true + envKey: NO_PROXY + labelZh: 代理白名单 + labelEn: Proxy whitelist + required: false + type: text diff --git a/apps/linkding/1.31.0/docker-compose.yml b/apps/linkding/1.31.0/docker-compose.yml new file mode 100644 index 00000000..cb17c1ca --- /dev/null +++ b/apps/linkding/1.31.0/docker-compose.yml @@ -0,0 +1,26 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +services: + linkding: + image: sissbruecker/linkding:1.31.0-plus-alpine + container_name: ${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + networks: + - 1panel-network + ports: + - ${PANEL_APP_PORT_HTTP}:9090 + volumes: + - ${LINKDING_ROOT_PATH}/data:/etc/linkding/data + env_file: + - .env + environment: + - LD_SERVER_PORT=9090 + - LD_DB_ENGINE=postgres + - HTTP_PROXY=${ALL_PROXY} + - HTTPS_PROXY=${ALL_PROXY} diff --git a/apps/linkding/1.31.0/scripts/init.sh b/apps/linkding/1.31.0/scripts/init.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/linkding/1.31.0/scripts/init.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/linkding/1.31.0/scripts/uninstall.sh b/apps/linkding/1.31.0/scripts/uninstall.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/linkding/1.31.0/scripts/uninstall.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/linkding/1.31.0/scripts/upgrade.sh b/apps/linkding/1.31.0/scripts/upgrade.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/linkding/1.31.0/scripts/upgrade.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." 
+fi diff --git a/apps/linkding/README.md b/apps/linkding/README.md new file mode 100644 index 00000000..0502306e --- /dev/null +++ b/apps/linkding/README.md @@ -0,0 +1,44 @@ +# linkding + +链接是一个简单的书签服务,您可以自己托管。 + +![linkding](https://github.com/sissbruecker/linkding/blob/master/docs/linkding-screenshot.png) + +## 特性 + ++ 用于组织书签的标签 ++ 按文本或标签搜索 ++ 批量编辑 ++ 书签存档 ++ 深色模式 ++ 在 Internet Archive Wayback Machine 上自动创建已添加书签的网站的快照 ++ 自动提供已添加书签的网站的标题和描述 ++ 以 Netscape HTML 格式导入和导出书签 ++ Firefox 和 Chrome 的扩展程序,以及应该在大多数浏览器中工作的书签 ++ 用于开发第三方应用程序的 REST API ++ 用于用户自助服务和原始数据访问的管理面板 ++ 使用Docker易于设置,使用SQLite作为数据库 + +## 安装说明 + +### 网站图标提供商 + ++ 默认供应商 Google + `https://t1.gstatic.com/faviconV2?client=SOCIAL&type=FAVICON&fallback_opts=TYPE,SIZE,URL&url={url}&size=32` ++ 可选供应商 DuckDuckGo + `https://icons.duckduckgo.com/ip3/{domain}.ico` + +> 自定义供应商规则 + ++ 参数 `{url}` + +包括网站的方案和主机名,例如 https://example.com + ++ 参数 `{domain}` + +仅包含网站的主机名,例如 example.com + +## 插件应用 + ++ [Firefox 书签同步](https://addons.mozilla.org/de/firefox/addon/linkding-extension/) ++ [Chrome 书签同步](https://chrome.google.com/webstore/detail/linkding-extension/beakmhbijpdhipnjhnclmhgjlddhidpe) diff --git a/apps/linkding/data.yml b/apps/linkding/data.yml new file mode 100644 index 00000000..b71d2c93 --- /dev/null +++ b/apps/linkding/data.yml @@ -0,0 +1,18 @@ +name: Linkding +title: Linkding +description: 自己托管的书签管理器 +additionalProperties: + key: linkding + name: Linkding + tags: + - WebSite + - Storage + - Local + shortDescZh: 自己托管的书签管理器 + shortDescEn: Self-hosted bookmark manager + type: website + crossVersionUpdate: true + limit: 0 + website: https://demo.linkding.link/ + github: https://github.com/sissbruecker/linkding/ + document: https://github.com/sissbruecker/linkding/blob/master/README.md diff --git a/apps/linkding/logo.png b/apps/linkding/logo.png new file mode 100644 index 00000000..4eef9ed1 Binary files /dev/null and b/apps/linkding/logo.png differ diff --git a/apps/mariadb/11.4.2/config/my.cnf b/apps/mariadb/11.4.2/config/my.cnf new file mode 100644 index 00000000..9d0d4c14 --- /dev/null +++ b/apps/mariadb/11.4.2/config/my.cnf @@ -0,0 +1,30 @@ +# The MariaDB configuration file +# +# The MariaDB/MySQL tools read configuration files in the following order: +# 0. "/etc/mysql/my.cnf" symlinks to this file, reason why all the rest is read. +# 1. "/etc/mysql/mariadb.cnf" (this file) to set global defaults, +# 2. "/etc/mysql/conf.d/*.cnf" to set global options. +# 3. "/etc/mysql/mariadb.conf.d/*.cnf" to set MariaDB-only options. +# 4. "~/.my.cnf" to set user-specific options. +# +# If the same option is defined multiple times, the last one will apply. +# +# One can use all long options that the program supports. +# Run program with --help to get a list of available options and with +# --print-defaults to see which it would actually understand and use. 
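+# For example, running the following inside the container prints the options
+# assembled from this file and the included directories below (a quick way to
+# verify an edit took effect):
+#
+#   mariadbd --print-defaults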
+# +# If you are new to MariaDB, check out https://mariadb.com/kb/en/basic-mariadb-articles/ + +# +# This group is read both by the client and the server +# use it for options that affect everything +# +[client-server] +# Port or socket location where to connect +# port = 3306 +socket = /run/mysqld/mysqld.sock + +# Import all .cnf files from configuration directory + +!includedir /etc/mysql/mariadb.conf.d/ +!includedir /etc/mysql/conf.d/ diff --git a/apps/mariadb/11.4.2/data.yml b/apps/mariadb/11.4.2/data.yml new file mode 100644 index 00000000..a5ca8abb --- /dev/null +++ b/apps/mariadb/11.4.2/data.yml @@ -0,0 +1,24 @@ +additionalProperties: + formFields: + - default: "/home/mariadb" + edit: true + envKey: MARIADB_ROOT_PATH + labelZh: 数据持久化路径 + labelEn: Data persistence path + required: true + type: text + - default: 3306 + envKey: PANEL_APP_PORT_HTTP + labelZh: 连接端口 + labelEn: Connection Port + required: true + rule: paramPort + type: number + - default: "" + envKey: MYSQL_ROOT_PASSWORD + labelZh: 管理员密码 + labelEn: Admin Password + random: true + required: true + rule: paramComplexity + type: password diff --git a/apps/mariadb/11.4.2/docker-compose.yml b/apps/mariadb/11.4.2/docker-compose.yml new file mode 100644 index 00000000..6f1dd1ae --- /dev/null +++ b/apps/mariadb/11.4.2/docker-compose.yml @@ -0,0 +1,23 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +services: + mariadb: + image: mariadb:11.4.2 + container_name: ${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + networks: + - 1panel-network + ports: + - ${PANEL_APP_PORT_HTTP}:3306 + env_file: + - /etc/1panel/envs/global.env + - ${ENV_FILE:-/etc/1panel/envs/default.env} + volumes: + - ${MARIADB_ROOT_PATH}/data:/var/lib/mysql + - ${MARIADB_ROOT_PATH}/config/my.cnf:/etc/mysql/my.cnf diff --git a/apps/mariadb/11.4.2/scripts/init.sh b/apps/mariadb/11.4.2/scripts/init.sh new file mode 100644 index 00000000..0a969d7f --- /dev/null +++ b/apps/mariadb/11.4.2/scripts/init.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + mkdir -p "$MARIADB_ROOT_PATH" + mkdir -p "$MARIADB_ROOT_PATH/config" + mkdir -p "$MARIADB_ROOT_PATH/data" + + cp ./config/my.cnf "$MARIADB_ROOT_PATH/config/my.cnf" + + chown -R 1000:1000 "$MYSQL_ROOT_PATH" + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/mariadb/11.4.2/scripts/uninstall.sh b/apps/mariadb/11.4.2/scripts/uninstall.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/mariadb/11.4.2/scripts/uninstall.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/mariadb/11.4.2/scripts/upgrade.sh b/apps/mariadb/11.4.2/scripts/upgrade.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/mariadb/11.4.2/scripts/upgrade.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found." 
+fi diff --git a/apps/mariadb/README.md b/apps/mariadb/README.md new file mode 100644 index 00000000..106284f4 --- /dev/null +++ b/apps/mariadb/README.md @@ -0,0 +1,18 @@ +# MariaDB + +MariaDB 服务器是社区开发的 MySQL 服务器分支。 + +MariaDB 由原始 MySQL 团队的核心成员发起,积极与外部开发人员合作,提供业界功能最丰富、最稳定且许可合理的开放 SQL 服务器。 + +![MariaDB](https://file.lifebus.top/imgs/mariadb_logo.png) + +## 简介 + +MariaDB是MySQL关系数据库管理系统的一个分叉,由社区开发,有商业支持,旨在继续保持在GNU GPL下开源。 +MariaDB的开发是由MySQL的一些原始开发者领导的,他们担心甲骨文公司收购MySQL后会有一些隐患。 + +MariaDB打算保持与MySQL的高度兼容性,与MySQL API和命令精确匹配。MariaDB自带了一个新的存储引擎Aria,它可以替代MyISAM,成为默认的事务和非事务引擎。 +它最初使用XtraDB作为默认存储引擎,并从10.2版本切换回InnoDB。 + +MariaDB的API和协议兼容MySQL,另外又添加了一些功能,以支持原生的非阻塞操作和进度报告。这意味着,所有使用MySQL的连接器、程序库和应用程序也将可以在MariaDB下工作。 +在此基础上,由于担心甲骨文MySQL的一个更加封闭的软件项目,Fedora等Linux发行版已经在最新版本中以MariaDB取代MySQL,维基媒体基金会的服务器同样也使用MariaDB取代了MySQL。 diff --git a/apps/mariadb/data.yml b/apps/mariadb/data.yml new file mode 100644 index 00000000..71c33701 --- /dev/null +++ b/apps/mariadb/data.yml @@ -0,0 +1,17 @@ +name: MariaDB +title: 开源关系数据库管理系统 +description: 开源关系数据库管理系统 +additionalProperties: + key: mariadb + name: MariaDB + tags: + - Database + - Local + shortDescZh: 开源关系数据库管理系统 + shortDescEn: Open source relational database management system + type: runtime + crossVersionUpdate: true + limit: 0 + website: https://mariadb.org/ + github: https://github.com/MariaDB/server + document: https://mariadb.org/documentation/ diff --git a/apps/mariadb/logo.png b/apps/mariadb/logo.png new file mode 100644 index 00000000..14054e3e Binary files /dev/null and b/apps/mariadb/logo.png differ diff --git a/apps/minio/2024-06-11/data.yml b/apps/minio/2024-06-11/data.yml new file mode 100644 index 00000000..5f4f7a5a --- /dev/null +++ b/apps/minio/2024-06-11/data.yml @@ -0,0 +1,103 @@ +additionalProperties: + formFields: + - default: "/home/minio" + edit: true + envKey: MINIO_ROOT_PATH + labelZh: 数据持久化路径 + labelEn: Data persistence path + required: true + type: text + - default: 9001 + edit: true + envKey: PANEL_APP_PORT_HTTP + labelZh: WebUI 端口 + labelEn: WebUI port + required: true + rule: paramPort + type: number + - default: 9000 + edit: true + envKey: PANEL_APP_PORT_API + labelZh: API 端口 (映射内部端口) + labelEn: API Port (mapped internal port) + required: true + rule: paramPort + type: number + - default: 9000 + edit: true + envKey: MINIO_API_PORT + labelZh: API 内部端口 + labelEn: Internal API Port + required: true + rule: paramPort + type: number + - default: "minio" + edit: true + envKey: MINIO_ROOT_USER + labelZh: 管理员 用户名 + labelEn: Root User + required: true + type: text + - default: "" + edit: true + envKey: MINIO_ROOT_PASSWORD + labelZh: 管理员 密码 + labelEn: Root Password + random: true + required: true + rule: paramComplexity + type: password + - default: "on" + edit: true + envKey: MINIO_BROWSER + labelEn: Enable WebUI + labelZh: 启用 WebUI + required: true + type: select + values: + - label: 开启 + value: "on" + - label: 关闭 + value: "off" + - default: "on" + edit: true + envKey: MINIO_BROWSER_LOGIN_ANIMATION + labelEn: WebUI login animation + labelZh: WebUI 登录动画 + required: true + type: select + values: + - label: 开启 + value: "on" + - label: 关闭 + value: "off" + - default: "12h" + edit: true + envKey: MINIO_BROWSER_SESSION_DURATION + labelEn: Session duration + labelZh: 会话持续时间 (s/秒 m/分钟 h/小时 d/天) + required: true + type: text + - default: "http://127.0.0.1:9000" + edit: true + envKey: MINIO_SERVER_URL + labelZh: API 服务器 URL + labelEn: API Server URL + rule: paramExtUrl + required: true + type: text + - default: "localhost" + edit: true + envKey: 
MINIO_SERVER_HOST + labelZh: API 域名 + labelEn: API Hostname + required: true + type: text + - default: "http://127.0.0.1:9001" + edit: true + envKey: MINIO_BROWSER_REDIRECT_URL + labelZh: WebUI 重定向 URL + labelEn: WebUI Redirect URL + rule: paramExtUrl + required: true + type: text diff --git a/apps/minio/2024-06-11/docker-compose.yml b/apps/minio/2024-06-11/docker-compose.yml new file mode 100644 index 00000000..7a8d8478 --- /dev/null +++ b/apps/minio/2024-06-11/docker-compose.yml @@ -0,0 +1,35 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +services: + minio: + image: minio/minio:RELEASE.2024-06-11T03-13-30Z + container_name: ${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + networks: + - 1panel-network + ports: + - ${PANEL_APP_PORT_HTTP}:${PANEL_APP_PORT_HTTP} + - ${PANEL_APP_PORT_API}:${MINIO_API_PORT} + command: server --console-address :${PANEL_APP_PORT_HTTP} --address :${MINIO_API_PORT} --json + environment: + - MINIO_VOLUMES=/data + - MINIO_API_ROOT_ACCESS=on + - MINIO_BROWSER_REDIRECT=true + env_file: + - .env + extra_hosts: + - ${MINIO_SERVER_HOST:-localhost}:127.0.0.1 + volumes: + - ${MINIO_ROOT_PATH}/certs:/root/.minio/certs + - ${MINIO_ROOT_PATH}/data:/data + logging: + options: + max-size: "5M" + max-file: "10" + driver: json-file diff --git a/apps/minio/2024-06-11/scripts/init.sh b/apps/minio/2024-06-11/scripts/init.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/minio/2024-06-11/scripts/init.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/minio/2024-06-11/scripts/uninstall.sh b/apps/minio/2024-06-11/scripts/uninstall.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/minio/2024-06-11/scripts/uninstall.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/minio/2024-06-11/scripts/upgrade.sh b/apps/minio/2024-06-11/scripts/upgrade.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/minio/2024-06-11/scripts/upgrade.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." 
+fi diff --git a/apps/minio/README.md b/apps/minio/README.md new file mode 100644 index 00000000..ec32f5d3 --- /dev/null +++ b/apps/minio/README.md @@ -0,0 +1,66 @@ +# MinIO + +MinIO 是一种高性能、S3 兼容的对象存储。它是为大规模 AI/ML、数据湖和数据库工作负载。它是软件定义的并在任何云或本地基础设施上运行。 + +![MinIO](https://github.com/minio/minio/raw/master/docs/screenshots/pic1.png) + +## 特性 + ++ 简单 + +简单性是百亿亿次数据基础设施的基础——无论是技术上还是操作上。没有其他对象存储可以让您在更短的时间内从下载到生产。 + ++ 高性能 + +MinIO 是世界上最快的对象存储,已发布的 GET/PUT 结果在 32 个 NVMe 驱动器节点和 100GbE 网络上超过 325 GiB/秒和 165 GiB/秒。 + ++ Kubernetes 原生 + +通过原生 Kubernetes 操作集成,MinIO 支持公共云、私有云和边缘云上的所有主要 Kubernetes 发行版。 + ++ 人工智能就绪 + +MinIO 专为 AI 打造,可与所有主要 AI/ML 技术一起开箱即用。从预测模型到 GenAI,MinIO 提供的性能和可扩展性为 AI 企业提供动力。 + +## 安装说明 + +### API 端口 + +MinIO API 端口默认是 9000。 + +| API 端口 (映射内部端口) | API 内部端口 | +|-----------------|----------| +| 9000 | 9000 | + ++ `API 内部端口` 是容器内部使用的端口,一般无需改动。 ++ `API 端口` 是容器外部实际访问的端口,可以根据自己的需要进行修改。 + +### 域名配置 + +假定 MinIO WebUI 服务的域名为 `web.minio.com` + +假定 MinIO API 服务的域名为 `f.minio.com` + +#### API 服务器 URL + +默认情况为:`http://localhost:9000` + +端口 `9000` 取决于 `API 内部端口` 的配置。 + +域名配置,可填写:`https://f.minio.com` + +协议 `https` 取决于是否启用了 SSL/TLS, 是否配置了域名证书。 + +#### API 域名 + +默认情况为:`localhost` + +域名配置,可填写:`f.minio.com` + +#### WebUI 重定向 URL + +默认情况为:`http://localhost:9001` + +域名配置,可填写:`https://web.minio.com` + +协议 `https` 取决于是否启用了 SSL/TLS, 是否配置了域名证书。 diff --git a/apps/minio/data.yml b/apps/minio/data.yml new file mode 100644 index 00000000..6a104180 --- /dev/null +++ b/apps/minio/data.yml @@ -0,0 +1,20 @@ +name: MinIO +title: 开源的对象存储服务器 +description: 开源的对象存储服务器 +additionalProperties: + key: minio + name: MinIO + tags: + - WebSite + - Middleware + - Storage + - Runtime + - Local + shortDescZh: 开源的对象存储服务器 + shortDescEn: Open source object storage server + type: website + crossVersionUpdate: true + limit: 0 + website: https://min.io/ + github: https://github.com/minio/minio/ + document: https://min.io/docs/ diff --git a/apps/minio/logo.png b/apps/minio/logo.png new file mode 100644 index 00000000..cb983e25 Binary files /dev/null and b/apps/minio/logo.png differ diff --git a/apps/movie-pilot/1.9.12/data.yml b/apps/movie-pilot/1.9.12/data.yml new file mode 100644 index 00000000..a34b342d --- /dev/null +++ b/apps/movie-pilot/1.9.12/data.yml @@ -0,0 +1,341 @@ +additionalProperties: + formFields: + - default: "/home/movie-pilot" + edit: true + envKey: MOVIEPILOT_ROOT_PATH + labelZh: 数据持久化路径 + labelEn: Data persistence path + required: true + type: text + - default: 3000 + edit: true + envKey: PANEL_APP_PORT_HTTP + labelZh: WebUI 端口 + labelEn: WebUI Port + required: true + rule: paramPort + type: number + - default: 3001 + edit: true + envKey: PANEL_APP_PORT_API + labelZh: API 端口 + labelEn: API Port + required: true + rule: paramPort + type: number + - default: "admin" + edit: true + envKey: SUPERUSER + labelZh: 管理员用户名 + labelEn: Superuser Username + required: true + type: text + - default: "moviepilot" + edit: true + envKey: API_TOKEN + labelZh: API 令牌 + labelEn: API Token + required: true + type: password + - default: "" + edit: true + envKey: PROXY_HOST + labelZh: 网络代理 + labelEn: Proxy Host + required: false + rule: paramExtUrl + type: text + - default: "false" + edit: true + envKey: AUTO_UPDATE_RESOURCES + labelZh: 自动更新资源包 + labelEn: Auto Update Resources + required: true + type: select + values: + - label: 开启 + value: "true" + - label: 关闭 + value: "false" + - default: "false" + edit: true + envKey: MOVIEPILOT_AUTO_UPDATE + labelZh: 自动更新 + labelEn: Auto Update + required: true + type: select + values: + - label: 开启 + 
value: "true" + - label: 发布版 + value: "release" + - label: 测试版 + value: "dev" + - label: 关闭 + value: "false" + - default: "" + edit: true + envKey: CUSTOM_MOUNT_DIRECTORY_1 + labelEn: Custom mount directory 1 + labelZh: 自定义挂载目录 1 + required: false + type: text + - default: "" + edit: true + envKey: CUSTOM_MOUNT_DIRECTORY_2 + labelEn: Custom mount directory 2 + labelZh: 自定义挂载目录 2 + required: false + type: text + - default: "" + edit: true + envKey: CUSTOM_MOUNT_DIRECTORY_3 + labelEn: Custom mount directory 3 + labelZh: 自定义挂载目录 3 + required: false + type: text + - default: "" + edit: true + envKey: IYUU_SIGN + labelEn: IYUU Sign + labelZh: IYUU登录令牌 + required: false + type: password + - default: "" + edit: true + envKey: HHCLUB_USERNAME + labelZh: hhclub 用户名 + labelEn: hhclub username + required: false + type: text + - default: "" + edit: true + envKey: HHCLUB_PASSKEY + labelZh: hhclub 密钥 + labelEn: hhclub passkay + required: false + type: password + - default: "" + edit: true + envKey: AUDIENCES_UID + labelZh: audiences 用户ID + labelEn: audiences username + required: false + type: text + - default: "" + edit: true + envKey: AUDIENCES_PASSKEY + labelZh: audiences 密钥 + labelEn: audiences passkay + required: false + type: password + - default: "" + edit: true + envKey: HDDOLBY_ID + labelZh: hddolby 用户ID + labelEn: hddolby username + required: false + type: text + - default: "" + edit: true + envKey: HDDOLBY_PASSKEY + labelZh: hddolby 密钥 + labelEn: hddolby passkay + required: false + type: password + - default: "" + edit: true + envKey: ZMPT_UID + labelZh: zmpt 用户ID + labelEn: zmpt username + required: false + type: text + - default: "" + edit: true + envKey: ZMPT_PASSKEY + labelZh: zmpt 密钥 + labelEn: zmpt passkay + required: false + type: password + - default: "" + edit: true + envKey: FREEFARM_UID + labelZh: freefarm 用户ID + labelEn: freefarm username + required: false + type: text + - default: "" + edit: true + envKey: FREEFARM_PASSKEY + labelZh: freefarm 密钥 + labelEn: freefarm passkay + required: false + type: password + - default: "" + edit: true + envKey: HDFANS_UID + labelZh: hdfans 用户ID + labelEn: hdfans username + required: false + type: text + - default: "" + edit: true + envKey: HDFANS_PASSKEY + labelZh: hdfans 密钥 + labelEn: hdfans passkay + required: false + type: password + - default: "" + edit: true + envKey: WINTERSAKURA_UID + labelZh: wintersakura 用户ID + labelEn: wintersakura username + required: false + type: text + - default: "" + edit: true + envKey: WINTERSAKURA_PASSKEY + labelZh: wintersakura 密钥 + labelEn: wintersakura passkay + required: false + type: password + - default: "" + edit: true + envKey: LEAVES_UID + labelZh: leaves 用户ID + labelEn: leaves username + required: false + type: text + - default: "" + edit: true + envKey: LEAVES_PASSKEY + labelZh: leaves 密钥 + labelEn: leaves passkay + required: false + type: password + - default: "" + edit: true + envKey: PTBA_UID + labelZh: ptba 用户ID + labelEn: ptba username + required: false + type: text + - default: "" + edit: true + envKey: PTBA_PASSKEY + labelZh: ptba 密钥 + labelEn: ptba passkay + required: false + type: password + - default: "" + edit: true + envKey: ICC2022_UID + labelZh: icc2022 用户ID + labelEn: icc2022 username + required: false + type: text + - default: "" + edit: true + envKey: ICC2022_PASSKEY + labelZh: icc2022 密钥 + labelEn: icc2022 passkay + required: false + type: password + - default: "" + edit: true + envKey: XINGTAN_UID + labelZh: xingtan 用户ID + labelEn: xingtan username + required: false + type: text + - 
default: "" + edit: true + envKey: XINGTAN_PASSKEY + labelZh: xingtan 密钥 + labelEn: xingtan passkay + required: false + type: password + - default: "" + edit: true + envKey: PTVICOMO_UID + labelZh: ptvicomo 用户ID + labelEn: ptvicomo username + required: false + type: text + - default: "" + edit: true + envKey: PTVICOMO_PASSKEY + labelZh: ptvicomo 密钥 + labelEn: ptvicomo passkay + required: false + type: password + - default: "" + edit: true + envKey: AGSVPT_UID + labelZh: agsvpt 用户ID + labelEn: agsvpt username + required: false + type: text + - default: "" + edit: true + envKey: AGSVPT_PASSKEY + labelZh: agsvpt 密钥 + labelEn: agsvpt passkay + required: false + type: password + - default: "" + edit: true + envKey: HDKYL_UID + labelZh: hdkyl 用户ID + labelEn: hdkyl username + required: false + type: text + - default: "" + edit: true + envKey: HDKYL_PASSKEY + labelZh: hdkyl 密钥 + labelEn: hdkyl passkay + required: false + type: password + - default: "" + edit: true + envKey: QINGWA_UID + labelZh: qingwa 用户ID + labelEn: qingwa username + required: false + type: text + - default: "" + edit: true + envKey: QINGWA_PASSKEY + labelZh: qingwa 密钥 + labelEn: qingwa passkay + required: false + type: password + - default: "" + edit: true + envKey: DISCFAN_UID + labelZh: discfan 用户ID + labelEn: discfan username + required: false + type: text + - default: "" + edit: true + envKey: DISCFAN_PASSKEY + labelZh: discfan 密钥 + labelEn: discfan passkay + required: false + type: password + - default: "" + edit: true + envKey: ROUSI_UID + labelZh: rousi 用户ID + labelEn: rousi username + required: false + type: text + - default: "" + edit: true + envKey: ROUSI_PASSKEY + labelZh: rousi 密钥 + labelEn: rousi passkay + required: false + type: password diff --git a/apps/movie-pilot/1.9.12/docker-compose.yml b/apps/movie-pilot/1.9.12/docker-compose.yml new file mode 100644 index 00000000..3e775114 --- /dev/null +++ b/apps/movie-pilot/1.9.12/docker-compose.yml @@ -0,0 +1,40 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +services: + movie-pilot: + image: jxxghp/moviepilot:1.9.12 + container_name: ${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + networks: + - 1panel-network + ports: + - ${PANEL_APP_PORT_HTTP}:3000 + - ${PANEL_APP_PORT_API}:3001 + env_file: + - /etc/1panel/envs/global.env + - /etc/1panel/envs/moviepilot/moviepilot.env + - ${ENV_FILE:-/etc/1panel/envs/default.env} + volumes: + - /var/run/docker.sock:/var/run/docker.sock + - ${MOVIEPILOT_ROOT_PATH}/config:/config + - ${MOVIEPILOT_ROOT_PATH}/moviepilot:/moviepilot + - ${MOVIEPILOT_ROOT_PATH}/download:/download + - ${CUSTOM_MOUNT_DIRECTORY_1:-./default_mount_1}:${CUSTOM_MOUNT_DIRECTORY_1:-/default_mount_1} + - ${CUSTOM_MOUNT_DIRECTORY_2:-./default_mount_2}:${CUSTOM_MOUNT_DIRECTORY_2:-/default_mount_2} + - ${CUSTOM_MOUNT_DIRECTORY_3:-./default_mount_3}:${CUSTOM_MOUNT_DIRECTORY_3:-/default_mount_3} + environment: + - NGINX_PORT=3000 + - PORT=3001 + - PUID=0 + - PGID=0 + - UMASK=022 + logging: + driver: json-file + options: + max-size: 5m diff --git a/apps/movie-pilot/1.9.12/scripts/init.sh b/apps/movie-pilot/1.9.12/scripts/init.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/movie-pilot/1.9.12/scripts/init.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found." 
+fi diff --git a/apps/movie-pilot/1.9.12/scripts/uninstall.sh b/apps/movie-pilot/1.9.12/scripts/uninstall.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/movie-pilot/1.9.12/scripts/uninstall.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/movie-pilot/1.9.12/scripts/upgrade.sh b/apps/movie-pilot/1.9.12/scripts/upgrade.sh new file mode 100644 index 00000000..1ed2e78e --- /dev/null +++ b/apps/movie-pilot/1.9.12/scripts/upgrade.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." +else + echo "Error: .env file not found." +fi diff --git a/apps/movie-pilot/README.md b/apps/movie-pilot/README.md new file mode 100644 index 00000000..0c0cefc5 --- /dev/null +++ b/apps/movie-pilot/README.md @@ -0,0 +1,119 @@ +# MoviePilot + +MoviePilot 基于 NAStool 部分代码重新设计,聚焦自动化核心需求,减少问题同时更易于扩展和维护 + +> 仪表盘 + +![MoviePilot-Dashboard](https://file.lifebus.top/imgs/movie_pilot_cover.png) + +> 插件库 + +![MoviePilot-Plugin](https://file.lifebus.top/imgs/movie_pilot_plugin.png) + +## 安装环境 + +### 网络 + +MoviePilot通过调用 TheMovieDb 的Api来读取和匹配媒体元数据,通过访问 Github 来执行程序升级、安装插件等。 + +### Linux 系统 + +部分功能基于文件系统监控实现(如目录监控等),监控的文件较多时,往往会因为操作系统默认允许的文件句柄数太小导致报错,相关功能失效。 +需在宿主机操作系统上(不是docker容器内)执行以下命令并重启生效: + +```shell +echo fs.inotify.max_user_watches=524288 | sudo tee -a /etc/sysctl.conf +echo fs.inotify.max_user_instances=524288 | sudo tee -a /etc/sysctl.conf +sudo sysctl -p +``` + +### 站点 + +MoviePilot包括两大部分功能:文件整理刮削、资源订阅下载,其中资源订阅下载功能需要有可用的PT站点。 + +#### 用户认证 + +为了控制用户数量避免大规范泛滥使用,MoviePilot引入了PT用户认证机制,你需要有认证站点范围内的账号才能使用软件的资源搜索、订阅及下载功能,出此下策与利益无关,属NAStool一路走来,吸取失败经验的无奈之举。 + +> 站点配置参数 +> +> 安装应用时(除IYUU外),填写格式为 参数名=值,例如: +> +> `HHCLUB_USERNAME=xxxxx` +> +> `HHCLUB_PASSKEY=xxxxx` + +| 站点 | 用户名(用户ID) | 密钥(授权码) | +|:------------:|:------------------:|------------------------| +| iyuu | `IYUU_SIGN` | `无` | +| hhclub | `HHCLUB_USERNAME` | `HHCLUB_PASSKEY` | +| audiences | `AUDIENCES_UID` | `AUDIENCES_PASSKEY` | +| hddolby | `HDDOLBY_ID` | `HDDOLBY_PASSKEY` | +| zmpt | `ZMPT_UID` | `ZMPT_PASSKEY` | +| freefarm | `FREEFARM_UID` | `FREEFARM_PASSKEY` | +| hdfans | `HDFANS_UID` | `HDFANS_PASSKEY` | +| wintersakura | `WINTERSAKURA_UID` | `WINTERSAKURA_PASSKEY` | +| leaves | `LEAVES_UID` | `LEAVES_PASSKEY` | +| ptba | `PTBA_UID` | `PTBA_PASSKEY` | +| icc2022 | `ICC2022_UID` | `ICC2022_PASSKEY` | +| xingtan | `XINGTAN_UID` | `XINGTAN_PASSKEY` | +| ptvicomo | `PTVICOMO_UID` | `PTVICOMO_PASSKEY` | +| agsvpt | `AGSVPT_UID` | `AGSVPT_PASSKEY` | +| hdkyl | `HDKYL_UID` | `HDKYL_PASSKEY` | +| qingwa | `QINGWA_UID` | `QINGWA_PASSKEY` | +| discfan | `DISCFAN_UID` | `DISCFAN_PASSKEY` | + +### 配套软件 + +MoviePilot只是媒体库自动化管理的一环,需要通过调用下载器来完成资源的下载,需要通过媒体服务器来管理和展示媒体资源,同时通过媒体服务器Api来查询库存情况控制重复下载,通过CookieCloud来快速同步站点Cookie和新增站点。安装前需要先完成配套软件的安装。 + +#### 下载器 + ++ Qbittorrent `^4.3.9` ++ Transmission `^3.0` + +#### 媒体服务器 + ++ Emby `^4.8.0.45` ++ Jellyfin `新版` ++ Plex `新版` + +#### CookieCloud + ++ CookieCloud服务端 `可选` + +MoviePilot已经内置了CookieCloud服务端,如需独立安装可参考 easychen/CookieCloud 说明 + ++ CookieCloud浏览器插件 + +不管是使用CookieCloud独立服务端还是使用内置服务,都需要安装浏览器插件。 + +## 安装说明 + ++ 用户初始密码 + +前往 `日志` 页面查看初始密码,首次登录后请及时修改密码 + +## 反向代理 + +如需开启域名访问MoviePilot,则需要搭建反向代理服务。以nginx为例,需要添加以下配置项。 + +```nginx +location / { + proxy_pass http://${Host}:${Port}; + proxy_set_header Host $http_host; + proxy_set_header X-Real-IP 
$remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; +} +``` + +反向代理使用SSL时,还需要开启http2,否则会导致日志加载时间过长或不可用。 + +```nginx +server { + listen 443 ssl; + http2 on; + # other settings +} +``` diff --git a/apps/movie-pilot/data.yml b/apps/movie-pilot/data.yml new file mode 100644 index 00000000..7b871a43 --- /dev/null +++ b/apps/movie-pilot/data.yml @@ -0,0 +1,19 @@ +name: MoviePilot +tags: + - 实用工具 +title: NAS媒体库自动化管理工具 +description: NAS媒体库自动化管理工具 +additionalProperties: + key: movie-pilot + name: MoviePilot + tags: + - WebSite + - Local + shortDescZh: NAS媒体库自动化管理工具 + shortDescEn: NAS media library automation management tool + type: website + crossVersionUpdate: true + limit: 0 + website: https://github.com/jxxghp/MoviePilot/ + github: https://github.com/jxxghp/MoviePilot/ + document: https://wiki.movie-pilot.org/ diff --git a/apps/movie-pilot/logo.png b/apps/movie-pilot/logo.png new file mode 100644 index 00000000..f85c5121 Binary files /dev/null and b/apps/movie-pilot/logo.png differ diff --git a/apps/mysql/5.7.44/config/my.cnf b/apps/mysql/5.7.44/config/my.cnf new file mode 100644 index 00000000..6d6e1a93 --- /dev/null +++ b/apps/mysql/5.7.44/config/my.cnf @@ -0,0 +1,42 @@ +# For advice on how to change settings please see +# http://dev.mysql.com/doc/refman/5.7/en/server-configuration-defaults.html + +[mysqld] +# +# Remove leading # and set to the amount of RAM for the most important data +# cache in MySQL. Start at 70% of total RAM for dedicated server, else 10%. +# innodb_buffer_pool_size = 128M +# +# Remove leading # to turn on a very important data integrity option: logging +# changes to the binary log between backups. +# log_bin +# +# Remove leading # to set options mainly useful for reporting servers. +# The server defaults are faster for transactions and fast SELECTs. +# Adjust sizes as needed, experiment to find the optimal values. 
+# join_buffer_size = 128M +# sort_buffer_size = 2M +# read_rnd_buffer_size = 2M +skip-host-cache +skip-name-resolve +datadir=/var/lib/mysql +socket=/var/run/mysqld/mysqld.sock +secure-file-priv=/var/lib/mysql-files +user=mysql + +# Disabling symbolic-links is recommended to prevent assorted security risks +symbolic-links=0 + +#log-error=/var/log/mysqld.log +pid-file=/var/run/mysqld/mysqld.pid + +max_allowed_packet=64M +character_set_server=utf8mb4 +lower_case_table_names=1 +group_concat_max_len=1024000 + +[client] +socket=/var/run/mysqld/mysqld.sock + +!includedir /etc/mysql/conf.d/ +!includedir /etc/mysql/mysql.conf.d/ diff --git a/apps/mysql/5.7.44/data.yml b/apps/mysql/5.7.44/data.yml new file mode 100644 index 00000000..b5a36428 --- /dev/null +++ b/apps/mysql/5.7.44/data.yml @@ -0,0 +1,24 @@ +additionalProperties: + formFields: + - default: "/home/mysql" + edit: true + envKey: MYSQL_ROOT_PATH + labelZh: 数据持久化路径 + labelEn: Data persistence path + required: true + type: text + - default: 3306 + envKey: PANEL_APP_PORT_HTTP + labelZh: 连接端口 + labelEn: Connection Port + required: true + rule: paramPort + type: number + - default: "" + envKey: MYSQL_ROOT_PASSWORD + labelZh: 管理员密码 + labelEn: Admin Password + random: true + required: true + rule: paramComplexity + type: password diff --git a/apps/mysql/5.7.44/docker-compose.yml b/apps/mysql/5.7.44/docker-compose.yml new file mode 100644 index 00000000..5fae33aa --- /dev/null +++ b/apps/mysql/5.7.44/docker-compose.yml @@ -0,0 +1,32 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +services: + mysql: + image: mysql:5.7.44 + container_name: ${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + networks: + - 1panel-network + user: 1000:1000 + ports: + - ${PANEL_APP_PORT_HTTP}:3306 + env_file: + - /etc/1panel/envs/global.env + - ${ENV_FILE:-/etc/1panel/envs/default.env} + volumes: + - ${MYSQL_ROOT_PATH}/data/:/var/lib/mysql + - ${MYSQL_ROOT_PATH}/config/my.cnf:/etc/mysql/my.cnf + - ${MYSQL_ROOT_PATH}/log:/var/log/mysql + - /etc/timezone:/etc/timezone:ro + - /etc/localtime:/etc/localtime:ro + command: + - --character-set-server=utf8mb4 + - --lower_case_table_names=1 + - --collation-server=utf8mb4_general_ci + - --explicit_defaults_for_timestamp=true diff --git a/apps/mysql/5.7.44/scripts/init.sh b/apps/mysql/5.7.44/scripts/init.sh new file mode 100644 index 00000000..5819bd69 --- /dev/null +++ b/apps/mysql/5.7.44/scripts/init.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + mkdir -p "$MYSQL_ROOT_PATH" + mkdir -p "$MYSQL_ROOT_PATH/config" + mkdir -p "$MYSQL_ROOT_PATH/data" + mkdir -p "$MYSQL_ROOT_PATH/log" + + cp ./config/my.cnf "$MYSQL_ROOT_PATH/config/my.cnf" + + chown -R 1000:1000 "$MYSQL_ROOT_PATH" + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/mysql/5.7.44/scripts/uninstall.sh b/apps/mysql/5.7.44/scripts/uninstall.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/mysql/5.7.44/scripts/uninstall.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." 
+fi diff --git a/apps/mysql/5.7.44/scripts/upgrade.sh b/apps/mysql/5.7.44/scripts/upgrade.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/mysql/5.7.44/scripts/upgrade.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/mysql/8.4.1/config/my.cnf b/apps/mysql/8.4.1/config/my.cnf new file mode 100644 index 00000000..c53ca32d --- /dev/null +++ b/apps/mysql/8.4.1/config/my.cnf @@ -0,0 +1,20 @@ +[mysqld] +host_cache_size=0 +skip-name-resolve +datadir=/var/lib/mysql +socket=/var/run/mysqld/mysqld.sock +user=mysql + +mysql_native_password=ON +character_set_server=utf8mb4 +collation_server=utf8mb4_unicode_ci +lower_case_table_names=1 +group_concat_max_len=1024000 +log_bin_trust_function_creators=1 + +secure_file_priv= +pid_file=/var/run/mysqld/mysqld.pid +[client] +socket=/var/run/mysqld/mysqld.sock + +!includedir /etc/mysql/conf.d/ diff --git a/apps/mysql/8.4.1/data.yml b/apps/mysql/8.4.1/data.yml new file mode 100644 index 00000000..b5a36428 --- /dev/null +++ b/apps/mysql/8.4.1/data.yml @@ -0,0 +1,24 @@ +additionalProperties: + formFields: + - default: "/home/mysql" + edit: true + envKey: MYSQL_ROOT_PATH + labelZh: 数据持久化路径 + labelEn: Data persistence path + required: true + type: text + - default: 3306 + envKey: PANEL_APP_PORT_HTTP + labelZh: 连接端口 + labelEn: Connection Port + required: true + rule: paramPort + type: number + - default: "" + envKey: MYSQL_ROOT_PASSWORD + labelZh: 管理员密码 + labelEn: Admin Password + random: true + required: true + rule: paramComplexity + type: password diff --git a/apps/mysql/8.4.1/docker-compose.yml b/apps/mysql/8.4.1/docker-compose.yml new file mode 100644 index 00000000..fcf0b0a6 --- /dev/null +++ b/apps/mysql/8.4.1/docker-compose.yml @@ -0,0 +1,31 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +services: + mysql: + image: mysql:8.4.1 + container_name: ${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + networks: + - 1panel-network + user: 1000:1000 + ports: + - ${PANEL_APP_PORT_HTTP}:3306 + env_file: + - /etc/1panel/envs/global.env + - ${ENV_FILE:-/etc/1panel/envs/default.env} + volumes: + - ${MYSQL_ROOT_PATH}/data:/var/lib/mysql + - ${MYSQL_ROOT_PATH}/config/my.cnf:/etc/my.cnf + - ${MYSQL_ROOT_PATH}/log:/var/log/mysql + - /etc/timezone:/etc/timezone:ro + - /etc/localtime:/etc/localtime:ro + command: + - --character-set-server=utf8mb4 + - --collation-server=utf8mb4_unicode_ci + - --mysql-native-password=ON diff --git a/apps/mysql/8.4.1/scripts/init.sh b/apps/mysql/8.4.1/scripts/init.sh new file mode 100644 index 00000000..5819bd69 --- /dev/null +++ b/apps/mysql/8.4.1/scripts/init.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + mkdir -p "$MYSQL_ROOT_PATH" + mkdir -p "$MYSQL_ROOT_PATH/config" + mkdir -p "$MYSQL_ROOT_PATH/data" + mkdir -p "$MYSQL_ROOT_PATH/log" + + cp ./config/my.cnf "$MYSQL_ROOT_PATH/config/my.cnf" + + chown -R 1000:1000 "$MYSQL_ROOT_PATH" + + echo "Check Finish." + +else + echo "Error: .env file not found." 
+fi diff --git a/apps/mysql/8.4.1/scripts/uninstall.sh b/apps/mysql/8.4.1/scripts/uninstall.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/mysql/8.4.1/scripts/uninstall.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/mysql/8.4.1/scripts/upgrade.sh b/apps/mysql/8.4.1/scripts/upgrade.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/mysql/8.4.1/scripts/upgrade.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/mysql/README.md b/apps/mysql/README.md new file mode 100644 index 00000000..ddd27566 --- /dev/null +++ b/apps/mysql/README.md @@ -0,0 +1,18 @@ +# MySQL + +![MySQL](https://file.lifebus.top/imgs/mysql_logo.svg) + +MySQL 是一个关系型数据库管理系统,由瑞典MySQL AB 公司开发,目前属于Oracle 公司。 + +## 特性 + ++ 使用C和C++编写,并使用了多种编译器进行测试,保证源代码的可移植性。 ++ 支持AIX、BSDi、FreeBSD、HP-UX、Linux、Mac OS、Novell NetWare、NetBSD、OpenBSD、OS/2 Wrap、Solaris、Windows等多种操作系统。 ++ 为多种编程语言提供了API。这些編程语言包括C、C++、C#、VB.NET、Delphi、Eiffel、Java、Perl、PHP、Python、Ruby和Tcl等。 ++ 支持多线程,充分利用CPU资源,支持多用户。 ++ 优化的SQL查询算法,有效地提高查询速度。 ++ 既能够作为一个单独的应用程序在客户端服务器网络环境中执行,也能够作为一个程序库而嵌入到其他的软件中。 ++ 提供多语言支持,常见的编码如中文的GB 2312、BIG5,日文的Shift JIS等都可以用作数据表名和数据列名。 ++ 提供TCP/IP、ODBC和JDBC等多种数据库连接途径。 ++ 提供用于管理、检查、優化数据库操作的管理工具。 ++ 可以处理拥有上千万条记录的大型数据库。 diff --git a/apps/mysql/data.yml b/apps/mysql/data.yml new file mode 100644 index 00000000..49039f95 --- /dev/null +++ b/apps/mysql/data.yml @@ -0,0 +1,17 @@ +name: MySQL +title: 开源关系型数据库 +description: 开源关系型数据库 +additionalProperties: + key: mysql + name: MySQL + tags: + - Database + - Local + shortDescZh: 开源关系型数据库 + shortDescEn: Open source relational database management system + type: runtime + crossVersionUpdate: true + limit: 0 + website: https://www.mysql.com/ + github: https://github.com/mysql/mysql-server/ + document: https://dev.mysql.com/doc/ diff --git a/apps/mysql/logo.png b/apps/mysql/logo.png new file mode 100644 index 00000000..269640f5 Binary files /dev/null and b/apps/mysql/logo.png differ diff --git a/apps/nacos/2.3.1/data.yml b/apps/nacos/2.3.1/data.yml new file mode 100644 index 00000000..4add778f --- /dev/null +++ b/apps/nacos/2.3.1/data.yml @@ -0,0 +1,169 @@ +additionalProperties: + formFields: + - child: + default: "" + envKey: PANEL_DB_HOST + required: true + type: service + default: mysql + edit: true + envKey: PANEL_DB_TYPE + labelZh: MySQL 服务 (前置检查) + labelEn: Database Service (Pre-check) + required: true + type: apps + values: + - label: MySQL + value: mysql + - label: MariaDB + value: mariadb + - label: Percona + value: percona + - default: "/home/nacos" + edit: true + envKey: NACOS_ROOT_PATH + labelZh: 数据持久化路径 + labelEn: Data persistence path + required: true + type: text + - default: 8848 + edit: true + envKey: PANEL_APP_PORT_HTTP + labelZh: WebUI 端口 + labelEn: WebUI port + required: true + rule: paramPort + type: number + - default: 9848 + edit: true + envKey: PANEL_APP_PORT_GRPC_CLIENT + labelZh: Nacos gRPC 客户端端口 + labelEn: Nacos gRPC client port + required: true + rule: paramPort + type: number + - default: 9849 + edit: true + envKey: PANEL_APP_PORT_GRPC_SERVER + labelZh: Nacos gRPC 服务端端口 + labelEn: Nacos gRPC server port + required: true + rule: paramPort + type: number + - default: "ip" + envKey: PREFER_HOST_MODE + labelZh: IP模式/域名模式 + 
labelEn: IP mode/hostname mode + required: true + type: select + values: + - label: IP模式 + value: "ip" + - label: 域名模式 + value: "hostname" + - default: "" + edit: true + envKey: NACOS_SERVER_IP + labelZh: 服务IP + labelEn: Server IP + required: true + type: text + - default: "serverIdentity" + edit: true + envKey: NACOS_AUTH_IDENTITY_KEY + labelZh: Nacos身份验证密钥键 + labelEn: Nacos auth identity key + required: true + type: text + - default: "security" + edit: true + envKey: NACOS_AUTH_IDENTITY_VALUE + labelZh: Nacos身份验证密钥值 + labelEn: Nacos auth identity value + required: true + type: text + - default: "SecretKey012345678901234567890123456789012345678901234567890123456789" + edit: true + envKey: NACOS_AUTH_TOKEN + labelZh: Nacos身份验证令牌(至少32位字符的Base64编码) + labelEn: Nacos auth token + required: true + type: text + - default: "512m" + edit: true + envKey: JVM_XMS + labelZh: JVM_XMS (初始内存大小) + labelEn: JVM_XMS (Initial memory size) + required: true + type: text + - default: "2g" + edit: true + envKey: JVM_XMX + labelEn: JVM_XMX (Maximum memory size) + labelZh: JVM_XMX (最大内存大小) + required: true + type: text + - default: "128m" + edit: true + envKey: JVM_XMN + labelEn: JVM_XMN (New generation memory) + labelZh: JVM_XMN (新生代内存) + required: true + type: text + - default: "128m" + edit: true + envKey: JVM_MS + labelEn: JVM_MS (Initial metaspace) + labelZh: JVM_MS (初始元空间大小) + required: true + type: text + - default: "320m" + edit: true + envKey: JVM_MMS + labelEn: JVM_MMS (Maximum metaspace) + labelZh: JVM_MMS (最大元空间大小) + required: true + type: text + - default: "127.0.0.1" + edit: true + envKey: MYSQL_SERVICE_HOST + labelZh: 数据库 主机 + labelEn: Database Host + required: true + type: text + - default: 3306 + edit: true + envKey: MYSQL_SERVICE_PORT + labelZh: 数据库 端口 + labelEn: Database Port + required: true + rule: paramPort + type: number + - default: "nacos" + edit: true + envKey: MYSQL_SERVICE_USER + labelZh: 数据库 用户名 + labelEn: Database Username + required: true + type: text + - default: "" + edit: true + envKey: MYSQL_SERVICE_PASSWORD + labelZh: 数据库 密码 + labelEn: Database Password + required: true + type: password + - default: "nacos" + edit: true + envKey: MYSQL_SERVICE_DB_NAME + labelZh: 数据库 名称 + labelEn: Database Name + required: true + type: text + - default: "characterEncoding=utf8&connectTimeout=1000&socketTimeout=3000&autoReconnect=true&useUnicode=true&useSSL=false&serverTimezone=Asia/Shanghai&allowPublicKeyRetrieval=true" + edit: true + envKey: MYSQL_SERVICE_DB_PARAM + labelZh: 数据库 连接参数 + labelEn: Database Connection Parameters + required: true + type: text diff --git a/apps/nacos/2.3.1/docker-compose.yml b/apps/nacos/2.3.1/docker-compose.yml new file mode 100644 index 00000000..3848a123 --- /dev/null +++ b/apps/nacos/2.3.1/docker-compose.yml @@ -0,0 +1,28 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +services: + nacos: + image: nacos/nacos-server:v2.3.1 + container_name: ${CONTAINER_NAME} + restart: always + labels: + createdBy: "Apps" + networks: + - 1panel-network + ports: + - ${PANEL_APP_PORT_HTTP}:8848 + - ${NACOS_COMMUNICATION_PORT}:9848 + volumes: + - ${NACOS_ROOT_PATH}/logs:/home/nacos/logs + - ${NACOS_ROOT_PATH}/data:/home/nacos/data + environment: + - MODE=standalone + - NACOS_SERVER_PORT=8848 + - NACOS_AUTH_ENABLE=true + - SPRING_DATASOURCE_PLATFORM=mysql + env_file: + - .env diff --git a/apps/nacos/2.3.1/init/mysql-schema.sql b/apps/nacos/2.3.1/init/mysql-schema.sql new file mode 100644 index 00000000..067d67a1 --- /dev/null +++ 
b/apps/nacos/2.3.1/init/mysql-schema.sql @@ -0,0 +1,227 @@ +/* + * Copyright 1999-2018 Alibaba Group Holding Ltd. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/******************************************/ +/* 表名称 = config_info */ +/******************************************/ +CREATE TABLE `config_info` +( + `id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id', + `data_id` varchar(255) NOT NULL COMMENT 'data_id', + `group_id` varchar(128) DEFAULT NULL COMMENT 'group_id', + `content` longtext NOT NULL COMMENT 'content', + `md5` varchar(32) DEFAULT NULL COMMENT 'md5', + `gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', + `gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间', + `src_user` text COMMENT 'source user', + `src_ip` varchar(50) DEFAULT NULL COMMENT 'source ip', + `app_name` varchar(128) DEFAULT NULL COMMENT 'app_name', + `tenant_id` varchar(128) DEFAULT '' COMMENT '租户字段', + `c_desc` varchar(256) DEFAULT NULL COMMENT 'configuration description', + `c_use` varchar(64) DEFAULT NULL COMMENT 'configuration usage', + `effect` varchar(64) DEFAULT NULL COMMENT '配置生效的描述', + `type` varchar(64) DEFAULT NULL COMMENT '配置的类型', + `c_schema` text COMMENT '配置的模式', + `encrypted_data_key` text NOT NULL COMMENT '密钥', + PRIMARY KEY (`id`), + UNIQUE KEY `uk_configinfo_datagrouptenant` (`data_id`,`group_id`,`tenant_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='config_info'; + +/******************************************/ +/* 表名称 = config_info_aggr */ +/******************************************/ +CREATE TABLE `config_info_aggr` +( + `id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id', + `data_id` varchar(255) NOT NULL COMMENT 'data_id', + `group_id` varchar(128) NOT NULL COMMENT 'group_id', + `datum_id` varchar(255) NOT NULL COMMENT 'datum_id', + `content` longtext NOT NULL COMMENT '内容', + `gmt_modified` datetime NOT NULL COMMENT '修改时间', + `app_name` varchar(128) DEFAULT NULL COMMENT 'app_name', + `tenant_id` varchar(128) DEFAULT '' COMMENT '租户字段', + PRIMARY KEY (`id`), + UNIQUE KEY `uk_configinfoaggr_datagrouptenantdatum` (`data_id`,`group_id`,`tenant_id`,`datum_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='增加租户字段'; + + +/******************************************/ +/* 表名称 = config_info_beta */ +/******************************************/ +CREATE TABLE `config_info_beta` +( + `id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id', + `data_id` varchar(255) NOT NULL COMMENT 'data_id', + `group_id` varchar(128) NOT NULL COMMENT 'group_id', + `app_name` varchar(128) DEFAULT NULL COMMENT 'app_name', + `content` longtext NOT NULL COMMENT 'content', + `beta_ips` varchar(1024) DEFAULT NULL COMMENT 'betaIps', + `md5` varchar(32) DEFAULT NULL COMMENT 'md5', + `gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', + `gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间', + `src_user` text COMMENT 'source user', + `src_ip` varchar(50) DEFAULT NULL COMMENT 'source ip', + 
`tenant_id` varchar(128) DEFAULT '' COMMENT '租户字段', + `encrypted_data_key` text NOT NULL COMMENT '密钥', + PRIMARY KEY (`id`), + UNIQUE KEY `uk_configinfobeta_datagrouptenant` (`data_id`,`group_id`,`tenant_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='config_info_beta'; + +/******************************************/ +/* 表名称 = config_info_tag */ +/******************************************/ +CREATE TABLE `config_info_tag` +( + `id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id', + `data_id` varchar(255) NOT NULL COMMENT 'data_id', + `group_id` varchar(128) NOT NULL COMMENT 'group_id', + `tenant_id` varchar(128) DEFAULT '' COMMENT 'tenant_id', + `tag_id` varchar(128) NOT NULL COMMENT 'tag_id', + `app_name` varchar(128) DEFAULT NULL COMMENT 'app_name', + `content` longtext NOT NULL COMMENT 'content', + `md5` varchar(32) DEFAULT NULL COMMENT 'md5', + `gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', + `gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间', + `src_user` text COMMENT 'source user', + `src_ip` varchar(50) DEFAULT NULL COMMENT 'source ip', + PRIMARY KEY (`id`), + UNIQUE KEY `uk_configinfotag_datagrouptenanttag` (`data_id`,`group_id`,`tenant_id`,`tag_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='config_info_tag'; + +/******************************************/ +/* 表名称 = config_tags_relation */ +/******************************************/ +CREATE TABLE `config_tags_relation` +( + `id` bigint(20) NOT NULL COMMENT 'id', + `tag_name` varchar(128) NOT NULL COMMENT 'tag_name', + `tag_type` varchar(64) DEFAULT NULL COMMENT 'tag_type', + `data_id` varchar(255) NOT NULL COMMENT 'data_id', + `group_id` varchar(128) NOT NULL COMMENT 'group_id', + `tenant_id` varchar(128) DEFAULT '' COMMENT 'tenant_id', + `nid` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'nid, 自增长标识', + PRIMARY KEY (`nid`), + UNIQUE KEY `uk_configtagrelation_configidtag` (`id`,`tag_name`,`tag_type`), + KEY `idx_tenant_id` (`tenant_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='config_tag_relation'; + +/******************************************/ +/* 表名称 = group_capacity */ +/******************************************/ +CREATE TABLE `group_capacity` +( + `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '主键ID', + `group_id` varchar(128) NOT NULL DEFAULT '' COMMENT 'Group ID,空字符表示整个集群', + `quota` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '配额,0表示使用默认值', + `usage` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '使用量', + `max_size` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '单个配置大小上限,单位为字节,0表示使用默认值', + `max_aggr_count` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '聚合子配置最大个数,,0表示使用默认值', + `max_aggr_size` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '单个聚合数据的子配置大小上限,单位为字节,0表示使用默认值', + `max_history_count` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '最大变更历史数量', + `gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', + `gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间', + PRIMARY KEY (`id`), + UNIQUE KEY `uk_group_id` (`group_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='集群、各Group容量信息表'; + +/******************************************/ +/* 表名称 = his_config_info */ +/******************************************/ +CREATE TABLE `his_config_info` +( + `id` bigint(20) unsigned NOT NULL COMMENT 'id', + `nid` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'nid, 自增标识', + `data_id` varchar(255) NOT NULL COMMENT 'data_id', + `group_id` varchar(128) NOT NULL 
COMMENT 'group_id', + `app_name` varchar(128) DEFAULT NULL COMMENT 'app_name', + `content` longtext NOT NULL COMMENT 'content', + `md5` varchar(32) DEFAULT NULL COMMENT 'md5', + `gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', + `gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间', + `src_user` text COMMENT 'source user', + `src_ip` varchar(50) DEFAULT NULL COMMENT 'source ip', + `op_type` char(10) DEFAULT NULL COMMENT 'operation type', + `tenant_id` varchar(128) DEFAULT '' COMMENT '租户字段', + `encrypted_data_key` text NOT NULL COMMENT '密钥', + PRIMARY KEY (`nid`), + KEY `idx_gmt_create` (`gmt_create`), + KEY `idx_gmt_modified` (`gmt_modified`), + KEY `idx_did` (`data_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='多租户改造'; + + +/******************************************/ +/* 表名称 = tenant_capacity */ +/******************************************/ +CREATE TABLE `tenant_capacity` +( + `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '主键ID', + `tenant_id` varchar(128) NOT NULL DEFAULT '' COMMENT 'Tenant ID', + `quota` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '配额,0表示使用默认值', + `usage` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '使用量', + `max_size` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '单个配置大小上限,单位为字节,0表示使用默认值', + `max_aggr_count` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '聚合子配置最大个数', + `max_aggr_size` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '单个聚合数据的子配置大小上限,单位为字节,0表示使用默认值', + `max_history_count` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '最大变更历史数量', + `gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', + `gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间', + PRIMARY KEY (`id`), + UNIQUE KEY `uk_tenant_id` (`tenant_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='租户容量信息表'; + + +CREATE TABLE `tenant_info` +( + `id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id', + `kp` varchar(128) NOT NULL COMMENT 'kp', + `tenant_id` varchar(128) default '' COMMENT 'tenant_id', + `tenant_name` varchar(128) default '' COMMENT 'tenant_name', + `tenant_desc` varchar(256) DEFAULT NULL COMMENT 'tenant_desc', + `create_source` varchar(32) DEFAULT NULL COMMENT 'create_source', + `gmt_create` bigint(20) NOT NULL COMMENT '创建时间', + `gmt_modified` bigint(20) NOT NULL COMMENT '修改时间', + PRIMARY KEY (`id`), + UNIQUE KEY `uk_tenant_info_kptenantid` (`kp`,`tenant_id`), + KEY `idx_tenant_id` (`tenant_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='tenant_info'; + +CREATE TABLE `users` +( + `username` varchar(50) NOT NULL PRIMARY KEY COMMENT 'username', + `password` varchar(500) NOT NULL COMMENT 'password', + `enabled` boolean NOT NULL COMMENT 'enabled' +); + +CREATE TABLE `roles` +( + `username` varchar(50) NOT NULL COMMENT 'username', + `role` varchar(50) NOT NULL COMMENT 'role', + UNIQUE INDEX `idx_user_role` (`username` ASC, `role` ASC) USING BTREE +); + +CREATE TABLE `permissions` +( + `role` varchar(50) NOT NULL COMMENT 'role', + `resource` varchar(128) NOT NULL COMMENT 'resource', + `action` varchar(8) NOT NULL COMMENT 'action', + UNIQUE INDEX `uk_role_permission` (`role`,`resource`,`action`) USING BTREE +); + +INSERT INTO users (username, password, enabled) +VALUES ('nacos', '$2a$10$EuWPZHzz32dJN7jexM34MOeYirDdFAZm2kuWj7VEOJhhZkDrxfvUu', TRUE); + +INSERT INTO roles (username, role) +VALUES ('nacos', 'ROLE_ADMIN'); diff --git a/apps/nacos/2.3.1/scripts/init.sh b/apps/nacos/2.3.1/scripts/init.sh new file mode 100644 index 00000000..88691a0f --- 
/dev/null +++ b/apps/nacos/2.3.1/scripts/init.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + if command -v mysql &> /dev/null; then + if [[ -f ./init/mysql-schema.sql ]]; then + mysql -u"$MYSQL_SERVICE_USER" -p"$MYSQL_SERVICE_PASSWORD" -h"$MYSQL_SERVICE_HOST" -P"$MYSQL_SERVICE_PORT" --protocol=TCP "$MYSQL_SERVICE_DB_NAME" < ./init/mysql-schema.sql + else + echo "mysql-schema.sql not found." + fi + else + echo "mysql command not found." + fi + + echo "Check Finish." +else + echo "Error: .env file not found." +fi diff --git a/apps/nacos/2.3.1/scripts/uninstall.sh b/apps/nacos/2.3.1/scripts/uninstall.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/nacos/2.3.1/scripts/uninstall.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/nacos/2.3.1/scripts/upgrade.sh b/apps/nacos/2.3.1/scripts/upgrade.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/nacos/2.3.1/scripts/upgrade.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/nacos/README.md b/apps/nacos/README.md new file mode 100644 index 00000000..072375df --- /dev/null +++ b/apps/nacos/README.md @@ -0,0 +1,79 @@ +# Nacos + +一个易于使用的动态服务发现、配置和服务管理平台,用于构建云原生应用。 + +![Nacos](https://github.com/alibaba/nacos/raw/develop/doc/Nacos_Logo.png) + +## 特性 + +## 安装说明 + +> 安全路径:`/nacos` +> +> 用户名:`nacos` +> +> 密码:`nacos` + +### 初始化数据库 + +安装前,需要先创建数据库,数据库初始化文件:`mysql-schema.sql` +,可前往[Nacos](https://github.com/alibaba/nacos/blob/master/distribution/conf/mysql-schema.sql)下载。 + +或安装完成后,进入安装目录,在 `init` 文件夹下,导入 `mysql-schema.sql` 文件。 + +### 加密配置 + ++ `Nacos身份验证令牌` + +32位字符串,并使用Base64编码。 + +### JVM参数 + ++ `JVM_XMS` + +这个参数设置 Java 虚拟机堆的初始内存大小。 + +它指定了 JVM 在启动时分配的堆内存大小。 + +例如,-Xms512m 表示 JVM 在启动时将分配 512MB 的堆内存。 + ++ `JVM_XMX` + +这个参数设置 Java 虚拟机堆的最大内存大小。 + +它指定了 JVM 堆内存的上限。 + +例如,-Xmx1024m 表示 JVM 的堆内存最多可以使用 1024MB。 + ++ `JVM_XMN` + +这个参数用于设置新生代的大小。 + +新生代是 JVM 堆内存中的一部分,用于存放新创建的对象。 +设置 -Xmn 参数可以控制新生代的初始大小。 + +例如,-Xmn256m 表示将新生代的初始大小设置为 256MB。 + ++ `JVM_MS` + +这个参数用于设置 JVM 的初始元空间大小。 + +元空间是用于存储类元数据的区域,它在 Java 8 中取代了永久代。 + +例如,-XX: MetaspaceSize=128m 表示将初始的元空间大小设置为 128MB。 + ++ `JVM_MMS` + +这个参数用于设置 JVM 的最大元空间大小。 + +它指定了元空间能够增长的最大限制。 + +例如,-XX:MaxMetaspaceSize=256m 表示将元空间的最大大小限制为 256MB。 + +```shell +- JVM_XMS=64m #-Xms default :1g +- JVM_XMX=64m #-Xmx default :1g +- JVM_XMN=16m #-Xmn default :512m +- JVM_MS=8m #-XX:MetaspaceSize default :128m +- JVM_MMS=8m #-XX:MaxMetaspaceSize default :320m +``` diff --git a/apps/nacos/data.yml b/apps/nacos/data.yml new file mode 100644 index 00000000..bdf91500 --- /dev/null +++ b/apps/nacos/data.yml @@ -0,0 +1,23 @@ +name: Nacos +tags: + - 中间件 +title: 动态服务发现、配置管理和服务管理平台 +type: 中间件 +description: 动态服务发现、配置管理和服务管理平台 +additionalProperties: + key: nacos + name: Nacos + tags: + - WebSite + - Tool + - Middleware + - Local + shortDescZh: 动态服务发现、配置管理和服务管理平台 + shortDescEn: Dynamic service discovery, configuration and service management platform + type: website + crossVersionUpdate: true + limit: 0 + recommend: 0 + website: https://nacos.io/ + github: https://github.com/alibaba/nacos + document: https://github.com/alibaba/nacos/blob/develop/README.md diff --git a/apps/nacos/logo.png b/apps/nacos/logo.png new file mode 100644 index 00000000..e2bc7018 Binary files /dev/null and b/apps/nacos/logo.png differ diff --git a/apps/nezha/0.18.1/data.yml 
b/apps/nezha/0.18.1/data.yml new file mode 100644 index 00000000..3d6fca20 --- /dev/null +++ b/apps/nezha/0.18.1/data.yml @@ -0,0 +1,94 @@ +additionalProperties: + formFields: + - default: "/home/nezha" + edit: true + envKey: NEZHA_ROOT_PATH + labelZh: 数据持久化路径 + labelEn: Data persistence path + required: true + type: text + - default: 8008 + edit: true + envKey: PANEL_APP_PORT_HTTP + labelZh: WebUI 端口 + labelEn: WebUI port + required: true + rule: paramPort + type: number + - default: 5555 + edit: true + envKey: PANEL_APP_PORT_GRPC + labelZh: gRPC 端口 + labelEn: gRPC Port + required: true + rule: paramPort + type: number + - default: "github" + edit: true + envKey: OAUTH2_TYPE + labelZh: OAuth 服务商 + labelEn: OAuth Provider + required: true + type: select + values: + - label: GitHub + value: "github" + - label: Cloudflare + value: "cloudflare" + - label: GitLab + value: "gitlab" + - label: Gitee + value: "gitee" + - label: Gitea + value: "gitea" + - label: Jihulab + value: "jihulab" + - default: "" + edit: true + envKey: OAUTH2_ADMIN + labelZh: 管理员账号ID + labelEn: Admin Account ID + required: true + type: text + - default: "" + edit: true + envKey: OAUTH2_CLIENTID + labelEn: OAuth Client ID + labelZh: OAuth 客户端 ID + required: true + type: text + - default: "" + edit: true + envKey: OAUTH2_CLIENTSECRET + labelEn: OAuth Client Secret + labelZh: OAuth 客户端 Secret + required: true + type: text + - default: "" + edit: true + envKey: OAUTH2_ENDPOINT + labelEn: OAuth Callback + labelZh: OAuth 端点 (可选) + required: false + type: text + - default: "哪吒监控" + edit: true + envKey: NZ_SITE_TITLE + labelZh: 网站标题 + labelEn: Site Title + required: true + type: text + - default: "nezha-dashboard" + edit: true + envKey: NZ_COOKIE_NAME + labelZh: Cookie 名称 (默认) + labelEn: Cookie Name + required: true + type: text + - default: "default" + edit: true + envKey: NZ_THEME + labelZh: 网站主题 (默认) + labelEn: Theme + required: true + type: text diff --git a/apps/nezha/0.18.1/data/config-example.yaml b/apps/nezha/0.18.1/data/config-example.yaml new file mode 100644 index 00000000..d063cf9f --- /dev/null +++ b/apps/nezha/0.18.1/data/config-example.yaml @@ -0,0 +1,33 @@ +debug: false +httpport: 80 +language: zh-CN +grpcport: nz_grpc_port +oauth2: + type: "nz_oauth2_type" + admin: "nz_admin_logins" + clientid: "nz_github_oauth_client_id" + clientsecret: "nz_github_oauth_client_secret" + endpoint: "" +site: + brand: "nz_site_title" + cookiename: "nezha-dashboard" + theme: "default" +ddns: + enable: false + provider: "webhook" + accessid: "" + accesssecret: "" + webhookmethod: "" + webhookurl: "" + webhookrequestbody: "" + webhookheaders: "" + maxretries: 3 + profiles: + example: + provider: "" + accessid: "" + accesssecret: "" + webhookmethod: "" + webhookurl: "" + webhookrequestbody: "" + webhookheaders: "" diff --git a/apps/nezha/0.18.1/docker-compose.yml b/apps/nezha/0.18.1/docker-compose.yml new file mode 100644 index 00000000..df24b025 --- /dev/null +++ b/apps/nezha/0.18.1/docker-compose.yml @@ -0,0 +1,23 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +services: + nezha-dashboard: + image: ghcr.io/naiba/nezha-dashboard:v0.18.1 + container_name: ${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + networks: + - 1panel-network + ports: + - ${PANEL_APP_PORT_HTTP}:80 + - ${PANEL_APP_PORT_GRPC}:${PANEL_APP_PORT_GRPC} + volumes: + - ${NEZHA_ROOT_PATH}/data:/dashboard/data + - ${NEZHA_ROOT_PATH}/static-custom/static:/dashboard/resource/static/custom + - 
${NEZHA_ROOT_PATH}/theme-custom/template:/dashboard/resource/template/theme-custom + - ${NEZHA_ROOT_PATH}/dashboard-custom/template:/dashboard/resource/template/dashboard-custom diff --git a/apps/nezha/0.18.1/scripts/init.sh b/apps/nezha/0.18.1/scripts/init.sh new file mode 100644 index 00000000..47a3bd83 --- /dev/null +++ b/apps/nezha/0.18.1/scripts/init.sh @@ -0,0 +1,26 @@ +#!/bin/bash + +if [[ -f .env ]]; then + source .env + + mkdir -p $NEZHA_ROOT_PATH + mkdir -p $NEZHA_ROOT_PATH/data + + cp -f ./data/config-example.yaml ./data/nezha-config.yaml + sed -i "s/grpcport: nz_grpc_port/grpcport: $PANEL_APP_PORT_GRPC/g" ./data/nezha-config.yaml + sed -i "s/type: \"nz_oauth2_type\"/type: \"$OAUTH2_TYPE\"/g" ./data/nezha-config.yaml + sed -i "s/admin: \"nz_admin_logins\"/admin: \"$OAUTH2_ADMIN\"/g" ./data/nezha-config.yaml + sed -i "s/clientid: \"nz_github_oauth_client_id\"/clientid: \"$OAUTH2_CLIENTID\"/g" ./data/nezha-config.yaml + sed -i "s/clientsecret: \"nz_github_oauth_client_secret\"/clientsecret: \"$OAUTH2_CLIENTSECRET\"/g" ./data/nezha-config.yaml + sed -i "s/endpoint: \"\"/endpoint: \"$OAUTH2_ENDPOINT\"/g" ./data/nezha-config.yaml + sed -i "s/brand: \"nz_site_title\"/brand: \"$NZ_SITE_TITLE\"/g" ./data/nezha-config.yaml + sed -i "s/cookiename: \"nezha-dashboard\"/cookiename: \"$NZ_COOKIE_NAME\"/g" ./data/nezha-config.yaml + sed -i "s/theme: \"default\"/theme: \"$NZ_THEME\"/g" ./data/nezha-config.yaml + cp -f ./data/nezha-config.yaml $NEZHA_ROOT_PATH/data/config.yaml + chmod -R 777 $NEZHA_ROOT_PATH + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/nezha/0.18.1/scripts/uninstall.sh b/apps/nezha/0.18.1/scripts/uninstall.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/nezha/0.18.1/scripts/uninstall.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/nezha/0.18.1/scripts/upgrade.sh b/apps/nezha/0.18.1/scripts/upgrade.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/nezha/0.18.1/scripts/upgrade.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." 
+fi diff --git a/apps/nezha/README.md b/apps/nezha/README.md new file mode 100644 index 00000000..0481d3e6 --- /dev/null +++ b/apps/nezha/README.md @@ -0,0 +1,88 @@ +# 哪吒监控 + +开源、轻量、易用的服务器监控、运维工具 + +![哪吒监控](https://file.lifebus.top/imgs/nezha_logo.svg) + +## OAuth2 配置 + +推荐使用 `Gitee` 或 `Gitea` 作为管理员账号登录,因为 `Github` 在中国大陆访问速度较慢。 + +### 获取 Github 的 Client ID 和密钥 + +哪吒监控接入 Github、Gitlab、Gitee 作为后台管理员账号 + ++ 新建一个验证应用 + 以 Github 为例,登录 Github 后,打开 https://github.com/settings/developers + + 依次选择 “OAuth Apps” - “New OAuth App” + + Application name - 随意填写 + + Homepage URL - 填写面板的访问域名,如:"http://dashboard.example.com" (你的域名) + + Authorization callback URL - 填写回调地址,如:"http://dashboard.example.com/oauth2/callback" + (不要忘记/oauth2/callback) ++ 点击 “Register application” ++ 保存页面中的 Client ID,然后点击 “Generate a new client secret“,创建一个新的 Client Secret,新建的密钥仅会显示一次,请妥善保存 + +### 获取 Cloudflare Access 作为 OAuth2 提供方 + +位于中国大陆的用户可能无法直接连接 Github,如您在使用 Github、Gitlab、Gitee 作为管理员账户登录时遇到问题,您可以优先考虑切换 +使用 Cloudflare Access 作为 OAuth2 提供方作为登录方式 + +新建 SaaS-OIDC 应用流程 + ++ 前往 Zero Trust Dashboard,使用 Cloudflare 账号登录; ++ My Team -> Users -> <具体用户> -> 获取 User ID 并保存; ++ Access -> Application -> Add an Application; ++ 选择 SaaS,在 Application 中输入自定义的应用名称(例如 nezha),选择 OIDC 后点击 Add application; ++ Scopes 选择 openid, email, profile, groups; ++ Redirect URLs 填写你的 callback 地址,例如 https://dashboard.example.com/oauth2/callback; ++ 保存 Client ID、Client Secret、Issuer 地址中协议与域名的部分,例如 https://xxxxx.cloudflareaccess.com + +> 使用此方式,安装 Dashboard,需要将 Endpoint 配置修改为之前保存的Issuer地址 + +## 反向代理 + +> Nginx + +```nginx + location / { + proxy_pass http://127.0.0.1:8008; + proxy_set_header Host $http_host; + proxy_set_header Upgrade $http_upgrade; + } + + location ~ ^/(ws|terminal/.+)$ { + proxy_pass http://127.0.0.1:8008; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "Upgrade"; + proxy_set_header Host $http_host; + } +``` + +## 关于 Agent 安装 + +Agent 是哪吒监控的数据采集工具,用于采集服务器的性能数据,Agent 会将数据发送到哪吒监控的服务端 + +### 在 Linux(Ubuntu、Debian、CentOS) 中安装 Agent + ++ 首先在管理面板中添加一台服务器 ++ 点击新添加的服务器旁,绿色的 Linux 图标按钮,复制一键安装命令 ++ 在被控端服务器中运行复制的一键安装命令,等待安装完成后返回到 Dashboard 主页查看服务器是否上线 + +### 在 Windows 中安装 Agent + +参考文章: [哪吒探针 - Windows 客户端安装](https://nyko.me/2020/12/13/nezha-windows-client.html) + +## DDNS 支持 + +哪吒监控支持 DDNS,可以在管理面板中添加 DDNS 配置即可修改配置文件。 + +如需手动修改配置文件,可以在 `/home/nezha/data/config.yaml` 中修改 `ddns` 配置。 + +其中 `/home/nezha` 为持久化路径,如果你的持久化路径不同,请自行替换。 + +## 常见问题 + ++ 修改配置重建不生效 + +请前往持久化目录下的 `data` 目录中的 `config.yaml` 修改配置后,然后重启容器 diff --git a/apps/nezha/data.yml b/apps/nezha/data.yml new file mode 100644 index 00000000..f3152d38 --- /dev/null +++ b/apps/nezha/data.yml @@ -0,0 +1,19 @@ +name: 哪吒监控 +title: 服务器监控、运维工具 +type: 实用工具 +description: 开源、轻量、易用的服务器监控、运维工具 +additionalProperties: + key: nezha + name: 哪吒监控 + tags: + - WebSite + - DevOps + - Local + shortDescZh: 开源、轻量、易用的服务器监控、运维工具 + shortDescEn: Open source, lightweight, easy-to-use server monitoring and operation and maintenance tools + type: tool + crossVersionUpdate: true + limit: 0 + website: https://nezha.wiki/ + github: https://github.com/naiba/nezha + document: https://nezha.wiki/guide/dashboard.html diff --git a/apps/nezha/logo.png b/apps/nezha/logo.png new file mode 100644 index 00000000..2f5199ad Binary files /dev/null and b/apps/nezha/logo.png differ diff --git a/apps/onedev/10.9.7/data.yml b/apps/onedev/10.9.7/data.yml new file mode 100644 index 00000000..762286ba --- /dev/null +++ b/apps/onedev/10.9.7/data.yml @@ -0,0 +1,64 @@ +additionalProperties: + formFields: + - 
default: "/home/onedev" + edit: true + envKey: ONEDEV_ROOT_PATH + labelZh: 数据持久化路径 + labelEn: Data persistence path + required: true + type: text + - default: 6610 + edit: true + envKey: PANEL_APP_PORT_HTTP + labelZh: WebUI 端口 + labelEn: WebUI Port + required: true + rule: paramPort + type: number + - default: 6611 + edit: true + envKey: PANEL_APP_PORT_SSH + labelZh: SSH 端口 + labelEn: SSH Port + required: true + rule: paramPort + type: number + - default: "root" + edit: true + envKey: INITIAL_USER + labelZh: 管理员帐户 + labelEn: Admin Username + required: false + rule: paramCommon + type: text + - default: "" + edit: true + envKey: INITIAL_PASSWORD + labelZh: 管理员密码 + labelEn: Admin Password + required: false + random: true + rule: paramComplexity + type: password + - default: "" + edit: true + envKey: INITIAL_EMAIL + labelZh: 管理员电子邮件 + labelEn: Admin Email + required: false + type: text + - default: "" + edit: true + envKey: INITIAL_SERVER_URL + labelZh: 服务端地址 + labelEn: Server URL + required: false + rule: paramExtUrl + type: text + - default: "" + edit: true + envKey: INITIAL_SSH_ROOT_URL + labelZh: SSH 服务端地址 + labelEn: SSH Server URL + required: false + type: text diff --git a/apps/onedev/10.9.7/docker-compose.yml b/apps/onedev/10.9.7/docker-compose.yml new file mode 100644 index 00000000..ee0c7155 --- /dev/null +++ b/apps/onedev/10.9.7/docker-compose.yml @@ -0,0 +1,31 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +services: + onedev: + image: 1dev/server:10.9.7 + container_name: ${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + networks: + - 1panel-network + ports: + - ${PANEL_APP_PORT_HTTP}:6610 + - ${PANEL_APP_PORT_SSH}:6611 + volumes: + - ${ONEDEV_ROOT_PATH}/data:/opt/onedev + - /var/run/docker.sock:/var/run/docker.sock + env_file: + - /etc/1panel/envs/global.env + - /etc/1panel/envs/onedev/onedev.env + - ${ENV_FILE:-/etc/1panel/envs/default.env} + environment: + - initial_user=${INITIAL_USER} + - initial_password=${INITIAL_PASSWORD} + - initial_email=${INITIAL_EMAIL} + - initial_server_url=${INITIAL_SERVER_URL} + - initial_ssh_root_url=${INITIAL_SSH_ROOT_URL} diff --git a/apps/onedev/10.9.7/scripts/init.sh b/apps/onedev/10.9.7/scripts/init.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/onedev/10.9.7/scripts/init.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/onedev/10.9.7/scripts/uninstall.sh b/apps/onedev/10.9.7/scripts/uninstall.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/onedev/10.9.7/scripts/uninstall.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/onedev/10.9.7/scripts/upgrade.sh b/apps/onedev/10.9.7/scripts/upgrade.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/onedev/10.9.7/scripts/upgrade.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found." 
+fi diff --git a/apps/onedev/README.md b/apps/onedev/README.md new file mode 100644 index 00000000..5eccbbd7 --- /dev/null +++ b/apps/onedev/README.md @@ -0,0 +1,230 @@ +# OneDev + +DevOps 平台 + +![OneDev](https://file.lifebus.top/imgs/onedev_cover.png) + +## 简介 + +OneDev 可以在 2 核 2GB 盒子上愉快地运行。 + +OneDev 是迄今为止我在软件工程师职业生涯中使用过的最好的工具。 +它使我和我的团队能够轻松跟踪任务和代码,为我们的 CI/CD 创建深入的可重用管道,实施(可选)基于角色的分支保护,为拉取请求提供高度详细的代码审查。 + +### 功能特性 + +🔎 开箱即用的代码搜索和导航 + +任何提交中的语言感知符号搜索和导航。单击符号可显示当前文件中出现的情况。使用正则表达式进行快速代码搜索。 + +👮‍ 灵活的安全性和合规性扫描 + +扫描代码依赖项、构建的二进制文件或容器映像,以查找安全漏洞、许可证违规或秘密泄露。 + +🚦 用覆盖率和问题注释代码 + +代码将标注覆盖率信息以及 CI/CD 管道中发现的问题,以方便代码审查。 + +💬 随时随地代码讨论 + +选择任何代码或差异以开始讨论。建议并应用更改。讨论集中在代码上,以帮助理解代码。 + +🔒 多功能的代码保护规则 + +设置规则,当某些用户触摸某些分支中的某些文件时,要求进行审查或 CI/CD 验证。 + +📋 自动化看板以保持团队井井有条 + +在看板中手动移动任务,或定义规则以在提交/测试/发布/部署相关工作时自动移动任务。 + +🛠 可定制且灵活的问题工作流程 + +自定义问题状态和字段。手动或自动状态转换规则。发布同步操作和状态的链接。公共项目中的机密问题。 + +📨 服务台将电子邮件与问题链接起来 + +使用问题作为票证系统通过电子邮件为客户提供支持,无需他们注册帐户。为不同的项目或客户分配不同的支持联系人。 + +⏰ 时间跟踪和报告 + +跟踪任务上的估计/花费时间。自动聚合子任务的时间。生成工作统计和计费的时间表。 + +💡 CI/CD 作为代码,无需编写代码 + +用于创建 CI/CD 作业的直观 GUI。典型框架的模板。键入参数。矩阵作业。 CI/CD 逻辑重用。缓存管理。 + +🚀 从简单到规模化的多功能 CI/CD 执行器 + +在容器中或裸机上开箱即用地运行 CI/CD。与 Kubernetes 或代理同时运行大量作业。 + +🛠 调试 CI/CD 作业的工具 + +暂停作业执行的命令。用于检查作业执行环境的Web终端。针对未提交的更改在本地运行作业。 + +📦 内置包注册表 + +内置注册表来管理二进制包。将包与 CI/CD 作业链接。 + +🧩 深度融合、信息交叉引用 + +通过提交、CI/CD 或拉取请求传输问题状态。显示问题的修复版本。查询构建/包版本之间的已修复问题或代码更改。 + +🌲 项目树,方便维护 + +使用树形结构清晰有效地组织项目。在父项目中定义通用设置并在子项目中继承。 + +🐒 智能查询,可保存、订阅 + +强大而直观的查询功能。保存查询以便快速访问。订阅查询以获取有趣事件的通知。 + +🎛️ 团队和用户的仪表板 + +在自定义仪表板中排列小工具,以便一目了然地获取重要信息。与用户或组共享仪表板,或将其公开给所有人。 + +👯 轻松的高可用性和可扩展性 + +轻松的集群设置。跨不同服务器复制项目以实现高可用性,或分发项目以实现水平可扩展性。 + +🛸 用于快速访问的命令面板 + +使用 cmd/ctrl-k 从任何地方调出命令面板。搜索任何内容并跳转到它,而无需翻阅菜单。 + +📈 各语言的 SLOC 趋势 + +检查主分支的 git 历史记录,以有效计算按语言划分的源代码行趋势 + +🕊️ 快速、轻便、可靠 + +精心设计时考虑了资源使用和性能。通过适用于中型项目的 1 核 2G 内存盒获得上述所有功能。密集使用超过5年,可靠性经过实战验证。 + +## 安装说明 + +### 无人值守 + +通过填写必须的环境变量来配置 OneDev,无需通过 Web 界面进行配置。 + ++ `管理员帐户` - 管理员帐户的用户名 ++ `管理员密码` - 管理员帐户的密码 ++ `管理员电子邮件` - 管理员帐户的电子邮件地址 ++ `服务端地址` - OneDev 服务端的地址 (例如 `https://onedev.io`) ++ `SSH 服务端地址` - (可选) 用于 SSH 克隆的地址 (例如 `ssh://onedev.io`) + +不填写 `SSH 服务端地址` 时,将从 `服务端地址` 派生。 + +### 使用外部数据库 + +OneDev 默认使用嵌入式数据库存储数据。使用外部数据库需要前往应用安装目录下的 `config` 目录,编辑 `onedev.env` 文件,配置数据库连接信息。 + +填写 `onedev.env` 时,请删除对应 `#` 开头的注释行。数据库只能选择一种,填写多个数据库配置信息将导致启动失败。 + +外部数据库类型,支持 `MySQL`、`PostgreSQL`、`MariaDB`、`MS SQL Server` + +> 数据库配置信息 +> +> `hibernate_dialect` - 数据库方言 (固定值) +> +> `hibernate_connection_driver_class` - 数据库驱动 (固定值) +> +> `hibernate_connection_url` - 数据库连接地址 (请根据数据库类型填写) +> +> `hibernate_connection_username` - 数据库用户名 (请根据数据库类型填写) +> +> `hibernate_connection_password` - 数据库密码 (请根据数据库类型填写) + +| Database | Hibernate Dialect | Driver Class | Connection URL | Username | Password | Notes | +|---------------|------------------------------------------------|----------------------------------------------|---------------------------------------------------------------------------------------------------------------------------|----------|----------|-------------------------| +| MySQL | org.hibernate.dialect.MySQL5InnoDBDialect | com.mysql.cj.jdbc.Driver | jdbc:mysql://localhost:3306/onedev?serverTimezone=UTC&allowPublicKeyRetrieval=true&useSSL=false&disableMariaDbDriver=true | root | root | | +| PostgreSQL | io.onedev.server.persistence.PostgreSQLDialect | org.postgresql.Driver | jdbc:postgresql://localhost:5432/onedev | postgres | postgres | 请确保在此处使用 OneDev 自己版本的方言 | +| MariaDB | org.hibernate.dialect.MySQL5InnoDBDialect | org.mariadb.jdbc.Driver | jdbc:mariadb://localhost:3306/onedev | root | root | | +| MS SQL Server | 
org.hibernate.dialect.SQLServer2012Dialect | com.microsoft.sqlserver.jdbc.SQLServerDriver | jdbc:sqlserver://localhost:1433;databaseName=onedev | sa | sa | | + +#### 示例 + +> MySQL + +```env +hibernate_dialect=org.hibernate.dialect.MySQL5InnoDBDialect +hibernate_connection_driver_class=com.mysql.cj.jdbc.Driver +hibernate_connection_url=jdbc:mysql://localhost:3306/onedev?serverTimezone=UTC&allowPublicKeyRetrieval=true&useSSL=false&disableMariaDbDriver=true +hibernate_connection_username=root +hibernate_connection_password=root +``` + +> PostgreSQL + +```env +hibernate_dialect=io.onedev.server.persistence.PostgreSQLDialect +hibernate_connection_driver_class=org.postgresql.Driver +hibernate_connection_url=jdbc:postgresql://localhost:5432/onedev +hibernate_connection_username=postgres +hibernate_connection_password=postgres +``` + +> MariaDB + +```env +hibernate_dialect=org.hibernate.dialect.MySQL5InnoDBDialect +hibernate_connection_driver_class=org.mariadb.jdbc.Driver +hibernate_connection_url=jdbc:mariadb://localhost:3306/onedev +hibernate_connection_username=root +hibernate_connection_password=root +``` + +> MS SQL Server + +```env +hibernate_dialect=org.hibernate.dialect.SQLServer2012Dialect +hibernate_connection_driver_class=com.microsoft.sqlserver.jdbc.SQLServerDriver +hibernate_connection_url=jdbc:sqlserver://localhost:1433;databaseName=onedev +hibernate_connection_username=sa +hibernate_connection_password=sa +``` + +## 反向代理 + +假设您的 OneDev 实例在端口 6610 上运行,并且您希望通过 http://onedev.example.com 访问它。 + +请注意不要使用 `localhost` 作为 `proxy_pass` 的目标,Docker 容器内的 `localhost` 是容器本身,而不是宿主机。 + +### Nginx + +```nginx +server { + listen 80; + listen [::]:80; + + server_name onedev.example.com; + + # 上传文件大小不限制 + client_max_body_size 0; + + location /wicket/websocket { + proxy_pass http://localhost:6610/wicket/websocket; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + } + + location /~server { + proxy_pass http://localhost:6610/~server; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + } + + location /~api/streaming { + proxy_pass http://localhost:6610/~api/streaming; + proxy_buffering off; + } + + location / { + proxy_pass http://localhost:6610/; + } +} +``` + +### Caddy Server + +```caddy + caddy reverse-proxy --from onedev.example.com --to localhost:6610 +``` diff --git a/apps/onedev/data.yml b/apps/onedev/data.yml new file mode 100644 index 00000000..144d5d95 --- /dev/null +++ b/apps/onedev/data.yml @@ -0,0 +1,18 @@ +name: OneDev +title: DevOps平台 +description: 单一 DevOps 平台 +additionalProperties: + key: onedev + name: OneDev + tags: + - DevOps + - Storage + - Local + shortDescZh: 单一 DevOps 平台 + shortDescEn: Single DevOps Platform + type: website + crossVersionUpdate: true + limit: 0 + website: https://onedev.io + github: https://github.com/theonedev/onedev/ + document: https://docs.onedev.io diff --git a/apps/onedev/logo.png b/apps/onedev/logo.png new file mode 100644 index 00000000..2a258552 Binary files /dev/null and b/apps/onedev/logo.png differ diff --git a/apps/outline/0.77.3/data.yml b/apps/outline/0.77.3/data.yml new file mode 100644 index 00000000..c2e78f0e --- /dev/null +++ b/apps/outline/0.77.3/data.yml @@ -0,0 +1,274 @@ +additionalProperties: + formFields: + - child: + default: "" + envKey: PANEL_REDIS_SERVICE + required: true + type: service + default: redis + envKey: PANEL_REDIS_TYPE + labelZh: Redis 服务 (前置检查) + labelEn: Redis Service (Pre-check) + required: true + type: 
apps + values: + - label: Redis + value: redis + - child: + default: "" + envKey: PANEL_POSTGRES_SERVICE + required: true + type: service + default: postgresql + envKey: PANEL_POSTGRES_TYPE + labelZh: Postgres 服务 (前置检查) + labelEn: Postgres Service (Pre-check) + required: true + type: apps + values: + - label: PostgreSQL + value: postgresql + - default: "/home/outline" + edit: true + envKey: OUTLINE_ROOT_PATH + labelZh: 数据持久化路径 + labelEn: Data persistence path + required: true + type: text + - default: 3000 + edit: true + envKey: PANEL_APP_PORT_HTTP + labelZh: WebUI 端口 + labelEn: WebUI port + required: true + rule: paramPort + type: number + - default: "fbad4f5da794acbc10f060cdfcf8673300f3302d87f076d7f6e781d4840e9c3d" + edit: true + envKey: SECRET_KEY + labelZh: 加密密钥 + labelEn: Secret Key + required: true + type: password + - default: "b2be1c54acbfe6d965bda01e63195cc815e4743a6fb5ba5f82eae916d98989b5" + edit: true + envKey: UTILS_SECRET + labelZh: 工具密钥 + labelEn: Utils Secret + required: true + type: password + - default: "http://127.0.0.1:3000" + edit: true + envKey: URL + labelZh: 域名 + labelEn: Domain + required: true + rule: paramExtUrl + type: text + - default: "" + edit: true + envKey: CDN_URL + labelZh: CDN 域名 + labelEn: CDN Domain + required: false + type: text + - default: "https://iframe.ly/api/iframely" + edit: true + envKey: IFRAMELY_URL + labelZh: iFramely 地址 + labelEn: iFramely URL + required: true + rule: paramExtUrl + type: text + - default: "" + edit: true + envKey: IFRAMELY_API_KEY + labelZh: iFramely 密钥 + labelEn: iFramely API Key + required: false + type: text + - default: "127.0.0.1" + edit: true + envKey: DB_HOSTNAME + labelZh: 数据库 主机地址 + labelEn: Database Host + required: true + type: text + - default: 5432 + edit: true + envKey: DB_PORT + labelZh: 数据库 端口 + labelEn: Database Port + required: true + rule: paramPort + type: number + - default: "outline" + edit: true + envKey: DB_USERNAME + labelZh: 数据库 用户名 + labelEn: Database User + required: true + type: text + - default: "" + edit: true + envKey: DB_PASSWORD + labelZh: 数据库 密码 + labelEn: Database Password + random: true + required: true + rule: paramComplexity + type: password + - default: "outline" + edit: true + envKey: DB_DATABASE_NAME + labelZh: 数据库 名称 + labelEn: Database Name + required: true + type: text + - default: "redis://localhost:6379" + edit: true + envKey: REDIS_URL + labelZh: Redis 链接 + labelEn: Redis URL + required: true + type: text + - default: "" + edit: true + envKey: SLACK_CLIENT_ID + labelZh: Slack 客户端 ID (鉴权) + labelEn: Slack Client ID (Auth) + required: false + type: text + - default: "" + edit: true + envKey: SLACK_CLIENT_SECRET + labelZh: Slack 客户端密钥 (鉴权) + labelEn: Slack Client Secret (Auth) + required: false + type: password + - default: "" + edit: true + envKey: GOOGLE_CLIENT_ID + labelZh: Google 客户端 ID (鉴权) + labelEn: Google Client ID (Auth) + required: false + type: text + - default: "" + edit: true + envKey: GOOGLE_CLIENT_SECRET + labelZh: Google 客户端密钥 (鉴权) + labelEn: Google Client Secret (Auth) + required: false + type: password + - default: "" + edit: true + envKey: DISCORD_CLIENT_ID + labelZh: Discord 客户端 ID (鉴权) + labelEn: Discord Client ID (Auth) + required: false + type: text + - default: "" + edit: true + envKey: DISCORD_CLIENT_SECRET + labelZh: Discord 客户端密钥 (鉴权) + labelEn: Discord Client Secret (Auth) + required: false + type: password + - default: "" + edit: true + envKey: DISCORD_SERVER_ID + labelZh: Discord 服务端 ID (鉴权) + labelEn: Discord Server ID (Auth) + required: false + 
type: text + - default: "" + edit: true + envKey: DISCORD_SERVER_ROLES + labelZh: Discord 服务端角色 (鉴权) + labelEn: Discord Server Roles (Auth) + required: false + type: text + - default: "" + edit: true + envKey: SMTP_HOST + labelZh: SMTP 主机 + labelEn: SMTP Host + required: false + type: text + - default: 465 + edit: true + envKey: SMTP_PORT + labelZh: SMTP 端口 + labelEn: SMTP Port + required: false + type: number + - default: "" + edit: true + envKey: SMTP_USERNAME + labelZh: SMTP 用户名 + labelEn: SMTP Username + required: false + type: text + - default: "" + edit: true + envKey: SMTP_PASSWORD + labelZh: SMTP 密码 + labelEn: SMTP Password + required: false + type: password + - default: "" + edit: true + envKey: SMTP_FROM_EMAIL + labelZh: SMTP 发件人 + labelEn: SMTP From Email + required: false + type: text + - default: "" + edit: true + envKey: SLACK_VERIFICATION_TOKEN + labelZh: Slack 验证令牌 (Slack 集成) + labelEn: Slack Verification Token (Slack Integration) + required: false + type: text + - default: "" + edit: true + envKey: SLACK_APP_ID + labelZh: Slack 应用ID (Slack 集成) + labelEn: Slack App ID (Slack Integration) + required: false + type: text + - default: "" + edit: true + envKey: GITHUB_CLIENT_ID + labelZh: GitHub 客户端 ID (GitHub 集成) + labelEn: GitHub Client ID (GitHub Integration) + required: false + type: text + - default: "" + edit: true + envKey: GITHUB_CLIENT_SECRET + labelZh: GitHub 客户端密钥 (GitHub 集成) + labelEn: GitHub Client Secret (GitHub Integration) + required: false + type: password + - default: "" + edit: true + envKey: GITHUB_APP_NAME + labelZh: GitHub 应用名称 (GitHub 集成) + labelEn: GitHub App Name (GitHub Integration) + required: false + type: text + - default: "" + edit: true + envKey: GITHUB_APP_ID + labelZh: GitHub 应用ID (GitHub 集成) + labelEn: GitHub App ID (GitHub Integration) + required: false + type: text + - default: "" + edit: true + envKey: GITHUB_APP_PRIVATE_KEY + labelZh: GitHub 应用私钥 (GitHub 集成) + labelEn: GitHub App Private Key (GitHub Integration) + required: false + type: password diff --git a/apps/outline/0.77.3/docker-compose.yml b/apps/outline/0.77.3/docker-compose.yml new file mode 100644 index 00000000..6928a3bc --- /dev/null +++ b/apps/outline/0.77.3/docker-compose.yml @@ -0,0 +1,25 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +services: + outline: + image: outlinewiki/outline:0.77.3 + container_name: ${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + networks: + - 1panel-network + ports: + - ${PANEL_APP_PORT_HTTP}:3000 + volumes: + - ${OUTLINE_ROOT_PATH}/data:/var/lib/outline/data + env_file: + - /etc/1panel/envs/global.env + - /etc/1panel/envs/outline/outline.env + - ${ENV_FILE:-/etc/1panel/envs/default.env} + environment: + - DATABASE_URL=postgres://${DB_USERNAME}:${DB_PASSWORD}@${DB_HOSTNAME}:${DB_PORT}/${DB_DATABASE_NAME} diff --git a/apps/outline/0.77.3/scripts/init.sh b/apps/outline/0.77.3/scripts/init.sh new file mode 100644 index 00000000..95adde72 --- /dev/null +++ b/apps/outline/0.77.3/scripts/init.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + # setup-2 remove empty values + sed -i '/^.*=""/d' .env + + echo "Check Finish." + +else + echo "Error: .env file not found." 
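+  # .env is expected to be created by the panel in the app's install directory
+  # before the init/upgrade/uninstall scripts run; if it is missing, nothing is initialized.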
+fi diff --git a/apps/outline/0.77.3/scripts/uninstall.sh b/apps/outline/0.77.3/scripts/uninstall.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/outline/0.77.3/scripts/uninstall.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/outline/0.77.3/scripts/upgrade.sh b/apps/outline/0.77.3/scripts/upgrade.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/outline/0.77.3/scripts/upgrade.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/outline/README.md b/apps/outline/README.md new file mode 100644 index 00000000..57df4a20 --- /dev/null +++ b/apps/outline/README.md @@ -0,0 +1,76 @@ +# Outline + +您团队的知识库 + +![Outline](https://file.lifebus.top/imgs/outline_cover.png) + +迷失在乱七八糟的文档中?无法确定谁有访问权限?同事在聊天中反复询问相同的信息?是时候整理团队知识了。 + +## 安装说明 + +### 参数 `加密密钥` 与 `工具密钥` 配置 + +`加密密钥` 与 `工具密钥` 为必填项,且必须为 32 位长度的字符串。 + +可在终端使用 `openssl` 生成 + +```sh +openssl rand -hex 32 +``` + +### 参数 `Redis 链接` 配置 + +由于Redis特殊性,需要按照格式填写,格式如下: + +格式:`redis://[:password@]host[:port][/database][?option=value]` + +``` +# 无密码 +redis://127.0.0.1:6379 + +# 有密码 password +redis://password@127.0.0.1:6379 + +# 有用户名 username,有密码 password +redis://username:password@127.0.0.1:6379 + +# 有密码,指定数据库 1 +redis://password@127.0.0.1:6379/1 + +# 有密码,指定数据库,指定超时时间 +redis://password@127.0.0.1:6379/1?timeout=10 + +# 有密码,指定数据库,指定超时时间,指定连接池大小 +redis://password@127.0.0.1:6379/1?timeout=10&pool_size=10 +``` + +同时,支持使用 `ioredis://` 协议,格式如下: + +> 示例: +> `ioredis://eyJzZW50aW5lbHMiOlt7Imhvc3QiOiJzZW50aW5lbC0wIiwicG9ydCI6MjYzNzl9LHsiaG9zdCI6InNlbnRpbmVsLTEiLCJwb3J0IjoyNjM3OX1dLCJuYW1lIjoibXltYXN0ZXIifQ==` +> +> 对应的解码后的内容为: +> {"sentinels":[{"host":"sentinel-0","port":26379},{"host":"sentinel-1","port":26379}],"name":"mymaster"} + +格式:`ioredis://Base64({})` + +## 反向代理 + +> Nginx + +```nginx + location / { + proxy_pass http://localhost:3000/; + + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "Upgrade"; + proxy_set_header Host $host; + + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;proxy_set_header Host $host; + proxy_set_header Host $http_host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Scheme $scheme; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_redirect off; + } +``` diff --git a/apps/outline/data.yml b/apps/outline/data.yml new file mode 100644 index 00000000..e3625bdd --- /dev/null +++ b/apps/outline/data.yml @@ -0,0 +1,18 @@ +name: Outline +title: 您团队的知识库 +description: 您团队的知识库 +additionalProperties: + key: outline + name: Outline + tags: + - WebSite + - Storage + - Local + shortDescZh: 您团队的知识库 + shortDescEn: Your team's knowledge base + type: website + crossVersionUpdate: true + limit: 0 + website: https://www.getoutline.com/ + github: https://github.com/outline/outline/ + document: https://docs.getoutline.com/ diff --git a/apps/outline/logo.png b/apps/outline/logo.png new file mode 100644 index 00000000..215ab1c4 Binary files /dev/null and b/apps/outline/logo.png differ diff --git a/apps/percona/8.0/config/my.cnf b/apps/percona/8.0/config/my.cnf new file mode 100644 index 00000000..feee7b63 --- /dev/null +++ b/apps/percona/8.0/config/my.cnf @@ -0,0 +1,19 @@ +[mysqld] +skip-host-cache +skip-name-resolve +datadir=/var/lib/mysql 
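+# datadir corresponds to the ${PERCONA_ROOT_PATH}/data bind mount declared in docker-compose.yml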
+socket=/var/run/mysqld/mysqld.sock +secure-file-priv=/var/lib/mysql-files +user=mysql +log_error_suppression_list='MY-013360' + +character_set_server=utf8 +lower_case_table_names=1 +group_concat_max_len=1024000 +log_bin_trust_function_creators=1 + +pid-file=/var/run/mysqld/mysqld.pid +[client] +socket=/var/run/mysqld/mysqld.sock + +!includedir /etc/mysql/conf.d/ diff --git a/apps/percona/8.0/data.yml b/apps/percona/8.0/data.yml new file mode 100644 index 00000000..787019e9 --- /dev/null +++ b/apps/percona/8.0/data.yml @@ -0,0 +1,24 @@ +additionalProperties: + formFields: + - default: "/home/percona" + edit: true + envKey: PERCONA_ROOT_PATH + labelZh: 数据持久化路径 + labelEn: Data persistence path + required: true + type: text + - default: 3306 + envKey: PANEL_APP_PORT_HTTP + labelZh: 连接端口 + labelEn: Connection Port + required: true + rule: paramPort + type: number + - default: "" + envKey: MYSQL_ROOT_PASSWORD + labelZh: 管理员密码 + labelEn: Admin Password + random: true + required: true + rule: paramComplexity + type: password diff --git a/apps/percona/8.0/docker-compose.yml b/apps/percona/8.0/docker-compose.yml new file mode 100644 index 00000000..845116c4 --- /dev/null +++ b/apps/percona/8.0/docker-compose.yml @@ -0,0 +1,29 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +services: + percona: + image: percona:8.0 + container_name: ${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + networks: + - 1panel-network + user: ${UID:-1000}:${GID:-1000} + ports: + - ${PANEL_APP_PORT_HTTP}:3306 + env_file: + - /etc/1panel/envs/global.env + - ${ENV_FILE:-/etc/1panel/envs/default.env} + volumes: + - ${PERCONA_ROOT_PATH}/data:/var/lib/mysql + - ${PERCONA_ROOT_PATH}/config/my.cnf:/etc/my.cnf + - ${PERCONA_ROOT_PATH}/log:/var/log/mysql + - /etc/timezone:/etc/timezone:ro + - /etc/localtime:/etc/localtime:ro + command: + - --default-authentication-plugin=mysql_native_password diff --git a/apps/percona/8.0/scripts/init.sh b/apps/percona/8.0/scripts/init.sh new file mode 100644 index 00000000..c52b0c79 --- /dev/null +++ b/apps/percona/8.0/scripts/init.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + mkdir -p "$PERCONA_ROOT_PATH" + mkdir -p "$PERCONA_ROOT_PATH/config" + mkdir -p "$PERCONA_ROOT_PATH/data" + mkdir -p "$PERCONA_ROOT_PATH/log" + + cp ./config/my.cnf "$PERCONA_ROOT_PATH/config/my.cnf" + + chown -R 1000:1000 "$PERCONA_ROOT_PATH" + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/percona/8.0/scripts/uninstall.sh b/apps/percona/8.0/scripts/uninstall.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/percona/8.0/scripts/uninstall.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/percona/8.0/scripts/upgrade.sh b/apps/percona/8.0/scripts/upgrade.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/percona/8.0/scripts/upgrade.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found." 
+fi diff --git a/apps/percona/README.md b/apps/percona/README.md new file mode 100644 index 00000000..822590a8 --- /dev/null +++ b/apps/percona/README.md @@ -0,0 +1,12 @@ +# Percona + +Percona Server for MySQL + +![Percona](https://file.lifebus.top/imgs/percona_logo.jpg) + +## 简介 + +Percona Server for MySQL 是任何 MySQL 数据库的免费、完全兼容、增强且开源的直接替代品。它提供卓越的性能、可扩展性和仪器。 + +Percona Server for MySQL 受到数千家企业的信赖,可以为其最苛刻的工作负载提供更好的性能和并发性。 +它通过优化的性能、更高的性能可扩展性和可用性、增强的备份以及更高的可见性为 MySQL 服务器用户提供更高的价值。 diff --git a/apps/percona/data.yml b/apps/percona/data.yml new file mode 100644 index 00000000..20157fb0 --- /dev/null +++ b/apps/percona/data.yml @@ -0,0 +1,17 @@ +name: Percona +title: 关系型数据库 +description: 关系型数据库 +additionalProperties: + key: percona + name: Percona + tags: + - Database + - Local + shortDescZh: 关系型数据库 + shortDescEn: Open source relational database management system + type: runtime + crossVersionUpdate: true + limit: 0 + website: https://www.percona.com/ + github: https://github.com/percona/percona-server/ + document: https://www.percona.com/mysql/software/percona-server-for-mysql/ diff --git a/apps/percona/logo.png b/apps/percona/logo.png new file mode 100644 index 00000000..c8dd3e36 Binary files /dev/null and b/apps/percona/logo.png differ diff --git a/apps/postgresql/README.md b/apps/postgresql/README.md new file mode 100644 index 00000000..6b74416d --- /dev/null +++ b/apps/postgresql/README.md @@ -0,0 +1,44 @@ +# PostgreSQL + +### PostgreSQL:世界上最先进的开源关系数据库 + +![PostgreSQL](https://file.lifebus.top/imgs/postgresql_logo.png) + +## 简介 + +PostgreSQL 是一个功能强大的开源对象关系型数据库系统,它使用并扩展了 SQL 语言,并结合了多种功能,可以安全地存储和扩展最复杂的数据工作负载。PostgreSQL +起源于 1986 年加州大学伯克利分校的 POSTGRES 项目,在核心平台上已有超过 35 年的活跃开发历史。 + +PostgreSQL 因其成熟的架构、可靠性、数据完整性、强大的功能集、可扩展性,以及该软件背后的开源社区为持续提供高性能和创新解决方案所做出的奉献而赢得了良好的声誉。 +PostgreSQL 可在所有主流操作系统上运行,自 2001 年以来一直符合 ACID 标准,并拥有强大的附加功能,如广受欢迎的 PostGIS +地理空间数据库扩展器。因此,PostgreSQL 成为许多人和组织首选的开源关系数据库也就不足为奇了。 + +开始使用PostgreSQL从未如此简单--选择一个你想建立的项目,让PostgreSQL安全稳健地存储你的数据。 + +### 为什么使用 PostgreSQL + +PostgreSQL 具有许多功能,旨在帮助开发人员构建应用程序,帮助管理员保护数据完整性和构建容错环境,并帮助您管理数据,无论数据集大小。除了免费和开源之外,PostgreSQL +还具有高度可扩展性。例如,您可以定义自己的数据类型,创建自定义函数,甚至可以使用不同的编程语言编写代码,而无需重新编译数据库! 
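+
+As a rough, illustrative sketch of that extensibility (not part of this app's install flow): assuming a running PostgreSQL container from this app, here named `postgresql` (adjust to your actual container name), with the default `postgres` user and database, a custom domain type and a SQL function can be added with plain `psql`, no recompilation required:
+
+```sh
+# Hypothetical container name; replace with the name chosen at install time.
+docker exec -i postgresql psql -U postgres -d postgres <<'SQL'
+-- a user-defined type (domain) with a validity check
+CREATE DOMAIN percentage AS numeric CHECK (VALUE BETWEEN 0 AND 100);
+-- a user-defined function written in plain SQL, usable immediately
+CREATE FUNCTION add_percent(base numeric, pct percentage)
+RETURNS numeric LANGUAGE sql IMMUTABLE
+AS $$ SELECT base * (1 + pct / 100) $$;
+SELECT add_percent(200, 15);  -- returns 230
+SQL
+```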
+ +在不违背传统功能或可能导致架构决策失误的情况下,PostgreSQL 会尽量符合 SQL 标准。SQL 标准所要求的许多功能都得到了支持,尽管有时语法或功能略有不同。 +随着时间的推移,有望进一步实现一致性。截至 2023 年 9 月发布的第 16 版,PostgreSQL 至少符合 SQL:2023 核心一致性 179 个强制功能中的 +170 个。 +截至目前,还没有关系型数据库完全符合这一标准。 + +## 版本选择 + +- `vectors` - 内置 vectors 插件。 + + + 插件名称:vectors + + + 插件描述:vectors 插件用于存储和查询矢量数据,如地理坐标、几何图形等。 + + + 插件链接:[https://github.com/tensorchord/pgvecto.rs](https://github.com/tensorchord/pgvecto.rs) + +- `alpine` - 基于 Alpine Linux 发行版。 + + + 精简版 PostgreSQL,基于 Alpine Linux 发行版,提供了更小的镜像体积和更快的启动速度。 + +- `{version}` 默认版 + + + 官方默认 PostgreSQL 版本,基于 Debian Linux 发行版。 diff --git a/apps/postgresql/data.yml b/apps/postgresql/data.yml new file mode 100644 index 00000000..81f8aaaf --- /dev/null +++ b/apps/postgresql/data.yml @@ -0,0 +1,19 @@ +name: PostgreSQL +tags: + - 数据库 +title: 开源关系型数据库 +description: 开源关系型数据库 +additionalProperties: + key: postgresql + name: PostgreSQL + tags: + - Database + - Local + shortDescZh: 开源关系型数据库 + shortDescEn: Open Source Relational Database + type: runtime + crossVersionUpdate: true + limit: 0 + website: https://www.postgresql.org/ + github: https://github.com/postgres/postgres/ + document: https://www.postgresql.org/docs/ diff --git a/apps/postgresql/logo.png b/apps/postgresql/logo.png new file mode 100644 index 00000000..2c14e305 Binary files /dev/null and b/apps/postgresql/logo.png differ diff --git a/apps/postgresql/pg14-alpine/data.yml b/apps/postgresql/pg14-alpine/data.yml new file mode 100644 index 00000000..ac55698b --- /dev/null +++ b/apps/postgresql/pg14-alpine/data.yml @@ -0,0 +1,40 @@ +additionalProperties: + formFields: + - default: "/home/postgres" + edit: true + envKey: POSTGRES_ROOT_PATH + labelZh: 数据持久化路径 + labelEn: Data persistence path + required: true + type: text + - default: "" + edit: false + envKey: POSTGRES_PASSWORD + labelZh: Postgres 密码 (首次生效) + labelEn: Postgres Password (First Time) + required: true + random: true + rule: paramComplexity + type: password + - default: 5432 + edit: true + envKey: PANEL_APP_PORT_HTTP + labelZh: Postgres 端口 + labelEn: Postgres Port + required: true + rule: paramPort + type: number + - default: "postgres" + edit: false + envKey: POSTGRES_USER + labelZh: Postgres 用户(不建议改动) + labelEn: Postgres User + required: false + type: text + - default: "postgres" + edit: false + envKey: POSTGRES_DB + labelZh: Postgres 数据库名称(不建议改动) + labelEn: Postgres Database Name + required: false + type: text diff --git a/apps/postgresql/pg14-alpine/docker-compose.yml b/apps/postgresql/pg14-alpine/docker-compose.yml new file mode 100644 index 00000000..5b9abd5d --- /dev/null +++ b/apps/postgresql/pg14-alpine/docker-compose.yml @@ -0,0 +1,27 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +services: + postgresql: + image: postgres:14-alpine + container_name: pg14-alpine-${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + networks: + - 1panel-network + ports: + - ${PANEL_APP_PORT_HTTP}:5432 + env_file: + - /etc/1panel/envs/global.env + - ${ENV_FILE:-/etc/1panel/envs/default.env} + volumes: + - ${POSTGRES_ROOT_PATH}/data:/var/lib/postgresql/data + environment: + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + POSTGRES_USER: ${POSTGRES_USER:-postgres} + POSTGRES_DB: ${POSTGRES_DB:-postgres} + POSTGRES_INITDB_ARGS: "--data-checksums" diff --git a/apps/postgresql/pg14-alpine/scripts/init.sh b/apps/postgresql/pg14-alpine/scripts/init.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/postgresql/pg14-alpine/scripts/init.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source 
.env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/postgresql/pg14-alpine/scripts/uninstall.sh b/apps/postgresql/pg14-alpine/scripts/uninstall.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/postgresql/pg14-alpine/scripts/uninstall.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/postgresql/pg14-alpine/scripts/upgrade.sh b/apps/postgresql/pg14-alpine/scripts/upgrade.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/postgresql/pg14-alpine/scripts/upgrade.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/postgresql/pg14-vectors/data.yml b/apps/postgresql/pg14-vectors/data.yml new file mode 100644 index 00000000..ac55698b --- /dev/null +++ b/apps/postgresql/pg14-vectors/data.yml @@ -0,0 +1,40 @@ +additionalProperties: + formFields: + - default: "/home/postgres" + edit: true + envKey: POSTGRES_ROOT_PATH + labelZh: 数据持久化路径 + labelEn: Data persistence path + required: true + type: text + - default: "" + edit: false + envKey: POSTGRES_PASSWORD + labelZh: Postgres 密码 (首次生效) + labelEn: Postgres Password (First Time) + required: true + random: true + rule: paramComplexity + type: password + - default: 5432 + edit: true + envKey: PANEL_APP_PORT_HTTP + labelZh: Postgres 端口 + labelEn: Postgres Port + required: true + rule: paramPort + type: number + - default: "postgres" + edit: false + envKey: POSTGRES_USER + labelZh: Postgres 用户(不建议改动) + labelEn: Postgres User + required: false + type: text + - default: "postgres" + edit: false + envKey: POSTGRES_DB + labelZh: Postgres 数据库名称(不建议改动) + labelEn: Postgres Database Name + required: false + type: text diff --git a/apps/postgresql/pg14-vectors/docker-compose.yml b/apps/postgresql/pg14-vectors/docker-compose.yml new file mode 100644 index 00000000..16ce5601 --- /dev/null +++ b/apps/postgresql/pg14-vectors/docker-compose.yml @@ -0,0 +1,27 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +services: + postgresql: + image: tensorchord/pgvecto-rs:pg14-v0.2.1 + container_name: pg14-vectors-${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + networks: + - 1panel-network + ports: + - ${PANEL_APP_PORT_HTTP}:5432 + env_file: + - /etc/1panel/envs/global.env + - ${ENV_FILE:-/etc/1panel/envs/default.env} + volumes: + - ${POSTGRES_ROOT_PATH}/data:/var/lib/postgresql/data + environment: + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + POSTGRES_USER: ${POSTGRES_USER:-postgres} + POSTGRES_DB: ${POSTGRES_DB:-postgres} + POSTGRES_INITDB_ARGS: "--data-checksums" diff --git a/apps/postgresql/pg14-vectors/scripts/init.sh b/apps/postgresql/pg14-vectors/scripts/init.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/postgresql/pg14-vectors/scripts/init.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found." 
+fi diff --git a/apps/postgresql/pg14-vectors/scripts/uninstall.sh b/apps/postgresql/pg14-vectors/scripts/uninstall.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/postgresql/pg14-vectors/scripts/uninstall.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/postgresql/pg14-vectors/scripts/upgrade.sh b/apps/postgresql/pg14-vectors/scripts/upgrade.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/postgresql/pg14-vectors/scripts/upgrade.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/postgresql/pg14/data.yml b/apps/postgresql/pg14/data.yml new file mode 100644 index 00000000..ac55698b --- /dev/null +++ b/apps/postgresql/pg14/data.yml @@ -0,0 +1,40 @@ +additionalProperties: + formFields: + - default: "/home/postgres" + edit: true + envKey: POSTGRES_ROOT_PATH + labelZh: 数据持久化路径 + labelEn: Data persistence path + required: true + type: text + - default: "" + edit: false + envKey: POSTGRES_PASSWORD + labelZh: Postgres 密码 (首次生效) + labelEn: Postgres Password (First Time) + required: true + random: true + rule: paramComplexity + type: password + - default: 5432 + edit: true + envKey: PANEL_APP_PORT_HTTP + labelZh: Postgres 端口 + labelEn: Postgres Port + required: true + rule: paramPort + type: number + - default: "postgres" + edit: false + envKey: POSTGRES_USER + labelZh: Postgres 用户(不建议改动) + labelEn: Postgres User + required: false + type: text + - default: "postgres" + edit: false + envKey: POSTGRES_DB + labelZh: Postgres 数据库名称(不建议改动) + labelEn: Postgres Database Name + required: false + type: text diff --git a/apps/postgresql/pg14/docker-compose.yml b/apps/postgresql/pg14/docker-compose.yml new file mode 100644 index 00000000..33c3efb3 --- /dev/null +++ b/apps/postgresql/pg14/docker-compose.yml @@ -0,0 +1,27 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +services: + postgresql: + image: postgres:14 + container_name: pg14-${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + networks: + - 1panel-network + ports: + - ${PANEL_APP_PORT_HTTP}:5432 + env_file: + - /etc/1panel/envs/global.env + - ${ENV_FILE:-/etc/1panel/envs/default.env} + volumes: + - ${POSTGRES_ROOT_PATH}/data:/var/lib/postgresql/data + environment: + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + POSTGRES_USER: ${POSTGRES_USER:-postgres} + POSTGRES_DB: ${POSTGRES_DB:-postgres} + POSTGRES_INITDB_ARGS: "--data-checksums" diff --git a/apps/postgresql/pg14/scripts/init.sh b/apps/postgresql/pg14/scripts/init.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/postgresql/pg14/scripts/init.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/postgresql/pg14/scripts/uninstall.sh b/apps/postgresql/pg14/scripts/uninstall.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/postgresql/pg14/scripts/uninstall.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." 
+fi diff --git a/apps/postgresql/pg14/scripts/upgrade.sh b/apps/postgresql/pg14/scripts/upgrade.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/postgresql/pg14/scripts/upgrade.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/postgresql/pg15-alpine/data.yml b/apps/postgresql/pg15-alpine/data.yml new file mode 100644 index 00000000..ac55698b --- /dev/null +++ b/apps/postgresql/pg15-alpine/data.yml @@ -0,0 +1,40 @@ +additionalProperties: + formFields: + - default: "/home/postgres" + edit: true + envKey: POSTGRES_ROOT_PATH + labelZh: 数据持久化路径 + labelEn: Data persistence path + required: true + type: text + - default: "" + edit: false + envKey: POSTGRES_PASSWORD + labelZh: Postgres 密码 (首次生效) + labelEn: Postgres Password (First Time) + required: true + random: true + rule: paramComplexity + type: password + - default: 5432 + edit: true + envKey: PANEL_APP_PORT_HTTP + labelZh: Postgres 端口 + labelEn: Postgres Port + required: true + rule: paramPort + type: number + - default: "postgres" + edit: false + envKey: POSTGRES_USER + labelZh: Postgres 用户(不建议改动) + labelEn: Postgres User + required: false + type: text + - default: "postgres" + edit: false + envKey: POSTGRES_DB + labelZh: Postgres 数据库名称(不建议改动) + labelEn: Postgres Database Name + required: false + type: text diff --git a/apps/postgresql/pg15-alpine/docker-compose.yml b/apps/postgresql/pg15-alpine/docker-compose.yml new file mode 100644 index 00000000..1ac36a0d --- /dev/null +++ b/apps/postgresql/pg15-alpine/docker-compose.yml @@ -0,0 +1,27 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +services: + postgresql: + image: postgres:15-alpine + container_name: pg15-alpine-${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + networks: + - 1panel-network + ports: + - ${PANEL_APP_PORT_HTTP}:5432 + env_file: + - /etc/1panel/envs/global.env + - ${ENV_FILE:-/etc/1panel/envs/default.env} + volumes: + - ${POSTGRES_ROOT_PATH}/data:/var/lib/postgresql/data + environment: + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + POSTGRES_USER: ${POSTGRES_USER:-postgres} + POSTGRES_DB: ${POSTGRES_DB:-postgres} + POSTGRES_INITDB_ARGS: "--data-checksums" diff --git a/apps/postgresql/pg15-alpine/scripts/init.sh b/apps/postgresql/pg15-alpine/scripts/init.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/postgresql/pg15-alpine/scripts/init.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/postgresql/pg15-alpine/scripts/uninstall.sh b/apps/postgresql/pg15-alpine/scripts/uninstall.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/postgresql/pg15-alpine/scripts/uninstall.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." 
+fi diff --git a/apps/postgresql/pg15-alpine/scripts/upgrade.sh b/apps/postgresql/pg15-alpine/scripts/upgrade.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/postgresql/pg15-alpine/scripts/upgrade.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/postgresql/pg15-vectors/data.yml b/apps/postgresql/pg15-vectors/data.yml new file mode 100644 index 00000000..ac55698b --- /dev/null +++ b/apps/postgresql/pg15-vectors/data.yml @@ -0,0 +1,40 @@ +additionalProperties: + formFields: + - default: "/home/postgres" + edit: true + envKey: POSTGRES_ROOT_PATH + labelZh: 数据持久化路径 + labelEn: Data persistence path + required: true + type: text + - default: "" + edit: false + envKey: POSTGRES_PASSWORD + labelZh: Postgres 密码 (首次生效) + labelEn: Postgres Password (First Time) + required: true + random: true + rule: paramComplexity + type: password + - default: 5432 + edit: true + envKey: PANEL_APP_PORT_HTTP + labelZh: Postgres 端口 + labelEn: Postgres Port + required: true + rule: paramPort + type: number + - default: "postgres" + edit: false + envKey: POSTGRES_USER + labelZh: Postgres 用户(不建议改动) + labelEn: Postgres User + required: false + type: text + - default: "postgres" + edit: false + envKey: POSTGRES_DB + labelZh: Postgres 数据库名称(不建议改动) + labelEn: Postgres Database Name + required: false + type: text diff --git a/apps/postgresql/pg15-vectors/docker-compose.yml b/apps/postgresql/pg15-vectors/docker-compose.yml new file mode 100644 index 00000000..a0c5e5e4 --- /dev/null +++ b/apps/postgresql/pg15-vectors/docker-compose.yml @@ -0,0 +1,27 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +services: + postgresql: + image: tensorchord/pgvecto-rs:pg15-v0.2.1 + container_name: pg15-vectors-${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + networks: + - 1panel-network + ports: + - ${PANEL_APP_PORT_HTTP}:5432 + env_file: + - /etc/1panel/envs/global.env + - ${ENV_FILE:-/etc/1panel/envs/default.env} + volumes: + - ${POSTGRES_ROOT_PATH}/data:/var/lib/postgresql/data + environment: + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + POSTGRES_USER: ${POSTGRES_USER:-postgres} + POSTGRES_DB: ${POSTGRES_DB:-postgres} + POSTGRES_INITDB_ARGS: "--data-checksums" diff --git a/apps/postgresql/pg15-vectors/scripts/init.sh b/apps/postgresql/pg15-vectors/scripts/init.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/postgresql/pg15-vectors/scripts/init.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/postgresql/pg15-vectors/scripts/uninstall.sh b/apps/postgresql/pg15-vectors/scripts/uninstall.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/postgresql/pg15-vectors/scripts/uninstall.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." 
+fi diff --git a/apps/postgresql/pg15-vectors/scripts/upgrade.sh b/apps/postgresql/pg15-vectors/scripts/upgrade.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/postgresql/pg15-vectors/scripts/upgrade.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/postgresql/pg15/data.yml b/apps/postgresql/pg15/data.yml new file mode 100644 index 00000000..ac55698b --- /dev/null +++ b/apps/postgresql/pg15/data.yml @@ -0,0 +1,40 @@ +additionalProperties: + formFields: + - default: "/home/postgres" + edit: true + envKey: POSTGRES_ROOT_PATH + labelZh: 数据持久化路径 + labelEn: Data persistence path + required: true + type: text + - default: "" + edit: false + envKey: POSTGRES_PASSWORD + labelZh: Postgres 密码 (首次生效) + labelEn: Postgres Password (First Time) + required: true + random: true + rule: paramComplexity + type: password + - default: 5432 + edit: true + envKey: PANEL_APP_PORT_HTTP + labelZh: Postgres 端口 + labelEn: Postgres Port + required: true + rule: paramPort + type: number + - default: "postgres" + edit: false + envKey: POSTGRES_USER + labelZh: Postgres 用户(不建议改动) + labelEn: Postgres User + required: false + type: text + - default: "postgres" + edit: false + envKey: POSTGRES_DB + labelZh: Postgres 数据库名称(不建议改动) + labelEn: Postgres Database Name + required: false + type: text diff --git a/apps/postgresql/pg15/docker-compose.yml b/apps/postgresql/pg15/docker-compose.yml new file mode 100644 index 00000000..06dad580 --- /dev/null +++ b/apps/postgresql/pg15/docker-compose.yml @@ -0,0 +1,27 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +services: + postgresql: + image: postgres:15 + container_name: pg15-${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + networks: + - 1panel-network + ports: + - ${PANEL_APP_PORT_HTTP}:5432 + env_file: + - /etc/1panel/envs/global.env + - ${ENV_FILE:-/etc/1panel/envs/default.env} + volumes: + - ${POSTGRES_ROOT_PATH}/data:/var/lib/postgresql/data + environment: + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + POSTGRES_USER: ${POSTGRES_USER:-postgres} + POSTGRES_DB: ${POSTGRES_DB:-postgres} + POSTGRES_INITDB_ARGS: "--data-checksums" diff --git a/apps/postgresql/pg15/scripts/init.sh b/apps/postgresql/pg15/scripts/init.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/postgresql/pg15/scripts/init.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/postgresql/pg15/scripts/uninstall.sh b/apps/postgresql/pg15/scripts/uninstall.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/postgresql/pg15/scripts/uninstall.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/postgresql/pg15/scripts/upgrade.sh b/apps/postgresql/pg15/scripts/upgrade.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/postgresql/pg15/scripts/upgrade.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." 
+ +else + echo "Error: .env file not found." +fi diff --git a/apps/postgresql/pg16-alpine/data.yml b/apps/postgresql/pg16-alpine/data.yml new file mode 100644 index 00000000..ac55698b --- /dev/null +++ b/apps/postgresql/pg16-alpine/data.yml @@ -0,0 +1,40 @@ +additionalProperties: + formFields: + - default: "/home/postgres" + edit: true + envKey: POSTGRES_ROOT_PATH + labelZh: 数据持久化路径 + labelEn: Data persistence path + required: true + type: text + - default: "" + edit: false + envKey: POSTGRES_PASSWORD + labelZh: Postgres 密码 (首次生效) + labelEn: Postgres Password (First Time) + required: true + random: true + rule: paramComplexity + type: password + - default: 5432 + edit: true + envKey: PANEL_APP_PORT_HTTP + labelZh: Postgres 端口 + labelEn: Postgres Port + required: true + rule: paramPort + type: number + - default: "postgres" + edit: false + envKey: POSTGRES_USER + labelZh: Postgres 用户(不建议改动) + labelEn: Postgres User + required: false + type: text + - default: "postgres" + edit: false + envKey: POSTGRES_DB + labelZh: Postgres 数据库名称(不建议改动) + labelEn: Postgres Database Name + required: false + type: text diff --git a/apps/postgresql/pg16-alpine/docker-compose.yml b/apps/postgresql/pg16-alpine/docker-compose.yml new file mode 100644 index 00000000..aef8f1ce --- /dev/null +++ b/apps/postgresql/pg16-alpine/docker-compose.yml @@ -0,0 +1,27 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +services: + postgresql: + image: postgres:16-alpine + container_name: pg16-alpine-${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + networks: + - 1panel-network + ports: + - ${PANEL_APP_PORT_HTTP}:5432 + env_file: + - /etc/1panel/envs/global.env + - ${ENV_FILE:-/etc/1panel/envs/default.env} + volumes: + - ${POSTGRES_ROOT_PATH}/data:/var/lib/postgresql/data + environment: + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + POSTGRES_USER: ${POSTGRES_USER:-postgres} + POSTGRES_DB: ${POSTGRES_DB:-postgres} + POSTGRES_INITDB_ARGS: "--data-checksums" diff --git a/apps/postgresql/pg16-alpine/scripts/init.sh b/apps/postgresql/pg16-alpine/scripts/init.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/postgresql/pg16-alpine/scripts/init.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/postgresql/pg16-alpine/scripts/uninstall.sh b/apps/postgresql/pg16-alpine/scripts/uninstall.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/postgresql/pg16-alpine/scripts/uninstall.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/postgresql/pg16-alpine/scripts/upgrade.sh b/apps/postgresql/pg16-alpine/scripts/upgrade.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/postgresql/pg16-alpine/scripts/upgrade.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found." 
+fi diff --git a/apps/postgresql/pg16-vectors/data.yml b/apps/postgresql/pg16-vectors/data.yml new file mode 100644 index 00000000..ac55698b --- /dev/null +++ b/apps/postgresql/pg16-vectors/data.yml @@ -0,0 +1,40 @@ +additionalProperties: + formFields: + - default: "/home/postgres" + edit: true + envKey: POSTGRES_ROOT_PATH + labelZh: 数据持久化路径 + labelEn: Data persistence path + required: true + type: text + - default: "" + edit: false + envKey: POSTGRES_PASSWORD + labelZh: Postgres 密码 (首次生效) + labelEn: Postgres Password (First Time) + required: true + random: true + rule: paramComplexity + type: password + - default: 5432 + edit: true + envKey: PANEL_APP_PORT_HTTP + labelZh: Postgres 端口 + labelEn: Postgres Port + required: true + rule: paramPort + type: number + - default: "postgres" + edit: false + envKey: POSTGRES_USER + labelZh: Postgres 用户(不建议改动) + labelEn: Postgres User + required: false + type: text + - default: "postgres" + edit: false + envKey: POSTGRES_DB + labelZh: Postgres 数据库名称(不建议改动) + labelEn: Postgres Database Name + required: false + type: text diff --git a/apps/postgresql/pg16-vectors/docker-compose.yml b/apps/postgresql/pg16-vectors/docker-compose.yml new file mode 100644 index 00000000..0950f839 --- /dev/null +++ b/apps/postgresql/pg16-vectors/docker-compose.yml @@ -0,0 +1,27 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +services: + postgresql: + image: tensorchord/pgvecto-rs:pg16-v0.2.1 + container_name: pg16-vectors-${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + networks: + - 1panel-network + ports: + - ${PANEL_APP_PORT_HTTP}:5432 + env_file: + - /etc/1panel/envs/global.env + - ${ENV_FILE:-/etc/1panel/envs/default.env} + volumes: + - ${POSTGRES_ROOT_PATH}/data:/var/lib/postgresql/data + environment: + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + POSTGRES_USER: ${POSTGRES_USER:-postgres} + POSTGRES_DB: ${POSTGRES_DB:-postgres} + POSTGRES_INITDB_ARGS: "--data-checksums" diff --git a/apps/postgresql/pg16-vectors/scripts/init.sh b/apps/postgresql/pg16-vectors/scripts/init.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/postgresql/pg16-vectors/scripts/init.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/postgresql/pg16-vectors/scripts/uninstall.sh b/apps/postgresql/pg16-vectors/scripts/uninstall.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/postgresql/pg16-vectors/scripts/uninstall.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/postgresql/pg16-vectors/scripts/upgrade.sh b/apps/postgresql/pg16-vectors/scripts/upgrade.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/postgresql/pg16-vectors/scripts/upgrade.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found." 
+fi diff --git a/apps/postgresql/pg16/data.yml b/apps/postgresql/pg16/data.yml new file mode 100644 index 00000000..ac55698b --- /dev/null +++ b/apps/postgresql/pg16/data.yml @@ -0,0 +1,40 @@ +additionalProperties: + formFields: + - default: "/home/postgres" + edit: true + envKey: POSTGRES_ROOT_PATH + labelZh: 数据持久化路径 + labelEn: Data persistence path + required: true + type: text + - default: "" + edit: false + envKey: POSTGRES_PASSWORD + labelZh: Postgres 密码 (首次生效) + labelEn: Postgres Password (First Time) + required: true + random: true + rule: paramComplexity + type: password + - default: 5432 + edit: true + envKey: PANEL_APP_PORT_HTTP + labelZh: Postgres 端口 + labelEn: Postgres Port + required: true + rule: paramPort + type: number + - default: "postgres" + edit: false + envKey: POSTGRES_USER + labelZh: Postgres 用户(不建议改动) + labelEn: Postgres User + required: false + type: text + - default: "postgres" + edit: false + envKey: POSTGRES_DB + labelZh: Postgres 数据库名称(不建议改动) + labelEn: Postgres Database Name + required: false + type: text diff --git a/apps/postgresql/pg16/docker-compose.yml b/apps/postgresql/pg16/docker-compose.yml new file mode 100644 index 00000000..912c934a --- /dev/null +++ b/apps/postgresql/pg16/docker-compose.yml @@ -0,0 +1,27 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +services: + postgresql: + image: postgres:16 + container_name: pg16-${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + networks: + - 1panel-network + ports: + - ${PANEL_APP_PORT_HTTP}:5432 + env_file: + - /etc/1panel/envs/global.env + - ${ENV_FILE:-/etc/1panel/envs/default.env} + volumes: + - ${POSTGRES_ROOT_PATH}/data:/var/lib/postgresql/data + environment: + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + POSTGRES_USER: ${POSTGRES_USER:-postgres} + POSTGRES_DB: ${POSTGRES_DB:-postgres} + POSTGRES_INITDB_ARGS: "--data-checksums" diff --git a/apps/postgresql/pg16/scripts/init.sh b/apps/postgresql/pg16/scripts/init.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/postgresql/pg16/scripts/init.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/postgresql/pg16/scripts/uninstall.sh b/apps/postgresql/pg16/scripts/uninstall.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/postgresql/pg16/scripts/uninstall.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/postgresql/pg16/scripts/upgrade.sh b/apps/postgresql/pg16/scripts/upgrade.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/postgresql/pg16/scripts/upgrade.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found." 
+fi diff --git a/apps/qbittorrent/4.6.5/data.yml b/apps/qbittorrent/4.6.5/data.yml new file mode 100644 index 00000000..20f798b2 --- /dev/null +++ b/apps/qbittorrent/4.6.5/data.yml @@ -0,0 +1,62 @@ +additionalProperties: + formFields: + - default: "host" + edit: true + envKey: NETWORK_MODE + labelZh: 网络模式 + labelEn: Network Mode + required: true + type: select + values: + - label: 主机模式 + value: "host" + - label: 桥接模式 + value: "bridge" + - label: 无网络 + value: "none" + - label: 1panel-network + value: "1panel-network" + - default: 8080 + edit: true + envKey: PANEL_APP_PORT_HTTP + labelZh: WebUI 端口 + labelEn: WebUI Port + required: true + rule: paramPort + type: number + - default: 6881 + edit: true + envKey: PANEL_APP_PORT_TORRENTING + labelZh: Torrenting 端口 + labelEn: Torrenting Port + required: true + rule: paramPort + type: number + - default: "/home/qBittorrent" + edit: true + envKey: QBITTORRENT_ROOT_PATH + labelZh: 数据持久化路径 + labelEn: Data persistence path + required: true + type: text + - default: "" + edit: true + envKey: CUSTOM_MOUNT_DIRECTORY_1 + labelEn: Custom mount directory 1 + labelZh: 自定义挂载目录 1 + required: false + type: text + - default: "" + edit: true + envKey: CUSTOM_MOUNT_DIRECTORY_2 + labelEn: Custom mount directory 2 + labelZh: 自定义挂载目录 2 + required: false + type: text + - default: "" + edit: true + envKey: CUSTOM_MOUNT_DIRECTORY_3 + labelEn: Custom mount directory 3 + labelZh: 自定义挂载目录 3 + required: false + type: text diff --git a/apps/qbittorrent/4.6.5/docker-compose.yml b/apps/qbittorrent/4.6.5/docker-compose.yml new file mode 100644 index 00000000..b42598d3 --- /dev/null +++ b/apps/qbittorrent/4.6.5/docker-compose.yml @@ -0,0 +1,32 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +services: + qbittorrent: + image: linuxserver/qbittorrent:4.6.5 + container_name: ${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + network_mode: ${NETWORK_MODE} + ports: + - ${PANEL_APP_PORT_HTTP} + - ${PANEL_APP_PORT_TORRENTING} + - ${PANEL_APP_PORT_TORRENTING}/udp + env_file: + - /etc/1panel/envs/global.env + volumes: + - ${QBITTORRENT_ROOT_PATH}/config:/config + - ${QBITTORRENT_ROOT_PATH}/downloads:/downloads + - ${CUSTOM_MOUNT_DIRECTORY_1:-./default_mount_1}:${CUSTOM_MOUNT_DIRECTORY_1:-/default_mount_1} + - ${CUSTOM_MOUNT_DIRECTORY_2:-./default_mount_2}:${CUSTOM_MOUNT_DIRECTORY_2:-/default_mount_2} + - ${CUSTOM_MOUNT_DIRECTORY_3:-./default_mount_3}:${CUSTOM_MOUNT_DIRECTORY_3:-/default_mount_3} + environment: + - PUID=0 + - PGID=0 + - UMASK=022 + - WEBUI_PORT=${PANEL_APP_PORT_HTTP} + - TORRENTING_PORT=${PANEL_APP_PORT_TORRENTING} diff --git a/apps/qbittorrent/4.6.5/scripts/init.sh b/apps/qbittorrent/4.6.5/scripts/init.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/qbittorrent/4.6.5/scripts/init.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/qbittorrent/4.6.5/scripts/uninstall.sh b/apps/qbittorrent/4.6.5/scripts/uninstall.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/qbittorrent/4.6.5/scripts/uninstall.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." 
+fi diff --git a/apps/qbittorrent/4.6.5/scripts/upgrade.sh b/apps/qbittorrent/4.6.5/scripts/upgrade.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/qbittorrent/4.6.5/scripts/upgrade.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/qbittorrent/README.md b/apps/qbittorrent/README.md new file mode 100644 index 00000000..2c8bf2ab --- /dev/null +++ b/apps/qbittorrent/README.md @@ -0,0 +1,31 @@ +# qBittorrent + +qBittorrent是一个跨平台的开源、自由的BitTorrent客户端,其图形用户界面是通过Qt所写,后端使用libtorrent。 + +![qBittorrent](https://file.lifebus.top/imgs/qbittorrent_cover.jpg) + +## 特性 + ++ DHT, PEX, 加密连接, 本地节点发现, UPnP, NAT-PMP 端口中继, µTP, 磁力链接, PT下载 ++ 带宽管理 ++ 可以指定网络接口 ++ 易于管理torrent, tracker 和节点(torrent的队列、优先顺序,和其中文件各自分别的下载顺序) ++ Unicode支持 ++ 75种语言的本地化。 ++ 综合的搜索引擎,接口类似eMule。 ++ IP过滤: 文件类型 eMule dat, 或 PeerGuardian ++ 支持IPv6 ++ 集成RSS阅读器,可以订阅并下载 ++ 内置torrrent搜索引擎(同时搜索多个torrent网站) ++ Web界面 ++ Torrent创建工具 + +## 安装说明 + +您必须在设置的 Web UI 部分更改用户名/密码。 + +如果不更改密码,每次容器启动时都会生成一个新密码。 + +> 用户名:admin +> +> 密码:{日志中查询} diff --git a/apps/qbittorrent/data.yml b/apps/qbittorrent/data.yml new file mode 100644 index 00000000..82239e3f --- /dev/null +++ b/apps/qbittorrent/data.yml @@ -0,0 +1,18 @@ +name: qBittorrent +title: BitTorrent客户端 +description: BitTorrent客户端 +additionalProperties: + key: qbittorrent + name: qBittorrent + tags: + - WebSite + - Tool + - Local + shortDescZh: BitTorrent客户端 + shortDescEn: BitTorrent client + type: website + crossVersionUpdate: true + limit: 0 + website: https://www.qbittorrent.org/ + github: https://github.com/qbittorrent/qBittorrent/ + document: https://github.com/qbittorrent/qBittorrent/wiki/ diff --git a/apps/qbittorrent/logo.png b/apps/qbittorrent/logo.png new file mode 100644 index 00000000..fb57111b Binary files /dev/null and b/apps/qbittorrent/logo.png differ diff --git a/apps/qinglong/2.17.8/data.yml b/apps/qinglong/2.17.8/data.yml new file mode 100644 index 00000000..b32ae486 --- /dev/null +++ b/apps/qinglong/2.17.8/data.yml @@ -0,0 +1,24 @@ +additionalProperties: + formFields: + - default: "/home/qinglong" + edit: true + envKey: QINGLONG_ROOT_PATH + labelZh: 数据持久化路径 + labelEn: Data persistence path + required: true + type: text + - default: 5700 + edit: true + envKey: PANEL_APP_PORT_HTTP + labelZh: WebUI 端口 + labelEn: WebUI Port + required: true + rule: paramPort + type: number + - default: "/" + edit: true + envKey: QINGLONG_BASE_URL + labelZh: 部署路径 + labelEn: Deployment path + required: false + type: text diff --git a/apps/qinglong/2.17.8/docker-compose.yml b/apps/qinglong/2.17.8/docker-compose.yml new file mode 100644 index 00000000..02de27b5 --- /dev/null +++ b/apps/qinglong/2.17.8/docker-compose.yml @@ -0,0 +1,22 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +services: + qinglong: + image: whyour/qinglong:2.17.8 + container_name: ${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + networks: + - 1panel-network + ports: + - ${PANEL_APP_PORT_HTTP:-5700}:5700 + volumes: + - ${QINGLONG_ROOT_PATH}/data:/ql/data + environment: + - QlPort=5700 + - QlBaseUrl=${QINGLONG_BASE_URL:-/} diff --git a/apps/qinglong/2.17.8/scripts/init.sh b/apps/qinglong/2.17.8/scripts/init.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/qinglong/2.17.8/scripts/init.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." 
+fi diff --git a/apps/qinglong/2.17.8/scripts/uninstall.sh b/apps/qinglong/2.17.8/scripts/uninstall.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/qinglong/2.17.8/scripts/uninstall.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/qinglong/2.17.8/scripts/upgrade.sh b/apps/qinglong/2.17.8/scripts/upgrade.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/qinglong/2.17.8/scripts/upgrade.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/qinglong/README.md b/apps/qinglong/README.md new file mode 100644 index 00000000..51fa3b65 --- /dev/null +++ b/apps/qinglong/README.md @@ -0,0 +1,73 @@ +# 青龙 + +支持 Python3、JavaScript、Shell、Typescript 的定时任务管理平台 + +![青龙](https://file.lifebus.top/imgs/qinglong_cover.png) + +## 简介 + ++ 支持多种脚本语言(python3、javaScript、shell、typescript) ++ 支持在线管理脚本、环境变量、配置文件 ++ 支持在线查看任务日志 ++ 支持秒级任务设置 ++ 支持系统级通知 ++ 支持暗黑模式 ++ 支持手机端操作 + +## API 文档 + +[青龙 Apifox](https://qinglong.apifox.cn/) + +## 内置命令 + +### `task` 任务命令 + +```shell +# 依次执行,如果设置了随机延迟,将随机延迟一定秒数 +task +# 依次执行,无论是否设置了随机延迟,均立即运行,前台会输出日,同时记录在日志文件中 +task now +# 并发执行,无论是否设置了随机延迟,均立即运行,前台不产生日,直接记录在日志文件中,且可指定账号执行 +task conc (可选的) +# 指定账号执行,无论是否设置了随机延迟,均立即运行 +task desi +# 设置任务超时时间 +task -m +# 使用 -- 分割,-- 后面的参数会传给脚本,下面的例子,脚本就可接收到参数 -u whyour -p password +task -- -u whyour -p password +``` + +### `ql` 青龙命令 + +```shell +# 更新并重启青龙 +ql update +# 运行自定义脚本extra.sh +ql extra +# 添加单个脚本文件 +ql raw +# 添加单个仓库的指定脚本 +ql repo +# 删除旧日志 +ql rmlog +# 启动tg-bot +ql bot +# 检测青龙环境并修复 +ql check +# 重置登录错误次数 +ql resetlet +# 禁用两步登录 +ql resettfa +``` + +| **参数** | **说明** | +|------------|---------------------------------------------------| +| file_url | 脚本地址 | +| repo_url | 仓库地址 | +| whitelist | 拉取仓库时的白名单,即就是需要拉取的脚本的路径包含的字符串,多个竖线分割 | +| blacklist | 拉取仓库时的黑名单,即就是需要拉取的脚本的路径不包含的字符串,多个竖线分割 | +| dependence | 拉取仓库需要的依赖文件,会直接从仓库拷贝到scripts下的仓库目录,不受黑名单影响,多个竖线分割 | +| extensions | 拉取仓库的文件后缀,多个竖线分割 | +| branch | 拉取仓库的分支 | +| days | 需要保留的日志的天数 | +| file_path | 任务执行时的文件路径 | diff --git a/apps/qinglong/data.yml b/apps/qinglong/data.yml new file mode 100644 index 00000000..6042012d --- /dev/null +++ b/apps/qinglong/data.yml @@ -0,0 +1,17 @@ +name: 青龙 +title: 定时任务管理平台 +description: 定时任务管理平台 +additionalProperties: + key: qinglong + name: 青龙 + tags: + - WebSite + - Local + shortDescZh: 定时任务管理平台 + shortDescEn: A scheduled task management platform + type: website + crossVersionUpdate: true + limit: 0 + website: https://qinglong.online/ + github: https://github.com/whyour/qinglong/ + document: https://qinglong.online/ diff --git a/apps/qinglong/logo.png b/apps/qinglong/logo.png new file mode 100644 index 00000000..81308ade Binary files /dev/null and b/apps/qinglong/logo.png differ diff --git a/apps/redis/6.2.14/config/redis.conf b/apps/redis/6.2.14/config/redis.conf new file mode 100644 index 00000000..ca22ec9f --- /dev/null +++ b/apps/redis/6.2.14/config/redis.conf @@ -0,0 +1,1877 @@ +# Redis configuration file example. 
+# +# Note that in order to read the configuration file, Redis must be +# started with the file path as first argument: +# +# ./redis-server /path/to/redis.conf + +# Note on units: when memory size is needed, it is possible to specify +# it in the usual form of 1k 5GB 4M and so forth: +# +# 1k => 1000 bytes +# 1kb => 1024 bytes +# 1m => 1000000 bytes +# 1mb => 1024*1024 bytes +# 1g => 1000000000 bytes +# 1gb => 1024*1024*1024 bytes +# +# units are case insensitive so 1GB 1Gb 1gB are all the same. + +################################## INCLUDES ################################### + +# Include one or more other config files here. This is useful if you +# have a standard template that goes to all Redis servers but also need +# to customize a few per-server settings. Include files can include +# other files, so use this wisely. +# +# Note that option "include" won't be rewritten by command "CONFIG REWRITE" +# from admin or Redis Sentinel. Since Redis always uses the last processed +# line as value of a configuration directive, you'd better put includes +# at the beginning of this file to avoid overwriting config change at runtime. +# +# If instead you are interested in using includes to override configuration +# options, it is better to use include as the last line. +# +# include /path/to/local.conf +# include /path/to/other.conf + +################################## MODULES ##################################### + +# Load modules at startup. If the server is not able to load modules +# it will abort. It is possible to use multiple loadmodule directives. +# +# loadmodule /path/to/my_module.so +# loadmodule /path/to/other_module.so + +################################## NETWORK ##################################### + +# By default, if no "bind" configuration directive is specified, Redis listens +# for connections from all available network interfaces on the host machine. +# It is possible to listen to just one or multiple selected interfaces using +# the "bind" configuration directive, followed by one or more IP addresses. +# +# Examples: +# +# bind 192.168.1.100 10.0.0.1 +# bind 127.0.0.1 ::1 +# +# ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the +# internet, binding to all the interfaces is dangerous and will expose the +# instance to everybody on the internet. So by default we uncomment the +# following bind directive, that will force Redis to listen only on the +# IPv4 loopback interface address (this means Redis will only be able to +# accept client connections from the same host that it is running on). +# +# IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES +# JUST COMMENT OUT THE FOLLOWING LINE. +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +bind 0.0.0.0 + +# Protected mode is a layer of security protection, in order to avoid that +# Redis instances left open on the internet are accessed and exploited. +# +# When protected mode is on and if: +# +# 1) The server is not binding explicitly to a set of addresses using the +# "bind" directive. +# 2) No password is configured. +# +# The server only accepts connections from clients connecting from the +# IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain +# sockets. +# +# By default protected mode is enabled. You should disable it only if +# you are sure you want clients from other hosts to connect to Redis +# even if no authentication is configured, nor a specific set of interfaces +# are explicitly listed using the "bind" directive. 
+protected-mode yes + +# Accept connections on the specified port, default is 6379 (IANA #815344). +# If port 0 is specified Redis will not listen on a TCP socket. +port 6379 + +# TCP listen() backlog. +# +# In high requests-per-second environments you need a high backlog in order +# to avoid slow clients connection issues. Note that the Linux kernel +# will silently truncate it to the value of /proc/sys/net/core/somaxconn so +# make sure to raise both the value of somaxconn and tcp_max_syn_backlog +# in order to get the desired effect. +tcp-backlog 511 + +# Unix socket. +# +# Specify the path for the Unix socket that will be used to listen for +# incoming connections. There is no default, so Redis will not listen +# on a unix socket when not specified. +# +# unixsocket /tmp/redis.sock +# unixsocketperm 700 + +# Close the connection after a client is idle for N seconds (0 to disable) +timeout 0 + +# TCP keepalive. +# +# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence +# of communication. This is useful for two reasons: +# +# 1) Detect dead peers. +# 2) Force network equipment in the middle to consider the connection to be +# alive. +# +# On Linux, the specified value (in seconds) is the period used to send ACKs. +# Note that to close the connection the double of the time is needed. +# On other kernels the period depends on the kernel configuration. +# +# A reasonable value for this option is 300 seconds, which is the new +# Redis default starting with Redis 3.2.1. +tcp-keepalive 300 + +################################# TLS/SSL ##################################### + +# By default, TLS/SSL is disabled. To enable it, the "tls-port" configuration +# directive can be used to define TLS-listening ports. To enable TLS on the +# default port, use: +# +# port 0 +# tls-port 6379 + +# Configure a X.509 certificate and private key to use for authenticating the +# server to connected clients, masters or cluster peers. These files should be +# PEM formatted. +# +# tls-cert-file redis.crt +# tls-key-file redis.key + +# Configure a DH parameters file to enable Diffie-Hellman (DH) key exchange: +# +# tls-dh-params-file redis.dh + +# Configure a CA certificate(s) bundle or directory to authenticate TLS/SSL +# clients and peers. Redis requires an explicit configuration of at least one +# of these, and will not implicitly use the system wide configuration. +# +# tls-ca-cert-file ca.crt +# tls-ca-cert-dir /etc/ssl/certs + +# By default, clients (including replica servers) on a TLS port are required +# to authenticate using valid client side certificates. +# +# If "no" is specified, client certificates are not required and not accepted. +# If "optional" is specified, client certificates are accepted and must be +# valid if provided, but are not required. +# +# tls-auth-clients no +# tls-auth-clients optional + +# By default, a Redis replica does not attempt to establish a TLS connection +# with its master. +# +# Use the following directive to enable TLS on replication links. +# +# tls-replication yes + +# By default, the Redis Cluster bus uses a plain TCP connection. To enable +# TLS for the bus protocol, use the following directive: +# +# tls-cluster yes + +# Explicitly specify TLS versions to support. Allowed values are case insensitive +# and include "TLSv1", "TLSv1.1", "TLSv1.2", "TLSv1.3" (OpenSSL >= 1.1.1) or +# any combination. To enable only TLSv1.2 and TLSv1.3, use: +# +# tls-protocols "TLSv1.2 TLSv1.3" + +# Configure allowed ciphers. 
See the ciphers(1ssl) manpage for more information +# about the syntax of this string. +# +# Note: this configuration applies only to <= TLSv1.2. +# +# tls-ciphers DEFAULT:!MEDIUM + +# Configure allowed TLSv1.3 ciphersuites. See the ciphers(1ssl) manpage for more +# information about the syntax of this string, and specifically for TLSv1.3 +# ciphersuites. +# +# tls-ciphersuites TLS_CHACHA20_POLY1305_SHA256 + +# When choosing a cipher, use the server's preference instead of the client +# preference. By default, the server follows the client's preference. +# +# tls-prefer-server-ciphers yes + +# By default, TLS session caching is enabled to allow faster and less expensive +# reconnections by clients that support it. Use the following directive to disable +# caching. +# +# tls-session-caching no + +# Change the default number of TLS sessions cached. A zero value sets the cache +# to unlimited size. The default size is 20480. +# +# tls-session-cache-size 5000 + +# Change the default timeout of cached TLS sessions. The default timeout is 300 +# seconds. +# +# tls-session-cache-timeout 60 + +################################# GENERAL ##################################### + +# By default Redis does not run as a daemon. Use 'yes' if you need it. +# Note that Redis will write a pid file in /var/run/redis.pid when daemonized. +daemonize no + +# If you run Redis from upstart or systemd, Redis can interact with your +# supervision tree. Options: +# supervised no - no supervision interaction +# supervised upstart - signal upstart by putting Redis into SIGSTOP mode +# requires "expect stop" in your upstart job config +# supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET +# supervised auto - detect upstart or systemd method based on +# UPSTART_JOB or NOTIFY_SOCKET environment variables +# Note: these supervision methods only signal "process is ready." +# They do not enable continuous pings back to your supervisor. +supervised no + +# If a pid file is specified, Redis writes it where specified at startup +# and removes it at exit. +# +# When the server runs non daemonized, no pid file is created if none is +# specified in the configuration. When the server is daemonized, the pid file +# is used even if not specified, defaulting to "/var/run/redis.pid". +# +# Creating a pid file is best effort: if Redis is not able to create it +# nothing bad happens, the server will start and run normally. +pidfile /var/run/redis_6379.pid + +# Specify the server verbosity level. +# This can be one of: +# debug (a lot of information, useful for development/testing) +# verbose (many rarely useful info, but not a mess like the debug level) +# notice (moderately verbose, what you want in production probably) +# warning (only very important / critical messages are logged) +loglevel notice + +# Specify the log file name. Also the empty string can be used to force +# Redis to log on the standard output. Note that if you use standard +# output for logging but daemonize, logs will be sent to /dev/null +logfile "" + +# To enable logging to the system logger, just set 'syslog-enabled' to yes, +# and optionally update the other syslog parameters to suit your needs. +# syslog-enabled no + +# Specify the syslog identity. +# syslog-ident redis + +# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. +# syslog-facility local0 + +# Set the number of databases. 
The default database is DB 0, you can select +# a different one on a per-connection basis using SELECT where +# dbid is a number between 0 and 'databases'-1 +databases 16 + +# By default Redis shows an ASCII art logo only when started to log to the +# standard output and if the standard output is a TTY. Basically this means +# that normally a logo is displayed only in interactive sessions. +# +# However it is possible to force the pre-4.0 behavior and always show a +# ASCII art logo in startup logs by setting the following option to yes. +always-show-logo yes + +################################ SNAPSHOTTING ################################ +# +# Save the DB on disk: +# +# save +# +# Will save the DB if both the given number of seconds and the given +# number of write operations against the DB occurred. +# +# In the example below the behavior will be to save: +# after 900 sec (15 min) if at least 1 key changed +# after 300 sec (5 min) if at least 10 keys changed +# after 60 sec if at least 10000 keys changed +# +# Note: you can disable saving completely by commenting out all "save" lines. +# +# It is also possible to remove all the previously configured save +# points by adding a save directive with a single empty string argument +# like in the following example: +# +# save "" + +save 900 1 +save 300 10 +save 60 10000 + +# By default Redis will stop accepting writes if RDB snapshots are enabled +# (at least one save point) and the latest background save failed. +# This will make the user aware (in a hard way) that data is not persisting +# on disk properly, otherwise chances are that no one will notice and some +# disaster will happen. +# +# If the background saving process will start working again Redis will +# automatically allow writes again. +# +# However if you have setup your proper monitoring of the Redis server +# and persistence, you may want to disable this feature so that Redis will +# continue to work as usual even if there are problems with disk, +# permissions, and so forth. +stop-writes-on-bgsave-error yes + +# Compress string objects using LZF when dump .rdb databases? +# By default compression is enabled as it's almost always a win. +# If you want to save some CPU in the saving child set it to 'no' but +# the dataset will likely be bigger if you have compressible values or keys. +rdbcompression yes + +# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. +# This makes the format more resistant to corruption but there is a performance +# hit to pay (around 10%) when saving and loading RDB files, so you can disable it +# for maximum performances. +# +# RDB files created with checksum disabled have a checksum of zero that will +# tell the loading code to skip the check. +rdbchecksum yes + +# The filename where to dump the DB +dbfilename dump.rdb + +# Remove RDB files used by replication in instances without persistence +# enabled. By default this option is disabled, however there are environments +# where for regulations or other security concerns, RDB files persisted on +# disk by masters in order to feed replicas, or stored on disk by replicas +# in order to load them for the initial synchronization, should be deleted +# ASAP. Note that this option ONLY WORKS in instances that have both AOF +# and RDB persistence disabled, otherwise is completely ignored. +# +# An alternative (and sometimes better) way to obtain the same effect is +# to use diskless replication on both master and replicas instances. 
However +# in the case of replicas, diskless is not always an option. +rdb-del-sync-files no + +# The working directory. +# +# The DB will be written inside this directory, with the filename specified +# above using the 'dbfilename' configuration directive. +# +# The Append Only File will also be created inside this directory. +# +# Note that you must specify a directory here, not a file name. +dir ./ + +################################# REPLICATION ################################# + +# Master-Replica replication. Use replicaof to make a Redis instance a copy of +# another Redis server. A few things to understand ASAP about Redis replication. +# +# +------------------+ +---------------+ +# | Master | ---> | Replica | +# | (receive writes) | | (exact copy) | +# +------------------+ +---------------+ +# +# 1) Redis replication is asynchronous, but you can configure a master to +# stop accepting writes if it appears to be not connected with at least +# a given number of replicas. +# 2) Redis replicas are able to perform a partial resynchronization with the +# master if the replication link is lost for a relatively small amount of +# time. You may want to configure the replication backlog size (see the next +# sections of this file) with a sensible value depending on your needs. +# 3) Replication is automatic and does not need user intervention. After a +# network partition replicas automatically try to reconnect to masters +# and resynchronize with them. +# +# replicaof + +# If the master is password protected (using the "requirepass" configuration +# directive below) it is possible to tell the replica to authenticate before +# starting the replication synchronization process, otherwise the master will +# refuse the replica request. +# +# masterauth +# +# However this is not enough if you are using Redis ACLs (for Redis version +# 6 or greater), and the default user is not capable of running the PSYNC +# command and/or other commands needed for replication. In this case it's +# better to configure a special user to use with replication, and specify the +# masteruser configuration as such: +# +# masteruser +# +# When masteruser is specified, the replica will authenticate against its +# master using the new AUTH form: AUTH . + +# When a replica loses its connection with the master, or when the replication +# is still in progress, the replica can act in two different ways: +# +# 1) if replica-serve-stale-data is set to 'yes' (the default) the replica will +# still reply to client requests, possibly with out of date data, or the +# data set may just be empty if this is the first synchronization. +# +# 2) If replica-serve-stale-data is set to 'no' the replica will reply with +# an error "SYNC with master in progress" to all commands except: +# INFO, REPLICAOF, AUTH, PING, SHUTDOWN, REPLCONF, ROLE, CONFIG, SUBSCRIBE, +# UNSUBSCRIBE, PSUBSCRIBE, PUNSUBSCRIBE, PUBLISH, PUBSUB, COMMAND, POST, +# HOST and LATENCY. +# +replica-serve-stale-data yes + +# You can configure a replica instance to accept writes or not. Writing against +# a replica instance may be useful to store some ephemeral data (because data +# written on a replica will be easily deleted after resync with the master) but +# may also cause problems if clients are writing to it because of a +# misconfiguration. +# +# Since Redis 2.6 by default replicas are read-only. +# +# Note: read only replicas are not designed to be exposed to untrusted clients +# on the internet. It's just a protection layer against misuse of the instance. 
+
+# Replication SYNC strategy: disk or socket.
+#
+# New replicas and reconnecting replicas that are not able to continue the
+# replication process just receiving differences, need to do what is called a
+# "full synchronization". An RDB file is transmitted from the master to the
+# replicas.
+#
+# The transmission can happen in two different ways:
+#
+# 1) Disk-backed: The Redis master creates a new process that writes the RDB
+#                 file on disk. Later the file is transferred by the parent
+#                 process to the replicas incrementally.
+# 2) Diskless: The Redis master creates a new process that directly writes the
+#              RDB file to replica sockets, without touching the disk at all.
+#
+# With disk-backed replication, while the RDB file is generated, more replicas
+# can be queued and served with the RDB file as soon as the current child
+# producing the RDB file finishes its work. With diskless replication instead
+# once the transfer starts, new replicas arriving will be queued and a new
+# transfer will start when the current one terminates.
+#
+# When diskless replication is used, the master waits a configurable amount of
+# time (in seconds) before starting the transfer in the hope that multiple
+# replicas will arrive and the transfer can be parallelized.
+#
+# With slow disks and fast (large bandwidth) networks, diskless replication
+# works better.
+repl-diskless-sync no
+
+# When diskless replication is enabled, it is possible to configure the delay
+# the server waits in order to spawn the child that transfers the RDB via socket
+# to the replicas.
+#
+# This is important since once the transfer starts, it is not possible to serve
+# new replicas arriving, that will be queued for the next RDB transfer, so the
+# server waits a delay in order to let more replicas arrive.
+#
+# The delay is specified in seconds, and by default is 5 seconds. To disable
+# it entirely just set it to 0 seconds and the transfer will start ASAP.
+repl-diskless-sync-delay 5
+
+# -----------------------------------------------------------------------------
+# WARNING: RDB diskless load is experimental. Since in this setup the replica
+# does not immediately store an RDB on disk, it may cause data loss during
+# failovers. RDB diskless load + Redis modules not handling I/O reads may also
+# cause Redis to abort in case of I/O errors during the initial synchronization
+# stage with the master. Use only if you know what you are doing.
+# -----------------------------------------------------------------------------
+#
+# Replica can load the RDB it reads from the replication link directly from the
+# socket, or store the RDB to a file and read that file after it was completely
+# received from the master.
+#
+# In many cases the disk is slower than the network, and storing and loading
+# the RDB file may increase replication time (and even increase the master's
+# Copy on Write memory and replica buffers).
+# However, parsing the RDB file directly from the socket may mean that we have
+# to flush the contents of the current database before the full rdb was
+# received.
For this reason we have the following options: +# +# "disabled" - Don't use diskless load (store the rdb file to the disk first) +# "on-empty-db" - Use diskless load only when it is completely safe. +# "swapdb" - Keep a copy of the current db contents in RAM while parsing +# the data directly from the socket. note that this requires +# sufficient memory, if you don't have it, you risk an OOM kill. +repl-diskless-load disabled + +# Replicas send PINGs to server in a predefined interval. It's possible to +# change this interval with the repl_ping_replica_period option. The default +# value is 10 seconds. +# +# repl-ping-replica-period 10 + +# The following option sets the replication timeout for: +# +# 1) Bulk transfer I/O during SYNC, from the point of view of replica. +# 2) Master timeout from the point of view of replicas (data, pings). +# 3) Replica timeout from the point of view of masters (REPLCONF ACK pings). +# +# It is important to make sure that this value is greater than the value +# specified for repl-ping-replica-period otherwise a timeout will be detected +# every time there is low traffic between the master and the replica. The default +# value is 60 seconds. +# +# repl-timeout 60 + +# Disable TCP_NODELAY on the replica socket after SYNC? +# +# If you select "yes" Redis will use a smaller number of TCP packets and +# less bandwidth to send data to replicas. But this can add a delay for +# the data to appear on the replica side, up to 40 milliseconds with +# Linux kernels using a default configuration. +# +# If you select "no" the delay for data to appear on the replica side will +# be reduced but more bandwidth will be used for replication. +# +# By default we optimize for low latency, but in very high traffic conditions +# or when the master and replicas are many hops away, turning this to "yes" may +# be a good idea. +repl-disable-tcp-nodelay no + +# Set the replication backlog size. The backlog is a buffer that accumulates +# replica data when replicas are disconnected for some time, so that when a +# replica wants to reconnect again, often a full resync is not needed, but a +# partial resync is enough, just passing the portion of data the replica +# missed while disconnected. +# +# The bigger the replication backlog, the longer the replica can endure the +# disconnect and later be able to perform a partial resynchronization. +# +# The backlog is only allocated if there is at least one replica connected. +# +# repl-backlog-size 1mb + +# After a master has no connected replicas for some time, the backlog will be +# freed. The following option configures the amount of seconds that need to +# elapse, starting from the time the last replica disconnected, for the backlog +# buffer to be freed. +# +# Note that replicas never free the backlog for timeout, since they may be +# promoted to masters later, and should be able to correctly "partially +# resynchronize" with other replicas: hence they should always accumulate backlog. +# +# A value of 0 means to never release the backlog. +# +# repl-backlog-ttl 3600 + +# The replica priority is an integer number published by Redis in the INFO +# output. It is used by Redis Sentinel in order to select a replica to promote +# into a master if the master is no longer working correctly. +# +# A replica with a low priority number is considered better for promotion, so +# for instance if there are three replicas with priority 10, 100, 25 Sentinel +# will pick the one with priority 10, that is the lowest. 
+# +# However a special priority of 0 marks the replica as not able to perform the +# role of master, so a replica with priority of 0 will never be selected by +# Redis Sentinel for promotion. +# +# By default the priority is 100. +replica-priority 100 + +# It is possible for a master to stop accepting writes if there are less than +# N replicas connected, having a lag less or equal than M seconds. +# +# The N replicas need to be in "online" state. +# +# The lag in seconds, that must be <= the specified value, is calculated from +# the last ping received from the replica, that is usually sent every second. +# +# This option does not GUARANTEE that N replicas will accept the write, but +# will limit the window of exposure for lost writes in case not enough replicas +# are available, to the specified number of seconds. +# +# For example to require at least 3 replicas with a lag <= 10 seconds use: +# +# min-replicas-to-write 3 +# min-replicas-max-lag 10 +# +# Setting one or the other to 0 disables the feature. +# +# By default min-replicas-to-write is set to 0 (feature disabled) and +# min-replicas-max-lag is set to 10. + +# A Redis master is able to list the address and port of the attached +# replicas in different ways. For example the "INFO replication" section +# offers this information, which is used, among other tools, by +# Redis Sentinel in order to discover replica instances. +# Another place where this info is available is in the output of the +# "ROLE" command of a master. +# +# The listed IP address and port normally reported by a replica is +# obtained in the following way: +# +# IP: The address is auto detected by checking the peer address +# of the socket used by the replica to connect with the master. +# +# Port: The port is communicated by the replica during the replication +# handshake, and is normally the port that the replica is using to +# listen for connections. +# +# However when port forwarding or Network Address Translation (NAT) is +# used, the replica may actually be reachable via different IP and port +# pairs. The following two options can be used by a replica in order to +# report to its master a specific set of IP and port, so that both INFO +# and ROLE will report those values. +# +# There is no need to use both the options if you need to override just +# the port or the IP address. +# +# replica-announce-ip 5.5.5.5 +# replica-announce-port 1234 + +############################### KEYS TRACKING ################################# + +# Redis implements server assisted support for client side caching of values. +# This is implemented using an invalidation table that remembers, using +# 16 millions of slots, what clients may have certain subsets of keys. In turn +# this is used in order to send invalidation messages to clients. Please +# check this page to understand more about the feature: +# +# https://redis.io/topics/client-side-caching +# +# When tracking is enabled for a client, all the read only queries are assumed +# to be cached: this will force Redis to store information in the invalidation +# table. When keys are modified, such information is flushed away, and +# invalidation messages are sent to the clients. However if the workload is +# heavily dominated by reads, Redis could use more and more memory in order +# to track the keys fetched by many clients. +# +# For this reason it is possible to configure a maximum fill value for the +# invalidation table. 
By default it is set to 1M of keys, and once this limit
+# is reached, Redis will start to evict keys in the invalidation table
+# even if they were not modified, just to reclaim memory: this will in turn
+# force the clients to invalidate the cached values. Basically the table
+# maximum size is a trade off between the memory you want to spend server
+# side to track information about who cached what, and the ability of clients
+# to retain cached objects in memory.
+#
+# If you set the value to 0, it means there are no limits, and Redis will
+# retain as many keys as needed in the invalidation table.
+# In the "stats" INFO section, you can find information about the number of
+# keys in the invalidation table at every given moment.
+#
+# Note: when key tracking is used in broadcasting mode, no memory is used
+# in the server side so this setting is useless.
+#
+# tracking-table-max-keys 1000000
+
+################################## SECURITY ###################################
+
+# Warning: since Redis is pretty fast, an outside user can try up to
+# 1 million passwords per second against a modern box. This means that you
+# should use very strong passwords, otherwise they will be very easy to break.
+# Note that because the password is really a shared secret between the client
+# and the server, and should not be memorized by any human, the password
+# can be easily a long string from /dev/urandom or whatever, so by using a
+# long and unguessable password no brute force attack will be possible.
+
+# Redis ACL users are defined in the following format:
+#
+#   user <username> ... acl rules ...
+#
+# For example:
+#
+#   user worker +@list +@connection ~jobs:* on >ffa9203c493aa99
+#
+# (a redis-cli version of this same example is sketched right after the rule
+# list below)
+#
+# The special username "default" is used for new connections. If this user
+# has the "nopass" rule, then new connections will be immediately authenticated
+# as the "default" user without the need of any password provided via the
+# AUTH command. Otherwise if the "default" user is not flagged with "nopass"
+# the connections will start in not authenticated state, and will require
+# AUTH (or the HELLO command AUTH option) in order to be authenticated and
+# start to work.
+#
+# The ACL rules that describe what a user can do are the following:
+#
+#  on           Enable the user: it is possible to authenticate as this user.
+#  off          Disable the user: it's no longer possible to authenticate
+#               with this user, however the already authenticated connections
+#               will still work.
+#  +<command>   Allow the execution of that command
+#  -<command>   Disallow the execution of that command
+#  +@<category> Allow the execution of all the commands in such category
+#               with valid categories are like @admin, @set, @sortedset, ...
+#               and so forth, see the full list in the server.c file where
+#               the Redis command table is described and defined.
+#               The special category @all means all the commands, both the
+#               ones currently present in the server, and the ones that will
+#               be loaded in the future via modules.
+#  +<command>|subcommand    Allow a specific subcommand of an otherwise
+#                           disabled command. Note that this form is not
+#                           allowed as negative like -DEBUG|SEGFAULT, but
+#                           only additive starting with "+".
+#  allcommands  Alias for +@all. Note that it implies the ability to execute
+#               all the future commands loaded via the modules system.
+#  nocommands   Alias for -@all.
+#  ~<pattern>   Add a pattern of keys that can be mentioned as part of
+#               commands. For instance ~* allows all the keys. The pattern
+#               is a glob-style pattern like the one of KEYS.
+#               It is possible to specify multiple patterns.
+#  allkeys      Alias for ~*
+#  resetkeys    Flush the list of allowed keys patterns.
+#  ><password>  Add this password to the list of valid password for the user.
+#               For example >mypass will add "mypass" to the list.
+#               This directive clears the "nopass" flag (see later).
+#  <<password>  Remove this password from the list of valid passwords.
+#  nopass       All the set passwords of the user are removed, and the user
+#               is flagged as requiring no password: it means that every
+#               password will work against this user. If this directive is
+#               used for the default user, every new connection will be
+#               immediately authenticated with the default user without
+#               any explicit AUTH command required. Note that the "resetpass"
+#               directive will clear this condition.
+#  resetpass    Flush the list of allowed passwords. Moreover removes the
+#               "nopass" status. After "resetpass" the user has no associated
+#               passwords and there is no way to authenticate without adding
+#               some password (or setting it as "nopass" later).
+#  reset        Performs the following actions: resetpass, resetkeys, off,
+#               -@all. The user returns to the same state it has immediately
+#               after its creation.
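+
+# As a rough illustration only: the "worker" example above could also be
+# created at runtime instead of in this file, assuming redis-cli is already
+# connected as a user allowed to run ACL commands:
+#
+#   redis-cli ACL SETUSER worker on '>ffa9203c493aa99' '~jobs:*' +@list +@connection
+#   redis-cli ACL LIST             # list every configured user with its rules
+#   redis-cli ACL GETUSER worker   # inspect a single user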
+#
+# ACL rules can be specified in any order: for instance you can start with
+# passwords, then flags, or key patterns. However note that the additive
+# and subtractive rules will CHANGE MEANING depending on the ordering.
+# For instance see the following example:
+#
+#   user alice on +@all -DEBUG ~* >somepassword
+#
+# This will allow "alice" to use all the commands with the exception of the
+# DEBUG command, since +@all added all the commands to the set of the commands
+# alice can use, and later DEBUG was removed. However if we invert the order
+# of two ACL rules the result will be different:
+#
+#   user alice on -DEBUG +@all ~* >somepassword
+#
+# Now DEBUG was removed when alice had yet no commands in the set of allowed
+# commands, later all the commands are added, so the user will be able to
+# execute everything.
+#
+# Basically ACL rules are processed left-to-right.
+#
+# For more information about ACL configuration please refer to
+# the Redis web site at https://redis.io/topics/acl
+
+# ACL LOG
+#
+# The ACL Log tracks failed commands and authentication events associated
+# with ACLs. The ACL Log is useful to troubleshoot failed commands blocked
+# by ACLs. The ACL Log is stored in memory. You can reclaim memory with
+# ACL LOG RESET. Define the maximum entry length of the ACL Log below.
+acllog-max-len 128
+
+# Using an external ACL file
+#
+# Instead of configuring users here in this file, it is possible to use
+# a stand-alone file just listing users. The two methods cannot be mixed:
+# if you configure users here and at the same time you activate the external
+# ACL file, the server will refuse to start.
+#
+# The format of the external ACL user file is exactly the same as the
+# format that is used inside redis.conf to describe users.
+#
+# aclfile /etc/redis/users.acl
+
+# IMPORTANT NOTE: starting with Redis 6 "requirepass" is just a compatibility
+# layer on top of the new ACL system. The option effect will be just setting
+# the password for the default user. Clients will still authenticate using
+# AUTH <password> as usually, or more explicitly with AUTH default <password>
+# if they follow the new protocol: both will work.
+#
+# requirepass foobared
+
+# Command renaming (DEPRECATED).
+#
+# ------------------------------------------------------------------------
+# WARNING: avoid using this option if possible. Instead use ACLs to remove
+# commands from the default user, and put them only in some admin user you
+# create for administrative purposes.
+# ------------------------------------------------------------------------
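+
+# As a rough illustration of that ACL based approach (the user name and
+# password below are placeholders, not values shipped with this file):
+#
+#   redis-cli ACL SETUSER default -CONFIG -DEBUG -SHUTDOWN
+#   redis-cli ACL SETUSER admin on '>a-long-random-password' allkeys +@all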
+#
+# It is possible to change the name of dangerous commands in a shared
+# environment. For instance the CONFIG command may be renamed into something
+# hard to guess so that it will still be available for internal-use tools
+# but not available for general clients.
+#
+# Example:
+#
+# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
+#
+# It is also possible to completely kill a command by renaming it into
+# an empty string:
+#
+# rename-command CONFIG ""
+#
+# Please note that changing the name of commands that are logged into the
+# AOF file or transmitted to replicas may cause problems.
+
+################################### CLIENTS ####################################
+
+# Set the max number of connected clients at the same time. By default
+# this limit is set to 10000 clients, however if the Redis server is not
+# able to configure the process file limit to allow for the specified limit
+# the max number of allowed clients is set to the current file limit
+# minus 32 (as Redis reserves a few file descriptors for internal uses).
+#
+# Once the limit is reached Redis will close all the new connections sending
+# an error 'max number of clients reached'.
+#
+# IMPORTANT: When Redis Cluster is used, the max number of connections is also
+# shared with the cluster bus: every node in the cluster will use two
+# connections, one incoming and another outgoing. It is important to size the
+# limit accordingly in case of very large clusters.
+#
+# maxclients 10000
+
+############################## MEMORY MANAGEMENT ################################
+
+# Set a memory usage limit to the specified amount of bytes.
+# When the memory limit is reached Redis will try to remove keys
+# according to the eviction policy selected (see maxmemory-policy).
+#
+# If Redis can't remove keys according to the policy, or if the policy is
+# set to 'noeviction', Redis will start to reply with errors to commands
+# that would use more memory, like SET, LPUSH, and so on, and will continue
+# to reply to read-only commands like GET.
+#
+# This option is usually useful when using Redis as an LRU or LFU cache, or to
+# set a hard memory limit for an instance (using the 'noeviction' policy).
+#
+# WARNING: If you have replicas attached to an instance with maxmemory on,
+# the size of the output buffers needed to feed the replicas are subtracted
+# from the used memory count, so that network problems / resyncs will
+# not trigger a loop where keys are evicted, and in turn the output
+# buffer of replicas is full with DELs of keys evicted triggering the deletion
+# of more keys, and so forth until the database is completely emptied.
+#
+# In short... if you have replicas attached it is suggested that you set a lower
+# limit for maxmemory so that there is some free RAM on the system for replica
+# output buffers (but this is not needed if the policy is 'noeviction').
+#
+# maxmemory <bytes>
+
+# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
+# is reached. You can select one from the following behaviors:
+#
+# volatile-lru -> Evict using approximated LRU, only keys with an expire set.
+# allkeys-lru -> Evict any key using approximated LRU.
+# volatile-lfu -> Evict using approximated LFU, only keys with an expire set.
+# allkeys-lfu -> Evict any key using approximated LFU.
+# volatile-random -> Remove a random key having an expire set.
+# allkeys-random -> Remove a random key, any key.
+# volatile-ttl -> Remove the key with the nearest expire time (minor TTL)
+# noeviction -> Don't evict anything, just return an error on write operations.
+#
+# LRU means Least Recently Used
+# LFU means Least Frequently Used
+#
+# Both LRU, LFU and volatile-ttl are implemented using approximated
+# randomized algorithms.
+#
+# Note: with any of the above policies, Redis will return an error on write
+# operations, when there are no suitable keys for eviction.
+#
+# At the date of writing these commands are: set setnx setex append
+# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
+# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
+# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
+# getset mset msetnx exec sort
+#
+# The default is:
+#
+# maxmemory-policy noeviction
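+
+# As a rough illustration only (the values here are examples, not defaults of
+# this file): for a cache-style deployment the two settings above are often
+# applied at runtime, e.g. capping memory at 256 MB and evicting any key with
+# an approximated LRU policy:
+#
+#   redis-cli CONFIG SET maxmemory 256mb
+#   redis-cli CONFIG SET maxmemory-policy allkeys-lru
+#   redis-cli INFO memory | grep used_memory_human
+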
+# LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated
+# algorithms (in order to save memory), so you can tune it for speed or
+# accuracy. By default Redis will check five keys and pick the one that was
+# used least recently, you can change the sample size using the following
+# configuration directive.
+#
+# The default of 5 produces good enough results. 10 approximates very closely
+# true LRU but costs more CPU. 3 is faster but not very accurate.
+#
+# maxmemory-samples 5
+
+# Starting from Redis 5, by default a replica will ignore its maxmemory setting
+# (unless it is promoted to master after a failover or manually). It means
+# that the eviction of keys will be just handled by the master, sending the
+# DEL commands to the replica as keys evict in the master side.
+#
+# This behavior ensures that masters and replicas stay consistent, and is usually
+# what you want, however if your replica is writable, or you want the replica
+# to have a different memory setting, and you are sure all the writes performed
+# to the replica are idempotent, then you may change this default (but be sure
+# to understand what you are doing).
+#
+# Note that since the replica by default does not evict, it may end using more
+# memory than the one set via maxmemory (there are certain buffers that may
+# be larger on the replica, or data structures may sometimes take more memory
+# and so forth). So make sure you monitor your replicas and make sure they
+# have enough memory to never hit a real out-of-memory condition before the
+# master hits the configured maxmemory setting.
+#
+# replica-ignore-maxmemory yes
+
+# Redis reclaims expired keys in two ways: upon access when those keys are
+# found to be expired, and also in background, in what is called the
+# "active expire cycle". The key space is slowly and incrementally scanned
+# looking for expired keys to reclaim, so that it is possible to free memory
+# of keys that are expired and will never be accessed again in a short time.
+#
+# The default effort of the expire cycle will try to avoid having more than
+# ten percent of expired keys still in memory, and will try to avoid consuming
+# more than 25% of total memory and to add latency to the system. However
+# it is possible to increase the expire "effort" that is normally set to
+# "1", to a greater value, up to the value "10". At its maximum value the
+# system will use more CPU, longer cycles (and technically may introduce
+# more latency), and will tolerate fewer already expired keys still present
+# in the system.
It's a tradeoff between memory, CPU and latency. +# +# active-expire-effort 1 + +############################# LAZY FREEING #################################### + +# Redis has two primitives to delete keys. One is called DEL and is a blocking +# deletion of the object. It means that the server stops processing new commands +# in order to reclaim all the memory associated with an object in a synchronous +# way. If the key deleted is associated with a small object, the time needed +# in order to execute the DEL command is very small and comparable to most other +# O(1) or O(log_N) commands in Redis. However if the key is associated with an +# aggregated value containing millions of elements, the server can block for +# a long time (even seconds) in order to complete the operation. +# +# For the above reasons Redis also offers non blocking deletion primitives +# such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and +# FLUSHDB commands, in order to reclaim memory in background. Those commands +# are executed in constant time. Another thread will incrementally free the +# object in the background as fast as possible. +# +# DEL, UNLINK and ASYNC option of FLUSHALL and FLUSHDB are user-controlled. +# It's up to the design of the application to understand when it is a good +# idea to use one or the other. However the Redis server sometimes has to +# delete keys or flush the whole database as a side effect of other operations. +# Specifically Redis deletes objects independently of a user call in the +# following scenarios: +# +# 1) On eviction, because of the maxmemory and maxmemory policy configurations, +# in order to make room for new data, without going over the specified +# memory limit. +# 2) Because of expire: when a key with an associated time to live (see the +# EXPIRE command) must be deleted from memory. +# 3) Because of a side effect of a command that stores data on a key that may +# already exist. For example the RENAME command may delete the old key +# content when it is replaced with another one. Similarly SUNIONSTORE +# or SORT with STORE option may delete existing keys. The SET command +# itself removes any old content of the specified key in order to replace +# it with the specified string. +# 4) During replication, when a replica performs a full resynchronization with +# its master, the content of the whole database is removed in order to +# load the RDB file just transferred. +# +# In all the above cases the default is to delete objects in a blocking way, +# like if DEL was called. However you can configure each case specifically +# in order to instead release memory in a non-blocking way like if UNLINK +# was called, using the following configuration directives. + +lazyfree-lazy-eviction no +lazyfree-lazy-expire no +lazyfree-lazy-server-del no +replica-lazy-flush no + +# It is also possible, for the case when to replace the user code DEL calls +# with UNLINK calls is not easy, to modify the default behavior of the DEL +# command to act exactly like UNLINK, using the following configuration +# directive: + +lazyfree-lazy-user-del no + +################################ THREADED I/O ################################# + +# Redis is mostly single threaded, however there are certain threaded +# operations such as UNLINK, slow I/O accesses and other things that are +# performed on side threads. +# +# Now it is also possible to handle Redis clients socket reads and writes +# in different I/O threads. 
Since especially writing is so slow, normally +# Redis users use pipelining in order to speed up the Redis performances per +# core, and spawn multiple instances in order to scale more. Using I/O +# threads it is possible to easily speedup two times Redis without resorting +# to pipelining nor sharding of the instance. +# +# By default threading is disabled, we suggest enabling it only in machines +# that have at least 4 or more cores, leaving at least one spare core. +# Using more than 8 threads is unlikely to help much. We also recommend using +# threaded I/O only if you actually have performance problems, with Redis +# instances being able to use a quite big percentage of CPU time, otherwise +# there is no point in using this feature. +# +# So for instance if you have a four cores boxes, try to use 2 or 3 I/O +# threads, if you have a 8 cores, try to use 6 threads. In order to +# enable I/O threads use the following configuration directive: +# +# io-threads 4 +# +# Setting io-threads to 1 will just use the main thread as usual. +# When I/O threads are enabled, we only use threads for writes, that is +# to thread the write(2) syscall and transfer the client buffers to the +# socket. However it is also possible to enable threading of reads and +# protocol parsing using the following configuration directive, by setting +# it to yes: +# +# io-threads-do-reads no +# +# Usually threading reads doesn't help much. +# +# NOTE 1: This configuration directive cannot be changed at runtime via +# CONFIG SET. Aso this feature currently does not work when SSL is +# enabled. +# +# NOTE 2: If you want to test the Redis speedup using redis-benchmark, make +# sure you also run the benchmark itself in threaded mode, using the +# --threads option to match the number of Redis threads, otherwise you'll not +# be able to notice the improvements. + +############################ KERNEL OOM CONTROL ############################## + +# On Linux, it is possible to hint the kernel OOM killer on what processes +# should be killed first when out of memory. +# +# Enabling this feature makes Redis actively control the oom_score_adj value +# for all its processes, depending on their role. The default scores will +# attempt to have background child processes killed before all others, and +# replicas killed before masters. +# +# Redis supports three options: +# +# no: Don't make changes to oom-score-adj (default). +# yes: Alias to "relative" see below. +# absolute: Values in oom-score-adj-values are written as is to the kernel. +# relative: Values are used relative to the initial value of oom_score_adj when +# the server starts and are then clamped to a range of -1000 to 1000. +# Because typically the initial value is 0, they will often match the +# absolute values. +oom-score-adj no + +# When oom-score-adj is used, this directive controls the specific values used +# for master, replica and background child processes. Values range -2000 to +# 2000 (higher means more likely to be killed). +# +# Unprivileged processes (not root, and without CAP_SYS_RESOURCE capabilities) +# can freely increase their value, but not decrease it below its initial +# settings. This means that setting oom-score-adj to "relative" and setting the +# oom-score-adj-values to positive values will always succeed. +oom-score-adj-values 0 200 800 + +############################## APPEND ONLY MODE ############################### + +# By default Redis asynchronously dumps the dataset on disk. 
This mode is +# good enough in many applications, but an issue with the Redis process or +# a power outage may result into a few minutes of writes lost (depending on +# the configured save points). +# +# The Append Only File is an alternative persistence mode that provides +# much better durability. For instance using the default data fsync policy +# (see later in the config file) Redis can lose just one second of writes in a +# dramatic event like a server power outage, or a single write if something +# wrong with the Redis process itself happens, but the operating system is +# still running correctly. +# +# AOF and RDB persistence can be enabled at the same time without problems. +# If the AOF is enabled on startup Redis will load the AOF, that is the file +# with the better durability guarantees. +# +# Please check http://redis.io/topics/persistence for more information. + +appendonly no + +# The name of the append only file (default: "appendonly.aof") + +appendfilename "appendonly.aof" + +# The fsync() call tells the Operating System to actually write data on disk +# instead of waiting for more data in the output buffer. Some OS will really flush +# data on disk, some other OS will just try to do it ASAP. +# +# Redis supports three different modes: +# +# no: don't fsync, just let the OS flush the data when it wants. Faster. +# always: fsync after every write to the append only log. Slow, Safest. +# everysec: fsync only one time every second. Compromise. +# +# The default is "everysec", as that's usually the right compromise between +# speed and data safety. It's up to you to understand if you can relax this to +# "no" that will let the operating system flush the output buffer when +# it wants, for better performances (but if you can live with the idea of +# some data loss consider the default persistence mode that's snapshotting), +# or on the contrary, use "always" that's very slow but a bit safer than +# everysec. +# +# More details please check the following article: +# http://antirez.com/post/redis-persistence-demystified.html +# +# If unsure, use "everysec". + +# appendfsync always +appendfsync everysec +# appendfsync no + +# When the AOF fsync policy is set to always or everysec, and a background +# saving process (a background save or AOF log background rewriting) is +# performing a lot of I/O against the disk, in some Linux configurations +# Redis may block too long on the fsync() call. Note that there is no fix for +# this currently, as even performing fsync in a different thread will block +# our synchronous write(2) call. +# +# In order to mitigate this problem it's possible to use the following option +# that will prevent fsync() from being called in the main process while a +# BGSAVE or BGREWRITEAOF is in progress. +# +# This means that while another child is saving, the durability of Redis is +# the same as "appendfsync none". In practical terms, this means that it is +# possible to lose up to 30 seconds of log in the worst scenario (with the +# default Linux settings). +# +# If you have latency problems turn this to "yes". Otherwise leave it as +# "no" that is the safest pick from the point of view of durability. + +no-appendfsync-on-rewrite no + +# Automatic rewrite of the append only file. +# Redis is able to automatically rewrite the log file implicitly calling +# BGREWRITEAOF when the AOF log size grows by the specified percentage. 
+# +# This is how it works: Redis remembers the size of the AOF file after the +# latest rewrite (if no rewrite has happened since the restart, the size of +# the AOF at startup is used). +# +# This base size is compared to the current size. If the current size is +# bigger than the specified percentage, the rewrite is triggered. Also +# you need to specify a minimal size for the AOF file to be rewritten, this +# is useful to avoid rewriting the AOF file even if the percentage increase +# is reached but it is still pretty small. +# +# Specify a percentage of zero in order to disable the automatic AOF +# rewrite feature. + +auto-aof-rewrite-percentage 100 +auto-aof-rewrite-min-size 64mb + +# An AOF file may be found to be truncated at the end during the Redis +# startup process, when the AOF data gets loaded back into memory. +# This may happen when the system where Redis is running +# crashes, especially when an ext4 filesystem is mounted without the +# data=ordered option (however this can't happen when Redis itself +# crashes or aborts but the operating system still works correctly). +# +# Redis can either exit with an error when this happens, or load as much +# data as possible (the default now) and start if the AOF file is found +# to be truncated at the end. The following option controls this behavior. +# +# If aof-load-truncated is set to yes, a truncated AOF file is loaded and +# the Redis server starts emitting a log to inform the user of the event. +# Otherwise if the option is set to no, the server aborts with an error +# and refuses to start. When the option is set to no, the user requires +# to fix the AOF file using the "redis-check-aof" utility before to restart +# the server. +# +# Note that if the AOF file will be found to be corrupted in the middle +# the server will still exit with an error. This option only applies when +# Redis will try to read more data from the AOF file but not enough bytes +# will be found. +aof-load-truncated yes + +# When rewriting the AOF file, Redis is able to use an RDB preamble in the +# AOF file for faster rewrites and recoveries. When this option is turned +# on the rewritten AOF file is composed of two different stanzas: +# +# [RDB file][AOF tail] +# +# When loading, Redis recognizes that the AOF file starts with the "REDIS" +# string and loads the prefixed RDB file, then continues loading the AOF +# tail. +aof-use-rdb-preamble yes + +################################ LUA SCRIPTING ############################### + +# Max execution time of a Lua script in milliseconds. +# +# If the maximum execution time is reached Redis will log that a script is +# still in execution after the maximum allowed time and will start to +# reply to queries with an error. +# +# When a long running script exceeds the maximum execution time only the +# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be +# used to stop a script that did not yet call any write commands. The second +# is the only way to shut down the server in the case a write command was +# already issued by the script but the user doesn't want to wait for the natural +# termination of the script. +# +# Set it to 0 or a negative value for unlimited execution without warnings. +lua-time-limit 5000 + +################################ REDIS CLUSTER ############################### + +# Normal Redis instances can't be part of a Redis Cluster; only nodes that are +# started as cluster nodes can. 
In order to start a Redis instance as a +# cluster node enable the cluster support uncommenting the following: +# +# cluster-enabled yes + +# Every cluster node has a cluster configuration file. This file is not +# intended to be edited by hand. It is created and updated by Redis nodes. +# Every Redis Cluster node requires a different cluster configuration file. +# Make sure that instances running in the same system do not have +# overlapping cluster configuration file names. +# +# cluster-config-file nodes-6379.conf + +# Cluster node timeout is the amount of milliseconds a node must be unreachable +# for it to be considered in failure state. +# Most other internal time limits are a multiple of the node timeout. +# +# cluster-node-timeout 15000 + +# A replica of a failing master will avoid to start a failover if its data +# looks too old. +# +# There is no simple way for a replica to actually have an exact measure of +# its "data age", so the following two checks are performed: +# +# 1) If there are multiple replicas able to failover, they exchange messages +# in order to try to give an advantage to the replica with the best +# replication offset (more data from the master processed). +# Replicas will try to get their rank by offset, and apply to the start +# of the failover a delay proportional to their rank. +# +# 2) Every single replica computes the time of the last interaction with +# its master. This can be the last ping or command received (if the master +# is still in the "connected" state), or the time that elapsed since the +# disconnection with the master (if the replication link is currently down). +# If the last interaction is too old, the replica will not try to failover +# at all. +# +# The point "2" can be tuned by user. Specifically a replica will not perform +# the failover if, since the last interaction with the master, the time +# elapsed is greater than: +# +# (node-timeout * cluster-replica-validity-factor) + repl-ping-replica-period +# +# So for example if node-timeout is 30 seconds, and the cluster-replica-validity-factor +# is 10, and assuming a default repl-ping-replica-period of 10 seconds, the +# replica will not try to failover if it was not able to talk with the master +# for longer than 310 seconds. +# +# A large cluster-replica-validity-factor may allow replicas with too old data to failover +# a master, while a too small value may prevent the cluster from being able to +# elect a replica at all. +# +# For maximum availability, it is possible to set the cluster-replica-validity-factor +# to a value of 0, which means, that replicas will always try to failover the +# master regardless of the last time they interacted with the master. +# (However they'll always try to apply a delay proportional to their +# offset rank). +# +# Zero is the only value able to guarantee that when all the partitions heal +# the cluster will always be able to continue. +# +# cluster-replica-validity-factor 10 + +# Cluster replicas are able to migrate to orphaned masters, that are masters +# that are left without working replicas. This improves the cluster ability +# to resist to failures as otherwise an orphaned master can't be failed over +# in case of failure if it has no working replicas. +# +# Replicas migrate to orphaned masters only if there are still at least a +# given number of other working replicas for their old master. This number +# is the "migration barrier". 
A migration barrier of 1 means that a replica +# will migrate only if there is at least 1 other working replica for its master +# and so forth. It usually reflects the number of replicas you want for every +# master in your cluster. +# +# Default is 1 (replicas migrate only if their masters remain with at least +# one replica). To disable migration just set it to a very large value. +# A value of 0 can be set but is useful only for debugging and dangerous +# in production. +# +# cluster-migration-barrier 1 + +# By default Redis Cluster nodes stop accepting queries if they detect there +# is at least a hash slot uncovered (no available node is serving it). +# This way if the cluster is partially down (for example a range of hash slots +# are no longer covered) all the cluster becomes, eventually, unavailable. +# It automatically returns available as soon as all the slots are covered again. +# +# However sometimes you want the subset of the cluster which is working, +# to continue to accept queries for the part of the key space that is still +# covered. In order to do so, just set the cluster-require-full-coverage +# option to no. +# +# cluster-require-full-coverage yes + +# This option, when set to yes, prevents replicas from trying to failover its +# master during master failures. However the master can still perform a +# manual failover, if forced to do so. +# +# This is useful in different scenarios, especially in the case of multiple +# data center operations, where we want one side to never be promoted if not +# in the case of a total DC failure. +# +# cluster-replica-no-failover no + +# This option, when set to yes, allows nodes to serve read traffic while the +# the cluster is in a down state, as long as it believes it owns the slots. +# +# This is useful for two cases. The first case is for when an application +# doesn't require consistency of data during node failures or network partitions. +# One example of this is a cache, where as long as the node has the data it +# should be able to serve it. +# +# The second use case is for configurations that don't meet the recommended +# three shards but want to enable cluster mode and scale later. A +# master outage in a 1 or 2 shard configuration causes a read/write outage to the +# entire cluster without this option set, with it set there is only a write outage. +# Without a quorum of masters, slot ownership will not change automatically. +# +# cluster-allow-reads-when-down no + +# In order to setup your cluster make sure to read the documentation +# available at http://redis.io web site. + +########################## CLUSTER DOCKER/NAT support ######################## + +# In certain deployments, Redis Cluster nodes address discovery fails, because +# addresses are NAT-ted or because ports are forwarded (the typical case is +# Docker and other containers). +# +# In order to make Redis Cluster working in such environments, a static +# configuration where each node knows its public address is needed. The +# following two options are used for this scope, and are: +# +# * cluster-announce-ip +# * cluster-announce-port +# * cluster-announce-bus-port +# +# Each instructs the node about its address, client port, and cluster message +# bus port. The information is then published in the header of the bus packets +# so that other nodes will be able to correctly map the address of the node +# publishing the information. +# +# If the above options are not used, the normal Redis Cluster auto-detection +# will be used instead. 
+#
+# Note that when remapped, the bus port may not be at the fixed offset of
+# clients port + 10000, so you can specify any port and bus-port depending
+# on how they get remapped. If the bus-port is not set, a fixed offset of
+# 10000 will be used as usual.
+#
+# Example:
+#
+# cluster-announce-ip 10.1.1.5
+# cluster-announce-port 6379
+# cluster-announce-bus-port 6380
+
+################################## SLOW LOG ###################################
+
+# The Redis Slow Log is a system to log queries that exceeded a specified
+# execution time. The execution time does not include the I/O operations
+# like talking with the client, sending the reply and so forth,
+# but just the time needed to actually execute the command (this is the only
+# stage of command execution where the thread is blocked and can not serve
+# other requests in the meantime).
+#
+# You can configure the slow log with two parameters: one tells Redis
+# what is the execution time, in microseconds, to exceed in order for the
+# command to get logged, and the other parameter is the length of the
+# slow log. When a new command is logged the oldest one is removed from the
+# queue of logged commands.
+
+# The following time is expressed in microseconds, so 1000000 is equivalent
+# to one second. Note that a negative number disables the slow log, while
+# a value of zero forces the logging of every command.
+slowlog-log-slower-than 10000
+
+# There is no limit to this length. Just be aware that it will consume memory.
+# You can reclaim memory used by the slow log with SLOWLOG RESET.
+slowlog-max-len 128
+
+################################ LATENCY MONITOR ##############################
+
+# The Redis latency monitoring subsystem samples different operations
+# at runtime in order to collect data related to possible sources of
+# latency of a Redis instance.
+#
+# Via the LATENCY command this information is available to the user that can
+# print graphs and obtain reports.
+#
+# The system only logs operations that were performed in a time equal or
+# greater than the amount of milliseconds specified via the
+# latency-monitor-threshold configuration directive. When its value is set
+# to zero, the latency monitor is turned off.
+#
+# By default latency monitoring is disabled since it is mostly not needed
+# if you don't have latency issues, and collecting data has a performance
+# impact, that while very small, can be measured under big load. Latency
+# monitoring can easily be enabled at runtime using the command
+# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
+latency-monitor-threshold 0
+
+############################# EVENT NOTIFICATION ##############################
+
+# Redis can notify Pub/Sub clients about events happening in the key space.
+# This feature is documented at http://redis.io/topics/notifications
+#
+# For instance if keyspace events notification is enabled, and a client
+# performs a DEL operation on key "foo" stored in the Database 0, two
+# messages will be published via Pub/Sub:
+#
+# PUBLISH __keyspace@0__:foo del
+# PUBLISH __keyevent@0__:del foo
+#
+# It is possible to select the events that Redis will notify among a set
+# of classes. Every class is identified by a single character:
+#
+# K Keyspace events, published with __keyspace@<db>__ prefix.
+# E Keyevent events, published with __keyevent@<db>__ prefix.
+# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
+# $ String commands +# l List commands +# s Set commands +# h Hash commands +# z Sorted set commands +# x Expired events (events generated every time a key expires) +# e Evicted events (events generated when a key is evicted for maxmemory) +# t Stream commands +# m Key-miss events (Note: It is not included in the 'A' class) +# A Alias for g$lshzxet, so that the "AKE" string means all the events +# (Except key-miss events which are excluded from 'A' due to their +# unique nature). +# +# The "notify-keyspace-events" takes as argument a string that is composed +# of zero or multiple characters. The empty string means that notifications +# are disabled. +# +# Example: to enable list and generic events, from the point of view of the +# event name, use: +# +# notify-keyspace-events Elg +# +# Example 2: to get the stream of the expired keys subscribing to channel +# name __keyevent@0__:expired use: +# +# notify-keyspace-events Ex +# +# By default all notifications are disabled because most users don't need +# this feature and the feature has some overhead. Note that if you don't +# specify at least one of K or E, no events will be delivered. +notify-keyspace-events "" + +############################### GOPHER SERVER ################################# + +# Redis contains an implementation of the Gopher protocol, as specified in +# the RFC 1436 (https://www.ietf.org/rfc/rfc1436.txt). +# +# The Gopher protocol was very popular in the late '90s. It is an alternative +# to the web, and the implementation both server and client side is so simple +# that the Redis server has just 100 lines of code in order to implement this +# support. +# +# What do you do with Gopher nowadays? Well Gopher never *really* died, and +# lately there is a movement in order for the Gopher more hierarchical content +# composed of just plain text documents to be resurrected. Some want a simpler +# internet, others believe that the mainstream internet became too much +# controlled, and it's cool to create an alternative space for people that +# want a bit of fresh air. +# +# Anyway for the 10nth birthday of the Redis, we gave it the Gopher protocol +# as a gift. +# +# --- HOW IT WORKS? --- +# +# The Redis Gopher support uses the inline protocol of Redis, and specifically +# two kind of inline requests that were anyway illegal: an empty request +# or any request that starts with "/" (there are no Redis commands starting +# with such a slash). Normal RESP2/RESP3 requests are completely out of the +# path of the Gopher protocol implementation and are served as usual as well. +# +# If you open a connection to Redis when Gopher is enabled and send it +# a string like "/foo", if there is a key named "/foo" it is served via the +# Gopher protocol. +# +# In order to create a real Gopher "hole" (the name of a Gopher site in Gopher +# talking), you likely need a script like the following: +# +# https://github.com/antirez/gopher2redis +# +# --- SECURITY WARNING --- +# +# If you plan to put Redis on the internet in a publicly accessible address +# to server Gopher pages MAKE SURE TO SET A PASSWORD to the instance. +# Once a password is set: +# +# 1. The Gopher server (when enabled, not by default) will still serve +# content via Gopher. +# 2. However other commands cannot be called before the client will +# authenticate. +# +# So use the 'requirepass' option to protect your instance. +# +# Note that Gopher is not currently supported when 'io-threads-do-reads' +# is enabled. 
+# +# To enable Gopher support, uncomment the following line and set the option +# from no (the default) to yes. +# +# gopher-enabled no + +############################### ADVANCED CONFIG ############################### + +# Hashes are encoded using a memory efficient data structure when they have a +# small number of entries, and the biggest entry does not exceed a given +# threshold. These thresholds can be configured using the following directives. +hash-max-ziplist-entries 512 +hash-max-ziplist-value 64 + +# Lists are also encoded in a special way to save a lot of space. +# The number of entries allowed per internal list node can be specified +# as a fixed maximum size or a maximum number of elements. +# For a fixed maximum size, use -5 through -1, meaning: +# -5: max size: 64 Kb <-- not recommended for normal workloads +# -4: max size: 32 Kb <-- not recommended +# -3: max size: 16 Kb <-- probably not recommended +# -2: max size: 8 Kb <-- good +# -1: max size: 4 Kb <-- good +# Positive numbers mean store up to _exactly_ that number of elements +# per list node. +# The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size), +# but if your use case is unique, adjust the settings as necessary. +list-max-ziplist-size -2 + +# Lists may also be compressed. +# Compress depth is the number of quicklist ziplist nodes from *each* side of +# the list to *exclude* from compression. The head and tail of the list +# are always uncompressed for fast push/pop operations. Settings are: +# 0: disable all list compression +# 1: depth 1 means "don't start compressing until after 1 node into the list, +# going from either the head or tail" +# So: [head]->node->node->...->node->[tail] +# [head], [tail] will always be uncompressed; inner nodes will compress. +# 2: [head]->[next]->node->node->...->node->[prev]->[tail] +# 2 here means: don't compress head or head->next or tail->prev or tail, +# but compress all nodes between them. +# 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail] +# etc. +list-compress-depth 0 + +# Sets have a special encoding in just one case: when a set is composed +# of just strings that happen to be integers in radix 10 in the range +# of 64 bit signed integers. +# The following configuration setting sets the limit in the size of the +# set in order to use this special memory saving encoding. +set-max-intset-entries 512 + +# Similarly to hashes and lists, sorted sets are also specially encoded in +# order to save a lot of space. This encoding is only used when the length and +# elements of a sorted set are below the following limits: +zset-max-ziplist-entries 128 +zset-max-ziplist-value 64 + +# HyperLogLog sparse representation bytes limit. The limit includes the +# 16 bytes header. When an HyperLogLog using the sparse representation crosses +# this limit, it is converted into the dense representation. +# +# A value greater than 16000 is totally useless, since at that point the +# dense representation is more memory efficient. +# +# The suggested value is ~ 3000 in order to have the benefits of +# the space efficient encoding without slowing down too much PFADD, +# which is O(N) with the sparse encoding. The value can be raised to +# ~ 10000 when CPU is not a concern, but space is, and the data set is +# composed of many HyperLogLogs with cardinality in the 0 - 15000 range. +hll-sparse-max-bytes 3000 + +# Streams macro node max size / items. The stream data structure is a radix +# tree of big nodes that encode multiple items inside. 
Using this configuration
+# it is possible to configure how big a single node can be in bytes, and the
+# maximum number of items it may contain before switching to a new node when
+# appending new stream entries. If any of the following settings are set to
+# zero, the limit is ignored, so for instance it is possible to set just a
+# max entries limit by setting max-bytes to 0 and max-entries to the desired
+# value.
+stream-node-max-bytes 4096
+stream-node-max-entries 100
+
+# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
+# order to help rehashing the main Redis hash table (the one mapping top-level
+# keys to values). The hash table implementation Redis uses (see dict.c)
+# performs a lazy rehashing: the more operation you run into a hash table
+# that is rehashing, the more rehashing "steps" are performed, so if the
+# server is idle the rehashing is never complete and some more memory is used
+# by the hash table.
+#
+# The default is to use this millisecond 10 times every second in order to
+# actively rehash the main dictionaries, freeing memory when possible.
+#
+# If unsure:
+# use "activerehashing no" if you have hard latency requirements and it is
+# not a good thing in your environment that Redis can reply from time to time
+# to queries with 2 milliseconds delay.
+#
+# use "activerehashing yes" if you don't have such hard requirements but
+# want to free memory asap when possible.
+activerehashing yes
+
+# The client output buffer limits can be used to force disconnection of clients
+# that are not reading data from the server fast enough for some reason (a
+# common reason is that a Pub/Sub client can't consume messages as fast as the
+# publisher can produce them).
+#
+# The limit can be set differently for the three different classes of clients:
+#
+# normal -> normal clients including MONITOR clients
+# replica -> replica clients
+# pubsub -> clients subscribed to at least one pubsub channel or pattern
+#
+# The syntax of every client-output-buffer-limit directive is the following:
+#
+# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
+#
+# A client is immediately disconnected once the hard limit is reached, or if
+# the soft limit is reached and remains reached for the specified number of
+# seconds (continuously).
+# So for instance if the hard limit is 32 megabytes and the soft limit is
+# 16 megabytes / 10 seconds, the client will get disconnected immediately
+# if the size of the output buffers reach 32 megabytes, but will also get
+# disconnected if the client reaches 16 megabytes and continuously overcomes
+# the limit for 10 seconds.
+#
+# By default normal clients are not limited because they don't receive data
+# without asking (in a push way), but just after a request, so only
+# asynchronous clients may create a scenario where data is requested faster
+# than it can read.
+#
+# Instead there is a default limit for pubsub and replica clients, since
+# subscribers and replicas receive data in a push fashion.
+#
+# Both the hard or the soft limit can be disabled by setting them to zero.
+client-output-buffer-limit normal 0 0 0
+client-output-buffer-limit replica 256mb 64mb 60
+client-output-buffer-limit pubsub 32mb 8mb 60
+
+# Client query buffers accumulate new commands. They are limited to a fixed
+# amount by default in order to avoid that a protocol desynchronization (for
+# instance due to a bug in the client) will lead to unbound memory usage in
+# the query buffer.
However you can configure it here if you have very special +# needs, such us huge multi/exec requests or alike. +# +# client-query-buffer-limit 1gb + +# In the Redis protocol, bulk requests, that are, elements representing single +# strings, are normally limited to 512 mb. However you can change this limit +# here, but must be 1mb or greater +# +# proto-max-bulk-len 512mb + +# Redis calls an internal function to perform many background tasks, like +# closing connections of clients in timeout, purging expired keys that are +# never requested, and so forth. +# +# Not all tasks are performed with the same frequency, but Redis checks for +# tasks to perform according to the specified "hz" value. +# +# By default "hz" is set to 10. Raising the value will use more CPU when +# Redis is idle, but at the same time will make Redis more responsive when +# there are many keys expiring at the same time, and timeouts may be +# handled with more precision. +# +# The range is between 1 and 500, however a value over 100 is usually not +# a good idea. Most users should use the default of 10 and raise this up to +# 100 only in environments where very low latency is required. +hz 10 + +# Normally it is useful to have an HZ value which is proportional to the +# number of clients connected. This is useful in order, for instance, to +# avoid too many clients are processed for each background task invocation +# in order to avoid latency spikes. +# +# Since the default HZ value by default is conservatively set to 10, Redis +# offers, and enables by default, the ability to use an adaptive HZ value +# which will temporarily raise when there are many connected clients. +# +# When dynamic HZ is enabled, the actual configured HZ will be used +# as a baseline, but multiples of the configured HZ value will be actually +# used as needed once more clients are connected. In this way an idle +# instance will use very little CPU time while a busy instance will be +# more responsive. +dynamic-hz yes + +# When a child rewrites the AOF file, if the following option is enabled +# the file will be fsync-ed every 32 MB of data generated. This is useful +# in order to commit the file to the disk more incrementally and avoid +# big latency spikes. +aof-rewrite-incremental-fsync yes + +# When redis saves RDB file, if the following option is enabled +# the file will be fsync-ed every 32 MB of data generated. This is useful +# in order to commit the file to the disk more incrementally and avoid +# big latency spikes. +rdb-save-incremental-fsync yes + +# Redis LFU eviction (see maxmemory setting) can be tuned. However it is a good +# idea to start with the default settings and only change them after investigating +# how to improve the performances and how the keys LFU change over time, which +# is possible to inspect via the OBJECT FREQ command. +# +# There are two tunable parameters in the Redis LFU implementation: the +# counter logarithm factor and the counter decay time. It is important to +# understand what the two parameters mean before changing them. +# +# The LFU counter is just 8 bits per key, it's maximum value is 255, so Redis +# uses a probabilistic increment with logarithmic behavior. Given the value +# of the old counter, when a key is accessed, the counter is incremented in +# this way: +# +# 1. A random number R between 0 and 1 is extracted. +# 2. A probability P is calculated as 1/(old_value*lfu_log_factor+1). +# 3. The counter is incremented only if R < P. +# +# The default lfu-log-factor is 10. 
This is a table of how the frequency +# counter changes with a different number of accesses with different +# logarithmic factors: +# +# +--------+------------+------------+------------+------------+------------+ +# | factor | 100 hits | 1000 hits | 100K hits | 1M hits | 10M hits | +# +--------+------------+------------+------------+------------+------------+ +# | 0 | 104 | 255 | 255 | 255 | 255 | +# +--------+------------+------------+------------+------------+------------+ +# | 1 | 18 | 49 | 255 | 255 | 255 | +# +--------+------------+------------+------------+------------+------------+ +# | 10 | 10 | 18 | 142 | 255 | 255 | +# +--------+------------+------------+------------+------------+------------+ +# | 100 | 8 | 11 | 49 | 143 | 255 | +# +--------+------------+------------+------------+------------+------------+ +# +# NOTE: The above table was obtained by running the following commands: +# +# redis-benchmark -n 1000000 incr foo +# redis-cli object freq foo +# +# NOTE 2: The counter initial value is 5 in order to give new objects a chance +# to accumulate hits. +# +# The counter decay time is the time, in minutes, that must elapse in order +# for the key counter to be divided by two (or decremented if it has a value +# less <= 10). +# +# The default value for the lfu-decay-time is 1. A special value of 0 means to +# decay the counter every time it happens to be scanned. +# +# lfu-log-factor 10 +# lfu-decay-time 1 + +########################### ACTIVE DEFRAGMENTATION ####################### +# +# What is active defragmentation? +# ------------------------------- +# +# Active (online) defragmentation allows a Redis server to compact the +# spaces left between small allocations and deallocations of data in memory, +# thus allowing to reclaim back memory. +# +# Fragmentation is a natural process that happens with every allocator (but +# less so with Jemalloc, fortunately) and certain workloads. Normally a server +# restart is needed in order to lower the fragmentation, or at least to flush +# away all the data and create it again. However thanks to this feature +# implemented by Oran Agra for Redis 4.0 this process can happen at runtime +# in a "hot" way, while the server is running. +# +# Basically when the fragmentation is over a certain level (see the +# configuration options below) Redis will start to create new copies of the +# values in contiguous memory regions by exploiting certain specific Jemalloc +# features (in order to understand if an allocation is causing fragmentation +# and to allocate it in a better place), and at the same time, will release the +# old copies of the data. This process, repeated incrementally for all the keys +# will cause the fragmentation to drop back to normal values. +# +# Important things to understand: +# +# 1. This feature is disabled by default, and only works if you compiled Redis +# to use the copy of Jemalloc we ship with the source code of Redis. +# This is the default with Linux builds. +# +# 2. You never need to enable this feature if you don't have fragmentation +# issues. +# +# 3. Once you experience fragmentation, you can enable this feature when +# needed with the command "CONFIG SET activedefrag yes". +# +# The configuration parameters are able to fine tune the behavior of the +# defragmentation process. If you are not sure about what they mean it is +# a good idea to leave the defaults untouched. 
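+#
+# As an illustrative, editor-added note (not part of the stock upstream file):
+# the defragmentation directives listed below can also be applied to a running
+# instance from redis-cli, without editing this file or restarting, e.g.:
+#
+#   CONFIG SET activedefrag yes
+#   CONFIG SET active-defrag-ignore-bytes 100mb
+#   CONFIG GET active-defrag-*
+#
+# The values above are only examples; tune them against the fragmentation
+# ratio reported by INFO memory.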
+ +# Enabled active defragmentation +# activedefrag no + +# Minimum amount of fragmentation waste to start active defrag +# active-defrag-ignore-bytes 100mb + +# Minimum percentage of fragmentation to start active defrag +# active-defrag-threshold-lower 10 + +# Maximum percentage of fragmentation at which we use maximum effort +# active-defrag-threshold-upper 100 + +# Minimal effort for defrag in CPU percentage, to be used when the lower +# threshold is reached +# active-defrag-cycle-min 1 + +# Maximal effort for defrag in CPU percentage, to be used when the upper +# threshold is reached +# active-defrag-cycle-max 25 + +# Maximum number of set/hash/zset/list fields that will be processed from +# the main dictionary scan +# active-defrag-max-scan-fields 1000 + +# Jemalloc background thread for purging will be enabled by default +jemalloc-bg-thread yes + +# It is possible to pin different threads and processes of Redis to specific +# CPUs in your system, in order to maximize the performances of the server. +# This is useful both in order to pin different Redis threads in different +# CPUs, but also in order to make sure that multiple Redis instances running +# in the same host will be pinned to different CPUs. +# +# Normally you can do this using the "taskset" command, however it is also +# possible to this via Redis configuration directly, both in Linux and FreeBSD. +# +# You can pin the server/IO threads, bio threads, aof rewrite child process, and +# the bgsave child process. The syntax to specify the cpu list is the same as +# the taskset command: +# +# Set redis server/io threads to cpu affinity 0,2,4,6: +# server_cpulist 0-7:2 +# +# Set bio threads to cpu affinity 1,3: +# bio_cpulist 1,3 +# +# Set aof rewrite child process to cpu affinity 8,9,10,11: +# aof_rewrite_cpulist 8-11 +# +# Set bgsave child process to cpu affinity 1,10,11 +# bgsave_cpulist 1,10-11 + +# In some cases redis will emit warnings and even refuse to start if it detects +# that the system is in bad state, it is possible to suppress these warnings +# by setting the following config which takes a space delimited list of warnings +# to suppress +# +# ignore-warnings ARM64-COW-BUG diff --git a/apps/redis/6.2.14/data.yml b/apps/redis/6.2.14/data.yml new file mode 100644 index 00000000..fcf9b51d --- /dev/null +++ b/apps/redis/6.2.14/data.yml @@ -0,0 +1,23 @@ +additionalProperties: + formFields: + - default: "/home/redis" + envKey: REDIS_ROOT_PATH + labelZh: 数据持久化路径 + labelEn: Data persistence path + required: true + type: text + - default: 6379 + envKey: PANEL_APP_PORT_HTTP + labelZh: 端口 + labelEn: Port + required: true + rule: paramPort + type: number + - default: "" + envKey: REDIS_ROOT_PASSWORD + labelZh: 密码 + labelEn: Password + random: true + required: false + rule: paramComplexity + type: password diff --git a/apps/redis/6.2.14/docker-compose.yml b/apps/redis/6.2.14/docker-compose.yml new file mode 100644 index 00000000..b6a29cbd --- /dev/null +++ b/apps/redis/6.2.14/docker-compose.yml @@ -0,0 +1,28 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +services: + redis: + image: redis:6.2.14-alpine + container_name: ${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + networks: + - 1panel-network + ports: + - ${PANEL_APP_PORT_HTTP}:6379 + command: > + sh -c ' + if [ -z "${REDIS_ROOT_PASSWORD}" ]; then + redis-server /etc/redis/redis.conf + else + redis-server /etc/redis/redis.conf --requirepass ${REDIS_ROOT_PASSWORD} + fi' + volumes: + - ${REDIS_ROOT_PATH}/data:/data + - 
${REDIS_ROOT_PATH}/config/redis.conf:/etc/redis/redis.conf + - ${REDIS_ROOT_PATH}/logs:/logs diff --git a/apps/redis/6.2.14/scripts/init.sh b/apps/redis/6.2.14/scripts/init.sh new file mode 100644 index 00000000..d10108b1 --- /dev/null +++ b/apps/redis/6.2.14/scripts/init.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + mkdir -p "$REDIS_ROOT_PATH" + + mkdir -p "$REDIS_ROOT_PATH/data" + mkdir -p "$REDIS_ROOT_PATH/config" + mkdir -p "$REDIS_ROOT_PATH/logs" + + cp ./config/redis.conf "$REDIS_ROOT_PATH/config/redis.conf" + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/redis/6.2.14/scripts/uninstall.sh b/apps/redis/6.2.14/scripts/uninstall.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/redis/6.2.14/scripts/uninstall.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/redis/6.2.14/scripts/upgrade.sh b/apps/redis/6.2.14/scripts/upgrade.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/redis/6.2.14/scripts/upgrade.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/redis/7.2.5/config/redis.conf b/apps/redis/7.2.5/config/redis.conf new file mode 100644 index 00000000..8795ae29 --- /dev/null +++ b/apps/redis/7.2.5/config/redis.conf @@ -0,0 +1,2276 @@ +# Redis configuration file example. +# +# Note that in order to read the configuration file, Redis must be +# started with the file path as first argument: +# +# ./redis-server /path/to/redis.conf + +# Note on units: when memory size is needed, it is possible to specify +# it in the usual form of 1k 5GB 4M and so forth: +# +# 1k => 1000 bytes +# 1kb => 1024 bytes +# 1m => 1000000 bytes +# 1mb => 1024*1024 bytes +# 1g => 1000000000 bytes +# 1gb => 1024*1024*1024 bytes +# +# units are case insensitive so 1GB 1Gb 1gB are all the same. + +################################## INCLUDES ################################### + +# Include one or more other config files here. This is useful if you +# have a standard template that goes to all Redis servers but also need +# to customize a few per-server settings. Include files can include +# other files, so use this wisely. +# +# Note that option "include" won't be rewritten by command "CONFIG REWRITE" +# from admin or Redis Sentinel. Since Redis always uses the last processed +# line as value of a configuration directive, you'd better put includes +# at the beginning of this file to avoid overwriting config change at runtime. +# +# If instead you are interested in using includes to override configuration +# options, it is better to use include as the last line. +# +# Included paths may contain wildcards. All files matching the wildcards will +# be included in alphabetical order. +# Note that if an include path contains a wildcards but no files match it when +# the server is started, the include statement will be ignored and no error will +# be emitted. It is safe, therefore, to include wildcard files from empty +# directories. +# +# include /path/to/local.conf +# include /path/to/other.conf +# include /path/to/fragments/*.conf +# + +################################## MODULES ##################################### + +# Load modules at startup. If the server is not able to load modules +# it will abort. It is possible to use multiple loadmodule directives. 
+# +# loadmodule /path/to/my_module.so +# loadmodule /path/to/other_module.so + +################################## NETWORK ##################################### + +# By default, if no "bind" configuration directive is specified, Redis listens +# for connections from all available network interfaces on the host machine. +# It is possible to listen to just one or multiple selected interfaces using +# the "bind" configuration directive, followed by one or more IP addresses. +# Each address can be prefixed by "-", which means that redis will not fail to +# start if the address is not available. Being not available only refers to +# addresses that does not correspond to any network interface. Addresses that +# are already in use will always fail, and unsupported protocols will always BE +# silently skipped. +# +# Examples: +# +# bind 192.168.1.100 10.0.0.1 # listens on two specific IPv4 addresses +# bind 127.0.0.1 ::1 # listens on loopback IPv4 and IPv6 +# bind * -::* # like the default, all available interfaces +# +# ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the +# internet, binding to all the interfaces is dangerous and will expose the +# instance to everybody on the internet. So by default we uncomment the +# following bind directive, that will force Redis to listen only on the +# IPv4 and IPv6 (if available) loopback interface addresses (this means Redis +# will only be able to accept client connections from the same host that it is +# running on). +# +# IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES +# COMMENT OUT THE FOLLOWING LINE. +# +# You will also need to set a password unless you explicitly disable protected +# mode. +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +bind 0.0.0.0 + +# By default, outgoing connections (from replica to master, from Sentinel to +# instances, cluster bus, etc.) are not bound to a specific local address. In +# most cases, this means the operating system will handle that based on routing +# and the interface through which the connection goes out. +# +# Using bind-source-addr it is possible to configure a specific address to bind +# to, which may also affect how the connection gets routed. +# +# Example: +# +# bind-source-addr 10.0.0.1 + +# Protected mode is a layer of security protection, in order to avoid that +# Redis instances left open on the internet are accessed and exploited. +# +# When protected mode is on and the default user has no password, the server +# only accepts local connections from the IPv4 address (127.0.0.1), IPv6 address +# (::1) or Unix domain sockets. +# +# By default protected mode is enabled. You should disable it only if +# you are sure you want clients from other hosts to connect to Redis +# even if no authentication is configured. +protected-mode yes + +# Redis uses default hardened security configuration directives to reduce the +# attack surface on innocent users. Therefore, several sensitive configuration +# directives are immutable, and some potentially-dangerous commands are blocked. +# +# Configuration directives that control files that Redis writes to (e.g., 'dir' +# and 'dbfilename') and that aren't usually modified during runtime +# are protected by making them immutable. +# +# Commands that can increase the attack surface of Redis and that aren't usually +# called by users are blocked by default. 
+# +# These can be exposed to either all connections or just local ones by setting +# each of the configs listed below to either of these values: +# +# no - Block for any connection (remain immutable) +# yes - Allow for any connection (no protection) +# local - Allow only for local connections. Ones originating from the +# IPv4 address (127.0.0.1), IPv6 address (::1) or Unix domain sockets. +# +# enable-protected-configs no +# enable-debug-command no +# enable-module-command no + +# Accept connections on the specified port, default is 6379 (IANA #815344). +# If port 0 is specified Redis will not listen on a TCP socket. +port 6379 + +# TCP listen() backlog. +# +# In high requests-per-second environments you need a high backlog in order +# to avoid slow clients connection issues. Note that the Linux kernel +# will silently truncate it to the value of /proc/sys/net/core/somaxconn so +# make sure to raise both the value of somaxconn and tcp_max_syn_backlog +# in order to get the desired effect. +tcp-backlog 511 + +# Unix socket. +# +# Specify the path for the Unix socket that will be used to listen for +# incoming connections. There is no default, so Redis will not listen +# on a unix socket when not specified. +# +# unixsocket /run/redis.sock +# unixsocketperm 700 + +# Close the connection after a client is idle for N seconds (0 to disable) +timeout 0 + +# TCP keepalive. +# +# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence +# of communication. This is useful for two reasons: +# +# 1) Detect dead peers. +# 2) Force network equipment in the middle to consider the connection to be +# alive. +# +# On Linux, the specified value (in seconds) is the period used to send ACKs. +# Note that to close the connection the double of the time is needed. +# On other kernels the period depends on the kernel configuration. +# +# A reasonable value for this option is 300 seconds, which is the new +# Redis default starting with Redis 3.2.1. +tcp-keepalive 300 + +# Apply OS-specific mechanism to mark the listening socket with the specified +# ID, to support advanced routing and filtering capabilities. +# +# On Linux, the ID represents a connection mark. +# On FreeBSD, the ID represents a socket cookie ID. +# On OpenBSD, the ID represents a route table ID. +# +# The default value is 0, which implies no marking is required. +# socket-mark-id 0 + +################################# TLS/SSL ##################################### + +# By default, TLS/SSL is disabled. To enable it, the "tls-port" configuration +# directive can be used to define TLS-listening ports. To enable TLS on the +# default port, use: +# +# port 0 +# tls-port 6379 + +# Configure a X.509 certificate and private key to use for authenticating the +# server to connected clients, masters or cluster peers. These files should be +# PEM formatted. +# +# tls-cert-file redis.crt +# tls-key-file redis.key +# +# If the key file is encrypted using a passphrase, it can be included here +# as well. +# +# tls-key-file-pass secret + +# Normally Redis uses the same certificate for both server functions (accepting +# connections) and client functions (replicating from a master, establishing +# cluster bus connections, etc.). +# +# Sometimes certificates are issued with attributes that designate them as +# client-only or server-only certificates. In that case it may be desired to use +# different certificates for incoming (server) and outgoing (client) +# connections. 
To do that, use the following directives: +# +# tls-client-cert-file client.crt +# tls-client-key-file client.key +# +# If the key file is encrypted using a passphrase, it can be included here +# as well. +# +# tls-client-key-file-pass secret + +# Configure a DH parameters file to enable Diffie-Hellman (DH) key exchange, +# required by older versions of OpenSSL (<3.0). Newer versions do not require +# this configuration and recommend against it. +# +# tls-dh-params-file redis.dh + +# Configure a CA certificate(s) bundle or directory to authenticate TLS/SSL +# clients and peers. Redis requires an explicit configuration of at least one +# of these, and will not implicitly use the system wide configuration. +# +# tls-ca-cert-file ca.crt +# tls-ca-cert-dir /etc/ssl/certs + +# By default, clients (including replica servers) on a TLS port are required +# to authenticate using valid client side certificates. +# +# If "no" is specified, client certificates are not required and not accepted. +# If "optional" is specified, client certificates are accepted and must be +# valid if provided, but are not required. +# +# tls-auth-clients no +# tls-auth-clients optional + +# By default, a Redis replica does not attempt to establish a TLS connection +# with its master. +# +# Use the following directive to enable TLS on replication links. +# +# tls-replication yes + +# By default, the Redis Cluster bus uses a plain TCP connection. To enable +# TLS for the bus protocol, use the following directive: +# +# tls-cluster yes + +# By default, only TLSv1.2 and TLSv1.3 are enabled and it is highly recommended +# that older formally deprecated versions are kept disabled to reduce the attack surface. +# You can explicitly specify TLS versions to support. +# Allowed values are case insensitive and include "TLSv1", "TLSv1.1", "TLSv1.2", +# "TLSv1.3" (OpenSSL >= 1.1.1) or any combination. +# To enable only TLSv1.2 and TLSv1.3, use: +# +# tls-protocols "TLSv1.2 TLSv1.3" + +# Configure allowed ciphers. See the ciphers(1ssl) manpage for more information +# about the syntax of this string. +# +# Note: this configuration applies only to <= TLSv1.2. +# +# tls-ciphers DEFAULT:!MEDIUM + +# Configure allowed TLSv1.3 ciphersuites. See the ciphers(1ssl) manpage for more +# information about the syntax of this string, and specifically for TLSv1.3 +# ciphersuites. +# +# tls-ciphersuites TLS_CHACHA20_POLY1305_SHA256 + +# When choosing a cipher, use the server's preference instead of the client +# preference. By default, the server follows the client's preference. +# +# tls-prefer-server-ciphers yes + +# By default, TLS session caching is enabled to allow faster and less expensive +# reconnections by clients that support it. Use the following directive to disable +# caching. +# +# tls-session-caching no + +# Change the default number of TLS sessions cached. A zero value sets the cache +# to unlimited size. The default size is 20480. +# +# tls-session-cache-size 5000 + +# Change the default timeout of cached TLS sessions. The default timeout is 300 +# seconds. +# +# tls-session-cache-timeout 60 + +################################# GENERAL ##################################### + +# By default Redis does not run as a daemon. Use 'yes' if you need it. +# Note that Redis will write a pid file in /var/run/redis.pid when daemonized. +# When Redis is supervised by upstart or systemd, this parameter has no impact. +daemonize no + +# If you run Redis from upstart or systemd, Redis can interact with your +# supervision tree. 
Options: +# supervised no - no supervision interaction +# supervised upstart - signal upstart by putting Redis into SIGSTOP mode +# requires "expect stop" in your upstart job config +# supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET +# on startup, and updating Redis status on a regular +# basis. +# supervised auto - detect upstart or systemd method based on +# UPSTART_JOB or NOTIFY_SOCKET environment variables +# Note: these supervision methods only signal "process is ready." +# They do not enable continuous pings back to your supervisor. +# +# The default is "no". To run under upstart/systemd, you can simply uncomment +# the line below: +# +# supervised auto + +# If a pid file is specified, Redis writes it where specified at startup +# and removes it at exit. +# +# When the server runs non daemonized, no pid file is created if none is +# specified in the configuration. When the server is daemonized, the pid file +# is used even if not specified, defaulting to "/var/run/redis.pid". +# +# Creating a pid file is best effort: if Redis is not able to create it +# nothing bad happens, the server will start and run normally. +# +# Note that on modern Linux systems "/run/redis.pid" is more conforming +# and should be used instead. +pidfile /var/run/redis_6379.pid + +# Specify the server verbosity level. +# This can be one of: +# debug (a lot of information, useful for development/testing) +# verbose (many rarely useful info, but not a mess like the debug level) +# notice (moderately verbose, what you want in production probably) +# warning (only very important / critical messages are logged) +loglevel notice + +# Specify the log file name. Also the empty string can be used to force +# Redis to log on the standard output. Note that if you use standard +# output for logging but daemonize, logs will be sent to /dev/null +logfile "" + +# To enable logging to the system logger, just set 'syslog-enabled' to yes, +# and optionally update the other syslog parameters to suit your needs. +# syslog-enabled no + +# Specify the syslog identity. +# syslog-ident redis + +# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. +# syslog-facility local0 + +# To disable the built in crash log, which will possibly produce cleaner core +# dumps when they are needed, uncomment the following: +# +# crash-log-enabled no + +# To disable the fast memory check that's run as part of the crash log, which +# will possibly let redis terminate sooner, uncomment the following: +# +# crash-memcheck-enabled no + +# Set the number of databases. The default database is DB 0, you can select +# a different one on a per-connection basis using SELECT where +# dbid is a number between 0 and 'databases'-1 +databases 16 + +# By default Redis shows an ASCII art logo only when started to log to the +# standard output and if the standard output is a TTY and syslog logging is +# disabled. Basically this means that normally a logo is displayed only in +# interactive sessions. +# +# However it is possible to force the pre-4.0 behavior and always show a +# ASCII art logo in startup logs by setting the following option to yes. +always-show-logo no + +# By default, Redis modifies the process title (as seen in 'top' and 'ps') to +# provide some runtime information. It is possible to disable this and leave +# the process name as executed by setting the following to no. +set-proc-title yes + +# When changing the process title, Redis uses the following template to construct +# the modified title. 
+# +# Template variables are specified in curly brackets. The following variables are +# supported: +# +# {title} Name of process as executed if parent, or type of child process. +# {listen-addr} Bind address or '*' followed by TCP or TLS port listening on, or +# Unix socket if only that's available. +# {server-mode} Special mode, i.e. "[sentinel]" or "[cluster]". +# {port} TCP port listening on, or 0. +# {tls-port} TLS port listening on, or 0. +# {unixsocket} Unix domain socket listening on, or "". +# {config-file} Name of configuration file used. +# +proc-title-template "{title} {listen-addr} {server-mode}" + +################################ SNAPSHOTTING ################################ + +# Save the DB to disk. +# +# save [ ...] +# +# Redis will save the DB if the given number of seconds elapsed and it +# surpassed the given number of write operations against the DB. +# +# Snapshotting can be completely disabled with a single empty string argument +# as in following example: +# +# save "" +# +# Unless specified otherwise, by default Redis will save the DB: +# * After 3600 seconds (an hour) if at least 1 change was performed +# * After 300 seconds (5 minutes) if at least 100 changes were performed +# * After 60 seconds if at least 10000 changes were performed +# +# You can set these explicitly by uncommenting the following line. +# +# save 3600 1 300 100 60 10000 + +# By default Redis will stop accepting writes if RDB snapshots are enabled +# (at least one save point) and the latest background save failed. +# This will make the user aware (in a hard way) that data is not persisting +# on disk properly, otherwise chances are that no one will notice and some +# disaster will happen. +# +# If the background saving process will start working again Redis will +# automatically allow writes again. +# +# However if you have setup your proper monitoring of the Redis server +# and persistence, you may want to disable this feature so that Redis will +# continue to work as usual even if there are problems with disk, +# permissions, and so forth. +stop-writes-on-bgsave-error yes + +# Compress string objects using LZF when dump .rdb databases? +# By default compression is enabled as it's almost always a win. +# If you want to save some CPU in the saving child set it to 'no' but +# the dataset will likely be bigger if you have compressible values or keys. +rdbcompression yes + +# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. +# This makes the format more resistant to corruption but there is a performance +# hit to pay (around 10%) when saving and loading RDB files, so you can disable it +# for maximum performances. +# +# RDB files created with checksum disabled have a checksum of zero that will +# tell the loading code to skip the check. +rdbchecksum yes + +# Enables or disables full sanitization checks for ziplist and listpack etc when +# loading an RDB or RESTORE payload. This reduces the chances of a assertion or +# crash later on while processing commands. +# Options: +# no - Never perform full sanitization +# yes - Always perform full sanitization +# clients - Perform full sanitization only for user connections. +# Excludes: RDB files, RESTORE commands received from the master +# connection, and client connections which have the +# skip-sanitize-payload ACL flag. +# The default should be 'clients' but since it currently affects cluster +# resharding via MIGRATE, it is temporarily set to 'no' by default. 
+
+#
+# sanitize-dump-payload no
+
+# The filename where to dump the DB
+dbfilename dump.rdb
+
+# Remove RDB files used by replication in instances without persistence
+# enabled. By default this option is disabled, however there are environments
+# where for regulations or other security concerns, RDB files persisted on
+# disk by masters in order to feed replicas, or stored on disk by replicas
+# in order to load them for the initial synchronization, should be deleted
+# ASAP. Note that this option ONLY WORKS in instances that have both AOF
+# and RDB persistence disabled, otherwise it is completely ignored.
+#
+# An alternative (and sometimes better) way to obtain the same effect is
+# to use diskless replication on both master and replica instances. However
+# in the case of replicas, diskless is not always an option.
+rdb-del-sync-files no
+
+# The working directory.
+#
+# The DB will be written inside this directory, with the filename specified
+# above using the 'dbfilename' configuration directive.
+#
+# The Append Only File will also be created inside this directory.
+#
+# Note that you must specify a directory here, not a file name.
+dir ./
+
+################################# REPLICATION #################################
+
+# Master-Replica replication. Use replicaof to make a Redis instance a copy of
+# another Redis server. A few things to understand ASAP about Redis replication.
+#
+#   +------------------+      +---------------+
+#   |      Master      | ---> |    Replica    |
+#   | (receive writes) |      |  (exact copy) |
+#   +------------------+      +---------------+
+#
+# 1) Redis replication is asynchronous, but you can configure a master to
+#    stop accepting writes if it appears to be not connected with at least
+#    a given number of replicas.
+# 2) Redis replicas are able to perform a partial resynchronization with the
+#    master if the replication link is lost for a relatively small amount of
+#    time. You may want to configure the replication backlog size (see the next
+#    sections of this file) with a sensible value depending on your needs.
+# 3) Replication is automatic and does not need user intervention. After a
+#    network partition replicas automatically try to reconnect to masters
+#    and resynchronize with them.
+#
+# replicaof <masterip> <masterport>
+
+# If the master is password protected (using the "requirepass" configuration
+# directive below) it is possible to tell the replica to authenticate before
+# starting the replication synchronization process, otherwise the master will
+# refuse the replica request.
+#
+# masterauth <master-password>
+#
+# However this is not enough if you are using Redis ACLs (for Redis version
+# 6 or greater), and the default user is not capable of running the PSYNC
+# command and/or other commands needed for replication. In this case it's
+# better to configure a special user to use with replication, and specify the
+# masteruser configuration as such:
+#
+# masteruser <username>
+#
+# When masteruser is specified, the replica will authenticate against its
+# master using the new AUTH form: AUTH <username> <password>.
+
+# When a replica loses its connection with the master, or when the replication
+# is still in progress, the replica can act in two different ways:
+#
+# 1) if replica-serve-stale-data is set to 'yes' (the default) the replica will
+#    still reply to client requests, possibly with out of date data, or the
+#    data set may just be empty if this is the first synchronization.
+# +# 2) If replica-serve-stale-data is set to 'no' the replica will reply with error +# "MASTERDOWN Link with MASTER is down and replica-serve-stale-data is set to 'no'" +# to all data access commands, excluding commands such as: +# INFO, REPLICAOF, AUTH, SHUTDOWN, REPLCONF, ROLE, CONFIG, SUBSCRIBE, +# UNSUBSCRIBE, PSUBSCRIBE, PUNSUBSCRIBE, PUBLISH, PUBSUB, COMMAND, POST, +# HOST and LATENCY. +# +replica-serve-stale-data yes + +# You can configure a replica instance to accept writes or not. Writing against +# a replica instance may be useful to store some ephemeral data (because data +# written on a replica will be easily deleted after resync with the master) but +# may also cause problems if clients are writing to it because of a +# misconfiguration. +# +# Since Redis 2.6 by default replicas are read-only. +# +# Note: read only replicas are not designed to be exposed to untrusted clients +# on the internet. It's just a protection layer against misuse of the instance. +# Still a read only replica exports by default all the administrative commands +# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve +# security of read only replicas using 'rename-command' to shadow all the +# administrative / dangerous commands. +replica-read-only yes + +# Replication SYNC strategy: disk or socket. +# +# New replicas and reconnecting replicas that are not able to continue the +# replication process just receiving differences, need to do what is called a +# "full synchronization". An RDB file is transmitted from the master to the +# replicas. +# +# The transmission can happen in two different ways: +# +# 1) Disk-backed: The Redis master creates a new process that writes the RDB +# file on disk. Later the file is transferred by the parent +# process to the replicas incrementally. +# 2) Diskless: The Redis master creates a new process that directly writes the +# RDB file to replica sockets, without touching the disk at all. +# +# With disk-backed replication, while the RDB file is generated, more replicas +# can be queued and served with the RDB file as soon as the current child +# producing the RDB file finishes its work. With diskless replication instead +# once the transfer starts, new replicas arriving will be queued and a new +# transfer will start when the current one terminates. +# +# When diskless replication is used, the master waits a configurable amount of +# time (in seconds) before starting the transfer in the hope that multiple +# replicas will arrive and the transfer can be parallelized. +# +# With slow disks and fast (large bandwidth) networks, diskless replication +# works better. +repl-diskless-sync yes + +# When diskless replication is enabled, it is possible to configure the delay +# the server waits in order to spawn the child that transfers the RDB via socket +# to the replicas. +# +# This is important since once the transfer starts, it is not possible to serve +# new replicas arriving, that will be queued for the next RDB transfer, so the +# server waits a delay in order to let more replicas arrive. +# +# The delay is specified in seconds, and by default is 5 seconds. To disable +# it entirely just set it to 0 seconds and the transfer will start ASAP. +repl-diskless-sync-delay 5 + +# When diskless replication is enabled with a delay, it is possible to let +# the replication start before the maximum delay is reached if the maximum +# number of replicas expected have connected. Default of 0 means that the +# maximum is not defined and Redis will wait the full delay. 
+repl-diskless-sync-max-replicas 0 + +# ----------------------------------------------------------------------------- +# WARNING: RDB diskless load is experimental. Since in this setup the replica +# does not immediately store an RDB on disk, it may cause data loss during +# failovers. RDB diskless load + Redis modules not handling I/O reads may also +# cause Redis to abort in case of I/O errors during the initial synchronization +# stage with the master. Use only if you know what you are doing. +# ----------------------------------------------------------------------------- +# +# Replica can load the RDB it reads from the replication link directly from the +# socket, or store the RDB to a file and read that file after it was completely +# received from the master. +# +# In many cases the disk is slower than the network, and storing and loading +# the RDB file may increase replication time (and even increase the master's +# Copy on Write memory and replica buffers). +# However, parsing the RDB file directly from the socket may mean that we have +# to flush the contents of the current database before the full rdb was +# received. For this reason we have the following options: +# +# "disabled" - Don't use diskless load (store the rdb file to the disk first) +# "on-empty-db" - Use diskless load only when it is completely safe. +# "swapdb" - Keep current db contents in RAM while parsing the data directly +# from the socket. Replicas in this mode can keep serving current +# data set while replication is in progress, except for cases where +# they can't recognize master as having a data set from same +# replication history. +# Note that this requires sufficient memory, if you don't have it, +# you risk an OOM kill. +repl-diskless-load disabled + +# Master send PINGs to its replicas in a predefined interval. It's possible to +# change this interval with the repl_ping_replica_period option. The default +# value is 10 seconds. +# +# repl-ping-replica-period 10 + +# The following option sets the replication timeout for: +# +# 1) Bulk transfer I/O during SYNC, from the point of view of replica. +# 2) Master timeout from the point of view of replicas (data, pings). +# 3) Replica timeout from the point of view of masters (REPLCONF ACK pings). +# +# It is important to make sure that this value is greater than the value +# specified for repl-ping-replica-period otherwise a timeout will be detected +# every time there is low traffic between the master and the replica. The default +# value is 60 seconds. +# +# repl-timeout 60 + +# Disable TCP_NODELAY on the replica socket after SYNC? +# +# If you select "yes" Redis will use a smaller number of TCP packets and +# less bandwidth to send data to replicas. But this can add a delay for +# the data to appear on the replica side, up to 40 milliseconds with +# Linux kernels using a default configuration. +# +# If you select "no" the delay for data to appear on the replica side will +# be reduced but more bandwidth will be used for replication. +# +# By default we optimize for low latency, but in very high traffic conditions +# or when the master and replicas are many hops away, turning this to "yes" may +# be a good idea. +repl-disable-tcp-nodelay no + +# Set the replication backlog size. 
The backlog is a buffer that accumulates +# replica data when replicas are disconnected for some time, so that when a +# replica wants to reconnect again, often a full resync is not needed, but a +# partial resync is enough, just passing the portion of data the replica +# missed while disconnected. +# +# The bigger the replication backlog, the longer the replica can endure the +# disconnect and later be able to perform a partial resynchronization. +# +# The backlog is only allocated if there is at least one replica connected. +# +# repl-backlog-size 1mb + +# After a master has no connected replicas for some time, the backlog will be +# freed. The following option configures the amount of seconds that need to +# elapse, starting from the time the last replica disconnected, for the backlog +# buffer to be freed. +# +# Note that replicas never free the backlog for timeout, since they may be +# promoted to masters later, and should be able to correctly "partially +# resynchronize" with other replicas: hence they should always accumulate backlog. +# +# A value of 0 means to never release the backlog. +# +# repl-backlog-ttl 3600 + +# The replica priority is an integer number published by Redis in the INFO +# output. It is used by Redis Sentinel in order to select a replica to promote +# into a master if the master is no longer working correctly. +# +# A replica with a low priority number is considered better for promotion, so +# for instance if there are three replicas with priority 10, 100, 25 Sentinel +# will pick the one with priority 10, that is the lowest. +# +# However a special priority of 0 marks the replica as not able to perform the +# role of master, so a replica with priority of 0 will never be selected by +# Redis Sentinel for promotion. +# +# By default the priority is 100. +replica-priority 100 + +# The propagation error behavior controls how Redis will behave when it is +# unable to handle a command being processed in the replication stream from a master +# or processed while reading from an AOF file. Errors that occur during propagation +# are unexpected, and can cause data inconsistency. However, there are edge cases +# in earlier versions of Redis where it was possible for the server to replicate or persist +# commands that would fail on future versions. For this reason the default behavior +# is to ignore such errors and continue processing commands. +# +# If an application wants to ensure there is no data divergence, this configuration +# should be set to 'panic' instead. The value can also be set to 'panic-on-replicas' +# to only panic when a replica encounters an error on the replication stream. One of +# these two panic values will become the default value in the future once there are +# sufficient safety mechanisms in place to prevent false positive crashes. +# +# propagation-error-behavior ignore + +# Replica ignore disk write errors controls the behavior of a replica when it is +# unable to persist a write command received from its master to disk. By default, +# this configuration is set to 'no' and will crash the replica in this condition. +# It is not recommended to change this default, however in order to be compatible +# with older versions of Redis this config can be toggled to 'yes' which will just +# log a warning and execute the write command it got from the master. +# +# replica-ignore-disk-write-errors no + +# ----------------------------------------------------------------------------- +# By default, Redis Sentinel includes all replicas in its reports. 
A replica +# can be excluded from Redis Sentinel's announcements. An unannounced replica +# will be ignored by the 'sentinel replicas ' command and won't be +# exposed to Redis Sentinel's clients. +# +# This option does not change the behavior of replica-priority. Even with +# replica-announced set to 'no', the replica can be promoted to master. To +# prevent this behavior, set replica-priority to 0. +# +# replica-announced yes + +# It is possible for a master to stop accepting writes if there are less than +# N replicas connected, having a lag less or equal than M seconds. +# +# The N replicas need to be in "online" state. +# +# The lag in seconds, that must be <= the specified value, is calculated from +# the last ping received from the replica, that is usually sent every second. +# +# This option does not GUARANTEE that N replicas will accept the write, but +# will limit the window of exposure for lost writes in case not enough replicas +# are available, to the specified number of seconds. +# +# For example to require at least 3 replicas with a lag <= 10 seconds use: +# +# min-replicas-to-write 3 +# min-replicas-max-lag 10 +# +# Setting one or the other to 0 disables the feature. +# +# By default min-replicas-to-write is set to 0 (feature disabled) and +# min-replicas-max-lag is set to 10. + +# A Redis master is able to list the address and port of the attached +# replicas in different ways. For example the "INFO replication" section +# offers this information, which is used, among other tools, by +# Redis Sentinel in order to discover replica instances. +# Another place where this info is available is in the output of the +# "ROLE" command of a master. +# +# The listed IP address and port normally reported by a replica is +# obtained in the following way: +# +# IP: The address is auto detected by checking the peer address +# of the socket used by the replica to connect with the master. +# +# Port: The port is communicated by the replica during the replication +# handshake, and is normally the port that the replica is using to +# listen for connections. +# +# However when port forwarding or Network Address Translation (NAT) is +# used, the replica may actually be reachable via different IP and port +# pairs. The following two options can be used by a replica in order to +# report to its master a specific set of IP and port, so that both INFO +# and ROLE will report those values. +# +# There is no need to use both the options if you need to override just +# the port or the IP address. +# +# replica-announce-ip 5.5.5.5 +# replica-announce-port 1234 + +############################### KEYS TRACKING ################################# + +# Redis implements server assisted support for client side caching of values. +# This is implemented using an invalidation table that remembers, using +# a radix key indexed by key name, what clients have which keys. In turn +# this is used in order to send invalidation messages to clients. Please +# check this page to understand more about the feature: +# +# https://redis.io/topics/client-side-caching +# +# When tracking is enabled for a client, all the read only queries are assumed +# to be cached: this will force Redis to store information in the invalidation +# table. When keys are modified, such information is flushed away, and +# invalidation messages are sent to the clients. However if the workload is +# heavily dominated by reads, Redis could use more and more memory in order +# to track the keys fetched by many clients. 
+# +# For this reason it is possible to configure a maximum fill value for the +# invalidation table. By default it is set to 1M of keys, and once this limit +# is reached, Redis will start to evict keys in the invalidation table +# even if they were not modified, just to reclaim memory: this will in turn +# force the clients to invalidate the cached values. Basically the table +# maximum size is a trade off between the memory you want to spend server +# side to track information about who cached what, and the ability of clients +# to retain cached objects in memory. +# +# If you set the value to 0, it means there are no limits, and Redis will +# retain as many keys as needed in the invalidation table. +# In the "stats" INFO section, you can find information about the number of +# keys in the invalidation table at every given moment. +# +# Note: when key tracking is used in broadcasting mode, no memory is used +# in the server side so this setting is useless. +# +# tracking-table-max-keys 1000000 + +################################## SECURITY ################################### + +# Warning: since Redis is pretty fast, an outside user can try up to +# 1 million passwords per second against a modern box. This means that you +# should use very strong passwords, otherwise they will be very easy to break. +# Note that because the password is really a shared secret between the client +# and the server, and should not be memorized by any human, the password +# can be easily a long string from /dev/urandom or whatever, so by using a +# long and unguessable password no brute force attack will be possible. + +# Redis ACL users are defined in the following format: +# +# user ... acl rules ... +# +# For example: +# +# user worker +@list +@connection ~jobs:* on >ffa9203c493aa99 +# +# The special username "default" is used for new connections. If this user +# has the "nopass" rule, then new connections will be immediately authenticated +# as the "default" user without the need of any password provided via the +# AUTH command. Otherwise if the "default" user is not flagged with "nopass" +# the connections will start in not authenticated state, and will require +# AUTH (or the HELLO command AUTH option) in order to be authenticated and +# start to work. +# +# The ACL rules that describe what a user can do are the following: +# +# on Enable the user: it is possible to authenticate as this user. +# off Disable the user: it's no longer possible to authenticate +# with this user, however the already authenticated connections +# will still work. +# skip-sanitize-payload RESTORE dump-payload sanitization is skipped. +# sanitize-payload RESTORE dump-payload is sanitized (default). +# + Allow the execution of that command. +# May be used with `|` for allowing subcommands (e.g "+config|get") +# - Disallow the execution of that command. +# May be used with `|` for blocking subcommands (e.g "-config|set") +# +@ Allow the execution of all the commands in such category +# with valid categories are like @admin, @set, @sortedset, ... +# and so forth, see the full list in the server.c file where +# the Redis command table is described and defined. +# The special category @all means all the commands, but currently +# present in the server, and that will be loaded in the future +# via modules. +# +|first-arg Allow a specific first argument of an otherwise +# disabled command. It is only supported on commands with +# no sub-commands, and is not allowed as negative form +# like -SELECT|1, only additive starting with "+". 
This +# feature is deprecated and may be removed in the future. +# allcommands Alias for +@all. Note that it implies the ability to execute +# all the future commands loaded via the modules system. +# nocommands Alias for -@all. +# ~ Add a pattern of keys that can be mentioned as part of +# commands. For instance ~* allows all the keys. The pattern +# is a glob-style pattern like the one of KEYS. +# It is possible to specify multiple patterns. +# %R~ Add key read pattern that specifies which keys can be read +# from. +# %W~ Add key write pattern that specifies which keys can be +# written to. +# allkeys Alias for ~* +# resetkeys Flush the list of allowed keys patterns. +# & Add a glob-style pattern of Pub/Sub channels that can be +# accessed by the user. It is possible to specify multiple channel +# patterns. +# allchannels Alias for &* +# resetchannels Flush the list of allowed channel patterns. +# > Add this password to the list of valid password for the user. +# For example >mypass will add "mypass" to the list. +# This directive clears the "nopass" flag (see later). +# < Remove this password from the list of valid passwords. +# nopass All the set passwords of the user are removed, and the user +# is flagged as requiring no password: it means that every +# password will work against this user. If this directive is +# used for the default user, every new connection will be +# immediately authenticated with the default user without +# any explicit AUTH command required. Note that the "resetpass" +# directive will clear this condition. +# resetpass Flush the list of allowed passwords. Moreover removes the +# "nopass" status. After "resetpass" the user has no associated +# passwords and there is no way to authenticate without adding +# some password (or setting it as "nopass" later). +# reset Performs the following actions: resetpass, resetkeys, off, +# -@all. The user returns to the same state it has immediately +# after its creation. +# () Create a new selector with the options specified within the +# parentheses and attach it to the user. Each option should be +# space separated. The first character must be ( and the last +# character must be ). +# clearselectors Remove all of the currently attached selectors. +# Note this does not change the "root" user permissions, +# which are the permissions directly applied onto the +# user (outside the parentheses). +# +# ACL rules can be specified in any order: for instance you can start with +# passwords, then flags, or key patterns. However note that the additive +# and subtractive rules will CHANGE MEANING depending on the ordering. +# For instance see the following example: +# +# user alice on +@all -DEBUG ~* >somepassword +# +# This will allow "alice" to use all the commands with the exception of the +# DEBUG command, since +@all added all the commands to the set of the commands +# alice can use, and later DEBUG was removed. However if we invert the order +# of two ACL rules the result will be different: +# +# user alice on -DEBUG +@all ~* >somepassword +# +# Now DEBUG was removed when alice had yet no commands in the set of allowed +# commands, later all the commands are added, so the user will be able to +# execute everything. +# +# Basically ACL rules are processed left-to-right. +# +# The following is a list of command categories and their meanings: +# * keyspace - Writing or reading from keys, databases, or their metadata +# in a type agnostic way. Includes DEL, RESTORE, DUMP, RENAME, EXISTS, DBSIZE, +# KEYS, EXPIRE, TTL, FLUSHALL, etc. 
Commands that may modify the keyspace, +# key or metadata will also have `write` category. Commands that only read +# the keyspace, key or metadata will have the `read` category. +# * read - Reading from keys (values or metadata). Note that commands that don't +# interact with keys, will not have either `read` or `write`. +# * write - Writing to keys (values or metadata) +# * admin - Administrative commands. Normal applications will never need to use +# these. Includes REPLICAOF, CONFIG, DEBUG, SAVE, MONITOR, ACL, SHUTDOWN, etc. +# * dangerous - Potentially dangerous (each should be considered with care for +# various reasons). This includes FLUSHALL, MIGRATE, RESTORE, SORT, KEYS, +# CLIENT, DEBUG, INFO, CONFIG, SAVE, REPLICAOF, etc. +# * connection - Commands affecting the connection or other connections. +# This includes AUTH, SELECT, COMMAND, CLIENT, ECHO, PING, etc. +# * blocking - Potentially blocking the connection until released by another +# command. +# * fast - Fast O(1) commands. May loop on the number of arguments, but not the +# number of elements in the key. +# * slow - All commands that are not Fast. +# * pubsub - PUBLISH / SUBSCRIBE related +# * transaction - WATCH / MULTI / EXEC related commands. +# * scripting - Scripting related. +# * set - Data type: sets related. +# * sortedset - Data type: zsets related. +# * list - Data type: lists related. +# * hash - Data type: hashes related. +# * string - Data type: strings related. +# * bitmap - Data type: bitmaps related. +# * hyperloglog - Data type: hyperloglog related. +# * geo - Data type: geo related. +# * stream - Data type: streams related. +# +# For more information about ACL configuration please refer to +# the Redis web site at https://redis.io/topics/acl + +# ACL LOG +# +# The ACL Log tracks failed commands and authentication events associated +# with ACLs. The ACL Log is useful to troubleshoot failed commands blocked +# by ACLs. The ACL Log is stored in memory. You can reclaim memory with +# ACL LOG RESET. Define the maximum entry length of the ACL Log below. +acllog-max-len 128 + +# Using an external ACL file +# +# Instead of configuring users here in this file, it is possible to use +# a stand-alone file just listing users. The two methods cannot be mixed: +# if you configure users here and at the same time you activate the external +# ACL file, the server will refuse to start. +# +# The format of the external ACL user file is exactly the same as the +# format that is used inside redis.conf to describe users. +# +# aclfile /etc/redis/users.acl + +# IMPORTANT NOTE: starting with Redis 6 "requirepass" is just a compatibility +# layer on top of the new ACL system. The option effect will be just setting +# the password for the default user. Clients will still authenticate using +# AUTH as usually, or more explicitly with AUTH default +# if they follow the new protocol: both will work. +# +# The requirepass is not compatible with aclfile option and the ACL LOAD +# command, these will cause requirepass to be ignored. +# +# requirepass foobared + +# New users are initialized with restrictive permissions by default, via the +# equivalent of this ACL rule 'off resetkeys -@all'. Starting with Redis 6.2, it +# is possible to manage access to Pub/Sub channels with ACL rules as well. 
The +# default Pub/Sub channels permission if new users is controlled by the +# acl-pubsub-default configuration directive, which accepts one of these values: +# +# allchannels: grants access to all Pub/Sub channels +# resetchannels: revokes access to all Pub/Sub channels +# +# From Redis 7.0, acl-pubsub-default defaults to 'resetchannels' permission. +# +# acl-pubsub-default resetchannels + +# Command renaming (DEPRECATED). +# +# ------------------------------------------------------------------------ +# WARNING: avoid using this option if possible. Instead use ACLs to remove +# commands from the default user, and put them only in some admin user you +# create for administrative purposes. +# ------------------------------------------------------------------------ +# +# It is possible to change the name of dangerous commands in a shared +# environment. For instance the CONFIG command may be renamed into something +# hard to guess so that it will still be available for internal-use tools +# but not available for general clients. +# +# Example: +# +# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 +# +# It is also possible to completely kill a command by renaming it into +# an empty string: +# +# rename-command CONFIG "" +# +# Please note that changing the name of commands that are logged into the +# AOF file or transmitted to replicas may cause problems. + +################################### CLIENTS #################################### + +# Set the max number of connected clients at the same time. By default +# this limit is set to 10000 clients, however if the Redis server is not +# able to configure the process file limit to allow for the specified limit +# the max number of allowed clients is set to the current file limit +# minus 32 (as Redis reserves a few file descriptors for internal uses). +# +# Once the limit is reached Redis will close all the new connections sending +# an error 'max number of clients reached'. +# +# IMPORTANT: When Redis Cluster is used, the max number of connections is also +# shared with the cluster bus: every node in the cluster will use two +# connections, one incoming and another outgoing. It is important to size the +# limit accordingly in case of very large clusters. +# +# maxclients 10000 + +############################## MEMORY MANAGEMENT ################################ + +# Set a memory usage limit to the specified amount of bytes. +# When the memory limit is reached Redis will try to remove keys +# according to the eviction policy selected (see maxmemory-policy). +# +# If Redis can't remove keys according to the policy, or if the policy is +# set to 'noeviction', Redis will start to reply with errors to commands +# that would use more memory, like SET, LPUSH, and so on, and will continue +# to reply to read-only commands like GET. +# +# This option is usually useful when using Redis as an LRU or LFU cache, or to +# set a hard memory limit for an instance (using the 'noeviction' policy). +# +# WARNING: If you have replicas attached to an instance with maxmemory on, +# the size of the output buffers needed to feed the replicas are subtracted +# from the used memory count, so that network problems / resyncs will +# not trigger a loop where keys are evicted, and in turn the output +# buffer of replicas is full with DELs of keys evicted triggering the deletion +# of more keys, and so forth until the database is completely emptied. +# +# In short... 
if you have replicas attached it is suggested that you set a lower +# limit for maxmemory so that there is some free RAM on the system for replica +# output buffers (but this is not needed if the policy is 'noeviction'). +# +# maxmemory + +# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory +# is reached. You can select one from the following behaviors: +# +# volatile-lru -> Evict using approximated LRU, only keys with an expire set. +# allkeys-lru -> Evict any key using approximated LRU. +# volatile-lfu -> Evict using approximated LFU, only keys with an expire set. +# allkeys-lfu -> Evict any key using approximated LFU. +# volatile-random -> Remove a random key having an expire set. +# allkeys-random -> Remove a random key, any key. +# volatile-ttl -> Remove the key with the nearest expire time (minor TTL) +# noeviction -> Don't evict anything, just return an error on write operations. +# +# LRU means Least Recently Used +# LFU means Least Frequently Used +# +# Both LRU, LFU and volatile-ttl are implemented using approximated +# randomized algorithms. +# +# Note: with any of the above policies, when there are no suitable keys for +# eviction, Redis will return an error on write operations that require +# more memory. These are usually commands that create new keys, add data or +# modify existing keys. A few examples are: SET, INCR, HSET, LPUSH, SUNIONSTORE, +# SORT (due to the STORE argument), and EXEC (if the transaction includes any +# command that requires memory). +# +# The default is: +# +# maxmemory-policy noeviction + +# LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated +# algorithms (in order to save memory), so you can tune it for speed or +# accuracy. By default Redis will check five keys and pick the one that was +# used least recently, you can change the sample size using the following +# configuration directive. +# +# The default of 5 produces good enough results. 10 Approximates very closely +# true LRU but costs more CPU. 3 is faster but not very accurate. +# +# maxmemory-samples 5 + +# Eviction processing is designed to function well with the default setting. +# If there is an unusually large amount of write traffic, this value may need to +# be increased. Decreasing this value may reduce latency at the risk of +# eviction processing effectiveness +# 0 = minimum latency, 10 = default, 100 = process without regard to latency +# +# maxmemory-eviction-tenacity 10 + +# Starting from Redis 5, by default a replica will ignore its maxmemory setting +# (unless it is promoted to master after a failover or manually). It means +# that the eviction of keys will be just handled by the master, sending the +# DEL commands to the replica as keys evict in the master side. +# +# This behavior ensures that masters and replicas stay consistent, and is usually +# what you want, however if your replica is writable, or you want the replica +# to have a different memory setting, and you are sure all the writes performed +# to the replica are idempotent, then you may change this default (but be sure +# to understand what you are doing). +# +# Note that since the replica by default does not evict, it may end using more +# memory than the one set via maxmemory (there are certain buffers that may +# be larger on the replica, or data structures may sometimes take more memory +# and so forth). 
So make sure you monitor your replicas and make sure they +# have enough memory to never hit a real out-of-memory condition before the +# master hits the configured maxmemory setting. +# +# replica-ignore-maxmemory yes + +# Redis reclaims expired keys in two ways: upon access when those keys are +# found to be expired, and also in background, in what is called the +# "active expire key". The key space is slowly and interactively scanned +# looking for expired keys to reclaim, so that it is possible to free memory +# of keys that are expired and will never be accessed again in a short time. +# +# The default effort of the expire cycle will try to avoid having more than +# ten percent of expired keys still in memory, and will try to avoid consuming +# more than 25% of total memory and to add latency to the system. However +# it is possible to increase the expire "effort" that is normally set to +# "1", to a greater value, up to the value "10". At its maximum value the +# system will use more CPU, longer cycles (and technically may introduce +# more latency), and will tolerate less already expired keys still present +# in the system. It's a tradeoff between memory, CPU and latency. +# +# active-expire-effort 1 + +############################# LAZY FREEING #################################### + +# Redis has two primitives to delete keys. One is called DEL and is a blocking +# deletion of the object. It means that the server stops processing new commands +# in order to reclaim all the memory associated with an object in a synchronous +# way. If the key deleted is associated with a small object, the time needed +# in order to execute the DEL command is very small and comparable to most other +# O(1) or O(log_N) commands in Redis. However if the key is associated with an +# aggregated value containing millions of elements, the server can block for +# a long time (even seconds) in order to complete the operation. +# +# For the above reasons Redis also offers non blocking deletion primitives +# such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and +# FLUSHDB commands, in order to reclaim memory in background. Those commands +# are executed in constant time. Another thread will incrementally free the +# object in the background as fast as possible. +# +# DEL, UNLINK and ASYNC option of FLUSHALL and FLUSHDB are user-controlled. +# It's up to the design of the application to understand when it is a good +# idea to use one or the other. However the Redis server sometimes has to +# delete keys or flush the whole database as a side effect of other operations. +# Specifically Redis deletes objects independently of a user call in the +# following scenarios: +# +# 1) On eviction, because of the maxmemory and maxmemory policy configurations, +# in order to make room for new data, without going over the specified +# memory limit. +# 2) Because of expire: when a key with an associated time to live (see the +# EXPIRE command) must be deleted from memory. +# 3) Because of a side effect of a command that stores data on a key that may +# already exist. For example the RENAME command may delete the old key +# content when it is replaced with another one. Similarly SUNIONSTORE +# or SORT with STORE option may delete existing keys. The SET command +# itself removes any old content of the specified key in order to replace +# it with the specified string. 
+# 4) During replication, when a replica performs a full resynchronization with +# its master, the content of the whole database is removed in order to +# load the RDB file just transferred. +# +# In all the above cases the default is to delete objects in a blocking way, +# like if DEL was called. However you can configure each case specifically +# in order to instead release memory in a non-blocking way like if UNLINK +# was called, using the following configuration directives. + +lazyfree-lazy-eviction no +lazyfree-lazy-expire no +lazyfree-lazy-server-del no +replica-lazy-flush no + +# It is also possible, for the case when to replace the user code DEL calls +# with UNLINK calls is not easy, to modify the default behavior of the DEL +# command to act exactly like UNLINK, using the following configuration +# directive: + +lazyfree-lazy-user-del no + +# FLUSHDB, FLUSHALL, SCRIPT FLUSH and FUNCTION FLUSH support both asynchronous and synchronous +# deletion, which can be controlled by passing the [SYNC|ASYNC] flags into the +# commands. When neither flag is passed, this directive will be used to determine +# if the data should be deleted asynchronously. + +lazyfree-lazy-user-flush no + +################################ THREADED I/O ################################# + +# Redis is mostly single threaded, however there are certain threaded +# operations such as UNLINK, slow I/O accesses and other things that are +# performed on side threads. +# +# Now it is also possible to handle Redis clients socket reads and writes +# in different I/O threads. Since especially writing is so slow, normally +# Redis users use pipelining in order to speed up the Redis performances per +# core, and spawn multiple instances in order to scale more. Using I/O +# threads it is possible to easily speedup two times Redis without resorting +# to pipelining nor sharding of the instance. +# +# By default threading is disabled, we suggest enabling it only in machines +# that have at least 4 or more cores, leaving at least one spare core. +# Using more than 8 threads is unlikely to help much. We also recommend using +# threaded I/O only if you actually have performance problems, with Redis +# instances being able to use a quite big percentage of CPU time, otherwise +# there is no point in using this feature. +# +# So for instance if you have a four cores boxes, try to use 2 or 3 I/O +# threads, if you have a 8 cores, try to use 6 threads. In order to +# enable I/O threads use the following configuration directive: +# +# io-threads 4 +# +# Setting io-threads to 1 will just use the main thread as usual. +# When I/O threads are enabled, we only use threads for writes, that is +# to thread the write(2) syscall and transfer the client buffers to the +# socket. However it is also possible to enable threading of reads and +# protocol parsing using the following configuration directive, by setting +# it to yes: +# +# io-threads-do-reads no +# +# Usually threading reads doesn't help much. +# +# NOTE 1: This configuration directive cannot be changed at runtime via +# CONFIG SET. Also, this feature currently does not work when SSL is +# enabled. +# +# NOTE 2: If you want to test the Redis speedup using redis-benchmark, make +# sure you also run the benchmark itself in threaded mode, using the +# --threads option to match the number of Redis threads, otherwise you'll not +# be able to notice the improvements. 
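+#
+# As a rough illustration of the sizing advice above (hypothetical values,
+# not part of this app's shipped configuration), an 8-core host could thread
+# only the socket writes and keep reads on the main thread:
+#
+# io-threads 6
+# io-threads-do-reads no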
+ +############################ KERNEL OOM CONTROL ############################## + +# On Linux, it is possible to hint the kernel OOM killer on what processes +# should be killed first when out of memory. +# +# Enabling this feature makes Redis actively control the oom_score_adj value +# for all its processes, depending on their role. The default scores will +# attempt to have background child processes killed before all others, and +# replicas killed before masters. +# +# Redis supports these options: +# +# no: Don't make changes to oom-score-adj (default). +# yes: Alias to "relative" see below. +# absolute: Values in oom-score-adj-values are written as is to the kernel. +# relative: Values are used relative to the initial value of oom_score_adj when +# the server starts and are then clamped to a range of -1000 to 1000. +# Because typically the initial value is 0, they will often match the +# absolute values. +oom-score-adj no + +# When oom-score-adj is used, this directive controls the specific values used +# for master, replica and background child processes. Values range -2000 to +# 2000 (higher means more likely to be killed). +# +# Unprivileged processes (not root, and without CAP_SYS_RESOURCE capabilities) +# can freely increase their value, but not decrease it below its initial +# settings. This means that setting oom-score-adj to "relative" and setting the +# oom-score-adj-values to positive values will always succeed. +oom-score-adj-values 0 200 800 + + +#################### KERNEL transparent hugepage CONTROL ###################### + +# Usually the kernel Transparent Huge Pages control is set to "madvise" or +# or "never" by default (/sys/kernel/mm/transparent_hugepage/enabled), in which +# case this config has no effect. On systems in which it is set to "always", +# redis will attempt to disable it specifically for the redis process in order +# to avoid latency problems specifically with fork(2) and CoW. +# If for some reason you prefer to keep it enabled, you can set this config to +# "no" and the kernel global to "always". + +disable-thp yes + +############################## APPEND ONLY MODE ############################### + +# By default Redis asynchronously dumps the dataset on disk. This mode is +# good enough in many applications, but an issue with the Redis process or +# a power outage may result into a few minutes of writes lost (depending on +# the configured save points). +# +# The Append Only File is an alternative persistence mode that provides +# much better durability. For instance using the default data fsync policy +# (see later in the config file) Redis can lose just one second of writes in a +# dramatic event like a server power outage, or a single write if something +# wrong with the Redis process itself happens, but the operating system is +# still running correctly. +# +# AOF and RDB persistence can be enabled at the same time without problems. +# If the AOF is enabled on startup Redis will load the AOF, that is the file +# with the better durability guarantees. +# +# Please check https://redis.io/topics/persistence for more information. + +appendonly no + +# The base name of the append only file. +# +# Redis 7 and newer use a set of append-only files to persist the dataset +# and changes applied to it. There are two basic types of files in use: +# +# - Base files, which are a snapshot representing the complete state of the +# dataset at the time the file was created. 
Base files can be either in +# the form of RDB (binary serialized) or AOF (textual commands). +# - Incremental files, which contain additional commands that were applied +# to the dataset following the previous file. +# +# In addition, manifest files are used to track the files and the order in +# which they were created and should be applied. +# +# Append-only file names are created by Redis following a specific pattern. +# The file name's prefix is based on the 'appendfilename' configuration +# parameter, followed by additional information about the sequence and type. +# +# For example, if appendfilename is set to appendonly.aof, the following file +# names could be derived: +# +# - appendonly.aof.1.base.rdb as a base file. +# - appendonly.aof.1.incr.aof, appendonly.aof.2.incr.aof as incremental files. +# - appendonly.aof.manifest as a manifest file. + +appendfilename "appendonly.aof" + +# For convenience, Redis stores all persistent append-only files in a dedicated +# directory. The name of the directory is determined by the appenddirname +# configuration parameter. + +appenddirname "appendonlydir" + +# The fsync() call tells the Operating System to actually write data on disk +# instead of waiting for more data in the output buffer. Some OS will really flush +# data on disk, some other OS will just try to do it ASAP. +# +# Redis supports three different modes: +# +# no: don't fsync, just let the OS flush the data when it wants. Faster. +# always: fsync after every write to the append only log. Slow, Safest. +# everysec: fsync only one time every second. Compromise. +# +# The default is "everysec", as that's usually the right compromise between +# speed and data safety. It's up to you to understand if you can relax this to +# "no" that will let the operating system flush the output buffer when +# it wants, for better performances (but if you can live with the idea of +# some data loss consider the default persistence mode that's snapshotting), +# or on the contrary, use "always" that's very slow but a bit safer than +# everysec. +# +# More details please check the following article: +# http://antirez.com/post/redis-persistence-demystified.html +# +# If unsure, use "everysec". + +# appendfsync always +appendfsync everysec +# appendfsync no + +# When the AOF fsync policy is set to always or everysec, and a background +# saving process (a background save or AOF log background rewriting) is +# performing a lot of I/O against the disk, in some Linux configurations +# Redis may block too long on the fsync() call. Note that there is no fix for +# this currently, as even performing fsync in a different thread will block +# our synchronous write(2) call. +# +# In order to mitigate this problem it's possible to use the following option +# that will prevent fsync() from being called in the main process while a +# BGSAVE or BGREWRITEAOF is in progress. +# +# This means that while another child is saving, the durability of Redis is +# the same as "appendfsync no". In practical terms, this means that it is +# possible to lose up to 30 seconds of log in the worst scenario (with the +# default Linux settings). +# +# If you have latency problems turn this to "yes". Otherwise leave it as +# "no" that is the safest pick from the point of view of durability. + +no-appendfsync-on-rewrite no + +# Automatic rewrite of the append only file. +# Redis is able to automatically rewrite the log file implicitly calling +# BGREWRITEAOF when the AOF log size grows by the specified percentage. 
+# +# This is how it works: Redis remembers the size of the AOF file after the +# latest rewrite (if no rewrite has happened since the restart, the size of +# the AOF at startup is used). +# +# This base size is compared to the current size. If the current size is +# bigger than the specified percentage, the rewrite is triggered. Also +# you need to specify a minimal size for the AOF file to be rewritten, this +# is useful to avoid rewriting the AOF file even if the percentage increase +# is reached but it is still pretty small. +# +# Specify a percentage of zero in order to disable the automatic AOF +# rewrite feature. + +auto-aof-rewrite-percentage 100 +auto-aof-rewrite-min-size 64mb + +# An AOF file may be found to be truncated at the end during the Redis +# startup process, when the AOF data gets loaded back into memory. +# This may happen when the system where Redis is running +# crashes, especially when an ext4 filesystem is mounted without the +# data=ordered option (however this can't happen when Redis itself +# crashes or aborts but the operating system still works correctly). +# +# Redis can either exit with an error when this happens, or load as much +# data as possible (the default now) and start if the AOF file is found +# to be truncated at the end. The following option controls this behavior. +# +# If aof-load-truncated is set to yes, a truncated AOF file is loaded and +# the Redis server starts emitting a log to inform the user of the event. +# Otherwise if the option is set to no, the server aborts with an error +# and refuses to start. When the option is set to no, the user requires +# to fix the AOF file using the "redis-check-aof" utility before to restart +# the server. +# +# Note that if the AOF file will be found to be corrupted in the middle +# the server will still exit with an error. This option only applies when +# Redis will try to read more data from the AOF file but not enough bytes +# will be found. +aof-load-truncated yes + +# Redis can create append-only base files in either RDB or AOF formats. Using +# the RDB format is always faster and more efficient, and disabling it is only +# supported for backward compatibility purposes. +aof-use-rdb-preamble yes + +# Redis supports recording timestamp annotations in the AOF to support restoring +# the data from a specific point-in-time. However, using this capability changes +# the AOF format in a way that may not be compatible with existing AOF parsers. +aof-timestamp-enabled no + +################################ SHUTDOWN ##################################### + +# Maximum time to wait for replicas when shutting down, in seconds. +# +# During shut down, a grace period allows any lagging replicas to catch up with +# the latest replication offset before the master exists. This period can +# prevent data loss, especially for deployments without configured disk backups. +# +# The 'shutdown-timeout' value is the grace period's duration in seconds. It is +# only applicable when the instance has replicas. To disable the feature, set +# the value to 0. +# +# shutdown-timeout 10 + +# When Redis receives a SIGINT or SIGTERM, shutdown is initiated and by default +# an RDB snapshot is written to disk in a blocking operation if save points are configured. +# The options used on signaled shutdown can include the following values: +# default: Saves RDB snapshot only if save points are configured. +# Waits for lagging replicas to catch up. +# save: Forces a DB saving operation even if no save points are configured. 
+# nosave: Prevents DB saving operation even if one or more save points are configured. +# now: Skips waiting for lagging replicas. +# force: Ignores any errors that would normally prevent the server from exiting. +# +# Any combination of values is allowed as long as "save" and "nosave" are not set simultaneously. +# Example: "nosave force now" +# +# shutdown-on-sigint default +# shutdown-on-sigterm default + +################ NON-DETERMINISTIC LONG BLOCKING COMMANDS ##################### + +# Maximum time in milliseconds for EVAL scripts, functions and in some cases +# modules' commands before Redis can start processing or rejecting other clients. +# +# If the maximum execution time is reached Redis will start to reply to most +# commands with a BUSY error. +# +# In this state Redis will only allow a handful of commands to be executed. +# For instance, SCRIPT KILL, FUNCTION KILL, SHUTDOWN NOSAVE and possibly some +# module specific 'allow-busy' commands. +# +# SCRIPT KILL and FUNCTION KILL will only be able to stop a script that did not +# yet call any write commands, so SHUTDOWN NOSAVE may be the only way to stop +# the server in the case a write command was already issued by the script when +# the user doesn't want to wait for the natural termination of the script. +# +# The default is 5 seconds. It is possible to set it to 0 or a negative value +# to disable this mechanism (uninterrupted execution). Note that in the past +# this config had a different name, which is now an alias, so both of these do +# the same: +# lua-time-limit 5000 +# busy-reply-threshold 5000 + +################################ REDIS CLUSTER ############################### + +# Normal Redis instances can't be part of a Redis Cluster; only nodes that are +# started as cluster nodes can. In order to start a Redis instance as a +# cluster node enable the cluster support uncommenting the following: +# +# cluster-enabled yes + +# Every cluster node has a cluster configuration file. This file is not +# intended to be edited by hand. It is created and updated by Redis nodes. +# Every Redis Cluster node requires a different cluster configuration file. +# Make sure that instances running in the same system do not have +# overlapping cluster configuration file names. +# +# cluster-config-file nodes-6379.conf + +# Cluster node timeout is the amount of milliseconds a node must be unreachable +# for it to be considered in failure state. +# Most other internal time limits are a multiple of the node timeout. +# +# cluster-node-timeout 15000 + +# The cluster port is the port that the cluster bus will listen for inbound connections on. When set +# to the default value, 0, it will be bound to the command port + 10000. Setting this value requires +# you to specify the cluster bus port when executing cluster meet. +# cluster-port 0 + +# A replica of a failing master will avoid to start a failover if its data +# looks too old. +# +# There is no simple way for a replica to actually have an exact measure of +# its "data age", so the following two checks are performed: +# +# 1) If there are multiple replicas able to failover, they exchange messages +# in order to try to give an advantage to the replica with the best +# replication offset (more data from the master processed). +# Replicas will try to get their rank by offset, and apply to the start +# of the failover a delay proportional to their rank. +# +# 2) Every single replica computes the time of the last interaction with +# its master. 
This can be the last ping or command received (if the master +# is still in the "connected" state), or the time that elapsed since the +# disconnection with the master (if the replication link is currently down). +# If the last interaction is too old, the replica will not try to failover +# at all. +# +# The point "2" can be tuned by user. Specifically a replica will not perform +# the failover if, since the last interaction with the master, the time +# elapsed is greater than: +# +# (node-timeout * cluster-replica-validity-factor) + repl-ping-replica-period +# +# So for example if node-timeout is 30 seconds, and the cluster-replica-validity-factor +# is 10, and assuming a default repl-ping-replica-period of 10 seconds, the +# replica will not try to failover if it was not able to talk with the master +# for longer than 310 seconds. +# +# A large cluster-replica-validity-factor may allow replicas with too old data to failover +# a master, while a too small value may prevent the cluster from being able to +# elect a replica at all. +# +# For maximum availability, it is possible to set the cluster-replica-validity-factor +# to a value of 0, which means, that replicas will always try to failover the +# master regardless of the last time they interacted with the master. +# (However they'll always try to apply a delay proportional to their +# offset rank). +# +# Zero is the only value able to guarantee that when all the partitions heal +# the cluster will always be able to continue. +# +# cluster-replica-validity-factor 10 + +# Cluster replicas are able to migrate to orphaned masters, that are masters +# that are left without working replicas. This improves the cluster ability +# to resist to failures as otherwise an orphaned master can't be failed over +# in case of failure if it has no working replicas. +# +# Replicas migrate to orphaned masters only if there are still at least a +# given number of other working replicas for their old master. This number +# is the "migration barrier". A migration barrier of 1 means that a replica +# will migrate only if there is at least 1 other working replica for its master +# and so forth. It usually reflects the number of replicas you want for every +# master in your cluster. +# +# Default is 1 (replicas migrate only if their masters remain with at least +# one replica). To disable migration just set it to a very large value or +# set cluster-allow-replica-migration to 'no'. +# A value of 0 can be set but is useful only for debugging and dangerous +# in production. +# +# cluster-migration-barrier 1 + +# Turning off this option allows to use less automatic cluster configuration. +# It both disables migration to orphaned masters and migration from masters +# that became empty. +# +# Default is 'yes' (allow automatic migrations). +# +# cluster-allow-replica-migration yes + +# By default Redis Cluster nodes stop accepting queries if they detect there +# is at least a hash slot uncovered (no available node is serving it). +# This way if the cluster is partially down (for example a range of hash slots +# are no longer covered) all the cluster becomes, eventually, unavailable. +# It automatically returns available as soon as all the slots are covered again. +# +# However sometimes you want the subset of the cluster which is working, +# to continue to accept queries for the part of the key space that is still +# covered. In order to do so, just set the cluster-require-full-coverage +# option to no. 
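+#
+# For example (illustrative only, not the commented default shown below), a
+# cluster that should keep serving the hash slots it still covers during a
+# partial outage would set:
+#
+# cluster-require-full-coverage no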
+# +# cluster-require-full-coverage yes + +# This option, when set to yes, prevents replicas from trying to failover its +# master during master failures. However the replica can still perform a +# manual failover, if forced to do so. +# +# This is useful in different scenarios, especially in the case of multiple +# data center operations, where we want one side to never be promoted if not +# in the case of a total DC failure. +# +# cluster-replica-no-failover no + +# This option, when set to yes, allows nodes to serve read traffic while the +# cluster is in a down state, as long as it believes it owns the slots. +# +# This is useful for two cases. The first case is for when an application +# doesn't require consistency of data during node failures or network partitions. +# One example of this is a cache, where as long as the node has the data it +# should be able to serve it. +# +# The second use case is for configurations that don't meet the recommended +# three shards but want to enable cluster mode and scale later. A +# master outage in a 1 or 2 shard configuration causes a read/write outage to the +# entire cluster without this option set, with it set there is only a write outage. +# Without a quorum of masters, slot ownership will not change automatically. +# +# cluster-allow-reads-when-down no + +# This option, when set to yes, allows nodes to serve pubsub shard traffic while +# the cluster is in a down state, as long as it believes it owns the slots. +# +# This is useful if the application would like to use the pubsub feature even when +# the cluster global stable state is not OK. If the application wants to make sure only +# one shard is serving a given channel, this feature should be kept as yes. +# +# cluster-allow-pubsubshard-when-down yes + +# Cluster link send buffer limit is the limit on the memory usage of an individual +# cluster bus link's send buffer in bytes. Cluster links would be freed if they exceed +# this limit. This is to primarily prevent send buffers from growing unbounded on links +# toward slow peers (E.g. PubSub messages being piled up). +# This limit is disabled by default. Enable this limit when 'mem_cluster_links' INFO field +# and/or 'send-buffer-allocated' entries in the 'CLUSTER LINKS` command output continuously increase. +# Minimum limit of 1gb is recommended so that cluster link buffer can fit in at least a single +# PubSub message by default. (client-query-buffer-limit default value is 1gb) +# +# cluster-link-sendbuf-limit 0 + +# Clusters can configure their announced hostname using this config. This is a common use case for +# applications that need to use TLS Server Name Indication (SNI) or dealing with DNS based +# routing. By default this value is only shown as additional metadata in the CLUSTER SLOTS +# command, but can be changed using 'cluster-preferred-endpoint-type' config. This value is +# communicated along the clusterbus to all nodes, setting it to an empty string will remove +# the hostname and also propagate the removal. +# +# cluster-announce-hostname "" + +# Clusters can advertise how clients should connect to them using either their IP address, +# a user defined hostname, or by declaring they have no endpoint. Which endpoint is +# shown as the preferred endpoint is set by using the cluster-preferred-endpoint-type +# config with values 'ip', 'hostname', or 'unknown-endpoint'. This value controls how +# the endpoint returned for MOVED/ASKING requests as well as the first field of CLUSTER SLOTS. 
+# If the preferred endpoint type is set to hostname, but no announced hostname is set, a '?' +# will be returned instead. +# +# When a cluster advertises itself as having an unknown endpoint, it's indicating that +# the server doesn't know how clients can reach the cluster. This can happen in certain +# networking situations where there are multiple possible routes to the node, and the +# server doesn't know which one the client took. In this case, the server is expecting +# the client to reach out on the same endpoint it used for making the last request, but use +# the port provided in the response. +# +# cluster-preferred-endpoint-type ip + +# In order to setup your cluster make sure to read the documentation +# available at https://redis.io web site. + +########################## CLUSTER DOCKER/NAT support ######################## + +# In certain deployments, Redis Cluster nodes address discovery fails, because +# addresses are NAT-ted or because ports are forwarded (the typical case is +# Docker and other containers). +# +# In order to make Redis Cluster working in such environments, a static +# configuration where each node knows its public address is needed. The +# following four options are used for this scope, and are: +# +# * cluster-announce-ip +# * cluster-announce-port +# * cluster-announce-tls-port +# * cluster-announce-bus-port +# +# Each instructs the node about its address, client ports (for connections +# without and with TLS) and cluster message bus port. The information is then +# published in the header of the bus packets so that other nodes will be able to +# correctly map the address of the node publishing the information. +# +# If cluster-tls is set to yes and cluster-announce-tls-port is omitted or set +# to zero, then cluster-announce-port refers to the TLS port. Note also that +# cluster-announce-tls-port has no effect if cluster-tls is set to no. +# +# If the above options are not used, the normal Redis Cluster auto-detection +# will be used instead. +# +# Note that when remapped, the bus port may not be at the fixed offset of +# clients port + 10000, so you can specify any port and bus-port depending +# on how they get remapped. If the bus-port is not set, a fixed offset of +# 10000 will be used as usual. +# +# Example: +# +# cluster-announce-ip 10.1.1.5 +# cluster-announce-tls-port 6379 +# cluster-announce-port 0 +# cluster-announce-bus-port 6380 + +################################## SLOW LOG ################################### + +# The Redis Slow Log is a system to log queries that exceeded a specified +# execution time. The execution time does not include the I/O operations +# like talking with the client, sending the reply and so forth, +# but just the time needed to actually execute the command (this is the only +# stage of command execution where the thread is blocked and can not serve +# other requests in the meantime). +# +# You can configure the slow log with two parameters: one tells Redis +# what is the execution time, in microseconds, to exceed in order for the +# command to get logged, and the other parameter is the length of the +# slow log. When a new command is logged the oldest one is removed from the +# queue of logged commands. + +# The following time is expressed in microseconds, so 1000000 is equivalent +# to one second. Note that a negative number disables the slow log, while +# a value of zero forces the logging of every command. +slowlog-log-slower-than 10000 + +# There is no limit to this length. 
Just be aware that it will consume memory.
+# You can reclaim memory used by the slow log with SLOWLOG RESET.
+slowlog-max-len 128
+
+################################ LATENCY MONITOR ##############################
+
+# The Redis latency monitoring subsystem samples different operations
+# at runtime in order to collect data related to possible sources of
+# latency of a Redis instance.
+#
+# Via the LATENCY command this information is available to the user that can
+# print graphs and obtain reports.
+#
+# The system only logs operations that were performed in a time equal or
+# greater than the amount of milliseconds specified via the
+# latency-monitor-threshold configuration directive. When its value is set
+# to zero, the latency monitor is turned off.
+#
+# By default latency monitoring is disabled since it is mostly not needed
+# if you don't have latency issues, and collecting data has a performance
+# impact, that while very small, can be measured under big load. Latency
+# monitoring can easily be enabled at runtime using the command
+# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
+latency-monitor-threshold 0
+
+################################ LATENCY TRACKING ##############################
+
+# The Redis extended latency monitoring tracks the per command latencies and enables
+# exporting the percentile distribution via the INFO latencystats command,
+# and cumulative latency distributions (histograms) via the LATENCY command.
+#
+# By default, the extended latency monitoring is enabled since the overhead
+# of keeping track of the command latency is very small.
+# latency-tracking yes
+
+# By default the exported latency percentiles via the INFO latencystats command
+# are the p50, p99, and p999.
+# latency-tracking-info-percentiles 50 99 99.9
+
+############################# EVENT NOTIFICATION ##############################
+
+# Redis can notify Pub/Sub clients about events happening in the key space.
+# This feature is documented at https://redis.io/topics/notifications
+#
+# For instance if keyspace events notification is enabled, and a client
+# performs a DEL operation on key "foo" stored in the Database 0, two
+# messages will be published via Pub/Sub:
+#
+# PUBLISH __keyspace@0__:foo del
+# PUBLISH __keyevent@0__:del foo
+#
+# It is possible to select the events that Redis will notify among a set
+# of classes. Every class is identified by a single character:
+#
+# K Keyspace events, published with __keyspace@<db>__ prefix.
+# E Keyevent events, published with __keyevent@<db>__ prefix.
+# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
+# $ String commands
+# l List commands
+# s Set commands
+# h Hash commands
+# z Sorted set commands
+# x Expired events (events generated every time a key expires)
+# e Evicted events (events generated when a key is evicted for maxmemory)
+# n New key events (Note: not included in the 'A' class)
+# t Stream commands
+# d Module key type events
+# m Key-miss events (Note: It is not included in the 'A' class)
+# A Alias for g$lshzxetd, so that the "AKE" string means all the events
+# (Except key-miss events which are excluded from 'A' due to their
+# unique nature).
+#
+# The "notify-keyspace-events" takes as argument a string that is composed
+# of zero or multiple characters. The empty string means that notifications
+# are disabled.
+# +# Example: to enable list and generic events, from the point of view of the +# event name, use: +# +# notify-keyspace-events Elg +# +# Example 2: to get the stream of the expired keys subscribing to channel +# name __keyevent@0__:expired use: +# +# notify-keyspace-events Ex +# +# By default all notifications are disabled because most users don't need +# this feature and the feature has some overhead. Note that if you don't +# specify at least one of K or E, no events will be delivered. +notify-keyspace-events "" + +############################### ADVANCED CONFIG ############################### + +# Hashes are encoded using a memory efficient data structure when they have a +# small number of entries, and the biggest entry does not exceed a given +# threshold. These thresholds can be configured using the following directives. +hash-max-listpack-entries 512 +hash-max-listpack-value 64 + +# Lists are also encoded in a special way to save a lot of space. +# The number of entries allowed per internal list node can be specified +# as a fixed maximum size or a maximum number of elements. +# For a fixed maximum size, use -5 through -1, meaning: +# -5: max size: 64 Kb <-- not recommended for normal workloads +# -4: max size: 32 Kb <-- not recommended +# -3: max size: 16 Kb <-- probably not recommended +# -2: max size: 8 Kb <-- good +# -1: max size: 4 Kb <-- good +# Positive numbers mean store up to _exactly_ that number of elements +# per list node. +# The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size), +# but if your use case is unique, adjust the settings as necessary. +list-max-listpack-size -2 + +# Lists may also be compressed. +# Compress depth is the number of quicklist ziplist nodes from *each* side of +# the list to *exclude* from compression. The head and tail of the list +# are always uncompressed for fast push/pop operations. Settings are: +# 0: disable all list compression +# 1: depth 1 means "don't start compressing until after 1 node into the list, +# going from either the head or tail" +# So: [head]->node->node->...->node->[tail] +# [head], [tail] will always be uncompressed; inner nodes will compress. +# 2: [head]->[next]->node->node->...->node->[prev]->[tail] +# 2 here means: don't compress head or head->next or tail->prev or tail, +# but compress all nodes between them. +# 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail] +# etc. +list-compress-depth 0 + +# Sets have a special encoding in just one case: when a set is composed +# of just strings that happen to be integers in radix 10 in the range +# of 64 bit signed integers. +# The following configuration setting sets the limit in the size of the +# set in order to use this special memory saving encoding. +set-max-intset-entries 512 + +# Similarly to hashes and lists, sorted sets are also specially encoded in +# order to save a lot of space. This encoding is only used when the length and +# elements of a sorted set are below the following limits: +zset-max-listpack-entries 128 +zset-max-listpack-value 64 + +# HyperLogLog sparse representation bytes limit. The limit includes the +# 16 bytes header. When an HyperLogLog using the sparse representation crosses +# this limit, it is converted into the dense representation. +# +# A value greater than 16000 is totally useless, since at that point the +# dense representation is more memory efficient. 
+#
+# The suggested value is ~ 3000 in order to have the benefits of
+# the space efficient encoding without slowing down too much PFADD,
+# which is O(N) with the sparse encoding. The value can be raised to
+# ~ 10000 when CPU is not a concern, but space is, and the data set is
+# composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
+hll-sparse-max-bytes 3000
+
+# Streams macro node max size / items. The stream data structure is a radix
+# tree of big nodes that encode multiple items inside. Using this configuration
+# it is possible to configure how big a single node can be in bytes, and the
+# maximum number of items it may contain before switching to a new node when
+# appending new stream entries. If any of the following settings are set to
+# zero, the limit is ignored, so for instance it is possible to set just a
+# max entries limit by setting max-bytes to 0 and max-entries to the desired
+# value.
+stream-node-max-bytes 4096
+stream-node-max-entries 100
+
+# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
+# order to help rehashing the main Redis hash table (the one mapping top-level
+# keys to values). The hash table implementation Redis uses (see dict.c)
+# performs a lazy rehashing: the more operation you run into a hash table
+# that is rehashing, the more rehashing "steps" are performed, so if the
+# server is idle the rehashing is never complete and some more memory is used
+# by the hash table.
+#
+# The default is to use this millisecond 10 times every second in order to
+# actively rehash the main dictionaries, freeing memory when possible.
+#
+# If unsure:
+# use "activerehashing no" if you have hard latency requirements and it is
+# not a good thing in your environment that Redis can reply from time to time
+# to queries with 2 milliseconds delay.
+#
+# use "activerehashing yes" if you don't have such hard requirements but
+# want to free memory asap when possible.
+activerehashing yes
+
+# The client output buffer limits can be used to force disconnection of clients
+# that are not reading data from the server fast enough for some reason (a
+# common reason is that a Pub/Sub client can't consume messages as fast as the
+# publisher can produce them).
+#
+# The limit can be set differently for the three different classes of clients:
+#
+# normal -> normal clients including MONITOR clients
+# replica -> replica clients
+# pubsub -> clients subscribed to at least one pubsub channel or pattern
+#
+# The syntax of every client-output-buffer-limit directive is the following:
+#
+# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
+#
+# A client is immediately disconnected once the hard limit is reached, or if
+# the soft limit is reached and remains reached for the specified number of
+# seconds (continuously).
+# So for instance if the hard limit is 32 megabytes and the soft limit is
+# 16 megabytes / 10 seconds, the client will get disconnected immediately
+# if the size of the output buffers reach 32 megabytes, but will also get
+# disconnected if the client reaches 16 megabytes and continuously overcomes
+# the limit for 10 seconds.
+#
+# By default normal clients are not limited because they don't receive data
+# without asking (in a push way), but just after a request, so only
+# asynchronous clients may create a scenario where data is requested faster
+# than it can read.
+#
+# Instead there is a default limit for pubsub and replica clients, since
+# subscribers and replicas receive data in a push fashion.
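+#
+# For illustration only (hypothetical values, not the limits shipped below):
+# to drop slow Pub/Sub subscribers sooner, the pubsub class could be
+# tightened like this:
+#
+# client-output-buffer-limit pubsub 16mb 4mb 30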
+# +# Note that it doesn't make sense to set the replica clients output buffer +# limit lower than the repl-backlog-size config (partial sync will succeed +# and then replica will get disconnected). +# Such a configuration is ignored (the size of repl-backlog-size will be used). +# This doesn't have memory consumption implications since the replica client +# will share the backlog buffers memory. +# +# Both the hard or the soft limit can be disabled by setting them to zero. +client-output-buffer-limit normal 0 0 0 +client-output-buffer-limit replica 256mb 64mb 60 +client-output-buffer-limit pubsub 32mb 8mb 60 + +# Client query buffers accumulate new commands. They are limited to a fixed +# amount by default in order to avoid that a protocol desynchronization (for +# instance due to a bug in the client) will lead to unbound memory usage in +# the query buffer. However you can configure it here if you have very special +# needs, such us huge multi/exec requests or alike. +# +# client-query-buffer-limit 1gb + +# In some scenarios client connections can hog up memory leading to OOM +# errors or data eviction. To avoid this we can cap the accumulated memory +# used by all client connections (all pubsub and normal clients). Once we +# reach that limit connections will be dropped by the server freeing up +# memory. The server will attempt to drop the connections using the most +# memory first. We call this mechanism "client eviction". +# +# Client eviction is configured using the maxmemory-clients setting as follows: +# 0 - client eviction is disabled (default) +# +# A memory value can be used for the client eviction threshold, +# for example: +# maxmemory-clients 1g +# +# A percentage value (between 1% and 100%) means the client eviction threshold +# is based on a percentage of the maxmemory setting. For example to set client +# eviction at 5% of maxmemory: +# maxmemory-clients 5% + +# In the Redis protocol, bulk requests, that are, elements representing single +# strings, are normally limited to 512 mb. However you can change this limit +# here, but must be 1mb or greater +# +# proto-max-bulk-len 512mb + +# Redis calls an internal function to perform many background tasks, like +# closing connections of clients in timeout, purging expired keys that are +# never requested, and so forth. +# +# Not all tasks are performed with the same frequency, but Redis checks for +# tasks to perform according to the specified "hz" value. +# +# By default "hz" is set to 10. Raising the value will use more CPU when +# Redis is idle, but at the same time will make Redis more responsive when +# there are many keys expiring at the same time, and timeouts may be +# handled with more precision. +# +# The range is between 1 and 500, however a value over 100 is usually not +# a good idea. Most users should use the default of 10 and raise this up to +# 100 only in environments where very low latency is required. +hz 10 + +# Normally it is useful to have an HZ value which is proportional to the +# number of clients connected. This is useful in order, for instance, to +# avoid too many clients are processed for each background task invocation +# in order to avoid latency spikes. +# +# Since the default HZ value by default is conservatively set to 10, Redis +# offers, and enables by default, the ability to use an adaptive HZ value +# which will temporarily raise when there are many connected clients. 
+# +# When dynamic HZ is enabled, the actual configured HZ will be used +# as a baseline, but multiples of the configured HZ value will be actually +# used as needed once more clients are connected. In this way an idle +# instance will use very little CPU time while a busy instance will be +# more responsive. +dynamic-hz yes + +# When a child rewrites the AOF file, if the following option is enabled +# the file will be fsync-ed every 4 MB of data generated. This is useful +# in order to commit the file to the disk more incrementally and avoid +# big latency spikes. +aof-rewrite-incremental-fsync yes + +# When redis saves RDB file, if the following option is enabled +# the file will be fsync-ed every 4 MB of data generated. This is useful +# in order to commit the file to the disk more incrementally and avoid +# big latency spikes. +rdb-save-incremental-fsync yes + +# Redis LFU eviction (see maxmemory setting) can be tuned. However it is a good +# idea to start with the default settings and only change them after investigating +# how to improve the performances and how the keys LFU change over time, which +# is possible to inspect via the OBJECT FREQ command. +# +# There are two tunable parameters in the Redis LFU implementation: the +# counter logarithm factor and the counter decay time. It is important to +# understand what the two parameters mean before changing them. +# +# The LFU counter is just 8 bits per key, it's maximum value is 255, so Redis +# uses a probabilistic increment with logarithmic behavior. Given the value +# of the old counter, when a key is accessed, the counter is incremented in +# this way: +# +# 1. A random number R between 0 and 1 is extracted. +# 2. A probability P is calculated as 1/(old_value*lfu_log_factor+1). +# 3. The counter is incremented only if R < P. +# +# The default lfu-log-factor is 10. This is a table of how the frequency +# counter changes with a different number of accesses with different +# logarithmic factors: +# +# +--------+------------+------------+------------+------------+------------+ +# | factor | 100 hits | 1000 hits | 100K hits | 1M hits | 10M hits | +# +--------+------------+------------+------------+------------+------------+ +# | 0 | 104 | 255 | 255 | 255 | 255 | +# +--------+------------+------------+------------+------------+------------+ +# | 1 | 18 | 49 | 255 | 255 | 255 | +# +--------+------------+------------+------------+------------+------------+ +# | 10 | 10 | 18 | 142 | 255 | 255 | +# +--------+------------+------------+------------+------------+------------+ +# | 100 | 8 | 11 | 49 | 143 | 255 | +# +--------+------------+------------+------------+------------+------------+ +# +# NOTE: The above table was obtained by running the following commands: +# +# redis-benchmark -n 1000000 incr foo +# redis-cli object freq foo +# +# NOTE 2: The counter initial value is 5 in order to give new objects a chance +# to accumulate hits. +# +# The counter decay time is the time, in minutes, that must elapse in order +# for the key counter to be divided by two (or decremented if it has a value +# less <= 10). +# +# The default value for the lfu-decay-time is 1. A special value of 0 means to +# decay the counter every time it happens to be scanned. +# +# lfu-log-factor 10 +# lfu-decay-time 1 + +########################### ACTIVE DEFRAGMENTATION ####################### +# +# What is active defragmentation? 
+# ------------------------------- +# +# Active (online) defragmentation allows a Redis server to compact the +# spaces left between small allocations and deallocations of data in memory, +# thus allowing to reclaim back memory. +# +# Fragmentation is a natural process that happens with every allocator (but +# less so with Jemalloc, fortunately) and certain workloads. Normally a server +# restart is needed in order to lower the fragmentation, or at least to flush +# away all the data and create it again. However thanks to this feature +# implemented by Oran Agra for Redis 4.0 this process can happen at runtime +# in a "hot" way, while the server is running. +# +# Basically when the fragmentation is over a certain level (see the +# configuration options below) Redis will start to create new copies of the +# values in contiguous memory regions by exploiting certain specific Jemalloc +# features (in order to understand if an allocation is causing fragmentation +# and to allocate it in a better place), and at the same time, will release the +# old copies of the data. This process, repeated incrementally for all the keys +# will cause the fragmentation to drop back to normal values. +# +# Important things to understand: +# +# 1. This feature is disabled by default, and only works if you compiled Redis +# to use the copy of Jemalloc we ship with the source code of Redis. +# This is the default with Linux builds. +# +# 2. You never need to enable this feature if you don't have fragmentation +# issues. +# +# 3. Once you experience fragmentation, you can enable this feature when +# needed with the command "CONFIG SET activedefrag yes". +# +# The configuration parameters are able to fine tune the behavior of the +# defragmentation process. If you are not sure about what they mean it is +# a good idea to leave the defaults untouched. + +# Active defragmentation is disabled by default +# activedefrag no + +# Minimum amount of fragmentation waste to start active defrag +# active-defrag-ignore-bytes 100mb + +# Minimum percentage of fragmentation to start active defrag +# active-defrag-threshold-lower 10 + +# Maximum percentage of fragmentation at which we use maximum effort +# active-defrag-threshold-upper 100 + +# Minimal effort for defrag in CPU percentage, to be used when the lower +# threshold is reached +# active-defrag-cycle-min 1 + +# Maximal effort for defrag in CPU percentage, to be used when the upper +# threshold is reached +# active-defrag-cycle-max 25 + +# Maximum number of set/hash/zset/list fields that will be processed from +# the main dictionary scan +# active-defrag-max-scan-fields 1000 + +# Jemalloc background thread for purging will be enabled by default +jemalloc-bg-thread yes + +# It is possible to pin different threads and processes of Redis to specific +# CPUs in your system, in order to maximize the performances of the server. +# This is useful both in order to pin different Redis threads in different +# CPUs, but also in order to make sure that multiple Redis instances running +# in the same host will be pinned to different CPUs. +# +# Normally you can do this using the "taskset" command, however it is also +# possible to this via Redis configuration directly, both in Linux and FreeBSD. +# +# You can pin the server/IO threads, bio threads, aof rewrite child process, and +# the bgsave child process. 
The syntax to specify the cpu list is the same as +# the taskset command: +# +# Set redis server/io threads to cpu affinity 0,2,4,6: +# server_cpulist 0-7:2 +# +# Set bio threads to cpu affinity 1,3: +# bio_cpulist 1,3 +# +# Set aof rewrite child process to cpu affinity 8,9,10,11: +# aof_rewrite_cpulist 8-11 +# +# Set bgsave child process to cpu affinity 1,10,11 +# bgsave_cpulist 1,10-11 + +# In some cases redis will emit warnings and even refuse to start if it detects +# that the system is in bad state, it is possible to suppress these warnings +# by setting the following config which takes a space delimited list of warnings +# to suppress +# +# ignore-warnings ARM64-COW-BUG diff --git a/apps/redis/7.2.5/data.yml b/apps/redis/7.2.5/data.yml new file mode 100644 index 00000000..fcf9b51d --- /dev/null +++ b/apps/redis/7.2.5/data.yml @@ -0,0 +1,23 @@ +additionalProperties: + formFields: + - default: "/home/redis" + envKey: REDIS_ROOT_PATH + labelZh: 数据持久化路径 + labelEn: Data persistence path + required: true + type: text + - default: 6379 + envKey: PANEL_APP_PORT_HTTP + labelZh: 端口 + labelEn: Port + required: true + rule: paramPort + type: number + - default: "" + envKey: REDIS_ROOT_PASSWORD + labelZh: 密码 + labelEn: Password + random: true + required: false + rule: paramComplexity + type: password diff --git a/apps/redis/7.2.5/docker-compose.yml b/apps/redis/7.2.5/docker-compose.yml new file mode 100644 index 00000000..cb24a4dc --- /dev/null +++ b/apps/redis/7.2.5/docker-compose.yml @@ -0,0 +1,28 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +services: + redis: + image: redis:7.2.5-alpine + container_name: ${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + networks: + - 1panel-network + ports: + - ${PANEL_APP_PORT_HTTP}:6379 + command: > + sh -c ' + if [ -z "${REDIS_ROOT_PASSWORD}" ]; then + redis-server /etc/redis/redis.conf + else + redis-server /etc/redis/redis.conf --requirepass ${REDIS_ROOT_PASSWORD} + fi' + volumes: + - ${REDIS_ROOT_PATH}/data:/data + - ${REDIS_ROOT_PATH}/config/redis.conf:/etc/redis/redis.conf + - ${REDIS_ROOT_PATH}/logs:/logs diff --git a/apps/redis/7.2.5/scripts/init.sh b/apps/redis/7.2.5/scripts/init.sh new file mode 100644 index 00000000..d10108b1 --- /dev/null +++ b/apps/redis/7.2.5/scripts/init.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + mkdir -p "$REDIS_ROOT_PATH" + + mkdir -p "$REDIS_ROOT_PATH/data" + mkdir -p "$REDIS_ROOT_PATH/config" + mkdir -p "$REDIS_ROOT_PATH/logs" + + cp ./config/redis.conf "$REDIS_ROOT_PATH/config/redis.conf" + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/redis/7.2.5/scripts/uninstall.sh b/apps/redis/7.2.5/scripts/uninstall.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/redis/7.2.5/scripts/uninstall.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/redis/7.2.5/scripts/upgrade.sh b/apps/redis/7.2.5/scripts/upgrade.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/redis/7.2.5/scripts/upgrade.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." 
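+  # Assumption: the panel generates this .env from the data.yml form fields at install
+  # time, so a missing .env usually means the app directory is incomplete; in that case
+  # the script only reports the error above and performs no upgrade steps.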
+fi diff --git a/apps/redis/README.md b/apps/redis/README.md new file mode 100644 index 00000000..9bf7b4d4 --- /dev/null +++ b/apps/redis/README.md @@ -0,0 +1,17 @@ +# Redis + +Redis(Remote Dictionary Server)是一种开源的内存数据库,通常用作缓存系统或键值存储数据库。 + +![Redis](https://raw.githubusercontent.com/docker-library/docs/0e42ee108b46e1ba6333e9eb44201b8f26c4032d/redis/logo.png) + +## 简介 + +Redis 通常被称为数据结构服务器。这意味着,Redis 可通过一系列命令访问可变数据结构,这些命令通过 TCP +套接字和简单协议以服务器-客户端模式发送。因此,不同进程可以通过共享方式查询和修改相同的数据结构。 + +Redis 中的数据结构具有一些特殊属性: + ++ Redis 会将它们存储在磁盘上,即使它们总是在服务器内存中提供和修改。这意味着 Redis 不仅速度快,而且不易挥发。 ++ 数据结构的实现强调内存效率,因此与使用高级编程语言建模的相同数据结构相比,Redis 中的数据结构使用的内存可能更少。 ++ Redis 提供了许多在数据库中自然能找到的功能,如复制、可调整的耐用性级别、集群和高可用性。 + 另一个很好的例子是,将 Redis 视为更复杂版本的 memcached,其中的操作不仅仅是 SET 和 GET,而是处理复杂数据类型(如列表、集合、有序数据结构等)的操作。 diff --git a/apps/redis/data.yml b/apps/redis/data.yml new file mode 100644 index 00000000..a0854885 --- /dev/null +++ b/apps/redis/data.yml @@ -0,0 +1,19 @@ +name: Redis +title: 高性能的开源键值数据库 +description: 高性能的开源键值数据库 +additionalProperties: + key: redis + name: Redis + tags: + - Database + - Runtime + - Storage + - Local + shortDescZh: 高性能的开源键值数据库 + shortDescEn: High-performance key-value database + type: runtime + crossVersionUpdate: true + limit: 0 + website: https://redis.io/ + github: https://github.com/redis/redis/ + document: https://redis.io/docs/ diff --git a/apps/redis/logo.png b/apps/redis/logo.png new file mode 100644 index 00000000..f8b930e9 Binary files /dev/null and b/apps/redis/logo.png differ diff --git a/apps/sentinel/1.8.6/data.yml b/apps/sentinel/1.8.6/data.yml new file mode 100644 index 00000000..15145809 --- /dev/null +++ b/apps/sentinel/1.8.6/data.yml @@ -0,0 +1,60 @@ +additionalProperties: + formFields: + - default: 8858 + edit: true + envKey: PANEL_APP_PORT_HTTP + labelEn: Web port + labelZh: Web 端口 + required: true + rule: paramPort + type: number + - default: 8719 + edit: true + envKey: COMMUNICATION_PORT + labelEn: Communication port + labelZh: 通讯端口 + required: true + rule: paramPort + type: number + - default: "sentinel" + edit: true + envKey: SENTINEL_DASHBOARD_AUTH_USERNAME + labelEn: Dashboard username + labelZh: 控制台 用户名 + required: true + type: text + - default: "sentinel" + edit: true + envKey: SENTINEL_DASHBOARD_AUTH_PASSWORD + labelEn: Dashboard password + labelZh: 控制台 密码 + required: true + type: text + - default: "host" + edit: true + envKey: NETWORK_MODE + labelEn: Drive path + labelZh: 网络模式 + required: true + type: select + values: + - label: "Host" + value: "host" + - label: "Bridge" + value: "bridge" + - label: "None" + value: "none" + - label: "1panel-network" + value: "1panel-network" + - default: "false" + edit: true + envKey: SENTINEL_DASHBOARD_FEIGN_ENABLED + labelEn: Feign enabled + labelZh: Feign 支持是否启用 + required: false + type: select + values: + - label: "true" + value: "true" + - label: "false" + value: "false" diff --git a/apps/sentinel/1.8.6/docker-compose.yml b/apps/sentinel/1.8.6/docker-compose.yml new file mode 100644 index 00000000..61ab2f36 --- /dev/null +++ b/apps/sentinel/1.8.6/docker-compose.yml @@ -0,0 +1,29 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +services: + "sentinel-dashboard": + image: bladex/sentinel-dashboard:1.8.6 + container_name: ${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + network_mode: ${NETWORK_MODE} + ports: + - ${PANEL_APP_PORT_HTTP}:8858 + - ${COMMUNICATION_PORT}:8719 + entrypoint: + - "java" + - "-Djava.security.egd=file:/dev/./urandom" + - "-Dserver.port=8858" + - "-Dcsp.sentinel.api.port=8719" + - 
"-Dcsp.sentinel.dashboard.server=localhost:8858" + - "-Dproject.name=sentinel-dashboard" + - "-Dsentinel.dashboard.auth.username=${SENTINEL_DASHBOARD_AUTH_USERNAME}" + - "-Dsentinel.dashboard.auth.password=${SENTINEL_DASHBOARD_AUTH_PASSWORD}" + - "-Dfeign.sentinel.enabled=${SENTINEL_DASHBOARD_FEIGN_ENABLED}" + - "-jar" + - "app.jar" diff --git a/apps/sentinel/1.8.6/scripts/init.sh b/apps/sentinel/1.8.6/scripts/init.sh new file mode 100644 index 00000000..b4e37318 --- /dev/null +++ b/apps/sentinel/1.8.6/scripts/init.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [[ -f .env ]]; then + source .env + + echo "Check Finish." + +else + echo ".env not found." +fi diff --git a/apps/sentinel/1.8.6/scripts/upgrade.sh b/apps/sentinel/1.8.6/scripts/upgrade.sh new file mode 100644 index 00000000..b4e37318 --- /dev/null +++ b/apps/sentinel/1.8.6/scripts/upgrade.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [[ -f .env ]]; then + source .env + + echo "Check Finish." + +else + echo ".env not found." +fi diff --git a/apps/sentinel/README.md b/apps/sentinel/README.md new file mode 100644 index 00000000..c59ae8bd --- /dev/null +++ b/apps/sentinel/README.md @@ -0,0 +1,47 @@ +# Sentinel-Dashboard + +随着微服务的流行,服务和服务之间的稳定性变得越来越重要。Sentinel是面向分布式、多语言异构化服务架构的流量治理组件,主要以流量为切入点,从流量路由、流量控制、流量整形、熔断降级、系统自适应过载保护、热点流量防护等多个维度来帮助开发者保障微服务的稳定性。 + +## 快速启动 + +访问地址 +`http://IP:8858/sentinel` + +> 用户名 +> sentinel +> +> 密码 +> sentinel + +## Sentinel 的历史 + ++ 2012 年,Sentinel 诞生,主要功能为入口流量控制。 ++ 2013-2017 年,Sentinel 在阿里巴巴集团内部迅速发展,成为基础技术模块,覆盖了所有的核心场景。Sentinel 也因此积累了大量的流量归整场景以及生产实践。 ++ 2018 年,Sentinel 开源,并持续演进。 ++ 2019 年,Sentinel 朝着多语言扩展的方向不断探索,推出 C++ 原生版本,同时针对 Service Mesh 场景也推出了 Envoy 集群流量控制支持,以解决 + Service Mesh 架构下多语言限流的问题。 ++ 2020 年,推出 Sentinel Go 版本,继续朝着云原生方向演进。 ++ 2021 年,Sentinel 正在朝着 2.0 云原生高可用决策中心组件进行演进;同时推出了 Sentinel Rust 原生版本。同时我们也在 Rust + 社区进行了 Envoy WASM extension 及 eBPF extension 等场景探索。 ++ 2022 年,Sentinel 品牌升级为流量治理,领域涵盖流量路由/调度、流量染色、流控降级、过载保护/实例摘除等;同时社区将流量治理相关标准抽出到 + OpenSergo 标准中,Sentinel 作为流量治理标准实现。 + +## Sentinel 基本概念 + +### 资源 + +资源是 Sentinel 的关键概念。它可以是 Java 应用程序中的任何内容,例如,由应用程序提供的服务,或由应用程序调用的其它应用提供的服务,甚至可以是一段代码。在接下来的文档中,我们都会用资源来描述代码块。 + +只要通过 Sentinel API 定义的代码,就是资源,能够被 Sentinel 保护起来。大部分情况下,可以使用方法签名,URL,甚至服务名称作为资源名来标示资源。 + +### 规则 + +围绕资源的实时状态设定的规则,可以包括流量控制规则、熔断降级规则以及系统保护规则。所有规则可以动态实时调整。 + +## Sentinel 是如何工作的 + +Sentinel 的主要工作机制如下: + ++ 对主流框架提供适配或者显示的 API,来定义需要保护的资源,并提供设施对资源进行实时统计和调用链路分析。 ++ 根据预设的规则,结合对资源的实时统计信息,对流量进行控制。同时,Sentinel 提供开放的接口,方便您定义及改变规则。 ++ Sentinel 提供实时的监控系统,方便您快速了解目前系统的状态。 diff --git a/apps/sentinel/data.yml b/apps/sentinel/data.yml new file mode 100644 index 00000000..c021a190 --- /dev/null +++ b/apps/sentinel/data.yml @@ -0,0 +1,19 @@ +name: sentinel-dashboard +tags: + - 中间件 +title: Sentinel-Dashboard +type: 中间件 +description: 阿里巴巴流量卫兵 +additionalProperties: + key: sentinel-dashboard + name: Sentinel-Dashboard + tags: + - Middleware + shortDescZh: 阿里巴巴流量卫兵 + shortDescEn: Alibaba Traffic Guard + type: runtime + crossVersionUpdate: true + limit: 0 + website: https://sentinelguard.io/ + github: https://github.com/alibaba/Sentinel + document: https://sentinelguard.io/zh-cn/docs/introduction.html diff --git a/apps/sentinel/logo.png b/apps/sentinel/logo.png new file mode 100644 index 00000000..3f493afc Binary files /dev/null and b/apps/sentinel/logo.png differ diff --git a/apps/siyuan/3.1.1/data.yml b/apps/siyuan/3.1.1/data.yml new file mode 100644 index 00000000..c3abcfb6 --- /dev/null +++ b/apps/siyuan/3.1.1/data.yml @@ -0,0 +1,25 @@ +additionalProperties: + formFields: + - default: "/home/siyuan" + edit: true + envKey: 
SIYUAN_ROOT_PATH + labelZh: 数据持久化路径 + labelEn: Data persistence path + required: true + type: text + - default: 6806 + edit: true + envKey: PANEL_APP_PORT_HTTP + labelZh: WebUI 端口 + labelEn: WebUI port + required: true + rule: paramPort + type: number + - default: "" + edit: true + envKey: AuthCode + labelZh: 授权码 + labelEn: Access Auth Code + required: true + rule: paramComplexity + type: password diff --git a/apps/siyuan/3.1.1/docker-compose.yml b/apps/siyuan/3.1.1/docker-compose.yml new file mode 100644 index 00000000..3b22f38a --- /dev/null +++ b/apps/siyuan/3.1.1/docker-compose.yml @@ -0,0 +1,25 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +services: + siyuan: + image: b3log/siyuan:v3.1.1 + container_name: ${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + networks: + - 1panel-network + command: [ '--workspace=/siyuan/workspace/', '--accessAuthCode=${AuthCode}' ] + user: 1000:1000 + ports: + - ${PANEL_APP_PORT_HTTP}:6806 + env_file: + - /etc/1panel/envs/global.env + volumes: + - ${SIYUAN_ROOT_PATH}/workspace:/siyuan/workspace + environment: + - RUN_IN_CONTAINER=true diff --git a/apps/siyuan/3.1.1/scripts/init.sh b/apps/siyuan/3.1.1/scripts/init.sh new file mode 100644 index 00000000..801a7489 --- /dev/null +++ b/apps/siyuan/3.1.1/scripts/init.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + if [ ! -d $SIYUAN_ROOT_PATH ]; then + mkdir -p $SIYUAN_ROOT_PATH + fi + + if [ ! -d $SIYUAN_ROOT_PATH/workspace ]; then + mkdir -p $SIYUAN_ROOT_PATH/workspace + fi + + chown -R 1000:1000 $SIYUAN_ROOT_PATH + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/siyuan/3.1.1/scripts/uninstall.sh b/apps/siyuan/3.1.1/scripts/uninstall.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/siyuan/3.1.1/scripts/uninstall.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/siyuan/3.1.1/scripts/upgrade.sh b/apps/siyuan/3.1.1/scripts/upgrade.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/siyuan/3.1.1/scripts/upgrade.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." 
+fi diff --git a/apps/siyuan/README.md b/apps/siyuan/README.md new file mode 100644 index 00000000..3bb18a5b --- /dev/null +++ b/apps/siyuan/README.md @@ -0,0 +1,48 @@ +# 思源笔记 + +![思源笔记](https://file.lifebus.top/imgs/siyuan_b3log_cover.png) + +## 简介 + +重构你的思维 + +**所见所得 双链块引** + +**加密同步 隐私优先** + +思源笔记是一款隐私优先的个人知识管理系统,支持完全离线使用,同时也支持端到端加密同步。 + +融合块、大纲和双向链接,重构你的思维。 + +## 反向代理 + +Nginx 配置 WebSocket 反向代理: + +```shell +location /ws { + proxy_pass http://localhost:6806; + proxy_read_timeout 60s; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection 'Upgrade'; +} +``` + +## 常见问题 + +### 数据存储 + +数据保存在工作空间文件夹下,在工作空间 data 文件夹下: + +| 文件夹 | 说明 | +|-----------|-------------------| +| assets | 保存所有插入的资源文件 | +| emojis | 用于保存自定义图标表情图片 | +| snippets | 用于保存代码片段 | +| storage | 用于保存查询条件、布局和闪卡数据等 | +| templates | 用于保存模板片段 | +| widgets | 用于保存挂件 | +| plugins | 用于保存插件 | +| public | 用于保存公开的数据 | + +> 其余文件夹就是用户自己创建的笔记本文件夹,笔记本文件夹下 .sy 后缀的文件用于保存文档数据,数据格式为 JSON diff --git a/apps/siyuan/data.yml b/apps/siyuan/data.yml new file mode 100644 index 00000000..f9a82324 --- /dev/null +++ b/apps/siyuan/data.yml @@ -0,0 +1,18 @@ +name: 思源笔记 +title: 隐私优先的个人知识管理系统 +description: 隐私优先的个人知识管理系统 +additionalProperties: + key: siyuan + name: 思源笔记 + tags: + - WebSite + - Storage + - Local + shortDescZh: 隐私优先的个人知识管理系统 + shortDescEn: A privacy-first personal knowledge management system + type: website + crossVersionUpdate: true + limit: 0 + website: https://b3log.org/ + github: https://github.com/siyuan-note/siyuan/ + document: https://b3log.org/siyuan/ diff --git a/apps/siyuan/logo.png b/apps/siyuan/logo.png new file mode 100644 index 00000000..91330759 Binary files /dev/null and b/apps/siyuan/logo.png differ diff --git a/apps/speedtest-tracker/0.18.3/data.yml b/apps/speedtest-tracker/0.18.3/data.yml new file mode 100644 index 00000000..c5892dc5 --- /dev/null +++ b/apps/speedtest-tracker/0.18.3/data.yml @@ -0,0 +1,176 @@ +additionalProperties: + formFields: + - default: "lscr.io/linuxserver/speedtest-tracker:0.18.3" + edit: true + envKey: SPEED_TEST_IMAGE + labelEn: Image source + labelZh: 镜像源 + required: true + type: select + values: + - label: "LinuxServer" + value: "lscr.io/linuxserver/speedtest-tracker:0.18.3" + - label: "GitHub" + value: "ghcr.io/alexjustesen/speedtest-tracker:v0.18.3" + - default: "/home/speedtest-tracker" + edit: true + envKey: SP_TRACKER_ROOT_PATH + labelEn: Data persistence root path + labelZh: 数据持久化 根路径 + required: true + type: text + - default: 8080 + edit: true + envKey: PANEL_APP_PORT_HTTP + labelEn: Server port + labelZh: Web 服务端口 + required: true + rule: paramPort + type: number + - default: 8443 + edit: true + envKey: PANEL_APP_PORT_HTTPS + labelEn: Server port + labelZh: SSL 服务端口 + required: true + rule: paramPort + type: number + - default: "Asia/Shanghai" + edit: true + envKey: SP_TRACKER_TZ + labelEn: Timezone + labelZh: 时区 + required: false + type: text + - default: "sqlite" + edit: true + envKey: SP_TRACKER_DB_TYPE + labelEn: Database type + labelZh: 数据库驱动类型 + required: true + type: select + values: + - label: MySQL (MariaDB) + value: "mysql" + - label: SQLite + value: "sqlite" + - label: PostgreSQL + value: "pgsql" + - default: "localhost" + edit: true + envKey: SP_TRACKER_DB_HOST + labelEn: Database host IP + labelZh: 数据库主机IP + required: false + type: text + - default: 3306 + edit: true + envKey: SP_TRACKER_DB_PORT + labelEn: Database Port (default 3306) + labelZh: 数据库端口 (默认3306) + required: false + rule: paramPort + type: number + - default: 
"speedtest_tracker" + edit: true + envKey: SP_TRACKER_DB_USER + labelEn: Database Connection Username + labelZh: 数据库 用户名 + required: false + type: text + - default: "speedtest_tracker" + edit: true + envKey: SP_TRACKER_DB_PASSWORD + labelEn: Database Connection Password + labelZh: 数据库 用户名密码 + required: false + type: password + - default: "speedtest_tracker" + edit: true + envKey: MYSQL_SERVICE_DB_NAME + labelEn: Database Name + labelZh: 数据库名称 + required: false + type: text + - default: "" + edit: true + envKey: SP_TRACKER_APP_KEY + labelEn: Application Key + labelZh: 加密存储数据的应用程序密钥 (Base64 开头) + required: false + type: text + - default: 1000 + edit: true + envKey: PUID + labelEn: User ID + labelZh: 用户ID + required: false + type: number + - default: 1000 + edit: true + envKey: PGID + labelEn: Group ID + labelZh: 组ID + required: false + type: number + - default: "smtp" + edit: true + envKey: MAIL_MAILER + labelEn: Mailer + labelZh: 协议 (SMTP) + required: false + type: text + - default: "smtp.163.com" + edit: true + envKey: MAIL_HOST + labelEn: Mail Host + labelZh: 邮件服务器地址 (SMTP) + required: false + type: text + - default: "" + edit: true + envKey: MAIL_USERNAME + labelEn: Mail Username + labelZh: 邮件用户名 (SMTP) + required: false + type: text + - default: "" + edit: true + envKey: MAIL_PASSWORD + labelEn: Mail Password + labelZh: 邮件账户密码 (SMTP) + required: false + type: text + - default: "ssl" + edit: true + envKey: MAIL_ENCRYPTION + labelEn: Mail Encryption + labelZh: 邮件加密方式 (SMTP) + required: false + type: select + values: + - label: "SSL" + value: "ssl" + - label: "TLS" + value: "tls" + - default: "" + edit: true + envKey: MAIL_FROM_ADDRESS + labelEn: Mail From Address + labelZh: 邮件发送地址 (SMTP) + required: false + type: text + - default: "" + edit: true + envKey: MAIL_FROM_NAME + labelEn: Mail From Name + labelZh: 邮件发送者名称 (SMTP) + required: false + type: text + - default: "" + edit: true + envKey: TELEGRAM_BOT_TOKEN + labelEn: Bot Token ID (Telegram Notification) + labelZh: 机器人的令牌 ID (通知 Telegram) + required: false + type: text diff --git a/apps/speedtest-tracker/0.18.3/docker-compose.yml b/apps/speedtest-tracker/0.18.3/docker-compose.yml new file mode 100644 index 00000000..82c5a569 --- /dev/null +++ b/apps/speedtest-tracker/0.18.3/docker-compose.yml @@ -0,0 +1,36 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +services: + speedtest-tracker: + image: ${SPEED_TEST_IMAGE} + container_name: ${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + networks: + - 1panel-network + ports: + - ${PANEL_APP_PORT_HTTP}:80 + - ${PANEL_APP_PORT_HTTPS}:443 + environment: + - PUID=${PUID:-1000} + - PGID=${PGID:-1000} + - TZ=${SP_TRACKER_TZ:-Asia/Shanghai} + - DB_CONNECTION=${SP_TRACKER_DB_TYPE:-sqlite} + - DB_HOST=${SP_TRACKER_DB_HOST} + - DB_PORT=${SP_TRACKER_DB_PORT} + - DB_DATABASE=${MYSQL_SERVICE_DB_NAME} + - DB_USERNAME=${SP_TRACKER_DB_USER} + - DB_PASSWORD=${SP_TRACKER_DB_PASSWORD} + volumes: + - ${SP_TRACKER_ROOT_PATH}/config:/config + healthcheck: + test: curl -fSs APP_URL/api/healthcheck || exit 1 + interval: 10s + retries: 3 + start_period: 30s + timeout: 10s diff --git a/apps/speedtest-tracker/0.18.3/scripts/init.sh b/apps/speedtest-tracker/0.18.3/scripts/init.sh new file mode 100644 index 00000000..5a4fb865 --- /dev/null +++ b/apps/speedtest-tracker/0.18.3/scripts/init.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +if [[ -f .env ]]; then + source .env + + keys + mkdir -p "$SP_TRACKER_ROOT_PATH" + + mkdir -p "$SP_TRACKER_ROOT_PATH/conf" + mkdir -p 
"$SP_TRACKER_ROOT_PATH/conf/keys" + + chmod $PUID:$PGID -R "$SP_TRACKER_ROOT_PATH" + + echo "Check Finish." + +else + echo ".env not found." +fi diff --git a/apps/speedtest-tracker/0.18.3/scripts/upgrade.sh b/apps/speedtest-tracker/0.18.3/scripts/upgrade.sh new file mode 100644 index 00000000..b4e37318 --- /dev/null +++ b/apps/speedtest-tracker/0.18.3/scripts/upgrade.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [[ -f .env ]]; then + source .env + + echo "Check Finish." + +else + echo ".env not found." +fi diff --git a/apps/speedtest-tracker/0.18.7/data.yml b/apps/speedtest-tracker/0.18.7/data.yml new file mode 100644 index 00000000..8cd281ef --- /dev/null +++ b/apps/speedtest-tracker/0.18.7/data.yml @@ -0,0 +1,176 @@ +additionalProperties: + formFields: + - default: "lscr.io/linuxserver/speedtest-tracker:0.18.7" + edit: true + envKey: SPEED_TEST_IMAGE + labelEn: Image source + labelZh: 镜像源 + required: true + type: select + values: + - label: "LinuxServer" + value: "lscr.io/linuxserver/speedtest-tracker:0.18.7" + - label: "GitHub" + value: "ghcr.io/alexjustesen/speedtest-tracker:v0.18.7" + - default: "/home/speedtest-tracker" + edit: true + envKey: SP_TRACKER_ROOT_PATH + labelEn: Data persistence root path + labelZh: 数据持久化 根路径 + required: true + type: text + - default: 8080 + edit: true + envKey: PANEL_APP_PORT_HTTP + labelEn: Server port + labelZh: Web 服务端口 + required: true + rule: paramPort + type: number + - default: 8443 + edit: true + envKey: PANEL_APP_PORT_HTTPS + labelEn: Server port + labelZh: SSL 服务端口 + required: true + rule: paramPort + type: number + - default: "Asia/Shanghai" + edit: true + envKey: SP_TRACKER_TZ + labelEn: Timezone + labelZh: 时区 + required: false + type: text + - default: "sqlite" + edit: true + envKey: SP_TRACKER_DB_TYPE + labelEn: Database type + labelZh: 数据库驱动类型 + required: true + type: select + values: + - label: MySQL (MariaDB) + value: "mysql" + - label: SQLite + value: "sqlite" + - label: PostgreSQL + value: "pgsql" + - default: "localhost" + edit: true + envKey: SP_TRACKER_DB_HOST + labelEn: Database host IP + labelZh: 数据库主机IP + required: false + type: text + - default: 3306 + edit: true + envKey: SP_TRACKER_DB_PORT + labelEn: Database Port (default 3306) + labelZh: 数据库端口 (默认3306) + required: false + rule: paramPort + type: number + - default: "speedtest_tracker" + edit: true + envKey: SP_TRACKER_DB_USER + labelEn: Database Connection Username + labelZh: 数据库 用户名 + required: false + type: text + - default: "speedtest_tracker" + edit: true + envKey: SP_TRACKER_DB_PASSWORD + labelEn: Database Connection Password + labelZh: 数据库 用户名密码 + required: false + type: password + - default: "speedtest_tracker" + edit: true + envKey: MYSQL_SERVICE_DB_NAME + labelEn: Database Name + labelZh: 数据库名称 + required: false + type: text + - default: "" + edit: true + envKey: SP_TRACKER_APP_KEY + labelEn: Application Key + labelZh: 加密存储数据的应用程序密钥 (Base64 开头) + required: false + type: text + - default: 1000 + edit: true + envKey: PUID + labelEn: User ID + labelZh: 用户ID + required: false + type: number + - default: 1000 + edit: true + envKey: PGID + labelEn: Group ID + labelZh: 组ID + required: false + type: number + - default: "smtp" + edit: true + envKey: MAIL_MAILER + labelEn: Mailer + labelZh: 协议 (SMTP) + required: false + type: text + - default: "smtp.163.com" + edit: true + envKey: MAIL_HOST + labelEn: Mail Host + labelZh: 邮件服务器地址 (SMTP) + required: false + type: text + - default: "" + edit: true + envKey: MAIL_USERNAME + labelEn: Mail Username + labelZh: 邮件用户名 (SMTP) + required: 
false + type: text + - default: "" + edit: true + envKey: MAIL_PASSWORD + labelEn: Mail Password + labelZh: 邮件账户密码 (SMTP) + required: false + type: text + - default: "ssl" + edit: true + envKey: MAIL_ENCRYPTION + labelEn: Mail Encryption + labelZh: 邮件加密方式 (SMTP) + required: false + type: select + values: + - label: "SSL" + value: "ssl" + - label: "TLS" + value: "tls" + - default: "" + edit: true + envKey: MAIL_FROM_ADDRESS + labelEn: Mail From Address + labelZh: 邮件发送地址 (SMTP) + required: false + type: text + - default: "" + edit: true + envKey: MAIL_FROM_NAME + labelEn: Mail From Name + labelZh: 邮件发送者名称 (SMTP) + required: false + type: text + - default: "" + edit: true + envKey: TELEGRAM_BOT_TOKEN + labelEn: Bot Token ID (Telegram Notification) + labelZh: 机器人的令牌 ID (通知 Telegram) + required: false + type: text diff --git a/apps/speedtest-tracker/0.18.7/docker-compose.yml b/apps/speedtest-tracker/0.18.7/docker-compose.yml new file mode 100644 index 00000000..82c5a569 --- /dev/null +++ b/apps/speedtest-tracker/0.18.7/docker-compose.yml @@ -0,0 +1,36 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +services: + speedtest-tracker: + image: ${SPEED_TEST_IMAGE} + container_name: ${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + networks: + - 1panel-network + ports: + - ${PANEL_APP_PORT_HTTP}:80 + - ${PANEL_APP_PORT_HTTPS}:443 + environment: + - PUID=${PUID:-1000} + - PGID=${PGID:-1000} + - TZ=${SP_TRACKER_TZ:-Asia/Shanghai} + - DB_CONNECTION=${SP_TRACKER_DB_TYPE:-sqlite} + - DB_HOST=${SP_TRACKER_DB_HOST} + - DB_PORT=${SP_TRACKER_DB_PORT} + - DB_DATABASE=${MYSQL_SERVICE_DB_NAME} + - DB_USERNAME=${SP_TRACKER_DB_USER} + - DB_PASSWORD=${SP_TRACKER_DB_PASSWORD} + volumes: + - ${SP_TRACKER_ROOT_PATH}/config:/config + healthcheck: + test: curl -fSs APP_URL/api/healthcheck || exit 1 + interval: 10s + retries: 3 + start_period: 30s + timeout: 10s diff --git a/apps/speedtest-tracker/0.18.7/scripts/init.sh b/apps/speedtest-tracker/0.18.7/scripts/init.sh new file mode 100644 index 00000000..5a4fb865 --- /dev/null +++ b/apps/speedtest-tracker/0.18.7/scripts/init.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +if [[ -f .env ]]; then + source .env + + keys + mkdir -p "$SP_TRACKER_ROOT_PATH" + + mkdir -p "$SP_TRACKER_ROOT_PATH/conf" + mkdir -p "$SP_TRACKER_ROOT_PATH/conf/keys" + + chmod $PUID:$PGID -R "$SP_TRACKER_ROOT_PATH" + + echo "Check Finish." + +else + echo ".env not found." +fi diff --git a/apps/speedtest-tracker/0.18.7/scripts/upgrade.sh b/apps/speedtest-tracker/0.18.7/scripts/upgrade.sh new file mode 100644 index 00000000..b4e37318 --- /dev/null +++ b/apps/speedtest-tracker/0.18.7/scripts/upgrade.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [[ -f .env ]]; then + source .env + + echo "Check Finish." + +else + echo ".env not found." 
+fi diff --git a/apps/speedtest-tracker/0.19.0/data.yml b/apps/speedtest-tracker/0.19.0/data.yml new file mode 100644 index 00000000..afba7aa8 --- /dev/null +++ b/apps/speedtest-tracker/0.19.0/data.yml @@ -0,0 +1,176 @@ +additionalProperties: + formFields: + - default: "lscr.io/linuxserver/speedtest-tracker:0.19.0" + edit: true + envKey: SPEED_TEST_IMAGE + labelEn: Image source + labelZh: 镜像源 + required: true + type: select + values: + - label: "LinuxServer" + value: "lscr.io/linuxserver/speedtest-tracker:0.19.0" + - label: "GitHub" + value: "ghcr.io/alexjustesen/speedtest-tracker:v0.19.0" + - default: "/home/speedtest-tracker" + edit: true + envKey: SP_TRACKER_ROOT_PATH + labelEn: Data persistence root path + labelZh: 数据持久化 根路径 + required: true + type: text + - default: 8080 + edit: true + envKey: PANEL_APP_PORT_HTTP + labelEn: Server port + labelZh: Web 服务端口 + required: true + rule: paramPort + type: number + - default: 8443 + edit: true + envKey: PANEL_APP_PORT_HTTPS + labelEn: Server port + labelZh: SSL 服务端口 + required: true + rule: paramPort + type: number + - default: "Asia/Shanghai" + edit: true + envKey: SP_TRACKER_TZ + labelEn: Timezone + labelZh: 时区 + required: false + type: text + - default: "sqlite" + edit: true + envKey: SP_TRACKER_DB_TYPE + labelEn: Database type + labelZh: 数据库驱动类型 + required: true + type: select + values: + - label: MySQL (MariaDB) + value: "mysql" + - label: SQLite + value: "sqlite" + - label: PostgreSQL + value: "pgsql" + - default: "localhost" + edit: true + envKey: SP_TRACKER_DB_HOST + labelEn: Database host IP + labelZh: 数据库主机IP + required: false + type: text + - default: 3306 + edit: true + envKey: SP_TRACKER_DB_PORT + labelEn: Database Port (default 3306) + labelZh: 数据库端口 (默认3306) + required: false + rule: paramPort + type: number + - default: "speedtest_tracker" + edit: true + envKey: SP_TRACKER_DB_USER + labelEn: Database Connection Username + labelZh: 数据库 用户名 + required: false + type: text + - default: "speedtest_tracker" + edit: true + envKey: SP_TRACKER_DB_PASSWORD + labelEn: Database Connection Password + labelZh: 数据库 用户名密码 + required: false + type: password + - default: "speedtest_tracker" + edit: true + envKey: MYSQL_SERVICE_DB_NAME + labelEn: Database Name + labelZh: 数据库名称 + required: false + type: text + - default: "" + edit: true + envKey: SP_TRACKER_APP_KEY + labelEn: Application Key + labelZh: 加密存储数据的应用程序密钥 (Base64 开头) + required: false + type: text + - default: 1000 + edit: true + envKey: PUID + labelEn: User ID + labelZh: 用户ID + required: false + type: number + - default: 1000 + edit: true + envKey: PGID + labelEn: Group ID + labelZh: 组ID + required: false + type: number + - default: "smtp" + edit: true + envKey: MAIL_MAILER + labelEn: Mailer + labelZh: 协议 (SMTP) + required: false + type: text + - default: "smtp.163.com" + edit: true + envKey: MAIL_HOST + labelEn: Mail Host + labelZh: 邮件服务器地址 (SMTP) + required: false + type: text + - default: "" + edit: true + envKey: MAIL_USERNAME + labelEn: Mail Username + labelZh: 邮件用户名 (SMTP) + required: false + type: text + - default: "" + edit: true + envKey: MAIL_PASSWORD + labelEn: Mail Password + labelZh: 邮件账户密码 (SMTP) + required: false + type: text + - default: "ssl" + edit: true + envKey: MAIL_ENCRYPTION + labelEn: Mail Encryption + labelZh: 邮件加密方式 (SMTP) + required: false + type: select + values: + - label: "SSL" + value: "ssl" + - label: "TLS" + value: "tls" + - default: "" + edit: true + envKey: MAIL_FROM_ADDRESS + labelEn: Mail From Address + labelZh: 邮件发送地址 (SMTP) + required: false + 
type: text + - default: "" + edit: true + envKey: MAIL_FROM_NAME + labelEn: Mail From Name + labelZh: 邮件发送者名称 (SMTP) + required: false + type: text + - default: "" + edit: true + envKey: TELEGRAM_BOT_TOKEN + labelEn: Bot Token ID (Telegram Notification) + labelZh: 机器人的令牌 ID (通知 Telegram) + required: false + type: text diff --git a/apps/speedtest-tracker/0.19.0/docker-compose.yml b/apps/speedtest-tracker/0.19.0/docker-compose.yml new file mode 100644 index 00000000..82c5a569 --- /dev/null +++ b/apps/speedtest-tracker/0.19.0/docker-compose.yml @@ -0,0 +1,36 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +services: + speedtest-tracker: + image: ${SPEED_TEST_IMAGE} + container_name: ${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + networks: + - 1panel-network + ports: + - ${PANEL_APP_PORT_HTTP}:80 + - ${PANEL_APP_PORT_HTTPS}:443 + environment: + - PUID=${PUID:-1000} + - PGID=${PGID:-1000} + - TZ=${SP_TRACKER_TZ:-Asia/Shanghai} + - DB_CONNECTION=${SP_TRACKER_DB_TYPE:-sqlite} + - DB_HOST=${SP_TRACKER_DB_HOST} + - DB_PORT=${SP_TRACKER_DB_PORT} + - DB_DATABASE=${MYSQL_SERVICE_DB_NAME} + - DB_USERNAME=${SP_TRACKER_DB_USER} + - DB_PASSWORD=${SP_TRACKER_DB_PASSWORD} + volumes: + - ${SP_TRACKER_ROOT_PATH}/config:/config + healthcheck: + test: curl -fSs APP_URL/api/healthcheck || exit 1 + interval: 10s + retries: 3 + start_period: 30s + timeout: 10s diff --git a/apps/speedtest-tracker/0.19.0/scripts/init.sh b/apps/speedtest-tracker/0.19.0/scripts/init.sh new file mode 100644 index 00000000..5a4fb865 --- /dev/null +++ b/apps/speedtest-tracker/0.19.0/scripts/init.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +if [[ -f .env ]]; then + source .env + + keys + mkdir -p "$SP_TRACKER_ROOT_PATH" + + mkdir -p "$SP_TRACKER_ROOT_PATH/conf" + mkdir -p "$SP_TRACKER_ROOT_PATH/conf/keys" + + chmod $PUID:$PGID -R "$SP_TRACKER_ROOT_PATH" + + echo "Check Finish." + +else + echo ".env not found." +fi diff --git a/apps/speedtest-tracker/0.19.0/scripts/upgrade.sh b/apps/speedtest-tracker/0.19.0/scripts/upgrade.sh new file mode 100644 index 00000000..b4e37318 --- /dev/null +++ b/apps/speedtest-tracker/0.19.0/scripts/upgrade.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [[ -f .env ]]; then + source .env + + echo "Check Finish." + +else + echo ".env not found." 
+fi diff --git a/apps/speedtest-tracker/README.md b/apps/speedtest-tracker/README.md new file mode 100644 index 00000000..7ec2ddc1 --- /dev/null +++ b/apps/speedtest-tracker/README.md @@ -0,0 +1,11 @@ +# Speedtest Tracker + +Speedtest Tracker 是一款自托管互联网性能跟踪应用程序,可针对 Ookla 的 Speedtest 服务运行速度测试检查。 + +Speedtest Tracker 的主要用例是建立互联网性能的历史记录,以便您在未收到 ISP 公布的费率时收到通知。 + +## 默认账户 + +> admin@example.com +> +> password diff --git a/apps/speedtest-tracker/data.yml b/apps/speedtest-tracker/data.yml new file mode 100644 index 00000000..16ff0330 --- /dev/null +++ b/apps/speedtest-tracker/data.yml @@ -0,0 +1,19 @@ +name: SpeedTest-Tracker +tags: + - 工具 +title: SpeedTest-Tracker +type: 工具 +description: +additionalProperties: + key: speedtest-tracker + name: SpeedTest-Tracker + tags: + - Tool + shortDescZh: 托管互联网性能跟踪应用程序 + shortDescEn: Hosted internet performance tracking application + type: tool + crossVersionUpdate: true + limit: 0 + website: https://docs.speedtest-tracker.dev/ + github: https://github.com/alexjustesen/speedtest-tracker + document: https://docs.speedtest-tracker.dev/ diff --git a/apps/speedtest-tracker/logo.png b/apps/speedtest-tracker/logo.png new file mode 100644 index 00000000..568581de Binary files /dev/null and b/apps/speedtest-tracker/logo.png differ diff --git a/apps/speedtest/5.3.3/data.yml b/apps/speedtest/5.3.3/data.yml new file mode 100644 index 00000000..666fc367 --- /dev/null +++ b/apps/speedtest/5.3.3/data.yml @@ -0,0 +1,100 @@ +additionalProperties: + formFields: + - default: "/home/speedtest" + edit: true + envKey: SPEED_TEST_ROOT_PATH + labelZh: 数据持久化路径 + labelEn: Data persistence path + required: true + type: text + - default: 2283 + edit: true + envKey: PANEL_APP_PORT_HTTP + labelZh: WebUI 端口 + labelEn: WebUI port + required: true + rule: paramPort + type: number + - default: "LibreSpeed" + edit: true + envKey: TITLE + labelZh: 网页标题 + labelEn: Web title + required: true + type: text + - default: "false" + edit: true + envKey: TELEMETRY + labelZh: 启用遥测 + labelEn: Enable telemetry + required: true + type: select + values: + - label: "启用" + value: "true" + - label: "禁用" + value: "false" + - default: "" + edit: true + envKey: PASSWORD + labelZh: 访问密码 (开启遥测) + labelEn: Access password (Enable telemetry) + required: true + type: text + - default: "false" + edit: true + envKey: ENABLE_ID_OBFUSCATION + labelZh: ID 混淆 (开启遥测) + labelEn: ID obfuscation (Enable telemetry) + required: true + type: select + values: + - label: "启用" + value: "true" + - label: "禁用" + value: "false" + - default: "false" + edit: true + envKey: REDACT_IP_ADDRESSES + labelZh: 隐藏 IP 地址 (开启遥测) + labelEn: Hide IP addresses (Enable telemetry) + required: true + type: select + values: + - label: "启用" + value: "true" + - label: "禁用" + value: "false" + - default: "" + edit: true + envKey: EMAIL + labelZh: 邮箱地址 (开启遥测) + labelEn: Email address (Enable telemetry) + required: false + type: text + - default: "" + edit: true + envKey: IPINFO_APIKEY + labelZh: ipinfo.io 的 API 密钥 (开启遥测) + labelEn: ipinfo.io API key (Enable telemetry) + required: false + type: text + - default: "false" + edit: true + envKey: DISABLE_IPINFO + labelZh: 禁用 ipinfo.io (开启遥测) + labelEn: Disable ipinfo.io (Enable telemetry) + required: true + type: select + values: + - label: "启用" + value: "true" + - label: "禁用" + value: "false" + - default: "km" + edit: true + envKey: DISTANCE + labelZh: 距离单位 (km/mi) + labelEn: Distance unit (km/mi) + required: false + type: text diff --git a/apps/speedtest/5.3.3/docker-compose.yml 
b/apps/speedtest/5.3.3/docker-compose.yml new file mode 100644 index 00000000..4aca583d --- /dev/null +++ b/apps/speedtest/5.3.3/docker-compose.yml @@ -0,0 +1,24 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +services: + speedtest: + image: ghcr.io/librespeed/speedtest:5.3.3 + container_name: ${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + networks: + - 1panel-network + ports: + - ${PANEL_APP_PORT_HTTP}:80 + volumes: + - ${SPEED_TEST_ROOT_PATH}/database:/database + environment: + - MODE=standalone + - WEBPORT=80 + env_file: + - .env diff --git a/apps/speedtest/5.3.3/scripts/init.sh b/apps/speedtest/5.3.3/scripts/init.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/speedtest/5.3.3/scripts/init.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/speedtest/5.3.3/scripts/uninstall.sh b/apps/speedtest/5.3.3/scripts/uninstall.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/speedtest/5.3.3/scripts/uninstall.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/speedtest/5.3.3/scripts/upgrade.sh b/apps/speedtest/5.3.3/scripts/upgrade.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/speedtest/5.3.3/scripts/upgrade.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/speedtest/README.md b/apps/speedtest/README.md new file mode 100644 index 00000000..6a166334 --- /dev/null +++ b/apps/speedtest/README.md @@ -0,0 +1,50 @@ +# SpeedTest + +没有 Flash,没有 Java,没有 Websocket,没有废话。 + +这是一个用 Javascript 实现的非常轻量级的速度测试,使用 XMLHttpRequest 和 Web Workers。 + +![SpeedTest](https://github.com/librespeed/speedtest/blob/master/.logo/logo3.png) + +## 特性 + ++ 下载 ++ 上传 ++ Ping ++ 抖动 ++ IP 地址、ISP、距服务器的距离(可选) ++ 遥测(可选) ++ 结果共享(可选) ++ 多点测试(可选) + +## 安装说明 + ++ `访问密码` + +前提:`开启遥测` + +访问统计页面的密码。如果未设置,统计页面将不允许访问。 + +开启遥测后: `http://127.0.0.1/results/stats.php` 处将提供统计页面 + ++ `隐藏 IP 地址 (开启遥测)` + +启用遥测时,将从收集的遥测中编辑 IP 地址和主机名,以实现更好的隐私。 + ++ `邮箱地址 (开启遥测)` + +GDPR 请求的电子邮件地址。启用遥测时必须指定。 + ++ `ipinfo.io 的 API 密钥 (开启遥测)` + +如果您希望提供大量测试,则为必需。否则,ipinfo.io 将限制您的访问。 + ++ `禁用 ipinfo.io (开启遥测)` + +禁用后,则不会从 ipinfo.io 获取 ISP 信息和距离。 + ++ `距离单位 (km/mi)` + +如果禁用 `禁用 ipinfo.io (开启遥测)`,此项决定如何测量距服务器的距离。 + +`km` 代表公里, `mi` 代表英里,也可以是空字符串以禁用距离测量。 diff --git a/apps/speedtest/data.yml b/apps/speedtest/data.yml new file mode 100644 index 00000000..92c30a6a --- /dev/null +++ b/apps/speedtest/data.yml @@ -0,0 +1,18 @@ +name: SpeedTest +title: SpeedTest +description: 轻量级的速度测试 +additionalProperties: + key: speedtest + name: SpeedTest + tags: + - WebSite + - Tool + - Local + shortDescZh: 轻量级的速度测试 + shortDescEn: Lightweight speed test + type: website + crossVersionUpdate: true + limit: 0 + website: https://librespeed.org/ + github: https://github.com/librespeed/speedtest/ + document: https://github.com/librespeed/speedtest/ diff --git a/apps/speedtest/logo.png b/apps/speedtest/logo.png new file mode 100644 index 00000000..33a1bde6 Binary files /dev/null and b/apps/speedtest/logo.png differ diff --git a/apps/stream-rec-backend/0.6.9/data.yml b/apps/stream-rec-backend/0.6.9/data.yml new file mode 100644 index 00000000..08ef890a --- /dev/null +++ b/apps/stream-rec-backend/0.6.9/data.yml @@ -0,0 +1,32 @@ +additionalProperties: + 
formFields: + - default: "/home/stream-rec" + edit: true + envKey: STREAM_REC_ROOT_PATH + labelZh: 数据持久化路径 + labelEn: Data persistence path + required: true + type: text + - default: 12555 + edit: true + envKey: PANEL_APP_PORT_HTTP + labelZh: API 端口 + labelEn: API port + required: true + rule: paramPort + type: number + - default: "stream-rec" + edit: false + envKey: LOGIN_SECRET + labelZh: 登录密码 (初始化) + labelEn: Login Password (Initialization) + required: true + random: true + type: text + - default: "" + edit: true + envKey: HTTP_PROXY + labelZh: 网络代理 + labelEn: Network Proxy + required: false + type: text diff --git a/apps/stream-rec-backend/0.6.9/docker-compose.yml b/apps/stream-rec-backend/0.6.9/docker-compose.yml new file mode 100644 index 00000000..6e547c86 --- /dev/null +++ b/apps/stream-rec-backend/0.6.9/docker-compose.yml @@ -0,0 +1,27 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +services: + stream-rec-backend: + image: streamrec/stream-rec:v0.6.9 + container_name: ${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + networks: + - 1panel-network + ports: + - ${PANEL_APP_PORT_HTTP}:12555 + volumes: + - ${STREAM_REC_ROOT_PATH}/records:/opt/records + - ${STREAM_REC_ROOT_PATH}/download:/download + environment: + - TZ=Asia/Shanghai + - LOG_LEVEL=INFO + - DB_PATH=/opt/records + - DOWNLOAD_PATH=/download + env_file: + - .env diff --git a/apps/stream-rec-backend/0.6.9/scripts/init.sh b/apps/stream-rec-backend/0.6.9/scripts/init.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/stream-rec-backend/0.6.9/scripts/init.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/stream-rec-backend/0.6.9/scripts/uninstall.sh b/apps/stream-rec-backend/0.6.9/scripts/uninstall.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/stream-rec-backend/0.6.9/scripts/uninstall.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/stream-rec-backend/0.6.9/scripts/upgrade.sh b/apps/stream-rec-backend/0.6.9/scripts/upgrade.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/stream-rec-backend/0.6.9/scripts/upgrade.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." 
+fi diff --git a/apps/stream-rec-backend/README.md b/apps/stream-rec-backend/README.md new file mode 100644 index 00000000..adaa2762 --- /dev/null +++ b/apps/stream-rec-backend/README.md @@ -0,0 +1,34 @@ +# Stream-Rec + +Stream-rec 是一个自动录制各种直播平台的工具。 + +![Stream-Rec](https://github.com/hua0512/stream-rec-front/blob/master/docs/zh/dashboard.png) + +## 特性 + +基于 Kotlin, Ktor, 和 ffmpeg。 + ++ 自动录播,可配置录制质量,路径,格式,并发量,分段录制(时间或文件大小),分段上传,根据直播标题和开始时间自动命名文件。 ++ 自动弹幕录制(XML格式),可使用 DanmakuFactory 进行弹幕转换,或配合AList来实现弹幕自动挂载。 ++ 使用 SQLite 持久化存储录播和上传信息 ++ 支持 Rclone 上传到云存储 ++ 使用 Web 界面进行配置 ++ 支持 Docker + +## 直播平台支持列表 + +| 平台 | 录制 | 弹幕 | 链接格式 | +|-----------|----|----|-----------------------------------------------| +| 抖音 | ✅ | ✅ | `https://www.live.douyin.com/{抖音id}` | +| 斗鱼 | ✅ | ✅ | `https://www.douyu.com/{直播间}` | +| 虎牙 | ✅ | ✅ | `https://www.huya.com/{直播间}` | +| PandaTV | ✅ | ✅ | `https://www.pandalive.co.kr/live/play/{直播间}` | +| Twitch | ✅ | ✅ | `https://www.twitch.tv/{直播间}` | +| AfreecaTv | ❌ | ❌ | | +| Bilibili | ❌ | ❌ | | +| Niconico | ❌ | ❌ | | +| Youtube | ❌ | ❌ | | + +## 安装说明 + +当前项目为 Stream-Rec 的后端部分,请配合前端使用。 diff --git a/apps/stream-rec-backend/data.yml b/apps/stream-rec-backend/data.yml new file mode 100644 index 00000000..571738fa --- /dev/null +++ b/apps/stream-rec-backend/data.yml @@ -0,0 +1,17 @@ +name: Stream Rec API服务 +title: 自动流媒体录制工具 +description: 自动流媒体录制工具 +additionalProperties: + key: stream-rec-backend + name: Stream Rec API服务 + tags: + - Tool + - Local + shortDescZh: 自动流媒体录制工具 + shortDescEn: Automatic streaming media recording tool + type: tool + crossVersionUpdate: true + limit: 0 + website: https://github.com/hua0512/stream-rec/ + github: https://github.com/hua0512/stream-rec/ + document: https://github.com/hua0512/stream-rec/ diff --git a/apps/stream-rec-backend/logo.png b/apps/stream-rec-backend/logo.png new file mode 100644 index 00000000..bb2272f9 Binary files /dev/null and b/apps/stream-rec-backend/logo.png differ diff --git a/apps/stream-rec-frontend/0.6.9/data.yml b/apps/stream-rec-frontend/0.6.9/data.yml new file mode 100644 index 00000000..c90bf428 --- /dev/null +++ b/apps/stream-rec-frontend/0.6.9/data.yml @@ -0,0 +1,46 @@ +additionalProperties: + formFields: + - default: "/home/stream-rec" + edit: true + envKey: STREAM_REC_ROOT_PATH + labelZh: 数据持久化路径 + labelEn: Data persistence path + required: true + type: text + - default: 15275 + edit: true + envKey: PANEL_APP_PORT_HTTP + labelZh: WebUI 端口 + labelEn: WebUI port + required: true + rule: paramPort + type: number + - default: "" + edit: true + envKey: NEXTAUTH_SECRET + labelZh: NextAuth 加密密钥 + labelEn: NextAuth Secret + required: true + random: true + type: text + - default: "http://stream-rec-backend:12555/api" + edit: true + envKey: API_URL + labelZh: API 地址 + labelEn: API URL + required: true + type: text + - default: "ws://stream-rec-backend:12555/live/update" + edit: true + envKey: WS_API_URL + labelZh: WebSocket API 地址 + labelEn: WebSocket API URL + required: true + type: text + - default: "http://localhost:15275/" + edit: true + envKey: NEXTAUTH_URL + labelZh: NextAuth 服务器地址 + labelEn: NextAuth Server URL + required: true + type: text diff --git a/apps/stream-rec-frontend/0.6.9/docker-compose.yml b/apps/stream-rec-frontend/0.6.9/docker-compose.yml new file mode 100644 index 00000000..0eb3e363 --- /dev/null +++ b/apps/stream-rec-frontend/0.6.9/docker-compose.yml @@ -0,0 +1,21 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +services: + stream-rec-frontend: + image: 
streamrec/stream-rec-front:v0.6.9 + container_name: ${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + networks: + - 1panel-network + ports: + - ${PANEL_APP_PORT_HTTP}:15275 + environment: + - TZ=Asia/Shanghai + env_file: + - .env diff --git a/apps/stream-rec-frontend/0.6.9/scripts/init.sh b/apps/stream-rec-frontend/0.6.9/scripts/init.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/stream-rec-frontend/0.6.9/scripts/init.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/stream-rec-frontend/0.6.9/scripts/uninstall.sh b/apps/stream-rec-frontend/0.6.9/scripts/uninstall.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/stream-rec-frontend/0.6.9/scripts/uninstall.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/stream-rec-frontend/0.6.9/scripts/upgrade.sh b/apps/stream-rec-frontend/0.6.9/scripts/upgrade.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/stream-rec-frontend/0.6.9/scripts/upgrade.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/stream-rec-frontend/README.md b/apps/stream-rec-frontend/README.md new file mode 100644 index 00000000..302b70e2 --- /dev/null +++ b/apps/stream-rec-frontend/README.md @@ -0,0 +1,59 @@ +# Stream-Rec + +Stream-rec 是一个自动录制各种直播平台的工具。 + +![Stream-Rec](https://github.com/hua0512/stream-rec-front/blob/master/docs/zh/dashboard.png) + +## 特性 + +基于 Kotlin, Ktor, 和 ffmpeg。 + ++ 自动录播,可配置录制质量,路径,格式,并发量,分段录制(时间或文件大小),分段上传,根据直播标题和开始时间自动命名文件。 ++ 自动弹幕录制(XML格式),可使用 DanmakuFactory 进行弹幕转换,或配合AList来实现弹幕自动挂载。 ++ 使用 SQLite 持久化存储录播和上传信息 ++ 支持 Rclone 上传到云存储 ++ 使用 Web 界面进行配置 ++ 支持 Docker + +## 直播平台支持列表 + +| 平台 | 录制 | 弹幕 | 链接格式 | +|-----------|----|----|-----------------------------------------------| +| 抖音 | ✅ | ✅ | `https://www.live.douyin.com/{抖音id}` | +| 斗鱼 | ✅ | ✅ | `https://www.douyu.com/{直播间}` | +| 虎牙 | ✅ | ✅ | `https://www.huya.com/{直播间}` | +| PandaTV | ✅ | ✅ | `https://www.pandalive.co.kr/live/play/{直播间}` | +| Twitch | ✅ | ✅ | `https://www.twitch.tv/{直播间}` | +| AfreecaTv | ❌ | ❌ | | +| Bilibili | ❌ | ❌ | | +| Niconico | ❌ | ❌ | | +| Youtube | ❌ | ❌ | | + +## 安装说明 + +当前项目为 Stream-Rec 的前端部分,请配合后端使用。 + +获取后端部署主机ip,例如:`192.168.1.20` + +获取后端部署API服务端口,默认值:`12555`,具体值为 `API 端口` + +### `API 地址` 配置 + +```shell +# 协议 + 主机 + API端口 + /api +http://192.168.1.20:12555/api +``` + +### `WebSocket API 地址` 配置 + +```shell +# ws:// + 主机 + API端口 + /live/update +ws://192.168.1.20:12555/live/update +``` + +### `NextAuth 服务器地址` 配置 + +```shell +# 协议 + 主机 + Web端口 + / +http://localhost:15275/ +``` diff --git a/apps/stream-rec-frontend/data.yml b/apps/stream-rec-frontend/data.yml new file mode 100644 index 00000000..15d729d4 --- /dev/null +++ b/apps/stream-rec-frontend/data.yml @@ -0,0 +1,18 @@ +name: Stream Rec 前台服务 +title: 自动流媒体录制工具 +description: 自动流媒体录制工具 +additionalProperties: + key: stream-rec-frontend + name: Stream Rec 前台服务 + tags: + - WebSite + - Tool + - Local + shortDescZh: 自动流媒体录制工具 + shortDescEn: Automatic streaming media recording tool + type: website + crossVersionUpdate: true + limit: 0 + website: https://github.com/hua0512/stream-rec/ + github: https://github.com/hua0512/stream-rec/ + document: https://github.com/hua0512/stream-rec/ diff --git 
a/apps/stream-rec-frontend/logo.png b/apps/stream-rec-frontend/logo.png new file mode 100644 index 00000000..bb2272f9 Binary files /dev/null and b/apps/stream-rec-frontend/logo.png differ diff --git a/apps/stream-rec/0.6.9/data.yml b/apps/stream-rec/0.6.9/data.yml new file mode 100644 index 00000000..18ef3bb3 --- /dev/null +++ b/apps/stream-rec/0.6.9/data.yml @@ -0,0 +1,69 @@ +additionalProperties: + formFields: + - default: "/home/stream-rec" + edit: true + envKey: STREAM_REC_ROOT_PATH + labelZh: 数据持久化路径 + labelEn: Data persistence path + required: true + type: text + - default: 15275 + edit: true + envKey: PANEL_APP_PORT_HTTP + labelZh: WebUI 端口 + labelEn: WebUI port + required: true + rule: paramPort + type: number + - default: 12555 + edit: true + envKey: PANEL_APP_PORT_API + labelZh: API 端口 + labelEn: API port + required: true + rule: paramPort + type: number + - default: "stream-rec" + edit: false + envKey: LOGIN_SECRET + labelZh: 登录密码 (初始化) + labelEn: Login Password (Initialization) + required: true + random: true + type: text + - default: "" + edit: true + envKey: NEXTAUTH_SECRET + labelZh: NextAuth 加密密钥 + labelEn: NextAuth Secret + required: true + random: true + type: text + - default: "http://localhost:15275/" + edit: true + envKey: NEXTAUTH_URL + labelZh: NextAuth 服务器地址 + labelEn: NextAuth Server URL + required: true + type: text + - default: "http://stream-rec-backend:12555/api" + disabled: true + envKey: API_URL + labelZh: API 地址 + labelEn: API URL + required: true + type: text + - default: "ws://stream-rec-backend:12555/live/update" + edit: true + envKey: WS_API_URL + labelZh: WebSocket API 地址 + labelEn: WebSocket API URL + required: true + type: text + - default: "" + edit: true + envKey: HTTP_PROXY + labelZh: 网络代理 + labelEn: Network Proxy + required: false + type: text diff --git a/apps/stream-rec/0.6.9/docker-compose.yml b/apps/stream-rec/0.6.9/docker-compose.yml new file mode 100644 index 00000000..4846e531 --- /dev/null +++ b/apps/stream-rec/0.6.9/docker-compose.yml @@ -0,0 +1,44 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +services: + stream-rec-backend: + image: streamrec/stream-rec:v0.6.9 + container_name: ${CONTAINER_NAME}-backend + labels: + createdBy: "Apps" + restart: always + networks: + - 1panel-network + ports: + - ${PANEL_APP_PORT_API}:12555 + volumes: + - ${STREAM_REC_ROOT_PATH}/records:/opt/records + - ${STREAM_REC_ROOT_PATH}/download:/download + environment: + - TZ=Asia/Shanghai + - LOG_LEVEL=INFO + - DB_PATH=/opt/records + - DOWNLOAD_PATH=/download + env_file: + - .env + + stream-rec-frontend: + depends_on: + - stream-rec-backend + image: streamrec/stream-rec-front:v0.6.9 + container_name: ${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + networks: + - 1panel-network + ports: + - ${PANEL_APP_PORT_HTTP}:15275 + environment: + - TZ=Asia/Shanghai + env_file: + - .env diff --git a/apps/stream-rec/0.6.9/scripts/init.sh b/apps/stream-rec/0.6.9/scripts/init.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/stream-rec/0.6.9/scripts/init.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/stream-rec/0.6.9/scripts/uninstall.sh b/apps/stream-rec/0.6.9/scripts/uninstall.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/stream-rec/0.6.9/scripts/uninstall.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." 
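+  # No cleanup is done here: recordings and the SQLite database mounted from
+  # ${STREAM_REC_ROOT_PATH} stay on disk so they survive a reinstall; delete that
+  # directory manually if a full removal is wanted.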
+ +else + echo "Error: .env file not found." +fi diff --git a/apps/stream-rec/0.6.9/scripts/upgrade.sh b/apps/stream-rec/0.6.9/scripts/upgrade.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/stream-rec/0.6.9/scripts/upgrade.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/stream-rec/README.md b/apps/stream-rec/README.md new file mode 100644 index 00000000..8a913710 --- /dev/null +++ b/apps/stream-rec/README.md @@ -0,0 +1,70 @@ +# Stream-Rec + +Stream-rec 是一个自动录制各种直播平台的工具。 + +![Stream-Rec](https://github.com/hua0512/stream-rec-front/blob/master/docs/zh/dashboard.png) + +## 特性 + +基于 Kotlin, Ktor, 和 ffmpeg。 + ++ 自动录播,可配置录制质量,路径,格式,并发量,分段录制(时间或文件大小),分段上传,根据直播标题和开始时间自动命名文件。 ++ 自动弹幕录制(XML格式),可使用 DanmakuFactory 进行弹幕转换,或配合AList来实现弹幕自动挂载。 ++ 使用 SQLite 持久化存储录播和上传信息 ++ 支持 Rclone 上传到云存储 ++ 使用 Web 界面进行配置 ++ 支持 Docker + +## 直播平台支持列表 + +| 平台 | 录制 | 弹幕 | 链接格式 | +|-----------|----|----|-----------------------------------------------| +| 抖音 | ✅ | ✅ | `https://www.live.douyin.com/{抖音id}` | +| 斗鱼 | ✅ | ✅ | `https://www.douyu.com/{直播间}` | +| 虎牙 | ✅ | ✅ | `https://www.huya.com/{直播间}` | +| PandaTV | ✅ | ✅ | `https://www.pandalive.co.kr/live/play/{直播间}` | +| Twitch | ✅ | ✅ | `https://www.twitch.tv/{直播间}` | +| AfreecaTv | ❌ | ❌ | | +| Bilibili | ❌ | ❌ | | +| Niconico | ❌ | ❌ | | +| Youtube | ❌ | ❌ | | + +## 安装说明 + +> 默认账户 +> +> 用户名:stream-rec +> +> 密码:stream-rec + +### 必须修改配置 + ++ `WebSocket API 地址` + +默认值:`ws://stream-rec-backend:12555/live/update` + +需要获取: + ++ 宿主机 IP 地址 ++ 配置项 `API 端口`, 默认值:`12555` + +填写格式:`ws://{宿主机 IP 地址}:{API 端口}/live/update` + +### 可修改配置 + ++ `NextAuth 服务器地址` + +默认值:`http://localhost:15275/` + +需要获取: + ++ 宿主机 IP 地址 ++ 配置项 `WebUI 端口`, 默认值:`15275` + +填写格式:`http://{宿主机 IP 地址}:{WebUI 端口}/` + +### 无法修改项目 + ++ `API 地址` + +当前部署方式为 前后端合并部署,请勿强制修改参数值。 diff --git a/apps/stream-rec/data.yml b/apps/stream-rec/data.yml new file mode 100644 index 00000000..90e11b38 --- /dev/null +++ b/apps/stream-rec/data.yml @@ -0,0 +1,18 @@ +name: Stream Rec +title: 自动流媒体录制工具 +description: 自动流媒体录制工具 +additionalProperties: + key: stream-rec + name: Stream Rec + tags: + - WebSite + - Tool + - Local + shortDescZh: 自动流媒体录制工具 + shortDescEn: Automatic streaming media recording tool + type: website + crossVersionUpdate: true + limit: 0 + website: https://github.com/hua0512/stream-rec/ + github: https://github.com/hua0512/stream-rec/ + document: https://github.com/hua0512/stream-rec/ diff --git a/apps/stream-rec/logo.png b/apps/stream-rec/logo.png new file mode 100644 index 00000000..bb2272f9 Binary files /dev/null and b/apps/stream-rec/logo.png differ diff --git a/apps/transmission/4.0.6/data.yml b/apps/transmission/4.0.6/data.yml new file mode 100644 index 00000000..5d8e1e89 --- /dev/null +++ b/apps/transmission/4.0.6/data.yml @@ -0,0 +1,97 @@ +additionalProperties: + formFields: + - default: "host" + edit: true + envKey: NETWORK_MODE + labelZh: 网络模式 + labelEn: Network Mode + required: true + type: select + values: + - label: 主机模式 + value: "host" + - label: 桥接模式 + value: "bridge" + - label: 无网络 + value: "none" + - label: 1panel-network + value: "1panel-network" + - default: 9091 + edit: true + envKey: PANEL_APP_PORT_HTTP + labelZh: WebUI 端口 + labelEn: WebUI Port + required: true + rule: paramPort + type: number + - default: 51413 + edit: true + envKey: PANEL_APP_PORT_TORRENTING + labelZh: Torrenting 端口 + labelEn: Torrenting Port + required: true + rule: paramPort + type: number + - 
default: "/home/transmission" + edit: true + envKey: TRANSMISSION_ROOT_PATH + labelZh: 数据持久化路径 + labelEn: Data persistence path + required: true + type: text + - default: "admin" + edit: true + envKey: USER + labelZh: 用户名 + labelEn: User + required: true + type: text + - default: "" + edit: true + envKey: PASS + labelZh: 密码 + labelEn: Password + required: true + type: text + - default: "" + edit: true + envKey: TRANSMISSION_WEB_HOME + labelZh: 第三方 UI 文件夹 + labelEn: Third-party UI folder + required: false + type: text + - default: "" + edit: true + envKey: WHITELIST + labelZh: IP 白名单 + labelEn: Whitelist + required: false + type: text + - default: "" + edit: true + envKey: HOST_WHITELIST + labelZh: 主机白名单 + labelEn: Host Whitelist + required: false + type: text + - default: "" + edit: true + envKey: CUSTOM_MOUNT_DIRECTORY_1 + labelEn: Custom mount directory 1 + labelZh: 自定义挂载目录 1 + required: false + type: text + - default: "" + edit: true + envKey: CUSTOM_MOUNT_DIRECTORY_2 + labelEn: Custom mount directory 2 + labelZh: 自定义挂载目录 2 + required: false + type: text + - default: "" + edit: true + envKey: CUSTOM_MOUNT_DIRECTORY_3 + labelEn: Custom mount directory 3 + labelZh: 自定义挂载目录 3 + required: false + type: text diff --git a/apps/transmission/4.0.6/docker-compose.yml b/apps/transmission/4.0.6/docker-compose.yml new file mode 100644 index 00000000..9c9f8d0f --- /dev/null +++ b/apps/transmission/4.0.6/docker-compose.yml @@ -0,0 +1,33 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +services: + transmission: + image: linuxserver/transmission:4.0.6 + container_name: ${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + network_mode: ${NETWORK_MODE} + ports: + - ${PANEL_APP_PORT_HTTP}:9091 + - ${PANEL_APP_PORT_TORRENTING} + - ${PANEL_APP_PORT_TORRENTING}/udp + env_file: + - /etc/1panel/envs/global.env + - ${ENV_FILE:-/etc/1panel/envs/default.env} + volumes: + - ${TRANSMISSION_ROOT_PATH}/config:/config + - ${TRANSMISSION_ROOT_PATH}/downloads:/downloads + - ${TRANSMISSION_ROOT_PATH}/watch:/watch + - ${CUSTOM_MOUNT_DIRECTORY_1:-./default_mount_1}:${CUSTOM_MOUNT_DIRECTORY_1:-/default_mount_1} + - ${CUSTOM_MOUNT_DIRECTORY_2:-./default_mount_2}:${CUSTOM_MOUNT_DIRECTORY_2:-/default_mount_2} + - ${CUSTOM_MOUNT_DIRECTORY_3:-./default_mount_3}:${CUSTOM_MOUNT_DIRECTORY_3:-/default_mount_3} + environment: + - PUID=0 + - PGID=0 + - UMASK=022 + - PEERPORT= ${PANEL_APP_PORT_TORRENTING} diff --git a/apps/transmission/4.0.6/scripts/init.sh b/apps/transmission/4.0.6/scripts/init.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/transmission/4.0.6/scripts/init.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/transmission/4.0.6/scripts/uninstall.sh b/apps/transmission/4.0.6/scripts/uninstall.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/transmission/4.0.6/scripts/uninstall.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." 
+fi diff --git a/apps/transmission/4.0.6/scripts/upgrade.sh b/apps/transmission/4.0.6/scripts/upgrade.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/transmission/4.0.6/scripts/upgrade.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/transmission/README.md b/apps/transmission/README.md new file mode 100644 index 00000000..4ee1fc50 --- /dev/null +++ b/apps/transmission/README.md @@ -0,0 +1,5 @@ +# Transmission + +Transmission是一种BitTorrent客户端,特点是一个跨平台的后端和其上的简洁的用户界面。 + +![Transmission](https://file.lifebus.top/imgs/transmission_logo.png) diff --git a/apps/transmission/data.yml b/apps/transmission/data.yml new file mode 100644 index 00000000..5a182dce --- /dev/null +++ b/apps/transmission/data.yml @@ -0,0 +1,18 @@ +name: Transmission +title: BitTorrent客户端 +description: BitTorrent客户端 +additionalProperties: + key: transmission + name: Transmission + tags: + - WebSite + - Tool + - Local + shortDescZh: BitTorrent客户端 + shortDescEn: BitTorrent client + type: website + crossVersionUpdate: true + limit: 0 + website: https://transmissionbt.com/ + github: https://github.com/transmission/transmission/ + document: https://github.com/transmission/transmission/ diff --git a/apps/transmission/logo.png b/apps/transmission/logo.png new file mode 100644 index 00000000..8b0c3f16 Binary files /dev/null and b/apps/transmission/logo.png differ diff --git a/apps/umami/2.12.1/data.yml b/apps/umami/2.12.1/data.yml new file mode 100644 index 00000000..384a353a --- /dev/null +++ b/apps/umami/2.12.1/data.yml @@ -0,0 +1,123 @@ +additionalProperties: + formFields: + - child: + default: "" + envKey: PANEL_DB_HOST + required: true + type: service + default: postgresql + edit: true + envKey: PANEL_DB_TYPE + labelZh: 数据库 服务 (前置检查) + labelEn: Database Service (Pre-check) + required: true + type: apps + values: + - label: PostgreSQL + value: postgresql + - label: MySQL + value: mysql + - label: MariaDB + value: mariadb + - label: Percona + value: percona + - default: "/home/umami" + edit: true + envKey: UMAMI_ROOT_PATH + labelZh: 数据持久化路径 + labelEn: Data persistence path + required: true + type: text + - default: 3000 + edit: true + envKey: PANEL_APP_PORT_HTTP + labelZh: 连接端口 + labelEn: Connection Port + required: true + rule: paramPort + type: number + - default: "" + edit: true + envKey: APP_SECRET + labelZh: 应用密钥 + labelEn: Application Secret + random: true + required: false + rule: paramComplexity + type: password + - default: "/" + edit: true + envKey: BASE_PATH + labelZh: 基础路径 + labelEn: Base Path + required: true + type: text + - default: "" + edit: true + envKey: ALLOWED_FRAME_URLS + labelZh: 允许的 frame 地址 + labelEn: Allowed frame urls + required: false + type: text + - default: "0" + edit: true + envKey: DISABLE_BOT_CHECK + labelZh: 禁用机器人检测 + labelEn: Disable bot detection + required: true + type: select + values: + - label: 开启 + value: "1" + - label: 关闭 + value: "0" + - default: postgresql + edit: true + envKey: DATABASE_TYPE + labelZh: 数据库 类型 + labelEn: Database Type + required: true + type: select + values: + - label: PostgreSQL + value: postgresql + - label: MySQL (MariaDB, Percona) + value: mysql + - default: "127.0.0.1" + edit: true + envKey: DB_HOSTNAME + labelZh: 数据库 主机地址 + labelEn: Database Host + required: true + type: text + - default: 5432 + edit: true + envKey: DB_PORT + 
labelZh: 数据库 端口 + labelEn: Database Port + required: true + rule: paramPort + type: number + - default: "umami" + edit: true + envKey: DB_USERNAME + labelZh: 数据库 用户名 + labelEn: Database User + required: true + type: text + - default: "" + edit: true + envKey: DB_PASSWORD + labelEn: Database Password + labelZh: 数据库 密码 + random: true + required: true + rule: paramComplexity + type: password + - default: "umami" + edit: true + envKey: DB_DATABASE_NAME + labelZh: 数据库 名称 + labelEn: Database Name + required: true + type: text diff --git a/apps/umami/2.12.1/docker-compose.yml b/apps/umami/2.12.1/docker-compose.yml new file mode 100644 index 00000000..fe14c2b7 --- /dev/null +++ b/apps/umami/2.12.1/docker-compose.yml @@ -0,0 +1,24 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +services: + umami: + image: ghcr.io/umami-software/umami:${DATABASE_TYPE}-v2.12.1 + container_name: ${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + networks: + - 1panel-network + ports: + - ${PANEL_APP_PORT_HTTP}:3000 + env_file: + - /etc/1panel/envs/global.env + - ${ENV_FILE:-/etc/1panel/envs/default.env} + environment: + - DISABLE_TELEMETRY=1 + - REMOVE_TRAILING_SLASH=1 + - DATABASE_URL=${DATABASE_TYPE}://${DB_USERNAME}:${DB_PASSWORD}@${DB_HOSTNAME}:${DB_PORT}/${DB_DATABASE_NAME} diff --git a/apps/umami/2.12.1/scripts/init.sh b/apps/umami/2.12.1/scripts/init.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/umami/2.12.1/scripts/init.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/umami/2.12.1/scripts/uninstall.sh b/apps/umami/2.12.1/scripts/uninstall.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/umami/2.12.1/scripts/uninstall.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/umami/2.12.1/scripts/upgrade.sh b/apps/umami/2.12.1/scripts/upgrade.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/umami/2.12.1/scripts/upgrade.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found."
+fi diff --git a/apps/umami/README.md b/apps/umami/README.md new file mode 100644 index 00000000..2609da5a --- /dev/null +++ b/apps/umami/README.md @@ -0,0 +1,39 @@ +# Umami + +为速度和效率而构建的网站分析 + +Umami 网站分析提供您实时做出决策所需的数据。 + +![Umami](https://file.lifebus.top/imgs/umami_cover.jpg) + +## 简介 + +Umami 让您轻松分析数据 + ++ 便于使用 + +Umami 功能强大而简单,易于使用和理解,不需要复杂的设置或标签配置。 + ++ UTM 和自定义事件 + +Umami 会自动理解带有 UTM 参数的链接,并让您根据 UTM 查看和过滤您的网站数据。此外,您还可以跟踪网站上的任何事件,例如按钮点击、表单提交、购买、新闻通讯注册等。 + ++ 没有 Cookie 横幅 + +所有数据均经过 Umami 匿名处理,并且不会收集您网站用户的任何个人信息。您无需选择加入 Cookie +横幅即可跟踪网站的性能,从而为您的用户提供更好、更值得信赖的体验。 +默认情况下,Umami 符合 GDPRP 和 CCPA。 + +## 环境准备 + ++ 数据库支持 + + `MySQL` 版本:`5.7+` + + `PostgreSQL` 版本:`12.14+` + +## 安装说明 + +> 默认管理员帐户 +> +> 用户名: admin +> +> 密码: umami diff --git a/apps/umami/data.yml b/apps/umami/data.yml new file mode 100644 index 00000000..398c073f --- /dev/null +++ b/apps/umami/data.yml @@ -0,0 +1,20 @@ +name: Umami +title: 为速度和效率而构建的网站分析 +description: 为速度和效率而构建的网站分析 +additionalProperties: + key: umami + name: Umami + tags: + - WebSite + - Database + - Middleware + - Runtime + - Local + shortDescZh: 为速度和效率而构建的网站分析 + shortDescEn: A website analytics tool that is built for speed and efficiency + type: website + crossVersionUpdate: true + limit: 0 + website: https://umami.is/ + github: https://github.com/umami-software/umami/ + document: https://umami.is/docs/ diff --git a/apps/umami/logo.png b/apps/umami/logo.png new file mode 100644 index 00000000..9ede937a Binary files /dev/null and b/apps/umami/logo.png differ diff --git a/apps/uptime-kuma/1.23.13/data.yml b/apps/uptime-kuma/1.23.13/data.yml new file mode 100644 index 00000000..26dcfdad --- /dev/null +++ b/apps/uptime-kuma/1.23.13/data.yml @@ -0,0 +1,36 @@ +additionalProperties: + formFields: + - default: "/home/uptime-kuma" + edit: true + envKey: UPTIME_KUMA_ROOT_PATH + labelZh: 数据持久化路径 + labelEn: Data persistence path + required: true + type: text + - default: 3001 + edit: true + envKey: PANEL_APP_PORT_HTTP + labelZh: WebUI 端口 + labelEn: WebUI port + required: true + rule: paramPort + type: number + - default: "" + edit: true + envKey: UPTIME_KUMA_CLOUDFLARED_TOKEN + labelZh: Cloudflared 隧道令牌 + labelEn: Cloudflared tunnel token + required: false + type: text + - default: "false" + edit: true + envKey: UPTIME_KUMA_DISABLE_FRAME_SAMEORIGIN + labelZh: 禁用 Frame SameOrigin + labelEn: Disable Frame SameOrigin + required: true + type: select + values: + - label: 是 + value: "true" + - label: 否 + value: "false" diff --git a/apps/uptime-kuma/1.23.13/docker-compose.yml b/apps/uptime-kuma/1.23.13/docker-compose.yml new file mode 100644 index 00000000..18fe2aba --- /dev/null +++ b/apps/uptime-kuma/1.23.13/docker-compose.yml @@ -0,0 +1,32 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +services: + uptime-kuma: + image: louislam/uptime-kuma:1.23.13 + container_name: ${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + networks: + - 1panel-network + ports: + - ${PANEL_APP_PORT_HTTP}:3001 + env_file: + - /etc/1panel/envs/global.env + - ${ENV_FILE:-/etc/1panel/envs/default.env} + volumes: + - ${UPTIME_KUMA_ROOT_PATH}/data:/app/data + - /var/run/docker.sock:/var/run/docker.sock + environment: + - PUID=0 + - PGID=0 + - UPTIME_KUMA_PORT=3001 + - UPTIME_KUMA_HOST=0.0.0.0 + - DATA_DIR=/app/data + - NODE_TLS_REJECT_UNAUTHORIZED=0 + - UPTIME_KUMA_ALLOW_ALL_CHROME_EXEC=0 + - UPTIME_KUMA_WS_ORIGIN_CHECK=cors-like diff --git a/apps/uptime-kuma/1.23.13/scripts/init.sh b/apps/uptime-kuma/1.23.13/scripts/init.sh new file mode 100644 index 
00000000..c211154a --- /dev/null +++ b/apps/uptime-kuma/1.23.13/scripts/init.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/uptime-kuma/1.23.13/scripts/uninstall.sh b/apps/uptime-kuma/1.23.13/scripts/uninstall.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/uptime-kuma/1.23.13/scripts/uninstall.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/uptime-kuma/1.23.13/scripts/upgrade.sh b/apps/uptime-kuma/1.23.13/scripts/upgrade.sh new file mode 100644 index 00000000..c211154a --- /dev/null +++ b/apps/uptime-kuma/1.23.13/scripts/upgrade.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + # setup-1 add default values + CURRENT_DIR=$(pwd) + echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/uptime-kuma/README.md b/apps/uptime-kuma/README.md new file mode 100644 index 00000000..9b4a86cf --- /dev/null +++ b/apps/uptime-kuma/README.md @@ -0,0 +1,19 @@ +# Uptime Kuma + +Uptime Kuma 是一款易于使用的自托管监控工具。 + +![Uptime Kuma](https://file.lifebus.top/imgs/uptime_kuma_cover.jpg) + +## 特性 + ++ 监控 HTTP(s) / TCP / HTTP(s) 关键字 / HTTP(s) Json 查询 / Ping / DNS 记录 / 推送 / Steam 游戏服务器 / Docker 容器的正常运行时间 ++ 精美、反应式、快速的 UI/UX ++ 通过 Telegram、Discord、Gotify、Slack、Pushover、电子邮件 (SMTP) 和 90 多种通知服务发送通知,请单击此处查看完整列表 ++ 20 秒间隔 ++ 多种语言 ++ 多个状态页面 ++ 将状态页面映射到特定域 ++ Ping 图表 ++ 证书信息 ++ 代理支持 ++ 2FA 支持 diff --git a/apps/uptime-kuma/data.yml b/apps/uptime-kuma/data.yml new file mode 100644 index 00000000..4199455c --- /dev/null +++ b/apps/uptime-kuma/data.yml @@ -0,0 +1,19 @@ +name: Uptime Kuma +title: 自托管的监控工具 +type: 实用工具 +description: 自托管的监控工具 +additionalProperties: + key: uptime-kuma + name: Uptime Kuma + tags: + - WebSite + - Tool + - Local + shortDescZh: 自托管的监控工具 + shortDescEn: Self-hosted monitoring tool + type: website + crossVersionUpdate: true + limit: 0 + website: https://uptime.kuma.pet/ + github: https://github.com/louislam/uptime-kuma/ + document: https://github.com/louislam/uptime-kuma/wiki/ diff --git a/apps/uptime-kuma/logo.png b/apps/uptime-kuma/logo.png new file mode 100644 index 00000000..54309bce Binary files /dev/null and b/apps/uptime-kuma/logo.png differ diff --git a/apps/yarr/2.4.0/data.yml b/apps/yarr/2.4.0/data.yml new file mode 100644 index 00000000..facc5a5b --- /dev/null +++ b/apps/yarr/2.4.0/data.yml @@ -0,0 +1,17 @@ +additionalProperties: + formFields: + - default: 7070 + edit: true + envKey: PANEL_APP_PORT_HTTP + labelEn: Port + labelZh: 端口 + required: true + rule: paramPort + type: number + - default: "/home/yarr" + edit: true + envKey: YARR_ROOT_PATH + labelEn: Root Path + labelZh: 数据持久化 根路径 + required: true + type: text diff --git a/apps/yarr/2.4.0/docker-compose.yml b/apps/yarr/2.4.0/docker-compose.yml new file mode 100644 index 00000000..923ebb3c --- /dev/null +++ b/apps/yarr/2.4.0/docker-compose.yml @@ -0,0 +1,19 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +services: + yarr: + image: qyg2297248353/yarr:v2.4.0 + container_name: ${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + networks: + - 1panel-network + ports: + - ${PANEL_APP_PORT_HTTP}:7070 + volumes: + - ${YARR_ROOT_PATH}/data:/data diff --git
a/apps/yarr/2.4.0/scripts/init.sh b/apps/yarr/2.4.0/scripts/init.sh new file mode 100644 index 00000000..b4e37318 --- /dev/null +++ b/apps/yarr/2.4.0/scripts/init.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [[ -f .env ]]; then + source .env + + echo "Check Finish." + +else + echo ".env not found." +fi diff --git a/apps/yarr/2.4.0/scripts/upgrade.sh b/apps/yarr/2.4.0/scripts/upgrade.sh new file mode 100644 index 00000000..b4e37318 --- /dev/null +++ b/apps/yarr/2.4.0/scripts/upgrade.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [[ -f .env ]]; then + source .env + + echo "Check Finish." + +else + echo ".env not found." +fi diff --git a/apps/yarr/README.md b/apps/yarr/README.md new file mode 100644 index 00000000..1621f5df --- /dev/null +++ b/apps/yarr/README.md @@ -0,0 +1,5 @@ +# Yarr + +yarr 是一个基于 Web 的提要聚合器,它既可以用作桌面应用程序,也可以用作个人自托管服务器。 + +该应用程序是带有嵌入式数据库(SQLite)的单个二进制文件。 diff --git a/apps/yarr/data.yml b/apps/yarr/data.yml new file mode 100644 index 00000000..4ef9587a --- /dev/null +++ b/apps/yarr/data.yml @@ -0,0 +1,20 @@ +name: Yarr +tags: + - 实用工具 +title: Rss 阅读器 +type: 实用工具 +description: Rss 阅读器 +additionalProperties: + key: yarr + name: Yarr + tags: + - Tool + shortDescZh: Rss 阅读器 + shortDescEn: Rss Reader + type: tool + crossVersionUpdate: true + limit: 0 + recommend: 0 + website: https://github.com/nkanaev/yarr/ + github: https://github.com/nkanaev/yarr + document: https://github.com/nkanaev/yarr/ diff --git a/apps/yarr/logo.png b/apps/yarr/logo.png new file mode 100644 index 00000000..e337af24 Binary files /dev/null and b/apps/yarr/logo.png differ diff --git a/apps/ztncui/1.2.17/data.yml b/apps/ztncui/1.2.17/data.yml new file mode 100644 index 00000000..280b77e7 --- /dev/null +++ b/apps/ztncui/1.2.17/data.yml @@ -0,0 +1,47 @@ +additionalProperties: + formFields: + - default: "/home/ztncui" + edit: true + envKey: ZTNCUI_ROOT_PATH + labelZh: 数据持久化路径 + labelEn: Data persistence path + required: true + type: text + - default: 3000 + edit: true + envKey: PANEL_APP_PORT_HTTP + labelZh: WebUI 端口 + labelEn: WebUI port + required: true + rule: paramPort + type: number + - default: 3443 + edit: true + envKey: PANEL_APP_PORT_HTTPS + labelZh: HTTPS 端口 + labelEn: HTTPS Port + required: true + rule: paramPort + type: number + - default: 9993 + edit: true + envKey: PANEL_APP_PORT_API + labelZh: API 端口 + labelEn: API Port + required: true + rule: paramPort + type: number + - default: "" + edit: true + envKey: ZTNCUI_PASSWD + labelZh: 管理员 密码 + labelEn: Admin Password + required: true + type: password + - default: "" + edit: true + envKey: MYADDR + labelZh: 服务器IP + labelEn: Server IP + required: true + type: text diff --git a/apps/ztncui/1.2.17/docker-compose.yml b/apps/ztncui/1.2.17/docker-compose.yml new file mode 100644 index 00000000..60d487f8 --- /dev/null +++ b/apps/ztncui/1.2.17/docker-compose.yml @@ -0,0 +1,29 @@ +version: "3.8" + +networks: + 1panel-network: + external: true + +services: + ztncui: + image: keynetworks/ztncui:1.2.17 + container_name: ${CONTAINER_NAME} + labels: + createdBy: "Apps" + restart: always + network_mode: ${NETWORK_MODE} + ports: + - ${PANEL_APP_PORT_HTTP}:3000 + - ${PANEL_APP_PORT_HTTPS}:3443 + - ${PANEL_APP_PORT_API}:9993/udp + env_file: + - .env + environment: + - NODE_ENV=production + - HTTP_PORT=3000 + - HTTPS_PORT=3443 + - ZT_ADDR=localhost:9993 + - HTTP_ALL_INTERFACES=yes + volumes: + - ${ZTNCUI_ROOT_PATH}/ztncui:/opt/key-networks/ztncui/etc + - ${ZTNCUI_ROOT_PATH}/zerotier-one:/var/lib/zerotier-one diff --git a/apps/ztncui/1.2.17/scripts/init.sh 
b/apps/ztncui/1.2.17/scripts/init.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/ztncui/1.2.17/scripts/init.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/ztncui/1.2.17/scripts/uninstall.sh b/apps/ztncui/1.2.17/scripts/uninstall.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/ztncui/1.2.17/scripts/uninstall.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/ztncui/1.2.17/scripts/upgrade.sh b/apps/ztncui/1.2.17/scripts/upgrade.sh new file mode 100644 index 00000000..c86c4fbc --- /dev/null +++ b/apps/ztncui/1.2.17/scripts/upgrade.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env + + echo "Check Finish." + +else + echo "Error: .env file not found." +fi diff --git a/apps/ztncui/README.md b/apps/ztncui/README.md new file mode 100644 index 00000000..86788331 --- /dev/null +++ b/apps/ztncui/README.md @@ -0,0 +1,28 @@ +# ztncui + +包含 ZeroTier One 和 ztncui 的 Docker 映像,用于在容器中设置具有 Web 用户界面的独立 ZeroTier 网络控制器。 + +![ztncui](https://docs.zerotier.com/assets/images/integrating-gateway-f59b544e1196b304d4fc502a64b19e27.png) + +## 简介 + +ZeroTier 这一类 P2P VPN 是在互联网的基础上将自己的所有设备组成一个私有的网络,可以理解为互联网连接的局域网。最常见的场景就是在公司可以用手机直接访问家里的 +NAS,而且是点对点直连,数据传输并不经由第三方服务器中转。 + +ZeroTier 在多设备之间建立了一个 Peer to Peer VPN(P2PVPN) 连接,如:在笔记本电脑、台式机、嵌入式设备、云资源和应用。这些设备只需要通过 +ZeroTier One ( ZeroTier 的客户端) 在不同设备之间建立直接连接,即使它们位于 NAT 之后。连接到虚拟 LAN 的任何计算机和设备通常通过 +NAT 或路由器设备与 Internet 连接,ZeroTier One 使用 STUN 和隧道来建立 NAT 后设备之间的 VPN 直连。 + +简单一点说,ZeroTier 就是通过 P2P 等方式实现形如交换机或路由器上 LAN 设备的内网互联。 + +### 专有名词 + +PLANET :行星服务器,Zerotier 根服务器 + +MOON :卫星服务器,用户自建的私有根服务器,起到代理加速的作用 + +LEAF :网络客户端,就是每台连接到网络节点。 + +## 安装说明 + +> 默认用户名:`admin` diff --git a/apps/ztncui/data.yml b/apps/ztncui/data.yml new file mode 100644 index 00000000..d7aff2fa --- /dev/null +++ b/apps/ztncui/data.yml @@ -0,0 +1,18 @@ +name: Ztncui +title: 创建和管理虚拟网络 +description: 创建和管理虚拟软件定义网络 +additionalProperties: + key: ztncui + name: Ztncui + tags: + - WebSite + - Tool + - Local + shortDescZh: 创建和管理虚拟软件定义网络 + shortDescEn: Create and manage virtual software-defined networks + type: website + crossVersionUpdate: true + limit: 0 + website: https://www.zerotier.com/ + github: https://github.com/zerotier/ZeroTierOne/ + document: https://docs.zerotier.com/ diff --git a/apps/ztncui/logo.png b/apps/ztncui/logo.png new file mode 100644 index 00000000..33e29b5c Binary files /dev/null and b/apps/ztncui/logo.png differ diff --git a/envs/default.env b/envs/default.env new file mode 100644 index 00000000..77a2c8cc --- /dev/null +++ b/envs/default.env @@ -0,0 +1,2 @@ +# copyright© 2024 XinJiang Ms Studio +ENV_FILE=.env diff --git a/envs/gitea/gitea.env b/envs/gitea/gitea.env new file mode 100644 index 00000000..6d9ecfbf --- /dev/null +++ b/envs/gitea/gitea.env @@ -0,0 +1,24 @@ +USER_UID=1000 +USER_GID=1000 +APP_NAME="Gitea: Git with a cup of tea" +RUN_MODE=prod +DOMAIN=localhost +SSH_DOMAIN=localhost +SSH_PORT=22 +SSH_LISTEN_PORT=22 +DISABLE_SSH=false +HTTP_PORT=3000 +ROOT_URL="" +LFS_START_SERVER=true +DB_TYPE=sqlite3 +DB_HOST="" +DB_NAME="" +DB_USER="" +DB_PASSWD="" +INSTALL_LOCK=false +SECRET_KEY="" +DISABLE_REGISTRATION=false +REQUIRE_SIGNIN_VIEW=false +DEFAULT_UI_LOCATION=Asia/Shanghai +ALLOW_LOCALNETWORKS=true +ENABLE_SWAGGER=false diff --git a/envs/global.env b/envs/global.env new file mode 100644 index 
00000000..4ea885ee --- /dev/null +++ b/envs/global.env @@ -0,0 +1,2 @@ +# copyright© 2024 XinJiang Ms Studio +TZ=Asia/Shanghai diff --git a/envs/moviepilot/moviepilot.env b/envs/moviepilot/moviepilot.env new file mode 100644 index 00000000..d244ac86 --- /dev/null +++ b/envs/moviepilot/moviepilot.env @@ -0,0 +1,53 @@ +####################################################################### +# 【*】为必配项,其余为选配项,选配项可以删除整项配置项或者保留配置默认值 # +####################################################################### +# 【*】API监听地址(注意不是前端访问地址) +HOST=0.0.0.0 +# 是否调试模式,打开后将输出更多日志 +DEBUG=false +# 是否开发模式,打开后后台服务将不会启动 +DEV=false +# 【*】超级管理员,设置后一但重启将固化到数据库中,修改将无效(初始化超级管理员密码仅会生成一次,请在日志中查看并自行登录系统修改) +SUPERUSER=admin +# 大内存模式,开启后会增加缓存数量,但会占用更多内存 +BIG_MEMORY_MODE=false +# 是否启用DOH域名解析,启用后对于api.themovie.org等域名通过DOH解析,避免域名DNS被污染 +DOH_ENABLE=true +# 元数据识别缓存过期时间,数字型,单位小时,0为系统默认(大内存模式为7天,滞则为3天),调大该值可减少themoviedb的访问次数 +META_CACHE_EXPIRE=0 +# 自动检查和更新站点资源包(索引、认证等) +AUTO_UPDATE_RESOURCE=true +# 【*】API密钥,建议更换复杂字符串,有Jellyseerr/Overseerr、媒体服务器Webhook等配置以及部分支持API_TOKEN的API中使用 +API_TOKEN=moviepilot +# 登录页面电影海报,tmdb/bing,tmdb要求能正常连接api.themoviedb.org +WALLPAPER=tmdb +# TMDB图片地址,无需修改需保留默认值,如果默认地址连通性不好可以尝试修改为:`static-mdb.v.geilijiasu.com` +TMDB_IMAGE_DOMAIN=image.tmdb.org +# TMDB API地址,无需修改需保留默认值,也可配置为`api.tmdb.org`或其它中转代理服务地址,能连通即可 +TMDB_API_DOMAIN=api.themoviedb.org +# 媒体识别来源 themoviedb/douban,使用themoviedb时需要确保能正常连接api.themoviedb.org,使用douban时不支持二级分类 +RECOGNIZE_SOURCE=themoviedb +# Fanart开关 +FANART_ENABLE=true +# 新增已入库媒体是否跟随TMDB信息变化,true/false,为false时即使TMDB信息变化时也会仍然按历史记录中已入库的信息进行刮削 +SCRAP_FOLLOW_TMDB=true +# 刮削来源 themoviedb/douban,使用themoviedb时需要确保能正常连接api.themoviedb.org,使用douban时会缺失部分信息 +SCRAP_SOURCE=themoviedb +# 电影重命名格式,Jinja2语法,参考:https://jinja.palletsprojects.com/en/3.0.x/templates/ +MOVIE_RENAME_FORMAT={{title}}{% if year %} ({{year}}){% endif %}/{{title}}{% if year %} ({{year}}){% endif %}{% if part %}-{{part}}{% endif %}{% if videoFormat %} - {{videoFormat}}{% endif %}{{fileExt}} +# 电视剧重命名格式,Jinja2语法,参考:https://jinja.palletsprojects.com/en/3.0.x/templates/ +TV_RENAME_FORMAT={{title}}{% if year %} ({{year}}){% endif %}/Season {{season}}/{{title}} - {{season_episode}}{% if part %}-{{part}}{% endif %}{% if episode %} - 第 {{episode}} 集{% endif %}{{fileExt}} +# 交互搜索自动下载用户ID(消息通知渠道的用户ID),使用,分割,设置为 all 代表所有用户自动择优下载,未设置需要用户手动选择资源或者回复`0`才自动择优下载 +AUTO_DOWNLOAD_USER= +# 自动下载站点字幕(如有) +DOWNLOAD_SUBTITLE=true +# OCR服务器地址 +OCR_HOST=https://movie-pilot.org +# 插件市场仓库地址,多个地址使用`,`分隔,保留最后的/ +PLUGIN_MARKET=https://github.com/jxxghp/MoviePilot-Plugins,https://github.com/thsrite/MoviePilot-Plugins,https://github.com/InfinityPacer/MoviePilot-Plugins,https://github.com/honue/MoviePilot-Plugins +# 搜索多个名称,true/false,为true时搜索时会同时搜索中英文及原始名称,搜索结果会更全面,但会增加搜索时间;为false时其中一个名称搜索到结果或全部名称搜索完毕即停止 +SEARCH_MULTIPLE_NAME=true + + +# 自定义配置 +AUTH_SITE="iyuu,hhclub,audiences,hddolby,zmpt,freefarm,hdfans,wintersakura,leaves,ptba,icc2022,xingtan,ptvicomo,agsvpt,hdkyl,qingwa,discfan,haidan,rousi" diff --git a/envs/onedev/onedev.env b/envs/onedev/onedev.env new file mode 100644 index 00000000..5f2107e0 --- /dev/null +++ b/envs/onedev/onedev.env @@ -0,0 +1,29 @@ +# 外部数据库支持配置 + +# PostgreSQL +# hibernate_dialect=io.onedev.server.persistence.PostgreSQLDialect +# hibernate_connection_driver_class=org.postgresql.Driver +# hibernate_connection_url=jdbc:postgresql://localhost:5432/onedev +# hibernate_connection_username=postgres +# hibernate_connection_password=postgres + +# MySQL +# hibernate_dialect=org.hibernate.dialect.MySQL5InnoDBDialect +# 
hibernate_connection_driver_class=com.mysql.cj.jdbc.Driver +# hibernate_connection_url=jdbc:mysql://localhost:3306/onedev?serverTimezone=UTC&allowPublicKeyRetrieval=true&useSSL=false&disableMariaDbDriver=true +# hibernate_connection_username=root +# hibernate_connection_password=root + +# MariaDB +# hibernate_dialect=org.hibernate.dialect.MySQL5InnoDBDialect +# hibernate_connection_driver_class=org.mariadb.jdbc.Driver +# hibernate_connection_url=jdbc:mariadb://localhost:3306/onedev +# hibernate_connection_username=root +# hibernate_connection_password=root + +# MS SQL Server +# hibernate_dialect=org.hibernate.dialect.SQLServer2012Dialect +# hibernate_connection_driver_class=com.microsoft.sqlserver.jdbc.SQLServerDriver +# hibernate_connection_url=jdbc:sqlserver://localhost:1433;databaseName=onedev +# hibernate_connection_username=sa +# hibernate_connection_password=sa diff --git a/envs/outline/outline.env b/envs/outline/outline.env new file mode 100644 index 00000000..1e366c08 --- /dev/null +++ b/envs/outline/outline.env @@ -0,0 +1,224 @@ +# –––––––––––––––– REQUIRED –––––––––––––––– + +NODE_ENV=production + +# Generate a hex-encoded 32-byte random key. You should use `openssl rand -hex 32` +# in your terminal to generate a random value. +SECRET_KEY=generate_a_new_key + +# Generate a unique random key. The format is not important but you could still use +# `openssl rand -hex 32` in your terminal to produce this. +UTILS_SECRET=generate_a_new_key + +# For production point these at your databases, in development the default +# should work out of the box. +DATABASE_URL=postgres://user:pass@localhost:5432/outline +DATABASE_CONNECTION_POOL_MIN= +DATABASE_CONNECTION_POOL_MAX= +# Uncomment this to disable SSL for connecting to Postgres +PGSSLMODE=disable + +# For redis you can either specify an ioredis compatible url like this +REDIS_URL=redis://localhost:6379 +# or alternatively, if you would like to provide additional connection options, +# use a base64 encoded JSON connection option object. Refer to the ioredis documentation +# for a list of available options. +# Example: Use Redis Sentinel for high availability +# {"sentinels":[{"host":"sentinel-0","port":26379},{"host":"sentinel-1","port":26379}],"name":"mymaster"} +# REDIS_URL=ioredis://eyJzZW50aW5lbHMiOlt7Imhvc3QiOiJzZW50aW5lbC0wIiwicG9ydCI6MjYzNzl9LHsiaG9zdCI6InNlbnRpbmVsLTEiLCJwb3J0IjoyNjM3OX1dLCJuYW1lIjoibXltYXN0ZXIifQ== + +# URL should point to the fully qualified, publicly accessible URL. If using a +# proxy the port in URL and PORT may be different. +URL=http://127.0.0.1:3000 +PORT=3000 + +# See [documentation](docs/SERVICES.md) on running a separate collaboration +# server, for normal operation this does not need to be set. +COLLABORATION_URL= + +# Specify what storage system to use. Possible value is one of "s3" or "local". +# For "local", the avatar images and document attachments will be saved on local disk. +FILE_STORAGE=local + +# If "local" is configured for FILE_STORAGE above, then this sets the parent directory under +# which all attachments/images go. Make sure that the process has permissions to create +# this path and also to write files to it. +FILE_STORAGE_LOCAL_ROOT_DIR=/var/lib/outline/data + +# Maximum allowed size for the uploaded attachment. +FILE_STORAGE_UPLOAD_MAX_SIZE=262144000 + +# Override the maximum size of document imports, generally this should be lower +# than the document attachment maximum size. 
+FILE_STORAGE_IMPORT_MAX_SIZE= + +# Override the maximum size of workspace imports, these can be especially large +# and the files are temporary being automatically deleted after a period of time. +FILE_STORAGE_WORKSPACE_IMPORT_MAX_SIZE= + +# To support uploading of images for avatars and document attachments in a distributed +# architecture an s3-compatible storage can be configured if FILE_STORAGE=s3 above. +AWS_ACCESS_KEY_ID=get_a_key_from_aws +AWS_SECRET_ACCESS_KEY=get_the_secret_of_above_key +AWS_REGION=xx-xxxx-x +AWS_S3_ACCELERATE_URL= +AWS_S3_UPLOAD_BUCKET_URL=http://s3:4569 +AWS_S3_UPLOAD_BUCKET_NAME=bucket_name_here +AWS_S3_FORCE_PATH_STYLE=true +AWS_S3_ACL=private + +# –––––––––––––– AUTHENTICATION –––––––––––––– + +# Third party signin credentials, at least ONE OF EITHER Google, Slack, +# or Microsoft is required for a working installation or you'll have no sign-in +# options. + +# To configure Slack auth, you'll need to create an Application at +# => https://api.slack.com/apps +# +# When configuring the Client ID, add a redirect URL under "OAuth & Permissions": +# https:///auth/slack.callback +SLACK_CLIENT_ID=get_a_key_from_slack +SLACK_CLIENT_SECRET=get_the_secret_of_above_key + +# To configure Google auth, you'll need to create an OAuth Client ID at +# => https://console.cloud.google.com/apis/credentials +# +# When configuring the Client ID, add an Authorized redirect URI: +# https:///auth/google.callback +GOOGLE_CLIENT_ID= +GOOGLE_CLIENT_SECRET= + +# To configure Microsoft/Azure auth, you'll need to create an OAuth Client. See +# the guide for details on setting up your Azure App: +# => https://wiki.generaloutline.com/share/dfa77e56-d4d2-4b51-8ff8-84ea6608faa4 +AZURE_CLIENT_ID= +AZURE_CLIENT_SECRET= +AZURE_RESOURCE_APP_ID= + +# To configure generic OIDC auth, you'll need some kind of identity provider. +# See documentation for whichever IdP you use to acquire the following info: +# Redirect URI is https:///auth/oidc.callback +OIDC_CLIENT_ID= +OIDC_CLIENT_SECRET= +OIDC_AUTH_URI= +OIDC_TOKEN_URI= +OIDC_USERINFO_URI= +OIDC_LOGOUT_URI= + +# Specify which claims to derive user information from +# Supports any valid JSON path with the JWT payload +OIDC_USERNAME_CLAIM=preferred_username + +# Display name for OIDC authentication +OIDC_DISPLAY_NAME=OpenID Connect + +# Space separated auth scopes. +OIDC_SCOPES=openid profile email + +# To configure the GitHub integration, you'll need to create a GitHub App at +# => https://github.com/settings/apps +# +# When configuring the Client ID, add a redirect URL under "Permissions & events": +# https:///api/github.callback +GITHUB_CLIENT_ID= +GITHUB_CLIENT_SECRET= +GITHUB_APP_NAME= +GITHUB_APP_ID= +GITHUB_APP_PRIVATE_KEY= + +# To configure Discord auth, you'll need to create a Discord Application at +# => https://discord.com/developers/applications/ +# +# When configuring the Client ID, add a redirect URL under "OAuth2": +# https:///auth/discord.callback +DISCORD_CLIENT_ID= +DISCORD_CLIENT_SECRET= + +# DISCORD_SERVER_ID should be the ID of the Discord server that Outline is +# integrated with. +# Used to verify that the user is a member of the server as well as server +# metadata such as nicknames, server icon and name. +DISCORD_SERVER_ID= + +# DISCORD_SERVER_ROLES should be a comma separated list of role IDs that are +# allowed to access Outline. If this is not set, all members of the server +# will be allowed to access Outline. +# DISCORD_SERVER_ID and DISCORD_SERVER_ROLES must be set together. 
+DISCORD_SERVER_ROLES= + +# –––––––––––––––– OPTIONAL –––––––––––––––– + +# Base64 encoded private key and certificate for HTTPS termination. This is only +# required if you do not use an external reverse proxy. See documentation: +# https://wiki.generaloutline.com/share/1c922644-40d8-41fe-98f9-df2b67239d45 +SSL_KEY= +SSL_CERT= + +# If using a Cloudfront/Cloudflare distribution or similar it can be set below. +# This will cause paths to javascript, stylesheets, and images to be updated to +# the hostname defined in CDN_URL. In your CDN configuration the origin server +# should be set to the same as URL. +CDN_URL= + +# Auto-redirect to https in production. The default is true but you may set to +# false if you can be sure that SSL is terminated at an external loadbalancer. +FORCE_HTTPS=false + +# Have the installation check for updates by sending anonymized statistics to +# the maintainers +ENABLE_UPDATES=true + +# How many processes should be spawned. As a reasonable rule divide your servers +# available memory by 512 for a rough estimate +WEB_CONCURRENCY=1 + +# You can remove this line if your reverse proxy already logs incoming http +# requests and this ends up being duplicative +DEBUG=http + +# Configure lowest severity level for server logs. Should be one of +# error, warn, info, http, verbose, debug and silly +LOG_LEVEL=info + +# For a complete Slack integration with search and posting to channels the +# following configs are also needed, some more details +# => https://wiki.generaloutline.com/share/be25efd1-b3ef-4450-b8e5-c4a4fc11e02a +# +SLACK_VERIFICATION_TOKEN=your_token +SLACK_APP_ID=A0XXXXXXX +SLACK_MESSAGE_ACTIONS=true + +# Optionally enable Sentry (sentry.io) to track errors and performance, +# and optionally add a Sentry proxy tunnel for bypassing ad blockers in the UI: +# https://docs.sentry.io/platforms/javascript/troubleshooting/#using-the-tunnel-option) +SENTRY_DSN= +SENTRY_TUNNEL= + +# To support sending outgoing transactional emails such as "document updated" or +# "you've been invited" you'll need to provide authentication for an SMTP server +SMTP_HOST= +SMTP_PORT= +SMTP_USERNAME= +SMTP_PASSWORD= +SMTP_FROM_EMAIL= +SMTP_REPLY_EMAIL= +SMTP_TLS_CIPHERS= +SMTP_SECURE=true + +# The default interface language. See translate.getoutline.com for a list of +# available language codes and their rough percentage translated. +DEFAULT_LANGUAGE=zh_CN + +# Optionally enable rate limiter at application web server +RATE_LIMITER_ENABLED=true + +# Configure default throttling parameters for rate limiter +RATE_LIMITER_REQUESTS=1000 +RATE_LIMITER_DURATION_WINDOW=60 + +# Iframely API config +# https://iframe.ly/api/oembed +# https://iframe.ly/api/iframely +IFRAMELY_URL=https://iframe.ly/api/iframely +# IFRAMELY_API_KEY= diff --git a/logo.png b/logo.png new file mode 100644 index 00000000..6f82a12d Binary files /dev/null and b/logo.png differ diff --git a/script/app_install.sh b/script/app_install.sh new file mode 100644 index 00000000..42c6ab0c --- /dev/null +++ b/script/app_install.sh @@ -0,0 +1,84 @@ +#!/bin/bash + +echo "$(date): Step Tip - Start installing the 1Panel third-party app store" +echo "$(date): Step Init - Checking for required commands..." + +check_command() { + command -v "$1" > /dev/null 2>&1 || { + echo >&2 "Error: $1 is not installed. Please install it and try again." 
+ exit 1 + } +} + +check_command "git" +check_command "cp" +check_command "rm" +check_command "echo" +check_command "which" +check_command "xargs" +check_command "grep" +check_command "cut" + +BASE_DIR=$(which 1pctl | xargs grep '^BASE_DIR=' | cut -d'=' -f2) +echo "Step Init - 1panel install directory: $BASE_DIR" + +if [ -z "$BASE_DIR" ]; then + echo "Error: 1panel install directory not found." + exit 1 +fi + +echo "$(date): Step 1 - Cloning repository..." +repos=( + 'https://github.com/QYG2297248353/appstore-1panel' + 'https://gitee.com/qyg2297248353/appstore-1panel' + 'https://gitea.com/QYG2297248353/appstore-1panel' +) + +for repo in "${repos[@]}"; do + git clone --depth 1 -b released "$repo" "${BASE_DIR:?}/1panel/resource/apps/local/appstore-localApps" && break +done + +if [ ! -d "${BASE_DIR:?}/1panel/resource/apps/local/appstore-localApps" ]; then + echo "Error: Failed to clone repository." + exit 1 +fi + +APPS_DIR="$BASE_DIR/1panel/resource/apps/local/appstore-localApps/apps" +LOCAL_DIR="$BASE_DIR/1panel/resource/apps/local" +ENVS_DIR="$BASE_DIR/1panel/resource/apps/local/appstore-localApps/envs" +DEST_ENVS_DIR="/etc/1panel/envs" + +echo "$(date): Step 2 - Checking for updated apps..." +for app_directory in "${APPS_DIR:?}"/*; do + app_name=$(basename "$app_directory") + + if [ -d "${LOCAL_DIR:?}/$app_name" ]; then + rm -rf "${LOCAL_DIR:?}/$app_name" + cp -r "${app_directory:?}" "${LOCAL_DIR:?}/" + echo "$(date): Step 2 - Upgraded applications $app_name" + echo "$(date): Step 2 - Copied and replaced directory $app_directory to $LOCAL_DIR/" + else + cp -r "${app_directory:?}" "${LOCAL_DIR:?}/" + echo "$(date): Step 2 - Installed applications $app_name" + echo "$(date): Step 2 - Copied directory $app_directory to $LOCAL_DIR/" + fi +done + +echo "$(date): Step 3 - Copying envs directory..." +if [ -d "${ENVS_DIR:?}" ]; then + rm -rf "${DEST_ENVS_DIR:?}" + mkdir -p "${DEST_ENVS_DIR:?}" + cp -r "${ENVS_DIR:?}/"* "${DEST_ENVS_DIR:?}/" + echo "$(date): Step 3 - Copied envs directory to $DEST_ENVS_DIR/" +else + echo "$(date): Step 3 - Envs directory not found, skipping." +fi + +echo "$(date): Step 4 - Cleaning installed directory..." +rm -rf "${BASE_DIR:?}/1panel/resource/apps/local/appstore-localApps" +echo "$(date): Step 4 - Finished cleaning installed directory" + +echo "$(date): Step Tip - Installation completed!" +echo "$(date): Step Tip - Copyright© 2024 Xinjiang Mengsen Software Development Studio." +echo "$(date): Step Tip - https://blog.lifebus.top/" +echo "$(date): Step Tip - Done!" 
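Note on app_install.sh above: it resolves the panel's install directory by grepping the 1pctl launcher for its BASE_DIR= line, then syncs the cloned store into $BASE_DIR/1panel/resource/apps/local. A minimal sketch of that lookup, assuming a typical launcher whose header contains BASE_DIR=/opt (the /opt value is only an illustration; the command itself is the one used in the script):

# assumption: `which 1pctl` points at a launcher file containing a line like `BASE_DIR=/opt`
BASE_DIR=$(which 1pctl | xargs grep '^BASE_DIR=' | cut -d'=' -f2)
echo "$BASE_DIR"                              # -> /opt (under the assumption above)
echo "$BASE_DIR/1panel/resource/apps/local"   # destination the cloned apps are copied into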
diff --git a/script/auto_install.sh b/script/auto_install.sh new file mode 100644 index 00000000..83886aac --- /dev/null +++ b/script/auto_install.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +mkdir -p /home/task + +urls=( + 'https://install.lifebus.top/app_install.sh' + 'https://gitee.com/qyg2297248353/appstore-1panel/releases/download/v1.0/app_install.sh' + 'https://gitea.com/QYG2297248353/appstore-1panel/releases/download/install/app_install.sh' + 'https://raw.githubusercontent.com/QYG2297248353/appstore-1panel/custom/script/app_install.sh' + 'https://github.com/QYG2297248353/appstore-1panel/releases/download/install/app_install.sh' +) + +for url in "${urls[@]}"; do + wget -O /home/task/app_install.sh "$url" && break +done + +if [[ -f /home/task/app_install.sh ]]; then + chmod +x /home/task/app_install.sh + + crontab -l | grep -v '/home/task/app_install.sh' | crontab - + crontab -l | grep -v '/home/task/app_install_zh.sh' | crontab - + + (crontab -l ; echo "0 */3 * * * /bin/bash /home/task/app_install.sh") | crontab - + + /bin/bash /home/task/app_install.sh +else + echo "网络异常,请检查您的网络状态。" +fi diff --git a/script/auto_uninstall.sh b/script/auto_uninstall.sh new file mode 100644 index 00000000..ab34b154 --- /dev/null +++ b/script/auto_uninstall.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +crontab -l | grep -v '/home/task/app_install.sh' | crontab - +crontab -l | grep -v '/home/task/app_install_zh.sh' | crontab - + +rm -f /home/task/app_install.sh +rm -f /home/task/app_install_zh.sh diff --git a/tags.yaml b/tags.yaml new file mode 100644 index 00000000..228f3ac7 --- /dev/null +++ b/tags.yaml @@ -0,0 +1,53 @@ +name: 1Panel +title: 新一代的 Linux 服务器运维管理面板 +additionalProperties: + tags: + - key: WebSite + name: 建站 + sort: 1 + - key: Database + name: 数据库 + sort: 2 + - key: Server + name: Web 服务器 + sort: 3 + - key: Runtime + name: 运行环境 + sort: 4 + - key: Tool + name: 实用工具 + sort: 5 + - key: Storage + name: 云存储 + sort: 6 + - key: AI + name: AI / 大模型 + sort: 7 + - key: BI + name: BI + sort: 8 + - key: Security + name: 安全 + sort: 9 + - key: DevTool + name: 开发工具 + sort: 10 + - key: DevOps + name: DevOps + sort: 11 + - key: Middleware + name: 中间件 + sort: 12 + - key: Media + name: 多媒体 + sort: 13 + - key: Email + name: 邮件服务 + sort: 14 + - key: Game + name: 休闲游戏 + sort: 15 + - key: Local + name: 本地 + sort: 99 + version: v1.6.0 diff --git a/新疆萌森软件开发工作室.url b/新疆萌森软件开发工作室.url new file mode 100644 index 00000000..09c31e74 --- /dev/null +++ b/新疆萌森软件开发工作室.url @@ -0,0 +1,5 @@ +[{000214A0-0000-0000-C000-000000000046}] +Prop3=19,11 +[InternetShortcut] +IDList= +URL=https://lifebus.top/
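As a quick post-install sanity check (a sketch, not part of the commit; it assumes auto_install.sh has already run and that BASE_DIR resolved to /opt as in the earlier example):

ls /opt/1panel/resource/apps/local    # should now list stream-rec, transmission, umami, uptime-kuma, yarr, ztncui, ...
crontab -l | grep app_install.sh      # expected entry: 0 */3 * * * /bin/bash /home/task/app_install.sh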