Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Target project: sig/storage/src/ceph-reef
Commits on Source (3)
Showing changes with 4421 additions and 0 deletions
105aad78fdc9a3e84049755eb07ea09230e314eeffd51375e75b487ff950d3ab SOURCES/ceph-18.2.4.tar.gz
peridot: https://mirror.potato.shrug.pw/peridot-cli-0.2.3-2.x86_64.rpm
Upload the tarball to the lookaside cache:
peridot lookaside upload ceph-18.2.4.tar.gz
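Before uploading, it is worth confirming that the tarball on disk matches the SHA-256 recorded above. A minimal sketch (the file name and digest are taken from the metadata line; everything else is standard library):

import hashlib

# SHA-256 recorded in the metadata line above (assumed to refer to this tarball).
EXPECTED = "105aad78fdc9a3e84049755eb07ea09230e314eeffd51375e75b487ff950d3ab"

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file through hashlib so large tarballs are not loaded into memory."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

if __name__ == "__main__":
    digest = sha256_of("ceph-18.2.4.tar.gz")
    print("OK" if digest == EXPECTED else f"MISMATCH: {digest}")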
--- ceph-15.2.2/src/common/crc32c_intel_fast_zero_asm.s.orig 2020-05-26 08:34:32.226201974 -0400
+++ ceph-15.2.2/src/common/crc32c_intel_fast_zero_asm.s 2020-05-26 17:19:32.497201974 -0400
@@ -1,5 +1,5 @@
;
-; Copyright 2012-2013 Intel Corporation All Rights Reserved.
+; Copyright 2012-2015 Intel Corporation All Rights Reserved.
; All rights reserved.
;
; http://opensource.org/licenses/BSD-3-Clause
@@ -59,6 +59,19 @@
xor rbx, rbx ;; rbx = crc1 = 0;
xor r10, r10 ;; r10 = crc2 = 0;
+ cmp len, %%bSize*3*2
+ jbe %%non_prefetch
+
+ %assign i 0
+ %rep %%bSize/8 - 1
+ crc32 rax, bufptmp ;; update crc0
+ crc32 rbx, bufptmp ;; update crc1
+ crc32 r10, bufptmp ;; update crc2
+ %assign i (i+8)
+ %endrep
+ jmp %%next %+ %1
+
+%%non_prefetch:
%assign i 0
%rep %%bSize/8 - 1
crc32 rax, bufptmp ;; update crc0
@@ -66,6 +79,8 @@
crc32 r10, bufptmp ;; update crc2
%assign i (i+8)
%endrep
+
+%%next %+ %1:
crc32 rax, bufptmp ;; update crc0
crc32 rbx, bufptmp ;; update crc1
; SKIP ;crc32 r10, bufptmp ;; update crc2
@@ -180,12 +195,15 @@
%define crc_init_dw r8d
%endif
-
+ endbranch
push rdi
push rbx
mov rax, crc_init ;; rax = crc_init;
+ cmp len, 8
+ jb less_than_8
+
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; 1) ALIGN: ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
--- ceph-15.1.0/src/common/bit_str.h.orig 2020-02-03 09:47:20.047149798 -0500
+++ ceph-15.1.0/src/common/bit_str.h 2020-02-03 09:47:50.213149798 -0500
@@ -17,6 +17,7 @@
#include <cstdint>
#include <iosfwd>
#include <functional>
+#include <ostream>
namespace ceph {
class Formatter;
--- ceph-16.1.0-43-g6b74fb5c/cmake/modules/Finduring.cmake.orig 2021-02-01 08:45:39.316108287 -0500
+++ ceph-16.1.0-43-g6b74fb5c/cmake/modules/Finduring.cmake 2021-02-01 08:45:59.813665378 -0500
@@ -5,7 +5,7 @@
# uring_FOUND - True if uring found.
find_path(URING_INCLUDE_DIR liburing.h)
-find_library(URING_LIBRARIES liburing.a liburing)
+find_library(URING_LIBRARIES liburing.so liburing)
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(uring DEFAULT_MSG URING_LIBRARIES URING_INCLUDE_DIR)
From 1999108aeb1f6f93a19ea7bb64c6ae8b87d1b264 Mon Sep 17 00:00:00 2001
From: "H.J. Lu" <hjl.tools@gmail.com>
Date: Thu, 20 Jan 2022 05:33:13 -0800
Subject: [PATCH] CET: Add CET marker to crc32c_intel_fast_zero_asm.s
Add .note.gnu.property section to crc32c_intel_fast_zero_asm.s to mark
for IBT and SHSTK compatibility.
---
src/common/crc32c_intel_fast_zero_asm.s | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/src/common/crc32c_intel_fast_zero_asm.s b/src/common/crc32c_intel_fast_zero_asm.s
index 216ecf639f3..2e291d858f3 100644
--- a/src/common/crc32c_intel_fast_zero_asm.s
+++ b/src/common/crc32c_intel_fast_zero_asm.s
@@ -654,4 +654,8 @@ slversion crc32_iscsi_zero_00, 00, 02, 0014
%ifidn __OUTPUT_FORMAT__, elf64
; inform linker that this doesn't require executable stack
section .note.GNU-stack noalloc noexec nowrite progbits
+; inform linker that this is compatible with IBT and SHSTK
+section .note.gnu.property note alloc noexec align=8
+DD 0x00000004,0x00000010,0x00000005,0x00554e47
+DD 0xc0000002,0x00000004,0x00000003,0x00000000
%endif
--
2.34.1
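For readers unfamiliar with the raw numbers, the two DD lines above are simply a little-endian ELF note: a 16-byte header (namesz=4, descsz=16, type=5, i.e. NT_GNU_PROPERTY_TYPE_0, name "GNU\0") followed by a single GNU property with pr_type 0xc0000002 (GNU_PROPERTY_X86_FEATURE_1_AND), pr_datasz 4 and pr_data 0x3, i.e. the IBT and SHSTK bits. The same eight words recur in the isa-l and spdk patches below, and the Boost context patches later emit an equivalent note with .byte directives. A small Python sketch that decodes the words (constant names follow the binutils/glibc definitions):

import struct

# The eight 32-bit words from the two DD lines, in order.
WORDS = [0x00000004, 0x00000010, 0x00000005, 0x00554e47,
         0xc0000002, 0x00000004, 0x00000003, 0x00000000]

raw = struct.pack("<8I", *WORDS)                 # NASM's DD emits little-endian dwords
namesz, descsz, ntype = struct.unpack_from("<3I", raw, 0)
name = raw[12:12 + namesz]                       # b"GNU\x00"
pr_type, pr_datasz = struct.unpack_from("<2I", raw, 16)
pr_data, = struct.unpack_from("<I", raw, 24)     # the last word (offset 28) is 8-byte alignment padding

assert ntype == 5                                # NT_GNU_PROPERTY_TYPE_0
assert pr_type == 0xc0000002                     # GNU_PROPERTY_X86_FEATURE_1_AND
IBT, SHSTK = 0x1, 0x2
print(name, hex(pr_data), "IBT:", bool(pr_data & IBT), "SHSTK:", bool(pr_data & SHSTK))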
From bbcc1a69f787881f16156f3c789052942a564103 Mon Sep 17 00:00:00 2001
From: "H.J. Lu" <hjl.tools@gmail.com>
Date: Thu, 20 Jan 2022 05:35:49 -0800
Subject: [PATCH] isa-l/CET: Add CET marker to x86-64 crc32 assembly codes
Add .note.gnu.property section to x86-64 crc32 assembly codes to mark
for IBT and SHSTK compatibility.
---
crc/crc32_gzip_refl_by16_10.asm | 9 +++++++++
crc/crc32_gzip_refl_by8.asm | 9 +++++++++
crc/crc32_gzip_refl_by8_02.asm | 9 +++++++++
crc/crc32_ieee_01.asm | 8 ++++++++
crc/crc32_ieee_02.asm | 9 +++++++++
crc/crc32_ieee_by16_10.asm | 9 +++++++++
crc/crc32_ieee_by4.asm | 9 +++++++++
crc/crc32_iscsi_00.asm | 8 ++++++++
crc/crc32_iscsi_01.asm | 8 ++++++++
9 files changed, 78 insertions(+)
diff --git a/src/isa-l/crc/crc32_gzip_refl_by16_10.asm b/src/isa-l/crc/crc32_gzip_refl_by16_10.asm
index 40236f6..b16874d 100644
--- a/src/isa-l/crc/crc32_gzip_refl_by16_10.asm
+++ b/src/isa-l/crc/crc32_gzip_refl_by16_10.asm
@@ -566,3 +566,12 @@ global no_ %+ FUNCTION_NAME
no_ %+ FUNCTION_NAME %+ :
%endif
%endif ; (AS_FEATURE_LEVEL) >= 10
+
+%ifidn __OUTPUT_FORMAT__, elf64
+; inform linker that this doesn't require executable stack
+section .note.GNU-stack noalloc noexec nowrite progbits
+; inform linker that this is compatible with IBT and SHSTK
+section .note.gnu.property note alloc noexec align=8
+DD 0x00000004,0x00000010,0x00000005,0x00554e47
+DD 0xc0000002,0x00000004,0x00000003,0x00000000
+%endif
diff --git a/src/isa-l/crc/crc32_gzip_refl_by8.asm b/src/isa-l/crc/crc32_gzip_refl_by8.asm
index 62f7e7d..97b0c4a 100644
--- a/src/isa-l/crc/crc32_gzip_refl_by8.asm
+++ b/src/isa-l/crc/crc32_gzip_refl_by8.asm
@@ -622,3 +622,12 @@ dq 0x0706050403020100, 0x000e0d0c0b0a0908
;;; func core, ver, snum
slversion crc32_gzip_refl_by8, 01, 00, 002c
+
+%ifidn __OUTPUT_FORMAT__, elf64
+; inform linker that this doesn't require executable stack
+section .note.GNU-stack noalloc noexec nowrite progbits
+; inform linker that this is compatible with IBT and SHSTK
+section .note.gnu.property note alloc noexec align=8
+DD 0x00000004,0x00000010,0x00000005,0x00554e47
+DD 0xc0000002,0x00000004,0x00000003,0x00000000
+%endif
diff --git a/src/isa-l/crc/crc32_gzip_refl_by8_02.asm b/src/isa-l/crc/crc32_gzip_refl_by8_02.asm
index 80d849e..1d5a75f 100644
--- a/src/isa-l/crc/crc32_gzip_refl_by8_02.asm
+++ b/src/isa-l/crc/crc32_gzip_refl_by8_02.asm
@@ -553,3 +553,12 @@ pshufb_shf_table:
; dq 0x060504030201008f, 0x0e0d0c0b0a090807 ; shl 1 (16-15) / shr15
dq 0x8786858483828100, 0x8f8e8d8c8b8a8988
dq 0x0706050403020100, 0x000e0d0c0b0a0908
+
+%ifidn __OUTPUT_FORMAT__, elf64
+; inform linker that this doesn't require executable stack
+section .note.GNU-stack noalloc noexec nowrite progbits
+; inform linker that this is compatible with IBT and SHSTK
+section .note.gnu.property note alloc noexec align=8
+DD 0x00000004,0x00000010,0x00000005,0x00554e47
+DD 0xc0000002,0x00000004,0x00000003,0x00000000
+%endif
diff --git a/src/isa-l/crc/crc32_ieee_01.asm b/src/isa-l/crc/crc32_ieee_01.asm
index 32495ed..cfc443b 100644
--- a/src/isa-l/crc/crc32_ieee_01.asm
+++ b/src/isa-l/crc/crc32_ieee_01.asm
@@ -653,3 +653,11 @@ dq 0x0706050403020100, 0x000e0d0c0b0a0908
;;; func core, ver, snum
slversion crc32_ieee_01, 01, 06, 0011
+%ifidn __OUTPUT_FORMAT__, elf64
+; inform linker that this doesn't require executable stack
+section .note.GNU-stack noalloc noexec nowrite progbits
+; inform linker that this is compatible with IBT and SHSTK
+section .note.gnu.property note alloc noexec align=8
+DD 0x00000004,0x00000010,0x00000005,0x00554e47
+DD 0xc0000002,0x00000004,0x00000003,0x00000000
+%endif
diff --git a/src/isa-l/crc/crc32_ieee_02.asm b/src/isa-l/crc/crc32_ieee_02.asm
index 8a472b0..dd7096a 100644
--- a/src/isa-l/crc/crc32_ieee_02.asm
+++ b/src/isa-l/crc/crc32_ieee_02.asm
@@ -649,3 +649,12 @@ pshufb_shf_table:
; dq 0x060504030201008f, 0x0e0d0c0b0a090807 ; shl 1 (16-15) / shr15
dq 0x8786858483828100, 0x8f8e8d8c8b8a8988
dq 0x0706050403020100, 0x000e0d0c0b0a0908
+
+%ifidn __OUTPUT_FORMAT__, elf64
+; inform linker that this doesn't require executable stack
+section .note.GNU-stack noalloc noexec nowrite progbits
+; inform linker that this is compatible with IBT and SHSTK
+section .note.gnu.property note alloc noexec align=8
+DD 0x00000004,0x00000010,0x00000005,0x00554e47
+DD 0xc0000002,0x00000004,0x00000003,0x00000000
+%endif
diff --git a/src/isa-l/crc/crc32_ieee_by16_10.asm b/src/isa-l/crc/crc32_ieee_by16_10.asm
index 200fd93..2afd597 100644
--- a/src/isa-l/crc/crc32_ieee_by16_10.asm
+++ b/src/isa-l/crc/crc32_ieee_by16_10.asm
@@ -582,3 +582,12 @@ global no_ %+ FUNCTION_NAME
no_ %+ FUNCTION_NAME %+ :
%endif
%endif ; (AS_FEATURE_LEVEL) >= 10
+
+%ifidn __OUTPUT_FORMAT__, elf64
+; inform linker that this doesn't require executable stack
+section .note.GNU-stack noalloc noexec nowrite progbits
+; inform linker that this is compatible with IBT and SHSTK
+section .note.gnu.property note alloc noexec align=8
+DD 0x00000004,0x00000010,0x00000005,0x00554e47
+DD 0xc0000002,0x00000004,0x00000003,0x00000000
+%endif
diff --git a/src/isa-l/crc/crc32_ieee_by4.asm b/src/isa-l/crc/crc32_ieee_by4.asm
index 39bed5a..847d0bd 100644
--- a/src/isa-l/crc/crc32_ieee_by4.asm
+++ b/src/isa-l/crc/crc32_ieee_by4.asm
@@ -563,3 +563,12 @@ SHUF_MASK dq 0x08090A0B0C0D0E0F, 0x0001020304050607
;;; func core, ver, snum
slversion crc32_ieee_by4, 05, 02, 0017
+
+%ifidn __OUTPUT_FORMAT__, elf64
+; inform linker that this doesn't require executable stack
+section .note.GNU-stack noalloc noexec nowrite progbits
+; inform linker that this is compatible with IBT and SHSTK
+section .note.gnu.property note alloc noexec align=8
+DD 0x00000004,0x00000010,0x00000005,0x00554e47
+DD 0xc0000002,0x00000004,0x00000003,0x00000000
+%endif
diff --git a/src/isa-l/crc/crc32_iscsi_00.asm b/src/isa-l/crc/crc32_iscsi_00.asm
index 4f81e3a..3d6b2d1 100644
--- a/src/isa-l/crc/crc32_iscsi_00.asm
+++ b/src/isa-l/crc/crc32_iscsi_00.asm
@@ -669,3 +669,11 @@ DD 0x54851c7f,0x89e3d7c4,0xeba4fdf8,0x36c23643
;;; func core, ver, snum
slversion crc32_iscsi_00, 00, 04, 0014
+%ifidn __OUTPUT_FORMAT__, elf64
+; inform linker that this doesn't require executable stack
+section .note.GNU-stack noalloc noexec nowrite progbits
+; inform linker that this is compatible with IBT and SHSTK
+section .note.gnu.property note alloc noexec align=8
+DD 0x00000004,0x00000010,0x00000005,0x00554e47
+DD 0xc0000002,0x00000004,0x00000003,0x00000000
+%endif
diff --git a/src/isa-l/crc/crc32_iscsi_01.asm b/src/isa-l/crc/crc32_iscsi_01.asm
index 2a81517..c048413 100644
--- a/src/isa-l/crc/crc32_iscsi_01.asm
+++ b/src/isa-l/crc/crc32_iscsi_01.asm
@@ -588,3 +588,11 @@ K_table:
;;; func core, ver, snum
slversion crc32_iscsi_01, 01, 04, 0015
+%ifidn __OUTPUT_FORMAT__, elf64
+; inform linker that this doesn't require executable stack
+section .note.GNU-stack noalloc noexec nowrite progbits
+; inform linker that this is compatible with IBT and SHSTK
+section .note.gnu.property note alloc noexec align=8
+DD 0x00000004,0x00000010,0x00000005,0x00554e47
+DD 0xc0000002,0x00000004,0x00000003,0x00000000
+%endif
--
2.34.1
From 72e6d27e08c86c16e8931739a5e6ecbc06b102d5 Mon Sep 17 00:00:00 2001
From: "H.J. Lu" <hjl.tools@gmail.com>
Date: Thu, 20 Jan 2022 05:40:56 -0800
Subject: [PATCH] spdk/isa-l/CET: Add CET marker to x86-64 crc32 assembly codes
Add .note.gnu.property section to x86-64 crc32 assembly codes to mark
for IBT and SHSTK compatibility.
---
crc/crc32_gzip_refl_by8.asm | 9 +++++++++
crc/crc32_ieee_01.asm | 8 ++++++++
crc/crc32_ieee_by4.asm | 9 +++++++++
crc/crc32_iscsi_00.asm | 8 ++++++++
crc/crc32_iscsi_01.asm | 8 ++++++++
5 files changed, 42 insertions(+)
diff --git a/src/spdk/isa-l/crc/crc32_gzip_refl_by8.asm b/src/spdk/isa-l/crc/crc32_gzip_refl_by8.asm
index 62f7e7d..97b0c4a 100644
--- a/src/spdk/isa-l/crc/crc32_gzip_refl_by8.asm
+++ b/src/spdk/isa-l/crc/crc32_gzip_refl_by8.asm
@@ -622,3 +622,12 @@ dq 0x0706050403020100, 0x000e0d0c0b0a0908
;;; func core, ver, snum
slversion crc32_gzip_refl_by8, 01, 00, 002c
+
+%ifidn __OUTPUT_FORMAT__, elf64
+; inform linker that this doesn't require executable stack
+section .note.GNU-stack noalloc noexec nowrite progbits
+; inform linker that this is compatible with IBT and SHSTK
+section .note.gnu.property note alloc noexec align=8
+DD 0x00000004,0x00000010,0x00000005,0x00554e47
+DD 0xc0000002,0x00000004,0x00000003,0x00000000
+%endif
diff --git a/src/spdk/isa-l/crc/crc32_ieee_01.asm b/src/spdk/isa-l/crc/crc32_ieee_01.asm
index 32495ed..cfc443b 100644
--- a/src/spdk/isa-l/crc/crc32_ieee_01.asm
+++ b/src/spdk/isa-l/crc/crc32_ieee_01.asm
@@ -653,3 +653,11 @@ dq 0x0706050403020100, 0x000e0d0c0b0a0908
;;; func core, ver, snum
slversion crc32_ieee_01, 01, 06, 0011
+%ifidn __OUTPUT_FORMAT__, elf64
+; inform linker that this doesn't require executable stack
+section .note.GNU-stack noalloc noexec nowrite progbits
+; inform linker that this is compatible with IBT and SHSTK
+section .note.gnu.property note alloc noexec align=8
+DD 0x00000004,0x00000010,0x00000005,0x00554e47
+DD 0xc0000002,0x00000004,0x00000003,0x00000000
+%endif
diff --git a/src/spdk/isa-l/crc/crc32_ieee_by4.asm b/src/spdk/isa-l/crc/crc32_ieee_by4.asm
index 39bed5a..847d0bd 100644
--- a/src/spdk/isa-l/crc/crc32_ieee_by4.asm
+++ b/src/spdk/isa-l/crc/crc32_ieee_by4.asm
@@ -563,3 +563,12 @@ SHUF_MASK dq 0x08090A0B0C0D0E0F, 0x0001020304050607
;;; func core, ver, snum
slversion crc32_ieee_by4, 05, 02, 0017
+
+%ifidn __OUTPUT_FORMAT__, elf64
+; inform linker that this doesn't require executable stack
+section .note.GNU-stack noalloc noexec nowrite progbits
+; inform linker that this is compatible with IBT and SHSTK
+section .note.gnu.property note alloc noexec align=8
+DD 0x00000004,0x00000010,0x00000005,0x00554e47
+DD 0xc0000002,0x00000004,0x00000003,0x00000000
+%endif
diff --git a/src/spdk/isa-l/crc/crc32_iscsi_00.asm b/src/spdk/isa-l/crc/crc32_iscsi_00.asm
index 4f81e3a..3d6b2d1 100644
--- a/src/spdk/isa-l/crc/crc32_iscsi_00.asm
+++ b/src/spdk/isa-l/crc/crc32_iscsi_00.asm
@@ -669,3 +669,11 @@ DD 0x54851c7f,0x89e3d7c4,0xeba4fdf8,0x36c23643
;;; func core, ver, snum
slversion crc32_iscsi_00, 00, 04, 0014
+%ifidn __OUTPUT_FORMAT__, elf64
+; inform linker that this doesn't require executable stack
+section .note.GNU-stack noalloc noexec nowrite progbits
+; inform linker that this is compatible with IBT and SHSTK
+section .note.gnu.property note alloc noexec align=8
+DD 0x00000004,0x00000010,0x00000005,0x00554e47
+DD 0xc0000002,0x00000004,0x00000003,0x00000000
+%endif
diff --git a/src/spdk/isa-l/crc/crc32_iscsi_01.asm b/src/spdk/isa-l/crc/crc32_iscsi_01.asm
index 2a81517..c048413 100644
--- a/src/spdk/isa-l/crc/crc32_iscsi_01.asm
+++ b/src/spdk/isa-l/crc/crc32_iscsi_01.asm
@@ -588,3 +588,11 @@ K_table:
;;; func core, ver, snum
slversion crc32_iscsi_01, 01, 04, 0015
+%ifidn __OUTPUT_FORMAT__, elf64
+; inform linker that this doesn't require executable stack
+section .note.GNU-stack noalloc noexec nowrite progbits
+; inform linker that this is compatible with IBT and SHSTK
+section .note.gnu.property note alloc noexec align=8
+DD 0x00000004,0x00000010,0x00000005,0x00554e47
+DD 0xc0000002,0x00000004,0x00000003,0x00000000
+%endif
--
2.34.1
--- ceph-16.2.6-681-gfdc003bc/src/tracing/bluestore.tp.orig 2021-12-07 08:02:04.682972474 -0500
+++ ceph-16.2.6-681-gfdc003bc/src/tracing/bluestore.tp 2021-12-07 08:03:13.840771852 -0500
@@ -1,3 +1,9 @@
+
+#ifdef __x86_64__
+#undef STAP_SDT_ARG_CONSTRAINT
+#define STAP_SDT_ARG_CONSTRAINT norx
+#endif
+
#include "include/int_types.h"
TRACEPOINT_EVENT(bluestore, transaction_state_duration,
--- ceph-16.2.6-681-gfdc003bc/src/tracing/librbd.tp.orig 2021-12-07 09:50:16.467579483 -0500
+++ ceph-16.2.6-681-gfdc003bc/src/tracing/librbd.tp 2021-12-07 09:50:47.620026940 -0500
@@ -1,3 +1,8 @@
+#ifdef __x86_64__
+#undef STAP_SDT_ARG_CONSTRAINT
+#define STAP_SDT_ARG_CONSTRAINT norx
+#endif
+
#include "tracing/tracing-common.h"
#include "include/rbd/librbd.h"
#include "include/int_types.h"
--- ceph-16.2.7/src/common/LogEntry.cc.orig 2022-01-17 13:52:10.799134159 -0500
+++ ceph-16.2.7/src/common/LogEntry.cc 2022-01-17 13:52:47.244469274 -0500
@@ -183,7 +183,7 @@
return "crit";
default:
ceph_abort();
- return 0;
+ return "";
}
}
--- ceph-16.2.7/src/test/librados/tier_cxx.cc.orig 2022-01-19 09:30:47.209459506 -0500
+++ ceph-16.2.7/src/test/librados/tier_cxx.cc 2022-01-19 10:02:47.783240298 -0500
@@ -120,7 +120,7 @@
}
void check_fp_oid_refcount(librados::IoCtx& ioctx, std::string foid, uint64_t count,
- std::string fp_algo = NULL)
+ std::string fp_algo = "")
{
bufferlist t;
int size = foid.length();
@@ -148,7 +148,7 @@
ASSERT_LE(count, refs.count());
}
-string get_fp_oid(string oid, std::string fp_algo = NULL)
+string get_fp_oid(string oid, std::string fp_algo = "")
{
if (fp_algo == "sha1") {
unsigned char fingerprint[CEPH_CRYPTO_SHA1_DIGESTSIZE + 1];
--- ceph-17.0.0-10335-gfd206722/src/s3select/include/s3select_functions.h.orig 2022-02-11 17:21:40.268627997 -0500
+++ ceph-17.0.0-10335-gfd206722/src/s3select/include/s3select_functions.h 2022-02-11 17:21:57.155325437 -0500
@@ -466,7 +466,7 @@
std::string print(int ident) override
{
- return std::string(0);
+ return std::string("");
}
void push_argument(base_statement* arg)
--- ceph-18.0.0-3078-gc4847bf8/src/rgw/driver/dbstore/CMakeLists.txt.orig 2023-05-10 08:23:50.000000000 -0400
+++ ceph-18.0.0-3078-gc4847bf8/src/rgw/driver/dbstore/CMakeLists.txt 2023-05-11 08:21:13.794152904 -0400
@@ -24,7 +24,7 @@
dbstore_mgr.cc
)
-add_library(dbstore_lib ${dbstore_srcs})
+add_library(dbstore_lib STATIC ${dbstore_srcs})
target_include_directories(dbstore_lib
PUBLIC "${CMAKE_SOURCE_DIR}/src/rgw"
PUBLIC "${CMAKE_SOURCE_DIR}/src/rgw/store/rados"
@@ -49,6 +49,7 @@
# add pthread library
set (CMAKE_LINK_LIBRARIES ${CMAKE_LINK_LIBRARIES} pthread)
+set (CMAKE_LINK_LIBRARIES ${CMAKE_LINK_LIBRARIES} global)
find_package(gtest QUIET)
if(WITH_TESTS)
--- ceph-17.1.0-175-g086c8f84/src/arrow/cpp/cmake_modules/ThirdpartyToolchain.cmake.orig 2022-04-08 11:27:53.593570634 -0400
+++ ceph-17.1.0-175-g086c8f84/src/arrow/cpp/cmake_modules/ThirdpartyToolchain.cmake 2022-04-08 11:28:20.778087653 -0400
@@ -1991,7 +1991,7 @@
if((NOT ARROW_SIMD_LEVEL STREQUAL "NONE") OR (NOT ARROW_RUNTIME_SIMD_LEVEL STREQUAL "NONE"
))
- set(xsimd_SOURCE "BUNDLED")
+ set(xsimd_SOURCE "SYSTEM")
resolve_dependency(xsimd)
# TODO: Don't use global includes but rather target_include_directories
include_directories(SYSTEM ${XSIMD_INCLUDE_DIR})
--- ceph-17.2.6/cmake/modules/BuildBoost.cmake.orig 2023-04-27 14:00:28.239524778 -0400
+++ ceph-17.2.6/cmake/modules/BuildBoost.cmake 2023-04-28 07:49:59.743342207 -0400
@@ -63,7 +63,11 @@
else()
list(APPEND boost_features "address-model=32")
endif()
- set(BOOST_CXXFLAGS "-fPIC -w") # check on arm, etc <---XXX
+ if(CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64")
+ set(BOOST_CXXFLAGS "-fPIC -w -fcf-protection") # check on arm, etc <---XXX
+ else()
+ set(BOOST_CXXFLAGS "-fPIC -w") # check on arm, etc <---XXX
+ endif()
list(APPEND boost_features "cxxflags=${BOOST_CXXFLAGS}")
set(boost_with_libs)
--- ceph-17.2.6/src/boost/libs/context/src/asm/make_x86_64_sysv_elf_gas.S.orig 2023-04-30 14:25:35.009605033 -0400
+++ ceph-17.2.6/src/boost/libs/context/src/asm/make_x86_64_sysv_elf_gas.S 2023-04-30 14:28:32.239465067 -0400
@@ -80,3 +80,18 @@
/* Mark that we don't need executable stack. */
.section .note.GNU-stack,"",%progbits
+
+.section .note.gnu.property
+.align=8
+
+ .byte 0x04, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00
+ .byte 0x05, 0x00, 0x00, 0x00, 0x47, 0x4E, 0x55, 0x00
+ .byte 0x00, 0x00, 0x00, 0xC0, 0x04, 0x00, 0x00, 0x00
+ .byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+ .byte 0x01, 0x00, 0x00, 0xC0, 0x04, 0x00, 0x00, 0x00
+ .byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+ .byte 0x04, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00
+ .byte 0x05, 0x00, 0x00, 0x00, 0x47, 0x4E, 0x55, 0x00
+ .byte 0x02, 0x00, 0x00, 0xC0, 0x04, 0x00, 0x00, 0x00
+ .byte 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+
--- ceph-17.2.6/src/boost/libs/context/src/asm/jump_x86_64_sysv_elf_gas.S.orig 2023-04-30 14:25:35.008605050 -0400
+++ ceph-17.2.6/src/boost/libs/context/src/asm/jump_x86_64_sysv_elf_gas.S 2023-04-30 14:27:50.145210847 -0400
@@ -89,3 +89,17 @@
/* Mark that we don't need executable stack. */
.section .note.GNU-stack,"",%progbits
+
+.section .note.gnu.property
+.align=8
+
+ .byte 0x04, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00
+ .byte 0x05, 0x00, 0x00, 0x00, 0x47, 0x4E, 0x55, 0x00
+ .byte 0x00, 0x00, 0x00, 0xC0, 0x04, 0x00, 0x00, 0x00
+ .byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+ .byte 0x01, 0x00, 0x00, 0xC0, 0x04, 0x00, 0x00, 0x00
+ .byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+ .byte 0x04, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00
+ .byte 0x05, 0x00, 0x00, 0x00, 0x47, 0x4E, 0x55, 0x00
+ .byte 0x02, 0x00, 0x00, 0xC0, 0x04, 0x00, 0x00, 0x00
+ .byte 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
--- ceph-17.2.6/src/boost/libs/context/src/asm/ontop_x86_64_sysv_elf_gas.S.orig 2023-04-30 14:25:35.009605033 -0400
+++ ceph-17.2.6/src/boost/libs/context/src/asm/ontop_x86_64_sysv_elf_gas.S 2023-04-30 14:29:30.402434597 -0400
@@ -92,3 +92,17 @@
/* Mark that we don't need executable stack. */
.section .note.GNU-stack,"",%progbits
+
+.section .note.gnu.property
+.align=8
+
+ .byte 0x04, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00
+ .byte 0x05, 0x00, 0x00, 0x00, 0x47, 0x4E, 0x55, 0x00
+ .byte 0x00, 0x00, 0x00, 0xC0, 0x04, 0x00, 0x00, 0x00
+ .byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+ .byte 0x01, 0x00, 0x00, 0xC0, 0x04, 0x00, 0x00, 0x00
+ .byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+ .byte 0x04, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00
+ .byte 0x05, 0x00, 0x00, 0x00, 0x47, 0x4E, 0x55, 0x00
+ .byte 0x02, 0x00, 0x00, 0xC0, 0x04, 0x00, 0x00, 0x00
+ .byte 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
--- ceph-18.0.0-2950-g1c931bc4/cmake/modules/BuildBoost.cmake.orig 2023-04-28 18:30:19.133064577 -0400
+++ ceph-18.0.0-2950-g1c931bc4/cmake/modules/BuildBoost.cmake 2023-04-28 18:31:55.290354383 -0400
@@ -104,12 +104,21 @@
set(user_config ${CMAKE_BINARY_DIR}/user-config.jam)
# edit the user-config.jam so b2 will be able to use the specified
# toolset and python
+if(CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64")
+ file(WRITE ${user_config}
+ "using ${toolset}"
+ " : "
+ " : ${CMAKE_CXX_COMPILER}"
+ " : <compileflags>-fPIC <compileflags>-w <compileflags>-fcf-protection <compileflags>-Wno-everything"
+ " ;\n")
+else()
file(WRITE ${user_config}
"using ${toolset}"
" : "
" : ${CMAKE_CXX_COMPILER}"
" : <compileflags>-fPIC <compileflags>-w <compileflags>-Wno-everything"
" ;\n")
+endif()
if(with_python_version)
find_package(Python3 ${with_python_version} QUIET REQUIRED
COMPONENTS Development)
diff -ur ceph-18.2.1~/debian/changelog ceph-18.2.1/debian/changelog
--- ceph-18.2.1~/debian/changelog 2023-11-14 14:36:19.000000000 -0500
+++ ceph-18.2.1/debian/changelog 2023-12-11 16:55:38.000000000 -0500
@@ -2,7 +2,7 @@
* New upstream release
- -- Ceph Release Team <ceph-maintainers@ceph.io> Tue, 14 Nov 2023 19:36:16 +0000
+ -- Ceph Release Team <ceph-maintainers@ceph.io> Mon, 11 Dec 2023 21:55:36 +0000
ceph (18.2.0-1) stable; urgency=medium
diff -ur ceph-18.2.1~/doc/architecture.rst ceph-18.2.1/doc/architecture.rst
--- ceph-18.2.1~/doc/architecture.rst 2023-11-14 14:36:19.000000000 -0500
+++ ceph-18.2.1/doc/architecture.rst 2023-12-11 16:55:38.000000000 -0500
@@ -30,6 +30,8 @@
- :term:`Ceph Manager`
- :term:`Ceph Metadata Server`
+.. _arch_monitor:
+
Ceph Monitors maintain the master copy of the cluster map, which they provide
to Ceph clients. Provisioning multiple monitors within the Ceph cluster ensures
availability in the event that one of the monitor daemons or its host fails.
diff -ur ceph-18.2.1~/doc/glossary.rst ceph-18.2.1/doc/glossary.rst
--- ceph-18.2.1~/doc/glossary.rst 2023-11-14 14:36:19.000000000 -0500
+++ ceph-18.2.1/doc/glossary.rst 2023-12-11 16:55:38.000000000 -0500
@@ -271,7 +271,7 @@
The Ceph manager software, which collects all the state from
the whole cluster in one place.
- MON
+ :ref:`MON<arch_monitor>`
The Ceph monitor software.
Node
@@ -337,6 +337,12 @@
Firefly (v. 0.80). See :ref:`Primary Affinity
<rados_ops_primary_affinity>`.
+ Quorum
+ Quorum is the state that exists when a majority of the
+ :ref:`Monitors<arch_monitor>` in the cluster are ``up``. A
+ minimum of three :ref:`Monitors<arch_monitor>` must exist in
+ the cluster in order for Quorum to be possible.
+
RADOS
**R**\eliable **A**\utonomic **D**\istributed **O**\bject
**S**\tore. RADOS is the object store that provides a scalable
diff -ur ceph-18.2.1~/doc/rados/troubleshooting/troubleshooting-mon.rst ceph-18.2.1/doc/rados/troubleshooting/troubleshooting-mon.rst
--- ceph-18.2.1~/doc/rados/troubleshooting/troubleshooting-mon.rst 2023-11-14 14:36:19.000000000 -0500
+++ ceph-18.2.1/doc/rados/troubleshooting/troubleshooting-mon.rst 2023-12-11 16:55:38.000000000 -0500
@@ -17,59 +17,66 @@
Initial Troubleshooting
=======================
-#. **Make sure that the monitors are running.**
+The first steps in the process of troubleshooting Ceph Monitors involve making
+sure that the Monitors are running and that they are able to communicate with
+the network and on the network. Follow the steps in this section to rule out
+the simplest causes of Monitor malfunction.
+
+#. **Make sure that the Monitors are running.**
+
+ Make sure that the Monitor (*mon*) daemon processes (``ceph-mon``) are
+ running. It might be the case that the mons have not be restarted after an
+ upgrade. Checking for this simple oversight can save hours of painstaking
+ troubleshooting.
+
+ It is also important to make sure that the manager daemons (``ceph-mgr``)
+ are running. Remember that typical cluster configurations provide one
+ Manager (``ceph-mgr``) for each Monitor (``ceph-mon``).
- First, make sure that the monitor (*mon*) daemon processes (``ceph-mon``)
- are running. Sometimes Ceph admins either forget to start the mons or
- forget to restart the mons after an upgrade. Checking for this simple
- oversight can save hours of painstaking troubleshooting. It is also
- important to make sure that the manager daemons (``ceph-mgr``) are running.
- Remember that typical cluster configurations provide one ``ceph-mgr`` for
- each ``ceph-mon``.
+ .. note:: In releases prior to v1.12.5, Rook will not run more than two
+ managers.
- .. note:: Rook will not run more than two managers.
+#. **Make sure that you can reach the Monitor nodes.**
-#. **Make sure that you can reach the monitor nodes.**
-
- In certain rare cases, there may be ``iptables`` rules that block access to
- monitor nodes or TCP ports. These rules might be left over from earlier
+ In certain rare cases, ``iptables`` rules might be blocking access to
+ Monitor nodes or TCP ports. These rules might be left over from earlier
stress testing or rule development. To check for the presence of such
- rules, SSH into the server and then try to connect to the monitor's ports
- (``tcp/3300`` and ``tcp/6789``) using ``telnet``, ``nc``, or a similar
- tool.
-
-#. **Make sure that the ``ceph status`` command runs and receives a reply from the cluster.**
-
- If the ``ceph status`` command does receive a reply from the cluster, then
- the cluster is up and running. The monitors will answer to a ``status``
- request only if there is a formed quorum. Confirm that one or more ``mgr``
- daemons are reported as running. Under ideal conditions, all ``mgr``
- daemons will be reported as running.
-
+ rules, SSH into each Monitor node and use ``telnet`` or ``nc`` or a similar
+ tool to attempt to connect to each of the other Monitor nodes on ports
+ ``tcp/3300`` and ``tcp/6789``.
+
+#. **Make sure that the "ceph status" command runs and receives a reply from the cluster.**
+
+ If the ``ceph status`` command receives a reply from the cluster, then the
+ cluster is up and running. Monitors answer to a ``status`` request only if
+ there is a formed quorum. Confirm that one or more ``mgr`` daemons are
+ reported as running. In a cluster with no deficiencies, ``ceph status``
+ will report that all ``mgr`` daemons are running.
If the ``ceph status`` command does not receive a reply from the cluster,
- then there are probably not enough monitors ``up`` to form a quorum. The
- ``ceph -s`` command with no further options specified connects to an
- arbitrarily selected monitor. In certain cases, however, it might be
- helpful to connect to a specific monitor (or to several specific monitors
+ then there are probably not enough Monitors ``up`` to form a quorum. If the
+ ``ceph -s`` command is run with no further options specified, it connects
+ to an arbitrarily selected Monitor. In certain cases, however, it might be
+ helpful to connect to a specific Monitor (or to several specific Monitors
in sequence) by adding the ``-m`` flag to the command: for example, ``ceph
status -m mymon1``.
#. **None of this worked. What now?**
If the above solutions have not resolved your problems, you might find it
- helpful to examine each individual monitor in turn. Whether or not a quorum
- has been formed, it is possible to contact each monitor individually and
+ helpful to examine each individual Monitor in turn. Even if no quorum has
+ been formed, it is possible to contact each Monitor individually and
request its status by using the ``ceph tell mon.ID mon_status`` command
- (here ``ID`` is the monitor's identifier).
+ (here ``ID`` is the Monitor's identifier).
- Run the ``ceph tell mon.ID mon_status`` command for each monitor in the
+ Run the ``ceph tell mon.ID mon_status`` command for each Monitor in the
cluster. For more on this command's output, see :ref:`Understanding
mon_status
<rados_troubleshoting_troubleshooting_mon_understanding_mon_status>`.
- There is also an alternative method: SSH into each monitor node and query
- the daemon's admin socket. See :ref:`Using the Monitor's Admin
+ There is also an alternative method for contacting each individual Monitor:
+ SSH into each Monitor node and query the daemon's admin socket. See
+ :ref:`Using the Monitor's Admin
Socket<rados_troubleshoting_troubleshooting_mon_using_admin_socket>`.
.. _rados_troubleshoting_troubleshooting_mon_using_admin_socket:
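The reachability check described in step 2 above can be scripted. A minimal sketch, assuming the standard Monitor ports named in the text (3300 for msgr2, 6789 for the legacy msgr1 protocol) and placeholder hostnames:

import socket

MON_HOSTS = ["mymon1", "mymon2", "mymon3"]   # placeholder hostnames
MON_PORTS = [3300, 6789]                     # msgr2 and legacy msgr1

def can_connect(host: str, port: int, timeout: float = 3.0) -> bool:
    """Return True if a TCP connection to host:port succeeds within the timeout."""
    try:
        with socket.create_connection((host, port), timeout=timeout):
            return True
    except OSError:
        return False

for host in MON_HOSTS:
    for port in MON_PORTS:
        state = "open" if can_connect(host, port) else "unreachable"
        print(f"{host}:{port} {state}")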
diff -ur ceph-18.2.1~/qa/tasks/cephfs/kernel_mount.py ceph-18.2.1/qa/tasks/cephfs/kernel_mount.py
--- ceph-18.2.1~/qa/tasks/cephfs/kernel_mount.py 2023-11-14 14:36:19.000000000 -0500
+++ ceph-18.2.1/qa/tasks/cephfs/kernel_mount.py 2023-12-11 16:55:38.000000000 -0500
@@ -68,7 +68,10 @@
self.enable_dynamic_debug()
self.ctx[f'kmount_count.{self.client_remote.hostname}'] = kmount_count + 1
- self.gather_mount_info()
+ try:
+ self.gather_mount_info()
+ except:
+ log.warn('failed to fetch mount info - tests depending on mount addr/inst may fail!')
def gather_mount_info(self):
self.id = self._get_global_id()
diff -ur ceph-18.2.1~/qa/tasks/cephfs/mount.py ceph-18.2.1/qa/tasks/cephfs/mount.py
--- ceph-18.2.1~/qa/tasks/cephfs/mount.py 2023-11-14 14:36:19.000000000 -0500
+++ ceph-18.2.1/qa/tasks/cephfs/mount.py 2023-12-11 16:55:38.000000000 -0500
@@ -186,6 +186,12 @@
sudo=True).decode())
def is_blocked(self):
+ if not self.addr:
+ # can't infer if our addr is blocklisted - let the caller try to
+ # umount without lazy/force. If the client was blocklisted, then
+ # the umount would be stuck and the test would fail on timeout.
+ # happens only with Ubuntu 20.04 (missing kclient patches :/).
+ return False
self.fs = Filesystem(self.ctx, name=self.cephfs_name)
try:
diff -ur ceph-18.2.1~/src/ceph-volume/ceph_volume/devices/raw/list.py ceph-18.2.1/src/ceph-volume/ceph_volume/devices/raw/list.py
--- ceph-18.2.1~/src/ceph-volume/ceph_volume/devices/raw/list.py 2023-11-14 14:36:19.000000000 -0500
+++ ceph-18.2.1/src/ceph-volume/ceph_volume/devices/raw/list.py 2023-12-11 16:55:38.000000000 -0500
@@ -5,7 +5,7 @@
from textwrap import dedent
from ceph_volume import decorators, process
from ceph_volume.util import disk
-
+from typing import Any, Dict, List
logger = logging.getLogger(__name__)
@@ -66,50 +66,57 @@
def __init__(self, argv):
self.argv = argv
+ def is_atari_partitions(self, _lsblk: Dict[str, Any]) -> bool:
+ dev = _lsblk['NAME']
+ if _lsblk.get('PKNAME'):
+ parent = _lsblk['PKNAME']
+ try:
+ if disk.has_bluestore_label(parent):
+ logger.warning(('ignoring child device {} whose parent {} is a BlueStore OSD.'.format(dev, parent),
+ 'device is likely a phantom Atari partition. device info: {}'.format(_lsblk)))
+ return True
+ except OSError as e:
+ logger.error(('ignoring child device {} to avoid reporting invalid BlueStore data from phantom Atari partitions.'.format(dev),
+ 'failed to determine if parent device {} is BlueStore. err: {}'.format(parent, e)))
+ return True
+ return False
+
+ def exclude_atari_partitions(self, _lsblk_all: Dict[str, Any]) -> List[Dict[str, Any]]:
+ return [_lsblk for _lsblk in _lsblk_all if not self.is_atari_partitions(_lsblk)]
+
def generate(self, devs=None):
logger.debug('Listing block devices via lsblk...')
- info_devices = disk.lsblk_all(abspath=True)
+ info_devices = []
if not devs or not any(devs):
# If no devs are given initially, we want to list ALL devices including children and
# parents. Parent disks with child partitions may be the appropriate device to return if
# the parent disk has a bluestore header, but children may be the most appropriate
# devices to return if the parent disk does not have a bluestore header.
+ info_devices = disk.lsblk_all(abspath=True)
devs = [device['NAME'] for device in info_devices if device.get('NAME',)]
+ else:
+ for dev in devs:
+ info_devices.append(disk.lsblk(dev, abspath=True))
+
+ # Linux kernels built with CONFIG_ATARI_PARTITION enabled can falsely interpret
+ # bluestore's on-disk format as an Atari partition table. These false Atari partitions
+ # can be interpreted as real OSDs if a bluestore OSD was previously created on the false
+ # partition. See https://tracker.ceph.com/issues/52060 for more info. If a device has a
+ # parent, it is a child. If the parent is a valid bluestore OSD, the child will only
+ # exist if it is a phantom Atari partition, and the child should be ignored. If the
+ # parent isn't bluestore, then the child could be a valid bluestore OSD. If we fail to
+ # determine whether a parent is bluestore, we should err on the side of not reporting
+ # the child so as not to give a false negative.
+ info_devices = self.exclude_atari_partitions(info_devices)
result = {}
logger.debug('inspecting devices: {}'.format(devs))
- for dev in devs:
- # Linux kernels built with CONFIG_ATARI_PARTITION enabled can falsely interpret
- # bluestore's on-disk format as an Atari partition table. These false Atari partitions
- # can be interpreted as real OSDs if a bluestore OSD was previously created on the false
- # partition. See https://tracker.ceph.com/issues/52060 for more info. If a device has a
- # parent, it is a child. If the parent is a valid bluestore OSD, the child will only
- # exist if it is a phantom Atari partition, and the child should be ignored. If the
- # parent isn't bluestore, then the child could be a valid bluestore OSD. If we fail to
- # determine whether a parent is bluestore, we should err on the side of not reporting
- # the child so as not to give a false negative.
- matched_info_devices = [info for info in info_devices if info['NAME'] == dev]
- if not matched_info_devices:
- logger.warning('device {} does not exist'.format(dev))
- continue
- info_device = matched_info_devices[0]
- if 'PKNAME' in info_device and info_device['PKNAME'] != "":
- parent = info_device['PKNAME']
- try:
- if disk.has_bluestore_label(parent):
- logger.warning(('ignoring child device {} whose parent {} is a BlueStore OSD.'.format(dev, parent),
- 'device is likely a phantom Atari partition. device info: {}'.format(info_device)))
- continue
- except OSError as e:
- logger.error(('ignoring child device {} to avoid reporting invalid BlueStore data from phantom Atari partitions.'.format(dev),
- 'failed to determine if parent device {} is BlueStore. err: {}'.format(parent, e)))
- continue
-
- bs_info = _get_bluestore_info(dev)
+ for info_device in info_devices:
+ bs_info = _get_bluestore_info(info_device['NAME'])
if bs_info is None:
# None is also returned in the rare event that there is an issue reading info from
# a BlueStore disk, so be sure to log our assumption that it isn't bluestore
- logger.info('device {} does not have BlueStore information'.format(dev))
+ logger.info('device {} does not have BlueStore information'.format(info_device['NAME']))
continue
uuid = bs_info['osd_uuid']
if uuid not in result:
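The refactor above moves the phantom-Atari-partition check into is_atari_partitions()/exclude_atari_partitions(). A standalone sketch of that filtering logic (the dict shape mirrors the lsblk output used in the patch; has_bluestore_label is stubbed here and is an assumption, not the real ceph_volume helper):

from typing import Any, Dict, List

def has_bluestore_label(device: str) -> bool:
    """Stub standing in for ceph_volume.util.disk.has_bluestore_label()."""
    return device == "sdb"                 # pretend sdb carries a BlueStore OSD

def is_atari_partition(lsblk_entry: Dict[str, Any]) -> bool:
    # A child whose parent is already a BlueStore OSD can only be a phantom
    # Atari partition (see https://tracker.ceph.com/issues/52060), so skip it.
    parent = lsblk_entry.get("PKNAME")
    if not parent:
        return False
    try:
        return has_bluestore_label(parent)
    except OSError:
        # If we cannot tell, err on the side of not reporting the child.
        return True

def exclude_atari_partitions(entries: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    return [e for e in entries if not is_atari_partition(e)]

devices = [
    {"NAME": "/dev/sdb", "PKNAME": ""},
    {"NAME": "/dev/sdb1", "PKNAME": "sdb"},   # phantom child, filtered out
    {"NAME": "/dev/sdc", "PKNAME": ""},
]
print([d["NAME"] for d in exclude_atari_partitions(devices)])   # ['/dev/sdb', '/dev/sdc']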
diff -ur ceph-18.2.1~/src/ceph-volume/ceph_volume/tests/util/test_disk.py ceph-18.2.1/src/ceph-volume/ceph_volume/tests/util/test_disk.py
--- ceph-18.2.1~/src/ceph-volume/ceph_volume/tests/util/test_disk.py 2023-11-14 14:36:19.000000000 -0500
+++ ceph-18.2.1/src/ceph-volume/ceph_volume/tests/util/test_disk.py 2023-12-11 16:55:38.000000000 -0500
@@ -1,7 +1,37 @@
import os
import pytest
from ceph_volume.util import disk
-from mock.mock import patch
+from mock.mock import patch, MagicMock
+
+
+class TestFunctions:
+ @patch('ceph_volume.util.disk.os.path.exists', MagicMock(return_value=False))
+ def test_is_device_path_does_not_exist(self):
+ assert not disk.is_device('/dev/foo')
+
+ @patch('ceph_volume.util.disk.os.path.exists', MagicMock(return_value=True))
+ def test_is_device_dev_doesnt_startswith_dev(self):
+ assert not disk.is_device('/foo')
+
+ @patch('ceph_volume.util.disk.allow_loop_devices', MagicMock(return_value=False))
+ @patch('ceph_volume.util.disk.os.path.exists', MagicMock(return_value=True))
+ def test_is_device_loop_not_allowed(self):
+ assert not disk.is_device('/dev/loop123')
+
+ @patch('ceph_volume.util.disk.lsblk', MagicMock(return_value={'NAME': 'foo', 'TYPE': 'disk'}))
+ @patch('ceph_volume.util.disk.os.path.exists', MagicMock(return_value=True))
+ def test_is_device_type_disk(self):
+ assert disk.is_device('/dev/foo')
+
+ @patch('ceph_volume.util.disk.lsblk', MagicMock(return_value={'NAME': 'foo', 'TYPE': 'mpath'}))
+ @patch('ceph_volume.util.disk.os.path.exists', MagicMock(return_value=True))
+ def test_is_device_type_mpath(self):
+ assert disk.is_device('/dev/foo')
+
+ @patch('ceph_volume.util.disk.lsblk', MagicMock(return_value={'NAME': 'foo1', 'TYPE': 'part'}))
+ @patch('ceph_volume.util.disk.os.path.exists', MagicMock(return_value=True))
+ def test_is_device_type_part(self):
+ assert not disk.is_device('/dev/foo1')
class TestLsblkParser(object):
diff -ur ceph-18.2.1~/src/ceph-volume/ceph_volume/util/disk.py ceph-18.2.1/src/ceph-volume/ceph_volume/util/disk.py
--- ceph-18.2.1~/src/ceph-volume/ceph_volume/util/disk.py 2023-11-14 14:36:19.000000000 -0500
+++ ceph-18.2.1/src/ceph-volume/ceph_volume/util/disk.py 2023-12-11 16:55:38.000000000 -0500
@@ -359,6 +359,10 @@
if not allow_loop_devices():
return False
+ TYPE = lsblk(dev).get('TYPE')
+ if TYPE:
+ return TYPE in ['disk', 'mpath']
+
# fallback to stat
return _stat_is_device(os.lstat(dev).st_mode)
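The new tests and the disk.py hunk above encode the same rule: when lsblk reports a TYPE, only "disk" and "mpath" count as whole devices; otherwise the code falls back to a stat() check. A condensed sketch of that decision flow (lsblk and the loop-device policy are stubbed, so this illustrates the logic rather than the real ceph_volume code):

import os
import stat

def lsblk_type(dev: str) -> str:
    """Stub for ceph_volume.util.disk.lsblk(dev).get('TYPE'); '' means unknown."""
    return {"/dev/sda": "disk", "/dev/sda1": "part", "/dev/mapper/mpatha": "mpath"}.get(dev, "")

def is_device(dev: str, allow_loop: bool = False) -> bool:
    if not os.path.exists(dev) or not dev.startswith("/dev/"):
        return False
    if dev.startswith("/dev/loop") and not allow_loop:
        return False
    dev_type = lsblk_type(dev)
    if dev_type:
        # Whole disks and multipath devices qualify; partitions ('part') do not.
        return dev_type in ("disk", "mpath")
    # Fall back to stat() when lsblk gives no TYPE.
    return stat.S_ISBLK(os.lstat(dev).st_mode)

print(is_device("/dev/sda"), is_device("/dev/sda1"))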
diff -ur ceph-18.2.1~/src/.git_version ceph-18.2.1/src/.git_version
--- ceph-18.2.1~/src/.git_version 2023-11-14 14:37:51.000000000 -0500
+++ ceph-18.2.1/src/.git_version 2023-12-11 16:57:17.000000000 -0500
@@ -1,2 +1,2 @@
-e3fce6809130d78ac0058fc87e537ecd926cd213
+7fe91d5d5842e04be3b4f514d6dd990c54b29c76
18.2.1
diff -ur ceph-18.2.1~/src/messages/MClientRequest.h ceph-18.2.1/src/messages/MClientRequest.h
--- ceph-18.2.1~/src/messages/MClientRequest.h 2023-11-14 14:36:19.000000000 -0500
+++ ceph-18.2.1/src/messages/MClientRequest.h 2023-12-11 16:55:38.000000000 -0500
@@ -234,6 +234,12 @@
copy_from_legacy_head(&head, &old_mds_head);
head.version = 0;
+ head.ext_num_retry = head.num_retry;
+ head.ext_num_fwd = head.num_fwd;
+
+ head.owner_uid = head.caller_uid;
+ head.owner_gid = head.caller_gid;
+
/* Can't set the btime from legacy struct */
if (head.op == CEPH_MDS_OP_SETATTR) {
int localmask = head.args.setattr.mask;
diff -ur ceph-18.2.1~/src/os/bluestore/AvlAllocator.cc ceph-18.2.1/src/os/bluestore/AvlAllocator.cc
--- ceph-18.2.1~/src/os/bluestore/AvlAllocator.cc 2023-11-14 14:36:19.000000000 -0500
+++ ceph-18.2.1/src/os/bluestore/AvlAllocator.cc 2023-12-11 16:55:38.000000000 -0500
@@ -39,7 +39,7 @@
uint64_t search_bytes = 0;
auto rs_start = range_tree.lower_bound(range_t{*cursor, size}, compare);
for (auto rs = rs_start; rs != range_tree.end(); ++rs) {
- uint64_t offset = p2roundup(rs->start, align);
+ uint64_t offset = rs->start;
*cursor = offset + size;
if (offset + size <= rs->end) {
return offset;
@@ -59,7 +59,7 @@
}
// If we reached end, start from beginning till cursor.
for (auto rs = range_tree.begin(); rs != rs_start; ++rs) {
- uint64_t offset = p2roundup(rs->start, align);
+ uint64_t offset = rs->start;
*cursor = offset + size;
if (offset + size <= rs->end) {
return offset;
@@ -82,7 +82,7 @@
const auto compare = range_size_tree.key_comp();
auto rs_start = range_size_tree.lower_bound(range_t{0, size}, compare);
for (auto rs = rs_start; rs != range_size_tree.end(); ++rs) {
- uint64_t offset = p2roundup(rs->start, align);
+ uint64_t offset = rs->start;
if (offset + size <= rs->end) {
return offset;
}
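The AvlAllocator hunks above (and the BtreeAllocator, StupidAllocator, and fastbmap changes below) all stop rounding a candidate offset up with p2roundup(), presumably because free-range starts are already tracked at allocation-unit granularity; the test expectations further below are adjusted accordingly. For readers unfamiliar with the p2* helpers, a small sketch of what the removed call did (power-of-two arithmetic in the spirit of Ceph's include/intarith.h; the example values are illustrative):

def p2roundup(x: int, align: int) -> int:
    """Round x up to the next multiple of align (align must be a power of two)."""
    return (x + align - 1) & ~(align - 1)

def p2align(x: int, align: int) -> int:
    """Round x down to a multiple of align."""
    return x & ~(align - 1)

def p2phase(x: int, align: int) -> int:
    """Remainder of x modulo align (0 means x is already aligned)."""
    return x & (align - 1)

# Before the patch: a free range starting at 0x11000 searched with align=0x10000
# produced a candidate offset rounded up to 0x20000, skipping 0xF000 bytes.
start, align = 0x11000, 0x10000
print(hex(p2roundup(start, align)))   # 0x20000
# After the patch the range start is used directly, assuming it is already
# aligned to the allocation unit, so no rounding occurs.
print(hex(start))                     # 0x11000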
diff -ur ceph-18.2.1~/src/os/bluestore/BlueFS.cc ceph-18.2.1/src/os/bluestore/BlueFS.cc
--- ceph-18.2.1~/src/os/bluestore/BlueFS.cc 2023-11-14 14:36:19.000000000 -0500
+++ ceph-18.2.1/src/os/bluestore/BlueFS.cc 2023-12-11 16:55:38.000000000 -0500
@@ -658,16 +658,24 @@
}
logger->set(l_bluefs_wal_alloc_unit, wal_alloc_size);
+
+ uint64_t shared_alloc_size = cct->_conf->bluefs_shared_alloc_size;
+ if (shared_alloc && shared_alloc->a) {
+ uint64_t unit = shared_alloc->a->get_block_size();
+ shared_alloc_size = std::max(
+ unit,
+ shared_alloc_size);
+ ceph_assert(0 == p2phase(shared_alloc_size, unit));
+ }
if (bdev[BDEV_SLOW]) {
alloc_size[BDEV_DB] = cct->_conf->bluefs_alloc_size;
- alloc_size[BDEV_SLOW] = cct->_conf->bluefs_shared_alloc_size;
- logger->set(l_bluefs_db_alloc_unit, cct->_conf->bluefs_alloc_size);
- logger->set(l_bluefs_main_alloc_unit, cct->_conf->bluefs_shared_alloc_size);
+ alloc_size[BDEV_SLOW] = shared_alloc_size;
} else {
- alloc_size[BDEV_DB] = cct->_conf->bluefs_shared_alloc_size;
- logger->set(l_bluefs_main_alloc_unit, 0);
- logger->set(l_bluefs_db_alloc_unit, cct->_conf->bluefs_shared_alloc_size);
+ alloc_size[BDEV_DB] = shared_alloc_size;
+ alloc_size[BDEV_SLOW] = 0;
}
+ logger->set(l_bluefs_db_alloc_unit, alloc_size[BDEV_DB]);
+ logger->set(l_bluefs_main_alloc_unit, alloc_size[BDEV_SLOW]);
// new wal and db devices are never shared
if (bdev[BDEV_NEWWAL]) {
alloc_size[BDEV_NEWWAL] = cct->_conf->bluefs_alloc_size;
@@ -681,13 +689,13 @@
continue;
}
ceph_assert(bdev[id]->get_size());
- ceph_assert(alloc_size[id]);
if (is_shared_alloc(id)) {
dout(1) << __func__ << " shared, id " << id << std::hex
<< ", capacity 0x" << bdev[id]->get_size()
<< ", block size 0x" << alloc_size[id]
<< std::dec << dendl;
} else {
+ ceph_assert(alloc_size[id]);
std::string name = "bluefs-";
const char* devnames[] = { "wal","db","slow" };
if (id <= BDEV_SLOW)
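The BlueFS hunk above ensures the shared allocation size is at least the shared allocator's block size and asserts it is an exact multiple of it. A sketch of the same arithmetic (p2phase as in the previous example; the config value and block size are illustrative):

def p2phase(x: int, align: int) -> int:
    return x & (align - 1)

def effective_shared_alloc_size(conf_shared_alloc_size: int, allocator_block_size: int) -> int:
    # Never allocate in units smaller than the shared allocator's block size...
    size = max(allocator_block_size, conf_shared_alloc_size)
    # ...and insist the result is a whole number of blocks, as the ceph_assert does.
    assert p2phase(size, allocator_block_size) == 0
    return size

print(hex(effective_shared_alloc_size(conf_shared_alloc_size=0x10000,   # 64 KiB config value
                                      allocator_block_size=0x1000)))    # 4 KiB block size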
diff -ur ceph-18.2.1~/src/os/bluestore/BtreeAllocator.cc ceph-18.2.1/src/os/bluestore/BtreeAllocator.cc
--- ceph-18.2.1~/src/os/bluestore/BtreeAllocator.cc 2023-11-14 14:36:19.000000000 -0500
+++ ceph-18.2.1/src/os/bluestore/BtreeAllocator.cc 2023-12-11 16:55:38.000000000 -0500
@@ -25,7 +25,7 @@
{
auto rs_start = range_tree.lower_bound(*cursor);
for (auto rs = rs_start; rs != range_tree.end(); ++rs) {
- uint64_t offset = p2roundup(rs->first, align);
+ uint64_t offset = rs->first;
if (offset + size <= rs->second) {
*cursor = offset + size;
return offset;
@@ -37,7 +37,7 @@
}
// If we reached end, start from beginning till cursor.
for (auto rs = range_tree.begin(); rs != rs_start; ++rs) {
- uint64_t offset = p2roundup(rs->first, align);
+ uint64_t offset = rs->first;
if (offset + size <= rs->second) {
*cursor = offset + size;
return offset;
@@ -53,7 +53,7 @@
// the needs
auto rs_start = range_size_tree.lower_bound(range_value_t{0,size});
for (auto rs = rs_start; rs != range_size_tree.end(); ++rs) {
- uint64_t offset = p2roundup(rs->start, align);
+ uint64_t offset = rs->start;
if (offset + size <= rs->start + rs->size) {
return offset;
}
diff -ur ceph-18.2.1~/src/os/bluestore/fastbmap_allocator_impl.cc ceph-18.2.1/src/os/bluestore/fastbmap_allocator_impl.cc
--- ceph-18.2.1~/src/os/bluestore/fastbmap_allocator_impl.cc 2023-11-14 14:36:19.000000000 -0500
+++ ceph-18.2.1/src/os/bluestore/fastbmap_allocator_impl.cc 2023-12-11 16:55:38.000000000 -0500
@@ -17,19 +17,9 @@
inline interval_t _align2units(uint64_t offset, uint64_t len, uint64_t min_length)
{
- interval_t res;
- if (len >= min_length) {
- res.offset = p2roundup(offset, min_length);
- auto delta_off = res.offset - offset;
- if (len > delta_off) {
- res.length = len - delta_off;
- res.length = p2align<uint64_t>(res.length, min_length);
- if (res.length) {
- return res;
- }
- }
- }
- return interval_t();
+ return len >= min_length ?
+ interval_t(offset, p2align<uint64_t>(len, min_length)) :
+ interval_t();
}
interval_t AllocatorLevel01Loose::_get_longest_from_l0(uint64_t pos0,
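The simplification above changes what _align2units reports for a free extent whose start is not itself a multiple of min_length: the old code shrank the interval to an aligned sub-range, the new one keeps the start and only trims the length. That is also why the fastbmap_allocator_test expectations further below shift from 1 MiB-aligned offsets to offsets such as 0x1000. A small comparison sketch (pure arithmetic, reusing the p2roundup/p2align definitions from the earlier example):

def p2roundup(x, a): return (x + a - 1) & ~(a - 1)
def p2align(x, a):   return x & ~(a - 1)

def align2units_old(offset, length, min_length):
    if length >= min_length:
        new_offset = p2roundup(offset, min_length)
        delta = new_offset - offset
        if length > delta:
            new_length = p2align(length - delta, min_length)
            if new_length:
                return (new_offset, new_length)
    return (0, 0)

def align2units_new(offset, length, min_length):
    return (offset, p2align(length, min_length)) if length >= min_length else (0, 0)

# Example: a (1 MiB + 4 KiB) extent starting 4 KiB in, with a 64 KiB unit.
offset, length, unit = 0x1000, 0x101000, 0x10000
print(tuple(map(hex, align2units_old(offset, length, unit))))   # ('0x10000', '0xf0000')  - start rounded up
print(tuple(map(hex, align2units_new(offset, length, unit))))   # ('0x1000', '0x100000')  - start kept as-is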
diff -ur ceph-18.2.1~/src/os/bluestore/StupidAllocator.cc ceph-18.2.1/src/os/bluestore/StupidAllocator.cc
--- ceph-18.2.1~/src/os/bluestore/StupidAllocator.cc 2023-11-14 14:36:19.000000000 -0500
+++ ceph-18.2.1/src/os/bluestore/StupidAllocator.cc 2023-12-11 16:55:38.000000000 -0500
@@ -52,20 +52,6 @@
}
}
-/// return the effective length of the extent if we align to alloc_unit
-uint64_t StupidAllocator::_aligned_len(
- StupidAllocator::interval_set_t::iterator p,
- uint64_t alloc_unit)
-{
- uint64_t skew = p.get_start() % alloc_unit;
- if (skew)
- skew = alloc_unit - skew;
- if (skew > p.get_len())
- return 0;
- else
- return p.get_len() - skew;
-}
-
int64_t StupidAllocator::allocate_int(
uint64_t want_size, uint64_t alloc_unit, int64_t hint,
uint64_t *offset, uint32_t *length)
@@ -89,7 +75,7 @@
for (bin = orig_bin; bin < (int)free.size(); ++bin) {
p = free[bin].lower_bound(hint);
while (p != free[bin].end()) {
- if (_aligned_len(p, alloc_unit) >= want_size) {
+ if (p.get_len() >= want_size) {
goto found;
}
++p;
@@ -102,7 +88,7 @@
p = free[bin].begin();
auto end = hint ? free[bin].lower_bound(hint) : free[bin].end();
while (p != end) {
- if (_aligned_len(p, alloc_unit) >= want_size) {
+ if (p.get_len() >= want_size) {
goto found;
}
++p;
@@ -114,7 +100,7 @@
for (bin = orig_bin; bin >= 0; --bin) {
p = free[bin].lower_bound(hint);
while (p != free[bin].end()) {
- if (_aligned_len(p, alloc_unit) >= alloc_unit) {
+ if (p.get_len() >= alloc_unit) {
goto found;
}
++p;
@@ -127,7 +113,7 @@
p = free[bin].begin();
auto end = hint ? free[bin].lower_bound(hint) : free[bin].end();
while (p != end) {
- if (_aligned_len(p, alloc_unit) >= alloc_unit) {
+ if (p.get_len() >= alloc_unit) {
goto found;
}
++p;
@@ -137,11 +123,9 @@
return -ENOSPC;
found:
- uint64_t skew = p.get_start() % alloc_unit;
- if (skew)
- skew = alloc_unit - skew;
- *offset = p.get_start() + skew;
- *length = std::min(std::max(alloc_unit, want_size), p2align((p.get_len() - skew), alloc_unit));
+ *offset = p.get_start();
+ *length = std::min(std::max(alloc_unit, want_size), p2align(p.get_len(), alloc_unit));
+
if (cct->_conf->bluestore_debug_small_allocations) {
uint64_t max =
alloc_unit * (rand() % cct->_conf->bluestore_debug_small_allocations);
@@ -158,7 +142,7 @@
free[bin].erase(*offset, *length);
uint64_t off, len;
- if (*offset && free[bin].contains(*offset - skew - 1, &off, &len)) {
+ if (*offset && free[bin].contains(*offset - 1, &off, &len)) {
int newbin = _choose_bin(len);
if (newbin != bin) {
ldout(cct, 30) << __func__ << " demoting 0x" << std::hex << off << "~" << len
diff -ur ceph-18.2.1~/src/os/bluestore/StupidAllocator.h ceph-18.2.1/src/os/bluestore/StupidAllocator.h
--- ceph-18.2.1~/src/os/bluestore/StupidAllocator.h 2023-11-14 14:36:19.000000000 -0500
+++ ceph-18.2.1/src/os/bluestore/StupidAllocator.h 2023-12-11 16:55:38.000000000 -0500
@@ -31,10 +31,6 @@
unsigned _choose_bin(uint64_t len);
void _insert_free(uint64_t offset, uint64_t len);
- uint64_t _aligned_len(
- interval_set_t::iterator p,
- uint64_t alloc_unit);
-
public:
StupidAllocator(CephContext* cct,
int64_t size,
diff -ur ceph-18.2.1~/src/test/objectstore/Allocator_test.cc ceph-18.2.1/src/test/objectstore/Allocator_test.cc
--- ceph-18.2.1~/src/test/objectstore/Allocator_test.cc 2023-11-14 14:36:19.000000000 -0500
+++ ceph-18.2.1/src/test/objectstore/Allocator_test.cc 2023-12-11 16:55:38.000000000 -0500
@@ -516,8 +516,7 @@
PExtentVector extents;
auto need = 0x3f980000;
auto got = alloc->allocate(need, 0x10000, 0, (int64_t)0, &extents);
- EXPECT_GT(got, 0);
- EXPECT_EQ(got, 0x630000);
+ EXPECT_GE(got, 0x630000);
}
TEST_P(AllocTest, test_alloc_50656_best_fit)
diff -ur ceph-18.2.1~/src/test/objectstore/fastbmap_allocator_test.cc ceph-18.2.1/src/test/objectstore/fastbmap_allocator_test.cc
--- ceph-18.2.1~/src/test/objectstore/fastbmap_allocator_test.cc 2023-11-14 14:36:19.000000000 -0500
+++ ceph-18.2.1/src/test/objectstore/fastbmap_allocator_test.cc 2023-12-11 16:55:38.000000000 -0500
@@ -625,6 +625,8 @@
ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u);
{
+ // Original free space disposition (start chunk, count):
+ // <NC/2, NC/2>
size_t to_release = 2 * _1m + 0x1000;
// release 2M + 4K at the beginning
interval_vector_t r;
@@ -637,6 +639,8 @@
ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u);
}
{
+ // Original free space disposition (start chunk, count):
+ // <0, 513>, <NC / 2, NC / 2>
// allocate 4K within the deallocated range
uint64_t allocated4 = 0;
interval_vector_t a4;
@@ -652,79 +656,91 @@
ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u);
}
{
- // allocate 1M - should go to the second 1M chunk
+ // Original free space disposition (start chunk, count):
+ // <1, 512>, <NC / 2, NC / 2>
+ // allocate 1M - should go to offset 4096
uint64_t allocated4 = 0;
interval_vector_t a4;
al2.allocate_l2(_1m, _1m, &allocated4, &a4);
ASSERT_EQ(a4.size(), 1u);
ASSERT_EQ(allocated4, _1m);
- ASSERT_EQ(a4[0].offset, _1m);
+ ASSERT_EQ(a4[0].offset, 4096);
ASSERT_EQ(a4[0].length, _1m);
bins_overall.clear();
al2.collect_stats(bins_overall);
- ASSERT_EQ(bins_overall.size(), 3u);
- ASSERT_EQ(bins_overall[0], 1u);
- ASSERT_EQ(bins_overall[cbits((_1m - 0x1000) / 0x1000) - 1], 1u);
+ ASSERT_EQ(bins_overall.size(), 2u);
+ ASSERT_EQ(bins_overall[cbits(_1m / 0x1000) - 1], 1u);
ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u);
}
{
+ // Original free space disposition (start chunk, count):
+ // <257, 256>, <NC / 2, NC / 2>
// and allocate yet another 8K within the deallocated range
uint64_t allocated4 = 0;
interval_vector_t a4;
al2.allocate_l2(0x2000, 0x1000, &allocated4, &a4);
ASSERT_EQ(a4.size(), 1u);
ASSERT_EQ(allocated4, 0x2000u);
- ASSERT_EQ(a4[0].offset, 0x1000u);
+ ASSERT_EQ(a4[0].offset, _1m + 0x1000u);
ASSERT_EQ(a4[0].length, 0x2000u);
bins_overall.clear();
al2.collect_stats(bins_overall);
- ASSERT_EQ(bins_overall[0], 1u);
- ASSERT_EQ(bins_overall[cbits((_1m - 0x3000) / 0x1000) - 1], 1u);
+ ASSERT_EQ(bins_overall.size(), 2u);
+ ASSERT_EQ(bins_overall[cbits((_1m - 0x2000) / 0x1000) - 1], 1u);
ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u);
}
{
- // release just allocated 1M
+ // Original free space disposition (start chunk, count):
+ // <259, 254>, <NC / 2, NC / 2>
+ // release 4K~1M
interval_vector_t r;
- r.emplace_back(_1m, _1m);
+ r.emplace_back(0x1000, _1m);
al2.free_l2(r);
bins_overall.clear();
al2.collect_stats(bins_overall);
- ASSERT_EQ(bins_overall.size(), 2u);
- ASSERT_EQ(bins_overall[cbits((2 * _1m - 0x3000) / 0x1000) - 1], 1u);
+ ASSERT_EQ(bins_overall.size(), 3u);
+ //ASSERT_EQ(bins_overall[cbits((2 * _1m - 0x3000) / 0x1000) - 1], 1u);
+ ASSERT_EQ(bins_overall[cbits(_1m / 0x1000) - 1], 1u);
+ ASSERT_EQ(bins_overall[cbits((_1m - 0x2000) / 0x1000) - 1], 1u);
ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u);
}
{
- // allocate 3M - should go to the second 1M chunk and @capacity/2
+ // Original free space disposition (start chunk, count):
+ // <1, 257>, <259, 254>, <NC / 2, NC / 2>
+ // allocate 3M - should go to the first 1M chunk and @capacity/2
uint64_t allocated4 = 0;
interval_vector_t a4;
al2.allocate_l2(3 * _1m, _1m, &allocated4, &a4);
ASSERT_EQ(a4.size(), 2u);
ASSERT_EQ(allocated4, 3 * _1m);
- ASSERT_EQ(a4[0].offset, _1m);
+ ASSERT_EQ(a4[0].offset, 0x1000);
ASSERT_EQ(a4[0].length, _1m);
ASSERT_EQ(a4[1].offset, capacity / 2);
ASSERT_EQ(a4[1].length, 2 * _1m);
bins_overall.clear();
al2.collect_stats(bins_overall);
- ASSERT_EQ(bins_overall.size(), 3u);
- ASSERT_EQ(bins_overall[0], 1u);
- ASSERT_EQ(bins_overall[cbits((_1m - 0x3000) / 0x1000) - 1], 1u);
+ ASSERT_EQ(bins_overall.size(), 2u);
+ ASSERT_EQ(bins_overall[cbits((_1m - 0x2000) / 0x1000) - 1], 1u);
ASSERT_EQ(bins_overall[cbits((num_chunks - 512) / 2) - 1], 1u);
}
{
- // release allocated 1M in the second meg chunk except
+ // Original free space disposition (start chunk, count):
+ // <259, 254>, <NC / 2 - 512, NC / 2 - 512>
+ // release allocated 1M in the first meg chunk except
// the first 4K chunk
interval_vector_t r;
- r.emplace_back(_1m + 0x1000, _1m);
+ r.emplace_back(0x1000, _1m);
al2.free_l2(r);
bins_overall.clear();
al2.collect_stats(bins_overall);
ASSERT_EQ(bins_overall.size(), 3u);
ASSERT_EQ(bins_overall[cbits(_1m / 0x1000) - 1], 1u);
- ASSERT_EQ(bins_overall[cbits((_1m - 0x3000) / 0x1000) - 1], 1u);
+ ASSERT_EQ(bins_overall[cbits((_1m - 0x2000) / 0x1000) - 1], 1u);
ASSERT_EQ(bins_overall[cbits((num_chunks - 512) / 2) - 1], 1u);
}
{
+ // Original free space disposition (start chunk, count):
+ // <1, 256>, <259, 254>, <NC / 2 - 512, NC / 2 - 512>
// release 2M @(capacity / 2)
interval_vector_t r;
r.emplace_back(capacity / 2, 2 * _1m);
@@ -733,10 +749,12 @@
al2.collect_stats(bins_overall);
ASSERT_EQ(bins_overall.size(), 3u);
ASSERT_EQ(bins_overall[cbits(_1m / 0x1000) - 1], 1u);
- ASSERT_EQ(bins_overall[cbits((_1m - 0x3000) / 0x1000) - 1], 1u);
+ ASSERT_EQ(bins_overall[cbits((_1m - 0x2000) / 0x1000) - 1], 1u);
ASSERT_EQ(bins_overall[cbits((num_chunks) / 2) - 1], 1u);
}
{
+ // Original free space disposition (start chunk, count):
+ // <1, 256>, <259, 254>, <NC / 2, NC / 2>
// allocate 4x512K - should go to the second halves of
// the first and second 1M chunks and @(capacity / 2)
uint64_t allocated4 = 0;
@@ -744,51 +762,54 @@
al2.allocate_l2(2 * _1m, _1m / 2, &allocated4, &a4);
ASSERT_EQ(a4.size(), 3u);
ASSERT_EQ(allocated4, 2 * _1m);
- ASSERT_EQ(a4[0].offset, _1m / 2);
+ ASSERT_EQ(a4[1].offset, 0x1000);
+ ASSERT_EQ(a4[1].length, _1m);
+ ASSERT_EQ(a4[0].offset, _1m + 0x3000);
ASSERT_EQ(a4[0].length, _1m / 2);
- ASSERT_EQ(a4[1].offset, _1m + _1m / 2);
- ASSERT_EQ(a4[1].length, _1m / 2);
ASSERT_EQ(a4[2].offset, capacity / 2);
- ASSERT_EQ(a4[2].length, _1m);
+ ASSERT_EQ(a4[2].length, _1m / 2);
bins_overall.clear();
al2.collect_stats(bins_overall);
- ASSERT_EQ(bins_overall.size(), 3u);
- ASSERT_EQ(bins_overall[0], 1u);
- // below we have 512K - 4K & 512K - 12K chunks which both fit into
- // the same bin = 6
- ASSERT_EQ(bins_overall[6], 2u);
+ ASSERT_EQ(bins_overall.size(), 2u);
+ ASSERT_EQ(bins_overall[cbits((_1m - 0x2000 - 0x80000) / 0x1000) - 1], 1u);
ASSERT_EQ(bins_overall[cbits((num_chunks - 256) / 2) - 1], 1u);
}
{
- // cleanup first 2M except except the last 4K chunk
+ // Original free space disposition (start chunk, count):
+ // <387, 126>, <NC / 2 + 128, NC / 2 - 128>
+ // cleanup first 1536K except the last 4K chunk
interval_vector_t r;
- r.emplace_back(0, 2 * _1m - 0x1000);
+ r.emplace_back(0, _1m + _1m / 2 - 0x1000);
al2.free_l2(r);
bins_overall.clear();
al2.collect_stats(bins_overall);
ASSERT_EQ(bins_overall.size(), 3u);
- ASSERT_EQ(bins_overall[0], 1u);
- ASSERT_EQ(bins_overall[cbits((_2m - 0x1000) / 0x1000) - 1], 1u);
+ ASSERT_EQ(bins_overall[cbits((_1m + _1m / 2 - 0x1000) / 0x1000) - 1], 1u);
+ ASSERT_EQ(bins_overall[cbits((_1m - 0x2000 - 0x80000) / 0x1000) - 1], 1u);
ASSERT_EQ(bins_overall[cbits((num_chunks - 256) / 2) - 1], 1u);
}
{
- // release 2M @(capacity / 2)
+ // Original free space disposition (start chunk, count):
+ // <0, 383> <387, 126>, <NC / 2 + 128, NC / 2 - 128>
+ // release 512K @(capacity / 2)
interval_vector_t r;
- r.emplace_back(capacity / 2, 2 * _1m);
+ r.emplace_back(capacity / 2, _1m / 2);
al2.free_l2(r);
bins_overall.clear();
al2.collect_stats(bins_overall);
ASSERT_EQ(bins_overall.size(), 3u);
- ASSERT_EQ(bins_overall[0], 1u);
- ASSERT_EQ(bins_overall[cbits((_2m - 0x1000) / 0x1000) - 1], 1u);
+ ASSERT_EQ(bins_overall[cbits((_1m + _1m / 2 - 0x1000) / 0x1000) - 1], 1u);
+ ASSERT_EQ(bins_overall[cbits((_1m - 0x2000 - 0x80000) / 0x1000) - 1], 1u);
ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u);
}
{
- // allocate 132M using 4M granularity should go to (capacity / 2)
+ // Original free space disposition (start chunk, count):
+ // <0, 383> <387, 126>, <NC / 2, NC / 2>
+ // allocate 132M (=33792*4096) = using 4M granularity should go to (capacity / 2)
uint64_t allocated4 = 0;
interval_vector_t a4;
al2.allocate_l2(132 * _1m, 4 * _1m , &allocated4, &a4);
@@ -799,24 +820,40 @@
bins_overall.clear();
al2.collect_stats(bins_overall);
ASSERT_EQ(bins_overall.size(), 3u);
+ ASSERT_EQ(bins_overall[cbits((_1m + _1m / 2 - 0x1000) / 0x1000) - 1], 1u);
+ ASSERT_EQ(bins_overall[cbits((_1m - 0x2000 - 0x80000) / 0x1000) - 1], 1u);
+ ASSERT_EQ(bins_overall[cbits(num_chunks / 2 - 33792) - 1], 1u);
}
{
- // cleanup left 4K chunk in the first 2M
+ // Original free space disposition (start chunk, count):
+ // <0, 383> <387, 126>, <NC / 2 + 33792, NC / 2 - 33792>
+ // cleanup remaining 4*4K chunks in the first 2M
interval_vector_t r;
- r.emplace_back(2 * _1m - 0x1000, 0x1000);
+ r.emplace_back(383 * 4096, 4 * 0x1000);
al2.free_l2(r);
bins_overall.clear();
al2.collect_stats(bins_overall);
ASSERT_EQ(bins_overall.size(), 2u);
+ ASSERT_EQ(bins_overall[cbits((2 * _1m + 0x1000) / 0x1000) - 1], 1u);
+ ASSERT_EQ(bins_overall[cbits(num_chunks / 2 - 33792) - 1], 1u);
}
{
+ // Original free space disposition (start chunk, count):
+ // <0, 513>, <NC / 2 + 33792, NC / 2 - 33792>
// release 132M @(capacity / 2)
interval_vector_t r;
r.emplace_back(capacity / 2, 132 * _1m);
al2.free_l2(r);
+ bins_overall.clear();
+ al2.collect_stats(bins_overall);
+ ASSERT_EQ(bins_overall.size(), 2u);
+ ASSERT_EQ(bins_overall[cbits((2 * _1m + 0x1000) / 0x1000) - 1], 1u);
+ ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u);
}
{
+ // Original free space disposition (start chunk, count):
+ // <0, 513>, <NC / 2, NC / 2>
// allocate 132M using 2M granularity should go to the first chunk and to
// (capacity / 2)
uint64_t allocated4 = 0;
@@ -827,14 +864,31 @@
ASSERT_EQ(a4[0].length, 2 * _1m);
ASSERT_EQ(a4[1].offset, capacity / 2);
ASSERT_EQ(a4[1].length, 130 * _1m);
+
+ bins_overall.clear();
+ al2.collect_stats(bins_overall);
+
+ ASSERT_EQ(bins_overall.size(), 2u);
+ ASSERT_EQ(bins_overall[cbits(0)], 1u);
+ ASSERT_EQ(bins_overall[cbits(num_chunks / 2 - 33792) - 1], 1u);
}
{
+ // Original free space disposition (start chunk, count):
+ // <512, 1>, <NC / 2 + 33792, NC / 2 - 33792>
// release 130M @(capacity / 2)
interval_vector_t r;
r.emplace_back(capacity / 2, 132 * _1m);
al2.free_l2(r);
+ bins_overall.clear();
+ al2.collect_stats(bins_overall);
+
+ ASSERT_EQ(bins_overall.size(), 2u);
+ ASSERT_EQ(bins_overall[cbits(0)], 1u);
+ ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u);
}
{
+ // Original free space disposition (start chunk, count):
+ // <512,1>, <NC / 2, NC / 2>
// release 4K~16K
// release 28K~32K
// release 68K~24K
@@ -843,21 +897,46 @@
r.emplace_back(0x7000, 0x8000);
r.emplace_back(0x11000, 0x6000);
al2.free_l2(r);
+
+ bins_overall.clear();
+ al2.collect_stats(bins_overall);
+
+ ASSERT_EQ(bins_overall.size(), 4u);
+ ASSERT_EQ(bins_overall[cbits(0)], 1u);
+ ASSERT_EQ(bins_overall[cbits(0x4000 / 0x1000) - 1], 2u); // accounts both 0x4000 & 0x6000
+ ASSERT_EQ(bins_overall[cbits(0x8000 / 0x1000) - 1], 1u);
+ ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u);
}
{
- // allocate 32K using 16K granularity - should bypass the first
- // unaligned extent, use the second free extent partially given
- // the 16K alignment and then fallback to capacity / 2
+ // Original free space disposition (start chunk, count):
+ // <1, 4>, <7, 8>, <17, 6> <512,1>, <NC / 2, NC / 2>
+ // allocate 80K using 16K granularity
uint64_t allocated4 = 0;
interval_vector_t a4;
- al2.allocate_l2(0x8000, 0x4000, &allocated4, &a4);
- ASSERT_EQ(a4.size(), 2u);
- ASSERT_EQ(a4[0].offset, 0x8000u);
- ASSERT_EQ(a4[0].length, 0x4000u);
- ASSERT_EQ(a4[1].offset, capacity / 2);
+ al2.allocate_l2(0x14000, 0x4000, &allocated4, &a4);
+
+ ASSERT_EQ(a4.size(), 4);
+ ASSERT_EQ(a4[1].offset, 0x1000u);
ASSERT_EQ(a4[1].length, 0x4000u);
- }
+ ASSERT_EQ(a4[0].offset, 0x7000u);
+ ASSERT_EQ(a4[0].length, 0x8000u);
+ ASSERT_EQ(a4[2].offset, 0x11000u);
+ ASSERT_EQ(a4[2].length, 0x4000u);
+ ASSERT_EQ(a4[3].offset, capacity / 2);
+ ASSERT_EQ(a4[3].length, 0x4000u);
+
+ bins_overall.clear();
+ al2.collect_stats(bins_overall);
+ ASSERT_EQ(bins_overall.size(), 3u);
+ ASSERT_EQ(bins_overall[cbits(0)], 1u);
+ ASSERT_EQ(bins_overall[cbits(0x2000 / 0x1000) - 1], 1u);
+ ASSERT_EQ(bins_overall[cbits(num_chunks / 2 - 1) - 1], 1u);
+ }
+ {
+ // Original free space disposition (start chunk, count):
+ // <21, 2> <512,1>, <NC / 2 + 1, NC / 2 - 1>
+ }
}
std::cout << "Done L2 cont aligned" << std::endl;
}
@@ -913,7 +992,7 @@
al2.allocate_l2(0x3e000000, _1m, &allocated4, &a4);
ASSERT_EQ(a4.size(), 2u);
ASSERT_EQ(allocated4, 0x3e000000u);
- ASSERT_EQ(a4[0].offset, 0x5fed00000u);
+ ASSERT_EQ(a4[0].offset, 0x5fec30000u);
ASSERT_EQ(a4[0].length, 0x1300000u);
ASSERT_EQ(a4[1].offset, 0x628000000u);
ASSERT_EQ(a4[1].length, 0x3cd00000u);
diff -ur ceph-18.2.1~/src/test/objectstore/store_test.cc ceph-18.2.1/src/test/objectstore/store_test.cc
--- ceph-18.2.1~/src/test/objectstore/store_test.cc 2023-11-14 14:36:19.000000000 -0500
+++ ceph-18.2.1/src/test/objectstore/store_test.cc 2023-12-11 16:55:38.000000000 -0500
@@ -9524,9 +9524,9 @@
string key;
_key_encode_u64(1, &key);
bluestore_shared_blob_t sb(1);
- sb.ref_map.get(0x2000, block_size);
- sb.ref_map.get(0x4000, block_size);
- sb.ref_map.get(0x4000, block_size);
+ sb.ref_map.get(0x822000, block_size);
+ sb.ref_map.get(0x824000, block_size);
+ sb.ref_map.get(0x824000, block_size);
bufferlist bl;
encode(sb, bl);
bstore->inject_broken_shared_blob_key(key, bl);
--- ceph-18.2.4/src/include/rados/rgw_file.h.orig 2024-09-23 12:05:39.942703570 -0400
+++ ceph-18.2.4/src/include/rados/rgw_file.h 2024-09-23 12:05:55.800441732 -0400
@@ -27,7 +27,7 @@
#define LIBRGW_FILE_VER_MAJOR 1
#define LIBRGW_FILE_VER_MINOR 2
-#define LIBRGW_FILE_VER_EXTRA 0
+#define LIBRGW_FILE_VER_EXTRA 1
#define LIBRGW_FILE_VERSION(maj, min, extra) ((maj << 16) + (min << 8) + extra)
#define LIBRGW_FILE_VERSION_CODE LIBRGW_FILE_VERSION(LIBRGW_FILE_VER_MAJOR, LIBRGW_FILE_VER_MINOR, LIBRGW_FILE_VER_EXTRA)
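/* Not part of the patch above -- just a worked example of the version
 * packing macro it touches, assuming the bumped values 1.2.1:
 *   LIBRGW_FILE_VERSION(1, 2, 1)
 *     = (1 << 16) + (2 << 8) + 1
 *     = 65536 + 512 + 1
 *     = 66049  (0x10201)
 */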
#
# spec file for package ceph
#
# Copyright (C) 2004-2019 The Ceph Project Developers. See COPYING file
# at the top-level directory of this distribution and at
# https://github.com/ceph/ceph/blob/master/COPYING
#
# All modifications and additions to the file contributed by third parties
# remain the property of their copyright owners, unless otherwise agreed
# upon.
#
# This file is under the GNU Lesser General Public License, version 2.1
#
# Please submit bugfixes or comments via http://tracker.ceph.com/
#
#################################################################################
# conditional build section
#
# please read this for explanation of bcond syntax:
# https://rpm-software-management.github.io/rpm/manual/conditionalbuilds.html
#################################################################################
%global _hardened_build 1
%bcond_with make_check
%bcond_with zbd
%bcond_with cmake_verbose_logging
%bcond_without ceph_test_package
%ifarch s390
%bcond_with tcmalloc
%else
%bcond_without tcmalloc
%endif
%bcond_without rbd_ssd_cache
%ifarch x86_64
%bcond_without rbd_rwl_cache
%else
%bcond_with rbd_rwl_cache
%endif
%if 0%{?fedora} || 0%{?rhel}
%if 0%{?rhel} < 9
%bcond_with system_pmdk
%else
%ifarch s390x %{arm64} riscv64
%bcond_with system_pmdk
%else
%bcond_without system_pmdk
%endif
%endif
%bcond_without selinux
%bcond_without amqp_endpoint
%bcond_without kafka_endpoint
%bcond_without lttng
%bcond_without libradosstriper
%bcond_without ocf
%global luarocks_package_name luarocks
%bcond_without lua_packages
%global _remote_tarball_prefix https://download.ceph.com/tarballs/
%endif
%if 0%{?suse_version}
%ifarch s390x
%bcond_with system_pmdk
%else
%bcond_without system_pmdk
%endif
%bcond_with amqp_endpoint
%bcond_with cephfs_java
%bcond_with kafka_endpoint
%bcond_with libradosstriper
%ifarch x86_64 aarch64 ppc64le
%bcond_without lttng
%else
%bcond_with lttng
%endif
%bcond_with ocf
%bcond_with selinux
#Compat macro for _fillupdir macro introduced in Nov 2017
%if ! %{defined _fillupdir}
%global _fillupdir /var/adm/fillup-templates
%endif
#luarocks
%if 0%{?is_opensuse}
# openSUSE
%bcond_without lua_packages
%if 0%{?sle_version}
# openSUSE Leap
%global luarocks_package_name lua53-luarocks
%else
# openSUSE Tumbleweed
%global luarocks_package_name lua54-luarocks
%endif
%else
# SLE
%bcond_with lua_packages
%endif
%endif
%bcond_with seastar
%if 0%{?suse_version}
%bcond_with jaeger
%else
%bcond_without jaeger
%endif
%bcond_with jaeger
%if 0%{?fedora} || 0%{?suse_version} >= 1500 || 0%{?rhel} >= 10
# distros that ship cmd2 and/or colorama
%bcond_without cephfs_shell
%else
# distros that do _not_ ship cmd2/colorama
%bcond_with cephfs_shell
%endif
%if 0%{?fedora} || 0%{?rhel} >= 9
%bcond_without system_arrow
%bcond_without system_utf8proc
%else
# for centos 8, utf8proc-devel comes from the subversion-devel module which isn't available in EPEL8
# this is tracked in https://bugzilla.redhat.com/2152265
%bcond_with system_arrow
%bcond_with system_utf8proc
%endif
%if 0%{?fedora} || 0%{?suse_version} || 0%{?rhel} >= 8
%global weak_deps 1
%endif
%if %{with selinux}
# get selinux policy version
# Force 0.0.0 policy version for centos builds to avoid repository sync issues between rhel and centos
%if 0%{?centos}
%global _selinux_policy_version 0.0.0
%else
%{!?_selinux_policy_version: %global _selinux_policy_version 0.0.0}
%endif
%endif
%{!?_udevrulesdir: %global _udevrulesdir /lib/udev/rules.d}
%{!?tmpfiles_create: %global tmpfiles_create systemd-tmpfiles --create}
%{!?python3_pkgversion: %global python3_pkgversion 3}
%{!?python3_version_nodots: %global python3_version_nodots 3}
%{!?python3_version: %global python3_version 3}
%{!?gts_prefix: %global gts_prefix gcc-toolset-11}
%if ! 0%{?suse_version}
# use multi-threaded xz compression: xz level 7 using ncpus threads
%global _source_payload w7T%{_smp_build_ncpus}.xzdio
%global _binary_payload w7T%{_smp_build_ncpus}.xzdio
%endif
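# Illustration only (hypothetical CPU count): with _smp_build_ncpus expanding
# to 8, the two payload macros above become "w7T8.xzdio", i.e. xz compression
# at level 7 using 8 threads for both the source and binary payloads.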
%define smp_limit_mem_per_job() %( \
kb_per_job=%1 \
kb_total=$(head -3 /proc/meminfo | sed -n 's/MemAvailable:\\s*\\(.*\\) kB.*/\\1/p') \
jobs=$(( $kb_total / $kb_per_job )) \
[ $jobs -lt 1 ] && jobs=1 \
echo $jobs )
%if 0%{?_smp_ncpus_max} == 0
%if 0%{?__isa_bits} == 32
# 32-bit builds can use 3G memory max, which is not enough even for -j2
%global _smp_ncpus_max 1
%else
# 3.0 GiB mem per job
# SUSE distros use limit_build in the place of smp_limit_mem_per_job, please
# be sure to update it (in the build section, below) as well when changing this
# number.
%global _smp_ncpus_max %{smp_limit_mem_per_job 3000000}
%endif
%endif
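# Worked example for smp_limit_mem_per_job (hypothetical numbers): if
# /proc/meminfo reports "MemAvailable: 24000000 kB" and the per-job budget
# passed in is 3000000 kB, the macro computes jobs = 24000000 / 3000000 = 8,
# so _smp_ncpus_max becomes 8; on a host with less than 3000000 kB available
# the guard clamps the result to a single job.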
# disable -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1, as gcc-toolset-{10,11}-annobin
# do not provide gcc-annobin.so anymore (though they still provide annobin.so), but
# redhat-rpm-config still passes -fplugin=gcc-annobin to the compiler.
%undefine _annotated_build
%if 0%{?rhel} == 8 && 0%{?enable_devtoolset11:1}
%enable_devtoolset11
%endif
#################################################################################
# main package definition
#################################################################################
Name: ceph
Version: 18.2.4
Release: 2%{?dist}
%if 0%{?fedora} || 0%{?rhel}
Epoch: 2
%endif
# define _epoch_prefix macro which will expand to the empty string if epoch is
# undefined
%global _epoch_prefix %{?epoch:%{epoch}:}
Summary: User space components of the Ceph file system
License: (LGPLv2+ or LGPLv3) and CC-BY-SA-3.0 and GPLv2 and Boost and BSD and MIT
%if 0%{?suse_version}
Group: System/Filesystems
%endif
URL: http://ceph.com/
Source0: https://download.ceph.com/tarballs/ceph-%{version}.tar.gz
#Source0: https://1.chacra.ceph.com/r/ceph/quincy/
Patch0001: 0001-src-common-crc32c_intel_fast.patch
Patch0003: 0003-src-common-bitstr.h.patch
Patch0008: 0008-cmake-modules-Finduring.cmake.patch
Patch0010: 0010-CET-Add-CET-marker-to-crc32c_intel_fast_zero_asm.s.patch
Patch0011: 0011-isa-l-CET-Add-CET-marker-to-x86-64-crc32-assembly-co.patch
Patch0012: 0012-spdk-isa-l-CET-Add-CET-marker-to-x86-64-crc32-assemb.patch
Patch0016: 0016-src-tracing.patch
Patch0017: 0017-gcc-12-omnibus.patch
Patch0018: 0018-src-rgw-store-dbstore-CMakeLists.txt.patch
Patch0020: 0020-src-arrow-cpp-cmake_modules-ThirdpartyToolchain.cmake.patch
Patch0032: 0032-cmake-modules-BuildBoost.cmake.patch
Patch0033: 0033-boost-asm.patch
Patch0046: 0046-src-include-rados-rgw_file.h.patch
# ceph 14.0.1 does not support 32-bit architectures, bugs #1727788, #1727787
ExcludeArch: i686 armv7hl
%if 0%{?suse_version}
# _insert_obs_source_lines_here
ExclusiveArch: x86_64 aarch64 ppc64le s390x
%endif
#################################################################################
# dependencies that apply across all distro families
#################################################################################
Requires: ceph-osd = %{_epoch_prefix}%{version}-%{release}
Requires: ceph-mds = %{_epoch_prefix}%{version}-%{release}
Requires: ceph-mgr = %{_epoch_prefix}%{version}-%{release}
Requires: ceph-mon = %{_epoch_prefix}%{version}-%{release}
Requires(post): binutils
%if 0%{with cephfs_java}
BuildRequires: java-devel
BuildRequires: jpackage-utils
BuildRequires: sharutils
%endif
%if 0%{with selinux}
BuildRequires: checkpolicy
BuildRequires: selinux-policy-devel
%endif
BuildRequires: gperf
BuildRequires: cmake > 3.5
BuildRequires: fuse3-devel
%if 0%{?fedora} || 0%{?suse_version} > 1500 || 0%{?rhel} >= 9
BuildRequires: gcc-c++ >= 11
%endif
%if 0%{?suse_version} == 1500
BuildRequires: gcc11-c++
%endif
%if 0%{?rhel} == 8
BuildRequires: %{gts_prefix}-gcc-c++
BuildRequires: %{gts_prefix}-build
BuildRequires: %{gts_prefix}-libatomic-devel
%endif
%if 0%{?fedora} || 0%{?rhel} >= 9
BuildRequires: libatomic
BuildRequires: gcc-c++
%endif
%ifarch x86_64 aarch64
BuildRequires: mold
%endif
%if 0%{with tcmalloc}
# libprofiler did not build on ppc64le until 2.7.90
%if 0%{?fedora} || 0%{?rhel} >= 8
BuildRequires: gperftools-devel >= 2.7.90
BuildRequires: libunwind-devel
%endif
%if 0%{?rhel} && 0%{?rhel} < 8
BuildRequires: gperftools-devel >= 2.6.1
%endif
%if 0%{?suse_version}
BuildRequires: gperftools-devel >= 2.4
%endif
%endif
BuildRequires: libaio-devel
BuildRequires: libblkid-devel >= 2.17
BuildRequires: cryptsetup-devel
BuildRequires: libcurl-devel
BuildRequires: libcap-devel
BuildRequires: libcap-ng-devel
BuildRequires: fmt-devel >= 6.2.1
%if 0%{?fedora} || 0%{?rhel} >= 10
BuildRequires: rocksdb-devel
Requires: rocksdb
%endif
BuildRequires: liburing-devel
BuildRequires: pkgconfig(libudev)
BuildRequires: libnl3-devel
BuildRequires: liboath-devel
BuildRequires: libtool
BuildRequires: libxml2-devel
BuildRequires: libzstd-devel
BuildRequires: ninja-build
BuildRequires: ncurses-devel
BuildRequires: libicu-devel
BuildRequires: patch
BuildRequires: perl
BuildRequires: pkgconfig
BuildRequires: procps
BuildRequires: python%{python3_pkgversion}
BuildRequires: python%{python3_pkgversion}-rpm-macros
BuildRequires: python%{python3_pkgversion}-devel
BuildRequires: python%{python3_pkgversion}-setuptools
BuildRequires: python%{python3_pkgversion}-Cython
BuildRequires: snappy-devel
BuildRequires: sqlite-devel
BuildRequires: sudo
BuildRequires: pkgconfig(udev)
%ifarch %{valgrind_arches}
BuildRequires: valgrind-devel
%endif
BuildRequires: which
BuildRequires: xfsprogs-devel
BuildRequires: xmlstarlet
BuildRequires: nasm
BuildRequires: lua-devel
%if 0%{with seastar} || 0%{with jaeger}
BuildRequires: yaml-cpp-devel >= 0.6
%endif
%if 0%{with amqp_endpoint}
BuildRequires: librabbitmq-devel
%endif
%if 0%{with kafka_endpoint}
BuildRequires: librdkafka-devel
%endif
%if 0%{with lua_packages}
BuildRequires: %{luarocks_package_name}
%endif
%if 0%{with make_check}
BuildRequires: hostname
BuildRequires: jq
BuildRequires: libuuid-devel
BuildRequires: python%{python3_pkgversion}-bcrypt
BuildRequires: python%{python3_pkgversion}-pecan
BuildRequires: python%{python3_pkgversion}-requests
BuildRequires: python%{python3_pkgversion}-dateutil
BuildRequires: python%{python3_pkgversion}-coverage
BuildRequires: python%{python3_pkgversion}-pyOpenSSL
BuildRequires: socat
BuildRequires: python%{python3_pkgversion}-asyncssh
BuildRequires: python%{python3_pkgversion}-natsort
%endif
%if 0%{with zbd}
BuildRequires: libzbd-devel
%endif
%if 0%{?suse_version}
BuildRequires: libthrift-devel >= 0.13.0
%else
BuildRequires: thrift-devel >= 0.13.0
%endif
BuildRequires: re2-devel
%if 0%{with jaeger}
BuildRequires: bison
BuildRequires: flex
%if 0%{?fedora} || 0%{?rhel}
BuildRequires: json-devel
%endif
%if 0%{?suse_version}
BuildRequires: nlohmann_json-devel
%endif
BuildRequires: libevent-devel
%endif
%if 0%{with system_pmdk}
%if 0%{?suse_version}
BuildRequires: libndctl-devel >= 63
%else
BuildRequires: ndctl-devel >= 63
BuildRequires: daxctl-devel >= 63
%endif
BuildRequires: libpmem-devel
BuildRequires: libpmemobj-devel >= 1.8
%endif
%if 0%{with system_arrow}
BuildRequires: libarrow-devel
BuildRequires: parquet-libs-devel
BuildRequires: utf8proc-devel
%endif
%if 0%{with seastar}
BuildRequires: c-ares-devel
BuildRequires: gnutls-devel
BuildRequires: hwloc-devel
BuildRequires: libpciaccess-devel
BuildRequires: lksctp-tools-devel
BuildRequires: ragel
BuildRequires: systemtap-sdt-devel
%if 0%{?fedora} || 0%{?rhel} >= 10
BuildRequires: libubsan
BuildRequires: libasan
%endif
%if 0%{?rhel} == 8
BuildRequires: %{gts_prefix}-annobin
BuildRequires: %{gts_prefix}-annobin-plugin-gcc
BuildRequires: %{gts_prefix}-libubsan-devel
BuildRequires: %{gts_prefix}-libasan-devel
%endif
%endif
#################################################################################
# distro-conditional dependencies
#################################################################################
%if 0%{?suse_version}
BuildRequires: pkgconfig(systemd)
BuildRequires: systemd-rpm-macros
%{?systemd_requires}
PreReq: %fillup_prereq
BuildRequires: fdupes
BuildRequires: memory-constraints
BuildRequires: net-tools
BuildRequires: libbz2-devel
BuildRequires: mozilla-nss-devel
BuildRequires: keyutils-devel
BuildRequires: libopenssl-devel
BuildRequires: ninja
BuildRequires: openldap2-devel
#BuildRequires: krb5
#BuildRequires: krb5-devel
BuildRequires: cunit-devel
BuildRequires: python%{python3_pkgversion}-PrettyTable
BuildRequires: python%{python3_pkgversion}-PyYAML
BuildRequires: python%{python3_pkgversion}-Sphinx
BuildRequires: rdma-core-devel
BuildRequires: liblz4-devel >= 1.7
# for prometheus-alerts
BuildRequires: golang-github-prometheus-prometheus
BuildRequires: jsonnet
%endif
%if 0%{?fedora} || 0%{?rhel}
Requires: systemd
BuildRequires: boost-devel
BuildRequires: boost-random
BuildRequires: nss-devel
BuildRequires: keyutils-libs-devel
BuildRequires: libatomic
BuildRequires: libibverbs-devel
BuildRequires: librdmacm-devel
BuildRequires: openldap-devel
#BuildRequires: krb5-devel
BuildRequires: openssl-devel
BuildRequires: CUnit-devel
BuildRequires: python%{python3_pkgversion}-devel
BuildRequires: python%{python3_pkgversion}-prettytable
BuildRequires: python%{python3_pkgversion}-pyyaml
BuildRequires: python%{python3_pkgversion}-sphinx
BuildRequires: lz4-devel >= 1.7
%endif
# distro-conditional make check dependencies
%if 0%{with make_check}
BuildRequires: golang
%if 0%{?fedora} || 0%{?rhel}
BuildRequires: golang-github-prometheus
BuildRequires: libtool-ltdl-devel
BuildRequires: xmlsec1
BuildRequires: xmlsec1-devel
%ifarch x86_64
BuildRequires: xmlsec1-nss
%endif
BuildRequires: xmlsec1-openssl
BuildRequires: xmlsec1-openssl-devel
BuildRequires: python%{python3_pkgversion}-cherrypy
BuildRequires: python%{python3_pkgversion}-routes
BuildRequires: python%{python3_pkgversion}-scipy
BuildRequires: python%{python3_pkgversion}-werkzeug
BuildRequires: python%{python3_pkgversion}-pyOpenSSL
%endif
%if 0%{?suse_version}
BuildRequires: golang-github-prometheus-prometheus
BuildRequires: jsonnet
BuildRequires: libxmlsec1-1
BuildRequires: libxmlsec1-nss1
BuildRequires: libxmlsec1-openssl1
BuildRequires: python%{python3_pkgversion}-CherryPy
BuildRequires: python%{python3_pkgversion}-Routes
BuildRequires: python%{python3_pkgversion}-Werkzeug
BuildRequires: python%{python3_pkgversion}-numpy-devel
BuildRequires: xmlsec1-devel
BuildRequires: xmlsec1-openssl-devel
%endif
%endif
# lttng and babeltrace for rbd-replay-prep
%if %{with lttng}
%if 0%{?fedora} || 0%{?rhel}
BuildRequires: lttng-ust-devel
BuildRequires: libbabeltrace-devel
%endif
%if 0%{?suse_version}
BuildRequires: lttng-ust-devel
BuildRequires: babeltrace-devel
%endif
%endif
%if 0%{?suse_version}
BuildRequires: libexpat-devel
%endif
%if 0%{?rhel} || 0%{?fedora}
BuildRequires: expat-devel
%endif
#hardened-cc1
%if 0%{?fedora} || 0%{?rhel}
BuildRequires: redhat-rpm-config
%endif
%if 0%{with seastar}
%if 0%{?fedora} || 0%{?rhel}
BuildRequires: cryptopp-devel
BuildRequires: numactl-devel
%endif
%if 0%{?suse_version}
BuildRequires: libcryptopp-devel
BuildRequires: libnuma-devel
%endif
%endif
%if 0%{?rhel} >= 8
BuildRequires: /usr/bin/pathfix.py
%endif
%description
Ceph is a massively scalable, open-source, distributed storage system that runs
on commodity hardware and delivers object, block and file system storage.
#################################################################################
# subpackages
#################################################################################
%package base
Summary: Ceph Base Package
%if 0%{?suse_version}
Group: System/Filesystems
%endif
Provides: ceph-test:/usr/bin/ceph-kvstore-tool
Requires: ceph-common = %{_epoch_prefix}%{version}-%{release}
Requires: librbd1 = %{_epoch_prefix}%{version}-%{release}
Requires: librados2 = %{_epoch_prefix}%{version}-%{release}
Requires: libcephfs2 = %{_epoch_prefix}%{version}-%{release}
Requires: librgw2 = %{_epoch_prefix}%{version}-%{release}
%if 0%{with selinux}
Requires: ceph-selinux = %{_epoch_prefix}%{version}-%{release}
%endif
Requires: findutils
Requires: grep
Requires: logrotate
Requires: psmisc
Requires: util-linux
Requires: which
%if 0%{?rhel} && 0%{?rhel} < 8
# The following is necessary due to tracker 36508 and can be removed once the
# associated upstream bugs are resolved.
%if 0%{with tcmalloc}
Requires: gperftools-libs >= 2.6.1
%endif
%endif
%if 0%{?weak_deps}
Recommends: chrony
Recommends: nvme-cli
%if 0%{?suse_version}
Requires: smartmontools
%else
Recommends: smartmontools
%endif
%endif
%description base
Base is the package that includes all the files shared amongst ceph servers.
%package -n cephadm
Summary: Utility to bootstrap Ceph clusters
BuildArch: noarch
Requires: lvm2
Requires: python%{python3_pkgversion}
Requires: openssh-server
Requires: which
%if 0%{?weak_deps}
Recommends: podman >= 2.0.2
%endif
%description -n cephadm
Utility to bootstrap a Ceph cluster and manage Ceph daemons deployed
with systemd and podman.
%package -n ceph-common
Summary: Ceph Common
%if 0%{?suse_version}
Group: System/Filesystems
%endif
Requires: librbd1 = %{_epoch_prefix}%{version}-%{release}
Requires: librados2 = %{_epoch_prefix}%{version}-%{release}
Requires: libcephfs2 = %{_epoch_prefix}%{version}-%{release}
Requires: python%{python3_pkgversion}-rados = %{_epoch_prefix}%{version}-%{release}
Requires: python%{python3_pkgversion}-rbd = %{_epoch_prefix}%{version}-%{release}
Requires: python%{python3_pkgversion}-cephfs = %{_epoch_prefix}%{version}-%{release}
Requires: python%{python3_pkgversion}-rgw = %{_epoch_prefix}%{version}-%{release}
Requires: python%{python3_pkgversion}-ceph-argparse = %{_epoch_prefix}%{version}-%{release}
Requires: python%{python3_pkgversion}-ceph-common = %{_epoch_prefix}%{version}-%{release}
%if 0%{?fedora} || 0%{?rhel}
Requires: python%{python3_pkgversion}-prettytable
%endif
%if 0%{?suse_version}
Requires: python%{python3_pkgversion}-PrettyTable
%endif
%if 0%{with libradosstriper}
Requires: libradosstriper1 = %{_epoch_prefix}%{version}-%{release}
%endif
%{?systemd_requires}
%if 0%{?suse_version}
Requires(pre): pwdutils
%endif
%description -n ceph-common
Common utilities to mount and interact with a ceph storage cluster.
Comprised of files that are common to Ceph clients and servers.
%package mds
Summary: Ceph Metadata Server Daemon
%if 0%{?suse_version}
Group: System/Filesystems
%endif
Requires: ceph-base = %{_epoch_prefix}%{version}-%{release}
%description mds
ceph-mds is the metadata server daemon for the Ceph distributed file system.
One or more instances of ceph-mds collectively manage the file system
namespace, coordinating access to the shared OSD cluster.
%package mon
Summary: Ceph Monitor Daemon
%if 0%{?suse_version}
Group: System/Filesystems
%endif
Provides: ceph-test:/usr/bin/ceph-monstore-tool
Requires: ceph-base = %{_epoch_prefix}%{version}-%{release}
%description mon
ceph-mon is the cluster monitor daemon for the Ceph distributed file
system. One or more instances of ceph-mon form a Paxos part-time
parliament cluster that provides extremely reliable and durable storage
of cluster membership, configuration, and state.
%package mgr
Summary: Ceph Manager Daemon
%if 0%{?suse_version}
Group: System/Filesystems
%endif
Requires: ceph-base = %{_epoch_prefix}%{version}-%{release}
Requires: ceph-mgr-modules-core = %{_epoch_prefix}%{version}-%{release}
Requires: libcephsqlite = %{_epoch_prefix}%{version}-%{release}
%if 0%{?weak_deps}
Recommends: ceph-mgr-dashboard = %{_epoch_prefix}%{version}-%{release}
Recommends: ceph-mgr-diskprediction-local = %{_epoch_prefix}%{version}-%{release}
Recommends: ceph-mgr-k8sevents = %{_epoch_prefix}%{version}-%{release}
Recommends: ceph-mgr-cephadm = %{_epoch_prefix}%{version}-%{release}
Recommends: python%{python3_pkgversion}-influxdb
%endif
%description mgr
ceph-mgr enables python modules that provide services (such as the REST
module derived from Calamari) and expose CLI hooks. ceph-mgr gathers
the cluster maps, the daemon metadata, and performance counters, and
exposes all these to the python modules.
%package mgr-dashboard
Summary: Ceph Dashboard
BuildArch: noarch
%if 0%{?suse_version}
Group: System/Filesystems
%endif
Requires: ceph-mgr = %{_epoch_prefix}%{version}-%{release}
Requires: ceph-grafana-dashboards = %{_epoch_prefix}%{version}-%{release}
Requires: ceph-prometheus-alerts = %{_epoch_prefix}%{version}-%{release}
Requires: python%{python3_pkgversion}-setuptools
%if 0%{?fedora} || 0%{?rhel}
Requires: python%{python3_pkgversion}-cherrypy
Requires: python%{python3_pkgversion}-routes
Requires: python%{python3_pkgversion}-werkzeug
%if 0%{?weak_deps}
Recommends: python%{python3_pkgversion}-saml
%endif
%endif
%if 0%{?suse_version}
Requires: python%{python3_pkgversion}-CherryPy
Requires: python%{python3_pkgversion}-Routes
Requires: python%{python3_pkgversion}-Werkzeug
Recommends: python%{python3_pkgversion}-python3-saml
%endif
%description mgr-dashboard
ceph-mgr-dashboard is a manager module, providing a web-based application
to monitor and manage many aspects of a Ceph cluster and related components.
See the Dashboard documentation at http://docs.ceph.com/ for details and a
detailed feature overview.
%package mgr-diskprediction-local
Summary: Ceph Manager module for predicting disk failures
BuildArch: noarch
%if 0%{?suse_version}
Group: System/Filesystems
%endif
Requires: ceph-mgr = %{_epoch_prefix}%{version}-%{release}
Requires: python%{python3_pkgversion}-numpy
%if 0%{?fedora} || 0%{?suse_version} || 0%{?rhel} >= 10
Requires: python%{python3_pkgversion}-scikit-learn
%endif
Requires: python3-scipy
%description mgr-diskprediction-local
ceph-mgr-diskprediction-local is a ceph-mgr module that tries to predict
disk failures using local algorithms and machine-learning databases.
%package mgr-modules-core
Summary: Ceph Manager modules which are always enabled
BuildArch: noarch
%if 0%{?suse_version}
Group: System/Filesystems
%endif
Requires: python%{python3_pkgversion}-bcrypt
Requires: python%{python3_pkgversion}-pecan
Requires: python%{python3_pkgversion}-pyOpenSSL
Requires: python%{python3_pkgversion}-requests
Requires: python%{python3_pkgversion}-dateutil
Requires: python%{python3_pkgversion}-setuptools
%if 0%{?fedora} || 0%{?rhel} >= 8
Requires: python%{python3_pkgversion}-cherrypy
Requires: python%{python3_pkgversion}-pyyaml
Requires: python%{python3_pkgversion}-werkzeug
%endif
%if 0%{?suse_version}
Requires: python%{python3_pkgversion}-CherryPy
Requires: python%{python3_pkgversion}-PyYAML
Requires: python%{python3_pkgversion}-Werkzeug
%endif
%if 0%{?weak_deps}
Recommends: ceph-mgr-rook = %{_epoch_prefix}%{version}-%{release}
%endif
%description mgr-modules-core
ceph-mgr-modules-core provides a set of modules which are always
enabled by ceph-mgr.
%package mgr-rook
BuildArch: noarch
Summary: Ceph Manager module for Rook-based orchestration
%if 0%{?suse_version}
Group: System/Filesystems
%endif
Requires: ceph-mgr = %{_epoch_prefix}%{version}-%{release}
Requires: python%{python3_pkgversion}-kubernetes
Requires: python%{python3_pkgversion}-jsonpatch
%description mgr-rook
ceph-mgr-rook is a ceph-mgr module for orchestration functions using
a Rook backend.
%package mgr-k8sevents
BuildArch: noarch
Summary: Ceph Manager module to orchestrate ceph-events to kubernetes' events API
%if 0%{?suse_version}
Group: System/Filesystems
%endif
Requires: ceph-mgr = %{_epoch_prefix}%{version}-%{release}
Requires: python%{python3_pkgversion}-kubernetes
%description mgr-k8sevents
ceph-mgr-k8sevents is a ceph-mgr module that sends every ceph event
to the kubernetes events API.
%package mgr-cephadm
Summary: Ceph Manager module for cephadm-based orchestration
BuildArch: noarch
%if 0%{?suse_version}
Group: System/Filesystems
%endif
Requires: ceph-mgr = %{_epoch_prefix}%{version}-%{release}
Requires: python%{python3_pkgversion}-asyncssh
Requires: python%{python3_pkgversion}-natsort
Requires: cephadm = %{_epoch_prefix}%{version}-%{release}
%if 0%{?suse_version}
Requires: openssh
Requires: python%{python3_pkgversion}-CherryPy
Requires: python%{python3_pkgversion}-Jinja2
%endif
%if 0%{?rhel} || 0%{?fedora}
Requires: openssh-clients
Requires: python%{python3_pkgversion}-cherrypy
Requires: python%{python3_pkgversion}-jinja2
%endif
%description mgr-cephadm
ceph-mgr-cephadm is a ceph-mgr module for orchestration functions using
the integrated cephadm deployment tool for management operations.
%package fuse
Summary: Ceph fuse-based client
%if 0%{?suse_version}
Group: System/Filesystems
%endif
Requires: fuse3
Requires: python%{python3_pkgversion}
%description fuse
FUSE based client for Ceph distributed network file system
%package -n cephfs-mirror
Summary: Ceph daemon for mirroring CephFS snapshots
%if 0%{?suse_version}
Group: System/Filesystems
%endif
Requires: ceph-base = %{_epoch_prefix}%{version}-%{release}
Requires: librados2 = %{_epoch_prefix}%{version}-%{release}
Requires: libcephfs2 = %{_epoch_prefix}%{version}-%{release}
%description -n cephfs-mirror
Daemon for mirroring CephFS snapshots between Ceph clusters.
%package -n ceph-exporter
Summary: Daemon for exposing perf counters as Prometheus metrics
%if 0%{?suse_version}
Group: System/Filesystems
%endif
Requires: ceph-base = %{_epoch_prefix}%{version}-%{release}
%description -n ceph-exporter
Daemon for exposing perf counters as Prometheus metrics
%package -n rbd-fuse
Summary: Ceph fuse-based client
%if 0%{?suse_version}
Group: System/Filesystems
%endif
Requires: librados2 = %{_epoch_prefix}%{version}-%{release}
Requires: librbd1 = %{_epoch_prefix}%{version}-%{release}
%description -n rbd-fuse
FUSE based client to map Ceph rbd images to files
%package -n rbd-mirror
Summary: Ceph daemon for mirroring RBD images
%if 0%{?suse_version}
Group: System/Filesystems
%endif
Requires: ceph-base = %{_epoch_prefix}%{version}-%{release}
Requires: librados2 = %{_epoch_prefix}%{version}-%{release}
Requires: librbd1 = %{_epoch_prefix}%{version}-%{release}
%description -n rbd-mirror
Daemon for mirroring RBD images between Ceph clusters, streaming
changes asynchronously.
%package immutable-object-cache
Summary: Ceph daemon for immutable object cache
%if 0%{?suse_version}
Group: System/Filesystems
%endif
Requires: ceph-base = %{_epoch_prefix}%{version}-%{release}
Requires: librados2 = %{_epoch_prefix}%{version}-%{release}
%description immutable-object-cache
Daemon for immutable object cache.
%package -n rbd-nbd
Summary: Ceph RBD client based on NBD
%if 0%{?suse_version}
Group: System/Filesystems
%endif
Requires: librados2 = %{_epoch_prefix}%{version}-%{release}
Requires: librbd1 = %{_epoch_prefix}%{version}-%{release}
%description -n rbd-nbd
NBD based client to map Ceph rbd images to a local device
%package radosgw
Summary: Rados REST gateway
%if 0%{?suse_version}
Group: System/Filesystems
%endif
Requires: ceph-base = %{_epoch_prefix}%{version}-%{release}
%if 0%{with selinux}
Requires: ceph-selinux = %{_epoch_prefix}%{version}-%{release}
%endif
Requires: librados2 = %{_epoch_prefix}%{version}-%{release}
Requires: librgw2 = %{_epoch_prefix}%{version}-%{release}
%if 0%{?rhel} || 0%{?fedora}
Requires: mailcap
%endif
%if 0%{?weak_deps}
Recommends: gawk
%endif
%description radosgw
RADOS is a distributed object store used by the Ceph distributed
storage system. This package provides a REST gateway to the
object store that aims to implement a superset of Amazon's S3
service as well as the OpenStack Object Storage ("Swift") API.
%package -n cephfs-top
Summary: top(1) like utility for Ceph Filesystem
BuildArch: noarch
Requires: python%{python3_pkgversion}-rados
%description -n cephfs-top
This package provides a top(1) like utility to display Ceph Filesystem metrics
in realtime.
%if %{with ocf}
%package resource-agents
Summary: OCF-compliant resource agents for Ceph daemons
BuildArch: noarch
%if 0%{?suse_version}
Group: System/Filesystems
%endif
Requires: ceph-base = %{_epoch_prefix}%{version}
Requires: resource-agents
%description resource-agents
Resource agents for monitoring and managing Ceph daemons
under Open Cluster Framework (OCF) compliant resource
managers such as Pacemaker.
%endif
%package osd
Summary: Ceph Object Storage Daemon
%if 0%{?suse_version}
Group: System/Filesystems
%endif
Provides: ceph-test:/usr/bin/ceph-osdomap-tool
Requires: ceph-base = %{_epoch_prefix}%{version}-%{release}
Requires: sudo
Requires: libstoragemgmt
%if 0%{?weak_deps}
Recommends: ceph-volume = %{_epoch_prefix}%{version}-%{release}
%endif
%description osd
ceph-osd is the object storage daemon for the Ceph distributed file
system. It is responsible for storing objects on a local file system
and providing access to them over the network.
%if 0%{with seastar}
%package crimson-osd
Summary: Ceph Object Storage Daemon (crimson)
%if 0%{?suse_version}
Group: System/Filesystems
%endif
Requires: ceph-osd = %{_epoch_prefix}%{version}-%{release}
Requires: binutils
%description crimson-osd
crimson-osd is the object storage daemon for the Ceph distributed file
system. It is responsible for storing objects on a local file system
and providing access to them over the network.
%endif
%package volume
Summary: Ceph OSD deployment and inspection tool
BuildArch: noarch
%if 0%{?suse_version}
Group: System/Filesystems
%endif
Requires: ceph-osd = %{_epoch_prefix}%{version}-%{release}
Requires: cryptsetup
Requires: e2fsprogs
Requires: lvm2
Requires: parted
Requires: util-linux
Requires: xfsprogs
Requires: python%{python3_pkgversion}-setuptools
Requires: python%{python3_pkgversion}-packaging
Requires: python%{python3_pkgversion}-ceph-common = %{_epoch_prefix}%{version}-%{release}
%description volume
This package contains a tool to deploy OSDs with different devices like
lvm or physical disks, trying to follow a predictable and robust way of
preparing, activating, and starting the deployed OSD.
%package -n librados2
Summary: RADOS distributed object store client library
%if 0%{?suse_version}
Group: System/Libraries
%endif
%if 0%{?rhel} || 0%{?fedora}
Obsoletes: ceph-libs < %{_epoch_prefix}%{version}-%{release}
%endif
%description -n librados2
RADOS is a reliable, autonomic distributed object storage cluster
developed as part of the Ceph distributed storage system. This is a
shared library allowing applications to access the distributed object
store using a simple file-like interface.
%package -n librados-devel
Summary: RADOS headers
%if 0%{?suse_version}
Group: Development/Libraries/C and C++
%endif
Requires: librados2 = %{_epoch_prefix}%{version}-%{release}
Obsoletes: ceph-devel < %{_epoch_prefix}%{version}-%{release}
Provides: librados2-devel = %{_epoch_prefix}%{version}-%{release}
Obsoletes: librados2-devel < %{_epoch_prefix}%{version}-%{release}
%description -n librados-devel
This package contains C libraries and headers needed to develop programs
that use RADOS object store.
%package -n libradospp-devel
Summary: RADOS headers
%if 0%{?suse_version}
Group: Development/Libraries/C and C++
%endif
Requires: librados2 = %{_epoch_prefix}%{version}-%{release}
Requires: librados-devel = %{_epoch_prefix}%{version}-%{release}
%description -n libradospp-devel
This package contains C++ libraries and headers needed to develop programs
that use RADOS object store.
%package -n librgw2
Summary: RADOS gateway client library
%if 0%{?suse_version}
Group: System/Libraries
%endif
Requires: librados2 = %{_epoch_prefix}%{version}-%{release}
%description -n librgw2
This package provides a library implementation of the RADOS gateway
(distributed object store with S3 and Swift personalities).
%package -n librgw-devel
Summary: RADOS gateway client library
%if 0%{?suse_version}
Group: Development/Libraries/C and C++
%endif
Requires: librados-devel = %{_epoch_prefix}%{version}-%{release}
Requires: librgw2 = %{_epoch_prefix}%{version}-%{release}
Provides: librgw2-devel = %{_epoch_prefix}%{version}-%{release}
Obsoletes: librgw2-devel < %{_epoch_prefix}%{version}-%{release}
%description -n librgw-devel
This package contains libraries and headers needed to develop programs
that use RADOS gateway client library.
%package -n python%{python3_pkgversion}-rgw
Summary: Python 3 libraries for the RADOS gateway
%if 0%{?suse_version}
Group: Development/Libraries/Python
%endif
Requires: librgw2 = %{_epoch_prefix}%{version}-%{release}
Requires: python%{python3_pkgversion}-rados = %{_epoch_prefix}%{version}-%{release}
%{?python_provide:%python_provide python%{python3_pkgversion}-rgw}
Provides: python-rgw = %{_epoch_prefix}%{version}-%{release}
Obsoletes: python-rgw < %{_epoch_prefix}%{version}-%{release}
%description -n python%{python3_pkgversion}-rgw
This package contains Python 3 libraries for interacting with Ceph RADOS
gateway.
%package -n python%{python3_pkgversion}-rados
Summary: Python 3 libraries for the RADOS object store
%if 0%{?suse_version}
Group: Development/Libraries/Python
%endif
Requires: python%{python3_pkgversion}
Requires: librados2 = %{_epoch_prefix}%{version}-%{release}
%{?python_provide:%python_provide python%{python3_pkgversion}-rados}
Provides: python-rados = %{_epoch_prefix}%{version}-%{release}
Obsoletes: python-rados < %{_epoch_prefix}%{version}-%{release}
%description -n python%{python3_pkgversion}-rados
This package contains Python 3 libraries for interacting with Ceph RADOS
object store.
%package -n libcephsqlite
Summary: SQLite3 VFS for Ceph
%if 0%{?suse_version}
Group: System/Libraries
%endif
Requires: librados2 = %{_epoch_prefix}%{version}-%{release}
%description -n libcephsqlite
A SQLite3 VFS for storing and manipulating databases stored on Ceph's RADOS
distributed object store.
%package -n libcephsqlite-devel
Summary: SQLite3 VFS for Ceph headers
%if 0%{?suse_version}
Group: Development/Libraries/C and C++
%endif
Requires: sqlite-devel
Requires: libcephsqlite = %{_epoch_prefix}%{version}-%{release}
Requires: librados-devel = %{_epoch_prefix}%{version}-%{release}
Requires: libradospp-devel = %{_epoch_prefix}%{version}-%{release}
Obsoletes: ceph-devel < %{_epoch_prefix}%{version}-%{release}
Provides: libcephsqlite-devel = %{_epoch_prefix}%{version}-%{release}
Obsoletes: libcephsqlite-devel < %{_epoch_prefix}%{version}-%{release}
%description -n libcephsqlite-devel
A SQLite3 VFS for storing and manipulating databases stored on Ceph's RADOS
distributed object store.
%if 0%{with libradosstriper}
%package -n libradosstriper1
Summary: RADOS striping interface
%if 0%{?suse_version}
Group: System/Libraries
%endif
Requires: librados2 = %{_epoch_prefix}%{version}-%{release}
%description -n libradosstriper1
Striping interface built on top of the rados library, allowing bigger
objects to be striped onto several standard rados objects using an
interface very similar to the rados one.
%package -n libradosstriper-devel
Summary: RADOS striping interface headers
%if 0%{?suse_version}
Group: Development/Libraries/C and C++
%endif
Requires: libradosstriper1 = %{_epoch_prefix}%{version}-%{release}
Requires: librados-devel = %{_epoch_prefix}%{version}-%{release}
Requires: libradospp-devel = %{_epoch_prefix}%{version}-%{release}
Obsoletes: ceph-devel < %{_epoch_prefix}%{version}-%{release}
Provides: libradosstriper1-devel = %{_epoch_prefix}%{version}-%{release}
Obsoletes: libradosstriper1-devel < %{_epoch_prefix}%{version}-%{release}
%description -n libradosstriper-devel
This package contains libraries and headers needed to develop programs
that use RADOS striping interface.
%endif
%package -n librbd1
Summary: RADOS block device client library
%if 0%{?suse_version}
Group: System/Libraries
%endif
Requires: librados2 = %{_epoch_prefix}%{version}-%{release}
%if 0%{?suse_version}
Requires(post): coreutils
%endif
%if 0%{?rhel} || 0%{?fedora}
Obsoletes: ceph-libs < %{_epoch_prefix}%{version}-%{release}
%endif
%description -n librbd1
RBD is a block device striped across multiple distributed objects in
RADOS, a reliable, autonomic distributed object storage cluster
developed as part of the Ceph distributed storage system. This is a
shared library allowing applications to manage these block devices.
%package -n librbd-devel
Summary: RADOS block device headers
%if 0%{?suse_version}
Group: Development/Libraries/C and C++
%endif
Requires: librbd1 = %{_epoch_prefix}%{version}-%{release}
Requires: librados-devel = %{_epoch_prefix}%{version}-%{release}
Requires: libradospp-devel = %{_epoch_prefix}%{version}-%{release}
Obsoletes: ceph-devel < %{_epoch_prefix}%{version}-%{release}
Provides: librbd1-devel = %{_epoch_prefix}%{version}-%{release}
Obsoletes: librbd1-devel < %{_epoch_prefix}%{version}-%{release}
%description -n librbd-devel
This package contains libraries and headers needed to develop programs
that use RADOS block device.
%package -n python%{python3_pkgversion}-rbd
Summary: Python 3 libraries for the RADOS block device
%if 0%{?suse_version}
Group: Development/Libraries/Python
%endif
Requires: librbd1 = %{_epoch_prefix}%{version}-%{release}
Requires: python%{python3_pkgversion}-rados = %{_epoch_prefix}%{version}-%{release}
%{?python_provide:%python_provide python%{python3_pkgversion}-rbd}
Provides: python-rbd = %{_epoch_prefix}%{version}-%{release}
Obsoletes: python-rbd < %{_epoch_prefix}%{version}-%{release}
%description -n python%{python3_pkgversion}-rbd
This package contains Python 3 libraries for interacting with Ceph RADOS
block device.
%package -n libcephfs2
Summary: Ceph distributed file system client library
%if 0%{?suse_version}
Group: System/Libraries
%endif
Obsoletes: libcephfs1 < %{_epoch_prefix}%{version}-%{release}
%if 0%{?rhel} || 0%{?fedora}
Obsoletes: ceph-libs < %{_epoch_prefix}%{version}-%{release}
Obsoletes: ceph-libcephfs < %{_epoch_prefix}%{version}-%{release}
%endif
%description -n libcephfs2
Ceph is a distributed network file system designed to provide excellent
performance, reliability, and scalability. This is a shared library
allowing applications to access a Ceph distributed file system via a
POSIX-like interface.
%package -n libcephfs-devel
Summary: Ceph distributed file system headers
%if 0%{?suse_version}
Group: Development/Libraries/C and C++
%endif
Requires: libcephfs2 = %{_epoch_prefix}%{version}-%{release}
Requires: librados-devel = %{_epoch_prefix}%{version}-%{release}
Obsoletes: ceph-devel < %{_epoch_prefix}%{version}-%{release}
Provides: libcephfs2-devel = %{_epoch_prefix}%{version}-%{release}
Obsoletes: libcephfs2-devel < %{_epoch_prefix}%{version}-%{release}
%description -n libcephfs-devel
This package contains libraries and headers needed to develop programs
that use Ceph distributed file system.
%package -n python%{python3_pkgversion}-cephfs
Summary: Python 3 libraries for Ceph distributed file system
%if 0%{?suse_version}
Group: Development/Libraries/Python
%endif
Requires: libcephfs2 = %{_epoch_prefix}%{version}-%{release}
Requires: python%{python3_pkgversion}-rados = %{_epoch_prefix}%{version}-%{release}
Requires: python%{python3_pkgversion}-ceph-argparse = %{_epoch_prefix}%{version}-%{release}
%{?python_provide:%python_provide python%{python3_pkgversion}-cephfs}
Provides: python-cephfs = %{_epoch_prefix}%{version}-%{release}
Obsoletes: python-cephfs < %{_epoch_prefix}%{version}-%{release}
%description -n python%{python3_pkgversion}-cephfs
This package contains Python 3 libraries for interacting with Ceph distributed
file system.
%package -n python%{python3_pkgversion}-ceph-argparse
Summary: Python 3 utility libraries for Ceph CLI
%if 0%{?suse_version}
Group: Development/Libraries/Python
%endif
%{?python_provide:%python_provide python%{python3_pkgversion}-ceph-argparse}
%description -n python%{python3_pkgversion}-ceph-argparse
This package contains types and routines for Python 3 used by the Ceph CLI as
well as the RESTful interface. These have to do with querying the daemons for
command-description information, validating user command input against those
descriptions, and submitting the command to the appropriate daemon.
%package -n python%{python3_pkgversion}-ceph-common
Summary: Python 3 utility libraries for Ceph
%if 0%{?fedora} || 0%{?rhel} >= 8
Requires: python%{python3_pkgversion}-pyyaml
%endif
%if 0%{?suse_version}
Requires: python%{python3_pkgversion}-PyYAML
%endif
%if 0%{?suse_version}
Group: Development/Libraries/Python
%endif
%{?python_provide:%python_provide python%{python3_pkgversion}-ceph-common}
%description -n python%{python3_pkgversion}-ceph-common
This package contains data structures, classes and functions used by Ceph.
It also contains utilities used for the cephadm orchestrator.
%if 0%{with cephfs_shell}
%package -n cephfs-shell
Summary: Interactive shell for Ceph file system
Requires: python%{python3_pkgversion}-cmd2
Requires: python%{python3_pkgversion}-colorama
Requires: python%{python3_pkgversion}-cephfs
%description -n cephfs-shell
This package contains an interactive tool that allows accessing a Ceph
file system without mounting it by providing a nice pseudo-shell which
works like an FTP client.
%endif
%if 0%{with ceph_test_package}
%package -n ceph-test
Summary: Ceph benchmarks and test tools
%if 0%{?suse_version}
Group: System/Benchmark
%endif
Requires: ceph-common = %{_epoch_prefix}%{version}-%{release}
Requires: xmlstarlet
Requires: jq
Requires: socat
BuildRequires: gtest-devel
BuildRequires: gmock-devel
%description -n ceph-test
This package contains Ceph benchmarks and test tools.
%endif
%if 0%{with cephfs_java}
%package -n libcephfs_jni1
Summary: Java Native Interface library for CephFS Java bindings
%if 0%{?suse_version}
Group: System/Libraries
%endif
Requires: java
Requires: libcephfs2 = %{_epoch_prefix}%{version}-%{release}
%description -n libcephfs_jni1
This package contains the Java Native Interface library for CephFS Java
bindings.
%package -n libcephfs_jni-devel
Summary: Development files for CephFS Java Native Interface library
%if 0%{?suse_version}
Group: Development/Libraries/Java
%endif
Requires: java
Requires: libcephfs_jni1 = %{_epoch_prefix}%{version}-%{release}
Obsoletes: ceph-devel < %{_epoch_prefix}%{version}-%{release}
Provides: libcephfs_jni1-devel = %{_epoch_prefix}%{version}-%{release}
Obsoletes: libcephfs_jni1-devel < %{_epoch_prefix}%{version}-%{release}
%description -n libcephfs_jni-devel
This package contains the development files for CephFS Java Native Interface
library.
%package -n cephfs-java
Summary: Java libraries for the Ceph File System
%if 0%{?suse_version}
Group: System/Libraries
%endif
Requires: java
Requires: libcephfs_jni1 = %{_epoch_prefix}%{version}-%{release}
Requires: junit
BuildRequires: junit
%description -n cephfs-java
This package contains the Java libraries for the Ceph File System.
%endif
%package -n rados-objclass-devel
Summary: RADOS object class development kit
%if 0%{?suse_version}
Group: Development/Libraries/C and C++
%endif
Requires: libradospp-devel = %{_epoch_prefix}%{version}-%{release}
%description -n rados-objclass-devel
This package contains libraries and headers needed to develop RADOS object
class plugins.
%if 0%{with selinux}
%package selinux
Summary: SELinux support for Ceph MON, OSD and MDS
%if 0%{?suse_version}
Group: System/Filesystems
%endif
Requires: ceph-base = %{_epoch_prefix}%{version}-%{release}
Requires: policycoreutils, libselinux-utils
Requires(post): ceph-base = %{_epoch_prefix}%{version}-%{release}
Requires(post): selinux-policy-base >= %{_selinux_policy_version}, policycoreutils, gawk
Requires(postun): policycoreutils
%description selinux
This package contains SELinux support for Ceph MON, OSD and MDS. The package
also performs file-system relabelling which can take a long time on heavily
populated file-systems.
%endif
%package grafana-dashboards
Summary: The set of Grafana dashboards for monitoring purposes
BuildArch: noarch
%if 0%{?suse_version}
Group: System/Filesystems
%endif
%description grafana-dashboards
This package provides a set of Grafana dashboards for monitoring of
Ceph clusters. The dashboards require a Prometheus server setup
collecting data from Ceph Manager "prometheus" module and Prometheus
project "node_exporter" module. The dashboards are designed to be
integrated with the Ceph Manager Dashboard web UI.
%package prometheus-alerts
Summary: Prometheus alerts for a Ceph deployment
BuildArch: noarch
Group: System/Monitoring
%description prometheus-alerts
This package provides Ceph default alerts for Prometheus.
%package mib
Summary: MIB for SNMP alerts
BuildArch: noarch
%if 0%{?suse_version}
Group: System/Monitoring
%endif
%description mib
This package provides a Ceph MIB for SNMP traps.
%package node-proxy
Summary: hw monitoring agent for Ceph
BuildArch: noarch
%if 0%{?suse_version}
Group: System/Monitoring
%endif
%description node-proxy
This package provides a Ceph hardware monitoring agent.
#################################################################################
# common
#################################################################################
%prep
%autosetup -p1 -n %{name}-%{version}
%build
# Disable lto on systems that do not support symver attribute
# See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=48200 for details
%if ( 0%{?rhel} && 0%{?rhel} < 9 ) || ( 0%{?suse_version} && 0%{?suse_version} <= 1500 )
%define _lto_cflags %{nil}
%endif
%if 0%{with cephfs_java}
# Find jni.h
for i in /usr/{lib64,lib}/jvm/java/include{,/linux}; do
[ -d $i ] && java_inc="$java_inc -I$i"
done
%endif
%if 0%{?suse_version}
%limit_build -m 3000
%endif
export CPPFLAGS="$java_inc"
export CFLAGS="$RPM_OPT_FLAGS"
export CXXFLAGS="$RPM_OPT_FLAGS"
export LDFLAGS="$RPM_LD_FLAGS"
%if 0%{with seastar}
# seastar uses longjmp() to implement coroutines, and this annoys longjmp_chk()
export CXXFLAGS=$(echo $RPM_OPT_FLAGS | sed -e 's/-Wp,-D_FORTIFY_SOURCE=2//g')
# remove from CFLAGS too because it causes the arrow submodule to fail with:
# warning _FORTIFY_SOURCE requires compiling with optimization (-O)
export CFLAGS=$(echo $RPM_OPT_FLAGS | sed -e 's/-Wp,-D_FORTIFY_SOURCE=2//g')
%endif
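# Illustration only (hypothetical flag string): if RPM_OPT_FLAGS were
#   -O2 -g -Wp,-D_FORTIFY_SOURCE=2 -fstack-protector-strong
# the two sed substitutions above would export
#   CXXFLAGS/CFLAGS = -O2 -g  -fstack-protector-strong
# i.e. only the -Wp,-D_FORTIFY_SOURCE=2 token is stripped; every other
# optimization and hardening flag is preserved.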
env | sort
%{?!_vpath_builddir:%global _vpath_builddir %{_target_platform}}
# TODO: drop this step once we can use `cmake -B`
%{cmake} \
-GNinja \
-DCMAKE_BUILD_TYPE=RelWithDebInfo \
-DBUILD_CONFIG=rpmbuild \
-DSYSTEMD_SYSTEM_UNIT_DIR:PATH=%{_unitdir} \
-DCMAKE_INSTALL_SYSCONFDIR:PATH=%{_sysconfdir} \
-DWITH_MANPAGE:BOOL=ON \
-DWITH_PYTHON3:STRING=%{python3_version} \
-DWITH_MGR_DASHBOARD_FRONTEND:BOOL=OFF \
%if 0%{?suse_version}
-DWITH_RADOSGW_SELECT_PARQUET:BOOL=OFF \
%endif
%if 0%{without ceph_test_package}
-DWITH_TESTS:BOOL=OFF \
%endif
%if 0%{with cephfs_java}
-DJAVA_HOME=%{java_home} \
-DJAVA_LIB_INSTALL_DIR=%{_jnidir} \
-DWITH_CEPHFS_JAVA:BOOL=ON \
%endif
%if 0%{with selinux}
-DWITH_SELINUX:BOOL=ON \
%endif
%if %{with lttng}
-DWITH_LTTNG:BOOL=ON \
-DWITH_BABELTRACE:BOOL=ON \
%else
-DWITH_LTTNG:BOOL=OFF \
-DWITH_BABELTRACE:BOOL=OFF \
%endif
$CEPH_EXTRA_CMAKE_ARGS \
%if 0%{with ocf}
-DWITH_OCF:BOOL=ON \
%endif
%if 0%{?fedora} || 0%{?rhel} >= 10
-DWITH_SYSTEM_ROCKSDB:BOOL=ON \
%endif
-DWITH_SYSTEM_LIBURING:BOOL=ON \
-DWITH_SYSTEM_BOOST:BOOL=OFF \
%if 0%{with cephfs_shell}
-DWITH_CEPHFS_SHELL:BOOL=ON \
%endif
%if 0%{with libradosstriper}
-DWITH_LIBRADOSSTRIPER:BOOL=ON \
%else
-DWITH_LIBRADOSSTRIPER:BOOL=OFF \
%endif
%if 0%{with amqp_endpoint}
-DWITH_RADOSGW_AMQP_ENDPOINT:BOOL=ON \
%else
-DWITH_RADOSGW_AMQP_ENDPOINT:BOOL=OFF \
%endif
%if 0%{with kafka_endpoint}
-DWITH_RADOSGW_KAFKA_ENDPOINT:BOOL=ON \
%else
-DWITH_RADOSGW_KAFKA_ENDPOINT:BOOL=OFF \
%endif
%if 0%{without lua_packages}
-DWITH_RADOSGW_LUA_PACKAGES:BOOL=OFF \
%endif
%if 0%{with zbd}
-DWITH_ZBD:BOOL=ON \
%endif
%if 0%{with cmake_verbose_logging}
-DCMAKE_VERBOSE_MAKEFILE:BOOL=ON \
%endif
%if 0%{with rbd_rwl_cache}
-DWITH_RBD_RWL:BOOL=ON \
%endif
%if 0%{with rbd_ssd_cache}
-DWITH_RBD_SSD_CACHE:BOOL=ON \
%endif
%if 0%{with system_pmdk}
-DWITH_SYSTEM_PMDK:BOOL=ON \
%endif
%if 0%{without jaeger}
-DWITH_JAEGER:BOOL=OFF \
%endif
%if 0%{?suse_version}
-DBOOST_J:STRING=%{jobs} \
%else
-DBOOST_J:STRING=%{_smp_build_ncpus} \
%endif
%if 0%{with ceph_test_package}
-DWITH_SYSTEM_GTEST:BOOL=ON \
%endif
-DWITH_SYSTEM_ZSTD:BOOL=ON \
%if 0%{?fedora} || 0%{?rhel}
-DWITH_FMT_HEADER_ONLY:BOOL=ON \
%endif
%if 0%{with system_arrow}
-DWITH_SYSTEM_ARROW:BOOL=ON \
-DWITH_SYSTEM_UTF8PROC:BOOL=ON \
%endif
%ifarch x86_64 aarch64
-DCMAKE_LINKER=%{_bindir}/ld.mold \
%endif
%if 0%{with seastar}
-DWITH_SEASTAR:BOOL=ON \
-DWITH_JAEGER:BOOL=OFF \
%endif
-DWITH_GRAFANA:BOOL=ON
%if %{with cmake_verbose_logging}
cat ./%{__cmake_builddir}/CMakeFiles/CMakeOutput.log
cat ./%{__cmake_builddir}/CMakeFiles/CMakeError.log
%endif
export VERBOSE=1
export V=1
export GCC_COLORS=
%cmake_build
%if 0%{with make_check}
%check
# run in-tree unittests
# cd %{_vpath_builddir}
# ctest "$CEPH_MFLAGS_JOBS"
%endif
%install
%cmake_install
# we have dropped sysvinit bits
rm -f %{buildroot}/%{_sysconfdir}/init.d/ceph
%if 0%{with seastar}
# package crimson-osd with the name of ceph-osd
install -m 0755 %{buildroot}%{_bindir}/crimson-osd %{buildroot}%{_bindir}/ceph-osd
%endif
install -m 0644 -D src/etc-rbdmap %{buildroot}%{_sysconfdir}/ceph/rbdmap
%if 0%{?fedora} || 0%{?rhel}
install -m 0644 -D etc/sysconfig/ceph %{buildroot}%{_sysconfdir}/sysconfig/ceph
%endif
%if 0%{?suse_version}
install -m 0644 -D etc/sysconfig/ceph %{buildroot}%{_fillupdir}/sysconfig.%{name}
%endif
install -m 0644 -D systemd/ceph.tmpfiles.d %{buildroot}%{_tmpfilesdir}/ceph-common.conf
install -m 0644 -D systemd/50-ceph.preset %{buildroot}%{_presetdir}/50-ceph.preset
mkdir -p %{buildroot}%{_sbindir}
install -m 0644 -D src/logrotate.conf %{buildroot}%{_sysconfdir}/logrotate.d/ceph
chmod 0644 %{buildroot}%{_docdir}/ceph/sample.ceph.conf
install -m 0644 -D COPYING %{buildroot}%{_docdir}/ceph/COPYING
install -m 0644 -D etc/sysctl/90-ceph-osd.conf %{buildroot}%{_sysctldir}/90-ceph-osd.conf
install -m 0755 -D src/tools/rbd_nbd/rbd-nbd_quiesce %{buildroot}%{_libexecdir}/rbd-nbd/rbd-nbd_quiesce
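# pre-create the cephadm user's home, its .ssh directory and an empty
# authorized_keys placeholder; ownership is assigned via attr entries in the
# cephadm files list below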
mkdir -p %{buildroot}%{_sharedstatedir}/cephadm
chmod 0700 %{buildroot}%{_sharedstatedir}/cephadm
mkdir -p %{buildroot}%{_sharedstatedir}/cephadm/.ssh
chmod 0700 %{buildroot}%{_sharedstatedir}/cephadm/.ssh
touch %{buildroot}%{_sharedstatedir}/cephadm/.ssh/authorized_keys
chmod 0600 %{buildroot}%{_sharedstatedir}/cephadm/.ssh/authorized_keys
# /sbin/mount.ceph compatibility symlink for older SUSE releases
%if 0%{?suse_version} && 0%{?suse_version} < 1550
mkdir -p %{buildroot}/sbin
ln -sf %{_sbindir}/mount.ceph %{buildroot}/sbin/mount.ceph
%endif
# udev rules
install -m 0644 -D udev/50-rbd.rules %{buildroot}%{_udevrulesdir}/50-rbd.rules
# sudoers.d
install -m 0440 -D sudoers.d/ceph-smartctl %{buildroot}%{_sysconfdir}/sudoers.d/ceph-smartctl
%if 0%{?rhel} >= 8
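# rewrite ambiguous python shebangs in installed scripts to the exact
# platform python3 interpreter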
pathfix.py -pni "%{__python3} %{py3_shbang_opts}" %{buildroot}%{_bindir}/*
pathfix.py -pni "%{__python3} %{py3_shbang_opts}" %{buildroot}%{_sbindir}/*
%endif
# set up placeholder directories
mkdir -p %{buildroot}%{_sysconfdir}/ceph
mkdir -p %{buildroot}%{_localstatedir}/run/ceph
mkdir -p %{buildroot}%{_localstatedir}/log/ceph
mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/tmp
mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/mon
mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/osd
mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/mds
mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/mgr
mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/crash
mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/crash/posted
mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/radosgw
mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/bootstrap-osd
mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/bootstrap-mds
mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/bootstrap-rgw
mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/bootstrap-mgr
mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/bootstrap-rbd
mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/bootstrap-rbd-mirror
# prometheus alerts
install -m 644 -D monitoring/ceph-mixin/prometheus_alerts.yml %{buildroot}/etc/prometheus/ceph/ceph_default_alerts.yml
# SNMP MIB
install -m 644 -D -t %{buildroot}%{_datadir}/snmp/mibs monitoring/snmp/CEPH-MIB.txt
%if 0%{?suse_version}
# create __pycache__ directories and their contents
%py3_compile %{buildroot}%{python3_sitelib}
# hardlink duplicate files under /usr to save space
%fdupes %{buildroot}%{_prefix}
%endif
%if 0%{?rhel} == 8 || 0%{?fedora} >= 33
%py_byte_compile %{__python3} %{buildroot}%{python3_sitelib}
%endif
#################################################################################
# files and systemd scriptlets
#################################################################################
%files
%files base
%{_bindir}/ceph-crash
%{_bindir}/crushtool
%{_bindir}/monmaptool
%{_bindir}/osdmaptool
%{_bindir}/ceph-kvstore-tool
%{_bindir}/ceph-run
%{_presetdir}/50-ceph.preset
%{_sbindir}/ceph-create-keys
%dir %{_libexecdir}/ceph
%{_libexecdir}/ceph/ceph_common.sh
%dir %{_libdir}/rados-classes
%{_libdir}/rados-classes/*
%dir %{_libdir}/ceph
%dir %{_libdir}/ceph/erasure-code
%{_libdir}/ceph/erasure-code/libec_*.so*
%dir %{_libdir}/ceph/extblkdev
%{_libdir}/ceph/extblkdev/libceph_*.so*
%dir %{_libdir}/ceph/compressor
%{_libdir}/ceph/compressor/libceph_*.so*
%{_unitdir}/ceph-crash.service
%dir %{_libdir}/ceph/crypto
%{_libdir}/ceph/crypto/libceph_*.so*
%if %{with lttng}
%{_libdir}/libos_tp.so*
%{_libdir}/libosd_tp.so*
%endif
%config(noreplace) %{_sysconfdir}/logrotate.d/ceph
%if 0%{?fedora} || 0%{?rhel}
%config(noreplace) %{_sysconfdir}/sysconfig/ceph
%endif
%if 0%{?suse_version}
%{_fillupdir}/sysconfig.*
%endif
%{_unitdir}/ceph.target
%{_mandir}/man8/ceph-create-keys.8*
%{_mandir}/man8/ceph-run.8*
%{_mandir}/man8/crushtool.8*
%{_mandir}/man8/osdmaptool.8*
%{_mandir}/man8/monmaptool.8*
%{_mandir}/man8/ceph-kvstore-tool.8*
# set up placeholder directories
%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/crash
%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/crash/posted
%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/tmp
%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/bootstrap-osd
%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/bootstrap-mds
%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/bootstrap-rgw
%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/bootstrap-mgr
%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/bootstrap-rbd
%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/bootstrap-rbd-mirror
%{_sysconfdir}/sudoers.d/ceph-smartctl
%post base
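# update the runtime linker cache for the shared libraries installed by this package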
/sbin/ldconfig
%if 0%{?suse_version}
%fillup_only
if [ $1 -eq 1 ] ; then
/usr/bin/systemctl preset ceph.target ceph-crash.service >/dev/null 2>&1 || :
fi
%endif
%if 0%{?fedora} || 0%{?rhel}
%systemd_post ceph.target ceph-crash.service
%endif
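# $1 is the number of instances of this package left after the transaction:
# 1 means a fresh install, so only then start the target and crash service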
if [ $1 -eq 1 ] ; then
/usr/bin/systemctl start ceph.target ceph-crash.service >/dev/null 2>&1 || :
fi
%preun base
%if 0%{?suse_version}
%service_del_preun ceph.target ceph-crash.service
%endif
%if 0%{?fedora} || 0%{?rhel}
%systemd_preun ceph.target ceph-crash.service
%endif
%postun base
/sbin/ldconfig
%systemd_postun ceph.target
%pre -n cephadm
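# dedicated system account for mgr/cephadm; its ssh key is installed into the
# authorized_keys placeholder shipped below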
getent group cephadm >/dev/null || groupadd -r cephadm
getent passwd cephadm >/dev/null || useradd -r -g cephadm -s /bin/bash -c "cephadm user for mgr/cephadm" -d %{_sharedstatedir}/cephadm cephadm
exit 0
%if ! 0%{?suse_version}
%postun -n cephadm
[ $1 -ne 0 ] || userdel cephadm || :
%endif
%files -n cephadm
%{_sbindir}/cephadm
%{_mandir}/man8/cephadm.8*
%attr(0700,cephadm,cephadm) %dir %{_sharedstatedir}/cephadm
%attr(0700,cephadm,cephadm) %dir %{_sharedstatedir}/cephadm/.ssh
%config(noreplace) %attr(0600,cephadm,cephadm) %{_sharedstatedir}/cephadm/.ssh/authorized_keys
%files common
%dir %{_docdir}/ceph
%doc %{_docdir}/ceph/sample.ceph.conf
%license %{_docdir}/ceph/COPYING
%{_bindir}/ceph
%{_bindir}/ceph-authtool
%{_bindir}/ceph-conf
%{_bindir}/ceph-dencoder
%{_bindir}/ceph-rbdnamer
%{_bindir}/ceph-syn
%{_bindir}/cephfs-data-scan
%{_bindir}/cephfs-journal-tool
%{_bindir}/cephfs-table-tool
%{_bindir}/crushdiff
%{_bindir}/rados
%{_bindir}/radosgw-admin
%{_bindir}/rbd
%{_bindir}/rbd-replay
%{_bindir}/rbd-replay-many
%{_bindir}/rbdmap
%{_bindir}/rgw-gap-list
%{_bindir}/rgw-gap-list-comparator
%{_bindir}/rgw-orphan-list
%{_bindir}/rgw-restore-bucket-index
%{_sbindir}/mount.ceph
%if 0%{?suse_version} && 0%{?suse_version} < 1550
/sbin/mount.ceph
%endif
%if %{with lttng}
%{_bindir}/rbd-replay-prep
%endif
%{_bindir}/ceph-post-file
%dir %{_libdir}/ceph/denc
%{_libdir}/ceph/denc/denc-mod-*.so
%{_tmpfilesdir}/ceph-common.conf
%{_mandir}/man8/ceph-authtool.8*
%{_mandir}/man8/ceph-conf.8*
%{_mandir}/man8/ceph-dencoder.8*
%{_mandir}/man8/ceph-diff-sorted.8*
%{_mandir}/man8/ceph-rbdnamer.8*
%{_mandir}/man8/ceph-syn.8*
%{_mandir}/man8/ceph-post-file.8*
%{_mandir}/man8/ceph.8*
%{_mandir}/man8/crushdiff.8*
%{_mandir}/man8/mount.ceph.8*
%{_mandir}/man8/rados.8*
%{_mandir}/man8/radosgw-admin.8*
%{_mandir}/man8/rbd.8*
%{_mandir}/man8/rbdmap.8*
%{_mandir}/man8/rbd-replay.8*
%{_mandir}/man8/rbd-replay-many.8*
%{_mandir}/man8/rbd-replay-prep.8*
%{_mandir}/man8/rgw-orphan-list.8*
%dir %{_datadir}/ceph/
%{_datadir}/ceph/known_hosts_drop.ceph.com
%{_datadir}/ceph/id_rsa_drop.ceph.com
%{_datadir}/ceph/id_rsa_drop.ceph.com.pub
%dir %{_sysconfdir}/ceph/
%config %{_sysconfdir}/bash_completion.d/ceph
%config %{_sysconfdir}/bash_completion.d/rados
%config %{_sysconfdir}/bash_completion.d/rbd
%config %{_sysconfdir}/bash_completion.d/radosgw-admin
%config(noreplace) %{_sysconfdir}/ceph/rbdmap
%{_unitdir}/rbdmap.service
%dir %{_udevrulesdir}
%{_udevrulesdir}/50-rbd.rules
%attr(3770,ceph,ceph) %dir %{_localstatedir}/log/ceph/
%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/
%pre common
CEPH_GROUP_ID=167
CEPH_USER_ID=167
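# 167 is the uid/gid pair historically reserved for ceph on Fedora/RHEL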
%if 0%{?rhel} || 0%{?fedora}
/usr/sbin/groupadd ceph -g $CEPH_GROUP_ID -o -r 2>/dev/null || :
/usr/sbin/useradd ceph -u $CEPH_USER_ID -o -r -g ceph -s /sbin/nologin -c "Ceph daemons" -d %{_localstatedir}/lib/ceph 2>/dev/null || :
%endif
%if 0%{?suse_version}
if ! getent group ceph >/dev/null ; then
CEPH_GROUP_ID_OPTION=""
getent group $CEPH_GROUP_ID >/dev/null || CEPH_GROUP_ID_OPTION="-g $CEPH_GROUP_ID"
groupadd ceph $CEPH_GROUP_ID_OPTION -r 2>/dev/null || :
fi
if ! getent passwd ceph >/dev/null ; then
CEPH_USER_ID_OPTION=""
getent passwd $CEPH_USER_ID >/dev/null || CEPH_USER_ID_OPTION="-u $CEPH_USER_ID"
useradd ceph $CEPH_USER_ID_OPTION -r -g ceph -s /sbin/nologin 2>/dev/null || :
fi
usermod -c "Ceph storage service" \
-d %{_localstatedir}/lib/ceph \
-g ceph \
-s /sbin/nologin \
ceph
%endif
exit 0
%post common
%tmpfiles_create %{_tmpfilesdir}/ceph-common.conf
%postun common
# Package removal cleanup
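# $1 is 0 only when the last instance of the package is being erased, never on upgrade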
if [ "$1" -eq "0" ] ; then
rm -rf %{_localstatedir}/log/ceph
rm -rf %{_sysconfdir}/ceph
fi
%files mds
%{_bindir}/ceph-mds
%{_mandir}/man8/ceph-mds.8*
%{_unitdir}/ceph-mds@.service
%{_unitdir}/ceph-mds.target
%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/mds
%post mds
%if 0%{?suse_version}
if [ $1 -eq 1 ] ; then
/usr/bin/systemctl preset ceph-mds@\*.service ceph-mds.target >/dev/null 2>&1 || :
fi
%endif
%if 0%{?fedora} || 0%{?rhel}
%systemd_post ceph-mds@\*.service ceph-mds.target
%endif
if [ $1 -eq 1 ] ; then
/usr/bin/systemctl start ceph-mds.target >/dev/null 2>&1 || :
fi
%preun mds
%if 0%{?suse_version}
%service_del_preun ceph-mds@\*.service ceph-mds.target
%endif
%if 0%{?fedora} || 0%{?rhel}
%systemd_preun ceph-mds@\*.service ceph-mds.target
%endif
%postun mds
%systemd_postun ceph-mds@\*.service ceph-mds.target
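# here $1 >= 1 means at least one instance remains, i.e. this is an upgrade, not a removal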
if [ $1 -ge 1 ] ; then
# Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to
# "yes". In any case: if units are not running, do not touch them.
SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph
if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then
source $SYSCONF_CEPH
fi
if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then
/usr/bin/systemctl try-restart ceph-mds@\*.service > /dev/null 2>&1 || :
fi
fi
%files mgr
%{_bindir}/ceph-mgr
%dir %{_datadir}/ceph/mgr
%{_datadir}/ceph/mgr/mgr_module.*
%{_datadir}/ceph/mgr/mgr_util.*
%{_datadir}/ceph/mgr/object_format.*
%{_unitdir}/ceph-mgr@.service
%{_unitdir}/ceph-mgr.target
%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/mgr
%post mgr
%if 0%{?suse_version}
if [ $1 -eq 1 ] ; then
/usr/bin/systemctl preset ceph-mgr@\*.service ceph-mgr.target >/dev/null 2>&1 || :
fi
%endif
%if 0%{?fedora} || 0%{?rhel}
%systemd_post ceph-mgr@\*.service ceph-mgr.target
%endif
if [ $1 -eq 1 ] ; then
/usr/bin/systemctl start ceph-mgr.target >/dev/null 2>&1 || :
fi
%preun mgr
%if 0%{?suse_version}
%service_del_preun ceph-mgr@\*.service ceph-mgr.target
%endif
%if 0%{?fedora} || 0%{?rhel}
%systemd_preun ceph-mgr@\*.service ceph-mgr.target
%endif
%postun mgr
%systemd_postun ceph-mgr@\*.service ceph-mgr.target
if [ $1 -ge 1 ] ; then
# Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to
# "yes". In any case: if units are not running, do not touch them.
SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph
if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then
source $SYSCONF_CEPH
fi
if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then
/usr/bin/systemctl try-restart ceph-mgr@\*.service > /dev/null 2>&1 || :
fi
fi
%files mgr-dashboard
%{_datadir}/ceph/mgr/dashboard
%post mgr-dashboard
if [ $1 -eq 1 ] ; then
/usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
fi
%postun mgr-dashboard
if [ $1 -eq 1 ] ; then
/usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
fi
%files mgr-diskprediction-local
%{_datadir}/ceph/mgr/diskprediction_local
%post mgr-diskprediction-local
if [ $1 -eq 1 ] ; then
/usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
fi
%postun mgr-diskprediction-local
if [ $1 -eq 1 ] ; then
/usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
fi
%files mgr-modules-core
%dir %{_datadir}/ceph/mgr
%{_datadir}/ceph/mgr/alerts
%{_datadir}/ceph/mgr/balancer
%{_datadir}/ceph/mgr/crash
%{_datadir}/ceph/mgr/devicehealth
%{_datadir}/ceph/mgr/influx
%{_datadir}/ceph/mgr/insights
%{_datadir}/ceph/mgr/iostat
%{_datadir}/ceph/mgr/localpool
%{_datadir}/ceph/mgr/mds_autoscaler
%{_datadir}/ceph/mgr/mirroring
%{_datadir}/ceph/mgr/nfs
%{_datadir}/ceph/mgr/orchestrator
%{_datadir}/ceph/mgr/osd_perf_query
%{_datadir}/ceph/mgr/osd_support
%{_datadir}/ceph/mgr/pg_autoscaler
%{_datadir}/ceph/mgr/progress
%{_datadir}/ceph/mgr/prometheus
%{_datadir}/ceph/mgr/rbd_support
%{_datadir}/ceph/mgr/restful
%{_datadir}/ceph/mgr/rgw
%{_datadir}/ceph/mgr/selftest
%{_datadir}/ceph/mgr/snap_schedule
%{_datadir}/ceph/mgr/stats
%{_datadir}/ceph/mgr/status
%{_datadir}/ceph/mgr/telegraf
%{_datadir}/ceph/mgr/telemetry
%{_datadir}/ceph/mgr/test_orchestrator
%{_datadir}/ceph/mgr/volumes
%{_datadir}/ceph/mgr/zabbix
%files mgr-rook
%{_datadir}/ceph/mgr/rook
%post mgr-rook
if [ $1 -eq 1 ] ; then
/usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
fi
%postun mgr-rook
if [ $1 -eq 1 ] ; then
/usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
fi
%files mgr-k8sevents
%{_datadir}/ceph/mgr/k8sevents
%post mgr-k8sevents
if [ $1 -eq 1 ] ; then
/usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
fi
%postun mgr-k8sevents
if [ $1 -eq 1 ] ; then
/usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
fi
%files mgr-cephadm
%{_datadir}/ceph/mgr/cephadm
%post mgr-cephadm
if [ $1 -eq 1 ] ; then
/usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
fi
%postun mgr-cephadm
if [ $1 -eq 1 ] ; then
/usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
fi
%files mon
%{_bindir}/ceph-mon
%{_bindir}/ceph-monstore-tool
%{_mandir}/man8/ceph-mon.8*
%{_unitdir}/ceph-mon@.service
%{_unitdir}/ceph-mon.target
%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/mon
%post mon
%if 0%{?suse_version}
if [ $1 -eq 1 ] ; then
/usr/bin/systemctl preset ceph-mon@\*.service ceph-mon.target >/dev/null 2>&1 || :
fi
%endif
%if 0%{?fedora} || 0%{?rhel}
%systemd_post ceph-mon@\*.service ceph-mon.target
%endif
if [ $1 -eq 1 ] ; then
/usr/bin/systemctl start ceph-mon.target >/dev/null 2>&1 || :
fi
%preun mon
%if 0%{?suse_version}
%service_del_preun ceph-mon@\*.service ceph-mon.target
%endif
%if 0%{?fedora} || 0%{?rhel}
%systemd_preun ceph-mon@\*.service ceph-mon.target
%endif
%postun mon
%systemd_postun ceph-mon@\*.service ceph-mon.target
if [ $1 -ge 1 ] ; then
# Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to
# "yes". In any case: if units are not running, do not touch them.
SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph
if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then
source $SYSCONF_CEPH
fi
if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then
/usr/bin/systemctl try-restart ceph-mon@\*.service > /dev/null 2>&1 || :
fi
fi
%files fuse
%{_bindir}/ceph-fuse
%{_mandir}/man8/ceph-fuse.8*
%{_sbindir}/mount.fuse.ceph
%{_mandir}/man8/mount.fuse.ceph.8*
%{_unitdir}/ceph-fuse@.service
%{_unitdir}/ceph-fuse.target
%files -n cephfs-mirror
%{_bindir}/cephfs-mirror
%{_mandir}/man8/cephfs-mirror.8*
%{_unitdir}/cephfs-mirror@.service
%{_unitdir}/cephfs-mirror.target
%post -n cephfs-mirror
%if 0%{?suse_version}
if [ $1 -eq 1 ] ; then
/usr/bin/systemctl preset cephfs-mirror@\*.service cephfs-mirror.target >/dev/null 2>&1 || :
fi
%endif
%if 0%{?fedora} || 0%{?rhel}
%systemd_post cephfs-mirror@\*.service cephfs-mirror.target
%endif
if [ $1 -eq 1 ] ; then
/usr/bin/systemctl start cephfs-mirror.target >/dev/null 2>&1 || :
fi
%preun -n cephfs-mirror
%if 0%{?suse_version}
%service_del_preun cephfs-mirror@\*.service cephfs-mirror.target
%endif
%if 0%{?fedora} || 0%{?rhel}
%systemd_preun cephfs-mirror@\*.service cephfs-mirror.target
%endif
%postun -n cephfs-mirror
%systemd_postun cephfs-mirror@\*.service cephfs-mirror.target
if [ $1 -ge 1 ] ; then
# Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to
# "yes". In any case: if units are not running, do not touch them.
SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph
if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then
source $SYSCONF_CEPH
fi
if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then
/usr/bin/systemctl try-restart cephfs-mirror@\*.service > /dev/null 2>&1 || :
fi
fi
%files -n ceph-exporter
%{_bindir}/ceph-exporter
%{_unitdir}/ceph-exporter.service
%files -n rbd-fuse
%{_bindir}/rbd-fuse
%{_mandir}/man8/rbd-fuse.8*
%files -n rbd-mirror
%{_bindir}/rbd-mirror
%{_mandir}/man8/rbd-mirror.8*
%{_unitdir}/ceph-rbd-mirror@.service
%{_unitdir}/ceph-rbd-mirror.target
%post -n rbd-mirror
%if 0%{?suse_version}
if [ $1 -eq 1 ] ; then
/usr/bin/systemctl preset ceph-rbd-mirror@\*.service ceph-rbd-mirror.target >/dev/null 2>&1 || :
fi
%endif
%if 0%{?fedora} || 0%{?rhel}
%systemd_post ceph-rbd-mirror@\*.service ceph-rbd-mirror.target
%endif
if [ $1 -eq 1 ] ; then
/usr/bin/systemctl start ceph-rbd-mirror.target >/dev/null 2>&1 || :
fi
%preun -n rbd-mirror
%if 0%{?suse_version}
%service_del_preun ceph-rbd-mirror@\*.service ceph-rbd-mirror.target
%endif
%if 0%{?fedora} || 0%{?rhel}
%systemd_preun ceph-rbd-mirror@\*.service ceph-rbd-mirror.target
%endif
%postun -n rbd-mirror
%systemd_postun ceph-rbd-mirror@\*.service ceph-rbd-mirror.target
if [ $1 -ge 1 ] ; then
# Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to
# "yes". In any case: if units are not running, do not touch them.
SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph
if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then
source $SYSCONF_CEPH
fi
if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then
/usr/bin/systemctl try-restart ceph-rbd-mirror@\*.service > /dev/null 2>&1 || :
fi
fi
%files immutable-object-cache
%{_bindir}/ceph-immutable-object-cache
%{_mandir}/man8/ceph-immutable-object-cache.8*
%{_unitdir}/ceph-immutable-object-cache@.service
%{_unitdir}/ceph-immutable-object-cache.target
%post immutable-object-cache
%if 0%{?suse_version}
if [ $1 -eq 1 ] ; then
/usr/bin/systemctl preset ceph-immutable-object-cache@\*.service ceph-immutable-object-cache.target >/dev/null 2>&1 || :
fi
%endif
%if 0%{?fedora} || 0%{?rhel}
%systemd_post ceph-immutable-object-cache@\*.service ceph-immutable-object-cache.target
%endif
if [ $1 -eq 1 ] ; then
/usr/bin/systemctl start ceph-immutable-object-cache.target >/dev/null 2>&1 || :
fi
%preun immutable-object-cache
%if 0%{?suse_version}
%service_del_preun ceph-immutable-object-cache@\*.service ceph-immutable-object-cache.target
%endif
%if 0%{?fedora} || 0%{?rhel}
%systemd_preun ceph-immutable-object-cache@\*.service ceph-immutable-object-cache.target
%endif
%postun immutable-object-cache
%systemd_postun ceph-immutable-object-cache@\*.service ceph-immutable-object-cache.target
if [ $1 -ge 1 ] ; then
# Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to
# "yes". In any case: if units are not running, do not touch them.
SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph
if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then
source $SYSCONF_CEPH
fi
if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then
/usr/bin/systemctl try-restart ceph-immutable-object-cache@\*.service > /dev/null 2>&1 || :
fi
fi
%files -n rbd-nbd
%{_bindir}/rbd-nbd
%{_mandir}/man8/rbd-nbd.8*
%dir %{_libexecdir}/rbd-nbd
%{_libexecdir}/rbd-nbd/rbd-nbd_quiesce
%files radosgw
%{_bindir}/ceph-diff-sorted
%{_bindir}/radosgw
%{_bindir}/radosgw-token
%{_bindir}/radosgw-es
%{_bindir}/radosgw-object-expirer
%{_bindir}/rgw-policy-check
%{_mandir}/man8/radosgw.8*
%{_mandir}/man8/rgw-policy-check.8*
%dir %{_localstatedir}/lib/ceph/radosgw
%{_unitdir}/ceph-radosgw@.service
%{_unitdir}/ceph-radosgw.target
%post radosgw
%if 0%{?suse_version}
if [ $1 -eq 1 ] ; then
/usr/bin/systemctl preset ceph-radosgw@\*.service ceph-radosgw.target >/dev/null 2>&1 || :
fi
%endif
%if 0%{?fedora} || 0%{?rhel}
%systemd_post ceph-radosgw@\*.service ceph-radosgw.target
%endif
if [ $1 -eq 1 ] ; then
/usr/bin/systemctl start ceph-radosgw.target >/dev/null 2>&1 || :
fi
%preun radosgw
%if 0%{?suse_version}
%service_del_preun ceph-radosgw@\*.service ceph-radosgw.target
%endif
%if 0%{?fedora} || 0%{?rhel}
%systemd_preun ceph-radosgw@\*.service ceph-radosgw.target
%endif
%postun radosgw
%systemd_postun ceph-radosgw@\*.service ceph-radosgw.target
if [ $1 -ge 1 ] ; then
# Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to
# "yes". In any case: if units are not running, do not touch them.
SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph
if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then
source $SYSCONF_CEPH
fi
if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then
/usr/bin/systemctl try-restart ceph-radosgw@\*.service > /dev/null 2>&1 || :
fi
fi
%files osd
%{_bindir}/ceph-clsinfo
%{_bindir}/ceph-bluestore-tool
%{_bindir}/ceph-erasure-code-tool
%{_bindir}/ceph-objectstore-tool
%{_bindir}/ceph-osdomap-tool
%{_bindir}/ceph-osd
%{_libexecdir}/ceph/ceph-osd-prestart.sh
%{_mandir}/man8/ceph-clsinfo.8*
%{_mandir}/man8/ceph-osd.8*
%{_mandir}/man8/ceph-bluestore-tool.8*
%{_unitdir}/ceph-osd@.service
%{_unitdir}/ceph-osd.target
%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/osd
%config(noreplace) %{_sysctldir}/90-ceph-osd.conf
%post osd
%if 0%{?suse_version}
if [ $1 -eq 1 ] ; then
/usr/bin/systemctl preset ceph-osd@\*.service ceph-osd.target >/dev/null 2>&1 || :
fi
%endif
%if 0%{?fedora} || 0%{?rhel}
%systemd_post ceph-osd@\*.service ceph-volume@\*.service ceph-osd.target
%endif
if [ $1 -eq 1 ] ; then
/usr/bin/systemctl start ceph-osd.target >/dev/null 2>&1 || :
fi
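# apply the newly installed OSD sysctl drop-in immediately; fall back to
# calling systemd-sysctl directly where the sysctl_apply macro is not defined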
%if 0%{?sysctl_apply}
%sysctl_apply 90-ceph-osd.conf
%else
/usr/lib/systemd/systemd-sysctl %{_sysctldir}/90-ceph-osd.conf > /dev/null 2>&1 || :
%endif
%preun osd
%if 0%{?suse_version}
%service_del_preun ceph-osd@\*.service ceph-osd.target
%endif
%if 0%{?fedora} || 0%{?rhel}
%systemd_preun ceph-osd@\*.service ceph-osd.target
%endif
%postun osd
%systemd_postun ceph-osd@\*.service ceph-volume@\*.service ceph-osd.target
if [ $1 -ge 1 ] ; then
# Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to
# "yes". In any case: if units are not running, do not touch them.
SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph
if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then
source $SYSCONF_CEPH
fi
if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then
/usr/bin/systemctl try-restart ceph-osd@\*.service ceph-volume@\*.service > /dev/null 2>&1 || :
fi
fi
%if 0%{with seastar}
%files crimson-osd
%{_bindir}/crimson-osd
%endif
%files volume
%{_sbindir}/ceph-volume
%{_sbindir}/ceph-volume-systemd
%dir %{python3_sitelib}/ceph_volume
%{python3_sitelib}/ceph_volume/*
%{python3_sitelib}/ceph_volume-*
%{_mandir}/man8/ceph-volume.8*
%{_mandir}/man8/ceph-volume-systemd.8*
%{_unitdir}/ceph-volume@.service
%post volume
%if 0%{?suse_version}
if [ $1 -eq 1 ] ; then
/usr/bin/systemctl preset ceph-volume@\*.service >/dev/null 2>&1 || :
fi
%endif
%if 0%{?fedora} || 0%{?rhel}
%systemd_post ceph-volume@\*.service
%endif
%preun volume
%if 0%{?suse_version}
%service_del_preun ceph-volume@\*.service
%endif
%if 0%{?fedora} || 0%{?rhel}
%systemd_preun ceph-volume@\*.service
%endif
%postun volume
%systemd_postun ceph-volume@\*.service
if [ $1 -ge 1 ] ; then
# Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to
# "yes". In any case: if units are not running, do not touch them.
SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph
if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then
source $SYSCONF_CEPH
fi
if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then
/usr/bin/systemctl try-restart ceph-volume@\*.service > /dev/null 2>&1 || :
fi
fi
%if %{with ocf}
%files resource-agents
%dir %{_prefix}/lib/ocf
%dir %{_prefix}/lib/ocf/resource.d
%dir %{_prefix}/lib/ocf/resource.d/ceph
%attr(0755,-,-) %{_prefix}/lib/ocf/resource.d/ceph/rbd
%endif
%files -n librados2
%doc %{_docdir}/ceph/COPYING
%{_libdir}/librados.so.*
%dir %{_libdir}/ceph
%{_libdir}/ceph/libceph-common.so.*
%if %{with lttng}
%{_libdir}/librados_tp.so.*
%endif
%dir %{_sysconfdir}/ceph
%post -n librados2 -p /sbin/ldconfig
%postun -n librados2 -p /sbin/ldconfig
%files -n librados-devel
%dir %{_includedir}/rados
%{_includedir}/rados/librados.h
%{_includedir}/rados/rados_types.h
%{_libdir}/librados.so
%if %{with lttng}
%{_libdir}/librados_tp.so
%endif
%{_bindir}/librados-config
%{_mandir}/man8/librados-config.8*
%files -n libradospp-devel
%dir %{_includedir}/rados
%{_includedir}/rados/buffer.h
%{_includedir}/rados/buffer_fwd.h
%{_includedir}/rados/crc32c.h
%{_includedir}/rados/inline_memory.h
%{_includedir}/rados/librados.hpp
%{_includedir}/rados/librados_fwd.hpp
%{_includedir}/rados/page.h
%{_includedir}/rados/rados_types.hpp
%files -n python%{python3_pkgversion}-rados
%{python3_sitearch}/rados.cpython*.so
%{python3_sitearch}/rados-*.egg-info
%files -n libcephsqlite
%{_libdir}/libcephsqlite.so
%post -n libcephsqlite -p /sbin/ldconfig
%postun -n libcephsqlite -p /sbin/ldconfig
%files -n libcephsqlite-devel
%{_includedir}/libcephsqlite.h
%if 0%{with libradosstriper}
%files -n libradosstriper1
%{_libdir}/libradosstriper.so.*
%post -n libradosstriper1 -p /sbin/ldconfig
%postun -n libradosstriper1 -p /sbin/ldconfig
%files -n libradosstriper-devel
%dir %{_includedir}/radosstriper
%{_includedir}/radosstriper/libradosstriper.h
%{_includedir}/radosstriper/libradosstriper.hpp
%{_libdir}/libradosstriper.so
%endif
%files -n librbd1
%doc %{_docdir}/ceph/COPYING
%{_libdir}/librbd.so.*
%if %{with lttng}
%{_libdir}/librbd_tp.so.*
%endif
%dir %{_libdir}/ceph/librbd
%{_libdir}/ceph/librbd/libceph_*.so*
%post -n librbd1 -p /sbin/ldconfig
%postun -n librbd1 -p /sbin/ldconfig
%files -n librbd-devel
%dir %{_includedir}/rbd
%{_includedir}/rbd/librbd.h
%{_includedir}/rbd/librbd.hpp
%{_includedir}/rbd/features.h
%{_libdir}/librbd.so
%if %{with lttng}
%{_libdir}/librbd_tp.so
%endif
%files -n librgw2
%{_libdir}/librgw.so.*
%if %{with lttng}
%{_libdir}/librgw_op_tp.so.*
%{_libdir}/librgw_rados_tp.so.*
%endif
%post -n librgw2 -p /sbin/ldconfig
%postun -n librgw2 -p /sbin/ldconfig
%files -n librgw-devel
%dir %{_includedir}/rados
%{_includedir}/rados/librgw.h
%{_includedir}/rados/rgw_file.h
%{_libdir}/librgw.so
%if %{with lttng}
%{_libdir}/librgw_op_tp.so
%{_libdir}/librgw_rados_tp.so
%endif
%files -n python%{python3_pkgversion}-rgw
%{python3_sitearch}/rgw.cpython*.so
%{python3_sitearch}/rgw-*.egg-info
%files -n python%{python3_pkgversion}-rbd
%{python3_sitearch}/rbd.cpython*.so
%{python3_sitearch}/rbd-*.egg-info
%files -n libcephfs2
%{_libdir}/libcephfs.so.*
%dir %{_sysconfdir}/ceph
%post -n libcephfs2 -p /sbin/ldconfig
%postun -n libcephfs2 -p /sbin/ldconfig
%files -n libcephfs-devel
%dir %{_includedir}/cephfs
%{_includedir}/cephfs/libcephfs.h
%{_includedir}/cephfs/ceph_ll_client.h
%{_includedir}/cephfs/types.h
%dir %{_includedir}/cephfs/metrics
%{_includedir}/cephfs/metrics/Types.h
%{_libdir}/libcephfs.so
%files -n python%{python3_pkgversion}-cephfs
%{python3_sitearch}/cephfs.cpython*.so
%{python3_sitearch}/cephfs-*.egg-info
%files -n python%{python3_pkgversion}-ceph-argparse
%{python3_sitelib}/ceph_argparse.py
%{python3_sitelib}/__pycache__/ceph_argparse.cpython*.py*
%{python3_sitelib}/ceph_daemon.py
%{python3_sitelib}/__pycache__/ceph_daemon.cpython*.py*
%files -n python%{python3_pkgversion}-ceph-common
%{python3_sitelib}/ceph
%{python3_sitelib}/ceph-*.egg-info
%if 0%{with cephfs_shell}
%files -n cephfs-shell
%{python3_sitelib}/cephfs_shell-*.egg-info
%{_bindir}/cephfs-shell
%{_mandir}/man8/cephfs-shell.8*
%endif
%files -n cephfs-top
%{python3_sitelib}/cephfs_top-*.egg-info
%{_bindir}/cephfs-top
%{_mandir}/man8/cephfs-top.8*
%if 0%{with ceph_test_package}
%files -n ceph-test
%{_bindir}/ceph-client-debug
%{_bindir}/ceph_bench_log
%{_bindir}/ceph_multi_stress_watch
%{_bindir}/ceph_erasure_code_benchmark
%{_bindir}/ceph_omapbench
%{_bindir}/ceph_objectstore_bench
%{_bindir}/ceph_perf_objectstore
%{_bindir}/ceph_perf_local
%{_bindir}/ceph_perf_msgr_client
%{_bindir}/ceph_perf_msgr_server
%{_bindir}/ceph_psim
%{_bindir}/ceph_radosacl
%{_bindir}/ceph_rgw_jsonparser
%{_bindir}/ceph_rgw_multiparser
%{_bindir}/ceph_scratchtool
%{_bindir}/ceph_scratchtoolpp
%{_bindir}/ceph_test_*
%{_bindir}/ceph-coverage
%{_bindir}/ceph-debugpack
%{_bindir}/ceph-dedup-tool
%if 0%{with seastar}
%{_bindir}/crimson-store-nbd
%endif
%{_mandir}/man8/ceph-debugpack.8*
%dir %{_libdir}/ceph
%{_libdir}/ceph/ceph-monstore-update-crush.sh
%endif
%if 0%{with cephfs_java}
%files -n libcephfs_jni1
%{_libdir}/libcephfs_jni.so.*
%post -n libcephfs_jni1 -p /sbin/ldconfig
%postun -n libcephfs_jni1 -p /sbin/ldconfig
%files -n libcephfs_jni-devel
%{_libdir}/libcephfs_jni.so
%files -n cephfs-java
%{_javadir}/libcephfs.jar
%{_javadir}/libcephfs-test.jar
%endif
%files -n rados-objclass-devel
%dir %{_includedir}/rados
%{_includedir}/rados/objclass.h
%if 0%{with selinux}
%files selinux
%attr(0600,root,root) %{_datadir}/selinux/packages/ceph.pp
%{_datadir}/selinux/devel/include/contrib/ceph.if
%{_mandir}/man8/ceph_selinux.8*
%post selinux
# backup file_contexts before update
. /etc/selinux/config
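# SELINUXTYPE from /etc/selinux/config selects the active policy store whose
# file_contexts we back up before loading the new module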
FILE_CONTEXT=/etc/selinux/${SELINUXTYPE}/contexts/files/file_contexts
cp ${FILE_CONTEXT} ${FILE_CONTEXT}.pre
# Install the policy
/usr/sbin/semodule -i %{_datadir}/selinux/packages/ceph.pp
# Load the policy if SELinux is enabled
if ! /usr/sbin/selinuxenabled; then
# Do not relabel if SELinux is not enabled
exit 0
fi
if diff ${FILE_CONTEXT} ${FILE_CONTEXT}.pre > /dev/null 2>&1; then
# Do not relabel if file contexts did not change
exit 0
fi
# Stop ceph.target while relabeling if CEPH_AUTO_RESTART_ON_UPGRADE=yes
SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph
if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then
source $SYSCONF_CEPH
fi
# Check whether the daemons are running
/usr/bin/systemctl status ceph.target > /dev/null 2>&1
STATUS=$?
# Stop the daemons if they were running
if test $STATUS -eq 0; then
if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then
/usr/bin/systemctl stop ceph.target > /dev/null 2>&1
fi
fi
# Relabel the files whose contexts changed (on first install this covers everything the new policy labels)
/usr/sbin/fixfiles -C ${FILE_CONTEXT}.pre restore 2> /dev/null
rm -f ${FILE_CONTEXT}.pre
# The fixfiles command won't fix the label of /var/run/ceph
/usr/sbin/restorecon -R /var/run/ceph > /dev/null 2>&1
# Start the daemons iff they were running before
if test $STATUS -eq 0; then
if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then
/usr/bin/systemctl start ceph.target > /dev/null 2>&1 || :
fi
fi
exit 0
%postun selinux
if [ $1 -eq 0 ]; then
# backup file_contexts before update
. /etc/selinux/config
FILE_CONTEXT=/etc/selinux/${SELINUXTYPE}/contexts/files/file_contexts
cp ${FILE_CONTEXT} ${FILE_CONTEXT}.pre
# Remove the module
/usr/sbin/semodule -n -r ceph > /dev/null 2>&1
# Reload the policy if SELinux is enabled
if ! /usr/sbin/selinuxenabled ; then
# Do not relabel if SELinux is not enabled
exit 0
fi
# Stop ceph.target while relabeling if CEPH_AUTO_RESTART_ON_UPGRADE=yes
SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph
if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then
source $SYSCONF_CEPH
fi
# Check whether the daemons are running
/usr/bin/systemctl status ceph.target > /dev/null 2>&1
STATUS=$?
# Stop the daemons if they were running
if test $STATUS -eq 0; then
if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then
/usr/bin/systemctl stop ceph.target > /dev/null 2>&1
fi
fi
/usr/sbin/fixfiles -C ${FILE_CONTEXT}.pre restore 2> /dev/null
rm -f ${FILE_CONTEXT}.pre
# The fixfiles command won't fix the label of /var/run/ceph
/usr/sbin/restorecon -R /var/run/ceph > /dev/null 2>&1
# Start the daemons if they were running before
if test $STATUS -eq 0; then
if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then
/usr/bin/systemctl start ceph.target > /dev/null 2>&1 || :
fi
fi
fi
exit 0
%endif
%files grafana-dashboards
%if 0%{?suse_version}
%attr(0755,root,root) %dir %{_sysconfdir}/grafana
%attr(0755,root,root) %dir %{_sysconfdir}/grafana/dashboards
%endif
%attr(0755,root,root) %dir %{_sysconfdir}/grafana/dashboards/ceph-dashboard
%config %{_sysconfdir}/grafana/dashboards/ceph-dashboard/*
%files prometheus-alerts
%if 0%{?suse_version}
%attr(0755,root,root) %dir %{_sysconfdir}/prometheus
%endif
%attr(0755,root,root) %dir %{_sysconfdir}/prometheus/ceph
%config %{_sysconfdir}/prometheus/ceph/ceph_default_alerts.yml
%files mib
%attr(0755,root,root) %dir %{_datadir}/snmp
%{_datadir}/snmp/mibs
%files node-proxy
%{_sbindir}/ceph-node-proxy
%dir %{python3_sitelib}/ceph_node_proxy
%{python3_sitelib}/ceph_node_proxy/*
%{python3_sitelib}/ceph_node_proxy-*
%changelog
* Mon Sep 23 2024 Kaleb S. KEITHLEY <kkeithle[at]redhat.com> - 2:18.2.4-2
- ceph-18.2.4, missing backport to .../include/rados/rgw_file.h
* Mon Jul 15 2024 Kaleb S. KEITHLEY <kkeithle[at]redhat.com> - 2:18.2.4-1
- ceph-18.2.4 GA
* Mon Apr 29 2024 Kaleb S. KEITHLEY <kkeithle[at]redhat.com> - 2:18.2.3-1
- ceph-18.2.3 GA
* Thu Mar 7 2024 Kaleb S. KEITHLEY <kkeithle[at]redhat.com> - 2:18.2.2-1
- ceph-18.2.2 GA
* Tue Dec 19 2023 Kaleb S. KEITHLEY <kkeithle[at]redhat.com> - 2:18.2.1-1
- ceph-18.2.1, incorporate changes from *final* 18.2.1 release from
https://download.ceph.com/rpm-18.2.1/el9/SRPMS/ceph-18.2.1-0.el9.src.rpm
* Wed Nov 15 2023 Kaleb S. KEITHLEY <kkeithle[at]redhat.com> - 2:18.2.1-1
- ceph-18.2.1 GA
* Thu Nov 2 2023 Kaleb S. KEITHLEY <kkeithle[at]redhat.com> - 2:18.2.0-2
- ceph-18.2.0, Requires: fuse -> fuse3
* Fri Aug 4 2023 Kaleb S. KEITHLEY <kkeithle[at]redhat.com> - 2:18.2.0-1
- ceph-18.2.0 GA
* Wed Jul 26 2023 Kaleb S. KEITHLEY <kkeithle[at]redhat.com> - 2:18.1.3-0.1
- ceph-18.1.3 RC4
* Mon Jul 10 2023 Kaleb S. KEITHLEY <kkeithle[at]redhat.com> - 2:18.1.2-0.2
- ceph-18.1.2 RC3, rebuild with libarrow->thrift
* Thu Jun 29 2023 Kaleb S. KEITHLEY <kkeithle[at]redhat.com> - 2:18.1.2-0.1
- ceph-18.1.2 RC3
* Sun Jun 18 2023 Kaleb S. KEITHLEY <kkeithle[at]redhat.com> - 2:18.1.1-0.1
- ceph-18.1.1 RC2
* Tue Jun 13 2023 Kaleb S. KEITHLEY <kkeithle[at]redhat.com> - 2:18.1.0-0.1
- ceph-18.1.0 RC1
* Wed Jul 6 2022 Kaleb S. KEITHLEY <kkeithle[at]redhat.com> - 2:17.2.1-3
- enable cephfs-shell
* Tue Jun 28 2022 Kaleb S. KEITHLEY <kkeithle[at]redhat.com> - 2:17.2.1-2
- ceph-17.2.1, rhbz#2101744, rebuild with gtest, gmock 1.12.0
- fix inconsistent tabs, spaces
- temporarily disable cephfs-shell until install issues (python3-11?) are resolved
* Thu Jun 23 2022 Kaleb S. KEITHLEY <kkeithle[at]redhat.com> - 2:17.2.1-1
- ceph-17.2.1 GA
* Fri Jun 17 2022 Robert-André Mauchin <zebob.m@gmail.com> - 2:17.2.0-7
- Rebuilt for CVE-2022-1996, CVE-2022-24675, CVE-2022-28327, CVE-2022-27191,
CVE-2022-29526, CVE-2022-30629
* Wed Jun 15 2022 Python Maint <python-maint@redhat.com> - 2:17.2.0-6
- Rebuilt for Python 3.11
* Thu May 19 2022 Kaleb S. KEITHLEY <kkeithle[at]redhat.com> - 2:17.2.0-5
- Rebuild for rocksdb 7.2.2
* Wed May 11 2022 Thomas Rodgers <trodgers@redhat.com> - 2:17.2.0-4
- Rebuilt for Boost 1.78
* Wed May 4 2022 Kaleb S. KEITHLEY <kkeithle[at]redhat.com> - 2:17.2.0-3
- 17.2.0 w/ -DWITH_SYSTEM_ARROW
* Wed May 04 2022 Thomas Rodgers <trodgers@redhat.com> - 2:17.2.0-2
- Rebuilt for Boost 1.78
* Wed Apr 20 2022 Kaleb S. KEITHLEY <kkeithle[at]redhat.com> - 2:17.2.0-1
- 17.2.0 GA
* Thu Apr 14 2022 Kaleb S. KEITHLEY <kkeithle[at]redhat.com> - 2:17.1.0-0.10.201.gcc420538
- 17.1.0 snapshot 201
* Fri Apr 8 2022 Kaleb S. KEITHLEY <kkeithle[at]redhat.com> - 2:17.1.0-0.9.175.g086c8f84
- 17.1.0 snapshot 175
* Mon Mar 28 2022 Kaleb S. KEITHLEY <kkeithle[at]redhat.com> - 2:17.1.0-0.7.123.g14f44feb
- 17.1.0 snapshot 123
* Thu Mar 24 2022 Kaleb S. KEITHLEY <kkeithle[at]redhat.com> - 2:17.1.0-0.6.70-g06806b4d
- 17.1.0 snapshot 70
* Mon Mar 21 2022 Kaleb S. KEITHLEY <kkeithle[at]redhat.com> - 2:17.1.0-0.5.56-g60fdd357
- 17.1.0 snapshot 56
* Thu Mar 17 2022 Kaleb S. KEITHLEY <kkeithle[at]redhat.com> - 2:17.1.0-0.4.31-g1ccf6db7
- 17.1.0 snapshot 31 plus rhbz#2064219 (ceph #53266, #54561)
* Wed Mar 16 2022 Kaleb S. KEITHLEY <kkeithle[at]redhat.com> - 2:17.1.0-0.3.28-g77b78287
- 17.1.0 snapshot 28
* Sat Mar 12 2022 Kaleb S. KEITHLEY <kkeithle[at]redhat.com> - 2:17.1.0-0.2.rc1
- 17.1.0 RC1
* Mon Feb 28 2022 Kaleb S. KEITHLEY <kkeithle[at]redhat.com> - 2:17.1.0-0.1.rc1
- 17.1.0 RC1