diff --git a/.github/workflows/link-validator.yml b/.github/workflows/link-validator.yml index c8508c151..be5f45a7c 100644 --- a/.github/workflows/link-validator.yml +++ b/.github/workflows/link-validator.yml @@ -1,5 +1,16 @@ -on: [pull_request] name: Check links for modified files + +on: + workflow_dispatch: + schedule: + - cron: "0 0 * * *" + push: + branches: + - main + pull_request: # Add this section + branches: + - main + jobs: markdown-link-check: runs-on: ubuntu-latest diff --git a/.gitignore b/.gitignore index ad0a4485d..320bc8f1c 100644 --- a/.gitignore +++ b/.gitignore @@ -2,3 +2,5 @@ .idea .DS_Store node_modules +Tests/kaas/results/ +*.tar.gz diff --git a/.zuul.d/config.yaml b/.zuul.d/config.yaml new file mode 100644 index 000000000..4490f6de2 --- /dev/null +++ b/.zuul.d/config.yaml @@ -0,0 +1,52 @@ +--- +- project: + name: SovereignCloudStack/standards + default-branch: main + merge-mode: "squash-merge" + periodic-daily: + jobs: + - scs-check-pco-prod1 + - scs-check-pco-prod2 + - scs-check-pco-prod3 + - scs-check-regio-a + - scs-check-wavestack + periodic-hourly: + jobs: + - scs-check-gx-scs + post: + jobs: + - scs-check-gx-scs +- job: + name: scs-check-gx-scs + parent: base + secrets: + - name: clouds_conf + secret: SECRET_STANDARDS + vars: + cloud: gx-scs + run: playbooks/compliance_check.yaml +- job: + name: scs-check-pco-prod3 + parent: scs-check-gx-scs + vars: + cloud: pco-prod3 +- job: + name: scs-check-pco-prod2 + parent: scs-check-gx-scs + vars: + cloud: pco-prod2 +- job: + name: scs-check-pco-prod1 + parent: scs-check-gx-scs + vars: + cloud: pco-prod1 +- job: + name: scs-check-regio-a + parent: scs-check-gx-scs + vars: + cloud: regio-a +- job: + name: scs-check-wavestack + parent: scs-check-gx-scs + vars: + cloud: wavestack diff --git a/.zuul.d/secure.yaml b/.zuul.d/secure.yaml new file mode 100644 index 000000000..aa40fc8ad --- /dev/null +++ b/.zuul.d/secure.yaml @@ -0,0 +1,136 @@ +--- +- secret: + name: SECRET_STANDARDS + data: + gx_scs_ac_id: !encrypted/pkcs1-oaep + - b/SnLk91ZRXkFj22EjOZk+vqqJIDySsGL9WzaR8nZntJOYfhkqVbp5AV9KDuqP/bj9MrR + yDc2hdkBesm1d62ynjQ94CjP8p7Lhs45FFyOcxGWQwMairD4YtnFsvKfvYtp3mz13n0gF + HOFGycm0CZwO1cETUJFB2O9ekbI9T5iO6PmQiwWbWbT/5EJu8bjAUaLV7ZyGsCZ22FQ8E + 4dbs+2ShRsckitp7iBDWAzsPzX/aq8xuzoZ9Zf0DlHXuQrqENkx721QygsNLxb4dVC8e1 + vT8R6Oy0MBGfn667Ob1yfquileryCu+eXmFPzKZwxn2IWpl3IdSYEs0ZSFkd9ZEuA4UIE + bolgg0hXSCzxoI9M+b0+FTvNmYQw7u4tFJ6YPLhs9QJHhJVj7oywwrZRumrD0XgPzHQgW + RhrL6OS8ChvZ5yjZdRK9L+pDM2MW4kKtKzmXZ5RpoMzNoh8Mkns5YlPrikrkQsYaiq0bx + 540qoAZl+zL0SiV2Z7t8VdGwEroBDpEIrPfIboBPo9eyRbGUKRk9DqiJ+wqMhkEQ0Lu+T + whlE9WShj0BNs4mghjUg5WY9EMmH4IVFTxvbcr2UhfcnGxJJ0I+MfaAP4G8UZ0rRohYgi + i8YmbOQ7NCpyRqz0IniDODo2Cz76/NJ5e53nbTuGNBuoFoigwyGqMvI4vkN2Oc= + gx_scs_ac_secret: !encrypted/pkcs1-oaep + - ZJiujVLXDgWrPSOU6759BE23gzBGtT33c4ziUDNNFpZzW1Q8PwrrtMCBHbbAey1b8qF88 + Yp8wk4EMLMNxB9SD+E1wVBAtHRubzFwOj4/4R1kza430BydqHr+8RE3kTVukj8Wv1B2ZC + SI1gaKJHhvLIVdmiDncI6MKf4AXC0pERIhg618xYNxmGjQT5vxSSZjvwflfJiFutXa0wa + 5jFNo29T3TPQH7mA5SMzM2a0AENt/Ccp6/t+UlNLG3v3M1g/P+xednVOI1tdy+CappvHY + 10u8Eln1L9ZfKKT6ebLLdoYws++numKGgzGkfyS0Ob7gOGaHC3EYyha5y//BqTkGsVmfE + 8ed+zV9fO2BpaUB1h1rlO76SgXlygw/4r+JmNIbqG4b/KEseQeGL4NwR1wOJfG1LEbdsF + xE+zhSf3a3pzjt1q4qL+F4VEzIIJWS7Vn3BZaaKLovRz0iGc8PRnrQyl6arCgtInzbemu + mLv1l8bxsHvMEdb3l6wqqNvVPjo8pgZTH+1MK/2H2Zpg62NI6b+AGkqOPYIr6U3xllT23 + ZLPVpoq1cuvY9Eh3i5OjE7AU7hGcy7cDy7lHSBNYPKv+ZERIneEciApgq6QYEEuf7v7rl + wxiW8mQgmQSsly/+7LrcBpwyv0ouHrg1VOx84dhYHEV++I0FDJGJsa/HcZ/8ZQ= + pco_prod1_ac_id: !encrypted/pkcs1-oaep + - 
jGScb1B/BfnuDdDnfsJoHnVRaeiTAX1fCB3eYBuUx6grQTQ2SorKWeUeVWqznfJJF0Pug + uE09n6oCwZE3hxzI2VxFA+o4wDBA3azasAs8N3vV+QyFYF5dl+5K1M0xwdkhqAyefw5n8 + EhJvrqmzFk81TA4lDIOprK++16on4BsLnc1eUErYbeUv0AuKbBcq4RYU0AFLG47DA8BTH + 9kMc+RkgAhB4+EtwTrGTITOrEVB/gfEnmbKanuopCFgmcdoa4GliHilHZBc24Ht64TfdN + HHuv8598r43SQX9B1l9ZzThWSs4VlfJEet4hAaSO9uJj2CSbRvC81Ep3UQBJBkSlE3r6g + XI+FIY5x/vtoPj7SFm89gyuwZ50rO3Gp8etJhqddyQjJUrGtZHNpS0LJ514EZ+ckDYuYG + iUd96mxVPVHukSSIfkFPhDrZLG0PhW94EuJjRi/qi9NgqHVKs0JtjwA/YI7tCVa5rq6cs + VfoZ0K9lG3r7Fh6Mzob+mZKMfaLMh73e0ZR8LwvBn/vFHmVTzaApaLJWnOepAWBLOWJyB + Hv5+vWbmohdwxaK6KuaiSENR9ssqerWjGFTeyG0dUSkW3f0N6nS2T9emIkelNLtANAwvw + 7CA6nSrj8k1DNG+nH4RfeN5r+k8cCLCvVj4p/bIJuKSe5dLW6/+ooGyv+dfUDU= + pco_prod1_ac_secret: !encrypted/pkcs1-oaep + - htWKeIuB3Lracz52Zprq+WUe1y7OLEfR8GmrDKUYbUy5CN9gp+kxx4cmSjm/31Xz3wa9I + tssK7NJR5rAIgpzyXwCJaScxek/5d9Zfa/DFw6KJsnATq+VNEtQS/wDnptb2YJ5P5ff7A + zfBnAMIQf0tpRwzcupZPng3zLkYsqfS0ZnSo5RJBL1lH0gwHKbhq7XD4Vn6uCxClPelTT + iL90bk3zJUWB8qmtjVfcmWqepiaBb45Ve7kU5U6/iqoDo6itDRCaHMMmP7NI61Wlpxp3A + La6/X8a3/wMH4KLoWFaDxbirZEsMRHmTmHyYnQtko4WyYYz4s9q/vUEbCTecIiXBRBp8a + 8GkoHO1XkrZuBsd2yFsA8fc2/d0D2gr9iMDdipsPmJbwFEtHM8KH5DmtIiXy2HRl2rO9f + HMCyUdEtH7UT7zPQDuY+jk4ExwL3SCuihVimLFQyWTqAM6r8L3MQAGNVWikxVlWokeQaX + VG+WGHBasBsBieTos/OujXFVqMDgiiyB5G6crr17AKMRldxAcgvPSTG279hdstNCNClQ9 + xE2Yf0v8vXJzM/AWOMJgFanBZ35QDPIYMH6+aMh+BXj/Q0zYpWZ+y1vg358y8OYX4Q+NM + 3AGCgXvUVG3wblRSlUWUac7enMNYxlvHXTeKNLYXwut+A1qPuYFWwE+EvjReEM= + pco_prod2_ac_id: !encrypted/pkcs1-oaep + - cjuaL7RFh1f4LPVP5nKv4eEOOIPKFVsK8vXtv390lX/MRDLxJGA/lb8xxRo77tKFOPlUA + 65jEHYyeDMDn1RP+8Fqq6lEQGo9X3986zNj9+NlaINv/pRHfcNCMltEoLVsOTuBeg+DJC + XvcoAz+D2A9FFTIOaShE6iElUOmHnroSO1v6TrmOVcBdNmRYtuhL8ao7gHgHTnKdkZWPm + QjZfcME+rjftezOdRQWN6W7sLKP44miKXoglTpqxPFpko09MctP0F3YVNecoJzBtlOkMJ + dZlNhkaJeof2RCLN3xKRlwwGwhZA/YlIphE4YdzI2LXXpfHi+kD0DOx3jEixSdMJRdNdn + iHzdMyezpXzfZSGPhFVeCMb/lpVHsVGKn8sRfaH65WQX80t/vDP88k8biRbdg8eYbVHJw + Wtj4xjJbJyGG/Cc1VjBVUxP3YiVRHUGbcjOMytnDr94giMjz05fdOK//N0mmy3qgrRJIX + SYX40oU1gEMv/b1PyX3dsf7a7p4x8LVyvzUG+Kkr4+duVsqqJFJw9ejXSjhyf64oy2uhc + +GsHE6bmIraY+YTVmAI1l4T0g/bxL4ssuyZfyskJEnQrF9JEj1SBbW7JDZrqwYC6BqnSm + BDPhlbgxEfsNhxHk526f/rgG9IQIw4f0ntB+JTbQnAorNoN16IJHV3H7xVFTbc= + pco_prod2_ac_secret: !encrypted/pkcs1-oaep + - f5zoAfFxqVILvjPZWpOgAtpPFPexwPQg6wZMBAArsSY9fCaRSgh4/KyHhtojVf2kgXI2l + 3We0O3HrwHrqItEWXsxGxYt5URRmQQIYbrfSQC5qMz5UdVPKZTK7jnk/ydsLlweeOZdCY + kEVrnmBUqJrvpYRMC2D1CCsXvoBaMLdEU1skPOesa1MlWT2T6bvl8tX+YW6lqEd823idX + m41SwDL/k7eRwqjdcUBs5T9C93j04vqgFB5vLCe7hoJfvdW0B1oHFeWGyz1NpDXgZGM2P + LToMIMCQ2KsRpmk8/gam1GocIce8gbUtOurppvBZFTH0ySnCgzqjf0MHVuxVxfZWjbmzl + 5cqAcEslommZfDMwv6nASvPSH8EJL6wVuCj9QHqcgbILWO/z8VRB7g/yej1ZOjjBdw7Em + TxTcc1Kx2GtQtXLl3GDC6ib8gvJSsEys6nxVTlr1mr3+lq1g8rNl63PXlsk255ZI2Xm+x + FokGXgiJvYxZ+FroMlIVzJKrl3KN5luPM8FAj2CGwjoX2j6V5RlAl99lkKce8KSp76HVM + ItQVFGP94wfzMxeV1MFN5lYYJvOyApDgFLh9B35/Y2n2XcBu8KytvUR0zPojH3Q8YsGXR + y8MeCclG92oucn4huTRMTFdfuUuuAVRXapUMOP7M8HP8tqeYQkQjK1ipH2RyFE= + pco_prod3_ac_id: !encrypted/pkcs1-oaep + - Sxo7/4v13s46G/j45jkBrvI7puOww3VeKwc2wIaTGw7QIABH/wpcECcUqklXePd6iOWIU + apTY8h8uRd+3HyKn5z9b60L0QuSrkE7pawwN544CpFNMq2uImvfv4b/VeLk1VrJn/zO5j + keaIf/bTtg8eAPhakcnZLhDMbjprKsM2P6UpqQf473/n3jeNNoPBGf8lzkAqa2BTsQVRL + FYN1RIVviKUuEkHTY4U4ziw5i2SzHJq061qs+fYK3G4c9UWDEwKxCQilYHU3/6DgzQwzz + 4n+nea2qUGuCc8B4q5qUbnMXVy1d9+pJZwSk3I3GkrROcsJ30C+MH1bsyHIxzZffVfjvT + NtjzX7KvnRRGhdL13HwODZOt6BYx5irN5TsEs4O4RgxaTv2tSQca3SLWW8uR6JsPXuOhl + lKK/dZPzVHWVXp+0bgRlhl/0E85eN2ivt+eIgje/S77tPSxAsxdri0gL/b2L5cT8spDqk + 
ZhV4VaoxEcgUhh605I09SToM+/V0fdWjIb1IySikj+jyOKTKsi11KUEA8yQKAEvnnI/Xo + vCu7SIuqy8yhPXqe7o+fzmbEsMA2ToQ779CBcxgwfHZk6iC/IFGFfXhHjgj0RzDlk+0yy + hiPzEAtU32Df/gzmo/hvVFiD2FrrI2RCou4kZjq+6xD42q1iupn8Wuzjgdne1M= + pco_prod3_ac_secret: !encrypted/pkcs1-oaep + - iYft4Sbh/hGulIyIur/cn0fJ9eAl97ah0d70/8NZecQMLoJBS9Lcfc9r4l0p/JItieGxE + tPd1qk4Zfx2a1kwIU6l49s+iveNAxxxfRrIckClk8TwwesjXhRjcjecTfDGZsUJ/ZeXa8 + jnA/xe1Aj0oscME9oNpgu1z1ad02uzxuE2YgcONcx2P5boKoWGNovtBzGgBrUleaNQBsa + TLCgooK2es19l1Xh3EEtnI56Ps3vOKOuzbCVF1LzIedeAK7o7XnbTokMz3jydOPj9ebK4 + 8BhSJBJT9PIIVSyf0F0yDoFoWXQu/cItlRWjKnB19ZkyxKMAf+dHUwOqB9JKud7pXPL6x + TuslXjxiB3EXF29Ft+4q43HKV3CGKYi4xu09lwzlxx3QdRknY4GhSy2dB9LBoBhkIikO3 + NACmZxg5z5eFZP+HQSky+uNRrnLKRBvHnc6i0QUYtVuxbprQlUgssvDTByQxQ9a8CVp+Q + riPv16TIXHG/VYteDxT4f/onB6xBBiZ7Bm7drM3nAsM+ZM3WwxUnu4luWZsZYAADV+S31 + ODoxs6vEGmQgoOCCej1a71jkLMx+xdOeRN606H6Jrqfm2BfsYa1ZxUhX1Dk1dgpcVXZOK + gJLXG3zz2PJNa/Zl0/3aqrWJS6+A9lD5XuDHDdPxKfyhwo+R2+zhzScd/bbn4E= + regio_a_ac_id: !encrypted/pkcs1-oaep + - lBlWjvJ6RA3uniS1M4etvbdUxKB9KRNWm53gL3VlPyRkA7Ic2yFcAkZEGodHWH1iqNWfN + p/3B2iFYwuZRktllbc/Ro80pkg52vHOkNkBdXLQd7ZFKG8zNJOxRt2nhDQxQPS7PzcUgo + dLklwSAbAPJohxA1atqH5qw7pfk/EKsCAW8mu3eWbZWrYAcN9faNiDkm58XOKow4t5x9w + c+rG0zmKB4nsqZh+6Bt1iesSzJcUBZIX6eFYQRhj6ev70kCHU5NNYO4y2elNxPOLwSTrx + O2b9lYff/iY95ZiKHgyninMXOuoyQHntN7nKtReoBOuwf/1fFxrxEah5BAH5KDIpZ8NoX + 5iArRHDoaL6L5oLLGs4Z6mojeypQfIHZmcaxkBvK6ZHel+cf4gCC05k1uAVS9005Qb/LH + pWJQoTF5ebCVSwU9JcBptaCss60NRkJP1bVLUuyvRTSbkbcK57BsXYnpXtCxsH3qR56QQ + nlxnrKT0IawjI0qra2QwUHjVJwmwY/U+Im00p2B2mXwJ2HCQoXD1pCcgBECeHkN3ENQeZ + SnE9w4NYo8UR1xjfMGb8xcCg5Lfj/19yxFtPTi/O6MthNmz8GYQMqcZ3x2knoEkt96SY8 + 9IjJxckOkduqBIQrL72CKk7Mk9+2Bb/9STPaMGkIFTGzVCIqtWF0uXs6EEUzWk= + regio_a_ac_secret: !encrypted/pkcs1-oaep + - r0hxDzPwGm9/Lvvp0KhXJt4sr+gISnEji91RLBt1bx+KRlsOZdZS50hO5Yp0bkWP1sWGx + ZphF6STyZmboeQn3+aeeLqhli0KfgAp9RLkQugaSIKNUm4ZF1MbBgBZ+kUrVrlI3JIFAG + su3mwmQnAeo6LvK6KdKbQsZm1/cLp9xQ0R6D24h5Y/OXAnfR7xV/Jb76/ZqSlgh65aqXE + xpYc+evJ7VcQZnqWPsNf/hC4y8tdjhdBR4T8lIi8+kBF8x7P8uFG7NKjnBFuXhXqauiih + HiFVt9oqs5diK8Ujvo+pL9K3kzwuh9v+1k3JyWw4KHeWyH6GcXvDnR0u/DpEnT8t7yACw + DRpfO/J5HLKPNKpRFD2oRx9gMQraslcyPXoA/pZzLsqbpsNzwy350cURl3fonUdA1hyyc + rnOfDsBXiS7omq79zM0/g8Fyarc7Nx0V+C39kn140LlbYfOpnjciJjmKAA5w5D0kP3SCf + VCsXjf0qBBMrzz6HP9z95Bk44fiJ3L/LkA3Iij961dYrQXbZKDrKOiX/QPwrcSrVmjmew + UbPexJFHgvTCqjadoLejSt9cUd9lVzhuzLJ8CS+CcCMbZOno6qathrd2B88riQaPNIGNu + gfkNT9R63ZzKB1qIA2n5RZi7SH9DPIUd0AwLMn2bhp3uok5pNAPP/4/1RkQiCA= + wavestack_ac_id: !encrypted/pkcs1-oaep + - NgtWt9AeOFCvfDaDtYdWAFO1oh+LVLMNi2gyK2N0IHkf5SK68DRkR8asKm10iOIaXVkN4 + riQQqirjYHzIzWS2s/dKoLIH5DTpRHZUl4n8i7sdN5lhdoxjga5+Ep+FWTG8oSWN6ZJFP + jCEhvlPc9znUjZ1xNpLdNyLoRutuSXNmLajTFxvR7SNciAGOZdxHBcCwfL6fXO6UqCBb7 + 9iHVHdSLq8EMa3I3GaZ0M4VgoHfJ8XHC1D82fIQbzgZ1I4UJX6dIV+rkFQ4nhY+xUu4Sx + opWNT264qYejkqFz7a8VIc1X+bqBO/VDyyp7d25ZR8pRjZRJtpkdBcX+Rb0CUzQ8Gb3wo + FLqGxy17EUzB2m4l9+paDOPXnp1zOrSnCnFYKfHhWAkWQAJslubgjFn8tF9Afo1lZ4R83 + veqqznWfPAtzy6Gn0WAgAjNV9VdK0lIpZ5JLLkhzrmjmhpN2dJIcJabfPabi9NwEtqAO/ + 4HDPZqmNbBYFaRGqhNd+ZwuTVEV38N4ZFv7sKDe0dM7IOjtL9zWeqeFI/raPXjyjE5W/P + +HG7rspYyh26JeiYOpNVxXbCn7IzZ48DB7keUZsiTyy6PFHEdzHRrYHxiT1LUZRrpCuLs + vPknbPwhA3KtKi7wKMvI2umXV+0zmzOk6Oq5OkIBv7aQp0eHAbpdL9TTr9vKPc= + wavestack_ac_secret: !encrypted/pkcs1-oaep + - Wd9fYe9nn6Dw4bY/WnQ6BBej8Mw59YWqBVH+zFyVmFIj6WpFpux7vLVVjujYsXIvNAcRA + t3+oky/N/AEINHP7XUczPt15P0/GJ8rAWd4L/NYN/p90hjsRE0QTHzp9GDpy+ZgyUuso6 + HGUfDiykoWrSe/9Vl0BwqZJLzrOjBeK1HOccO8GvVkyRYk5WI2OksVtiQxKbcrffS/BJK + 
9vU+VlphqiRVPGe8cyAjtoLwm1HH6FQ5VqjXYi4R2uhgTJM66/ueRky2GFCuFThxUGLQl + YBA0+Kt/2W7ejtlII/7KA5Gy7Zqw5/lM6quVJzJE+jarMj/YbZnNbpUUQws9z88ZfMA0Q + Siix+3irhIwTWDgeZyO2G20pPLGoahj4LWDzi4xFeOY1w0xXB0vkNDpGY59BRBn/oJ5zW + Pap+BdSbstyqHpJDkwteplJxbh5hN510opGFw4D6PbtoYKlsXfD1GPprHB0tnTPot9IgL + wzWYpJQQ6gehh3OTs5i9fLetoaQ59HF01SVDQL44VKT3xi4P5Wr9u+hLCbGDeu+OMPNGM + UYLHSWwKv893lov4K6/e4jz00EikV17kd+tVW1qMKMRFxdrRgwKP+wr50do2zVxhBMu4H + Co5I7CeK4GeN3KURGCKfcm/IRm+8A9j1+ocJelQf4T5DZtUlLrkGBe2SGw/V2I= diff --git a/Standards/scs-0001-v1-sovereign-cloud-standards.md b/Standards/scs-0001-v1-sovereign-cloud-standards.md index 1fc1f2bac..18b8769da 100644 --- a/Standards/scs-0001-v1-sovereign-cloud-standards.md +++ b/Standards/scs-0001-v1-sovereign-cloud-standards.md @@ -4,6 +4,10 @@ type: Procedural status: Stable track: Global stabilized_at: 2022-11-28 +description: | + SCS-0001 outlines the structure, requirements, and lifecycle of standards, procedural documents, and decision + records within the Sovereign Cloud Stack (SCS) community, ensuring clarity, organization, and governance in + the development and maintenance of interoperable and transparent cloud infrastructure standards. --- ## Introduction diff --git a/Standards/scs-0002-v2-standards-docs-org.md b/Standards/scs-0002-v2-standards-docs-org.md index 493edc15b..71583ceef 100644 --- a/Standards/scs-0002-v2-standards-docs-org.md +++ b/Standards/scs-0002-v2-standards-docs-org.md @@ -3,9 +3,12 @@ title: SCS Documentation structure type: Procedural version: 2023-08-03-001 authors: Max Wolfs -state: Draft +status: Draft track: Global replaces: scs-0002-v1-standards-docs-org.md +description: | + SCS-0002 outlines the standardized structure and maintenance processes for easily accessible and + comprehensible content of the SCS project. --- ## Introduction diff --git a/Standards/scs-0003-v1-sovereign-cloud-standards-yaml.md b/Standards/scs-0003-v1-sovereign-cloud-standards-yaml.md index d7b15bc76..2244b9ed3 100644 --- a/Standards/scs-0003-v1-sovereign-cloud-standards-yaml.md +++ b/Standards/scs-0003-v1-sovereign-cloud-standards-yaml.md @@ -3,6 +3,10 @@ title: Sovereign Cloud Standards YAML type: Procedural status: Draft track: Global +description: | + SCS-0003 outlines the standards and certification processes for interoperable and sovereign cloud offerings, + categorizing certifications into levels and layers, and detailing their progression, prerequisites, and versioning + in a machine-readable YAML format for clarity, traceability, and tool integration. 
 ---
 
 ## Introduction
diff --git a/Standards/scs-0100-v1-flavor-naming.md b/Standards/scs-0100-v1-flavor-naming.md
index 105903f71..85c1a4ee3 100644
--- a/Standards/scs-0100-v1-flavor-naming.md
+++ b/Standards/scs-0100-v1-flavor-naming.md
@@ -2,6 +2,9 @@
 title: SCS Flavor Naming Standard
 version: 2022-09-08-002
 authors: Matthias Hamm, Kurt Garloff, Tim Beermann
+type: Standard
+track: IaaS
+status: Stable
 state: v1.1 (for R3)
 obsoleted_at: 2023-10-31
 ---
diff --git a/Standards/scs-0100-v3-flavor-naming.md b/Standards/scs-0100-v3-flavor-naming.md
index a96e92e5f..297900c1c 100644
--- a/Standards/scs-0100-v3-flavor-naming.md
+++ b/Standards/scs-0100-v3-flavor-naming.md
@@ -5,6 +5,11 @@ status: Stable
 stabilized_at: 2023-06-14
 track: IaaS
 replaces: scs-0100-v2-flavor-naming.md
+description: |
+  The SCS Flavor Naming Standard provides a systematic approach for naming instance flavors in OpenStack
+  environments, ensuring backward compatibility and clarity on key features like the number of vCPUs, RAM,
+  and Root Disk, as well as extra features like GPU support and CPU generation. The standard aims for
+  usability and portability across all SCS flavors.
 ---
 
 ## Introduction
@@ -271,6 +276,9 @@
 It goes beyond the above example in checking that the discoverable features of
 flavors (vCPUs, RAM, Disk) match what the flavor names claim.
 This is used for SCS-compatible compliance testing.
+The functionality of the `flavor-name-check.py` script is also
+(partially) exposed via the web page <https://flavors.scs.community/>.
+
 ## Extensions
 
 Extensions provide a possibility for providers that offer a very differentiated set
diff --git a/Standards/scs-0101-v1-entropy.md b/Standards/scs-0101-v1-entropy.md
index cd864cbec..b7441af8a 100644
--- a/Standards/scs-0101-v1-entropy.md
+++ b/Standards/scs-0101-v1-entropy.md
@@ -3,6 +3,13 @@ title: SCS Entropy
 type: Standard
 status: Draft
 track: IaaS
+description: |
+  The SCS-0101 Entropy Standard ensures adequate entropy is available in virtual instances, crucial for operations
+  such as secure key creation in cryptography. The standard recommends using kernel version 5.18 or higher and
+  activating the hw_rng_model: virtio attribute for images, while compute nodes should employ CPUs with entropy
+  accessing instructions unfiltered by the hypervisor. It allows the infusion of the host's entropy sources into
+  virtual instances and ensures the availability and quality of entropy in virtual environments, promoting system
+  security and efficiency.
 ---
 
 ## Introduction
diff --git a/Standards/scs-0102-v1-image-metadata.md b/Standards/scs-0102-v1-image-metadata.md
index 61609043e..edb46c868 100644
--- a/Standards/scs-0102-v1-image-metadata.md
+++ b/Standards/scs-0102-v1-image-metadata.md
@@ -5,6 +5,12 @@
 stabilized_at: 2022-10-31
 status: Stable
 track: IaaS
 replaces: Image-Metadata-Spec.md
+description: |
+  The SCS-0102 Image Metadata Standard outlines how to categorize and manage metadata for cloud-based operating
+  system images to ensure usability and clarity. The standard encompasses naming conventions, technical requirements,
+  image handling protocols including updating and origin, and licensing/support details. These guidelines ensure
+  that users can understand, access, and utilize OS images effectively, with clear information on features, updates,
+  and licensing provided through well-defined metadata properties.
--- ## Motivation diff --git a/Standards/scs-0103-v1-standard-flavors.md b/Standards/scs-0103-v1-standard-flavors.md index 098ef4dac..9756ce8f9 100644 --- a/Standards/scs-0103-v1-standard-flavors.md +++ b/Standards/scs-0103-v1-standard-flavors.md @@ -3,6 +3,12 @@ title: SCS Standard Flavors and Properties type: Standard status: Draft track: IaaS +description: | + The SCS-0103 standard outlines mandatory and recommended specifications for flavors and properties in OpenStack + environments to ensure uniformity across SCS clouds. Mandatory and recommended flavors are defined with specific + configurations of vCPUs, vCPU types, RAM, and root disk sizes, alongside extra specs like scs:name-vN, scs:cpu-type, + and scs:diskN-type to detail the flavor's specifications. This standard facilitates guaranteed availability and + consistency of flavors, simplifying the deployment process for DevOps teams. --- ## Introduction diff --git a/Standards/scs-0104-v1-standard-images.md b/Standards/scs-0104-v1-standard-images.md index d900d7485..7b8e418ec 100644 --- a/Standards/scs-0104-v1-standard-images.md +++ b/Standards/scs-0104-v1-standard-images.md @@ -3,6 +3,12 @@ title: SCS Standard Images type: Standard status: Draft track: IaaS +description: | + The SCS-0104 standard establishes guidelines for virtual machine images in Sovereign Cloud Stack (SCS) environments, + specifying mandatory, recommended, and optional images via a YAML file, ensuring interoperability and streamlined + deployments. It mandates that image upload via Glance must be allowed, ensuring flexibility for users. The standard's + machine-readable document facilitates automated processing for compliance and integration purposes, promoting + consistency and reliability in cloud environments. --- ## Introduction diff --git a/Standards/scs-0210-v1-k8s-new-version-policy.md b/Standards/scs-0210-v1-k8s-new-version-policy.md index 4fb9b9b47..55b26657c 100644 --- a/Standards/scs-0210-v1-k8s-new-version-policy.md +++ b/Standards/scs-0210-v1-k8s-new-version-policy.md @@ -4,6 +4,12 @@ type: Standard status: Stable stabilized_at: 2023-02-07 track: KaaS +description: | + The SCS-0210 standard outlines the expected pace at which providers should adopt new Kubernetes versions, aiming + for alignment with the rapid development cycle of Kubernetes. Providers must offer the latest minor version within + four months of its release and the newest patch version within a week, ensuring users have timely access to security + updates, bug fixes, and features. The standard emphasizes the need for expedited updates for critical CVEs and + expects providers to thoroughly test new versions before deployment. --- ## Introduction diff --git a/Standards/scs-0210-v2-k8s-version-policy.md b/Standards/scs-0210-v2-k8s-version-policy.md new file mode 100644 index 000000000..710c1c0e3 --- /dev/null +++ b/Standards/scs-0210-v2-k8s-version-policy.md @@ -0,0 +1,81 @@ +--- +title: SCS K8S Version Policy +type: Standard +status: Draft +track: KaaS +--- + +## Introduction + +The Kubernetes project maintains multiple release versions including their patched versions. +In the project, the three most recent minor releases are actively maintained, with a fourth +version being in development. As soon as a new minor version is officially released, +the oldest version is dropped out of the support period. +Kubernetes supports its releases for around 14 months. 12 of these are the standard +support period. 
The remaining 2 months are the end-of-life support period for things like:
+
+- CVEs (under the advisement of the Security Response Committee)
+- dependency issues (including base image updates)
+- critical core component issues
+
+More information can be found in the [Kubernetes Support Period](https://kubernetes.io/releases/patch-releases/#support-period) documentation.
+
+The Kubernetes release cycle is set around 4 months, which usually results in about
+**3 minor** releases per year ([Kubernetes Release Cycle](https://kubernetes.io/releases/release/#the-release-cycle)).
+
+Patches to these releases are provided monthly, with the exception of the first patch,
+which is usually provided 1-2 weeks after the initial release ([Patch Release Cadence](https://kubernetes.io/releases/patch-releases/#cadence)).
+
+## Motivation
+
+Kubernetes is a living, fast-paced project which follows a pre-defined release cycle.
+This enables forward planning with regard to releases and patches, but it also implies a
+necessity to upgrade to newer versions quickly, since new releases often include new features
+and important security updates, and since previous versions regularly fall out of the
+support period window.
+
+We want to achieve an up-to-date policy, meaning that providers should be mostly in
+sync with the upstream and not fall behind the official Kubernetes releases.
+This is achievable, since new versions are released periodically on a well-communicated
+schedule, enabling providers and users to set up processes around it.
+Being up to date ensures that security issues and bugs are addressed and new features
+are made available when using SCS-compliant clusters.
+
+It is nevertheless important to support at least all Kubernetes versions that are still
+inside the support period, since users could depend on specific versions or may need
+longer to upgrade their workloads to a newer version.
+
+The standard should therefore define a version recency policy as well as a support
+window period.
+
+## Decision
+
+To keep up to date with the latest Kubernetes features, bug fixes and security improvements,
+the provided Kubernetes versions should be kept up to date with the upstream.
+
+- The latest minor version MUST be provided no later than 4 months after release.
+- The latest patch version MUST be provided no later than 1 week after release.
+- This time period MUST be even shorter for patches that target critical CVEs (CVSS >= 8).
+  It is RECOMMENDED to provide a new patch version within 2 days of its release.
+- New versions MUST be tested before being rolled out on productive infrastructure;
+  at least the CNCF E2E tests should be passed beforehand.
+
+At the same time, providers must support Kubernetes versions at least as long as the
+official sources do, as described in the [Kubernetes Support Period](https://kubernetes.io/releases/patch-releases/#support-period).
+
+- Kubernetes versions MUST be supported as long as the official sources support them.
+  The current support period can therefore be found in the [Kubernetes Support Period](https://kubernetes.io/releases/patch-releases/#support-period).
+- It is RECOMMENDED not to support versions after this period, in order not to encourage
+  the usage of out-of-date versions.
+
+## Related Documents
+
+All documents regarding versioning, releases, etc. for the official Kubernetes projects can be found here:
+
+- [Kubernetes Releases](https://kubernetes.io/releases/)
+- [Kubernetes Support Period](https://kubernetes.io/releases/patch-releases/#support-period)
+- [Kubernetes Release Cycle](https://kubernetes.io/releases/release/#the-release-cycle)
+- [Patch Release Cadence](https://kubernetes.io/releases/patch-releases/#cadence)
+
+## Validation / Conformance
+
+*This section will be updated when the conformance tests are written.*
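The recency rules in the decision above lend themselves to a mechanical check. The following is a minimal sketch of such a check, not the official conformance test (which, per the Validation section, is yet to be written); approximating "4 months" as 120 days is an assumption of the sketch, while the v1.28 release date used in the example matches the upstream schedule:

```python
from datetime import date, timedelta


def parse_minor(version: str) -> tuple:
    """Map 'v1.28.3' or '1.28' to (1, 28)."""
    major, minor = version.lstrip("v").split(".")[:2]
    return (int(major), int(minor))


def minor_version_recent(provided: str, latest: str, latest_release: date, today: date) -> bool:
    """Check the recency rule: the latest minor version MUST be provided
    no later than 4 months (approximated here as 120 days) after release."""
    if parse_minor(provided) >= parse_minor(latest):
        return True
    return (today - latest_release) <= timedelta(days=120)


# v1.28 was released on 2023-08-15; a provider still offering only v1.27
# is inside the window on 2023-10-01, but no longer on 2024-01-15.
print(minor_version_recent("v1.27.5", "v1.28", date(2023, 8, 15), date(2023, 10, 1)))  # True
print(minor_version_recent("v1.27.5", "v1.28", date(2023, 8, 15), date(2024, 1, 15)))  # False
```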
diff --git a/Standards/scs-0211-v1-kaas-default-storage-class.md b/Standards/scs-0211-v1-kaas-default-storage-class.md
index 33f9caf52..7e224a9e3 100644
--- a/Standards/scs-0211-v1-kaas-default-storage-class.md
+++ b/Standards/scs-0211-v1-kaas-default-storage-class.md
@@ -4,11 +4,16 @@ type: Standard
 status: Stable
 stabilized_at: 2023-02-13
 track: KaaS
+description: |
+  The SCS-0211 standard outlines the properties required for the default StorageClass in Kubernetes as a Service (KaaS).
+  The standard ensures that the default StorageClass, identified by the "storageclass.kubernetes.io/is-default-class"
+  annotation, supports the ReadWriteOnce access mode and protects volume data against loss due to single disk or
+  host hardware failures.
 ---
 
 ## Introduction
 
-Cluster consumers can request persistent storage via [`PersistentVolumeClaims`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#persistentvolumeclaim-v1-core) which is provisioned automatically by cloud-provided automation.
+Cluster consumers can request persistent storage via [`PersistentVolumeClaims`](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) which is provisioned automatically by cloud-provided automation.
 Storage requirements may vary across use cases, so there is the concept of `StorageClasses`.
 `StorageClasses` define some set of storage properties. So, consumers can choose one
 of these depending on the use case. [Kubernetes documentation](https://kubernetes.io/docs/concepts/storage/persistent-volumes/)
diff --git a/Standards/scs-0216-v1-requirements-for-testing-cluster-stacks.md b/Standards/scs-0216-v1-requirements-for-testing-cluster-stacks.md
new file mode 100644
index 000000000..d0b614239
--- /dev/null
+++ b/Standards/scs-0216-v1-requirements-for-testing-cluster-stacks.md
@@ -0,0 +1,131 @@
+---
+title: Requirements for testing cluster-stacks
+type: Decision Record
+status: Draft
+track: KaaS
+---
+
+## Introduction
+
+In this proposal, we discuss the necessity and design considerations of a lightweight solution for testing cluster-stacks. The purpose is to address the challenges associated with testing on an Infrastructure as a Service (IaaS) provider and the limitations of using Docker as the primary containerization tool for testing. This proposal will elaborate on why we need to test in a local environment, specifically a laptop, and the benefits and drawbacks associated with it. We aim to make an informed decision for testing cluster stacks that caters to both the organizational and technical perspectives of our stakeholders.
+
+## Motivation
+
+From an organization's point of view, it is crucial to lower the entry barrier for testing. This makes it possible for anyone, including external contributors, to easily participate in the testing process without needing an account with the IaaS provider. It is also necessary to overcome the hurdles associated with maintaining a balance in the provider account and managing sponsorships to fund the testing.
+
+From a technical standpoint, there are multiple reasons to favor a local environment for testing. Among them are the ability to test without an internet connection, to finish tests in a shorter timeframe, and to incur no cost. The provider independence of Cluster Stacks makes it nonsensical to test with a specific provider due to the varied behaviors of different providers. There are also challenges in monitoring and debugging tests run on IaaS providers and dealing with their downtime and limitations on concurrent testing.
+
+## Design Considerations
+
+1. **Lightweight Solution**
+The testing solution should be lightweight and easy to use in a local environment, such as a laptop. This lightweight solution should minimize dependencies and resource usage while maximizing the speed and efficiency of the tests. It should be capable of handling the cluster-stack testing requirements without necessitating a bulky or resource-intensive setup.
+
+2. **Independence from Specific Providers**
+The solution should be generalized and not bound to any specific provider. This consideration ensures that the solution can be applied to any provider, guaranteeing its versatility and broad applicability.
+
+3. **Offline Testing**
+The testing solution should support testing without an internet connection, which will enable more robust and flexible testing scenarios. It should be possible to run the tests even in cases of limited or disrupted internet access.
+
+4. **Fast Execution Time**
+The tests should execute within a reasonably short amount of time. The solution must be optimized to ensure quick testing cycles, which can help increase productivity and shorten development cycles.
+
+5. **No-Cost Solution**
+The solution should not impose any additional costs on the organization or individual testers. This characteristic is crucial to enable widespread adoption of the testing process and to lower the entry barrier for contributors.
+
+6. **Easy Monitoring and Debugging**
+The solution should provide easy monitoring and debugging capabilities. It should allow developers to quickly identify, diagnose, and fix issues that arise during testing, without requiring access to any external logs or monitoring tools.
+
+7. **Concurrent Testing**
+The solution should support the ability to run concurrent tests without causing any disruption or downtime. This ability can improve the efficiency and speed of the testing process.
+
+## Required Features
+
+The proposed solution should meet the following feature requirements:
+
+1. Local Environment: The solution should support a local testing environment that allows developers to test cluster stacks on their local machines, reducing dependencies on external providers.
+1. Compatibility: The solution should be compatible with various operating systems and platforms, ensuring its usability across diverse environments.
+1. Performance: The solution should offer high-performance testing capabilities, allowing fast execution of tests.
+1. Offline Support: The solution should allow testing in offline mode, ensuring tests can be performed even without an internet connection.
+1. Concurrency: The solution should support running multiple tests concurrently without causing disruptions or conflicts.
+1. Monitoring & Debugging: The solution should provide easy-to-use tools for monitoring test progress and debugging issues.
+1. Cost-effectiveness: The solution should not require any financial investment from the testers or the organization, promoting broad accessibility and usage.
+
+## Pros and Cons of Different Approaches
+
+Two potential approaches for testing cluster stacks are the use of an IaaS provider and the use of a local environment. Here we discuss the pros and cons of these two approaches.
+
+### IaaS Provider (OpenStack, Hetzner, AWS)
+
+#### Pros
+
+- Comprehensive testing environment with advanced capabilities.
+- Possibility to mimic real-world production environments closely.
+
+#### Cons
+
+- Requires signing up and account management, which can be a barrier for some testers.
+- Requires maintaining a balance in the provider account, which can pose financial burdens.
+- Internet dependency for testing.
+- Potential for prolonged testing time due to various dependencies.
+- Challenges with monitoring and debugging.
+- Potential downtime and difficulty in running concurrent tests.
+
+### Local Environment (Docker, KubeVirt)
+
+#### Pros
+
+- Faster test execution with no downtime.
+- Ability to test without an internet connection.
+- Independent of any provider knowledge.
+- Cost-free testing.
+- Easier integration into CI/CD.
+- Simplified monitoring and debugging.
+
+#### Cons
+
+- Limited systemd support and containerd support for kubeadm in Docker.
+- Inability to mimic real-world production environments exactly.
+
+## Beyond Docker: Virtual Machine-based Approach
+
+While Docker provides significant benefits for local environment testing, it is important to recognize its limitations. Docker containers, by design, are lightweight and don't contain a full operating system, which can lead to challenges when trying to mimic real-world production environments. Also, Docker containers lack some necessary features like systemd, which is used in many production environments for initializing and managing services.
+
+One major aspect that Docker lacks is the ability to mimic real-world production environments effectively. This is primarily due to its nature as a containerization tool, operating within the host OS and sharing resources among its containers. This might create disparities in behavior when compared to deployments on real, isolated systems, which could be problematic in some scenarios.
+
+Furthermore, Docker utilizes a union file system for its images, leading to the creation of layers. This approach can lead to some complexities when dealing with node-images, which comprise a significant chunk of our layers. Handling such situations might require workarounds that add additional complexity and potential points of failure. This creates a blind spot, as real providers won't require these workarounds, which might lead to disparities in results when comparing testing in local and actual production environments.
+
+Therefore, to achieve a more accurate representation of real-world environments, we propose a solution that utilizes a virtual machine-based approach for local testing. This approach could leverage tools like KubeVirt, Vagrant, or VirtualBox to create and manage virtual machines in the local environment. This strategy would provide a more robust and realistic environment for testing, as it can better emulate the behavior of a full-fledged operating system and thereby more closely mimic a real-world production environment.
+
+### Virtual Machine-based Approach
+
+#### Pros
+
+- Provides a more accurate representation of real-world production environments.
+- Allows for full operating system emulation, including features like systemd.
+- Can create isolated environments, thereby mimicking actual deployments better than containers.
+
+#### Cons
+
+- Potentially more resource-intensive than container-based solutions.
+- Increased complexity due to the need for managing full virtual machines rather than lightweight containers.
+- Initial setup might be more complicated compared to a Docker-based solution.
+
+## Proposed Path Forward
+
+Given the limitations of Docker and the potential advantages of a virtual machine-based approach, we propose to investigate this strategy further. The exact tool or set of tools used can be determined based on a thorough evaluation of the available options.
+
+Although there might be some initial complexity and potentially higher resource usage compared to Docker, we believe that the benefits of more accurate testing and better emulation of real-world environments outweigh these potential disadvantages.
+
+The proposed solution should meet all the requirements mentioned in the previous sections of this proposal, in addition to providing the benefits of a virtual machine-based approach. By doing so, we aim to establish a robust, reliable, and realistic testing environment for cluster stacks that mimics real-world production environments as closely as possible.
+
+## Conclusion
+
+After thoroughly examining the organizational needs, technical requirements, and potential testing approaches, it is evident that testing cluster stacks in a local environment provides significant advantages over using an Infrastructure as a Service (IaaS) provider. A local environment minimizes financial constraints, reduces testing time, offers offline capabilities, and enables greater tester participation without the need for specialized IaaS knowledge.
+
+While Docker stands out for its broad adoption, cost-effectiveness, and impressive containerization benefits, it also presents some limitations that cannot be overlooked. The lack of full operating system emulation and of certain system features like systemd makes it challenging to mimic real-world production environments accurately.
+
+Given Docker's limitations and the need to reproduce realistic testing scenarios, we propose moving beyond Docker to a virtual machine-based approach. Even though this approach may introduce initial complexity and potentially higher resource usage, it promises a more accurate representation of real-world environments, thereby ensuring more reliable and robust test results.
+
+Tools such as KubeVirt, Vagrant, or VirtualBox could potentially fulfill our requirements, offering benefits such as full operating system emulation and isolated environments. However, an in-depth evaluation of these and possibly other tools is necessary to determine the best path forward.
+
+In conclusion, our goal is to design a robust, reliable, and realistic testing environment for cluster stacks that closely mimics real-world production environments, aligns with our organizational and technical perspectives, and ensures a low entry barrier for all testers. Embracing a virtual machine-based approach for local environment testing represents a promising strategy to achieve this objective, paving the way for more efficient and reliable cluster stack testing.
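To make the VM-based direction concrete, here is a minimal sketch (a hypothetical illustration, not part of the decision record: the resource sizes and the containerdisk image name are assumptions) of how a local test harness could start a throwaway virtual machine through the Kubernetes API once KubeVirt is installed in a local cluster:

```python
from kubernetes import client, config

# Hypothetical sketch: start a throwaway KubeVirt VM for cluster-stack tests.
# Assumes a local cluster (e.g. kind or minikube) with KubeVirt installed.
test_vm = {
    "apiVersion": "kubevirt.io/v1",
    "kind": "VirtualMachine",
    "metadata": {"name": "cluster-stack-test-node"},
    "spec": {
        "running": True,
        "template": {
            "spec": {
                "domain": {
                    "devices": {"disks": [{"name": "root", "disk": {"bus": "virtio"}}]},
                    "resources": {"requests": {"memory": "2Gi"}},
                },
                # containerDisk boots the VM from an OCI image; the image name is an assumption
                "volumes": [{"name": "root", "containerDisk": {"image": "quay.io/containerdisks/ubuntu:22.04"}}],
            }
        },
    },
}

config.load_kube_config()
client.CustomObjectsApi().create_namespaced_custom_object(
    group="kubevirt.io", version="v1", namespace="default",
    plural="virtualmachines", body=test_vm,
)
```

Compared to the Docker path, such a VM keeps the full-OS behavior (systemd, kernel-level features) discussed above while remaining runnable on a laptop.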
diff --git a/Standards/scs-0300-v1-requirements-for-sso-identity-federation.md b/Standards/scs-0300-v1-requirements-for-sso-identity-federation.md index 1696a56cb..913e57159 100644 --- a/Standards/scs-0300-v1-requirements-for-sso-identity-federation.md +++ b/Standards/scs-0300-v1-requirements-for-sso-identity-federation.md @@ -4,6 +4,13 @@ type: Decision Record status: Stable stabilized_at: 2023-06-21 track: IAM +description: | + The SCS-0300 standard outlines requirements for Single Sign-On (SSO) identity federation within the Sovereign + Cloud Stack (SCS). It addresses the need for customers to access SCS services using credentials stored and managed + externally, facilitating user onboarding and reducing the need for additional dedicated SCS accounts. The standard + focuses on delegating authentication to external identity providers and mapping users to roles within SCS for + authorization, while also considering the use of machine identities. Keycloak is the current choice as an Identity + Provider (IdP) for its support of OAuth 2.0 grants and its integration with OpenStack and kolla-ansible. --- ## Introduction @@ -133,7 +140,7 @@ a short span of time and increasing risk connected with service restarts. Since version 17, Keycloak claims that it's capability for "cloud native" deployments on Kubernetes has improved. -[Keycloak is offering a REST API](https://www.keycloak.org/docs-api/20.0.1/rest-api/index.html) +Keycloak is offering a [documented REST API](https://www.keycloak.org/documentation) for all aspects of its administration interface. For storage of Keycloak configuration and local user metadata diff --git a/Standards/scs-0301-v1-naming-conventions.md b/Standards/scs-0301-v1-naming-conventions.md index 0841965b8..6540e35e2 100644 --- a/Standards/scs-0301-v1-naming-conventions.md +++ b/Standards/scs-0301-v1-naming-conventions.md @@ -1,6 +1,6 @@ --- -title: Recommended naming for domains/groups/roles/project when onboarding new customers -type: _Standard | Decision Record_ +title: Naming for domains/groups/roles/project when onboarding new customers +type: Decision Record status: Draft track: IAM --- diff --git a/Standards/scs-0302-v1-domain-manager-role.md b/Standards/scs-0302-v1-domain-manager-role.md index 5668b0234..9a7eb8b7e 100644 --- a/Standards/scs-0302-v1-domain-manager-role.md +++ b/Standards/scs-0302-v1-domain-manager-role.md @@ -99,13 +99,15 @@ This means that by creating a new role and extending Keystone's API policy confi The approach described in this standard imposes the following limitations: -1. as a result of the "`identity:list_domains`" rule (see below), Domain Managers are able to see all domains via "`openstack domain list`" and can inspect the metadata of other domains with "`openstack domain show`" +1. as a result of the "`identity:list_domains`" rule (see below), Domain Managers are able to see all domains[^5] via "`openstack domain list`" and can inspect the metadata of other domains with "`openstack domain show`" 2. as a result of the "`identity:list_roles`" rule (see below), Domain Managers are able to see all roles via "`openstack role list`" and can inspect the metadata of other roles with "`openstack role show`" **As a result of points 1 and 2, metadata of all domains and roles will be exposed to all Domain Managers!** If a CSP deems either of these points critical, they may abstain from granting the Domain Manager role to users, effectively disabling the functionality. See [Impact](#impact). 
+[^5]: see the [corresponding Launchpad bug at Keystone](https://bugs.launchpad.net/keystone/+bug/2041611) + ## Decision A role named "`domain-manager`" is to be created via the Keystone API and the policy adjustments quoted below are to be applied. @@ -121,7 +123,7 @@ The "`is_domain_managed_role`" rule definition is the only exception to this (se # specify a rule that whitelists roles which domain admins are permitted # to assign and revoke within their domain -"is_domain_managed_role": "'member':%(target.role.name)s" +"is_domain_managed_role": "'member':%(target.role.name)s or 'load-balancer_member':%(target.role.name)s" # allow domain admins to retrieve their own domain "identity:get_domain": "(rule:is_domain_manager and token.domain.id:%(target.domain.id)s) or rule:admin_required" @@ -197,11 +199,11 @@ The "`is_domain_managed_role`" rule of the above policy template may be adjusted ##### Example: permitting multiple roles -The following example permits both the "`member`" and "`reader`" role to be assigned/revoked by a Domain Manager. +The following example permits the "`reader`" role to be assigned/revoked by a Domain Manager in addition to the default "`member`" and "`load-balancer_member`" roles. Further roles can be appended using the logical `or` directive. ```yaml -"is_domain_managed_role": "'member':%(target.role.name)s or 'reader':%(target.role.name)s" +"is_domain_managed_role": "'member':%(target.role.name)s or 'load-balancer_member':%(target.role.name)s or 'reader':%(target.role.name)s" ``` **Note regarding the `domain-manager` role** diff --git a/Standards/scs-0412-v1-metering-json.md b/Standards/scs-0412-v1-metering-json.md index f2b3a4433..e8c84090c 100644 --- a/Standards/scs-0412-v1-metering-json.md +++ b/Standards/scs-0412-v1-metering-json.md @@ -2,7 +2,13 @@ title: Exposition of IaaS metering data as JSON type: Standard status: Draft -track: IaaS +track: Ops +description: | + The SCS-0412 standard addresses the need for a standardized interface to expose IaaS metering data in JSON format + within the Sovereign Cloud Stack (SCS). This is to aid cloud operators in integrating SCS IaaS layer data with + their existing billing and customer relationship systems. The standard adopts the Ceilometer HTTP hook format + provided by the OpenStack Ceilometer project for telemetry and metering, avoiding the need for additional translation + layers and implementation components. --- ## Introduction diff --git a/Tests/iaas/flavor-naming/flavor-form.py b/Tests/iaas/flavor-naming/flavor-form.py new file mode 100755 index 000000000..7b8324113 --- /dev/null +++ b/Tests/iaas/flavor-naming/flavor-form.py @@ -0,0 +1,89 @@ +#!/usr/bin/env python3 +# Little wrapper to output flavor pretty description +# from a CGI form with flavor +# +# (c) Kurt Garloff , 11/2023 +# SPDX-License-Identifier: CC-BY-SA-4.0 +""" +flavor-form.py +CGI script to get passed flavor from a html form (GET) +and parses it according to SCS flavor naming. +It returns an error (sometimes with a useful error message) +or a human-readable description of the flavor. +""" + +import os +import sys +import re +import urllib.parse +import html +import importlib +fnmck = importlib.import_module("flavor-name-check") + +# Global variables +FLAVOR_NAME = "" +FLAVOR_SPEC = () +ERROR = "" + + +def parse_name(fnm): + "return tuple with flavor description" + global FLAVOR_SPEC, FLAVOR_NAME, ERROR + FLAVOR_NAME = fnm + try: + FLAVOR_SPEC = fnmck.parsename(fnm) + except (TypeError, NameError, KeyError) as exc: + ERROR = f"\tERROR
\n\t{exc}" + return () + ERROR = "" + return FLAVOR_SPEC + + +def output_parse(): + "output pretty description from SCS flavor name" + fnmd = importlib.import_module("flavor-name-describe") + print('\t
\n\t
') + print(f'\t Flavor name: ') + print('\t ') + # print(' \n
') + print('\t') + if FLAVOR_NAME: + print(f"\t
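For orientation, this is what the CGI above receives and what the `form` dict in `main()` ends up holding (a small standalone illustration; the flavor name is just an example from the naming standard):

```python
import urllib.parse

# A GET request like /cgi-bin/flavor-form.py?flavor=SCS-2V-4 arrives as
# QUERY_STRING="flavor=SCS-2V-4"; parse_qs turns it into the dict that
# main() indexes with form["flavor"][0].
form = urllib.parse.parse_qs("flavor=SCS-2V-4")
print(form)  # {'flavor': ['SCS-2V-4']}
```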
diff --git a/Tests/iaas/flavor-naming/flavor-name-describe.py b/Tests/iaas/flavor-naming/flavor-name-describe.py
index ef331b133..c7c964b41 100755
--- a/Tests/iaas/flavor-naming/flavor-name-describe.py
+++ b/Tests/iaas/flavor-naming/flavor-name-describe.py
@@ -89,7 +89,10 @@ def main(argv):
     fnmck = importlib.import_module("flavor-name-check")
     for nm in argv:
         ret = fnmck.parsename(nm)
-        print(f'{nm}: {prettyname(ret)}')
+        if ret:
+            print(f'{nm}: {prettyname(ret)}')
+        else:
+            print(f'{nm}: Not an SCS flavor')
 
 
 if __name__ == "__main__":
diff --git a/Tests/iaas/flavor-naming/flavor-names-openstack.py b/Tests/iaas/flavor-naming/flavor-names-openstack.py
index 8b512eafc..8e7fda00a 100755
--- a/Tests/iaas/flavor-naming/flavor-names-openstack.py
+++ b/Tests/iaas/flavor-naming/flavor-names-openstack.py
@@ -58,7 +58,7 @@ def main(argv):
                    ("os-cloud=", "mand=", "verbose", "help", "quiet", "v2plus",
                     "v3", "v1prefer", "accept-old-mandatory"))
     except getopt.GetoptError as exc:
-        print(f"{exc}", file=sys.stderr)
+        print(f"CRITICAL: {exc!r}", file=sys.stderr)
         usage(1)
     for opt in opts:
         if opt[0] == "-h" or opt[0] == "--help":
@@ -85,13 +85,14 @@ def main(argv):
         else:
             usage(2)
     if len(args) > 0:
-        print(f"Extra arguments {str(args)}", file=sys.stderr)
+        print(f"CRITICAL: Extra arguments {str(args)}", file=sys.stderr)
         usage(1)
     scsMandatory, scsRecommended = fnmck.readflavors(scsMandFile, v3mode, fnmck.prefer_old)
     if not cloud:
-        print("ERROR: You need to have OS_CLOUD set or pass --os-cloud=CLOUD.", file=sys.stderr)
+        print("CRITICAL: You need to have OS_CLOUD set or pass --os-cloud=CLOUD.", file=sys.stderr)
+        sys.exit(1)
     conn = openstack.connect(cloud=cloud, timeout=32)
     flavors = conn.compute.flavors()
@@ -187,7 +188,7 @@ def main(argv):
         except NameError as exc:
             errors += 1
             wrongFlv.append(flv.name)
-            print(f"Wrong flavor \"{flv.name}\": {exc}", file=sys.stderr)
+            print(f"ERROR: Wrong flavor \"{flv.name}\": {exc}", file=sys.stderr)
     # This makes the output more readable
     MSCSFlv.sort()
     RSCSFlv.sort()
@@ -196,8 +197,9 @@ def main(argv):
     wrongFlv.sort()
     warnFlv.sort()
     # We have counted errors on the fly, add missing flavors to the final result
-    if scsMandatory:
-        errors += len(scsMandatory)
+    for fn in scsMandatory:
+        errors += 1
+        print(f"ERROR: Missing mandatory flavor: {fn}", file=sys.stderr)
     # Produce dicts for YAML reporting
     flvSCSList = {
         "MandatoryFlavorsPresent": MSCSFlv,
@@ -250,4 +252,10 @@ def main(argv):
 
 
 if __name__ == "__main__":
-    sys.exit(main(sys.argv[1:]))
+    try:
+        sys.exit(main(sys.argv[1:]))
+    except SystemExit:
+        raise
+    except BaseException as exc:
+        print(f"CRITICAL: {exc!r}", file=sys.stderr)
+        sys.exit(1)
diff --git a/Tests/iaas/flavor-naming/page/index.html b/Tests/iaas/flavor-naming/page/index.html
new file mode 100644
index 000000000..87850e3e7
--- /dev/null
+++ b/Tests/iaas/flavor-naming/page/index.html
@@ -0,0 +1,31 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8"/>
+<meta name="viewport" content="width=device-width, initial-scale=1"/>
+<title>SCS flavor name parser and generator</title>
+</head>
+<body>
+
+<h1>SCS flavor name parser and generator</h1>
+
+<br/>
+
+<h2>SCS flavor name parser</h2>
+<!--FLAVOR-FORM: PARSE-->
+
+<br/>
+
+<h2>SCS flavor name generator</h2>
+<!--FLAVOR-FORM: GENERATE-->
+
+<br/>
+
+<h2>References</h2>
+<ul>
+<li><a href="https://docs.scs.community/standards/scs-0100-v3-flavor-naming">SCS flavor naming standard</a></li>
+<li><a href="https://github.com/SovereignCloudStack/standards">SCS standards repository</a></li>
+</ul>
+
+</body>
+</html>
diff --git a/Tests/iaas/image-metadata/image-md-check.py b/Tests/iaas/image-metadata/image-md-check.py
index f88056c04..253d3989a 100755
--- a/Tests/iaas/image-metadata/image-md-check.py
+++ b/Tests/iaas/image-metadata/image-md-check.py
@@ -216,6 +216,7 @@ def main(argv):
         opts, args = getopt.gnu_getopt(argv[1:], "phvc:s",
                                        ("private", "help", "os-cloud=", "verbose", "skip-completeness"))
     except getopt.GetoptError:  # as exc:
+        print("CRITICAL: Command-line syntax error", file=sys.stderr)
         usage(1)
     for opt in opts:
         if opt[0] == "-h" or opt[0] == "--help":
@@ -230,18 +231,22 @@ def main(argv):
             cloud = opt[1]
     images = args
     if not cloud:
-        print("ERROR: Need to specify --os-cloud or set OS_CLOUD environment.", file=sys.stderr)
+        print("CRITICAL: Need to specify --os-cloud or set OS_CLOUD environment.", file=sys.stderr)
         usage(1)
-    conn = openstack.connect(cloud=cloud, timeout=24)
-    # Do work
-    if not images:
-        images = get_imagelist(private)
-    err = 0
-    # Analyse image metadata
-    for image in images:
-        err += validate_imageMD(image)
-    if not skip:
-        err += report_stdimage_coverage(images)
+    err = 0
+    try:
+        conn = openstack.connect(cloud=cloud, timeout=24)
+        # Do work
+        if not images:
+            images = get_imagelist(private)
+        # Analyse image metadata
+        for image in images:
+            err += validate_imageMD(image)
+        if not skip:
+            err += report_stdimage_coverage(images)
+    except BaseException as e:
+        print(f"CRITICAL: {e!r}", file=sys.stderr)
+        err = 1
     return err
diff --git a/Tests/kaas/k8s-default-storage-class/Dockerfile_sonobuoy_plugin b/Tests/kaas/k8s-default-storage-class/Dockerfile_sonobuoy_plugin
new file mode 100644
index 000000000..bf15cfad0
--- /dev/null
+++ b/Tests/kaas/k8s-default-storage-class/Dockerfile_sonobuoy_plugin
@@ -0,0 +1,21 @@
+FROM ubuntu
+
+# Install kubectl
+# Note: Latest version may be found on:
+# https://aur.archlinux.org/packages/kubectl-bin/
+ADD https://storage.googleapis.com/kubernetes-release/release/v1.14.1/bin/linux/amd64/kubectl /usr/local/bin/kubectl
+
+ENV HOME=/config
+
+# Basic check it works.
+RUN apt-get update && \
+    apt-get -y install net-tools && \
+    apt-get -y install curl && \
+    chmod +x /usr/local/bin/kubectl && \
+    kubectl version --client
+
+
+COPY ./ ./
+RUN chmod +x ./run_checks.sh
+
+ENTRYPOINT ["./run_checks.sh"]
diff --git a/Tests/kaas/k8s-default-storage-class/helper.py b/Tests/kaas/k8s-default-storage-class/helper.py
new file mode 100644
index 000000000..dd5089298
--- /dev/null
+++ b/Tests/kaas/k8s-default-storage-class/helper.py
@@ -0,0 +1,75 @@
+import yaml
+import sys
+import logging
+from kubernetes import client, config
+
+manual_result_file_template = {
+    "name": None,
+    "status": None,
+    "details": {"messages": None},
+}
+
+logger = logging.getLogger("helper")
+
+
+def initialize_logging():
+    logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.DEBUG)
+
+
+def print_usage(file=sys.stderr):
+    """Help output"""
+    print(
+        """Usage: k8s_storageclass_check.py [options]
+This tool checks the requested k8s default storage class according to the SCS Standard 0211 "kaas-default-storage-class".
+Options:
+    [-k/--kubeconfig PATH_TO_KUBECONFIG] sets kubeconfig file to access kubernetes api
+    [-d/--debug] enables DEBUG logging channel
+""",
+        end="",
+        file=file,
+    )
+
+
+class SCSTestException(Exception):
+    """Raised when a specific test did not pass"""
+
+    def __init__(self, *args, return_code: int):
+        super().__init__(*args)
+        self.return_code = return_code
+
+
+def setup_k8s_client(kubeconfigfile=None):
+
+    if kubeconfigfile:
+        logger.debug(f"using kubeconfig file '{kubeconfigfile}'")
+        config.load_kube_config(kubeconfigfile)
+    else:
+        logger.debug("using system kubeconfig")
+        config.load_kube_config()
+
+    k8s_api_client = client.CoreV1Api()
+    k8s_storage_client = client.StorageV1Api()
+
+    return (
+        k8s_api_client,
+        k8s_storage_client,
+    )
+
+
+def gen_sonobuoy_result_file(error_n: int, error_msg: str, test_file_name: str):
+
+    test_name = test_file_name.replace(".py", "")
+
+    test_status = "passed"
+
+    if error_n != 0:
+        test_status = test_name + "_" + str(error_n)
+
+    result_file = manual_result_file_template
+
+    result_file["name"] = test_name
+    result_file["status"] = test_status
+    result_file["details"]["messages"] = error_msg
+
+    with open(f"./{test_name}.result.yaml", "w") as file:
+        yaml.dump(result_file, file)
diff --git a/Tests/kaas/k8s-default-storage-class/k8s-default-storage-class-check.py b/Tests/kaas/k8s-default-storage-class/k8s-default-storage-class-check.py
new file mode 100644
index 000000000..f8bd6c258
--- /dev/null
+++ b/Tests/kaas/k8s-default-storage-class/k8s-default-storage-class-check.py
@@ -0,0 +1,268 @@
+#!/usr/bin/env python3
+
+"""PersistentVolumeClaims checker
+
+Return codes:
+0: Default StorageClass is available and set up according to the SCS standard
+1: Not able to connect to k8s api
+
+31: Default storage class has no provisioner
+32: None or more than one default StorageClass is defined
+
+41: Not able to bind PersistentVolume to PersistentVolumeClaim
+42: ReadWriteOnce is not a supported access mode
+
+All return codes between (and including) 1-19 as well as all return codes ending on 9
+can be seen as failures.
+
+Check given cloud for conformance with SCS standard regarding
+Default StorageClass and PersistentVolumeClaims, to be found under /Standards/scs-0211-v1-kaas-default-storage-class.md
+
+"""
+
+import getopt
+import sys
+import time
+import json
+import logging
+
+from kubernetes import client
+from helper import gen_sonobuoy_result_file
+from helper import SCSTestException
+from helper import initialize_logging
+from helper import print_usage
+from helper import setup_k8s_client
+
+import logging.config
+
+logger = logging.getLogger("k8s-default-storage-class-check")
+
+
+def check_default_storageclass(k8s_client_storage):
+
+    api_response = k8s_client_storage.list_storage_class(_preload_content=False)
+    storageclasses = api_response.read().decode("utf-8")
+    storageclasses_dict = json.loads(storageclasses)
+
+    ndefault_class = 0
+
+    for item in storageclasses_dict["items"]:
+        storage_class_name = item["metadata"]["name"]
+        annotations = item["metadata"].get("annotations", {})  # tolerate StorageClasses without annotations
+
+        if annotations.get("storageclass.kubernetes.io/is-default-class") == "true":
+            ndefault_class += 1
+            default_storage_class = storage_class_name
+            provisioner = item["provisioner"]
+
+            if provisioner == "kubernetes.io/no-provisioner":
+                raise SCSTestException(
+                    f"Provisioner is set to: {provisioner}.",
+                    "This means the default storage class has no provisioner.",
+                    return_code=31,
+                )
+
+    if ndefault_class != 1:
+        raise SCSTestException(
+            "None or more than one default StorageClass is defined!",
+            f"Number of defined default StorageClasses = {ndefault_class}",
+            return_code=32,
+        )
+
+    logger.info(f"One default StorageClass found: '{default_storage_class}'")
+    return default_storage_class
", + f"Number of defined default StorageClasses = {ndefault_class} ", + return_code=32, + ) + + logger.info(f"One default Storage Class found:'{default_storage_class}'") + return default_storage_class + + +def check_default_persistentvolumeclaim_readwriteonce(k8s_api_instance, storage_class): + """ + 1. Create PersistantVolumeClaim + 2. Create pod which uses the PersitantVolumeClaim + 3. Check if PV got succesfully created using ReadWriteOnce + 4. Delete resources used for testing + """ + + namespace = "default" + pvc_name = "test-pvc" + pv_name = "test-pv" + pod_name = "test-pod" + + # 1. Create PersistantVolumeClaim + logger.debug(f"create pvc: {pvc_name}") + + pvc_meta = client.V1ObjectMeta(name=pvc_name) + pvc_resources = client.V1ResourceRequirements( + requests={"storage": "1Gi"}, + ) + pvc_spec = client.V1PersistentVolumeClaimSpec( + access_modes=["ReadWriteOnce"], + storage_class_name=storage_class, + resources=pvc_resources, + ) + body_pvc = client.V1PersistentVolumeClaim( + api_version="v1", kind="PersistentVolumeClaim", metadata=pvc_meta, spec=pvc_spec + ) + + api_response = k8s_api_instance.create_namespaced_persistent_volume_claim( + namespace, body_pvc + ) + + # 2. Create a pod which makes use of the PersitantVolumeClaim + logger.debug(f"create pod: {pod_name}") + + pod_vol = client.V1Volume( + name=pv_name, + persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource(pvc_name), + ) + pod_con = client.V1Container( + name="nginx", + image="nginx", + ports=[client.V1ContainerPort(container_port=80)], + volume_mounts=[ + client.V1VolumeMount(name=pv_name, mount_path="/usr/share/nginx/html") + ], + ) + pod_spec = client.V1PodSpec(volumes=[pod_vol], containers=[pod_con]) + pod_body = client.V1Pod( + api_version="v1", + kind="Pod", + metadata=client.V1ObjectMeta(name=pod_name), + spec=pod_spec, + ) + + api_response = k8s_api_instance.create_namespaced_pod( + namespace, pod_body, _preload_content=False + ) + pod_info = json.loads(api_response.read().decode("utf-8")) + pod_status = pod_info["status"]["phase"] + + # Check if pod is up and running: + retries = 0 + while pod_status != "Running" and retries <= 30: + + api_response = k8s_api_instance.read_namespaced_pod( + pod_name, namespace, _preload_content=False + ) + pod_info = json.loads(api_response.read().decode("utf-8")) + pod_status = pod_info["status"]["phase"] + logger.debug(f"retries:{retries} status:{pod_status}") + time.sleep(1) + retries += 1 + + # assert pod_status == "Running" + if pod_status != "Running": + raise SCSTestException( + "pod is not Running not able to setup test Enviornment", + return_code=13, + ) + + # 3. Check if PV got succesfully created using ReadWriteOnce + logger.debug("check if the created PV supports ReadWriteOnce") + + api_response = k8s_api_instance.list_persistent_volume(_preload_content=False) + + pv_info = json.loads(api_response.read().decode("utf-8")) + pv_list = pv_info["items"] + + logger.debug("searching for corresponding pv") + for pv in pv_list: + logger.debug(f"parsing pv: {pv['metadata']['name']}") + if pv["spec"]["claimRef"]["name"] == pvc_name: + logger.debug(f"found pv to pvc: {pvc_name}") + + if pv["status"]["phase"] != "Bound": + raise SCSTestException( + "Not able to bind pv to pvc", + return_code=41, + ) + + if "ReadWriteOnce" not in pv["spec"]["accessModes"]: + raise SCSTestException( + "access mode 'ReadWriteOnce' is not supported", + return_code=42, + ) + + # 4. 
+    # 4. Delete the resources used for testing
+    logger.debug(f"delete pod:{pod_name}")
+    api_response = k8s_api_instance.delete_namespaced_pod(pod_name, namespace)
+    logger.debug(f"delete pvc:{pvc_name}")
+    api_response = k8s_api_instance.delete_namespaced_persistent_volume_claim(
+        pvc_name, namespace
+    )
+
+    return 0
+
+
+def main(argv):
+
+    initialize_logging()
+    return_code = 0
+    return_message = "return_message: FAILED"
+
+    try:
+        opts, args = getopt.gnu_getopt(argv, "k:h", ["kubeconfig=", "help"])
+    except getopt.GetoptError as exc:
+        logger.error(f"{exc}")
+        print_usage()
+        return 1
+
+    kubeconfig = None
+
+    for opt in opts:
+        if opt[0] == "-h" or opt[0] == "--help":
+            print_usage()
+            return 0
+        elif opt[0] == "-k" or opt[0] == "--kubeconfig":
+            kubeconfig = opt[1]
+        else:
+            print_usage()
+            return 2
+
+    # Set up the Kubernetes clients
+    try:
+        logger.debug("setup_k8s_client(kubeconfig)")
+        k8s_core_api, k8s_storage_api = setup_k8s_client(kubeconfig)
+    except Exception as exception_message:
+        logger.info(f"{exception_message}")
+        return_message = f"{exception_message}"
+        gen_sonobuoy_result_file(1, return_message, __file__)
+        return 1
+
+    # Check if a default StorageClass is defined (MANDATORY)
+    try:
+        logger.info("check_default_storageclass()")
+        default_class_name = check_default_storageclass(k8s_storage_api)
+    except SCSTestException as test_exception:
+        logger.info(f"{test_exception}")
+        return_message = f"{test_exception}"
+        return_code = test_exception.return_code
+    except Exception as exception_message:
+        logger.info(f"{exception_message}")
+        return_message = f"{exception_message}"
+        return_code = 1
+
+    # Check if the default PersistentVolumeClaim supports ReadWriteOnce (MANDATORY);
+    # only attempt this if a default StorageClass was found above.
+    if return_code == 0:
+        try:
+            logger.info("check_default_persistentvolumeclaim_readwriteonce()")
+            return_code = check_default_persistentvolumeclaim_readwriteonce(
+                k8s_core_api, default_class_name
+            )
+        except SCSTestException as test_exception:
+            logger.info(f"{test_exception}")
+            return_message = f"{test_exception}"
+            return_code = test_exception.return_code
+        except Exception as exception_message:
+            logger.info(f"{exception_message}")
+            return_message = f"{exception_message}"
+            return_code = 1
+
+    logger.debug(f"return_code:{return_code}")
+
+    if return_code == 0:
+        return_message = "all tests passed"
+
+    gen_sonobuoy_result_file(return_code, return_message, __file__)
+
+    return return_code
+
+
+if __name__ == "__main__":
+    sys.exit(main(sys.argv[1:]))
diff --git a/Tests/kaas/k8s-default-storage-class/k8s-default-storage-class-plugin.yaml b/Tests/kaas/k8s-default-storage-class/k8s-default-storage-class-plugin.yaml
new file mode 100644
index 000000000..3a73f6159
--- /dev/null
+++ b/Tests/kaas/k8s-default-storage-class/k8s-default-storage-class-plugin.yaml
@@ -0,0 +1,13 @@
+sonobuoy-config:
+  driver: Job
+  plugin-name: k8s-default-storage-class
+  result-format: manual
+  result-file: k8s-default-storage-class-check.result.yaml
+
+spec:
+  args:
+    - k8s-default-storage-class-check
+  command:
+    - ./run_checks.sh
+  image: ghcr.io/sovereigncloudstack/standards/k8s-default-storage-class:latest
+  name: k8s-default-storage-class
diff --git a/Tests/kaas/k8s-default-storage-class/run_checks.sh b/Tests/kaas/k8s-default-storage-class/run_checks.sh
new file mode 100755
index 000000000..6205a8594
--- /dev/null
+++ b/Tests/kaas/k8s-default-storage-class/run_checks.sh
@@ -0,0 +1,55 @@
+#!/bin/sh
+
+###############################################################################
+#####                              HELPERS                                #####
+###############################################################################
+
+set -x
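+
+# Illustrative manual invocation outside of Sonobuoy (assumes a cluster is
+# reachable via the usual kubeconfig):
+#   RESULTS_DIR=/tmp/results ./run_checks.sh k8s-default-storage-class-check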
+
+# This is the entrypoint for the image and meant to wrap the
+# logic of gathering/reporting results to the Sonobuoy worker.
+
+results_dir="${RESULTS_DIR:-/tmp/results}"
+mkdir -p "${results_dir}"
+
+# saveResults prepares the results for handoff to the Sonobuoy worker.
+# See: https://github.com/vmware-tanzu/sonobuoy/blob/main/site/content/docs/main/plugins.md
+saveResults() {
+    cd "${results_dir}"
+    echo "${results_dir}"
+
+    # Sonobuoy worker expects a tar file.
+    tar czf results.tar.gz *
+
+    # Signal to the worker that we are done and where to find the results.
+    printf "%s" "${results_dir}/results.tar.gz" > "${results_dir}/done"
+}
+
+# Ensure that we tell the Sonobuoy worker we are done regardless of results.
+trap saveResults EXIT
+
+
+###############################################################################
+#####                          RUN TEST SCRIPTS                           #####
+###############################################################################
+
+# Each script name is expected to be given as an arg. If no args, error out
+# but print one result file for clarity in the results.
+if [ "$#" -eq "0" ]; then
+    echo "No arguments; expects each argument to be a script name" > "${results_dir}/out"
+    exit 1
+fi
+
+# Iterate through the python tests passed as arguments
+i=0
+while [ "$1" != "" ]; do
+    # Run each arg as a command and save the output in the results directory.
+    echo "run testscript: [$1.py]"
+
+    python "$1.py" > "${results_dir}/out_$1"
+    cp "$1.result.yaml" "${results_dir}"
+    i=$((i + 1))
+
+    # Shift all the parameters down by one
+    shift
+done
diff --git a/Tests/scs-compatible-iaas.yaml b/Tests/scs-compatible-iaas.yaml
index f71735410..d0287df61 100644
--- a/Tests/scs-compatible-iaas.yaml
+++ b/Tests/scs-compatible-iaas.yaml
@@ -1,24 +1,48 @@
 name: SCS Compatible IaaS
 url: https://raw.githubusercontent.com/SovereignCloudStack/standards/main/Tests/scs-compatible-iaas.yaml
 versions:
-  - version: v1
-    stabilized_at: 2021-01-01
-    obsoleted_at: 2023-10-31
+  - version: v4
     standards:
+      - name: Standard flavors
+        url: https://raw.githubusercontent.com/SovereignCloudStack/standards/main/Standards/scs-0103-v1-standard-flavors.md
+        check_tools:
+          - executable: ./iaas/standard-flavors/flavors-openstack.py
+            args: "./iaas/scs-0103-v1-flavors.yaml"
+      - name: Standard images
+        url: https://raw.githubusercontent.com/SovereignCloudStack/standards/main/Standards/scs-0104-v1-standard-images.md
+        check_tools:
+          - executable: ./iaas/standard-images/images-openstack.py
+            args: "./iaas/scs-0104-v1-images.yaml"
       - name: Flavor naming
-        url: https://raw.githubusercontent.com/SovereignCloudStack/standards/main/Drafts/flavor-naming.md
+        url: https://raw.githubusercontent.com/SovereignCloudStack/standards/main/Standards/scs-0100-v3-flavor-naming.md
         check_tools:
           - executable: ./iaas/flavor-naming/flavor-names-openstack.py
-            args: "--v1prefer"
+            args: "--mand=./iaas/scs-0100-v3-flavors.yaml"
       - name: Image metadata
         url: https://raw.githubusercontent.com/SovereignCloudStack/standards/main/Standards/scs-0102-v1-image-metadata.md
         check_tools:
          - executable: ./iaas/image-metadata/image-md-check.py
-            args: -v
-      - name: OpenStack Powered Compute v2020.11
-        url: https://opendev.org/openinfra/interop/src/branch/master/guidelines/2020.11.json
-        condition: mandatory
+            args: -s -v
+      - name: OpenStack Powered Compute v2022.11
+        url: https://opendev.org/openinfra/interop/src/branch/master/guidelines/2022.11.json
         # Unfortunately, no wrapper to run refstack yet, needs to be added
+  - version: v3
+    stabilized_at: 2023-06-15
+    obsoleted_at: 2024-04-30
+    standards:
+      
- name: Flavor naming + url: https://raw.githubusercontent.com/SovereignCloudStack/standards/main/Standards/scs-0100-v3-flavor-naming.md + check_tools: + - executable: ./iaas/flavor-naming/flavor-names-openstack.py + args: "--v3" + # Note: "--v3 --v2plus" would outlaw the v1 flavor names. Don't do this yet. + - name: Image metadata + url: https://raw.githubusercontent.com/SovereignCloudStack/standards/main/Standards/scs-0102-v1-image-metadata.md + check_tools: + - executable: ./iaas/image-metadata/image-md-check.py + args: -v + - name: OpenStack Powered Compute v2022.11 + url: https://opendev.org/openinfra/interop/src/branch/master/guidelines/2022.11.json - version: v2 stabilized_at: 2023-03-23 obsoleted_at: 2023-11-30 @@ -34,24 +58,19 @@ versions: args: -v - name: OpenStack Powered Compute v2022.11 url: https://opendev.org/openinfra/interop/src/branch/master/guidelines/2022.11.json - condition: mandatory - # Unfortunately, no wrapper to run refstack yet, needs to be added - - version: v3 - stabilized_at: 2023-06-15 - obsoleted_at: 2024-04-30 + - version: v1 + stabilized_at: 2021-01-01 + obsoleted_at: 2023-10-31 standards: - name: Flavor naming - url: https://raw.githubusercontent.com/SovereignCloudStack/standards/main/Standards/scs-0100-v3-flavor-naming.md + url: https://raw.githubusercontent.com/SovereignCloudStack/standards/main/Standards/scs-0100-v1-flavor-naming.md check_tools: - executable: ./iaas/flavor-naming/flavor-names-openstack.py - args: "--v3" - # Note: "--v3 --v2plus" would outlaw the v1 flavor names. Don't do this yet. + args: "--v1prefer" - name: Image metadata url: https://raw.githubusercontent.com/SovereignCloudStack/standards/main/Standards/scs-0102-v1-image-metadata.md check_tools: - executable: ./iaas/image-metadata/image-md-check.py args: -v - - name: OpenStack Powered Compute v2022.11 - url: https://opendev.org/openinfra/interop/src/branch/master/guidelines/2022.11.json - condition: mandatory - # Unfortunately, no wrapper to run refstack yet, needs to be added + - name: OpenStack Powered Compute v2020.11 + url: https://opendev.org/openinfra/interop/src/branch/master/guidelines/2020.11.json diff --git a/Tests/scs-compliance-check.py b/Tests/scs-compliance-check.py index 9d1d6fc46..c80675467 100755 --- a/Tests/scs-compliance-check.py +++ b/Tests/scs-compliance-check.py @@ -23,251 +23,282 @@ """ import os +import os.path import sys +import shlex import getopt # import time import datetime import subprocess import copy +from functools import partial +from itertools import chain import yaml -def usage(): - "Output usage information" - print("Usage: scs-compliance-check.py [options] compliance-spec.yaml layer [layer [layer]]") - print("Options: -v/--verbose: More verbose output") - print(" -q/--quiet: Don't output anything but errors") - print(" -s/--single-layer: Don't perform required checks for dependant layers") - print(" -d/--date YYYY-MM-DD: Check standards valid on specified date instead of today") - print(" -V/--version VERS: Force version VERS of the standard (instead of deriving from date)") - print(" -c/--os-cloud CLOUD: Use specified cloud env (instead of OS_CLOUD env var)") - print(" -o/--output path: Generate yaml report of compliance check under given path") +# valid keywords for various parts of the spec, to be checked using `check_keywords` +KEYWORDS = { + 'spec': ('name', 'url', 'versions', 'prerequisite'), + 'version': ('version', 'standards', 'stabilized_at', 'obsoleted_at'), + 'standard': ('check_tools', 'url', 'name', 'condition'), + 'checktool': 
('executable', 'args', 'condition', 'classification'), +} -def is_valid_standard(now, stable, obsolete): - "Check if now is after stable and not after obsolete" - if not stable: - return False - if now < stable: - return False - if obsolete and now > obsolete: - return False - return True +def usage(file=sys.stdout): + """Output usage information""" + print("""Usage: scs-compliance-check.py [options] compliance-spec.yaml +Options: -v/--verbose: More verbose output + -q/--quiet: Don't output anything but errors + -s/--single-scope: Don't perform required checks for prerequisite scopes + -d/--date YYYY-MM-DD: Check standards valid on specified date instead of today + -V/--version VERS: Force version VERS of the standard (instead of deriving from date) + -c/--os-cloud CLOUD: Use specified cloud env (instead of OS_CLOUD env var) + -o/--output REPORT_PATH: Generate yaml report of compliance check under given path + -C/--critical-only: Only return critical errors in return code +With -C, the return code will be nonzero precisely when the tests couldn't be run to completion. +""".strip(), file=file) -MYPATH = "." - -def add_search_path(arg0): - """Store path of scs-compliance-check.py to search path, as check tools - referenced in compliance.spec might be relative to it. - """ - global MYPATH - arg0_pidx = arg0.rfind('/') - assert arg0_pidx != -1 - MYPATH = arg0[:arg0_pidx] - # os.environ['PATH'] += ":" + MYPATH - - -def run_check_tool(executable, args, verbose=False, quiet=False): - "Run executable and return exit code" +def run_check_tool(executable, args, env=None, cwd=None): + """Run executable and return `CompletedProcess` instance""" if executable.startswith("http://") or executable.startswith("https://"): - print(f"ERROR: remote check_tool {executable} not yet supported", file=sys.stderr) # TODO: When we start supporting this, consider security concerns # Running downloaded code is always risky # - Certificate pinning for https downloads # - Refuse http # - Check sha256/512 or gpg signature - return 999999 + raise NotImplementedError(f"remote check_tool {executable} not yet supported") if executable.startswith("file://"): executable = executable[7:] - if executable[0] == "/": - exe = [executable, ] - else: - exe = [MYPATH + "/" + executable, ] - if args: - exe.extend(args.split(" ")) - # print(f"{exe}") - # compl = subprocess.run(exe, capture_output=True, text=True, check=False) - compl = subprocess.run(exe, stdout=subprocess.PIPE, stderr=subprocess.PIPE, - encoding='UTF-8', check=False) - if verbose: - print(compl.stdout) - if not quiet: - print(compl.stderr, file=sys.stderr) - return compl.returncode + exe = [os.path.abspath(os.path.join(cwd or ".", executable)), *shlex.split(args)] + return subprocess.run( + exe, stdout=subprocess.PIPE, stderr=subprocess.PIPE, + encoding='UTF-8', check=False, env=env, cwd=cwd, + ) def errcode_to_text(err): "translate error code to text" - if err == 0: - return "PASSED" - return f"{err} ERRORS" - - -def dictval(dct, key): - "Helper: Return dct[key] if it exists, None otherwise" - if key in dct: - return dct[key] - return None - + return f"{err} ERRORS" if err else "PASSED" -def search_version(layerdict, checkdate, forceversion=None): - "Return dict with latest matching version, None if not found" - bestdays = datetime.timedelta(999999999) # Infinity - bestversion = None - for versdict in layerdict: - # print(f'Version {versdict["version"]}') - if forceversion and forceversion == versdict["version"]: - if "stabilized_at" not in versdict: - 
print(f"WARNING: Forced version {forceversion} not stable", - file=sys.stderr) - return versdict - stabilized = dictval(versdict, "stabilized_at") - if is_valid_standard(checkdate, stabilized, dictval(versdict, "obsoleted_at")): - diffdays = checkdate - stabilized - if diffdays < bestdays: - bestdays = diffdays - bestversion = versdict - # print(f"Identified best version {bestversion}") - if forceversion and bestversion and not bestversion["version"] == forceversion: - print(f"Wanted version {forceversion} which was not found") - sys.exit(3) - return bestversion +class Config: + def __init__(self): + self.arg0 = None + self.verbose = False + self.quiet = False + self.os_cloud = os.environ.get("OS_CLOUD") + self.checkdate = datetime.date.today() + self.version = None + self.output = None + self.classes = ["light", "medium", "heavy"] + self.critical_only = False -def optparse(argv): - "Parse options. Return (args, verbose, quiet, checkdate, version, output)." - verbose = False - quiet = False - checkdate = datetime.date.today() - version = None - output = None - try: - opts, args = getopt.gnu_getopt(argv, "hvqd:V:sc:o:", - ("help", "verbose", "quiet", "date=", "version=", - "os-cloud=", "output=")) - except getopt.GetoptError as exc: - print(f"Option error: {exc}", file=sys.stderr) - usage() - sys.exit(1) - for opt in opts: - if opt[0] == "-h" or opt[0] == "--help": + def apply_argv(self, argv): + """Parse options. May exit the program.""" + try: + opts, args = getopt.gnu_getopt(argv, "hvqd:V:sc:o:r:C", ( + "help", "verbose", "quiet", "date=", "version=", + "os-cloud=", "output=", "resource-usage=", "critical-only" + )) + except getopt.GetoptError as exc: + print(f"Option error: {exc}", file=sys.stderr) usage() - sys.exit(0) - elif opt[0] == "-v" or opt[0] == "--verbose": - verbose = True - elif opt[0] == "-q" or opt[0] == "--quiet": - quiet = True - elif opt[0] == "-d" or opt[0] == "--date": - checkdate = datetime.date.fromisoformat(opt[1]) - elif opt[0] == "-V" or opt[0] == "--version": - version = opt[1] - elif opt[0] == "-c" or opt[0] == "--os-cloud": - os.environ["OS_CLOUD"] = opt[1] - elif opt[0] == "-o" or opt[0] == "--output": - output = opt[1] - else: - print(f"Error: Unknown argument {opt[0]}", file=sys.stderr) - if len(args) < 1: - usage() - sys.exit(1) - return (args, verbose, quiet, checkdate, version, output) + sys.exit(1) + for opt in opts: + if opt[0] == "-h" or opt[0] == "--help": + usage() + sys.exit(0) + elif opt[0] == "-v" or opt[0] == "--verbose": + self.verbose = True + elif opt[0] == "-q" or opt[0] == "--quiet": + self.quiet = True + elif opt[0] == "-d" or opt[0] == "--date": + self.checkdate = datetime.date.fromisoformat(opt[1]) + elif opt[0] == "-V" or opt[0] == "--version": + self.version = opt[1] + elif opt[0] == "-c" or opt[0] == "--os-cloud": + self.os_cloud = opt[1] + elif opt[0] == "-o" or opt[0] == "--output": + self.output = opt[1] + elif opt[0] == "-r" or opt[0] == "--resource-usage": + self.classes = [x.strip() for x in opt[1].split(",")] + elif opt[0] == "-C" or opt[0] == "--critical-only": + self.critical_only = True + else: + print(f"Error: Unknown argument {opt[0]}", file=sys.stderr) + if len(args) < 1: + usage(file=sys.stderr) + sys.exit(1) + self.arg0 = args[0] def condition_optional(cond, default=False): - """check whether condition is in dict cond + """ + check whether condition is in dict cond - If set to mandatory, return False - If set to optional, return True - If set to something else, error out - If unset, return default """ - if "condition" 
not in cond:
-        return default
-    if cond["condition"] == "optional":
-        return True
-    if cond["condition"] == "mandatory":
-        return False
-    print(f"ERROR in spec parsing condition: {cond['condition']}", file=sys.stderr)
-    return default
+    value = cond.get("condition")
+    value = {None: default, "optional": True, "mandatory": False}.get(value)
+    if value is None:
+        print(f"ERROR in spec parsing condition: {cond['condition']}", file=sys.stderr)
+        value = default
+    return value
+
+
+def check_keywords(ctx, d):
+    valid = KEYWORDS[ctx]
+    invalid = [k for k in d if k not in valid]
+    if invalid:
+        print(f"ERROR in spec: {ctx} uses unknown keywords: {','.join(invalid)}", file=sys.stderr)
+    return len(invalid)
 
 
-def optstr(optional):
-    "return 'optional ' if True, otherwise ''"
-    if optional:
-        return 'optional '
-    return ''
+def suppress(*args, **kwargs):
+    return
+
+
+def invoke_check_tool(check, check_env, check_cwd):
+    """Run the check tool and return an invocation dict to use in the report"""
+    try:
+        compl = run_check_tool(check["executable"], check.get("args", ''), env=check_env, cwd=check_cwd)
+    except Exception as e:
+        invocation = {
+            "rc": 127,
+            "stdout": [],
+            "stderr": [f"CRITICAL: {e!s}"],
+        }
+    else:
+        invocation = {
+            "rc": compl.returncode,
+            "stdout": compl.stdout.splitlines(),
+            "stderr": compl.stderr.splitlines(),
+        }
+    # Count signal lines (INFO/WARNING/ERROR/CRITICAL) in the tool's output.
+    for signal in ('info', 'warning', 'error', 'critical'):
+        invocation[signal] = len([
+            line
+            for line in chain(invocation["stderr"], invocation["stdout"])
+            if line.lower().startswith(signal)
+        ])
+    return invocation
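+
+
+# For reference, the invocation dict assembled by invoke_check_tool has this
+# shape (illustrative, assumed values):
+#   {"rc": 0, "stdout": ["..."], "stderr": ["WARNING: foo"],
+#    "info": 0, "warning": 1, "error": 0, "critical": 0}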
+
+
 def main(argv):
     """Entry point for the checker"""
-    args, verbose, quiet, checkdate, version, output = optparse(argv)
-    with open(args[0], "r", encoding="UTF-8") as specfile:
-        specdict = yaml.load(specfile, Loader=yaml.SafeLoader)
+    config = Config()
+    config.apply_argv(argv)
+    if not config.os_cloud:
+        print("You need to have OS_CLOUD set or pass --os-cloud=CLOUD.", file=sys.stderr)
+        return 1
+    printv = suppress if not config.verbose else partial(print, file=sys.stderr)
+    printnq = suppress if config.quiet else partial(print, file=sys.stderr)
+    with open(config.arg0, "r", encoding="UTF-8") as specfile:
+        spec = yaml.load(specfile, Loader=yaml.SafeLoader)
+    check_env = {'OS_CLOUD': config.os_cloud, **os.environ}
+    check_cwd = os.path.dirname(config.arg0) or os.getcwd()
+    allaborts = 0
     allerrors = 0
-    report = {}
-    if output:
-        for key in "name", "url":
-            report[key] = dictval(specdict, key)
-        report["os_cloud"] = os.environ["OS_CLOUD"]
-        # TODO: Add kubeconfig context as well
-        report["checked_at"] = checkdate
-    if "prerequisite" in specdict:
+    report = {
+        "spec": copy.deepcopy(spec),
+        "run": {
+            "argv": argv,
+            "os_cloud": config.os_cloud,
+            # TODO: Add kubeconfig context as well
+            "checked_at": config.checkdate,
+            "classes": config.classes,
+            "forced_version": config.version or None,
+            "aborts": 0,
+            "errors": 0,
+            "versions": {},
+            "invocations": {},
+        },
+    }
+    check_keywords('spec', spec)
+    if config.version:
+        spec["versions"] = [vd for vd in spec["versions"] if vd["version"] == config.version]
+    if "prerequisite" in spec:
         print("WARNING: prerequisite not yet implemented!", file=sys.stderr)
-    bestversion = search_version(specdict["versions"], checkdate, version)
-    if not bestversion:
-        print(f"No valid standard found for {checkdate}", file=sys.stderr)
-        return 2
-    errors = 0
-    if not quiet:
-        print(f"Testing {specdict['name']} version {bestversion['version']}")
-    if "standards" not in bestversion:
-        print(f"WARNING: No standards defined yet for {specdict['name']} version {bestversion['version']}",
-              file=sys.stderr)
-    if output:
-        report[specdict['name']] = [copy.deepcopy(bestversion)]
-    for standard in bestversion["standards"]:
-        optional = condition_optional(standard)
-        if not quiet:
-            print("*******************************************************")
-            print(f"Testing {optstr(optional)}standard {standard['name']} ...")
-            print(f"Reference: {standard['url']} ...")
-        if "check_tools" not in standard:
-            print(f"WARNING: No compliance check tool implemented yet for {standard['name']}")
-            error = 0
-        else:
-            chkidx = 0
-            for check in standard["check_tools"]:
-                args = dictval(check, 'args')
-                error = run_check_tool(check["executable"], args, verbose, quiet)
-                if output:
-                    version_index = 0  # report[layer].index(bestversion)
-                    standard_index = bestversion["standards"].index(standard)
-                    report[specdict['name']][version_index]["standards"][standard_index]["check_tools"][chkidx]["errors"] = error
+    vrs = report["run"]["versions"]
+    memo = report["run"]["invocations"]  # memoize check tool results
+    matches = 0
+    for vd in spec["versions"]:
+        check_keywords('version', vd)
+        stb_date = vd.get("stabilized_at")
+        obs_date = vd.get("obsoleted_at")
+        futuristic = not stb_date or config.checkdate < stb_date
+        outdated = obs_date and obs_date < config.checkdate
+        vr = vrs[vd["version"]] = {
+            "status": outdated and "outdated" or futuristic and "preview" or "valid",
+            "passed": False,
+            "aborts": 0,
+            "errors": 0,
+            "invocations": [],
+        }
+        if outdated and not config.version:
+            continue
+        matches += 1
+        if config.version and outdated:
+            print(f"WARNING: Forced version {config.version} outdated", file=sys.stderr)
+        if config.version and futuristic:
+            print(f"INFO: Forced version {config.version} not (yet) stable", file=sys.stderr)
+        printnq(f"Testing {spec['name']} version {vd['version']}")
+        if "standards" not in vd:
+            print(f"WARNING: No standards defined yet for {spec['name']} version {vd['version']}",
+                  file=sys.stderr)
+        errors = 0
+        aborts = 0
+        invocations = vr["invocations"]
+        for standard in vd.get("standards", ()):
+            check_keywords('standard', standard)
+            optional = condition_optional(standard)
+            printnq("*******************************************************")
+            printnq(f"Testing {'optional ' * optional}standard {standard['name']} ...")
+            printnq(f"Reference: {standard['url']} ...")
+            if "check_tools" not in standard:
+                printnq(f"WARNING: No check tool specified for {standard['name']}", file=sys.stderr)
+            for check in standard.get("check_tools", ()):
+                check_keywords('checktool', check)
+                if check.get("classification", "light") not in config.classes:
+                    print(f"skipping check tool '{check['executable']}' because of resource classification")
+                    continue
+                args = check.get('args', '')
+                memo_key = f"{check['executable']} {args}".strip()
+                invocation = memo.get(memo_key)
+                if invocation is None:
+                    invocation = invoke_check_tool(check, check_env, check_cwd)
+                    printv("\n".join(invocation["stdout"]))
+                    printnq("\n".join(invocation["stderr"]))
+                    memo[memo_key] = invocation
+                invocations.append(memo_key)
+                abort = invocation["critical"]
+                error = invocation["error"]
+                printnq(f"... returned {error} errors, {abort} aborts")
                 if not condition_optional(check, optional):
+                    aborts += abort
                     errors += error
-                if not quiet:
-                    print(f"... returned {error} errors")
returned {error} errors") - chkidx += 1 - for kwd in check: - if kwd not in ('executable', 'args', 'condition', 'classification'): - print(f"ERROR in spec: check_tools.{kwd} is an unknown keyword", - file=sys.stderr) - for kwd in standard: - if kwd not in ('check_tools', 'url', 'name', 'condition'): - print(f"ERROR in spec: standard.{kwd} is an unknown keyword", file=sys.stderr) - if output: - report[specdict['name']][version_index]["errors"] = errors - with open(output, 'w', encoding='UTF-8') as file: - output = yaml.safe_dump(report, file, default_flow_style=False, sort_keys=False) - if not quiet: - print("*******************************************************") - print(f"Verdict for os_cloud {os.environ['OS_CLOUD']}, {specdict['name']}, " - f"version {bestversion['version']}: {errcode_to_text(errors)}") - allerrors += errors - return allerrors + vr["aborts"] = aborts + vr["errors"] = errors + vr["passed"] = not (aborts + errors) + printnq("*******************************************************") + printnq(f"Verdict for os_cloud {config.os_cloud}, {spec['name']}, " + f"version {vd['version']}: {errcode_to_text(aborts + errors)}") + allaborts += aborts + allerrors += errors + report["run"]["aborts"] = allaborts + report["run"]["errors"] = allerrors + if not matches: + print(f"CRITICAL: No valid scope found for {config.checkdate}", file=sys.stderr) + allaborts += 1 # note: this is after we put the number into the report, so only for return code + if config.output: + with open(config.output, 'w', encoding='UTF-8') as file: + yaml.safe_dump(report, file, default_flow_style=False, sort_keys=False) + return min(127, allaborts + (0 if config.critical_only else allerrors)) if __name__ == "__main__": - add_search_path(sys.argv[0]) sys.exit(main(sys.argv[1:])) diff --git a/playbooks/clouds.yaml.j2 b/playbooks/clouds.yaml.j2 new file mode 100644 index 000000000..78b329d19 --- /dev/null +++ b/playbooks/clouds.yaml.j2 @@ -0,0 +1,55 @@ +--- +clouds: + gx-scs: + auth: + auth_url: https://api.gx-scs.sovereignit.cloud:5000 + application_credential_id: "{{ clouds_conf.gx_scs_ac_id }}" + application_credential_secret: "{{ clouds_conf.gx_scs_ac_secret }}" + region_name: "RegionOne" + interface: "public" + identity_api_version: 3 + auth_type: "v3applicationcredential" + pco-prod1: + region_name: "prod1" + interface: "public" + identity_api_version: 3 + auth: + auth_url: https://prod1.api.pco.get-cloud.io:5000 + application_credential_id: "{{ clouds_conf.pco_prod1_ac_id }}" + application_credential_secret: "{{ clouds_conf.pco_prod1_ac_secret }}" + auth_type: "v3applicationcredential" + pco-prod2: + region_name: "prod2" + interface: "public" + identity_api_version: 3 + auth: + auth_url: https://prod2.api.pco.get-cloud.io:5000 + application_credential_id: "{{ clouds_conf.pco_prod2_ac_id }}" + application_credential_secret: "{{ clouds_conf.pco_prod2_ac_secret }}" + auth_type: "v3applicationcredential" + pco-prod3: + region_name: "prod3" + interface: "public" + identity_api_version: 3 + auth: + auth_url: https://prod3.api.pco.get-cloud.io:5000 + application_credential_id: "{{ clouds_conf.pco_prod3_ac_id }}" + application_credential_secret: "{{ clouds_conf.pco_prod3_ac_secret }}" + auth_type: "v3applicationcredential" + regio-a: + region_name: "RegionA" + interface: "public" + identity_api_version: 3 + auth: + auth_url: https://keystone.services.a.regiocloud.tech/v3/ + application_credential_id: "{{ clouds_conf.regio_a_ac_id }}" + application_credential_secret: "{{ clouds_conf.regio_a_ac_secret }}" + auth_type: 
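+  # Each entry in this file can be selected by the compliance check via
+  # OS_CLOUD or -c/--os-cloud; the Zuul jobs pass the cloud name from the
+  # job vars (see playbooks/compliance_check.yaml).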
"v3applicationcredential" + wavestack: + interface: "public" + identity_api_version: 3 + auth: + auth_url: https://api.wavestack.de:5000 + application_credential_id: "{{ clouds_conf.wavestack_ac_id }}" + application_credential_secret: "{{ clouds_conf.wavestack_ac_secret }}" + auth_type: "v3applicationcredential" diff --git a/playbooks/compliance_check.yaml b/playbooks/compliance_check.yaml new file mode 100644 index 000000000..eafce04af --- /dev/null +++ b/playbooks/compliance_check.yaml @@ -0,0 +1,57 @@ +--- +- name: Run compliance check tool + hosts: all + roles: + - role: ensure-pip # https://zuul-ci.org/docs/zuul-jobs/latest/python-roles.html#role-ensure-pip + tasks: + - name: Create cloud config dir + ansible.builtin.file: + path: "~/.config/openstack" + state: directory + recurse: true + mode: "0700" + + - name: Create cloud config file + ansible.builtin.template: + src: "clouds.yaml.j2" + dest: "~/.config/openstack/clouds.yaml" + mode: "0600" + no_log: true + + - name: Copy Tests on the node + ansible.builtin.copy: + src: "../Tests" + dest: "~/" + mode: 0500 + no_log: false + + - name: Install dependencies + ansible.builtin.pip: + requirements: /home/ubuntu/Tests/requirements.txt + + - name: Run compliance script + ansible.builtin.command: + cmd: python3 /home/ubuntu/Tests/scs-compliance-check.py /home/ubuntu/Tests/scs-compatible-iaas.yaml -c {{ cloud }} -o {{ cloud }}-iaas.yaml -C + register: result + changed_when: true + failed_when: result.rc != 0 + + - ansible.builtin.debug: + msg: "{{ result.stdout }} {{ result.stderr }}" + + - name: Copy result YAML + ansible.builtin.synchronize: + dest: "{{ zuul.executor.log_root }}/{{ cloud }}-iaas.yaml" + mode: pull + src: "{{ cloud }}-iaas.yaml" + verify_host: true + owner: no + group: no + + - name: Return artifact URL + zuul_return: + data: + zuul: + artifacts: + - name: "{{ cloud }}-iaas.yaml" + url: "{{ cloud }}-iaas.yaml"