
Auto Generate OpenAPI/Swagger Specification from Annotated Java Code, with maven, Java 11, Eclipse Microprofile, Java EE 8


OpenAPI Specification (formerly Swagger Specification)

Auto-generating documentation for your REST API is a great way to document it: the documentation is generated straight from the code, so it never goes stale, and you follow a specification that people have thought through.

OpenAPI (part of Eclipse MicroProfile) Annotations

Maven dependency.

><dependency>
    <groupId>org.eclipse.microprofile</groupId>
    <artifactId>microprofile</artifactId>
    <version>3.0</version>
    <type>pom</type>
    <scope>provided</scope>
</dependency>

And the annotated Java code.

>package se.magnuskkarlsson.example_openapi_swagger.boundary;

import java.util.ArrayList;
import java.util.List;

import javax.ws.rs.Consumes;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.MediaType;

import org.eclipse.microprofile.openapi.annotations.Operation;
import org.eclipse.microprofile.openapi.annotations.enums.SchemaType;
import org.eclipse.microprofile.openapi.annotations.media.Content;
import org.eclipse.microprofile.openapi.annotations.media.Schema;
import org.eclipse.microprofile.openapi.annotations.parameters.Parameter;
import org.eclipse.microprofile.openapi.annotations.responses.APIResponse;
import org.eclipse.microprofile.openapi.annotations.responses.APIResponses;

import se.magnuskkarlsson.example_openapi_swagger.entity.Person;

@Path("/persons")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public class PersonResource {

    @GET
    @Operation(summary = "Get all persons.", description = "Get all persons in DB.")
    @APIResponses(value = { //
            @APIResponse( //
                    responseCode = "500", //
                    description = "Internal Error", //
                    content = @Content(mediaType = MediaType.APPLICATION_JSON)),
            @APIResponse( //
                    responseCode = "200", //
                    description = "All persons in DB.", //
                    content = @Content( //
                            mediaType = MediaType.APPLICATION_JSON, //
                            schema = @Schema(implementation = Person.class))) })
    public List<Person> search( //
            @Parameter( //
                    description = "The name to search for.", //
                    required = false, //
                    example = "*he*", //
                    schema = @Schema(type = SchemaType.STRING)) //
            @QueryParam("name") String name) {
        var persons = new ArrayList<Person>();

        var person1 = new Person().setName("FOO").setAge(24);
        persons.add(person1);

        var person2 = new Person().setName("BAR").setAge(51);
        persons.add(person2);

        return persons;
    }

}

And the Maven plugin, which auto-generates the OpenAPI specification and writes it as a YAML file in your webapp root folder.

><!-- https://github.com/kongchen/swagger-maven-plugin -->
<plugin>
    <groupId>com.github.kongchen</groupId>
    <artifactId>swagger-maven-plugin</artifactId>
    <version>3.1.8</version>
    <configuration>
        <apiSources>
            <apiSource>
                <springmvc>false</springmvc>
                <locations>se.magnuskkarlsson.example_openapi_swagger.boundary</locations>
                <!-- <schemes>http</schemes> <host>localhost:8081</host> -->
                <basePath>/${project.build.finalName}</basePath>
                <info>
                    <title>Users API</title>
                    <version>v1</version>
                    <description>Users rest endpoints</description>
                </info>
                <outputFormats>yaml</outputFormats>
                <swaggerDirectory>${basedir}/src/main/webapp</swaggerDirectory>
            </apiSource>
        </apiSources>
    </configuration>
    <executions>
        <execution>
            <phase>compile</phase>
            <goals>
                <goal>generate</goal>
            </goals>
        </execution>
    </executions>
    <dependencies>
        <dependency>
            <groupId>javax.xml.bind</groupId>
            <artifactId>jaxb-api</artifactId>
            <version>2.3.1</version>
        </dependency>
    </dependencies>
</plugin>

Generated OpenAPI YAML Specification

>---
swagger: "2.0"
info:
  description: "Users rest endpoints"
  version: "v1"
  title: "Users API"
basePath: "/example-openapi-swagger-1.0.0-SNAPSHOT"
paths:
  /persons:
    get:
      operationId: "search"
      consumes:
      - "application/json"
      produces:
      - "application/json"
      parameters:
      - name: "name"
        in: "query"
        required: false
        type: "string"
      responses:
        200:
          description: "successful operation"
          schema:
            type: "array"
            items:
              $ref: "#/definitions/Person"
definitions:
  Person:
    type: "object"
    properties:
      name:
        type: "string"
      age:
        type: "integer"
        format: "int32"

Complete pom.xml

><?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">

    <modelVersion>4.0.0</modelVersion>
    <groupId>se.magnuskkarlsson</groupId>
    <artifactId>example-openapi-swagger</artifactId>
    <version>1.0.0-SNAPSHOT</version>
    <packaging>war</packaging>

    <properties>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
        <project.build.outputEncoding>UTF-8</project.build.outputEncoding>
        <failOnMissingWebXml>false</failOnMissingWebXml>
        <bouncycastle.version>1.65</bouncycastle.version>
        <hibernate.version>5.3.14.Final</hibernate.version>
        <hibernate-validator.version>6.0.18.Final</hibernate-validator.version>
        <primefaces.version>10.0.0</primefaces.version>
    </properties>

    <dependencies>
        <!-- Java EE 8 -->
        <dependency>
            <groupId>javax</groupId>
            <artifactId>javaee-api</artifactId>
            <version>8.0</version>
            <scope>provided</scope>
        </dependency>
        <!-- JBoss EAP 7.3 -->
        <dependency>
            <groupId>org.eclipse.microprofile</groupId>
            <artifactId>microprofile</artifactId>
            <version>3.0</version>
            <type>pom</type>
            <scope>provided</scope>
        </dependency>

        <!-- Test Support -->
        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>4.12</version>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.hamcrest</groupId>
            <artifactId>hamcrest-core</artifactId>
            <version>2.1</version>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.mockito</groupId>
            <artifactId>mockito-all</artifactId>
            <version>1.10.19</version>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>com.github.javafaker</groupId>
            <artifactId>javafaker</artifactId>
            <version>0.17.2</version>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>com.h2database</groupId>
            <artifactId>h2</artifactId>
            <version>1.4.199</version>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
            <version>8.0.17</version>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.hibernate</groupId>
            <artifactId>hibernate-core</artifactId>
            <version>${hibernate.version}</version>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.hibernate</groupId>
            <artifactId>hibernate-envers</artifactId>
            <version>${hibernate.version}</version>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.hibernate.validator</groupId>
            <artifactId>hibernate-validator</artifactId>
            <version>${hibernate-validator.version}</version>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.glassfish</groupId>
            <artifactId>jakarta.el</artifactId>
            <version>3.0.2</version>
            <scope>test</scope>
        </dependency>
    </dependencies>

    <build>
        <pluginManagement>
            <plugins>
                <plugin>
                    <groupId>org.apache.maven.plugins</groupId>
                    <artifactId>maven-compiler-plugin</artifactId>
                    <version>3.8.1</version>
                    <configuration>
                        <release>11</release>
                        <showDeprecation>true</showDeprecation>
                        <showWarnings>true</showWarnings>
                    </configuration>
                </plugin>
            </plugins>
        </pluginManagement>
        <plugins>
            <!-- https://github.com/kongchen/swagger-maven-plugin -->
            <plugin>
                <groupId>com.github.kongchen</groupId>
                <artifactId>swagger-maven-plugin</artifactId>
                <version>3.1.8</version>
                <configuration>
                    <apiSources>
                        <apiSource>
                            <springmvc>false</springmvc>
                            <locations>se.magnuskkarlsson.example_openapi_swagger.boundary</locations>
                            <!-- <schemes>http</schemes> <host>localhost:8081</host> -->
                            <basePath>/${project.build.finalName}</basePath>
                            <info>
                                <title>Users API</title>
                                <version>v1</version>
                                <description>Users rest endpoints</description>
                            </info>
                            <outputFormats>yaml</outputFormats>
                            <swaggerDirectory>${basedir}/src/main/webapp</swaggerDirectory>
                        </apiSource>
                    </apiSources>
                </configuration>
                <executions>
                    <execution>
                        <phase>compile</phase>
                        <goals>
                            <goal>generate</goal>
                        </goals>
                    </execution>
                </executions>
                <dependencies>
                    <dependency>
                        <groupId>javax.xml.bind</groupId>
                        <artifactId>jaxb-api</artifactId>
                        <version>2.3.1</version>
                    </dependency>
                </dependencies>
            </plugin>
        </plugins>
    </build>

</project>

MicroProfile OpenAPI on Wildfly 23 and JBoss EAP 7.3.x


Introduction

In my last blog post I showed a simple example app annotated with Eclipse MicroProfile OpenAPI annotations. See https://magnus-k-karlsson.blogspot.com/2021/05/auto-generate-openapiswagger.html

Here we will deploy it on Wildfly and call the OpenAPI endpoint.

Wildfly 23.0.2.Final

On Wildfly, Eclipse MicroProfile is already installed; all you need is to start the server with the correct configuration.

>$ ./standalone.sh -c standalone-microprofile.xml
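To deploy the example, you can drop the war in the deployments directory (a sketch; the WILDFLY_HOME path is an assumption, and the war is assumed renamed to example-openapi-swagger.war to get the clean context root seen in the output below):

>$ cp target/example-openapi-swagger-1.0.0-SNAPSHOT.war \
    $WILDFLY_HOME/standalone/deployments/example-openapi-swagger.war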

And then call it:

>$ curl http://localhost:8080/openapi
---
openapi: 3.0.3
info:
  title: example-openapi-swagger.war
  version: "1.0"
servers:
- url: /example-openapi-swagger
paths:
  /api/persons:
    get:
      summary: Get all persons.
      description: Get all persons in DB.
      parameters:
      - name: name
        in: query
        description: The name to search for.
        required: false
        schema:
          type: string
        example: '*he*'
      responses:
        "500":
          description: Internal Error
          content:
            application/json: {}
        "200":
          description: All persons in DB.
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/Person'
components:
  schemas:
    Person:
      type: object
      properties:
        age:
          format: int32
          type: integer
        name:
          type: string

And to get JSON instead:

>$ curl http://localhost:8080/openapi?format=JSON  
{
  "openapi" : "3.0.3",
  "info" : {
    "title" : "example-openapi-swagger.war",
    "version" : "1.0"
  },
  "servers" : [ {
    "url" : "/example-openapi-swagger"
  } ],
  "paths" : {
    "/api/persons" : {
      "get" : {
        "summary" : "Get all persons.",
        "description" : "Get all persons in DB.",
        "parameters" : [ {
          "name" : "name",
          "in" : "query",
          "description" : "The name to search for.",
          "required" : false,
          "schema" : {
            "type" : "string"
          },
          "example" : "*he*"
        } ],
        "responses" : {
          "500" : {
            "description" : "Internal Error",
            "content" : {
              "application/json" : { }
            }
          },
          "200" : {
            "description" : "All persons in DB.",
            "content" : {
              "application/json" : {
                "schema" : {
                  "$ref" : "#/components/schemas/Person"
                }
              }
            }
          }
        }
      }
    }
  },
  "components" : {
    "schemas" : {
      "Person" : {
        "type" : "object",
        "properties" : {
          "age" : {
            "format" : "int32",
            "type" : "integer"
          },
          "name" : {
            "type" : "string"
          }
        }
      }
    }
  }
}

JBoss EAP 7.3

JBoss EAP does not come bundled with MicroProfile OpenAPI; you need to install JBoss EAP XP 2.0.0. See https://access.redhat.com/documentation/en-us/red_hat_jboss_enterprise_application_platform/7.3/pdf/using_eclipse_microprofile_with_jboss_eap_xp_2.0.0/Red_Hat_JBoss_Enterprise_Application_Platform-7.3-Using_Eclipse_MicroProfile_with_JBoss_EAP_XP_2.0.0-en-US.pdf

Getting Started with Red Hat OpenShift Local (formerly Red Hat CodeReady Containers, CRC)


2.3. Installing CRC - https://crc.dev/crc/#installing_gsg

After the download you have two files:

  • /home/magnuskkarlsson/Downloads/crc-linux-amd64.tar.xz
  • /home/magnuskkarlsson/Downloads/pull-secret
$ cd ~/Downloads
$ tar xvf crc-linux-amd64.tar.xz
$ mkdir -p ~/bin
$ cp ~/Downloads/crc-linux-*-amd64/crc ~/bin
$ crc version
CRC version: 2.3.0+dab5e0d
OpenShift version: 4.10.12
Podman version: 3.4.4

2.6. Upgrading CRC - https://crc.dev/crc/#upgrading_gsg

3.2. Setting up CRC

$ crc setup
$ crc start --pull-secret-file ~/Downloads/pull-secret
...
Started the OpenShift cluster.

The server is accessible via web console at:
https://console-openshift-console.apps-crc.testing

Log in as administrator:
Username: kubeadmin
Password: ggNNE-tdBvf-Se8j5-yT8nF

Log in as user:
Username: developer
Password: developer

Use the 'oc' command line interface:
$ eval $(crc oc-env)
$ oc login -u developer https://api.crc.testing:6443

Testing

$ eval $(crc oc-env)

$ oc login -u developer https://api.crc.testing:6443

$ oc completion -h
...
# Installing bash completion on Linux
## If bash-completion is not installed on Linux, install the 'bash-completion' package
## via your distribution's package manager.
## Load the oc completion code for bash into the current shell
source <(oc completion bash)
...
$ source <(oc completion bash)
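To load completion in every new shell, you can append the same line to your ~/.bashrc (as the oc help text itself suggests):

$ echo 'source <(oc completion bash)' >> ~/.bashrc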

$ crc console --credentials
To login as a regular user, run 'oc login -u developer -p developer https://api.crc.testing:6443'.
To login as an admin, run 'oc login -u kubeadmin -p ggNNE-tdBvf-Se8j5-yT8nF https://api.crc.testing:6443'

$ oc get clusterversions
NAME VERSION AVAILABLE PROGRESSING SINCE STATUS
version 4.10.12 True False 18d Cluster version is 4.10.12

$ oc get nodes
NAME STATUS ROLES AGE VERSION
crc-xhphl-master-0 Ready master,worker 19d v1.23.5+70fb84c

$ crc console --url
https://console-openshift-console.apps-crc.testing

Other resources:

https://github.com/code-ready/crc


How to Disable RHEL 9.0 BAD PASSWORD: The password fails the dictionary check - it is based on a dictionary word


NEVER USE THIS OUTSIDE A TEST/DEVELOPMENT ENVIRONMENT

>$ sudo passwd student
Changing password for user student.
New password:
BAD PASSWORD: The password fails the dictionary check - it is based on a dictionary word

You need to change two lines.

>$ sudo vim /etc/pam.d/system-auth
...
# password requisite pam_pwquality.so try_first_pass local_users_only retry=3 authtok_type=
password sufficient pam_unix.so try_first_pass nullok sha512 shadow

Here is the diff

>$ sudo diff /etc/pam.d/system-auth /etc/pam.d/system-auth.ORIG 
10,11c10,11
< # password requisite pam_pwquality.so try_first_pass local_users_only retry=3 authtok_type=
< password sufficient pam_unix.so try_first_pass nullok sha512 shadow
---
> password requisite pam_pwquality.so try_first_pass local_users_only retry=3 authtok_type=
> password sufficient pam_unix.so try_first_pass use_authtok nullok sha512 shadow

Now you can set weak passwords for users.
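For example, re-running passwd with a dictionary word should now succeed (a sketch; output may vary slightly):

>$ sudo passwd student
Changing password for user student.
New password:
Retype new password:
passwd: all authentication tokens updated successfully.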


RHEL 9.0 Bash Completion and in VIM use Space Instead of Tab

>$ sudo dnf install vim-enhanced bash-completion -y

After installing bash completion you need to log out and then log in again before the completion takes effect.

>$ sudo vim /etc/vimrc
...
" insert space for tab
set expandtab
" number of space for tab
set tabstop=2
" number of space for indentation
set shiftwidth=2
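You can verify the settings from inside vim (a sketch):

>:set expandtab?
  expandtab
:set tabstop?
  tabstop=2
:set shiftwidth?
  shiftwidth=2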

RHEL 9.0 Administratively Log Out Users

># man w
...
NAME
w - Show who is logged on and what they are doing.
...
># w
14:08:39 up 3 min, 2 users, load average: 0.03, 0.08, 0.03
USER TTY LOGIN@ IDLE JCPU PCPU WHAT
student pts/0 14:04 1:16 0.07s 0.05s sshd: student [priv]
root pts/1 14:07 1.00s 0.04s 0.01s w
># man pgrep
...
NAME
pgrep, pkill, pidwait - look up, signal, or wait for processes based on name and other at‐
tributes
...
># pgrep -l -u student
1725 systemd
1728 (sd-pam)
1735 sshd
1736 bash
># kill -l
1) SIGHUP 2) SIGINT 3) SIGQUIT 4) SIGILL 5) SIGTRAP
6) SIGABRT 7) SIGBUS 8) SIGFPE 9) SIGKILL 10) SIGUSR1
11) SIGSEGV 12) SIGUSR2 13) SIGPIPE 14) SIGALRM 15) SIGTERM

Use SIGTERM first, then try SIGINT; and only if both fail, try again with SIGKILL.

># pkill -SIGKILL -u student

Verify that all the user's processes are terminated with pgrep and w.
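For example, a graceful-first version of the cleanup above (a sketch, reusing the student user):

># pkill -SIGTERM -u student
# pgrep -l -u student || echo "no processes left for student"
# w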

RHEL 9.0 Manage Systemd Units

>$ sudo systemctl list-units --type service --all

Systemd units can be of three kinds:

  • Service units have a .service extension and represent system services.
  • Socket units have a .socket extension and represent inter-process communication (IPC) sockets that systemd should monitor.
  • Path units have a .path extension and delay the activation of a service until a specific file-system change occurs.

Custom or override systemd units are stored in:

/etc/systemd/system/

System default or RPM-installed systemd units are stored in:

/usr/lib/systemd/system/
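For example, a minimal custom service unit placed under /etc/systemd/system/ could look like this (the myapp name and path are hypothetical):

># cat /etc/systemd/system/myapp.service
[Unit]
Description=My example service

[Service]
ExecStart=/usr/local/bin/myapp

[Install]
WantedBy=multi-user.target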

>$ sudo systemctl cat sshd.service
$ sudo systemctl edit sshd.service
$ sudo systemctl daemon-reload
>$ sudo systemctl status sshd.service 
$ sudo systemctl restart sshd.service

A masked unit is completely disabled, so that any start operation on it fails.

>$ sudo systemctl mask sendmail.service
$ sudo systemctl unmask sendmail.service
>$ sudo systemctl enable httpd.service
$ sudo systemctl disable httpd.service
$ sudo systemctl status httpd.service
$ sudo systemctl is-enabled httpd.service

RHEL 9.0 Managing journalctl

>$ sudo journalctl -p err
>$ sudo journalctl --since "2022-07-01" --until "2022-07-10 15:00:00"
>       -S, --since=, -U, --until=
Start showing entries on or newer than the specified date, or on or older than the specified date, respectively.
Date specifications should be of the format "2012-10-30 18:17:16". If the time part is omitted, "00:00:00" is
assumed.
>$ sudo journalctl _PID=1
>$ sudo journalctl _UID=81
>EXAMPLES
...
_SYSTEMD_UNIT=name.service
+ UNIT=name.service _PID=1
+ OBJECT_SYSTEMD_UNIT=name.service _UID=0
+ COREDUMP_UNIT=name.service _UID=0 MESSAGE_ID=fc2e22bc6ee647b6b90729ab34a250b1
...
>$ sudo cat /etc/systemd/journald.conf 
...
# See journald.conf(5) for details.

[Journal]
#Storage=auto
>$ sudo man 5 journald.conf
...
Storage=
Controls where to store journal data. One of "volatile", "persistent", "auto" and "none". If "volatile", journal
log data will be stored only in memory, i.e. below the /run/log/journal hierarchy (which is created if needed).
...
>$ sudo systemctl restart systemd-journald.service
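Putting it together, switching to persistent journal storage could look like this (a sketch):

># mkdir -p /var/log/journal
# vim /etc/systemd/journald.conf
...
[Journal]
Storage=persistent
...
# systemctl restart systemd-journald.service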

RHEL 9.0 Managing Date, Time and Time Zone

>$ sudo timedatectl 
Local time: Sun 2022-07-10 18:27:46 CEST
Universal time: Sun 2022-07-10 16:27:46 UTC
RTC time: Sun 2022-07-10 16:27:46
Time zone: Europe/Stockholm (CEST, +0200)
System clock synchronized: yes
NTP service: active
RTC in local TZ: no

$ sudo timedatectl -h
timedatectl [OPTIONS...] COMMAND ...

Query or change system time and date settings.

Commands:
status Show current time settings
show Show properties of systemd-timedated
set-time TIME Set system time
set-timezone ZONE Set system time zone
list-timezones Show known time zones
set-local-rtc BOOL Control whether RTC is in local time
set-ntp BOOL Enable or disable network time synchronization

systemd-timesyncd Commands:
timesync-status Show status of systemd-timesyncd
show-timesync Show properties of systemd-timesyncd

Options:
-h --help Show this help message
--version Show package version
--no-pager Do not pipe output into a pager
--no-ask-password Do not prompt for password
-H --host=[USER@]HOST Operate on remote host
-M --machine=CONTAINER Operate on local container
--adjust-system-clock Adjust system clock when changing local RTC mode
--monitor Monitor status of systemd-timesyncd
-p --property=NAME Show only properties by this name
-a --all Show all properties, including empty ones
--value When showing properties, only print the value

See the timedatectl(1) man page for details.

$ sudo timedatectl list-timezones
$ sudo timedatectl set-ntp true
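For example, to set the time zone shown above (a sketch):

>$ sudo timedatectl set-timezone Europe/Stockholm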
>$ sudo chronyc sources -v

.-- Source mode '^' = server, '=' = peer, '#' = local clock.
/ .- Source state '*' = current best, '+' = combined, '-' = not combined,
| / 'x' = may be in error, '~' = too variable, '?' = unusable.
|| .- xxxx [ yyyy ] +/- zzzz
|| Reachability register (octal) -. | xxxx = adjusted offset,
|| Log2(Polling interval) --. | | yyyy = measured offset,
|| \ | | zzzz = estimated error.
|| | | \
MS Name/IP address Stratum Poll Reach LastRx Last sample
===============================================================================
^- ntp1.vmar.se 2 7 377 114 +527us[ +527us] +/- 28ms
^- ec2-16-16-55-166.eu-nort> 2 7 377 116 -1587us[-1283us] +/- 43ms
^* time.cloudflare.com 3 7 377 115 +519us[ +823us] +/- 2212us
^- lul1.ntp.netnod.se 1 7 377 115 -3057us[-2753us] +/- 14ms

$ sudo cat /etc/chrony.conf
$ sudo man 5 chrony.conf
>$ sudo systemctl status chronyd.service 
● chronyd.service - NTP client/server
Loaded: loaded (/usr/lib/systemd/system/chronyd.service; enabled; vendor preset: enabled)
...

Networking Basics


  • Netmask (n 1s, the rest 0s)
  • Network address (all host bits are 0s)
  • Broadcast address (all host bits are 1s)
  • Address range for hosts on subnet (Network address + 1 to Broadcast address - 1)
  • Number of hosts in network (2^h - 2)


>IP address: 192.168.122.58/24 

Netmask: 255.255.255.0

Network: 192.168.122.0

Broadcast: 192.168.122.255

Range: 192.168.122.1 - 192.168.122.254

Number of hosts: 2^(32-24) - 2 = 254
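You can double-check these values with the ipcalc tool that ships with RHEL (a sketch; flags and output format may vary between versions):

>$ ipcalc --netmask --network --broadcast 192.168.122.58/24
NETMASK=255.255.255.0
NETWORK=192.168.122.0
BROADCAST=192.168.122.255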

>IP address: 172.168.181.23/19

1010 1100 . 1010 1000 . 1011 0101 . 0001 0111
1010 1100 . 1010 1000 . 101 19 first bits

Netmask (n 1s, the rest 0s)

1111 1111 . 1111 1111 . 1110 0000 . 0000 0000 255.255.224.0

Network address (all host bits are 0s)

1010 1100 . 1010 1000 . 1010 0000 . 0000 0000 172.168.160.0

Broadcast address (all host bits are 1s)

1010 1100 . 1010 1000 . 1011 1111 . 1111 1111 172.168.191.255

Address range for hosts on subnet (Network address + 1 to Broadcast address - 1)

172.168.160.1 - 172.168.191.254

Number of hosts in network (2^h - 2)

2^(32-19) - 2 = 2^13 - 2 = 8190

>IP address: 192.168.1.100/25

1100 0000 . 1010 1000 . 0000 0001 . 0110 0100
1100 0000 . 1010 1000 . 0000 0001 . 0 25 first bits

Netmask (n 1s, the rest 0s)

1111 1111 . 1111 1111 . 1111 1111 . 1000 0000 255.255.255.128

Network address (all host bits are 0s)

1100 0000 . 1010 1000 . 0000 0001 . 0000 0000 192.168.1.0

Broadcast address (all host bits are 1s)

1100 0000 . 1010 1000 . 0000 0001 . 0111 1111 192.168.1.127

Address range for hosts on subnet (Network address + 1 to Broadcast address - 1)

192.168.1.1 - 192.168.1.126

Number of hosts in network (2^h - 2)

2^(32-25) - 2 = 2^7 - 2 = 126

>IP address: 172.16.5.34/26

1010 1100 . 0001 0000 . 0000 0101 . 0010 0010
1010 1100 . 0001 0000 . 0000 0101 . 00 26 first bits

Netmask (n 1s, the rest 0s)

1111 1111 . 1111 1111 . 1111 1111 . 1100 0000 255.255.255.192

Network address (all host bits are 0s)

1010 1100 . 0001 0000 . 0000 0101 . 0000 0000 172.16.5.0

Broadcast address (all host bits are 1s)

1010 1100 . 0001 0000 . 0000 0101 . 0011 1111 172.16.5.63

Address range for hosts on subnet (Network address + 1 to Broadcast address - 1)

172.16.5.1 - 172.16.5.62

Number of hosts in network (2^h - 2)

2^(32-26) - 2 = 2^6 - 2 = 62

RHEL 9.0 Install NFS 4 Server and Client. Configure Mount and Automount Direct and Indirect Map


RHEL 9.0 Install NFS 4 Server

Let's start with one server and install NFS 4.

># dnf install -y nfs-utils

# man 5 exports
...
root_squash
Map requests from uid/gid 0 to the anonymous uid/gid. Note that this does not apply to any other uids or gids that might be
equally sensitive, such as user bin or group staff.
...
EXAMPLE
# sample /etc/exports file
/ master(rw) trusty(rw,no_root_squash)
...

Before configuring the NFS 4 server, we will create a couple of directories with file permissions.

The users we create below will be created with specific UIDs and GIDs, as they need to be the same on the client machines.

># mkdir -p /nfs-share/john
# mkdir -p /nfs-share/jane
# mkdir -p /nfs-share/alice
# mkdir /nfs-share/tmp

# groupadd --gid 1101 john
# groupadd --gid 1102 jane
# groupadd --gid 1103 alice

# useradd --uid 1101 --gid 1101 john
# useradd --uid 1102 --gid 1102 jane
# useradd --uid 1103 --gid 1103 alice

# chown john:john /nfs-share/john
# chown jane:jane /nfs-share/jane
# chown alice:alice /nfs-share/alice

# chmod 750 /nfs-share/john
# chmod 750 /nfs-share/jane
# chmod 750 /nfs-share/alice
# chmod 1777 /nfs-share/tmp

# cp /etc/skel/.bash* /nfs-share/john/
# cp /etc/skel/.bash* /nfs-share/jane/
# cp /etc/skel/.bash* /nfs-share/alice/

# chown john:john /nfs-share/john/.bash*
# chown jane:jane /nfs-share/jane/.bash*
# chown alice:alice /nfs-share/alice/.bash*

And now for the NFS 4 Server configuration.

># vim /etc/exports
/nfs-share/john *(rw,root_squash)
/nfs-share/jane *(rw,root_squash)
/nfs-share/alice *(rw,root_squash)

# systemctl enable --now nfs-server.service

# firewall-cmd --add-service=nfs; firewall-cmd --add-service=nfs --permanent

Install NFS 4 on RHEL 9.0 Client

># dnf install -y nfs-utils

NFSv3 used the RPC protocol, which requires a file server that supports NFSv3 connections to run the rpcbind service. An NFSv3 client connects to the rpcbind service at port 111 on the server to request NFS service. The server responds with the current port for the NFS service. Use the showmount command to query the available exports on an RPC-based NFSv3 server.

># showmount --exports server

NFSv4 introduced an export tree that contains all of the paths for the server's exported directories.

>$ sudo mount 192.168.122.76:/ /mnt
$ ls /mnt/
nfs-share
$ sudo umount /mnt

There are 4 different ways to mount NFS shares.

Way 1: Temporary Mount

>$ sudo mkdir -p /nfs-share/tmp
$ sudo mount -t nfs -o rw,sync 192.168.122.76:/nfs-share/tmp /nfs-share/tmp

$ sudo mount | grep 192.168.122.76
192.168.122.76:/ on /mnt type nfs4 (rw,relatime,vers=4.2,rsize=262144,wsize=262144,namlen=255,hard,proto=tcp,timeo=600,retrans=2,sec=sys,clientaddr=192.168.122.58,local_lock=none,addr=192.168.122.76)

$ sudo umount /nfs-share/tmp

Way 2: Permanent Mount

>$ sudo mkdir -p /nfs-share/tmp
$ sudo vim /etc/fstab
...
192.168.122.76:/nfs-share/tmp /nfs-share/tmp nfs rw,sync 0 0

$ sudo mount -a
$ sudo systemctl daemon-reload

Way 3 and Way 4: Automount Direct Map and Automount Indirect Map

Difference between Automount Direct Map and Indirect Map

A direct automount is a well-known and unchanging mount point that is known beforehand. An indirect automount is the opposite, e.g. user home directories (/home), where you do not know beforehand which user will log in to a specific server.

Way 3: Automount Direct Map

>$ sudo dnf install -y autofs nfs-utils

$ man 5 auto.master
...
For direct maps the mount point is always specified as:

/-
...
EXAMPLE
/- auto.data
/home /etc/auto.home
/mnt yp:mnt.map

This will generate two mountpoints for /home and /mnt and install direct mount triggers for each entry in the di‐
rect mount map auto.data. All accesses to /home will lead to the consultation of the map in /etc/auto.home and
all accesses to /mnt will consult the NIS map mnt.map. All accesses to paths in the map auto.data will trigger
mounts when they are accessed and the Name Service Switch configuration will be used to locate the source of the
map auto.data.

To avoid making edits to /etc/auto.master, /etc/auto.master.d may be used. Files in that directory must have a
".autofs" suffix, e.g. /etc/auto.master.d/extra.autofs. Such files contain lines of the same format as the au‐
to.master file, e.g.

/foo /etc/auto.foo
/baz yp:baz.map
...

$ sudo vim /etc/auto.master.d/nfs-share-direct-tmp.autofs
/- /etc/auto.nfs-share-direct-tmp

$ sudo vim /etc/auto.nfs-share-direct-tmp
/nfs-share-direct/tmp -rw,sync 192.168.122.76:/nfs-share/tmp

$ sudo systemctl enable --now autofs

$ sudo mount | grep nfs-share-direct-tmp
/etc/auto.nfs-share-direct-tmp on /nfs-share-direct/tmp type autofs (rw,relatime,fd=17,pgrp=6250,timeout=300,minproto=5,maxproto=5,direct,pipe_ino=74858)

$ echo "HELLO"> /nfs-share-direct/tmp/HELLO

$ cat /nfs-share-direct/tmp/HELLO
HELLO

Way 4: Automount Indirect Map

>$ sudo dnf install -y autofs nfs-utils

$ sudo vim /etc/auto.master.d/nfs-share-indirect-tmp.autofs
/nfs-share-indirect /etc/auto.nfs-share-indirect-tmp

/nfs-share-indirect is the base for the final mount point. The next file is called the mapping file.

# vim /etc/auto.nfs-share-indirect-tmp
tmp -rw,sync 192.168.122.76:/nfs-share/tmp

The final mount point (path) is the combined path from the master file and the mapping file, here /nfs-share-indirect/tmp.

Both the directory /nfs-share-indirect and /nfs-share-indirect/tmp are created and removed automatically by the autofs service.

# systemctl enable --now autofs
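A quick test of the indirect map (a sketch, assuming the same NFS server as above):

>$ ls /nfs-share-indirect/tmp
$ echo "HELLO"> /nfs-share-indirect/tmp/HELLO
$ cat /nfs-share-indirect/tmp/HELLO
HELLO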

$ man 5 autofs
...
-fstype=
is used to specify a filesystem type if the filesystem is not of the default NFS type. This option
is processed by the automounter and not by the mount command.

-strict
is used to treat errors when mounting file systems as fatal. This is important when multiple file
systems should be mounted (`multi-mounts'). If this option is given, no file system is mounted at
all if at least one file system can't be mounted.
...
EXAMPLE
Indirect map:

kernel -ro,soft ftp.kernel.org:/pub/linux
boot -fstype=ext2 :/dev/hda1
windoze -fstype=smbfs ://windoze/c
removable -fstype=ext2 :/dev/hdd
cd -fstype=iso9660,ro :/dev/hdc
floppy -fstype=auto :/dev/fd0
server -rw,hard / -ro myserver.me.org:/ \
/usr myserver.me.org:/usr \
/home myserver.me.org:/home

In the first line we have a NFS remote mount of the kernel directory on ftp.kernel.org. This is mounted read-
only. The second line mounts an ext2 volume from a local ide drive. The third makes a share exported from a
Windows machine available for automounting. The rest should be fairly self-explanatory. The last entry (the last
three lines) is an example of a multi-map (see below).

If you use the automounter for a filesystem without access permissions (like vfat), users usually can't write on
such a filesystem because it is mounted as user root. You can solve this problem by passing the option
gid=<gid>, e.g. gid=floppy. The filesystem is then mounted as group floppy instead of root. Then you can add the
users to this group, and they can write to the filesystem. Here's an example entry for an autofs map:

floppy-vfat -fstype=vfat,sync,gid=floppy,umask=002 :/dev/fd0

Direct map:

/nfs/apps/mozilla bogus:/usr/local/moxill
/nfs/data/budgets tiger:/usr/local/budgets
/tst/sbin bogus:/usr/sbin

FEATURES
Map Key Substitution
An & character in the location is expanded to the value of the key field that matched the line (which probably
only makes sense together with a wildcard key).

Wildcard Key
A map key of * denotes a wild-card entry. This entry is consulted if the specified key does not exist in the map.
A typical wild-card entry looks like this:

* server:/export/home/&

The special character '&' will be replaced by the provided key. So, in the example above, a lookup for the key
'foo' would yield a mount of server:/export/home/foo.
...

To map user home directories:

>$ sudo vim /etc/auto.master.d/nfs-share-indirect-home.autofs
/home /etc/auto.nfs-share-indirect-home

$ vim /etc/auto.nfs-share-indirect-home
* -rw,sync 192.168.122.76:/nfs-share/&

# systemctl enable --now autofs

# groupadd --gid 1101 john
# useradd --uid 1101 --gid 1101 john
# passwd john
# su - john

$ echo "JOHN"> john
$ pwd
/home/john

RHEL 9.0 LVM, Extend and Swap


Introduction

Logical Volume Manager (LVM) concepts:

  • Physical Volumes (PVs) - the underlying block devices (whole disks or partitions) that LVM uses as storage.
  • Volume Groups (VGs) - pools of storage built from one or more PVs.
  • Logical Volumes (LVs) - the "virtual partitions" allocated from a VG, on which filesystems or swap are created.

Create Partition Table, PV, VG and LV

># lsblk -fp
NAME FSTYPE FSVER LABEL UUID FSAVAIL FSUSE% MOUNTPOINTS
/dev/sr0
/dev/vda
├─/dev/vda1 xfs e8e38d31-36a2-4ad7-9668-94023cd80424 817.8M 19% /boot
└─/dev/vda2 LVM2_member LVM2 001 y0wzxQ-mGYD-OfjS-LxuL-l2gJ-Rfgt-h5UgR6
├─/dev/mapper/rhel_rhel9-root
│ xfs 221f6235-b21f-48e6-befc-489e271de1f0 15.9G 6% /
└─/dev/mapper/rhel_rhel9-swap
swap 1 245cf443-6a9e-4d32-b7ad-0cbf15a9020d [SWAP]
/dev/vdb
/dev/vdc

# man parted
...
mklabel label-type
Create a new disklabel (partition table) of label-type. label-type should be one of "aix",
"amiga", "bsd", "dvh", "gpt", "loop", "mac", "msdos", "pc98", or "sun".

mkpart [part-type name fs-type] start end
Create a new partition. part-type may be specified only with msdos and dvh partition tables, it
should be one of "primary", "logical", or "extended". name is required for GPT partition tables
and fs-type is optional. fs-type can be one of "btrfs", "ext2", "ext3", "ext4", "fat16",
"fat32", "hfs", "hfs+", "linux-swap", "ntfs", "reiserfs", "udf", or "xfs".
...
set partition flag state
Change the state of the flag on partition to state. Supported flags are: "boot", "root", "swap",
"hidden", "raid", "lvm", "lba", "legacy_boot", "irst", "msftres", "esp", "chromeos_kernel",
"bls_boot" and "palo". state should be either "on" or "off".
...

# parted /dev/vdb mklabel gpt

# parted /dev/vdb mkpart first 0G 3G
# parted /dev/vdb set 1 lvm on

# parted /dev/vdb mkpart second 3G 6G
# parted /dev/vdb set 2 lvm on

# parted /dev/vdb print
Model: Virtio Block Device (virtblk)
Disk /dev/vdb: 10.7GB
Sector size (logical/physical): 512B/512B
Partition Table: gpt
Disk Flags:

Number Start End Size File system Name Flags
1 1049kB 3000MB 2999MB first lvm
2 3000MB 6000MB 3000MB second lvm

# lsblk -fp
NAME FSTYPE FSVER LABEL UUID FSAVAIL FSUSE% MOUNTPOINTS
...
/dev/vdb
├─/dev/vdb1
└─/dev/vdb2

# pvcreate /dev/vdb1 /dev/vdb2

# vgcreate myvg01 /dev/vdb1

# lvcreate --name mylv01 --size 2.7G myvg01

# lsblk /dev/vdb -fp
NAME FSTYPE FSVER LABEL UUID FSAVAIL FSUSE% MOUNTPOINTS
/dev/vdb
├─/dev/vdb1 LVM2_member LVM2 001 soNUus-2dYc-cTHE-OTXg-Ks1y-hQ6U-TLMgGz
│ └─/dev/mapper/myvg01-mylv01 xfs 5ac89db3-6bec-41f1-866d-e6afc3241ccd
└─/dev/vdb2 LVM2_member LVM2 001 MIXCZT-j2af-G0e7-dN30-yeCW-NL9W-m73AFA

# mkfs.xfs /dev/mapper/myvg01-mylv01

# mkdir /myvg01-mylv01

# vim /etc/fstab
...
/dev/mapper/myvg01-mylv01 /myvg01-mylv01 xfs defaults 0 0

# mount -a

# echo "FOO"> /myvg01-mylv01/foo; cat /myvg01-mylv01/foo
FOO

Extend VG, LV and Resize Filesystem

># vgextend myvg01 /dev/vdb2

# lvextend --size +2.7G --resizefs /dev/mapper/myvg01-mylv01

# df -h /myvg01-mylv01/
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/myvg01-mylv01 5.4G 72M 5.4G 2% /myvg01-mylv01

Create Swap

># parted /dev/vdb mkpart third 6G 8G

# parted /dev/vdb set 3 lvm on

# pvcreate /dev/vdb3

# vgcreate myvg02 /dev/vdb3

# lvcreate --name mylv02 --size 1.8G myvg02

# mkswap /dev/mapper/myvg02-mylv02

# free
total used free shared buff/cache available
Mem: 1301304 193984 770756 6976 336564 952776
Swap: 2097148 0 2097148

# swapon /dev/mapper/myvg02-mylv02

# free
total used free shared buff/cache available
Mem: 1301304 194772 769936 6976 336596 951988
Swap: 3985400 0 3985400

# vim /etc/fstab
...
/dev/mapper/myvg02-mylv02 none swap defaults 0 0

# swapon -a

RHEL 9.0 Virtual Data Optimizer (VDO)


Introduction

Virtual Data Optimizer (VDO) provides inline block-level deduplication, compression, and thin provisioning. In RHEL 9, VDO volumes are created through LVM (lvcreate --type vdo).

># dnf install -y vdo kmod-kvdo

Prerequisite

># parted /dev/vdb print
Model: Virtio Block Device (virtblk)
Disk /dev/vdb: 10.7GB
Sector size (logical/physical): 512B/512B
Partition Table: gpt
Disk Flags:

Number Start End Size File system Name Flags

Configure

># parted /dev/vdb mkpart first 0G 10G
# parted /dev/vdb set 1 lvm on

# pvcreate /dev/vdb1
# vgcreate myvg-vdo /dev/vdb1

# lvcreate --name mylv-vdo --size 5G --type vdo myvg-vdo

# lsblk /dev/vdb -fp
NAME FSTYPE FSVER LABEL UUID FSAVAIL FSUSE% MOUNTPOINTS
/dev/vdb
└─/dev/vdb1 LVM2_m LVM2 3O1e3e-NMIt-1y5q-jvWk-ONQh-doOn-3V719c
  └─/dev/mapper/myvg--vdo-vpool0_vdata
    └─/dev/mapper/myvg--vdo-vpool0-vpool
      └─/dev/mapper/myvg--vdo-mylv--vdo

# mkfs.xfs /dev/mapper/myvg--vdo-mylv--vdo

# mkdir /myvg--vdo-mylv--vdo
# vim /etc/fstab
...
/dev/mapper/myvg--vdo-mylv--vdo /myvg--vdo-mylv--vdo xfs defaults 0 0

# mount -a

Test

># echo "FOO"> /myvg--vdo-mylv--vdo/foo; cat /myvg--vdo-mylv--vdo/foo
FOO

RHEL 9.0 Stratis


Introduction

Stratis is a local storage-management solution that provides pool-based volume management, built on top of XFS and device-mapper.

># man stratis
...
EXAMPLES
Example 1. Creating a Stratis pool

stratis pool create mypool /dev/sdb /dev/sdc

Example 2. Creating an encrypted pool

stratis key set --capture-key someKeyDescription

stratis pool create --key-desc someKeyDescription mypool /dev/sdb /dev/sdc

Example 3. Creating a filesystem from a pool

stratis filesystem create mypool data1
...

# lsblk /dev/vdc -pf
NAME FSTYPE FSVER LABEL UUID FSAVAIL FSUSE% MOUNTPOINTS
/dev/vdc

Configure

># dnf install -y stratis-cli stratisd

# systemctl enable --now stratisd

# stratis pool create mypool /dev/vdc

# stratis filesystem create mypool data1

# lsblk /dev/stratis/mypool/data1 --output UUID
UUID
e119c223-029f-4b45-a204-3672e37c556f

# find /usr/share/doc/ -type f | xargs grep x-systemd.requires
grep: /usr/share/doc/python3-setuptools/python: No such file or directory
grep: 2: No such file or directory
grep: sunset.rst: No such file or directory
/usr/share/doc/systemd/NEWS: * New /etc/fstab options x-systemd.requires= and
/usr/share/doc/systemd/NEWS: x-systemd.requires-mounts-for= are now supported to express

# mkdir /stratis

# vim /etc/fstab
...
UUID=e119c223-029f-4b45-a204-3672e37c556f /stratis xfs defaults,x-systemd.requires=stratisd.service 0 0

# mount -a

Test

># echo "FOO"> /stratis/foo; cat /stratis/foo

RHEL 9.0 Reset the Root Password


When the boot-loader menu appears, press any key to interrupt the countdown, except the Enter key.

Use the cursor keys to highlight the rescue kernel boot-loader entry (the one with the word rescue in its name).

Press e to edit the current entry.

Use the cursor keys to navigate to the line that starts with the linux text.

Press Ctrl+e to move the cursor to the end of the line.

Append the rd.break text to the end of the line.

Press Ctrl+x to boot using the modified configuration.

Press Enter to enter the maintenance mode.

>sh-5.1# mount -o remount,rw /sysroot

sh-5.1# chroot /sysroot

sh-5.1# passwd root

sh-5.1# touch /.autorelabel
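Then exit twice, first to leave the chroot and then to leave the initramfs shell; the system continues booting and relabels the filesystem with SELinux contexts:

>sh-5.1# exit
sh-5.1# exit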

RHEL 9.0 Boot in emergency.target


Introduction

># systemctl list-units --type target --all 
UNIT
emergency.target
rescue.target
...

# systemctl get-default
multi-user.target

Boot in emergency.target

When the boot-loader menu appears, press any key to interrupt the countdown, except Enter.

Use the cursor keys to highlight the default boot-loader entry.

Press e to edit the current entry.

Use the cursor keys to navigate to the line that starts with the linux text.

Press Ctrl+e to move the cursor to the end of the line.

Append the systemd.unit=emergency.target text to the end of the line.

Press Ctrl+x to boot using the modified configuration.

># mount -o remount,rw /

# mount -a

# vim /etc/fstab

# systemctl daemon-reload

# mount -a

# systemctl reboot