%#!latex
%------------------------------------------------------------------------------
% Beginning of journal.tex
%------------------------------------------------------------------------------
%
\documentclass[12pt, reqno]{amsart}
\usepackage{amsmath, amsthm, amscd, amsfonts, amssymb, graphicx, color}
\usepackage[bookmarksnumbered, plainpages]{hyperref}
\textheight 22.5truecm \textwidth 14.5truecm
\setlength{\oddsidemargin}{0.35in}\setlength{\evensidemargin}{0.35in}
\setlength{\topmargin}{-.5cm}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{thm}{Theorem}[]
\renewcommand{\thethm}{\hspace{-8pt}}
\theoremstyle{definition}
\newtheorem{definition}[theorem]{Definition}
\newtheorem{example}[theorem]{Example}
\newtheorem{xca}[theorem]{Exercise}
\newtheorem{problem}[theorem]{Problem}
\theoremstyle{remark}
\newtheorem{remark}[theorem]{Remark}
\numberwithin{equation}{section}
\def\bs{\bigskip\par}
\def\ms{\medskip\par}
\def\SD{\strut\displaystyle}
\def\G{\diamondsuit}
\def\LB{\lower2pt\hbox{\large$\square$}}
\def\Gm{\LB\kern-10pt\lower-1pt\hbox{\footnotesize\it m}\kern2pt\hbox{}}
\def\DA{\underset{\scriptsize \textrm{log}}{\searrow}}
\def\RR{\mathbb{R}}
\def\f{\varphi}
\def\<{\left\langle}
\def\>{\right\rangle}
\def\epi{\hbox{\rm epi }}
\def\dom{\hbox{\rm dom }}
\def\Re{\hbox{\rm Re }}
\def\epito{\overset{\text{\rm epi}}{\longrightarrow}}
\def\downto{\downarrow}
\def\itemb#1{\item{\normalfont\bf #1}}
\def\e{\varepsilon}
\def\wlim{\operatornamewithlimits{w-lim}}
\def\slim{\operatornamewithlimits{s-lim}}
\def\ulim{\operatornamewithlimits{u-lim}}
\def\mlim{\operatornamewithlimits{M-lim}}
\def\~{\hskip-2pt}
\def\NQED{\renewcommand{\qedsymbol}{} \vskip-2\baselineskip\par}
\def\({\left(}
\def\){\right)}
\def\longarrow{\longrightarrow}
\def\VV{\lower-0.1ex\hbox{$\ \begin{matrix}\vee\\[-2ex]\vee\end{matrix}\ $}}
\def\vv{\lower-0.2ex\hbox{$\ \begin{matrix}\wedge\\[-2ex]\wedge\end{matrix}\ $}}
\def\M{\; {\frak m}}
\def\phi{\varphi}
\def\FNT{\upshape\bf}
\def\A{\hbox{\boldmath$A$}}
\def\B{\hbox{\boldmath$B$}}
\def\D{\hbox{\boldmath$D$}}
\def\U{\hbox{\boldmath$U$}}
\def\I{\hbox{\boldmath$I$}}
\def\O{\hbox{\boldmath$O$}}
\def\E{\hbox{\boldmath$E$}}
\def\P{\hbox{\boldmath$P$}}
\def\X{\hbox{\boldmath$X$}}
\def\Y{\hbox{\boldmath$Y$}}
\begin{document}
\setcounter{page}{59}
\noindent\parbox{2.85cm}{\includegraphics*[keepaspectratio=true,scale=1.75]{BJMA.jpg}}
\noindent\parbox{4.85in}{\hspace{0.1mm}\\[1.5cm]\noindent
Banach J. Math. Anal. 2 (2008), no. 2, 59--67\\
$\frac{\rule{4.55in}{0.05in}}{{}}$\\
{\footnotesize \textcolor[rgb]{0.65,0.00,0.95}{\textsc{\textbf{\large{B}}anach \textbf{\large{J}}ournal of \textbf{\large{M}}athematical \textbf{\large{A}}nalysis}}\\
ISSN: 1735-8787 (electronic)\\
\textcolor[rgb]{0.00,0.00,0.84}{\textbf{http://www.math-analysis.org }}\\
$\frac{{}}{\rule{4.55in}{0.05in}}$}\\[.5in]
\title[Operator-valued inner product]{Operator-valued inner product and operator inequalities}
\author[J.I. Fujii]{Jun Ichi Fujii$^1$}
\address{$^{1}$ Department of Arts and Sciences (Information Science), Osaka Kyoiku University, Asahigaoka, Kashiwara, Osaka 582-8582, Japan.}
\email{\textcolor[rgb]{0.00,0.00,0.84}{fujii@cc.osaka-kyoiku.ac.jp}}
\dedicatory{This paper is dedicated to Professor Josip E. Pe\v{c}ari\'{c}\\
\vspace{.5cm} {\rm Submitted by M. S. Moslehian}}
\subjclass[2000]{Primary 47A63; Secondary 47A75, 47A80.}
\keywords{Schwarz inequality, Jensen inequality, operator inequality.}
\date{Received: 29 March 2008; Accepted: 13 May 2008.}

\begin{abstract}
The Schwarz and Jensen inequalities are fundamental in a Hilbert space. Regarding a sesquilinear map $B(X,Y)=Y^*X$ as an operator-valued inner product, we discuss operator versions of these inequalities and give simple conditions under which the equalities hold.
\end{abstract}

\maketitle

\section{Introduction}

Inequalities play a basic role in analysis and consequently in mathematics. As surveyed briefly in \cite{Ms}, operator inequalities on a Hilbert space have been discussed intensively, particularly since the Furuta inequality was established. But it is not easy in general to give a simple condition under which the equality holds in an operator inequality. In this note, we observe basic operator inequalities and discuss their equality conditions. To this end, we consider a simple linear algebraic structure on operator spaces: For Hilbert spaces $H$ and $K$, the symbol $B(H,K)$ denotes the set of all (bounded linear) operators from $H$ to $K$, and $B(H)\equiv B(H,H)$. We then consider an operator matrix $\A=(A_{ij})\in B(H^n)$, a vector $X=(X_j)\in B(H,H^n)$ with operator entries $X_j\in B(H)$, an operator-valued inner product
$$Y^*X=\sum_{j=1}^nY_j^*X_j$$
for $X=(X_j),\,Y=(Y_j)\in B(H,H^n)$, and an operator version $E$ of an eigenvalue, determined by $\A X=XE$ where $\A\in B(H^n)$, $X\in B(H,H^n)$ and $E\in B(H)$. Particularly in Section 3, these concepts enable us to describe equality conditions very simply. First, we give three operator versions of the so-called Schwarz inequality with simple equality conditions. Second, we give a simple condition equivalent to the equality in the Jensen operator inequality of Hansen and Pedersen \cite{HP}. For the sake of convenience, we assume that an orthonormal basis $\{e_j\}$ of a Hilbert space $H$ is fixed, and a vector $x=\sum_jx_je_j\in H$ is often considered as an operator from $\mathbb{C}$ to $H$ (i.e., $H=B(\mathbb{C},H)$) with the inner product
$$\<x,\,y\>=y^*x=\sum_j\overline{y_j}x_j.$$

\section{Schwarz inequalities}

The classical Schwarz inequality in a Hilbert space is $|y^*x|\le\|x\|\|y\|$. Here we give three operator versions with equality conditions. First we give a simple operator version on $B(H)$ or $B(H^n)$, where vectors $x, y\in H$ cannot be substituted directly for the operators $X$ and $Y$. The condition equivalent to the equality is very simple:

\begin{theorem}[\bf Schwarz inequality 1]
For operators $X$ and $Y$ on a Hilbert space, the inequality
$$Y^*X^*YX+X^*Y^*XY\le Y^*X^*XY+X^*Y^*YX=(XY)^*XY+(YX)^*YX$$
holds with equality only when $X$ commutes with $Y$.
\end{theorem}

\begin{proof}
The required inequality follows from
$$0\le(XY-YX)^*(XY-YX)=(XY)^*XY-Y^*X^*YX-X^*Y^*XY+(YX)^*YX.$$
The equality holds only when $(XY-YX)^*(XY-YX)=0$, and hence $XY-YX=0$, that is, $X$ commutes with $Y$.
\end{proof}
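As a quick sanity check (our own illustration, not part of the original argument), take
$$X=\begin{pmatrix}0&1\\ 0&0\end{pmatrix}\quad\text{and}\quad Y=X^*=\begin{pmatrix}0&0\\ 1&0\end{pmatrix},$$
so that $XY=\begin{pmatrix}1&0\\ 0&0\end{pmatrix}$ and $YX=\begin{pmatrix}0&0\\ 0&1\end{pmatrix}$. Then the left-hand side is $(XY)^*(YX)+(YX)^*(XY)=O$, while the right-hand side is $(XY)^*XY+(YX)^*YX=I$, so the inequality is strict, in accordance with $XY-YX=\begin{pmatrix}1&0\\ 0&-1\end{pmatrix}\ne O$.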
Let $\{e_j\}$ be the fixed orthonormal basis of a Hilbert space $H$. For vectors $x=\sum_jx_je_j$ and $y=\sum_jy_je_j$, put the dyadic operators
$$X_j=x\otimes\overline{e_j}\quad\text{and}\quad Y_j=y\otimes\overline{e_j},$$
where the dyadic operator (the Schatten product) is determined by $(v\otimes\overline{w})z=\<z,\,w\>v$. Then, the inequality
$$Y_j^*X_j^*Y_jX_j+X_j^*Y_j^*X_jY_j\le Y_j^*X_j^*X_jY_j+X_j^*Y_j^*Y_jX_j$$
means
$$\(x_j\overline{y_j}\<y,\,x\>+y_j\overline{x_j}\<x,\,y\>\)e_j\otimes\overline{e_j}\leqq\(|y_j|^2\<x,\,x\>+|x_j|^2\<y,\,y\>\)e_j\otimes\overline{e_j},$$
and consequently
$$x_j\overline{y_j}\<y,\,x\>+y_j\overline{x_j}\<x,\,y\>\leqq|y_j|^2\<x,\,x\>+|x_j|^2\<y,\,y\>.$$
Taking the summation over $j$ (indeed, by the Parseval equation), we have
\begin{eqnarray}\label{1}
\<x,\,y\>\<y,\,x\>+\<y,\,x\>\<x,\,y\>\leqq\<y,\,y\>\<x,\,x\>+\<x,\,x\>\<y,\,y\>,
\end{eqnarray}
so that the classical inequality follows. By the equality condition, we have
$$\<y,\,e_j\>x\otimes\overline{e_j}=\<x,\,e_j\>y\otimes\overline{e_j}$$
for all $j$, and hence
$$\<y,\,e_j\>x=\<x,\,e_j\>y,$$
which means the linear dependence of $x$ and $y$.

Next we give another version, where a vector $x\in H$ can be substituted for $X$:

\begin{theorem}[\bf Schwarz inequality 2]
For operators $X$ and $Y$ from a Hilbert space to another, the inequality
$$(X^*Y)\otimes(Y^*X)+(Y^*X)\otimes(X^*Y)\le (X^*X)\otimes (Y^*Y)+(Y^*Y)\otimes (X^*X)$$
holds with equality only when $X$ and $Y$ are linearly dependent.
\end{theorem}

\begin{proof}
The inequality follows from
\begin{align*}0&\le(X\otimes Y-Y\otimes X)^*(X\otimes Y-Y\otimes X)\\
&=(X^*X)\otimes(Y^*Y)-(X^*Y)\otimes(Y^*X)-(Y^*X)\otimes(X^*Y)+(Y^*Y)\otimes(X^*X).
\end{align*}
Thereby the equality condition is $X\otimes Y=Y\otimes X$, that is, $x_{ij}Y=y_{ij}X$ for all $i$ and $j$, which means the linear dependence of $X$ and $Y$.
\end{proof}

Since the tensor product of scalars like $y^*x$ is the usual product, the above inequality also implies \eqref{1}. Recall the Marcus-Khan theorem for operators (see \cite{F0}): Let $U$ be the isometry from $H$ to $H\otimes H$ with $Ue_j=e_j\otimes e_j$ for all $j$ (as a dyadic representation, $U=\sum_j(e_j\otimes e_j)\otimes\overline{e_j}$). Then the Hadamard (Schur) product $A\circ B$ is obtained by
$$A\circ B=U^*(A\otimes B)U.$$
Since $(A\otimes B)Ue_j=Ae_j\otimes Be_j$, the conditions depend on the column vectors $Ae_j$ and $Be_j$. Thus we have a Schwarz inequality for Hadamard products:

\begin{corollary}
The inequality $(X^*Y)\circ(Y^*X)\le (X^*X)\circ(Y^*Y)$ holds with equality only when the two vectors $Xe_j$ and $Ye_j$ are linearly dependent for each $j$.
\end{corollary}

Finally in this section, we give an operator mean version. To show this, we use the {\it geometric operator mean} $A\#B$ for positive operators $A$ and $B$ on a Hilbert space. If $A$ is invertible, then
$$A\#B=B\#A=A^{\frac{1}{2}}\(A^{-\frac{1}{2}}BA^{-\frac{1}{2}}\)^{\frac{1}{2}}A^{\frac{1}{2}},$$
and $X^*(A\# B)X=(X^*AX)\#(X^*BX)$ holds for invertible $X$ by the general theory of operator means \cite{KA} (see also \cite{Ms}). Ando \cite{A} gives the operator matrix formula:
$$A\#B=\max\left\{X\ge O\ \Big|\ \begin{pmatrix}A&X\\ X&B\end{pmatrix}\ge \O\right\}.$$
If $A$ and $B$ commute, then $A\#B=(AB)^{\frac{1}{2}}=A^{\frac{1}{2}}B^{\frac{1}{2}}$. Another typical property is the homogeneity:
$$(sA)\#(tB)=\sqrt{st}\,(A\#B)$$
for positive numbers $s$ and $t$.
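For instance (a commuting sanity check of ours), for $A=\begin{pmatrix}1&0\\ 0&4\end{pmatrix}$ and $B=\begin{pmatrix}9&0\\ 0&16\end{pmatrix}$ we get $A\#B=(AB)^{\frac{1}{2}}=\begin{pmatrix}3&0\\ 0&8\end{pmatrix}$, the entrywise geometric mean, and one readily checks $\begin{pmatrix}A&A\#B\\ A\#B&B\end{pmatrix}\ge\O$ in accordance with Ando's formula.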
Now we have:

\begin{theorem}[\bf Schwarz inequality 3]
Let $X$ and $Y$ be operators from a Hilbert space $H$ to another with the polar decomposition $Y^*X=U|Y^*X|$ in $B(H)$, where $U\in B(H)$ is the partial isometry from $\overline{\text{\rm ran}\,X^*Y}$ to $\overline{\text{\rm ran}\,Y^*X}$. Then
$$|Y^*X|\le X^*X\# U^*Y^*YU=|X|^2\#|YU|^2\quad\(\hbox{resp. }|X^*Y|\le |Y|^2\#|XU^*|^2\).$$
If $X$ $($resp. $Y)$ is an invertible operator in $B(H)$, or if the operators $\{X, YU\}$ $($resp. $\{Y, XU^*\})$ are linearly dependent, then the equality holds.
\end{theorem}

\begin{proof}
Since $X^*Y=U^*|X^*Y|$ is also a polar decomposition, we have only to show the former case. By $U^*Y^*X=|Y^*X|=X^*YU$, we have
\begin{align*}\O&\le \begin{pmatrix}I&O\\ O&U\end{pmatrix}^* \begin{pmatrix}X^*X&X^*Y\\ Y^*X&Y^*Y\end{pmatrix} \begin{pmatrix}I&O\\ O&U\end{pmatrix}\\
&=\begin{pmatrix}X^*X&X^*YU\\ U^*Y^*X&U^*Y^*YU\end{pmatrix} =\begin{pmatrix}X^*X&|Y^*X|\\ |Y^*X|&U^*Y^*YU\end{pmatrix},
\end{align*}
and hence $|Y^*X|\le X^*X\# U^*Y^*YU$. Suppose $X\in B(H)$ is invertible. Then
\begin{align*}
X^*X\# U^*Y^*YU &= |X|^2\# U^*Y^*X(X^*X)^{-1}X^*YU\\
&= |X|\big(I\# |X|^{-1}|Y^*X|(X^*X)^{-1}|Y^*X||X|^{-1}\big)|X|\\
&= |X|\big(I\# (|X|^{-1}|Y^*X||X|^{-1})^2\big)|X|\\
&= |X||X|^{-1}|Y^*X||X|^{-1}|X|=|Y^*X|.
\end{align*}
Next suppose $sX=rYU$ for some scalars $s$ and $r$. Then
\begin{align*}
|sr|(X^*X\# U^*Y^*YU)&=(|s|^2X^*X)\#(|r|^2U^*Y^*YU)\\
&= (|r|^2U^*Y^*YU)\#(|r|^2U^*Y^*YU)=|r|^2U^*Y^*YU.
\end{align*}
On the other hand, the relation
$$|sr|^2|Y^*X|^2=|r|^2(sX)^*Y(UU^*)Y^*(sX)=|r|^4(U^*Y^*YU)^2$$
implies $|sr||Y^*X|=|r|^2U^*Y^*YU$, which shows the equality.
\end{proof}

In the above theorem, putting $X=x$ and $Y=y$ for vectors $x$ and $y$, we have the classical Schwarz inequality. Considering the rank one (hence noninvertible) operators $X=x\otimes\overline{e_1}$ and $Y=y\otimes\overline{e_1}$ in $B(H)$, we also obtain the classical one, and moreover we see that the equality never holds unless $x$ and $y$ are linearly dependent.
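To spell out the vector case just mentioned (a quick computation of ours): for nonzero vectors $x$ and $y$ regarded as operators in $B(\mathbb{C},H)$ with $\<x,\,y\>\ne0$, we have $Y^*X=y^*x=\<x,\,y\>$, and the partial isometry is the unimodular scalar $U=\<x,\,y\>/|\<x,\,y\>|$. Hence
$$X^*X\#U^*Y^*YU=\|x\|^2\#\|y\|^2=\|x\|\,\|y\|,$$
and the theorem reads $|\<x,\,y\>|\le\|x\|\,\|y\|$.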
The above equality conditions are simple but merely sufficient ones. It seems hard to give a simple condition exactly equivalent to the equality, in view of the following example in $B(H^2)$, where $\X$ is not invertible and the operators $\{\X, \Y\U\}$ are not linearly dependent in the usual sense:

\begin{example}
For operators $X, Y\in B(H)$, let $T\in B(H)$ be a positive invertible operator commuting with $|X|^2+|Y|^2$, where $|X|^2+|Y|^2$ is also assumed to be invertible (e.g., $T=f(|X|^2+|Y|^2)$ for a positive function $f$). Then, put $\X=\begin{pmatrix}X&O\\ Y&O\end{pmatrix}$. For any operator $Z\in B(H)$, the relation (saying that $O$ is an operator version of an eigenvalue for $\X$)
$$\X\begin{pmatrix}O\\ Z\end{pmatrix}=\begin{pmatrix}X&O\\ Y&O\end{pmatrix}\begin{pmatrix}O\\ Z\end{pmatrix}=\begin{pmatrix}O\\ O\end{pmatrix}\quad(=\begin{pmatrix}O\\ Z\end{pmatrix}O)$$
implies that $\X\in B(H^2)$ cannot be invertible. Let $\begin{pmatrix}R\\ S\end{pmatrix}\in B(H,H^2)$ be a vector orthogonal to $\begin{pmatrix}X\\ Y\end{pmatrix}$ in the sense $\begin{pmatrix}R\\ S\end{pmatrix}^*\begin{pmatrix}X\\ Y\end{pmatrix}=O$. Then, putting $\Y=\begin{pmatrix}XT& R\\ YT& S\end{pmatrix}$, we have
$$\Y^*\X=\begin{pmatrix}TX^*& TY^*\\ R^*& S^*\end{pmatrix}\begin{pmatrix}X&O\\ Y&O\end{pmatrix}=\begin{pmatrix}T(|X|^2+|Y|^2)&O\\ O&O\end{pmatrix}=|\Y^*\X|.$$
Thereby the polar decomposition is
$$\Y^*\X=\U|\Y^*\X|\qquad\text{for }\quad \U=\begin{pmatrix}I&O\\ O&O\end{pmatrix}.$$
Since
$$\Y\U=\begin{pmatrix}XT& O\\ YT& O\end{pmatrix}\quad\text{and}\quad|\Y\U|^2=\begin{pmatrix}T^2(|X|^2+|Y|^2)&O\\O&O\end{pmatrix},$$
we have
\begin{align*}|\X|^2\#|\Y\U|^2 &=\begin{pmatrix}|X|^2+|Y|^2&O\\ O&O\end{pmatrix}\#\begin{pmatrix}T^2(|X|^2+|Y|^2)&O\\ O&O\end{pmatrix}\\
&=\begin{pmatrix}(|X|^2+|Y|^2)\#\big(T^2(|X|^2+|Y|^2)\big)&O\\ O&O\end{pmatrix}\\
&=\begin{pmatrix}\big((|X|^2+|Y|^2)T^2(|X|^2+|Y|^2)\big)^{\frac{1}{2}}&O\\ O&O\end{pmatrix}\\
&=\begin{pmatrix}T(|X|^2+|Y|^2)&O\\ O&O\end{pmatrix}=|\Y^*\X|.
\end{align*}
\end{example}

\section{Proper vectors and Jensen operator equality}

The (classical) Jensen inequality is the following: Take a probability vector $p=(p_j)$ and real numbers $a_j$. If $f$ is convex, then
\begin{eqnarray}\label{2}
\sum_jp_jf(a_j)\geqq f\(\sum_jp_ja_j\).
\end{eqnarray}
Moreover, if $f$ is strictly convex and all the fixed $a_j$ are distinct, then the equality holds if and only if $p_j=1$ for some $j$ (hence $p_i=0$ for $i\not=j$). This inequality has a Hilbert space-like expression: Let $x=(x_j)$ be a unit vector and $A$ the diagonal selfadjoint matrix $\hbox{diag}(a_1,a_2,\cdots)$. Then \eqref{2} is equivalent to
$$x^*f(A)x=x^*\hbox{diag}(f(a_1),\cdots)x=\sum_j|x_j|^2f(a_j)\geqq f\(\sum_j|x_j|^2a_j\)=f(x^*Ax).$$
In this case, if $f$ is strictly convex, then the equality holds for a fixed $A$ if and only if $x$ is a unit eigenvector for $A$. To extend this, consider a block diagonal operator matrix $\A=\hbox{diag}(A_{j})\in B(H^n)$ and a column vector $C=(C_j)\in B(H, H^n)$ for operators $C_j$ on $H$. Suppose that $\A$ is selfadjoint and that $C$ is {\it isometric}:
$$I_H=C^*C=\sum_{j}C_j^*C_j,$$
in which case $C$ is called a {\it unit operator}. Then, by \eqref{2}, Jensen's operator inequality should be of the form
$$C^*f(\A)C=C^*\text{diag}\big(f(A_j)\big)C=\sum_{j}C_j^*f(A_{j})C_j\ge f\(\sum_jC_j^*A_jC_j\)=f(C^*\A C)$$
to preserve positivity for operators. The diagonality of $\A$ is not essential. In fact, Hansen and Pedersen \cite{HP} (see also \cite{F}) gave the following result:

\begin{thm}[Hansen-Pedersen]
Let $\mathcal{I}$ be an $($open$)$ interval and $\A=(A_{ij})$ a selfadjoint operator matrix in $B(H^n)$ with $\sigma(\A)\subset\mathcal{I}$. A continuous function $f$ on $\mathcal{I}$ is operator convex $($resp. concave$)$ if and only if
\begin{eqnarray}\label{3}
C^*f(\A)C\ge f\(C^*\A C\) \quad\(\text{resp. }C^*f(\A)C\le f\(C^*\A C\) \)
\end{eqnarray}
for all unit operators $C\in B(H,H^n)$.
\end{thm}
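As a simple instance (a standard consequence, recorded here for illustration): $f(x)=x^2$ is operator convex on $\mathbb{R}$, so \eqref{3} for a block diagonal $\A=\hbox{diag}(A_j)$ gives
$$\sum_jC_j^*A_j^2C_j\ge\Big(\sum_jC_j^*A_jC_j\Big)^2$$
for every unit operator $C=(C_j)$; for $H=\mathbb{C}$ and $C_j=\sqrt{p_j}$, this is the classical $\sum_jp_ja_j^2\geqq\(\sum_jp_ja_j\)^2$.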
To see the equality condition as in the above vector cases, we say that $X=(X_{i})\in B(H,H^n)$ is a (unit) {\it proper vector} (with operator entries) for $\A$ if $X$ is an isometry satisfying
$$\A X=XE=\begin{pmatrix}X_1E\\\vdots\\ X_nE\end{pmatrix}$$
for some operator $E\in B(H)$, which is called an {\it eigen-operator}. Then we have
$$X^*\A X=X^*XE=E,$$
which shows that if $\A$ is selfadjoint (or positive), then so is $E$. Note that if $C, D\in B(H)$ with $C$ an isometry satisfy $XCDE=XE$, then $XC$ is also a proper vector with the eigen-operator $DEC$ for the above proper vector $X\in B(H,H^n)$. In particular, for every unitary $V\in B(H)$, the operator $XV^*$ is also a proper vector of $\A$ with the eigen-operator $VEV^*$. Here we naturally define an {\it orthogonality} for $X$ and $Y$ by $Y^*X=O$. Then we have a diagonalization theorem:

\begin{theorem}\label{diag}
Let $\A$ be a selfadjoint operator on $H^n$ and let $X^{[J]}=(X_{iJ})$ be mutually orthogonal (unit) proper vectors in $B(H,H^n)$ for $\A$ with eigen-operators $E_J$. Then the operator $\U\equiv(X_{ij})\in B(H^n)$ is an isometry and
\begin{eqnarray}\label{4}
\U^*\A\U=\begin{pmatrix}E_1&&\\&\ddots&\\&& E_n\end{pmatrix}\equiv \E.
\end{eqnarray}
Moreover $\U\U^*$ commutes with $\A$, and hence
$$\U^*f(\A)\U=f(\U^*\A\U)=f(\E)=\begin{pmatrix}f(E_1)&&\\&\ddots&\\&& f(E_n)\end{pmatrix}$$
for all real continuous functions $f$. Conversely, if \eqref{4} holds for an isometry $\U$ with $\U\U^*\A=\A\U\U^*$, then $E_J$ is an eigen-operator for $\A$ with a proper vector $U^{[J]}=(U_{iJ})\in B(H,H^n)$.
\end{theorem}

\begin{proof}
By $\U^*\U=\(X^{[i]*}X^{[j]}\)=(\delta_{ij}I_H)=\I_{H^n}$, the operator $\U$ is an isometry. Since
$$\A\U=\A(X^{[1]}\ \cdots\ X^{[n]})=(\A X^{[1]}\ \cdots\ \A X^{[n]})=(X^{[1]}E_1\ \cdots\ X^{[n]}E_n)=\U\E,$$
we have $\U^*\A\U=\U^*\U\E=\E$, which also shows $\E^*=\E$, that is, $E_k^*=E_k$ for all $k$. On the other hand, $\A\U\U^*=\U\E\U^*=\U\U^*\A$ since $\E$ is selfadjoint. It follows that $(\U^*\A\U)^k=\U^*\A^k\U$ for all $k$. Then $p(\U^*\A\U)=\U^*p(\A)\U$ for all polynomials $p$, and consequently $f(\U^*\A\U)=\U^*f(\A)\U$ holds for all continuous functions $f$ by the Weierstrass approximation theorem. The converse is obtained similarly.
\end{proof}

\begin{example}
Let $C$ and $D$ be invertible operators on $H$ with $C^*C+D^*D=I$. Put the projection
$$\A=\begin{pmatrix}C&O\\ D&O\end{pmatrix}\begin{pmatrix}C&O\\ D&O\end{pmatrix}^*=\begin{pmatrix}CC^*&CD^*\\ DC^*&DD^*\end{pmatrix}.$$
In fact, $\A$ is selfadjoint and
\begin{align*}
\A^2&=\begin{pmatrix}C&O\\ D&O\end{pmatrix}\begin{pmatrix}C&O\\ D&O\end{pmatrix}^*\begin{pmatrix}C&O\\ D&O\end{pmatrix}\begin{pmatrix}C&O\\ D&O\end{pmatrix}^*\\
&=\begin{pmatrix}C&O\\ D&O\end{pmatrix}\begin{pmatrix}I&O\\ O&O\end{pmatrix}\begin{pmatrix}C&O\\ D&O\end{pmatrix}^*=\A.
\end{align*}
Noting that $|C|$ and $|D|$ commute (since $|C|^2+|D|^2=I$), we take the mutually orthogonal unit proper vectors
$$X^{[1]}=\begin{pmatrix}C\\ D\end{pmatrix},\quad X^{[2]}=\begin{pmatrix}Y_1\\ Y_2\end{pmatrix}\equiv\begin{pmatrix}(C^*)^{-1}|C||D|\\ -D|D|^{-1}|C|\end{pmatrix}.$$
In fact, $X^{[1]*} X^{[1]}=C^*C+D^*D=I$,
$$X^{[2]*} X^{[2]}=|D||C|(C^*C)^{-1}|C||D|+|C||D|^{-1}D^*D|D|^{-1}|C|=|D|^2+|C|^2=I$$
and
$$X^{[1]*} X^{[2]}=C^* Y_1 + D^* Y_2=|C||D|-D^*D|D|^{-1}|C|=|D||C|-|D||C|=O,$$
which means the orthogonality. Since
$$\begin{pmatrix}C&O\\ D&O\end{pmatrix}^*(X^{[1]}\ X^{[2]})=(X^{[1]}\ O)^*(X^{[1]}\ X^{[2]})=\begin{pmatrix}X^{[1]*}X^{[1]}&X^{[1]*} X^{[2]}\\ O&O\end{pmatrix}=\begin{pmatrix}I&O\\ O&O\end{pmatrix},$$
we have
$$\A(X^{[1]}\ X^{[2]})=\begin{pmatrix}C&O\\ D&O\end{pmatrix}\begin{pmatrix}I&O\\ O&O\end{pmatrix}=(X^{[1]}\ X^{[2]})\begin{pmatrix}I&O\\ O&O\end{pmatrix}=(X^{[1]}I\ X^{[2]}O),$$
which shows that $X^{[1]}$ (resp. $X^{[2]}$) is a proper vector with the eigen-operator $I$ (resp. $O$), and we also obtain the diagonalization:
$$(X^{[1]}\ X^{[2]})^*\A(X^{[1]}\ X^{[2]})=\begin{pmatrix}I&O\\ O&O\end{pmatrix}.$$
\end{example}

\begin{corollary}
Under the same assumptions on $f$, $\A$ and $X^{[j]}$ as in Theorem \ref{diag},
$$X^{[j]*}f(\A)X^{[j]}=f(X^{[j]*}\A X^{[j]}).$$
\end{corollary}

\begin{proof}
For the unit operator $V_j=(O\cdots O\ \overset{j\text{-th}}{I}\ O\cdots O)^*$ and $\U$ in Theorem \ref{diag},
\begin{align}
X^{[j]*}f(\A)X^{[j]}&=V_j^*\U^*f(\A)\U V_j=V_j^*f(\U^*\A\U)V_j\notag\\
&=V_j^*f(\E)V_j=f(E_j)=f(X^{[j]*}\A X^{[j]}).\tag*{$\square$}
\end{align}
\NQED\smallskip\end{proof}
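For instance, in the above example (a check of ours): since $\A$ is a projection, $f(\A)=f(0)(\I-\A)+f(1)\A$ for every real continuous function $f$, and hence
$$X^{[1]*}f(\A)X^{[1]}=f(0)\(I-X^{[1]*}\A X^{[1]}\)+f(1)X^{[1]*}\A X^{[1]}=f(1)I=f\(X^{[1]*}\A X^{[1]}\),$$
as the corollary asserts.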
Thus proper vectors give the equality. Indeed, this is a condition exactly equivalent to the equality in \eqref{3}:

\begin{theorem}
Under the same assumptions as in the Hansen-Pedersen theorem, suppose that $f$ is strictly convex $($or concave$)$. The equality in \eqref{3} holds if and only if $C$ is a proper vector of $\A$.
\end{theorem}

\begin{proof}
It suffices to show the case where $f$ is operator convex on $(-1,1)$ and $\sigma(\A)\subset(-1,1)$. Then $f$ has an integral representation
$$f(x)=a+bx+\int_{-1}^1\frac{x^2}{1-tx}\,dm(t)$$
for some finite measure $m$ on $[-1,1]$ (\cite{B}). Therefore, the above equality can be reduced to that for the non-affine part
$$\frac{x^2}{1-tx}=-\frac{x}{t}-\frac{1}{t^2}+\frac{1}{t^2(1-tx)},$$
in particular, to that for the function $f_1(x)=1/(1-x)$. For $\B=\I-\A$, we have $\sigma(\B)\subset(0,2)$ and hence
$$C^*\B^{-1}C=C^*f_1(\A)C=f_1(C^*\A C)=(C^*\B C)^{-1}$$
by the equality in \eqref{3}. Take the projection $\P=CC^*\in B(H^n)$ with $\P C=C$. It follows from the operator convexity of $g(x)=x^{2}$ that
$$(C^*\B^{-\frac{1}{2}}C)^2\le C^*\B^{-1}C=(C^*\B C)^{-1}\le (C^*\B^{\frac{1}{2}}\P\B^{\frac{1}{2}}C)^{-1}=(C^*\B^{\frac{1}{2}}C)^{-2}.$$
Since $t^{\frac{1}{2}}$ is operator monotone, this yields $C^*\B^{-\frac{1}{2}}C\le(C^*\B^{\frac{1}{2}}C)^{-1}$, while \eqref{3} for the operator convex function $1/x$ gives the reverse inequality. Thus all the terms in the above are equal; in particular,
$$C^*\B^{\frac{1}{2}}\I\B^{\frac{1}{2}}C=C^*\B C=C^*\B^{\frac{1}{2}}\P\B^{\frac{1}{2}}C,$$
that is, $C^*\B^{\frac{1}{2}}(\I-\P)\B^{\frac{1}{2}}C=O$. Thereby
$$\big((\I-\P)\B^{\frac{1}{2}}\P\big)^*(\I-\P)\B^{\frac{1}{2}}\P=\P\B^{\frac{1}{2}}(\I-\P)\B^{\frac{1}{2}}\P=\O,$$
so that $(\I-\P)\B^{\frac{1}{2}}\P=\O$. Consequently, $\B^{\frac{1}{2}}$ commutes with $\P$, and so does $\A$. Therefore we obtain
$$\A C=\A\P C=\P\A C=C(C^*\A C),$$
that is, $C$ is a proper vector with the eigen-operator $C^*\A C$.
\end{proof}

%---------------------------------------------------------------------------------------%

\bibliographystyle{amsplain}
\begin{thebibliography}{10}

\bibitem{A} T. Ando, \textit{Topics on operator inequalities}, Hokkaido Univ. Lecture Note, 1978.

\bibitem{B} J. Bendat and S. Sherman, \textit{Monotone and convex operator functions}, Trans. Amer. Math. Soc. \textbf{79} (1955), 58--71.

\bibitem{F0} J.I. Fujii, \textit{The Marcus-Khan theorem for Hilbert space operators}, Math. Japon. \textbf{41} (1995), 531--535.

\bibitem{F} J.I. Fujii and M. Fujii, \textit{Jensen's inequality on any interval for operators}, Proc. 3rd Int. Conf. on Nonlinear Analysis and Convex Analysis, 2004, 29--39.

\bibitem{Ms} T. Furuta, J. Mi\'ci\'c, J.E. Pe\v{c}ari\'{c} and Y. Seo, \textit{Mond-Pe\v{c}ari\'{c} Method in Operator Inequalities}, Monographs in Inequalities 1, Element, Zagreb, 2005.

\bibitem{HP} F. Hansen and G.K. Pedersen, \textit{Jensen's operator inequality}, Bull. London Math. Soc. \textbf{35} (2003), 553--564.

\bibitem{KA} F. Kubo and T. Ando, \textit{Means of positive linear operators}, Math. Ann. \textbf{248} (1980), 205--224.

\end{thebibliography}
\end{document}
%------------------------------------------------------------------------------
% End of journal.tex
%------------------------------------------------------------------------------