\documentclass[12pt, reqno]{amsart}
\usepackage{amsmath, amsthm, amscd, amsfonts, amssymb, graphicx, color}
\usepackage[bookmarksnumbered, plainpages]{hyperref}
\textheight 22.5truecm \textwidth 14.5truecm
\setlength{\oddsidemargin}{0.35in}\setlength{\evensidemargin}{0.35in}
\setlength{\topmargin}{-.5cm}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{corollary}[theorem]{Corollary}
\theoremstyle{definition}
\newtheorem{definition}[theorem]{Definition}
\newtheorem{example}[theorem]{Example}
\newtheorem{xca}[theorem]{Exercise}
\newtheorem{problem}[theorem]{Problem}
\theoremstyle{remark}
\newtheorem{remark}[theorem]{Remark}
\numberwithin{equation}{section}
\newcommand{\la}{\langle}
\newcommand{\ra}{\rangle}
\newcommand{\ol}{\overline}
\newcommand{\var}{\varphi}
\begin{document}
\setcounter{page}{33}
\noindent \parbox{2.85cm}{\includegraphics*[keepaspectratio=true,scale=1.75]{BJMA.jpg}} \noindent\parbox{4.85in}{\hspace{0.1mm}\\[1.5cm]\noindent Banach J. Math. Anal. 2 (2008), no. 1, 33--39\\ $\frac{\rule{4.55in}{0.05in}}{{}}$\\ {\footnotesize \textcolor[rgb]{0.65,0.00,0.95}{\textsc{\textbf{\large{B}}anach \textbf{\large{J}}ournal of \textbf{\large{M}}athematical \textbf{\large{A}}nalysis}}\\ ISSN: 1735-8787 (electronic)\\ \textcolor[rgb]{0.00,0.00,0.84}{\textbf{http://www.math-analysis.org }}\\ $\frac{{}}{\rule{4.55in}{0.05in}}$}\\[.5in]}
\title[Extended general variational inequalities]{Auxiliary Principle Technique for Extended General Variational Inequalities}
\author[M. Aslam Noor]{Muhammad Aslam Noor$^1$}
\address{$^1$ Department of Mathematics, COMSATS Institute of Information Technology, Islamabad, Pakistan}
\email{\textcolor[rgb]{0.00,0.00,0.84}{noormaslam@hotmail.com}}
\dedicatory{{\rm Submitted by Th. M. Rassias}}
\subjclass[2000]{Primary 49J40; Secondary 90C33}
\keywords{Variational inequalities, nonconvex functions, fixed-point problem, convergence, auxiliary principle.}
\date{Received: 10 January 2008; Accepted: 22 February 2008.}
\begin{abstract}
In this paper, we use the auxiliary principle technique to study the existence of a solution of the extended general variational inequalities, which were introduced and studied by the author. Several special cases are also discussed.
\end{abstract}
\maketitle
\section{Introduction}
\noindent Variational inequalities, which were introduced in the 1960s, represent the optimality conditions for differentiable convex functions on convex sets in normed spaces. It is known that the properties of the solutions of variational inequalities may not hold, in general, when the convex set is nonconvex. In recent years, the concept of convexity has been generalized in several directions; see, for example, \cite{2} and the references therein. Noor \cite{15, 16} introduced a nonconvex set, called the $(h,g)$-convex set, using the idea of a segmental type of non-connected convexity for sets, which takes into account only convex combinations of special types of points; see also \cite{2}. Noor \cite{15} has shown that the minimum of a differentiable $(h,g)$-convex function on an $(h,g)$-convex set can be characterized by a class of variational inequalities. This fact has motivated Noor \cite{13} to introduce and consider a new class of variational inequalities, which is called the {\it extended general nonlinear variational inequality} involving three operators.
It has been shown \cite{15} that the extended general variational inequalities are equivalent to fixed point problems. This alternative equivalence has been used to suggest a wide class of iterative methods for solving the extended general variational inequalities. It is known, however, that it is very difficult to evaluate the projection operator involved in such methods except in very special cases. To overcome this drawback, one uses the auxiliary principle technique, which is mainly due to Glowinski, Lions and Tr\'{e}moli\`{e}res \cite{4}. This technique is more flexible and has been used to develop several numerical methods for solving variational inequalities and equilibrium problems. In this paper, we again use the auxiliary principle technique to study the existence of a solution of the extended general variational inequalities. Since the extended general variational inequalities include various classes of variational inequalities and complementarity problems as special cases, the results proved in this paper continue to hold for these problems. The results proved in this paper may be viewed as a significant improvement of previously known results. It is interesting to explore the applications of these extended general variational inequalities in the mathematical and engineering sciences with new and novel aspects.
\bigskip
\section{Preliminaries}
Let $H$ be a real Hilbert space whose inner product and norm are denoted by $\la \cdot, \cdot \ra$ and $\|\cdot\|$, respectively. Let $K$ be a nonempty closed and convex set in $H$. For given nonlinear operators $T, g, h: H \rightarrow H$, consider the problem of finding $u \in H: h(u) \in K$ such that
\begin{equation}\label{2.1}
\la Tu, g(v) - h(u)\ra \geq 0, \qquad \forall v\in H: g(v) \in K.
\end{equation}
An inequality of the type \eqref{2.1} is called the {\it extended general variational inequality involving three operators}, which was introduced and studied by Noor \cite{15}.
\medskip

We now show that the minimum of a class of differentiable nonconvex functions on an $(h,g)$-convex set $K$ in $H$ can be characterized by the extended general variational inequality \eqref{2.1}. For this purpose, we recall the following well-known concepts; see \cite{2}.
\begin{definition}
Let $K$ be any set in $H$. The set $K$ is said to be $(h,g)$-convex, if there exist functions $g, h: H \longrightarrow H$ such that
\begin{eqnarray*}
h(u)+t(g(v)-h(u)) \in K, \quad \forall u,v \in H: h(u), g(v) \in K, \quad t\in [0,1].
\end{eqnarray*}
\end{definition}
Note that every convex set is $(h,g)$-convex, but the converse is not true; see \cite{2}. If $g = h$, then the $(h,g)$-convex set $K$ is called a $g$-convex set, which was introduced by Youness \cite{21}. See also Cristescu and Lupsa \cite{2} for its various extensions and generalizations.
\vskip .2pc
\begin{definition}
The function $F: K \longrightarrow H$ is said to be $(h,g)$-convex on the $(h,g)$-convex set $K$, if there exist two functions $h, g$ such that
\begin{eqnarray*}
F(h(u)+t(g(v)-h(u))) & \leq & (1-t)F(h(u))+tF(g(v))\,, \\
&& \quad \forall u,v \in H: h(u),g(v) \in K, \quad t\in [0,1].
\end{eqnarray*}
\end{definition}
Clearly, every convex function is $(h,g)$-convex, but the converse is not true. For $g = h$, Definition 2.2 is due to Youness \cite{21}. It is known \cite{15} that the minimum of a differentiable $(h,g)$-convex function on an $(h,g)$-convex set $K$ in $H$ can be characterized by the extended general variational inequality \eqref{2.1}.
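\medskip
\noindent As a simple illustration of Definition 2.1 (this example is not taken from \cite{2, 15}; we include it only to fix ideas), let $H = \mathbb{R}$, $K = [-2,-1] \cup [1,2]$ and $g(u) = h(u) = |u|$ for all $u \in H$. If $h(u), g(v) \in K$, then $h(u), g(v) \in [1,2]$, and consequently
\begin{eqnarray*}
h(u)+t(g(v)-h(u)) \in [1,2] \subset K, \quad \forall t \in [0,1].
\end{eqnarray*}
Thus $K$ is an $(h,g)$-convex set (in fact, since $g = h$ here, a $g$-convex set in the sense of Youness \cite{21}), although $K$ is clearly not convex.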
For the sake of completeness and to convey an idea of the technique involved, we include the proof of this characterization.
\vskip .2pc
\begin{lemma}
Let $F: K \longrightarrow H$ be a differentiable $(h,g)$-convex function on the $(h,g)$-convex set $K$. Then $u \in H: h(u) \in K$ is the minimum of the $(h,g)$-convex function $F$ on $K$ if and only if $u \in H: h(u) \in K$ satisfies the inequality
\begin{eqnarray}\label{2.2}
\la F^{\prime }(h(u)), g(v)-h(u)\ra \geq 0, \quad \forall v\in H: g(v) \in K,
\end{eqnarray}
where $F^{\prime }(h(u))$ is the Fr\'{e}chet differential of $F$ at $h(u) \in K$.
\end{lemma}
\begin{proof}
Let $u \in H : h(u) \in K$ be a minimum of the $(h,g)$-convex function $F$ on $K$. Then
\begin{eqnarray}\label{2.3}
F(h(u)) \leq F(g(v))\,, \quad \forall v\in H: g(v) \in K.
\end{eqnarray}
Since $K$ is an $(h,g)$-convex set, for all $u,v \in H: h(u),g(v) \in K$ and $t \in [0,1]$, we have $g(v_t)=h(u)+t(g(v)-h(u)) \in K$. Setting $g(v)= g(v_t)$ in \eqref{2.3} and using the $(h,g)$-convexity of $F$, we have
\begin{eqnarray*}
F(h(u)) \leq F(h(u)+ t(g(v)-h(u))) \leq F(h(u))+t(F(g(v))-F(h(u))).
\end{eqnarray*}
Dividing the above inequality by $t$ and letting $t \longrightarrow 0$, we have
\begin{eqnarray*}
\la F^{\prime }(h(u)), g(v)-h(u)\ra \geq 0, \quad \forall v\in H: g(v) \in K,
\end{eqnarray*}
which is the required result \eqref{2.2}.

Conversely, let $u \in H: h(u) \in K$ satisfy the inequality \eqref{2.2}. Since $F$ is an $(h,g)$-convex function, for all $u,v \in H: h(u),g(v) \in K$ and $t \in [0,1]$, we have $h(u)+t(g(v)-h(u)) \in K$ and
\begin{eqnarray*}
F(h(u)+t(g(v)-h(u))) \leq (1-t)F(h(u))+tF(g(v))\,,
\end{eqnarray*}
which implies that
\begin{eqnarray*}
F(g(v))-F(h(u)) \geq \frac{F(h(u)+t(g(v)-h(u)))-F(h(u))}{t}.
\end{eqnarray*}
Letting $t \longrightarrow 0$, we obtain
\begin{eqnarray*}
F(g(v))-F(h(u)) \geq \la F^{\prime }(h(u)),g(v)-h(u)\ra \geq 0, \quad \mbox{using \eqref{2.2},}
\end{eqnarray*}
which implies that
\begin{eqnarray*}
F(h(u)) \leq F(g(v))\,, \quad \forall v\in H: g(v) \in K,
\end{eqnarray*}
showing that $u \in H: h(u) \in K$ is the minimum of $F$ on $K$ in $H$.
\end{proof}
Lemma 2.3 implies that the $(h,g)$-convex programming problem can be studied via the extended general variational inequality \eqref{2.1} with $Tu = F'(h(u))$. In a similar way, one can show that the extended general variational inequality is the Fritz John condition of the inequality-constrained optimization problem.
\vskip .2pc
We now list some special cases of the extended general variational inequalities.
\medskip

\noindent\textbf{I.} \ If $g = h$, then problem \eqref{2.1} is equivalent to finding $u \in H: g(u) \in K$ such that
\begin{eqnarray}\label{2.4}
\la Tu,g(v)-g(u) \ra \geq 0, \qquad \forall v\in H:g(v) \in K,
\end{eqnarray}
which is known as the general variational inequality, introduced and studied by Noor \cite{5} in 1988. It turned out that odd-order and nonsymmetric obstacle, free, moving, unilateral and equilibrium problems arising in various branches of pure and applied sciences can be studied via general variational inequalities.
\medskip

\noindent\textbf{II.} \ For $g \equiv I$, the identity operator, the extended general variational inequality \eqref{2.1} collapses to: find $u \in H: h(u) \in K$ such that
\begin{eqnarray}\label{2.5}
\la Tu,v-h(u) \ra \geq 0, \quad \forall v\in K,
\end{eqnarray}
which is also called the general variational inequality; see Noor \cite{6}.
\medskip

\noindent\textbf{III.
} \ For $h = I$, the identity operator, the extended general variational inequality \eqref{2.1} is equivalent to finding $u \in K$ such that
\begin{eqnarray}\label{2.6}
\la Tu,g(v)- u \ra \geq 0, \quad \forall v\in H: g(v) \in K,
\end{eqnarray}
which is also called the general variational inequality involving two nonlinear operators; it was introduced and studied by Noor \cite{16, 17}.
\medskip

We would like to emphasize the fact that the general variational inequalities \eqref{2.4}, \eqref{2.5} and \eqref{2.6} are quite different from each other and have different applications.
\medskip

\noindent\textbf{IV.} \ For $g = h = I$, the identity operator, the extended general variational inequality \eqref{2.1} is equivalent to finding $u \in K$ such that
\begin{eqnarray*}
\la Tu,v-u \ra \geq 0, \qquad \forall v \in K,
\end{eqnarray*}
which is known as the classical variational inequality, introduced in 1964 by Stampacchia \cite{22}. For recent applications, numerical methods, sensitivity analysis, dynamical systems and formulations of variational inequalities, see \cite{1}--\cite{22} and the references therein.
\medskip

\noindent\textbf{V.} \ If $K^{*} = \{ u \in H : \la u,v \ra \geq 0, \ \forall v\in K \}$ is the polar (dual) cone of a closed convex cone $K$ in $H$, then problem \eqref{2.1} is equivalent to finding $u \in H$ such that
\begin{eqnarray}\label{2.7}
g(u) \in K, \quad Tu \in K^{*}, \quad \la g(u), Tu \ra = 0,
\end{eqnarray}
which is known as the general complementarity problem; see \cite{12,18}. If $g = I$, the identity operator, then problem \eqref{2.7} is called the generalized complementarity problem. If $g(u)= u-m(u)$, where $m$ is a point-to-point mapping, then problem \eqref{2.7} is called the quasi (implicit) complementarity problem; see \cite{12, 18} and the references therein. From the above discussion, it is clear that the extended general variational inequality \eqref{2.1} is the most general formulation and includes several previously known classes of variational inequalities and related optimization problems as special cases. These variational inequalities have important applications in mathematical programming and the engineering sciences.
\medskip

We also need the following concepts and results.
\medskip

\begin{definition}
An operator $T: H \rightarrow H$ is said to be: \\
\textbf{(i)} {\it strongly monotone}, if there exists a constant $\alpha > 0$ such that
$$ \la Tu - Tv, u-v \ra \geq \alpha \|u-v\|^2, \quad \forall u,v \in H; $$
\textbf{(ii)} {\it Lipschitz continuous}, if there exists a constant $\beta > 0$ such that
$$ \|Tu-Tv\| \leq \beta \|u-v\|, \quad \forall u,v \in H. $$
\end{definition}
From (i) and (ii), it follows that $\alpha \leq \beta$.
\begin{remark}
It follows from the strong monotonicity of the operator $T$ that
\begin{eqnarray*}
\alpha \|u-v\|^2 \leq \la Tu-Tv,u-v \ra \leq \|Tu-Tv\|\|u-v\|, \quad \forall u,v \in H,
\end{eqnarray*}
which implies that
\begin{eqnarray*}
\|Tu-Tv\| \geq \alpha \|u-v\|, \quad \forall u,v \in H.
\end{eqnarray*}
\end{remark}
This observation enables us to define the following concept.
\begin{definition}
The operator $T$ is said to be firmly expanding if
\begin{eqnarray*}
\|Tu-Tv \| \geq \|u-v\|, \quad \forall u,v \in H.
\end{eqnarray*}
\end{definition}
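\noindent As a simple illustration, which is not taken from the references cited above, we note that Remark 2.5 shows that every strongly monotone operator with constant $\alpha \geq 1$ is firmly expanding. In particular, if $h(u) = \lambda u$ for some fixed $\lambda$ with $|\lambda| \geq 1$, then
\begin{eqnarray*}
\|h(u)-h(v)\| = |\lambda|\, \|u-v\| \geq \|u-v\|, \quad \forall u,v \in H,
\end{eqnarray*}
so that $h$ is firmly expanding.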
\bigskip
\section{Main Results}
In this section, we use the auxiliary principle technique of Glowinski, Lions and Tr\'{e}moli\`{e}res \cite{4} to study the existence of a solution of the extended general variational inequality \eqref{2.1}.
\vskip .2pc
\begin{theorem}
Let $T$ be strongly monotone with constant $\alpha > 0$ and Lipschitz continuous with constant $\beta > 0$. Let $g$ be a strongly monotone and Lipschitz continuous operator with constants $\sigma > 0$ and $\delta > 0$, respectively. If the operator $h$ is firmly expanding and there exists a constant $\rho > 0$ such that
\begin{eqnarray}\label{3.1}
\left|\rho - \frac{\alpha }{\beta ^2 }\right| < \frac{\sqrt{\alpha ^2 - \beta ^2 k(2-k)}}{\beta ^2}, \quad \alpha > \beta \sqrt{k(2-k)}, \quad k < 1,
\end{eqnarray}
where
\begin{eqnarray}\label{3.2}
\theta = k +\sqrt{1-2\rho \alpha +\rho ^2 \beta ^2 }, \qquad k = \sqrt{1-2 \sigma + \delta ^2},
\end{eqnarray}
then the extended general variational inequality \eqref{2.1} has a unique solution.
\end{theorem}
\medskip
\begin{proof}
We use the auxiliary principle technique to prove the existence of a solution of \eqref{2.1}. For a given $u \in H: g(u) \in K$, we consider the problem of finding a solution $w \in H: h(w) \in K$ such that
\begin{eqnarray}\label{3.3}
\la \rho Tu + h(w)-g(u), g(v)-h(w) \ra \geq 0, \quad \forall v \in H:g(v) \in K\,,
\end{eqnarray}
where $\rho > 0$ is a constant. The inequality of type \eqref{3.3} is called the auxiliary extended general variational inequality associated with the problem \eqref{2.1}. It is clear that the relation \eqref{3.3} defines a mapping $u \mapsto w$. It is enough to show that the mapping $u \mapsto w$ defined by the relation \eqref{3.3} has a unique fixed point belonging to $H$ satisfying the extended general variational inequality \eqref{2.1}. Let $w_1$ and $w_2$ be two solutions of \eqref{3.3} related to $u_1, u_2 \in H$, respectively. It is sufficient to show that, for a well chosen $\rho > 0$,
\begin{eqnarray*}
\|w_1-w_2\| \leq \theta \|u_1-u_2\|\,,
\end{eqnarray*}
with $0 < \theta < 1$, where $\theta$ is independent of $u_1$ and $u_2$. Taking $v = w_2$ (respectively, $w_1$) in \eqref{3.3} related to $u_1$ (respectively, $u_2$) and adding the resulting inequalities, we have
\begin{eqnarray*}
\la h(w_1)-h(w_2), h(w_1)-h(w_2) \ra \leq \la g(u_1)-g(u_2)-\rho (Tu_1-Tu_2), h(w_1)-h( w_2) \ra\,,
\end{eqnarray*}
from which we have
\begin{eqnarray}\label{3.4}
\|h(w_1)-h(w_2)\| & \leq & \|g(u_1)-g(u_2)-\rho (Tu_1-Tu_2 )\| \nonumber \\
& \leq & \|u_1-u_2-(g(u_1)-g(u_2))\| + \|u_1-u_2 - \rho (Tu_1-Tu_2)\|.
\end{eqnarray}
Since $T$ is both strongly monotone and Lipschitz continuous with constants $\alpha > 0$ and $\beta > 0$, respectively, it follows that
\begin{eqnarray}\label{3.5}
\|u_1-u_2 -\rho (Tu_1-Tu_2)\|^2 & = & \|u_1-u_2\|^2 - 2 \rho \la u_1-u_2, Tu_1-Tu_2 \ra \nonumber \\
&& + \rho ^2\|Tu_1-Tu_2\|^2 \nonumber \\
& \leq & \left(1-2\rho \alpha + \rho ^2\beta ^2 \right)\|u_1-u_2\|^2.
\end{eqnarray}
In a similar way, using the strong monotonicity of $g$ with constant $\sigma > 0$ and its Lipschitz continuity with constant $\delta > 0$, we have
\begin{eqnarray}\label{3.6}
\|u_1-u_2-(g(u_1)-g(u_2))\| \leq \sqrt{1-2\sigma + \delta ^2}\|u_1-u_2\|.
\end{eqnarray}
From \eqref{3.4}, \eqref{3.5} and \eqref{3.6}, and using the fact that the operator $h$ is firmly expanding, we have
\begin{eqnarray*}
\|w_1-w_2\| &\leq &\left\{ k + \sqrt{1-2\rho \alpha + \rho ^2\beta ^2} \right\}\|u_1-u_2\| \\
& = & \theta \|u_1-u_2\|,
\end{eqnarray*}
where $\theta$ is given by \eqref{3.2}. From \eqref{3.1}, it follows that $\theta < 1$. Indeed, $\theta < 1$ if and only if $\sqrt{1-2\rho \alpha + \rho ^2\beta ^2} < 1-k$, that is, if and only if $\beta ^2 \rho ^2 - 2\alpha \rho + k(2-k) < 0$, which holds precisely when $\rho$ satisfies \eqref{3.1}. Consequently, the mapping defined by \eqref{3.3} is a contraction and has a unique fixed point, which is the solution of \eqref{2.1}, the required result.
\end{proof}
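\medskip
\begin{remark}
As is standard for the auxiliary principle technique \cite{4}, the proof of Theorem 3.1 also suggests an iterative scheme, which we record here only as a sketch. For a given $u_0 \in H$ with $h(u_0) \in K$, compute $u_{n+1} \in H: h(u_{n+1}) \in K$ from the auxiliary problem \eqref{3.3} with $u = u_n$ and $w = u_{n+1}$, that is,
\begin{eqnarray*}
\la \rho Tu_n + h(u_{n+1})-g(u_n), g(v)-h(u_{n+1}) \ra \geq 0, \quad \forall v \in H: g(v) \in K, \quad n = 0,1,2,\ldots.
\end{eqnarray*}
Under the assumptions of Theorem 3.1, the mapping $u \mapsto w$ defined by \eqref{3.3} is a contraction with constant $\theta < 1$, so the sequence $\{u_n\}$ converges strongly in $H$ to its unique fixed point, that is, to the solution of the extended general variational inequality \eqref{2.1}. The convergence analysis of this and related schemes is not pursued here.
\end{remark}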
\bigskip
\textbf{Acknowledgement.} The author would like to thank Dr. S. M. Junaid Zaidi, Rector, CIIT, for providing excellent research facilities.
\bigskip

\bibliographystyle{amsplain}
\begin{thebibliography}{10}

\bibitem{1} C. Baiocchi and A. Capelo, \textit{Variational and Quasi Variational Inequalities}, J. Wiley and Sons, New York, 1984.

\bibitem{2} G. Cristescu and L. Lupsa, \textit{Non-connected Convexities and Applications}, Kluwer Academic Publishers, Dordrecht, Holland, 2002.

\bibitem{3} F. Giannessi and A. Maugeri, \textit{Variational Inequalities and Network Equilibrium Problems}, Plenum Press, New York, 1995.

\bibitem{4} R. Glowinski, J.L. Lions and R. Tr\'{e}moli\`{e}res, \textit{Numerical Analysis of Variational Inequalities}, North-Holland, Amsterdam, 1981.

\bibitem{5} M. Aslam Noor, \textit{General variational inequalities}, Appl. Math. Letters, \textbf{1} (1988), 119--121.

\bibitem{6} M. Aslam Noor, \textit{Quasi variational inequalities}, Appl. Math. Letters, \textbf{1} (1988), 367--370.

\bibitem{7} M. Aslam Noor, \textit{Wiener--Hopf equations and variational inequalities}, J. Optim. Theory Appl., \textbf{79} (1993), 197--206.

\bibitem{8} M. Aslam Noor, \textit{Some algorithms for general monotone mixed variational inequalities}, Math. Comput. Modelling, \textbf{29} (1999), no. 7, 1--9.

\bibitem{9} M. Aslam Noor, \textit{Some recent advances in variational inequalities, Part I: basic concepts}, New Zealand J. Math., \textbf{26} (1997), 53--80.

\bibitem{10} M. Aslam Noor, \textit{Some recent advances in variational inequalities, Part II: other concepts}, New Zealand J. Math., \textbf{26} (1997), 229--255.

\bibitem{11} M. Aslam Noor, \textit{New approximation schemes for general variational inequalities}, J. Math. Anal. Appl., \textbf{251} (2000), 217--229.

\bibitem{12} M. Aslam Noor, \textit{Some developments in general variational inequalities}, Appl. Math. Comput., \textbf{152} (2004), 199--277.

\bibitem{13} M. Aslam Noor, \textit{Projection-proximal methods for general variational inequalities}, J. Math. Anal. Appl., \textbf{318} (2006), 53--62.

\bibitem{14} M. Aslam Noor, \textit{General variational inequalities and nonexpansive mappings}, J. Math. Anal. Appl., \textbf{331} (2007), 810--822.

\bibitem{15} M. Aslam Noor, \textit{Extended general variational inequalities}, J. Appl. Math. Comput. (to appear).

\bibitem{16} M. Aslam Noor, \textit{Variational Inequalities and Applications}, Lecture Notes, Mathematics Department, COMSATS Institute of Information Technology, Islamabad, Pakistan, 2007.

\bibitem{17} M. Aslam Noor, \textit{Differentiable nonconvex functions and general variational inequalities}, Appl. Math. Comput. (to appear).

\bibitem{18} M. Aslam Noor, K. Inayat Noor and Th. M. Rassias, \textit{Some aspects of variational inequalities}, J. Comput. Appl. Math., \textbf{47} (1993), 285--312.

\bibitem{19} M. Aslam Noor, K. Inayat Noor and Th. M. Rassias, \textit{Set-valued resolvent equations and mixed variational inequalities}, J. Math. Anal. Appl., \textbf{220} (1998), 741--759.

\bibitem{20} M. Patriksson, \textit{Nonlinear Programming and Variational Inequality Problems: A Unified Approach}, Kluwer Academic Publishers, Dordrecht, 1998.

\bibitem{21} E.A. Youness, \textit{$E$-convex sets, $E$-convex functions and $E$-convex programming}, J. Optim. Theory Appl., \textbf{102} (1999), 439--450.

\bibitem{22} G. Stampacchia, \textit{Formes bilin\'{e}aires coercitives sur les ensembles convexes}, C. R. Acad. Sci. Paris, \textbf{258} (1964), 4413--4416.

\end{thebibliography}
\end{document}